Import Cronet version 121.0.6103.2

FolderOrigin-RevId: /tmp/copybara-origin/src
Change-Id: I690becfaba7ad4293eba08b4f9d1aa7f953fce20
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 6e6f24e..8f12d13 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -48,11 +48,6 @@
   import("//third_party/protobuf/proto_library.gni")
 }
 
-if (is_apple) {
-  # Buildflags to control time behavior on iOS in file shared with mac.
-  import("//base/time/buildflags/buildflags.gni")
-}
-
 if (is_win) {
   import("//build/config/win/control_flow_guard.gni")
 }
@@ -93,14 +88,6 @@
 assert(!is_win || is_clang,
        "only clang-cl is supported on Windows, see https://crbug.com/988071")
 
-if (is_apple) {
-  assert(!use_blink || enable_mach_absolute_time_ticks,
-         "use_blink requires mach absolute time ticks")
-
-  assert(!is_mac || enable_mach_absolute_time_ticks,
-         "mac requires mach absolute time ticks")
-}
-
 # Determines whether libevent should be dep.
 dep_libevent = !is_fuchsia && !is_win && !is_mac && !is_nacl
 
@@ -109,10 +96,11 @@
 
 if (is_android || is_robolectric) {
   import("//build/config/android/rules.gni")
+  import("//third_party/jni_zero/jni_zero.gni")
 }
 
 if (is_fuchsia) {
-  import("//third_party/fuchsia-sdk/sdk/build/fidl_library.gni")
+  import("//third_party/fuchsia-gn-sdk/src/fidl_library.gni")
 }
 
 # The Rust implementation of base::JSONReader. NaCl depends on base and doesn't
@@ -302,10 +290,10 @@
     "containers/linked_list.cc",
     "containers/linked_list.h",
     "containers/lru_cache.h",
+    "containers/map_util.h",
     "containers/small_map.h",
     "containers/span.h",
     "containers/stack.h",
-    "containers/stack_container.h",
     "containers/unique_ptr_adapters.h",
     "containers/util.h",
     "containers/vector_buffer.h",
@@ -342,6 +330,8 @@
     "features.cc",
     "features.h",
     "file_version_info.h",
+    "files/block_tests_writing_to_special_dirs.cc",
+    "files/block_tests_writing_to_special_dirs.h",
     "files/dir_reader_fallback.h",
     "files/file.cc",
     "files/file.h",
@@ -434,6 +424,7 @@
     "memory/raw_ptr_exclusion.h",
     "memory/raw_ref.h",
     "memory/raw_scoped_refptr_mismatch_checker.h",
+    "memory/raw_span.h",
     "memory/read_only_shared_memory_region.cc",
     "memory/read_only_shared_memory_region.h",
     "memory/ref_counted.cc",
@@ -443,6 +434,7 @@
     "memory/ref_counted_memory.h",
     "memory/safe_ref.h",
     "memory/safe_ref_traits.h",
+    "memory/safety_checks.h",
     "memory/scoped_policy.h",
     "memory/scoped_refptr.h",
     "memory/shared_memory_hooks.h",
@@ -551,8 +543,6 @@
     "power_monitor/battery_level_provider.h",
     "power_monitor/battery_state_sampler.cc",
     "power_monitor/battery_state_sampler.h",
-    "power_monitor/moving_average.cc",
-    "power_monitor/moving_average.h",
     "power_monitor/power_monitor.cc",
     "power_monitor/power_monitor.h",
     "power_monitor/power_monitor_device_source.cc",
@@ -806,6 +796,7 @@
     "task/thread_pool/pooled_task_runner_delegate.h",
     "task/thread_pool/priority_queue.cc",
     "task/thread_pool/priority_queue.h",
+    "task/thread_pool/semaphore.h",
     "task/thread_pool/sequence.cc",
     "task/thread_pool/sequence.h",
     "task/thread_pool/service_thread.cc",
@@ -834,7 +825,6 @@
     "task/thread_pool/worker_thread_set.h",
     "task/updateable_sequenced_task_runner.h",
     "template_util.h",
-    "test/malloc_wrapper.h",
     "test/scoped_logging_settings.h",
     "test/spin_wait.h",
     "third_party/cityhash/city.cc",
@@ -901,8 +891,6 @@
     "time/time_delta_from_string.h",
     "time/time_override.cc",
     "time/time_override.h",
-    "time/time_to_iso8601.cc",
-    "time/time_to_iso8601.h",
     "timer/elapsed_timer.cc",
     "timer/elapsed_timer.h",
     "timer/hi_res_timer_manager.h",
@@ -932,6 +920,7 @@
     "types/expected.h",
     "types/expected_internal.h",
     "types/expected_macros.h",
+    "types/fixed_array.h",
     "types/id_type.h",
     "types/optional_ref.h",
     "types/optional_util.h",
@@ -968,6 +957,13 @@
     #"system/sys_info_openbsd.cc",
   ]
 
+  if (is_apple || current_os == "freebsd" || current_os == "openbsd") {
+    sources += [
+      "posix/sysctl.cc",
+      "posix/sysctl.h",
+    ]
+  }
+
   if (is_posix) {
     sources += [
       "debug/debugger_posix.cc",
@@ -1058,10 +1054,6 @@
   # to provide the appropriate `#define` here.
   defines += [ "IS_RAW_PTR_IMPL" ]
 
-  if (is_apple) {
-    deps += [ "//base/time/buildflags:buildflags" ]
-  }
-
   if (build_rust_json_reader) {
     deps += [ "//third_party/rust/serde_json_lenient/v0_1/wrapper" ]
   }
@@ -1088,6 +1080,7 @@
     ":synchronization_buildflags",
     ":tracing_buildflags",
     "//base/allocator/partition_allocator:buildflags",
+    "//base/allocator/partition_allocator:raw_ptr",
     "//base/numerics:base_numerics",
     "//build:chromecast_buildflags",
     "//build:chromeos_buildflags",
@@ -1119,7 +1112,7 @@
 
   if (use_allocator_shim) {
     if (is_apple) {
-      sources += [ "allocator/early_zone_registration_mac.h" ]
+      sources += [ "allocator/early_zone_registration_apple.h" ]
     }
   }
 
@@ -1129,6 +1122,9 @@
       "test/clang_profiling.cc",
       "test/clang_profiling.h",
     ]
+    if (is_android) {
+      sources += [ "test/clang_profiling_android.cc" ]
+    }
   }
 
   # Allow more direct string conversions on platforms with native utf8
@@ -1205,6 +1201,8 @@
       "android/scoped_hardware_buffer_fence_sync.h",
       "android/scoped_hardware_buffer_handle.cc",
       "android/scoped_hardware_buffer_handle.h",
+      "android/shared_preferences/shared_preferences_manager.cc",
+      "android/shared_preferences/shared_preferences_manager.h",
       "android/statistics_recorder_android.cc",
       "android/sys_utils.cc",
       "android/sys_utils.h",
@@ -1251,6 +1249,7 @@
     ]
 
     deps += [
+      ":base_shared_preferences_jni",
       "//third_party/ashmem",
       "//third_party/cpu_features:ndk_compat",
     ]
@@ -1323,8 +1322,6 @@
       "android/jni_android.h",
       "android/jni_array.cc",
       "android/jni_array.h",
-      "android/jni_generator/jni_generator_helper.h",
-      "android/jni_int_wrapper.h",
       "android/jni_registrar.cc",
       "android/jni_registrar.h",
       "android/jni_string.cc",
@@ -1530,6 +1527,7 @@
       "rand_util_nacl.cc",
       "sync_socket_nacl.cc",
       "system/sys_info_nacl.cc",
+      "task/thread_pool/semaphore/semaphore_default.cc",
       "threading/platform_thread_linux_base.cc",
       "threading/platform_thread_nacl.cc",
     ]
@@ -1555,6 +1553,8 @@
       "files/important_file_writer_cleaner.h",
       "files/scoped_temp_dir.cc",
       "files/scoped_temp_dir.h",
+      "files/scoped_temp_file.cc",
+      "files/scoped_temp_file.h",
       "json/json_file_value_serializer.cc",
       "json/json_file_value_serializer.h",
       "memory/discardable_memory.cc",
@@ -1607,6 +1607,10 @@
       ]
     }
 
+    if (is_linux || is_chromeos || is_android || is_fuchsia) {
+      sources += [ "task/thread_pool/semaphore/semaphore_posix.cc" ]
+    }
+
     if (is_posix) {
       sources += [
         "base_paths_posix.h",
@@ -1694,6 +1698,8 @@
       # Add stuff that doesn't work in NaCl.
       sources += [
         # PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
+        "allocator/miracle_parameter.cc",
+        "allocator/miracle_parameter.h",
         "allocator/partition_alloc_features.cc",
         "allocator/partition_alloc_features.h",
         "allocator/partition_alloc_support.cc",
@@ -1739,6 +1745,7 @@
       "memory/platform_shared_memory_region_win.cc",
       "message_loop/message_pump_win.cc",
       "message_loop/message_pump_win.h",
+      "moving_window.h",
       "native_library_win.cc",
       "power_monitor/battery_level_provider_win.cc",
       "power_monitor/power_monitor_device_source_win.cc",
@@ -1777,6 +1784,7 @@
       "synchronization/waitable_event_watcher_win.cc",
       "synchronization/waitable_event_win.cc",
       "system/sys_info_win.cc",
+      "task/thread_pool/semaphore/semaphore_win.cc",
       "threading/platform_thread_win.cc",
       "threading/platform_thread_win.h",
       "threading/thread_local_storage_win.cc",
@@ -1836,6 +1844,8 @@
       "win/reference.h",
       "win/registry.cc",
       "win/registry.h",
+      "win/resource_exhaustion.cc",
+      "win/resource_exhaustion.h",
       "win/resource_util.cc",
       "win/resource_util.h",
       "win/scoped_bstr.cc",
@@ -1885,6 +1895,8 @@
       "win/win_util.cc",
       "win/win_util.h",
       "win/wincrypt_shim.h",
+      "win/window_enumerator.cc",
+      "win/window_enumerator.h",
       "win/windows_defines.inc",
       "win/windows_h_disallowed.h",
       "win/windows_types.h",
@@ -1946,43 +1958,47 @@
       "apple/bridging.h",
       "apple/bundle_locations.h",
       "apple/bundle_locations.mm",
+      "apple/call_with_eh_frame.cc",
+      "apple/call_with_eh_frame.h",
+      "apple/call_with_eh_frame_asm.S",
+      "apple/dispatch_source_mach.cc",
+      "apple/dispatch_source_mach.h",
+      "apple/foundation_util.h",
+      "apple/foundation_util.mm",
+      "apple/mach_logging.cc",
+      "apple/mach_logging.h",
+      "apple/osstatus_logging.h",
+      "apple/osstatus_logging.mm",
       "apple/owned_objc.h",
       "apple/owned_objc.mm",
-      "file_version_info_mac.h",
-      "file_version_info_mac.mm",
-      "files/file_util_mac.mm",
-      "mac/call_with_eh_frame.cc",
-      "mac/call_with_eh_frame.h",
-      "mac/call_with_eh_frame_asm.S",
-      "mac/dispatch_source_mach.cc",
-      "mac/dispatch_source_mach.h",
-      "mac/foundation_util.h",
-      "mac/foundation_util.mm",
-      "mac/mac_logging.h",
-      "mac/mac_logging.mm",
-      "mac/mach_logging.cc",
-      "mac/mach_logging.h",
-      "mac/scoped_mach_port.cc",
-      "mac/scoped_mach_port.h",
-      "mac/scoped_mach_vm.cc",
-      "mac/scoped_mach_vm.h",
-      "mac/scoped_nsautorelease_pool.cc",
-      "mac/scoped_nsautorelease_pool.h",
-      "mac/scoped_nsobject.h",
-      "mac/scoped_objc_class_swizzler.h",
-      "mac/scoped_objc_class_swizzler.mm",
-      "mac/scoped_typeref.h",
-      "memory/platform_shared_memory_mapper_mac.cc",
-      "memory/platform_shared_memory_region_mac.cc",
-      "message_loop/message_pump_mac.h",
-      "message_loop/message_pump_mac.mm",
+      "apple/scoped_cffiledescriptorref.h",
+      "apple/scoped_cftyperef.h",
+      "apple/scoped_dispatch_object.h",
+      "apple/scoped_mach_port.cc",
+      "apple/scoped_mach_port.h",
+      "apple/scoped_mach_vm.cc",
+      "apple/scoped_mach_vm.h",
+      "apple/scoped_nsautorelease_pool.h",
+      "apple/scoped_nsautorelease_pool.mm",
+      "apple/scoped_nsobject.h",
+      "apple/scoped_objc_class_swizzler.h",
+      "apple/scoped_objc_class_swizzler.mm",
+      "apple/scoped_typeref.h",
+      "file_version_info_apple.h",
+      "file_version_info_apple.mm",
+      "files/file_util_apple.mm",
+      "memory/platform_shared_memory_mapper_apple.cc",
+      "memory/platform_shared_memory_region_apple.cc",
+      "message_loop/message_pump_apple.h",
+      "message_loop/message_pump_apple.mm",
       "process/process_metrics_apple.cc",
-      "profiler/module_cache_mac.cc",
-      "strings/sys_string_conversions_mac.mm",
-      "synchronization/waitable_event_mac.cc",
+      "profiler/module_cache_apple.cc",
+      "strings/sys_string_conversions_apple.mm",
+      "synchronization/waitable_event_apple.cc",
       "system/sys_info_apple.mm",
-      "threading/platform_thread_mac.mm",
-      "time/time_mac.mm",
+      "task/thread_pool/semaphore/semaphore_apple.cc",
+      "threading/platform_thread_apple.mm",
+      "time/time_apple.mm",
     ]
     frameworks += [
       "CoreFoundation.framework",
@@ -2009,6 +2025,7 @@
       "mac/close_nocancel.cc",
       "mac/launch_application.h",
       "mac/launch_application.mm",
+      "mac/launch_services_spi.h",
       "mac/launchd.cc",
       "mac/launchd.h",
       "mac/mac_util.h",
@@ -2020,9 +2037,6 @@
       "mac/scoped_aedesc.h",
       "mac/scoped_authorizationref.h",
       "mac/scoped_authorizationref.mm",
-      "mac/scoped_cffiledescriptorref.h",
-      "mac/scoped_cftyperef.h",
-      "mac/scoped_dispatch_object.h",
       "mac/scoped_ionotificationportref.h",
       "mac/scoped_ioobject.h",
       "mac/scoped_ioplugininterface.h",
@@ -2030,7 +2044,6 @@
       "mac/scoped_mach_msg_destroy.h",
       "mac/scoped_sending_event.h",
       "mac/scoped_sending_event.mm",
-      "mac/wrap_cg_display.h",
       "message_loop/message_pump_kqueue.cc",
       "message_loop/message_pump_kqueue.h",
       "native_library_mac.mm",
@@ -2667,11 +2680,11 @@
   defines = [ "BASE_I18N_IMPLEMENTATION" ]
   configs += [ "//build/config/compiler:wexit_time_destructors" ]
   public_deps = [
+    ":base",
     "//third_party/ced",
     "//third_party/icu",
   ]
   deps = [
-    ":base",
     "//base/third_party/dynamic_annotations",
     "//build:chromecast_buildflags",
     "//build:chromeos_buildflags",
@@ -2694,8 +2707,15 @@
   }
 }
 
+executable("containers_memory_benchmark") {
+  sources = [ "containers/containers_memory_benchmark.cc" ]
+  deps = [ ":base" ]
+  testonly = true
+}
+
 test("base_perftests") {
   sources = [
+    "big_endian_perftest.cc",
     "hash/hash_perftest.cc",
     "json/json_perftest.cc",
     "message_loop/message_pump_perftest.cc",
@@ -2726,8 +2746,8 @@
 
   if (use_partition_alloc) {
     sources += [
-      "allocator/partition_allocator/partition_alloc_perftest.cc",
-      "allocator/partition_allocator/partition_lock_perftest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_perftest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_lock_perftest.cc",
     ]
     deps += [ ":partition_alloc_test_support" ]
   }
@@ -2858,6 +2878,8 @@
       "profiler/libunwindstack_unwinder_android.h",
       "profiler/native_unwinder_android.cc",
       "profiler/native_unwinder_android.h",
+      "profiler/native_unwinder_android_memory_regions_map_impl.cc",
+      "profiler/native_unwinder_android_memory_regions_map_impl.h",
     ]
 
     public_deps = [
@@ -2950,7 +2972,7 @@
 if (is_apple) {
   source_set("base_unittests_noarc") {
     testonly = true
-    sources = [ "mac/scoped_nsobject_unittest.mm" ]
+    sources = [ "apple/scoped_nsobject_unittest.mm" ]
 
     # Do not compile with ARC because this target has tests for code that does
     # not compile with ARC.
@@ -3044,8 +3066,8 @@
 
   if (target_cpu == "arm64" && (is_linux || is_android)) {
     sources = [
-      "allocator/partition_allocator/arm_bti_test_functions.S",
-      "allocator/partition_allocator/arm_bti_test_functions.h",
+      "allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.S",
+      "allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.h",
     ]
   }
 }
@@ -3078,9 +3100,9 @@
     "allocator/dispatcher/testing/observer_mock.h",
     "allocator/dispatcher/testing/tools.h",
     "allocator/dispatcher/tls_unittest.cc",
-    "allocator/partition_allocator/pointers/raw_ptr_test_support.h",
-    "allocator/partition_allocator/pointers/raw_ptr_unittest.cc",
-    "allocator/partition_allocator/pointers/raw_ref_unittest.cc",
+    "allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_test_support.h",
+    "allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.cc",
+    "allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.cc",
     "at_exit_unittest.cc",
     "atomicops_unittest.cc",
     "auto_reset_unittest.cc",
@@ -3116,9 +3138,9 @@
     "containers/intrusive_heap_unittest.cc",
     "containers/linked_list_unittest.cc",
     "containers/lru_cache_unittest.cc",
+    "containers/map_util_unittest.cc",
     "containers/small_map_unittest.cc",
     "containers/span_unittest.cc",
-    "containers/stack_container_unittest.cc",
     "containers/unique_ptr_adapters_unittest.cc",
     "containers/vector_buffer_unittest.cc",
     "cpu_unittest.cc",
@@ -3132,6 +3154,7 @@
     "debug/task_trace_unittest.cc",
     "environment_unittest.cc",
     "feature_list_unittest.cc",
+    "files/block_tests_writing_to_special_dirs_unittest.cc",
     "files/file_enumerator_unittest.cc",
     "files/file_error_or_unittest.cc",
     "files/file_path_unittest.cc",
@@ -3144,6 +3167,7 @@
     "files/memory_mapped_file_unittest.cc",
     "files/safe_base_name_unittest.cc",
     "files/scoped_temp_dir_unittest.cc",
+    "files/scoped_temp_file_unittest.cc",
     "functional/bind_unittest.cc",
     "functional/callback_helpers_unittest.cc",
     "functional/callback_unittest.cc",
@@ -3190,9 +3214,12 @@
     "memory/platform_shared_memory_region_unittest.cc",
     "memory/ptr_util_unittest.cc",
     "memory/raw_ptr_asan_unittest.cc",
+    "memory/raw_ptr_chromium_unittest.cc",
+    "memory/raw_span_unittest.cc",
     "memory/ref_counted_memory_unittest.cc",
     "memory/ref_counted_unittest.cc",
     "memory/safe_ref_unittest.cc",
+    "memory/safety_checks_unitttest.cc",
     "memory/shared_memory_hooks_unittest.cc",
     "memory/shared_memory_mapping_unittest.cc",
     "memory/shared_memory_region_unittest.cc",
@@ -3229,6 +3256,7 @@
     "metrics/sparse_histogram_unittest.cc",
     "metrics/statistics_recorder_starvation_unittest.cc",
     "metrics/statistics_recorder_unittest.cc",
+    "moving_window_unittest.cc",
     "native_library_unittest.cc",
     "no_destructor_unittest.cc",
     "observer_list_threadsafe_unittest.cc",
@@ -3238,7 +3266,6 @@
     "pickle_unittest.cc",
     "power_monitor/battery_level_provider_unittest.cc",
     "power_monitor/battery_state_sampler_unittest.cc",
-    "power_monitor/moving_average_unittest.cc",
     "power_monitor/power_monitor_device_source_unittest.cc",
     "power_monitor/power_monitor_unittest.cc",
     "power_monitor/timer_sampling_event_source_unittest.cc",
@@ -3340,6 +3367,7 @@
     "task/thread_pool/job_task_source_unittest.cc",
     "task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc",
     "task/thread_pool/priority_queue_unittest.cc",
+    "task/thread_pool/semaphore/semaphore_unittest.cc",
     "task/thread_pool/sequence_unittest.cc",
     "task/thread_pool/service_thread_unittest.cc",
     "task/thread_pool/task_source_sort_key_unittest.cc",
@@ -3420,6 +3448,7 @@
     "types/cxx23_to_underlying_unittest.cc",
     "types/expected_macros_unittest.cc",
     "types/expected_unittest.cc",
+    "types/fixed_array_unittest.cc",
     "types/id_type_unittest.cc",
     "types/optional_ref_unittest.cc",
     "types/optional_unittest.cc",
@@ -3510,7 +3539,9 @@
   ]
 
   if (!is_ios) {
-    sources += [ "allocator/partition_allocator/tagging_unittest.cc" ]
+    sources += [
+      "allocator/partition_allocator/src/partition_alloc/tagging_unittest.cc",
+    ]
 
     data_deps += [ "//base/test:test_proto_descriptor" ]
     data += [ "$root_gen_dir/base/test/test_proto.descriptor" ]
@@ -3571,6 +3602,7 @@
       "win/vector_unittest.cc",
       "win/win_includes_unittest.cc",
       "win/win_util_unittest.cc",
+      "win/window_enumerator_unittest.cc",
       "win/windows_version_unittest.cc",
       "win/winrt_storage_util_unittest.cc",
       "win/wmi_unittest.cc",
@@ -3701,6 +3733,7 @@
     deps += [
       ":base_java_unittest_support",
       "//base/test:test_support_java",
+      "//third_party/jni_zero",
     ]
   }
 
@@ -3708,36 +3741,44 @@
     sources += [ "debug/allocation_trace_unittest.cc" ]
   }
 
+  if (is_apple || current_os == "freebsd" || current_os == "openbsd") {
+    sources += [ "posix/sysctl_unittest.cc" ]
+  }
   if (is_apple) {
     sources += [
       "apple/backup_util_unittest.mm",
-      "mac/bind_objc_block_unittest_arc.mm",
-      "mac/foundation_util_unittest.mm",
-      "strings/sys_string_conversions_mac_unittest.mm",
+      "apple/bind_objc_block_unittest.mm",
+      "apple/dispatch_source_mach_unittest.cc",
+      "apple/foundation_util_unittest.mm",
+      "strings/sys_string_conversions_apple_unittest.mm",
     ]
   }
 
-  if (is_apple && enable_mach_absolute_time_ticks) {
-    sources += [ "time/time_mac_unittest.mm" ]
+  if (is_apple) {
+    sources += [ "time/time_apple_unittest.mm" ]
   }
 
   if (is_mac) {
     sources += [
-      "allocator/partition_allocator/shim/allocator_interception_mac_unittest.mm",
-      "allocator/partition_allocator/shim/malloc_zone_functions_mac_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple_unittest.mm",
+      "allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple_unittest.cc",
+      "apple/call_with_eh_frame_unittest.mm",
+      "apple/scoped_cftyperef_unittest.cc",
+      "apple/scoped_mach_vm_unittest.cc",
+      "apple/scoped_nsautorelease_pool_unittest.mm",
+      "apple/scoped_objc_class_swizzler_unittest.mm",
       "enterprise_util_mac_unittest.mm",
-      "mac/call_with_eh_frame_unittest.mm",
-      "mac/dispatch_source_mach_unittest.cc",
+      "mac/launch_application_unittest.mm",
       "mac/mac_util_unittest.mm",
       "mac/mach_port_rendezvous_unittest.cc",
-      "mac/scoped_mach_vm_unittest.cc",
-      "mac/scoped_objc_class_swizzler_unittest.mm",
       "mac/scoped_sending_event_unittest.mm",
-      "message_loop/message_pump_mac_unittest.mm",
+      "message_loop/message_pump_apple_unittest.mm",
       "power_monitor/thermal_state_observer_mac_unittest.mm",
       "process/memory_unittest_mac.h",
       "process/memory_unittest_mac.mm",
+      "system/sys_info_mac_unittest.mm",
     ]
+    data_deps += [ "//base/mac:launch_application_test_helper" ]
   }
 
   if (is_ios) {
@@ -3779,8 +3820,8 @@
 
     if (use_allocator_shim) {
       sources += [
-        "allocator/partition_allocator/shim/allocator_interception_mac_unittest.mm",
-        "allocator/partition_allocator/shim/malloc_zone_functions_mac_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple_unittest.mm",
+        "allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple_unittest.cc",
       ]
     }
 
@@ -3791,35 +3832,38 @@
 
   if (use_partition_alloc) {
     sources += [
-      "allocator/partition_allocator/address_pool_manager_unittest.cc",
-      "allocator/partition_allocator/address_space_randomization_unittest.cc",
-      "allocator/partition_allocator/compressed_pointer_unittest.cc",
-      "allocator/partition_allocator/freeslot_bitmap_unittest.cc",
-      "allocator/partition_allocator/hardening_unittest.cc",
-      "allocator/partition_allocator/memory_reclaimer_unittest.cc",
-      "allocator/partition_allocator/page_allocator_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/bits_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/component_export_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/cpu_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/logging_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/rand_util_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/strings/stringprintf_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.cc",
-      "allocator/partition_allocator/partition_alloc_unittest.cc",
-      "allocator/partition_allocator/partition_lock_unittest.cc",
-      "allocator/partition_allocator/reverse_bytes_unittest.cc",
-      "allocator/partition_allocator/thread_cache_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/address_pool_manager_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/address_space_randomization_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/compressed_pointer_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/hardening_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/lightweight_quarantine_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/memory_reclaimer_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/page_allocator_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_lock_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/reverse_bytes_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/thread_cache_unittest.cc",
     ]
 
     if (use_starscan) {
       sources += [
-        "allocator/partition_allocator/starscan/pcscan_scheduling_unittest.cc",
-        "allocator/partition_allocator/starscan/pcscan_unittest.cc",
-        "allocator/partition_allocator/starscan/scan_loop_unittest.cc",
-        "allocator/partition_allocator/starscan/stack/stack_unittest.cc",
-        "allocator/partition_allocator/starscan/state_bitmap_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/starscan/pcscan_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/starscan/scan_loop_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/starscan/stack/stack_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap_unittest.cc",
       ]
     }
 
@@ -3834,19 +3878,18 @@
 
     if (is_android) {
       sources += [
-        "allocator/partition_allocator/partition_alloc_base/files/file_path_pa_unittest.cc",
-        "allocator/partition_allocator/partition_alloc_base/native_library_pa_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path_pa_unittest.cc",
+        "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_pa_unittest.cc",
       ]
     } else if (is_fuchsia) {
-      sources += [ "allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc" ]
+      sources += [ "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc" ]
     }
 
     if (enable_pkeys && is_debug && !is_component_build) {
       # This test requires RELRO, which is not enabled in component builds.
       # Also, require a debug build, since we only disable stack protectors in
       # debug builds in partition alloc (see below why it's needed).
-      sources +=
-          [ "allocator/partition_allocator/thread_isolation/pkey_unittest.cc" ]
+      sources += [ "allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey_unittest.cc" ]
 
       # We want to test the pkey code without access to memory that is not
       # pkey-tagged. This will allow us to catch unintended memory accesses
@@ -3980,7 +4023,13 @@
       sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
       deps += [ ":base_profiler_test_support_library" ]
     }
-    sources += [ "files/os_validation_win_unittest.cc" ]
+
+    # These tests are not built by default because they make heavy use of
+    # parameterized tests. Even when disabled, they incur a startup penalty on
+    # base_unittests. See the comments in the file for why they are disabled
+    # by default, and https://crbug.com/1475518 for one consequence of the
+    # process startup penalty.
+    # sources += [ "files/os_validation_win_unittest.cc" ]
 
     if (toolchain_has_rust && win_enable_cfg_guards) {
       deps += [ ":rust_cfg_win_test" ]
@@ -3996,19 +4045,18 @@
 
   if (use_allocator_shim) {
     sources += [
-      "allocator/partition_allocator/shim/allocator_shim_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_unittest.cc",
       "sampling_heap_profiler/poisson_allocation_sampler_unittest.cc",
       "sampling_heap_profiler/sampling_heap_profiler_unittest.cc",
     ]
 
     if (is_win) {
-      sources +=
-          [ "allocator/partition_allocator/shim/winheap_stubs_win_unittest.cc" ]
+      sources += [ "allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win_unittest.cc" ]
     }
 
     sources += [
       "allocator/partition_alloc_support_unittest.cc",
-      "allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc",
+      "allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc",
     ]
   }
 
@@ -4046,6 +4094,10 @@
       "//third_party/perfetto/protos/perfetto/trace/track_event:lite",
     ]
   }
+
+  if (enable_nocompile_tests_new) {
+    deps += [ ":base_nocompile_tests_new" ]
+  }
 }
 
 # Test that CFG is enabled in Rust code.
@@ -4072,13 +4124,25 @@
   ]
 }
 
+if (enable_nocompile_tests_new) {
+  nocompile_source_set("base_nocompile_tests_new") {
+    sources = [
+      "callback_list_nocompile.nc",
+      "memory/weak_ptr_nocompile.nc",
+      "no_destructor_nocompile.nc",
+      "strings/stringprintf_nocompile.nc",
+      "values_nocompile.nc",
+    ]
+    deps = [ ":base" ]
+  }
+}
+
 if (enable_nocompile_tests) {
   nocompile_test("base_nocompile_tests") {
     sources = [
-      "allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.nc",
-      "allocator/partition_allocator/pointers/raw_ptr_unittest.nc",
-      "allocator/partition_allocator/pointers/raw_ref_unittest.nc",
-      "callback_list_unittest.nc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.nc",
+      "allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.nc",
+      "allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.nc",
       "containers/buffer_iterator_unittest.nc",
       "containers/checked_iterators_unittest.nc",
       "containers/contains_unittest.nc",
@@ -4090,10 +4154,8 @@
       "functional/function_ref_unittest.nc",
       "functional/overloaded_unittest.nc",
       "memory/ref_counted_unittest.nc",
-      "memory/weak_ptr_unittest.nc",
       "metrics/field_trial_params_unittest.nc",
       "metrics/histogram_unittest.nc",
-      "no_destructor_unittest.nc",
       "observer_list_unittest.nc",
       "sequence_checker_unittest.nc",
       "strings/string_piece_unittest.nc",
@@ -4103,7 +4165,6 @@
       "traits_bag_unittest.nc",
       "types/pass_key_unittest.nc",
       "types/variant_util_unittest.nc",
-      "values_unittest.nc",
     ]
 
     deps = [
@@ -4113,8 +4174,13 @@
     ]
 
     # Since the nocompile test is not able to inspect the contents of
-    # include_dirs, we need to specify the Abseil include dir manually.
-    include_dirs = [ "//third_party/abseil-cpp" ]
+    # include_dirs, we need to specify the Abseil and PartitionAlloc include
+    # dirs manually.
+    include_dirs = [
+      "${root_gen_dir}/base/allocator/partition_allocator/src",
+      "//base/allocator/partition_allocator/src",
+      "//third_party/abseil-cpp",
+    ]
   }
 }
 
@@ -4165,6 +4231,12 @@
       "android/java/src/org/chromium/base/task/TaskRunnerImpl.java",
     ]
 
+    if (use_clang_profiling) {
+      sources += [
+        "test/android/javatests/src/org/chromium/base/test/ClangProfiler.java",
+      ]
+    }
+
     public_deps = [
       ":android_runtime_jni_headers",
       ":android_runtime_unchecked_jni_headers",
@@ -4172,7 +4244,10 @@
   }
 
   generate_jar_jni("android_runtime_jni_headers") {
-    classes = [ "java/lang/Runtime.class" ]
+    classes = [
+      "java/lang/Runtime.class",
+      "java/lang/Throwable.class",
+    ]
   }
 
   generate_jar_jni("android_runtime_unchecked_jni_headers") {
@@ -4183,32 +4258,27 @@
   generate_jni("process_launcher_jni") {
     sources = [ "android/java/src/org/chromium/base/process_launcher/ChildProcessService.java" ]
   }
+
+  generate_jni("base_shared_preferences_jni") {
+    sources = [ "android/java/src/org/chromium/base/shared_preferences/SharedPreferencesManager.java" ]
+  }
 }  # is_android || is_robolectric
 
 if (is_android) {
-  java_library("jni_java") {
-    supports_android = true
-    sources = [
-      "android/java/src/org/chromium/base/JniException.java",
-      "android/java/src/org/chromium/base/JniStaticTestMocker.java",
-      "android/java/src/org/chromium/base/NativeLibraryLoadedStatus.java",
-      "android/java/src/org/chromium/base/annotations/AccessedByNative.java",
-      "android/java/src/org/chromium/base/annotations/CalledByNative.java",
-      "android/java/src/org/chromium/base/annotations/CalledByNativeForTesting.java",
-      "android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java",
-      "android/java/src/org/chromium/base/annotations/JNINamespace.java",
-      "android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
-      "android/java/src/org/chromium/base/annotations/NativeMethods.java",
+  # TODO(smaier): delete this once deps have been correctly added everywhere.
+  java_group("jni_java") {
+    public_deps = [
+      # Public because @CheckDiscard is added to generated *Jni.java files.
+      "//build/android:build_java",
+      "//third_party/jni_zero:jni_zero_java",
     ]
-
-    # Public because @CheckDiscard is added to generated *Jni.java files.
-    public_deps = [ "//build/android:build_java" ]
   }
 
   java_cpp_features("java_features_srcjar") {
     # External code should depend on ":base_java" instead.
     visibility = [ ":*" ]
     sources = [
+      "allocator/partition_alloc_features.cc",
       "android/base_features.cc",
       "features.cc",
       "task/task_features.cc",
@@ -4267,10 +4337,10 @@
       "//build/android:build_java",
       "//third_party/android_deps:com_google_code_findbugs_jsr305_java",
       "//third_party/android_deps:com_google_errorprone_error_prone_annotations_java",
+      "//third_party/android_deps:guava_android_java",
       "//third_party/androidx:androidx_annotation_annotation_experimental_java",
       "//third_party/androidx:androidx_annotation_annotation_java",
-      "//third_party/androidx:androidx_collection_collection_java",
-      "//third_party/androidx:androidx_core_core_java",
+      "//third_party/jni_zero:jni_zero_java",
     ]
 
     sources = [
@@ -4300,6 +4370,7 @@
       "android/java/src/org/chromium/base/Flag.java",
       "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
       "android/java/src/org/chromium/base/IntStringCallback.java",
+      "android/java/src/org/chromium/base/IntentUtils.java",
       "android/java/src/org/chromium/base/JNIUtils.java",
       "android/java/src/org/chromium/base/JavaExceptionReporter.java",
       "android/java/src/org/chromium/base/JavaHandlerThread.java",
@@ -4344,8 +4415,17 @@
       "android/java/src/org/chromium/base/compat/ApiHelperForS.java",
       "android/java/src/org/chromium/base/jank_tracker/FrameMetricsListener.java",
       "android/java/src/org/chromium/base/jank_tracker/FrameMetricsStore.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankActivityTracker.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankEndScenarioTime.java",
       "android/java/src/org/chromium/base/jank_tracker/JankMetricUMARecorder.java",
       "android/java/src/org/chromium/base/jank_tracker/JankMetrics.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankReportingRunnable.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankReportingScheduler.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankScenario.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankTracker.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankTrackerImpl.java",
+      "android/java/src/org/chromium/base/jank_tracker/JankTrackerStateController.java",
+      "android/java/src/org/chromium/base/jank_tracker/PlaceholderJankTracker.java",
       "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
       "android/java/src/org/chromium/base/library_loader/LibraryPrefetcher.java",
       "android/java/src/org/chromium/base/library_loader/Linker.java",
@@ -4373,12 +4453,17 @@
       "android/java/src/org/chromium/base/metrics/UmaRecorder.java",
       "android/java/src/org/chromium/base/metrics/UmaRecorderHolder.java",
       "android/java/src/org/chromium/base/supplier/DestroyableObservableSupplier.java",
+      "android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java",
+      "android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java",
       "android/java/src/org/chromium/base/supplier/ObservableSupplier.java",
       "android/java/src/org/chromium/base/supplier/ObservableSupplierImpl.java",
       "android/java/src/org/chromium/base/supplier/OneShotCallback.java",
       "android/java/src/org/chromium/base/supplier/OneshotSupplier.java",
       "android/java/src/org/chromium/base/supplier/OneshotSupplierImpl.java",
       "android/java/src/org/chromium/base/supplier/Supplier.java",
+      "android/java/src/org/chromium/base/supplier/SyncOneshotSupplier.java",
+      "android/java/src/org/chromium/base/supplier/SyncOneshotSupplierImpl.java",
+      "android/java/src/org/chromium/base/supplier/TransitiveObservableSupplier.java",
       "android/java/src/org/chromium/base/supplier/UnownedUserDataSupplier.java",
       "android/java/src/org/chromium/base/task/AsyncTask.java",
       "android/java/src/org/chromium/base/task/BackgroundOnlyAsyncTask.java",
@@ -4397,8 +4482,12 @@
       "android/java/src/org/chromium/base/task/UiThreadTaskExecutor.java",
     ]
 
-    if (!is_cronet_build) {
-      sources += [ "android/java/src/org/chromium/base/IntentUtils.java" ]
+    if (use_clang_profiling) {
+      sources += [
+        "test/android/javatests/src/org/chromium/base/test/ClangProfiler.java",
+      ]
+    } else {
+      sources += [ "test/android/javatests/src/stub/org/chromium/base/test/ClangProfiler.java" ]
     }
 
     resources_package = "org.chromium.base"
@@ -4410,6 +4499,25 @@
     ]
   }
 
+  android_library("base_shared_preferences_java") {
+    deps = [
+      ":base_java",
+      ":jni_java",
+      "//third_party/android_deps:guava_android_java",
+      "//third_party/androidx:androidx_annotation_annotation_java",
+    ]
+
+    sources = [
+      "android/java/src/org/chromium/base/shared_preferences/KeyPrefix.java",
+      "android/java/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistries.java",
+      "android/java/src/org/chromium/base/shared_preferences/NoOpPreferenceKeyChecker.java",
+      "android/java/src/org/chromium/base/shared_preferences/PreferenceKeyChecker.java",
+      "android/java/src/org/chromium/base/shared_preferences/PreferenceKeyRegistry.java",
+      "android/java/src/org/chromium/base/shared_preferences/SharedPreferencesManager.java",
+      "android/java/src/org/chromium/base/shared_preferences/StrictPreferenceKeyChecker.java",
+    ]
+  }
+
   android_aidl("process_launcher_aidl") {
     import_include = [ "android/java/src" ]
     sources = [
@@ -4425,13 +4533,13 @@
       ":base_java",
       ":base_java_test_support",
       ":base_java_test_support_uncommon",
-      "//base:jni_java",
       "//base/test:test_support_java",
       "//build/android:build_java",
       "//third_party/androidx:androidx_annotation_annotation_java",
       "//third_party/androidx:androidx_test_monitor_java",
       "//third_party/androidx:androidx_test_runner_java",
       "//third_party/hamcrest:hamcrest_java",
+      "//third_party/jni_zero:jni_zero_java",
       "//third_party/junit:junit",
       "//third_party/mockito:mockito_java",
     ]
@@ -4489,9 +4597,9 @@
 
     deps = [
       ":base_java",
-      "//base:jni_java",
       "//build/android:build_java",
       "//third_party/androidx:androidx_annotation_annotation_java",
+      "//third_party/jni_zero:jni_zero_java",
       "//third_party/junit:junit",
     ]
   }
@@ -4501,7 +4609,6 @@
 
     deps = [
       ":base_java",
-      "//base:jni_java",
       "//build/android:build_java",
       "//third_party/accessibility_test_framework:accessibility_test_framework_java",
       "//third_party/android_deps:espresso_java",
@@ -4515,6 +4622,7 @@
       "//third_party/androidx:androidx_test_runner_java",
       "//third_party/androidx:androidx_test_uiautomator_uiautomator_java",
       "//third_party/hamcrest:hamcrest_java",
+      "//third_party/jni_zero:jni_zero_java",
       "//third_party/junit",
     ]
 
@@ -4524,7 +4632,6 @@
       "test/android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
       "test/android/javatests/src/org/chromium/base/test/BaseActivityTestRule.java",
       "test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java",
-      "test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java",
       "test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
       "test/android/javatests/src/org/chromium/base/test/BaseJUnit4TestRule.java",
       "test/android/javatests/src/org/chromium/base/test/LoadNative.java",
@@ -4569,7 +4676,9 @@
       "test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java",
       "test/android/javatests/src/org/chromium/base/test/util/DoNotBatch.java",
       "test/android/javatests/src/org/chromium/base/test/util/DoNotRevive.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DumpThreadsOnFailureRule.java",
       "test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/EspressoIdleTimeoutRule.java",
       "test/android/javatests/src/org/chromium/base/test/util/Feature.java",
       "test/android/javatests/src/org/chromium/base/test/util/Features.java",
       "test/android/javatests/src/org/chromium/base/test/util/FeaturesBase.java",
@@ -4610,10 +4719,10 @@
 
     deps = [
       "//base:base_java",
-      "//base:jni_java",
       "//third_party/androidx:androidx_annotation_annotation_java",
       "//third_party/androidx:androidx_test_monitor_java",
       "//third_party/hamcrest:hamcrest_java",
+      "//third_party/jni_zero:jni_zero_java",
       "//third_party/junit:junit",
     ]
 
@@ -4679,6 +4788,7 @@
       "android/junit/src/org/chromium/base/BuildInfoTest.java",
       "android/junit/src/org/chromium/base/CallbackControllerTest.java",
       "android/junit/src/org/chromium/base/CollectionUtilTest.java",
+      "android/junit/src/org/chromium/base/CommandLineTest.java",
       "android/junit/src/org/chromium/base/DiscardableReferencePoolTest.java",
       "android/junit/src/org/chromium/base/FeatureListUnitTest.java",
       "android/junit/src/org/chromium/base/FileUtilsTest.java",
@@ -4695,16 +4805,26 @@
       "android/junit/src/org/chromium/base/UnownedUserDataKeyTest.java",
       "android/junit/src/org/chromium/base/jank_tracker/FrameMetricsListenerTest.java",
       "android/junit/src/org/chromium/base/jank_tracker/FrameMetricsStoreTest.java",
+      "android/junit/src/org/chromium/base/jank_tracker/JankActivityTrackerTest.java",
       "android/junit/src/org/chromium/base/jank_tracker/JankMetricUMARecorderTest.java",
+      "android/junit/src/org/chromium/base/jank_tracker/JankReportingRunnableTest.java",
+      "android/junit/src/org/chromium/base/jank_tracker/JankReportingSchedulerTest.java",
       "android/junit/src/org/chromium/base/library_loader/LinkerTest.java",
       "android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java",
       "android/junit/src/org/chromium/base/memory/MemoryPurgeManagerTest.java",
       "android/junit/src/org/chromium/base/metrics/CachingUmaRecorderTest.java",
       "android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java",
       "android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java",
+      "android/junit/src/org/chromium/base/shared_preferences/KeyPrefixTest.java",
+      "android/junit/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistriesTest.java",
+      "android/junit/src/org/chromium/base/shared_preferences/SharedPreferencesManagerTest.java",
+      "android/junit/src/org/chromium/base/shared_preferences/StrictPreferenceKeyCheckerTest.java",
+      "android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java",
       "android/junit/src/org/chromium/base/supplier/ObservableSupplierImplTest.java",
       "android/junit/src/org/chromium/base/supplier/OneShotCallbackTest.java",
       "android/junit/src/org/chromium/base/supplier/OneshotSupplierImplTest.java",
+      "android/junit/src/org/chromium/base/supplier/SyncOneshotSupplierImplTest.java",
+      "android/junit/src/org/chromium/base/supplier/TransitiveObservableSupplierTest.java",
       "android/junit/src/org/chromium/base/supplier/UnownedUserDataSupplierTest.java",
       "android/junit/src/org/chromium/base/task/AsyncTaskThreadTest.java",
       "android/junit/src/org/chromium/base/task/SequencedTaskRunnerTaskMigrationTest.java",
@@ -4732,13 +4852,14 @@
       ":base_java_test_support",
       ":base_java_test_support_uncommon",
       ":base_junit_test_support",
-      "//base:jni_java",
+      ":base_shared_preferences_java",
       "//base/test:test_support_java",
       "//third_party/android_deps:guava_android_java",
       "//third_party/androidx:androidx_annotation_annotation_java",
       "//third_party/androidx:androidx_test_core_java",
       "//third_party/androidx:androidx_test_runner_java",
       "//third_party/hamcrest:hamcrest_java",
+      "//third_party/jni_zero:jni_zero_java",
     ]
   }
 
@@ -4772,8 +4893,8 @@
     srcjar_deps = [ ":base_profiler_test_support_jni" ]
 
     deps = [
-      "//base:jni_java",
       "//build/android:build_java",
+      "//third_party/jni_zero:jni_zero_java",
     ]
   }
 
@@ -4782,7 +4903,7 @@
 
     deps = [
       ":base_java",
-      "//base:jni_java",
+      "//third_party/jni_zero:jni_zero_java",
     ]
 
     sources = [
@@ -4867,6 +4988,13 @@
   deps = [ "//base" ]
 }
 
+if (is_linux || is_chromeos) {
+  fuzzer_test("mime_util_xdg_fuzzer") {
+    sources = [ "nix/mime_util_xdg_fuzzer.cc" ]
+    deps = [ "//base" ]
+  }
+}
+
 fuzzer_test("string_number_conversions_fuzzer") {
   sources = [ "strings/string_number_conversions_fuzzer.cc" ]
   deps = [ "//base" ]
@@ -4919,39 +5047,39 @@
 }
 
 # TODO(1151236): Temporarily move test_support from
-# //base/allocator/partition_allocator/BUILD.gn to //base/BUILD.gn. After
+# //base/allocator/partition_allocator/src/partition_alloc/BUILD.gn to //base/BUILD.gn. After
 # fixing partition_allocator tests issue, we will move test_support to
-# //base/allocator/partition_allocator/BUILD.gn again.
+# //base/allocator/partition_allocator/src/partition_alloc/BUILD.gn again.
 source_set("partition_alloc_test_support") {
   testonly = true
 
   sources = [
-    "allocator/partition_allocator/extended_api.cc",
-    "allocator/partition_allocator/extended_api.h",
-    "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h",
-    "allocator/partition_allocator/partition_alloc_for_testing.h",
-    "allocator/partition_allocator/pointers/raw_ptr_counting_impl_wrapper_for_test.h",
+    "allocator/partition_allocator/src/partition_alloc/extended_api.cc",
+    "allocator/partition_allocator/src/partition_alloc/extended_api.h",
+    "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h",
+    "allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h",
+    "allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_counting_impl_for_test.h",
   ]
   if (is_posix) {
-    sources += [ "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix_for_testing.cc" ]
+    sources += [ "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix_for_testing.cc" ]
   }
   if (is_fuchsia) {
     sources += [
-      "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
-      "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
+      "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
     ]
   }
   if (is_win) {
-    sources += [ "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
+    sources += [ "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
   }
   if (is_apple) {
-    sources += [ "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_mac_for_testing.mm" ]
+    sources += [ "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_apple_for_testing.mm" ]
   }
   if (is_linux || is_chromeos) {
-    sources += [ "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_linux_for_testing.cc" ]
+    sources += [ "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_linux_for_testing.cc" ]
   }
   if (is_android) {
-    sources += [ "allocator/partition_allocator/partition_alloc_base/threading/platform_thread_android_for_testing.cc" ]
+    sources += [ "allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_android_for_testing.cc" ]
   }
   public_deps = [
     "allocator/partition_allocator:buildflags",
@@ -4978,3 +5106,12 @@
     configs += [ "//build/config/compiler:optimize_speed" ]
   }
 }
+
+if (enable_base_tracing) {
+  # This group makes visible those targets in subdirectories that are not
+  # explicitly depended on.
+  group("gn_all") {
+    testonly = true
+    deps = [ "//base/tracing:perfetto_diff_tests" ]
+  }
+}
diff --git a/base/PRESUBMIT.py b/base/PRESUBMIT.py
index 8ee2005..8635a1c 100644
--- a/base/PRESUBMIT.py
+++ b/base/PRESUBMIT.py
@@ -36,7 +36,7 @@
         not "/test/" in f.LocalPath() and
         not f.LocalPath().endswith('.java') and
         not f.LocalPath().endswith('_unittest.mm') and
-        not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
+        not f.LocalPath().endswith('_spi.h')):
       contents = input_api.ReadFile(f)
       if pattern.search(contents):
         files.append(f)
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 2c963b6..89e5e92 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -16,11 +16,11 @@
 }
 
 if (is_apple) {
-  source_set("early_zone_registration_mac") {
+  source_set("early_zone_registration_apple") {
     sources = [
-      "early_zone_registration_mac.cc",
-      "early_zone_registration_mac.h",
-      "partition_allocator/shim/early_zone_registration_constants.h",
+      "early_zone_registration_apple.cc",
+      "early_zone_registration_apple.h",
+      "partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h",
     ]
 
     deps = [
diff --git a/base/allocator/allocator_check.cc b/base/allocator/allocator_check.cc
index d05f8d8..f011797 100644
--- a/base/allocator/allocator_check.cc
+++ b/base/allocator/allocator_check.cc
@@ -4,11 +4,11 @@
 
 #include "base/allocator/allocator_check.h"
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/shim/winheap_stubs_win.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h"
 #endif
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@@ -16,7 +16,7 @@
 #endif
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
 #endif
 
 namespace base::allocator {
diff --git a/base/allocator/dispatcher/dispatcher.cc b/base/allocator/dispatcher/dispatcher.cc
index 616e2e8..4595034 100644
--- a/base/allocator/dispatcher/dispatcher.cc
+++ b/base/allocator/dispatcher/dispatcher.cc
@@ -5,8 +5,8 @@
 #include "base/allocator/dispatcher/dispatcher.h"
 
 #include "base/allocator/dispatcher/internal/dispatch_data.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "base/check.h"
 #include "base/dcheck_is_on.h"
 #include "base/no_destructor.h"
@@ -16,7 +16,7 @@
 #endif
 
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
 #endif
 
 namespace base::allocator::dispatcher {
diff --git a/base/allocator/dispatcher/dispatcher_unittest.cc b/base/allocator/dispatcher/dispatcher_unittest.cc
index 60f5227..176144f 100644
--- a/base/allocator/dispatcher/dispatcher_unittest.cc
+++ b/base/allocator/dispatcher/dispatcher_unittest.cc
@@ -2,22 +2,22 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/allocator/partition_allocator/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "base/allocator/dispatcher/configuration.h"
 #include "base/allocator/dispatcher/dispatcher.h"
 #include "base/allocator/dispatcher/testing/dispatcher_test.h"
 #include "base/allocator/dispatcher/testing/tools.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
 #endif
 
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #endif
 
 #include <tuple>
@@ -99,7 +99,7 @@
 // because it makes PartitionAlloc take a different path that doesn't provide
 // notifications to observer hooks.
 struct PartitionAllocator {
-  void* Alloc(size_t size) { return alloc_.AllocWithFlags(0, size, nullptr); }
+  void* Alloc(size_t size) { return alloc_.AllocInline(size); }
   void Free(void* data) { alloc_.Free(data); }
   ~PartitionAllocator() {
     // Use |DisallowLeaks| to confirm that there is no memory allocated and
diff --git a/base/allocator/dispatcher/initializer.h b/base/allocator/dispatcher/initializer.h
index 3179134..d911463 100644
--- a/base/allocator/dispatcher/initializer.h
+++ b/base/allocator/dispatcher/initializer.h
@@ -28,7 +28,7 @@
                          const VerifiedObservers& verified_observers,
                          const UnverifiedObservers& unverified_observers,
                          std::index_sequence<IndicesToSelect...> indices) {
-  if constexpr (CurrentIndex < std::tuple_size<UnverifiedObservers>::value) {
+  if constexpr (CurrentIndex < std::tuple_size_v<UnverifiedObservers>) {
     // We still have some items left to handle.
     if (check_observer(std::get<CurrentIndex>(unverified_observers))) {
       // The current observer is valid. Hence, append the index of the current
@@ -43,8 +43,7 @@
                                      verified_observers, unverified_observers,
                                      indices);
     }
-  } else if constexpr (CurrentIndex ==
-                       std::tuple_size<UnverifiedObservers>::value) {
+  } else if constexpr (CurrentIndex == std::tuple_size_v<UnverifiedObservers>) {
     // So we have met the end of the tuple of observers to verify.
     // Hence, we extract the additional valid observers, append to the tuple of
     // already verified observers and hand over to the dispatcher.
@@ -54,7 +53,7 @@
 
     // Do a final check that neither the maximum total number of observers nor
     // the maximum number of optional observers is exceeded.
-    static_assert(std::tuple_size<decltype(observers)>::value <=
+    static_assert(std::tuple_size_v<decltype(observers)> <=
                   configuration::kMaximumNumberOfObservers);
     static_assert(sizeof...(IndicesToSelect) <=
                   configuration::kMaximumNumberOfOptionalObservers);
@@ -102,7 +101,7 @@
   template <typename... NewMandatoryObservers,
             std::enable_if_t<
                 internal::LessEqual((sizeof...(NewMandatoryObservers) +
-                                     std::tuple_size<OptionalObservers>::value),
+                                     std::tuple_size_v<OptionalObservers>),
                                     configuration::kMaximumNumberOfObservers),
                 bool> = true>
   Initializer<std::tuple<NewMandatoryObservers*...>, OptionalObservers>
@@ -115,9 +114,9 @@
   // configuration::maximum_number_of_observers.
   template <typename... AdditionalMandatoryObservers,
             std::enable_if_t<internal::LessEqual(
-                                 std::tuple_size<MandatoryObservers>::value +
+                                 std::tuple_size_v<MandatoryObservers> +
                                      sizeof...(AdditionalMandatoryObservers) +
-                                     std::tuple_size<OptionalObservers>::value,
+                                     std::tuple_size_v<OptionalObservers>,
                                  configuration::kMaximumNumberOfObservers),
                              bool> = true>
   Initializer<TupleCat<MandatoryObservers,
@@ -140,7 +139,7 @@
               sizeof...(NewOptionalObservers),
               configuration::kMaximumNumberOfOptionalObservers) &&
               internal::LessEqual((sizeof...(NewOptionalObservers) +
-                                   std::tuple_size<MandatoryObservers>::value),
+                                   std::tuple_size_v<MandatoryObservers>),
                                   configuration::kMaximumNumberOfObservers),
           bool> = true>
   Initializer<MandatoryObservers, std::tuple<NewOptionalObservers*...>>
@@ -156,12 +155,12 @@
       typename... AdditionalOptionalObservers,
       std::enable_if_t<
           internal::LessEqual(
-              std::tuple_size<OptionalObservers>::value +
+              std::tuple_size_v<OptionalObservers> +
                   sizeof...(AdditionalOptionalObservers),
               configuration::kMaximumNumberOfOptionalObservers) &&
-              internal::LessEqual((std::tuple_size<OptionalObservers>::value +
+              internal::LessEqual((std::tuple_size_v<OptionalObservers> +
                                    sizeof...(AdditionalOptionalObservers) +
-                                   std::tuple_size<MandatoryObservers>::value),
+                                   std::tuple_size_v<MandatoryObservers>),
                                   configuration::kMaximumNumberOfObservers),
           bool> = true>
   Initializer<
@@ -203,4 +202,4 @@
 
 }  // namespace base::allocator::dispatcher
 
-#endif  // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
\ No newline at end of file
+#endif  // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
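Note: the initializer.h hunks above replace std::tuple_size<T>::value with the C++17 variable template std::tuple_size_v<T>; the two spellings are equivalent. A minimal standalone sketch, using only the standard library (nothing from //base is assumed):

    #include <tuple>

    int main() {
      using Observers = std::tuple<int*, long*, double*>;
      // The variable template is shorthand for the nested ::value constant.
      static_assert(std::tuple_size_v<Observers> ==
                    std::tuple_size<Observers>::value);
      static_assert(std::tuple_size_v<Observers> == 3u);
      return 0;
    }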
diff --git a/base/allocator/dispatcher/internal/dispatch_data.cc b/base/allocator/dispatcher/internal/dispatch_data.cc
index 2b6b7f5..eefe3f1 100644
--- a/base/allocator/dispatcher/internal/dispatch_data.cc
+++ b/base/allocator/dispatcher/internal/dispatch_data.cc
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 #include "base/allocator/dispatcher/internal/dispatch_data.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 namespace base::allocator::dispatcher::internal {
 
diff --git a/base/allocator/dispatcher/internal/dispatch_data.h b/base/allocator/dispatcher/internal/dispatch_data.h
index c0fc38d..cad5be9 100644
--- a/base/allocator/dispatcher/internal/dispatch_data.h
+++ b/base/allocator/dispatcher/internal/dispatch_data.h
@@ -5,16 +5,16 @@
 #ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
 #define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/base_export.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
 #endif
 
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #endif
 
 namespace base::allocator::dispatcher::internal {
diff --git a/base/allocator/dispatcher/internal/dispatcher_internal.h b/base/allocator/dispatcher/internal/dispatcher_internal.h
index 34fe226..21f25f8 100644
--- a/base/allocator/dispatcher/internal/dispatcher_internal.h
+++ b/base/allocator/dispatcher/internal/dispatcher_internal.h
@@ -9,14 +9,14 @@
 #include "base/allocator/dispatcher/internal/dispatch_data.h"
 #include "base/allocator/dispatcher/internal/tools.h"
 #include "base/allocator/dispatcher/subsystem.h"
-#include "base/allocator/partition_allocator/partition_alloc_allocation_data.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_allocation_data.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #endif
 
 #include <tuple>
diff --git a/base/allocator/dispatcher/internal/dispatcher_internal_unittest.cc b/base/allocator/dispatcher/internal/dispatcher_internal_unittest.cc
index 3ae639d..2fee04d 100644
--- a/base/allocator/dispatcher/internal/dispatcher_internal_unittest.cc
+++ b/base/allocator/dispatcher/internal/dispatcher_internal_unittest.cc
@@ -6,7 +6,7 @@
 #include "base/allocator/dispatcher/testing/dispatcher_test.h"
 #include "base/allocator/dispatcher/testing/observer_mock.h"
 #include "base/allocator/dispatcher/testing/tools.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/dcheck_is_on.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
diff --git a/base/allocator/dispatcher/tls.cc b/base/allocator/dispatcher/tls.cc
index 6524810..9cad125 100644
--- a/base/allocator/dispatcher/tls.cc
+++ b/base/allocator/dispatcher/tls.cc
@@ -8,6 +8,7 @@
 
 #include "base/check.h"
 #include "base/dcheck_is_on.h"
+#include "base/debug/crash_logging.h"
 #include "base/immediate_crash.h"
 #include "build/build_config.h"
 
@@ -18,6 +19,32 @@
 #endif
 
 namespace base::allocator::dispatcher::internal {
+namespace {
+base::debug::CrashKeySize GetCrashKeySize(const std::string& crash_key_name) {
+  if (std::size(crash_key_name) <= 32ul) {
+    return base::debug::CrashKeySize::Size32;
+  }
+  if (std::size(crash_key_name) <= 64ul) {
+    return base::debug::CrashKeySize::Size64;
+  }
+  if (std::size(crash_key_name) <= 256ul) {
+    return base::debug::CrashKeySize::Size256;
+  }
+  CHECK(std::size(crash_key_name) <= 1024ul);
+
+  return base::debug::CrashKeySize::Size1024;
+}
+
+#if DCHECK_IS_ON()
+void Swap(std::atomic_bool& lh_op, std::atomic_bool& rh_op) {
+  auto lh_op_value = lh_op.load(std::memory_order_relaxed);
+  auto rh_op_value = rh_op.load(std::memory_order_relaxed);
+
+  CHECK(lh_op.compare_exchange_strong(lh_op_value, rh_op_value));
+  CHECK(rh_op.compare_exchange_strong(rh_op_value, lh_op_value));
+}
+#endif
+}  // namespace
 
 void* MMapAllocator::AllocateMemory(size_t size_in_bytes) {
   void* const mmap_res = mmap(nullptr, size_in_bytes, PROT_READ | PROT_WRITE,
@@ -43,8 +70,31 @@
   return (munmap_res == 0);
 }
 
+PThreadTLSSystem::PThreadTLSSystem() = default;
+
+PThreadTLSSystem::PThreadTLSSystem(PThreadTLSSystem&& other) {
+  std::swap(crash_key_, other.crash_key_);
+  std::swap(data_access_key_, other.data_access_key_);
+
+#if DCHECK_IS_ON()
+  Swap(initialized_, other.initialized_);
+#endif
+}
+
+PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
+  std::swap(crash_key_, other.crash_key_);
+  std::swap(data_access_key_, other.data_access_key_);
+
+#if DCHECK_IS_ON()
+  Swap(initialized_, other.initialized_);
+#endif
+
+  return *this;
+}
+
 bool PThreadTLSSystem::Setup(
-    OnThreadTerminationFunction thread_termination_function) {
+    OnThreadTerminationFunction thread_termination_function,
+    const base::StringPiece instance_id) {
 #if DCHECK_IS_ON()
   // Initialize must happen outside of the allocation path. Therefore, it is
   // secure to verify with DCHECK.
@@ -61,6 +111,18 @@
   // However, we strongly recommend to setup the TLS system as early as possible
   // to avoid exceeding this limit.
 
+  // Some crashes might be caused by the initialization being performed too late
+  // and running into the problems mentioned above. Since there's no way to
+  // handle this issue programmatically, we include the key in the Crashpad
+  // report to allow for later inspection.
+  std::string crash_key_name = "tls_system-";
+  crash_key_name += instance_id;
+
+  crash_key_ = base::debug::AllocateCrashKeyString(
+      crash_key_name.c_str(), GetCrashKeySize(crash_key_name));
+  base::debug::SetCrashKeyString(crash_key_,
+                                 base::NumberToString(data_access_key_));
+
   return (0 == key_create_res);
 }
 
@@ -71,6 +133,9 @@
   DCHECK(initialized_.exchange(false, std::memory_order_acq_rel));
 #endif
 
+  base::debug::ClearCrashKeyString(crash_key_);
+  crash_key_ = nullptr;
+
   auto const key_delete_res = pthread_key_delete(data_access_key_);
   return (0 == key_delete_res);
 }
diff --git a/base/allocator/dispatcher/tls.h b/base/allocator/dispatcher/tls.h
index 9d49187..a31c614 100644
--- a/base/allocator/dispatcher/tls.h
+++ b/base/allocator/dispatcher/tls.h
@@ -20,10 +20,11 @@
 #include <memory>
 #include <mutex>
 
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
 #include "base/base_export.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
+#include "base/strings/string_piece.h"
 
 #include <pthread.h>
 
@@ -33,6 +34,29 @@
 #define DISABLE_TSAN_INSTRUMENTATION
 #endif
 
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+// Verify that a condition holds and abort the process if it doesn't. The
+// functionality is similar to RAW_CHECK but includes more information in the
+// logged message. It is non-allocating to prevent recursion.
+#define TLS_RAW_CHECK(error_message, condition) \
+  TLS_RAW_CHECK_IMPL(error_message, condition, __FILE__, __LINE__)
+
+#define TLS_RAW_CHECK_IMPL(error_message, condition, file, line)        \
+  do {                                                                  \
+    if (!(condition)) {                                                 \
+      constexpr const char* message =                                   \
+          "TLS System: " error_message " Failed condition '" #condition \
+          "' in (" file "@" STR(line) ").\n";                           \
+      ::logging::RawCheckFailure(message);                              \
+    }                                                                   \
+  } while (0)
+
+namespace base::debug {
+struct CrashKeyString;
+}
+
 namespace base::allocator::dispatcher {
 namespace internal {
 
@@ -71,11 +95,20 @@
 
 // The TLS system used by default for the thread local storage. It stores and
 // retrieves thread specific data pointers.
-struct BASE_EXPORT PThreadTLSSystem {
+class BASE_EXPORT PThreadTLSSystem {
+ public:
+  PThreadTLSSystem();
+
+  PThreadTLSSystem(const PThreadTLSSystem&) = delete;
+  PThreadTLSSystem(PThreadTLSSystem&&);
+  PThreadTLSSystem& operator=(const PThreadTLSSystem&) = delete;
+  PThreadTLSSystem& operator=(PThreadTLSSystem&&);
+
   // Initialize the TLS system to store a data set for different threads.
   // @param thread_termination_function An optional function which will be
   // invoked upon termination of a thread.
-  bool Setup(OnThreadTerminationFunction thread_termination_function);
+  bool Setup(OnThreadTerminationFunction thread_termination_function,
+             const base::StringPiece instance_id);
   // Tear down the TLS system. After completing tear down, the thread
   // termination function passed to Setup will not be invoked anymore.
   bool TearDownForTesting();
@@ -88,6 +121,7 @@
   bool SetThreadSpecificData(void* data);
 
  private:
+  base::debug::CrashKeyString* crash_key_ = nullptr;
   pthread_key_t data_access_key_ = 0;
 #if DCHECK_IS_ON()
   // From POSIX standard at https://www.open-std.org/jtc1/sc22/open/n4217.pdf:
@@ -162,16 +196,21 @@
           size_t AllocationChunkSize,
           bool IsDestructibleForTesting>
 struct ThreadLocalStorage {
-  ThreadLocalStorage() : root_(AllocateAndInitializeChunk()) { Initialize(); }
+  explicit ThreadLocalStorage(const base::StringPiece instance_id)
+      : root_(AllocateAndInitializeChunk()) {
+    Initialize(instance_id);
+  }
 
   // Create a new instance of |ThreadLocalStorage| using the passed allocator
   // and TLS system. This initializes the underlying TLS system and creates the
   // first chunk of data.
-  ThreadLocalStorage(AllocatorType allocator, TLSSystemType tlsSystem)
+  ThreadLocalStorage(const base::StringPiece instance_id,
+                     AllocatorType allocator,
+                     TLSSystemType tls_system)
       : allocator_(std::move(allocator)),
-        tls_system_(std::move(tlsSystem)),
+        tls_system_(std::move(tls_system)),
         root_(AllocateAndInitializeChunk()) {
-    Initialize();
+    Initialize(instance_id);
   }
 
   // Deletes an instance of |ThreadLocalStorage| and delete all the data chunks
@@ -207,7 +246,8 @@
 
       // We might be called in the course of handling a memory allocation. We do
       // not use CHECK since they might allocate and cause a recursion.
-      RAW_CHECK(tls_system.SetThreadSpecificData(slot));
+      TLS_RAW_CHECK("Failed to set thread specific data.",
+                    tls_system.SetThreadSpecificData(slot));
 
       // Reset the content to wipe out any previous data.
       Reset(slot->item);
@@ -307,22 +347,24 @@
     // SingleSlot and reset the is_used flag.
     auto* const slot = static_cast<SingleSlot*>(data);
 
-    // We might be called in the course of handling a memory allocation. We do
-    // not use CHECK since they might allocate and cause a recursion.
-    RAW_CHECK(slot && slot->is_used.test_and_set());
+    // We might be called in the course of handling a memory allocation.
+    // Therefore, do not use CHECK since it might allocate and cause a
+    // recursion.
+    TLS_RAW_CHECK("Received an invalid slot.",
+                  slot && slot->is_used.test_and_set());
 
     slot->is_used.clear(std::memory_order_relaxed);
   }
 
   // Perform common initialization during construction of an instance.
-  void Initialize() {
+  void Initialize(const base::StringPiece instance_id) {
     // The constructor must be called outside of the allocation path. Therefore,
     // it is secure to verify with CHECK.
 
     // Passing MarkSlotAsFree as thread_termination_function we ensure the
     // slot/item assigned to the finished thread will be returned to the pool of
     // unused items.
-    CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree));
+    CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree, instance_id));
   }
 
   Chunk* AllocateAndInitializeChunk() {
@@ -331,7 +373,8 @@
 
     // We might be called in the course of handling a memory allocation. We do
     // not use CHECK since they might allocate and cause a recursion.
-    RAW_CHECK(uninitialized_memory != nullptr);
+    TLS_RAW_CHECK("Failed to allocate memory for new chunk.",
+                  uninitialized_memory != nullptr);
 
     return new (uninitialized_memory) Chunk{};
   }
@@ -428,5 +471,10 @@
 
 }  // namespace base::allocator::dispatcher
 
+#undef TLS_RAW_CHECK_IMPL
+#undef TLS_RAW_CHECK
+#undef STR
+#undef STR_HELPER
+
 #endif  // USE_LOCAL_TLS_EMULATION()
 #endif  // BASE_ALLOCATOR_DISPATCHER_TLS_H_
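Note: the STR/STR_HELPER pair introduced for TLS_RAW_CHECK is the usual two-level stringification idiom: the extra indirection lets __LINE__ expand to its numeric value before # turns it into a string literal, so the failure message embeds the real line number. A minimal sketch of the idiom on its own, assuming only the preprocessor and <cstdio>:

    #include <cstdio>

    #define STR_HELPER(x) #x
    #define STR(x) STR_HELPER(x)

    int main() {
      // Stringifying directly (#__LINE__) would yield the text "__LINE__";
      // with the helper, the macro expands to the current line number.
      const char* line_as_string = STR(__LINE__);
      std::printf("expanded at line %s\n", line_as_string);
      return 0;
    }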
diff --git a/base/allocator/dispatcher/tls_unittest.cc b/base/allocator/dispatcher/tls_unittest.cc
index ccbe08a..720ce0f 100644
--- a/base/allocator/dispatcher/tls_unittest.cc
+++ b/base/allocator/dispatcher/tls_unittest.cc
@@ -9,11 +9,16 @@
 #include <array>
 #include <cstddef>
 #include <functional>
+#include <list>
 #include <mutex>
 #include <thread>
 #include <unordered_set>
 #include <utility>
+#include <vector>
 
+#include "base/debug/crash_logging.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -54,7 +59,7 @@
 
 struct TLSSystemMockBase {
   TLSSystemMockBase() {
-    ON_CALL(*this, Setup(_)).WillByDefault(Return(true));
+    ON_CALL(*this, Setup(_, _)).WillByDefault(Return(true));
     ON_CALL(*this, TearDownForTesting()).WillByDefault(Return(true));
     ON_CALL(*this, SetThreadSpecificData(_)).WillByDefault(Return(true));
   }
@@ -62,34 +67,48 @@
   MOCK_METHOD(
       bool,
       Setup,
-      (internal::OnThreadTerminationFunction thread_termination_function),
+      (internal::OnThreadTerminationFunction thread_termination_function,
+       const base::StringPiece instance_id),
       ());
   MOCK_METHOD(bool, TearDownForTesting, (), ());
   MOCK_METHOD(void*, GetThreadSpecificData, (), ());
   MOCK_METHOD(bool, SetThreadSpecificData, (void* data), ());
 };
 
+struct CrashKeyImplementationMockBase
+    : public base::debug::CrashKeyImplementation {
+  MOCK_METHOD(base::debug::CrashKeyString*,
+              Allocate,
+              (const char name[], base::debug::CrashKeySize size),
+              (override));
+  MOCK_METHOD(void,
+              Set,
+              (base::debug::CrashKeyString * crash_key,
+               base::StringPiece value),
+              (override));
+  MOCK_METHOD(void,
+              Clear,
+              (base::debug::CrashKeyString * crash_key),
+              (override));
+  MOCK_METHOD(void, OutputCrashKeysToStream, (std::ostream & out), (override));
+};
+
 using AllocatorMock = NiceMock<AllocatorMockBase>;
 using TLSSystemMock = NiceMock<TLSSystemMockBase>;
+using CrashKeyImplementationMock = NiceMock<CrashKeyImplementationMockBase>;
 
 template <typename T, typename Allocator, typename TLSSystem>
-ThreadLocalStorage<T,
-                   std::reference_wrapper<Allocator>,
-                   std::reference_wrapper<TLSSystem>,
-                   0,
-                   true>
-CreateThreadLocalStorage(Allocator& allocator, TLSSystem& tlsSystem) {
-  return {std::ref(allocator), std::ref(tlsSystem)};
+auto CreateThreadLocalStorage(Allocator& allocator, TLSSystem& tlsSystem) {
+  return ThreadLocalStorage<T, std::reference_wrapper<Allocator>,
+                            std::reference_wrapper<TLSSystem>, 0, true>(
+      "ThreadLocalStorage", std::ref(allocator), std::ref(tlsSystem));
 }
 
 template <typename T>
-ThreadLocalStorage<T,
-                   internal::DefaultAllocator,
-                   internal::DefaultTLSSystem,
-                   0,
-                   true>
-CreateThreadLocalStorage() {
-  return {};
+auto CreateThreadLocalStorage() {
+  return ThreadLocalStorage<T, internal::DefaultAllocator,
+                            internal::DefaultTLSSystem, 0, true>(
+      "ThreadLocalStorage");
 }
 
 }  // namespace
@@ -200,7 +219,7 @@
 
   EXPECT_CALL(allocator_mock, AllocateMemory(_))
       .WillOnce([](size_t size_in_bytes) { return malloc(size_in_bytes); });
-  EXPECT_CALL(tlsSystem_mock, Setup(NotNull())).WillOnce(Return(true));
+  EXPECT_CALL(tlsSystem_mock, Setup(NotNull(), _)).WillOnce(Return(true));
   EXPECT_CALL(tlsSystem_mock, TearDownForTesting()).WillOnce(Return(true));
   EXPECT_CALL(allocator_mock, FreeMemoryForTesting(_, _))
       .WillOnce([](void* pointer_to_allocated, size_t size_in_bytes) {
@@ -303,7 +322,7 @@
 
   InSequence execution_sequence;
 
-  EXPECT_CALL(tlsSystem_mock, Setup(NotNull())).WillOnce(Return(true));
+  EXPECT_CALL(tlsSystem_mock, Setup(NotNull(), _)).WillOnce(Return(true));
   EXPECT_CALL(tlsSystem_mock, GetThreadSpecificData())
       .WillOnce(Return(nullptr));
   EXPECT_CALL(tlsSystem_mock, SetThreadSpecificData(NotNull()));
@@ -361,7 +380,7 @@
 
     // Setup all expectations here. If we're setting them up in the parent
     // process, they will fail because the parent doesn't execute any test.
-    EXPECT_CALL(tlsSystem_mock, Setup(_)).WillOnce(Return(false));
+    EXPECT_CALL(tlsSystem_mock, Setup(_, _)).WillOnce(Return(false));
     EXPECT_CALL(tlsSystem_mock, GetThreadSpecificData()).Times(0);
     EXPECT_CALL(tlsSystem_mock, SetThreadSpecificData(_)).Times(0);
     EXPECT_CALL(tlsSystem_mock, TearDownForTesting()).Times(0);
@@ -398,7 +417,7 @@
 
     // Setup all expectations here. If we're setting them up in the parent
     // process, they will fail because the parent doesn't execute any test.
-    EXPECT_CALL(tlsSystem_mock, Setup(_)).WillOnce(Return(true));
+    EXPECT_CALL(tlsSystem_mock, Setup(_, _)).WillOnce(Return(true));
     EXPECT_CALL(tlsSystem_mock, TearDownForTesting()).WillOnce(Return(false));
 
     CreateThreadLocalStorage<DataToStore>(allocator_mock, tlsSystem_mock);
@@ -422,17 +441,72 @@
 TEST_F(BasePThreadTLSSystemTest, VerifySetupNTeardownSequence) {
   internal::PThreadTLSSystem sut;
 
-  for (size_t idx = 0; idx < 5; ++idx) {
-    EXPECT_TRUE(sut.Setup(nullptr));
-    EXPECT_TRUE(sut.TearDownForTesting());
+  const std::vector<std::string> instance_ids = {"first", "second"};
+  const std::vector<std::string> crash_key_values = {"tls_system-first",
+                                                     "tls_system-second"};
+  std::list<base::debug::CrashKeyString> crash_keys;
+
+  const auto handle_allocate =
+      [&](const char* name,
+          base::debug::CrashKeySize size) -> base::debug::CrashKeyString* {
+    const auto it_crash_key_value = std::find(std::begin(crash_key_values),
+                                              std::end(crash_key_values), name);
+    EXPECT_NE(it_crash_key_value, std::end(crash_key_values));
+    crash_keys.emplace_back(it_crash_key_value->c_str(), size);
+    return &(crash_keys.back());
+  };
+
+  const auto handle_set = [&](base::debug::CrashKeyString* crash_key,
+                              base::StringPiece value) {
+    const auto it_crash_key =
+        std::find_if(std::begin(crash_keys), std::end(crash_keys),
+                     [&](const base::debug::CrashKeyString& key) {
+                       return &key == crash_key;
+                     });
+    ASSERT_NE(it_crash_key, std::end(crash_keys));
+    ASSERT_GT(value.size(), 0ul);
+  };
+
+  const auto handle_clear = [&](base::debug::CrashKeyString* crash_key) {
+    const auto it_crash_key =
+        std::find_if(std::begin(crash_keys), std::end(crash_keys),
+                     [&](const base::debug::CrashKeyString& key) {
+                       return &key == crash_key;
+                     });
+    ASSERT_NE(it_crash_key, std::end(crash_keys));
+
+    crash_keys.erase(it_crash_key);
+  };
+
+  auto crash_key_implementation =
+      std::make_unique<CrashKeyImplementationMock>();
+
+  InSequence execution_sequence;
+
+  for (size_t instance_index = 0; instance_index < instance_ids.size();
+       ++instance_index) {
+    EXPECT_CALL(*crash_key_implementation, Allocate(_, _))
+        .WillOnce(handle_allocate);
+    EXPECT_CALL(*crash_key_implementation, Set(_, _)).WillOnce(handle_set);
+    EXPECT_CALL(*crash_key_implementation, Clear(_)).WillOnce(handle_clear);
   }
+
+  base::debug::SetCrashKeyImplementation(std::move(crash_key_implementation));
+
+  for (const auto& instance_id : instance_ids) {
+    ASSERT_TRUE(sut.Setup(nullptr, instance_id));
+    ASSERT_TRUE(sut.TearDownForTesting());
+  }
+
+  // Set an empty crash key impl to ensure deletion and evaluation of the mock.
+  base::debug::SetCrashKeyImplementation({});
 }
 
 TEST_F(BasePThreadTLSSystemTest, VerifyThreadTerminationFunctionIsCalled) {
   std::array<std::thread, 10> threads;
 
   internal::PThreadTLSSystem sut;
-  sut.Setup(&ThreadTerminationFunction);
+  sut.Setup(&ThreadTerminationFunction, "PThreadTLSSystemInstance");
 
   for (auto& t : threads) {
     t = std::thread{[&] {
@@ -452,7 +526,7 @@
 
 TEST_F(BasePThreadTLSSystemTest, VerifyGetWithoutSetReturnsNull) {
   internal::PThreadTLSSystem sut;
-  sut.Setup(nullptr);
+  sut.Setup(nullptr, "PThreadTLSSystemInstance");
 
   EXPECT_EQ(nullptr, sut.GetThreadSpecificData());
 
@@ -461,7 +535,7 @@
 
 TEST_F(BasePThreadTLSSystemTest, VerifyGetAfterTeardownReturnsNull) {
   internal::PThreadTLSSystem sut;
-  sut.Setup(nullptr);
+  sut.Setup(nullptr, "PThreadTLSSystemInstance");
   sut.SetThreadSpecificData(this);
   sut.TearDownForTesting();
 
@@ -480,7 +554,7 @@
   std::atomic_bool threads_can_finish{false};
 
   internal::PThreadTLSSystem sut;
-  ASSERT_TRUE(sut.Setup(nullptr));
+  ASSERT_TRUE(sut.Setup(nullptr, "PThreadTLSSystemInstance"));
 
   for (auto& t : threads) {
     t = std::thread{[&] {
@@ -536,7 +610,7 @@
   std::array<std::thread, 50> threads;
 
   internal::PThreadTLSSystem sut;
-  sut.Setup(nullptr);
+  sut.Setup(nullptr, "PThreadTLSSystemInstance");
 
   for (auto& t : threads) {
     t = std::thread{[&] {
@@ -573,8 +647,8 @@
 TEST_F(BasePThreadTLSSystemDeathTest, VerifyDeathIfSetupTwice) {
   internal::PThreadTLSSystem sut;
 
-  EXPECT_TRUE(sut.Setup(nullptr));
-  EXPECT_DEATH(sut.Setup(nullptr), "");
+  EXPECT_TRUE(sut.Setup(nullptr, "PThreadTLSSystemInstance"));
+  EXPECT_DEATH(sut.Setup(nullptr, "PThreadTLSSystemInstance"), "");
 }
 
 TEST_F(BasePThreadTLSSystemDeathTest, VerifyDeathIfTearDownWithoutSetup) {
diff --git a/base/allocator/early_zone_registration_apple.cc b/base/allocator/early_zone_registration_apple.cc
new file mode 100644
index 0000000..b182cf6
--- /dev/null
+++ b/base/allocator/early_zone_registration_apple.cc
@@ -0,0 +1,266 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/early_zone_registration_apple.h"
+
+#include <mach/mach.h>
+#include <malloc/malloc.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h"
+
+// BASE_EXPORT tends to be defined as soon as anything from //base is included.
+#if defined(BASE_EXPORT)
+#error "This file cannot depend on //base"
+#endif
+
+namespace partition_alloc {
+
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+void EarlyMallocZoneRegistration() {}
+void AllowDoublePartitionAllocZoneRegistration() {}
+
+#else
+
+extern "C" {
+// abort_report_np() records the message in a special section that both the
+// system CrashReporter and Crashpad collect in crash reports. See also in
+// chrome_exe_main_mac.cc.
+void abort_report_np(const char* fmt, ...);
+}
+
+namespace {
+
+malloc_zone_t* GetDefaultMallocZone() {
+  // malloc_default_zone() does not return... the default zone, but the
+  // initial one. The default one is the first element of the default zone
+  // array.
+  unsigned int zone_count = 0;
+  vm_address_t* zones = nullptr;
+  kern_return_t result = malloc_get_all_zones(
+      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
+  if (result != KERN_SUCCESS) {
+    abort_report_np("Cannot enumerate malloc() zones");
+  }
+  return reinterpret_cast<malloc_zone_t*>(zones[0]);
+}
+
+}  // namespace
+
+void EarlyMallocZoneRegistration() {
+  // Must have static storage duration, as raw pointers are passed to
+  // libsystem_malloc.
+  static malloc_zone_t g_delegating_zone;
+  static malloc_introspection_t g_delegating_zone_introspect;
+  static malloc_zone_t* g_default_zone;
+
+  // Make sure that the default zone is instantiated.
+  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
+
+  g_default_zone = GetDefaultMallocZone();
+
+  // The delegating zone:
+  // - Forwards all allocations to the existing default zone
+  // - Does *not* claim to own any memory, meaning that it will always be
+  //   skipped in free() in libsystem_malloc.dylib.
+  //
+  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
+  // the main library. Since the main library depends on many external
+  // libraries, we cannot install PartitionAlloc as the default zone without
+  // concurrency issues.
+  //
+  // Instead, what we do here, while the process is single-threaded, is:
+  // - Register the delegating zone as the default one.
+  // - Set the original (libsystem_malloc's) one as the second zone
+  //
+  // Later, when PartitionAlloc initializes, we replace the default (delegating)
+  // zone with ours. The end state is:
+  // 1. PartitionAlloc zone
+  // 2. libsystem_malloc zone
+
+  // Set up of the delegating zone. Note that it doesn't just forward calls to
+  // the default zone. This is because the system zone's malloc_zone_t pointer
+  // actually points to a larger struct, containing allocator metadata. So if we
+  // pass as the first parameter the "simple" delegating zone pointer, then we
+  // immediately crash inside the system zone functions. So we need to replace
+  // the zone pointer as well.
+  //
+  // Calls fall into 4 categories:
+  // - Allocation calls: forwarded to the real system zone
+  // - "Is this pointer yours" calls: always answer no
+  // - free(): Should never be called, but is in practice, see comments below.
+  // - Diagnostics and debugging: these are typically called for every
+  //   zone. They are no-ops for us, as we don't want to double-count, or lock
+  //   the data structures of the real zone twice.
+
+  // Allocation: Forward to the real zone.
+  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
+    return g_default_zone->malloc(g_default_zone, size);
+  };
+  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
+                                size_t size) {
+    return g_default_zone->calloc(g_default_zone, num_items, size);
+  };
+  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
+    return g_default_zone->valloc(g_default_zone, size);
+  };
+  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
+    return g_default_zone->realloc(g_default_zone, ptr, size);
+  };
+  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
+                                      void** results, unsigned num_requested) {
+    return g_default_zone->batch_malloc(g_default_zone, size, results,
+                                        num_requested);
+  };
+  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
+                                  size_t size) {
+    return g_default_zone->memalign(g_default_zone, alignment, size);
+  };
+
+  // Does ptr belong to this zone? Return value is != 0 if so.
+  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
+    return 0;
+  };
+
+  // Free functions.
+  // The normal path for freeing memory is:
+  // 1. Try all zones in order, call zone->size(ptr)
+  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
+  // 3. If no zone matches, crash.
+  //
+  // Since this zone always returns 0 in size() (see above), then zone->free()
+  // should never be called. Unfortunately, this is not the case, as some places
+  // in CoreFoundation call malloc_zone_free(zone, ptr) directly. So rather than
+  // crashing, forward the call. It's the caller's responsibility to use the
+  // same zone for free() as for the allocation (this is in the contract of
+  // malloc_zone_free()).
+  //
+  // However, note that the sequence of calls size() -> free() is not possible
+  // for this zone, as size() always returns 0.
+  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
+    return g_default_zone->free(g_default_zone, ptr);
+  };
+  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
+                                            size_t size) {
+    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
+  };
+  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
+                                    unsigned num_to_be_freed) {
+    return g_default_zone->batch_free(g_default_zone, to_be_freed,
+                                      num_to_be_freed);
+  };
+#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
+  g_delegating_zone.try_free_default = [](malloc_zone_t* zone, void* ptr) {
+    return g_default_zone->try_free_default(g_default_zone, ptr);
+  };
+#endif
+
+  // Diagnostics and debugging.
+  //
+  // Do nothing to reduce memory footprint, the real
+  // zone will do it.
+  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
+                                         size_t goal) -> size_t { return 0; };
+
+  // Introspection calls are not all optional, for instance locking and
+  // unlocking before/after fork() is not optional.
+  //
+  // Nothing to enumerate.
+  g_delegating_zone_introspect.enumerator =
+      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
+         memory_reader_t reader,
+         vm_range_recorder_t recorder) -> kern_return_t {
+    return KERN_SUCCESS;
+  };
+  // Need to provide a real implementation, it is used for e.g. array sizing.
+  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
+                                              size_t size) {
+    return g_default_zone->introspect->good_size(g_default_zone, size);
+  };
+  // Nothing to do.
+  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
+    return true;
+  };
+  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
+                                          boolean_t verbose) {};
+  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
+  // Do not forward the lock / unlock calls. Since the default zone is still
+  // there, we should not lock here, as it would lock the zone twice (all
+  // zones are locked before fork().). Rather, do nothing, since this fake
+  // zone does not need any locking.
+  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
+  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
+  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
+  // No stats.
+  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
+                                               malloc_statistics_t* stats) {};
+  // We are not locked.
+  g_delegating_zone_introspect.zone_locked =
+      [](malloc_zone_t* zone) -> boolean_t { return false; };
+  // Don't support discharge checking.
+  g_delegating_zone_introspect.enable_discharge_checking =
+      [](malloc_zone_t* zone) -> boolean_t { return false; };
+  g_delegating_zone_introspect.disable_discharge_checking =
+      [](malloc_zone_t* zone) {};
+  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
+                                              void* memory) {};
+
+  // Could use something lower to support fewer functions, but this is
+  // consistent with the real zone installed by PartitionAlloc.
+  g_delegating_zone.version = allocator_shim::kZoneVersion;
+  g_delegating_zone.introspect = &g_delegating_zone_introspect;
+  // This name is used in PartitionAlloc's initialization to determine whether
+  // it should replace the delegating zone.
+  g_delegating_zone.zone_name = allocator_shim::kDelegatingZoneName;
+
+  // Register puts the new zone at the end, unregister swaps the new zone with
+  // the last one.
+  // The zone array is, after these lines, in order:
+  // 1. |g_default_zone|...|g_delegating_zone|
+  // 2. |g_delegating_zone|...|  (no more default)
+  // 3. |g_delegating_zone|...|g_default_zone|
+  malloc_zone_register(&g_delegating_zone);
+  malloc_zone_unregister(g_default_zone);
+  malloc_zone_register(g_default_zone);
+
+  // Make sure that the purgeable zone is after the default one.
+  // Will make g_default_zone take the purgeable zone spot
+  malloc_zone_unregister(purgeable_zone);
+  // Add back the purgeable zone as the last one.
+  malloc_zone_register(purgeable_zone);
+
+  // Final configuration:
+  // |g_delegating_zone|...|g_default_zone|purgeable_zone|
+
+  // Sanity check.
+  if (GetDefaultMallocZone() != &g_delegating_zone) {
+    abort_report_np("Failed to install the delegating zone as default.");
+  }
+}
+
+void AllowDoublePartitionAllocZoneRegistration() {
+  unsigned int zone_count = 0;
+  vm_address_t* zones = nullptr;
+  kern_return_t result = malloc_get_all_zones(
+      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
+  if (result != KERN_SUCCESS) {
+    abort_report_np("Cannot enumerate malloc() zones");
+  }
+
+  // If PartitionAlloc is one of the zones, *change* its name so that
+  // registration can happen multiple times. This works because zone
+  // registration only keeps a pointer to the struct, it does not copy the data.
+  for (unsigned int i = 0; i < zone_count; i++) {
+    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
+    if (zone->zone_name &&
+        strcmp(zone->zone_name, allocator_shim::kPartitionAllocZoneName) == 0) {
+      zone->zone_name = "RenamedPartitionAlloc";
+      break;
+    }
+  }
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+}  // namespace partition_alloc
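Note: per the header's contract, EarlyMallocZoneRegistration() must run exactly once, before the process becomes multi-threaded, so it belongs at the very top of the embedder's entry point. A hedged call-site sketch; the main() below is illustrative only and not part of this change:

    // Illustrative only: an embedder's entry point performing the early zone
    // registration before anything can spawn a thread. Assumes the target
    // links against base/allocator/early_zone_registration_apple.cc.
    #include "base/allocator/early_zone_registration_apple.h"

    int main(int argc, char** argv) {
      // Must run while the process is still single-threaded; when
      // PartitionAlloc-as-malloc is disabled this is a no-op.
      partition_alloc::EarlyMallocZoneRegistration();

      // ... load the main library and run the rest of startup here ...
      return 0;
    }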
diff --git a/base/allocator/early_zone_registration_apple.h b/base/allocator/early_zone_registration_apple.h
new file mode 100644
index 0000000..272a872
--- /dev/null
+++ b/base/allocator/early_zone_registration_apple.h
@@ -0,0 +1,29 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
+#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
+
+// This is an Apple-only file, used to register PartitionAlloc's zone *before*
+// the process becomes multi-threaded.
+
+namespace partition_alloc {
+
+// Must be called *once*, *before* the process becomes multi-threaded.
+void EarlyMallocZoneRegistration();
+
+// Tricks the registration code to believe that PartitionAlloc was not already
+// registered. This allows a future library load to register PartitionAlloc's
+// zone as well, rather than bailing out.
+//
+// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
+// ideally be removed. Indeed, by allowing two zones to be registered, we still
+// end up with a split heap, and more memory usage.
+//
+// This is a hack for https://crbug.com/1274236.
+void AllowDoublePartitionAllocZoneRegistration();
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
diff --git a/base/allocator/early_zone_registration_mac.cc b/base/allocator/early_zone_registration_mac.cc
deleted file mode 100644
index 1c75bc9..0000000
--- a/base/allocator/early_zone_registration_mac.cc
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/early_zone_registration_mac.h"
-
-#include <mach/mach.h>
-#include <malloc/malloc.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/shim/early_zone_registration_constants.h"
-
-// BASE_EXPORT tends to be defined as soon as anything from //base is included.
-#if defined(BASE_EXPORT)
-#error "This file cannot depend on //base"
-#endif
-
-namespace partition_alloc {
-
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-void EarlyMallocZoneRegistration() {}
-void AllowDoublePartitionAllocZoneRegistration() {}
-
-#else
-
-extern "C" {
-// abort_report_np() records the message in a special section that both the
-// system CrashReporter and Crashpad collect in crash reports. See also in
-// chrome_exe_main_mac.cc.
-void abort_report_np(const char* fmt, ...);
-}
-
-namespace {
-
-malloc_zone_t* GetDefaultMallocZone() {
-  // malloc_default_zone() does not return... the default zone, but the
-  // initial one. The default one is the first element of the default zone
-  // array.
-  unsigned int zone_count = 0;
-  vm_address_t* zones = nullptr;
-  kern_return_t result =
-      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
-  if (result != KERN_SUCCESS) {
-    abort_report_np("Cannot enumerate malloc() zones");
-  }
-  return reinterpret_cast<malloc_zone_t*>(zones[0]);
-}
-
-}  // namespace
-
-void EarlyMallocZoneRegistration() {
-  // Must have static storage duration, as raw pointers are passed to
-  // libsystem_malloc.
-  static malloc_zone_t g_delegating_zone;
-  static malloc_introspection_t g_delegating_zone_introspect;
-  static malloc_zone_t* g_default_zone;
-
-  // Make sure that the default zone is instantiated.
-  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
-
-  g_default_zone = GetDefaultMallocZone();
-
-  // The delegating zone:
-  // - Forwards all allocations to the existing default zone
-  // - Does *not* claim to own any memory, meaning that it will always be
-  //   skipped in free() in libsystem_malloc.dylib.
-  //
-  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
-  // the main library. Since the main library depends on many external
-  // libraries, we cannot install PartitionAlloc as the default zone without
-  // concurrency issues.
-  //
-  // Instead, what we do is here, while the process is single-threaded:
-  // - Register the delegating zone as the default one.
-  // - Set the original (libsystem_malloc's) one as the second zone
-  //
-  // Later, when PartitionAlloc initializes, we replace the default (delegating)
-  // zone with ours. The end state is:
-  // 1. PartitionAlloc zone
-  // 2. libsystem_malloc zone
-
-  // Set up of the delegating zone. Note that it doesn't just forward calls to
-  // the default zone. This is because the system zone's malloc_zone_t pointer
-  // actually points to a larger struct, containing allocator metadata. So if we
-  // pass as the first parameter the "simple" delegating zone pointer, then we
-  // immediately crash inside the system zone functions. So we need to replace
-  // the zone pointer as well.
-  //
-  // Calls fall into 4 categories:
-  // - Allocation calls: forwarded to the real system zone
-  // - "Is this pointer yours" calls: always answer no
-  // - free(): Should never be called, but is in practice, see comments below.
-  // - Diagnostics and debugging: these are typically called for every
-  //   zone. They are no-ops for us, as we don't want to double-count, or lock
-  //   the data structures of the real zone twice.
-
-  // Allocation: Forward to the real zone.
-  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
-    return g_default_zone->malloc(g_default_zone, size);
-  };
-  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
-                                size_t size) {
-    return g_default_zone->calloc(g_default_zone, num_items, size);
-  };
-  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
-    return g_default_zone->valloc(g_default_zone, size);
-  };
-  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
-    return g_default_zone->realloc(g_default_zone, ptr, size);
-  };
-  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
-                                      void** results, unsigned num_requested) {
-    return g_default_zone->batch_malloc(g_default_zone, size, results,
-                                        num_requested);
-  };
-  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
-                                  size_t size) {
-    return g_default_zone->memalign(g_default_zone, alignment, size);
-  };
-
-  // Does ptr belong to this zone? Return value is != 0 if so.
-  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
-    return 0;
-  };
-
-  // Free functions.
-  // The normal path for freeing memory is:
-  // 1. Try all zones in order, call zone->size(ptr)
-  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
-  // 3. If no zone matches, crash.
-  //
-  // Since this zone always returns 0 in size() (see above), then zone->free()
-  // should never be called. Unfortunately, this is not the case, as some places
-  // in CoreFoundation call malloc_zone_free(zone, ptr) directly. So rather than
-  // crashing, forward the call. It's the caller's responsibility to use the
-  // same zone for free() as for the allocation (this is in the contract of
-  // malloc_zone_free()).
-  //
-  // However, note that the sequence of calls size() -> free() is not possible
-  // for this zone, as size() always returns 0.
-  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
-    return g_default_zone->free(g_default_zone, ptr);
-  };
-  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
-                                            size_t size) {
-    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
-  };
-  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
-                                    unsigned num_to_be_freed) {
-    return g_default_zone->batch_free(g_default_zone, to_be_freed,
-                                      num_to_be_freed);
-  };
-#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
-  g_delegating_zone.try_free_default = [](malloc_zone_t* zone, void* ptr) {
-    return g_default_zone->try_free_default(g_default_zone, ptr);
-  };
-#endif
-
-  // Diagnostics and debugging.
-  //
-  // Do nothing to reduce memory footprint, the real
-  // zone will do it.
-  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
-                                         size_t goal) -> size_t { return 0; };
-
-  // Introspection calls are not all optional, for instance locking and
-  // unlocking before/after fork() is not optional.
-  //
-  // Nothing to enumerate.
-  g_delegating_zone_introspect.enumerator =
-      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
-         memory_reader_t reader,
-         vm_range_recorder_t recorder) -> kern_return_t {
-    return KERN_SUCCESS;
-  };
-  // Need to provide a real implementation, it is used for e.g. array sizing.
-  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
-                                              size_t size) {
-    return g_default_zone->introspect->good_size(g_default_zone, size);
-  };
-  // Nothing to do.
-  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
-    return true;
-  };
-  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
-                                          boolean_t verbose) {};
-  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
-  // Do not forward the lock / unlock calls. Since the default zone is still
-  // there, we should not lock here, as it would lock the zone twice (all
-  // zones are locked before fork().). Rather, do nothing, since this fake
-  // zone does not need any locking.
-  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
-  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
-  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
-  // No stats.
-  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
-                                               malloc_statistics_t* stats) {};
-  // We are not locked.
-  g_delegating_zone_introspect.zone_locked =
-      [](malloc_zone_t* zone) -> boolean_t { return false; };
-  // Don't support discharge checking.
-  g_delegating_zone_introspect.enable_discharge_checking =
-      [](malloc_zone_t* zone) -> boolean_t { return false; };
-  g_delegating_zone_introspect.disable_discharge_checking =
-      [](malloc_zone_t* zone) {};
-  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
-                                              void* memory) {};
-
-  // Could use something lower to support fewer functions, but this is
-  // consistent with the real zone installed by PartitionAlloc.
-  g_delegating_zone.version = allocator_shim::kZoneVersion;
-  g_delegating_zone.introspect = &g_delegating_zone_introspect;
-  // This name is used in PartitionAlloc's initialization to determine whether
-  // it should replace the delegating zone.
-  g_delegating_zone.zone_name = allocator_shim::kDelegatingZoneName;
-
-  // Register puts the new zone at the end, unregister swaps the new zone with
-  // the last one.
-  // The zone array is, after these lines, in order:
-  // 1. |g_default_zone|...|g_delegating_zone|
-  // 2. |g_delegating_zone|...|  (no more default)
-  // 3. |g_delegating_zone|...|g_default_zone|
-  malloc_zone_register(&g_delegating_zone);
-  malloc_zone_unregister(g_default_zone);
-  malloc_zone_register(g_default_zone);
-
-  // Make sure that the purgeable zone is after the default one.
-  // Will make g_default_zone take the purgeable zone spot
-  malloc_zone_unregister(purgeable_zone);
-  // Add back the purgeable zone as the last one.
-  malloc_zone_register(purgeable_zone);
-
-  // Final configuration:
-  // |g_delegating_zone|...|g_default_zone|purgeable_zone|
-
-  // Sanity check.
-  if (GetDefaultMallocZone() != &g_delegating_zone) {
-    abort_report_np("Failed to install the delegating zone as default.");
-  }
-}
-
-void AllowDoublePartitionAllocZoneRegistration() {
-  unsigned int zone_count = 0;
-  vm_address_t* zones = nullptr;
-  kern_return_t result =
-      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
-  if (result != KERN_SUCCESS) {
-    abort_report_np("Cannot enumerate malloc() zones");
-  }
-
-  // If PartitionAlloc is one of the zones, *change* its name so that
-  // registration can happen multiple times. This works because zone
-  // registration only keeps a pointer to the struct, it does not copy the data.
-  for (unsigned int i = 0; i < zone_count; i++) {
-    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
-    if (zone->zone_name &&
-        strcmp(zone->zone_name, allocator_shim::kPartitionAllocZoneName) == 0) {
-      zone->zone_name = "RenamedPartitionAlloc";
-      break;
-    }
-  }
-}
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-}  // namespace partition_alloc
diff --git a/base/allocator/early_zone_registration_mac.h b/base/allocator/early_zone_registration_mac.h
deleted file mode 100644
index fadcf31..0000000
--- a/base/allocator/early_zone_registration_mac.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
-#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
-
-// This is an Apple-only file, used to register PartitionAlloc's zone *before*
-// the process becomes multi-threaded.
-
-namespace partition_alloc {
-
-// Must be called *once*, *before* the process becomes multi-threaded.
-void EarlyMallocZoneRegistration();
-
-// Tricks the registration code to believe that PartitionAlloc was not already
-// registered. This allows a future library load to register PartitionAlloc's
-// zone as well, rather than bailing out.
-//
-// This is mutually exclusive with EarlyMallocZoneRegistation(), and should
-// ideally be removed. Indeed, by allowing two zones to be registered, we still
-// end up with a split heap, and more memory usage.
-//
-// This is a hack for crbug.com/1274236.
-void AllowDoublePartitionAllocZoneRegistration();
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_H_
diff --git a/base/allocator/miracle_parameter.cc b/base/allocator/miracle_parameter.cc
new file mode 100644
index 0000000..6681cf0
--- /dev/null
+++ b/base/allocator/miracle_parameter.cc
@@ -0,0 +1,93 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/miracle_parameter.h"
+
+#include "base/command_line.h"
+#include "base/strings/strcat.h"
+#include "base/system/sys_info.h"
+
+namespace base {
+
+namespace miracle_parameter {
+
+namespace {
+
+std::string GetFieldTrialParamByFeatureAsString(
+    const base::Feature& feature,
+    const std::string& param_name,
+    const std::string& default_value) {
+  const std::string value =
+      base::GetFieldTrialParamValueByFeature(feature, param_name);
+  return value.empty() ? default_value : value;
+}
+
+}  // namespace
+
+std::string GetParamNameWithSuffix(const std::string& param_name) {
+  // `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
+  // internally. If the CommandLine is not initialized, we return early to avoid
+  // a crash.
+  if (!base::CommandLine::InitializedForCurrentProcess()) {
+    return param_name;
+  }
+  int physical_memory_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
+  const char* suffix =
+      physical_memory_mb < kMiracleParameterMemory512MB  ? "ForLessThan512MB"
+      : physical_memory_mb < kMiracleParameterMemory1GB  ? "For512MBTo1GB"
+      : physical_memory_mb < kMiracleParameterMemory2GB  ? "For1GBTo2GB"
+      : physical_memory_mb < kMiracleParameterMemory4GB  ? "For2GBTo4GB"
+      : physical_memory_mb < kMiracleParameterMemory8GB  ? "For4GBTo8GB"
+      : physical_memory_mb < kMiracleParameterMemory16GB ? "For8GBTo16GB"
+                                                         : "For16GBAndAbove";
+  return base::StrCat({param_name, suffix});
+}
+
+std::string GetMiracleParameterAsString(const base::Feature& feature,
+                                        const std::string& param_name,
+                                        const std::string& default_value) {
+  return GetFieldTrialParamByFeatureAsString(
+      feature, GetParamNameWithSuffix(param_name),
+      GetFieldTrialParamByFeatureAsString(feature, param_name, default_value));
+}
+
+double GetMiracleParameterAsDouble(const base::Feature& feature,
+                                   const std::string& param_name,
+                                   double default_value) {
+  return base::GetFieldTrialParamByFeatureAsDouble(
+      feature, GetParamNameWithSuffix(param_name),
+      base::GetFieldTrialParamByFeatureAsDouble(feature, param_name,
+                                                default_value));
+}
+
+int GetMiracleParameterAsInt(const base::Feature& feature,
+                             const std::string& param_name,
+                             int default_value) {
+  return base::GetFieldTrialParamByFeatureAsInt(
+      feature, GetParamNameWithSuffix(param_name),
+      base::GetFieldTrialParamByFeatureAsInt(feature, param_name,
+                                             default_value));
+}
+
+bool GetMiracleParameterAsBool(const base::Feature& feature,
+                               const std::string& param_name,
+                               bool default_value) {
+  return base::GetFieldTrialParamByFeatureAsBool(
+      feature, GetParamNameWithSuffix(param_name),
+      base::GetFieldTrialParamByFeatureAsBool(feature, param_name,
+                                              default_value));
+}
+
+base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
+                                               const std::string& param_name,
+                                               base::TimeDelta default_value) {
+  return base::GetFieldTrialParamByFeatureAsTimeDelta(
+      feature, GetParamNameWithSuffix(param_name),
+      base::GetFieldTrialParamByFeatureAsTimeDelta(feature, param_name,
+                                                   default_value));
+}
+
+}  // namespace miracle_parameter
+
+}  // namespace base
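
For reference, the getters added in this file resolve a parameter in two steps: they first look up the parameter name with a memory-based suffix and, if that is unset, fall back to the un-suffixed name, and finally to the hard-coded default. A minimal standalone sketch of the suffix selection, assuming only the bucket boundaries used by the kMiracleParameterMemory* constants above (the function name here is hypothetical and not part of the patch):

  #include <string>

  // Mirrors the threshold chain in GetParamNameWithSuffix().
  std::string SuffixForPhysicalMemoryMB(int physical_memory_mb) {
    if (physical_memory_mb < 512) return "ForLessThan512MB";
    if (physical_memory_mb < 1024) return "For512MBTo1GB";
    if (physical_memory_mb < 2 * 1024) return "For1GBTo2GB";
    if (physical_memory_mb < 4 * 1024) return "For2GBTo4GB";
    if (physical_memory_mb < 8 * 1024) return "For4GBTo8GB";
    if (physical_memory_mb < 16 * 1024) return "For8GBTo16GB";
    return "For16GBAndAbove";
  }

  // e.g. SuffixForPhysicalMemoryMB(6 * 1024) == "For4GBTo8GB", so a 6GB device
  // would read "MyParamFor4GBTo8GB" first and fall back to "MyParam" if unset.
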
diff --git a/base/allocator/miracle_parameter.h b/base/allocator/miracle_parameter.h
new file mode 100644
index 0000000..d894e1a
--- /dev/null
+++ b/base/allocator/miracle_parameter.h
@@ -0,0 +1,177 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
+#define BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
+
+#include "base/base_export.h"
+#include "base/containers/span.h"
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+
+// This is a mirror copy of //components/miracle_parameter/, kept here to break
+// the dependency cycle (base -> miracle_parameter -> base).
+// Eventually the miracle_parameter component will have a public interface in
+// //base/, and this copy can be removed.
+// TODO(crbug.com/1475915): remove miracle_parameter from
+// //base/allocator/.
+
+namespace base {
+
+namespace miracle_parameter {
+
+namespace {
+
+template <typename Enum>
+Enum GetFieldTrialParamByFeatureAsEnum(
+    const base::Feature& feature,
+    const std::string& param_name,
+    const Enum default_value,
+    const base::span<const typename base::FeatureParam<Enum>::Option>&
+        options) {
+  std::string string_value =
+      base::GetFieldTrialParamValueByFeature(feature, param_name);
+  if (string_value.empty()) {
+    return default_value;
+  }
+
+  for (const auto& option : options) {
+    if (string_value == option.name) {
+      return option.value;
+    }
+  }
+
+  base::LogInvalidEnumValue(feature, param_name, string_value,
+                            static_cast<int>(default_value));
+  return default_value;
+}
+
+}  // namespace
+
+constexpr int kMiracleParameterMemory512MB = 512;
+constexpr int kMiracleParameterMemory1GB = 1024;
+constexpr int kMiracleParameterMemory2GB = 2 * 1024;
+constexpr int kMiracleParameterMemory4GB = 4 * 1024;
+constexpr int kMiracleParameterMemory8GB = 8 * 1024;
+constexpr int kMiracleParameterMemory16GB = 16 * 1024;
+
+// GetParamNameWithSuffix appends a suffix to the given parameter name based
+// on the amount of physical memory.
+//
+// - "ForLessThan512MB" for less than 512MB memory devices.
+// - "For512MBTo1GB" for 512MB to 1GB memory devices.
+// - "For1GBTo2GB" for 1GB to 2GB memory devices.
+// - "For2GBTo4GB" for 2GB to 4GB memory devices.
+// - "For4GBTo8GB" for 4GB to 8GB memory devices.
+// - "For8GBTo16GB" for 8GB to 16GB memory devices.
+// - "For16GBAndAbove" for 16GB memory and above devices.
+BASE_EXPORT
+std::string GetParamNameWithSuffix(const std::string& param_name);
+
+// Provides behavior similar to FeatureParam<std::string>, except the return
+// value is determined by the amount of physical memory.
+BASE_EXPORT
+std::string GetMiracleParameterAsString(const base::Feature& feature,
+                                        const std::string& param_name,
+                                        const std::string& default_value);
+
+// Provides behavior similar to FeatureParam<double>, except the return value
+// is determined by the amount of physical memory.
+BASE_EXPORT
+double GetMiracleParameterAsDouble(const base::Feature& feature,
+                                   const std::string& param_name,
+                                   double default_value);
+
+// Provides behavior similar to FeatureParam<int>, except the return value is
+// determined by the amount of physical memory.
+BASE_EXPORT
+int GetMiracleParameterAsInt(const base::Feature& feature,
+                             const std::string& param_name,
+                             int default_value);
+
+// Provides behavior similar to FeatureParam<bool>, except the return value
+// is determined by the amount of physical memory.
+BASE_EXPORT
+bool GetMiracleParameterAsBool(const base::Feature& feature,
+                               const std::string& param_name,
+                               bool default_value);
+
+// Provides behavior similar to FeatureParam<base::TimeDelta>, except the
+// return value is determined by the amount of physical memory.
+BASE_EXPORT
+base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
+                                               const std::string& param_name,
+                                               base::TimeDelta default_value);
+
+// Provides behavior similar to FeatureParam<Enum>, except the return value
+// is determined by the amount of physical memory.
+template <typename Enum>
+Enum GetMiracleParameterAsEnum(
+    const base::Feature& feature,
+    const std::string& param_name,
+    const Enum default_value,
+    const base::span<const typename base::FeatureParam<Enum>::Option> options) {
+  return GetFieldTrialParamByFeatureAsEnum(
+      feature, GetParamNameWithSuffix(param_name),
+      GetFieldTrialParamByFeatureAsEnum(feature, param_name, default_value,
+                                        options),
+      options);
+}
+
+#define MIRACLE_PARAMETER_FOR_STRING(function_name, feature, param_name,    \
+                                     default_value)                         \
+  std::string function_name() {                                             \
+    static const std::string value =                                        \
+        miracle_parameter::GetMiracleParameterAsString(feature, param_name, \
+                                                       default_value);      \
+    return value;                                                           \
+  }
+
+#define MIRACLE_PARAMETER_FOR_DOUBLE(function_name, feature, param_name,    \
+                                     default_value)                         \
+  double function_name() {                                                  \
+    static const double value =                                             \
+        miracle_parameter::GetMiracleParameterAsDouble(feature, param_name, \
+                                                       default_value);      \
+    return value;                                                           \
+  }
+
+#define MIRACLE_PARAMETER_FOR_INT(function_name, feature, param_name,     \
+                                  default_value)                          \
+  int function_name() {                                                   \
+    static const int value = miracle_parameter::GetMiracleParameterAsInt( \
+        feature, param_name, default_value);                              \
+    return value;                                                         \
+  }
+
+#define MIRACLE_PARAMETER_FOR_BOOL(function_name, feature, param_name,      \
+                                   default_value)                           \
+  bool function_name() {                                                    \
+    static const bool value = miracle_parameter::GetMiracleParameterAsBool( \
+        feature, param_name, default_value);                                \
+    return value;                                                           \
+  }
+
+#define MIRACLE_PARAMETER_FOR_TIME_DELTA(function_name, feature, param_name,   \
+                                         default_value)                        \
+  base::TimeDelta function_name() {                                            \
+    static const base::TimeDelta value =                                       \
+        miracle_parameter::GetMiracleParameterAsTimeDelta(feature, param_name, \
+                                                          default_value);      \
+    return value;                                                              \
+  }
+
+#define MIRACLE_PARAMETER_FOR_ENUM(function_name, feature, param_name,      \
+                                   default_value, type, options)            \
+  type function_name() {                                                    \
+    static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
+        feature, param_name, default_value, base::make_span(options));      \
+    return value;                                                           \
+  }
+
+}  // namespace miracle_parameter
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
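
The MIRACLE_PARAMETER_FOR_* macros above wrap the getters in a function with a function-local static, so the field-trial lookup runs once per process and the suffixed name takes priority over the plain one. A minimal usage sketch, assuming a hypothetical feature kMyCacheTuning and parameter "MyCacheBudgetBytes" (neither exists in this patch):

  #include "base/allocator/miracle_parameter.h"
  #include "base/feature_list.h"

  namespace base {

  // Hypothetical feature, for illustration only.
  BASE_FEATURE(kMyCacheTuning, "MyCacheTuning", FEATURE_DISABLED_BY_DEFAULT);

  // Expands to `int GetMyCacheBudgetBytes()`, which looks up
  // "MyCacheBudgetBytes<memory suffix>" first, then "MyCacheBudgetBytes",
  // then falls back to 64 * 1024, caching the result in a static.
  MIRACLE_PARAMETER_FOR_INT(GetMyCacheBudgetBytes,
                            kMyCacheTuning,
                            "MyCacheBudgetBytes",
                            64 * 1024)

  }  // namespace base
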
diff --git a/base/allocator/partition_alloc_features.cc b/base/allocator/partition_alloc_features.cc
index 40c923d..5918df5 100644
--- a/base/allocator/partition_alloc_features.cc
+++ b/base/allocator/partition_alloc_features.cc
@@ -4,10 +4,16 @@
 
 #include "base/allocator/partition_alloc_features.h"
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/miracle_parameter.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
 #include "base/base_export.h"
 #include "base/feature_list.h"
 #include "base/features.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/time/time.h"
 #include "build/build_config.h"
 #include "build/chromecast_buildflags.h"
 #include "build/chromeos_buildflags.h"
@@ -87,24 +93,37 @@
 // Use a larger maximum thread cache cacheable bucket size.
 BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
              "PartitionAllocLargeThreadCacheSize",
-#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
-             // Not unconditionally enabled on 32 bit Android, since it is a
-             // more memory-constrained platform.
-             FEATURE_DISABLED_BY_DEFAULT
-#else
-             FEATURE_ENABLED_BY_DEFAULT
-#endif
-);
+             FEATURE_ENABLED_BY_DEFAULT);
+
+MIRACLE_PARAMETER_FOR_INT(
+    GetPartitionAllocLargeThreadCacheSizeValue,
+    kPartitionAllocLargeThreadCacheSize,
+    "PartitionAllocLargeThreadCacheSizeValue",
+    ::partition_alloc::ThreadCacheLimits::kLargeSizeThreshold)
+
+MIRACLE_PARAMETER_FOR_INT(
+    GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid,
+    kPartitionAllocLargeThreadCacheSize,
+    "PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid",
+    ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold)
 
 BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
              "PartitionAllocLargeEmptySlotSpanRing",
              FEATURE_DISABLED_BY_DEFAULT);
+
+BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
+             "PartitionAllocSchedulerLoopQuarantine",
+             FEATURE_DISABLED_BY_DEFAULT);
+// Scheduler Loop Quarantine's capacity in bytes.
+const base::FeatureParam<int> kPartitionAllocSchedulerLoopQuarantineCapacity{
+    &kPartitionAllocSchedulerLoopQuarantine,
+    "PartitionAllocSchedulerLoopQuarantineCapacity", 0};
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
 
 BASE_FEATURE(kPartitionAllocBackupRefPtr,
              "PartitionAllocBackupRefPtr",
 #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
-    BUILDFLAG(IS_CHROMEOS_ASH) ||                                      \
+    BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS) ||     \
     (BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CASTOS)) ||                  \
     BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
              FEATURE_ENABLED_BY_DEFAULT
@@ -116,7 +135,7 @@
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtrForAsh);
 BASE_FEATURE(kPartitionAllocBackupRefPtrForAsh,
              "PartitionAllocBackupRefPtrForAsh",
-             FEATURE_DISABLED_BY_DEFAULT);
+             FEATURE_ENABLED_BY_DEFAULT);
 
 constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
     kBackupRefPtrEnabledProcessesOptions[] = {
@@ -194,6 +213,11 @@
              "KillPartitionAllocMemoryTagging",
              FEATURE_DISABLED_BY_DEFAULT);
 
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
+BASE_FEATURE(kPartitionAllocPermissiveMte,
+             "PartitionAllocPermissiveMte",
+             FEATURE_ENABLED_BY_DEFAULT);
+
 const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
     &kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
 const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
@@ -275,6 +299,34 @@
              "PartitionAllocDCScan",
              FEATURE_DISABLED_BY_DEFAULT);
 
+// Whether to straighten free lists for larger slot spans in PurgeMemory() ->
+// ... -> PartitionPurgeSlotSpan().
+BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
+             "PartitionAllocStraightenLargerSlotSpanFreeLists",
+             FEATURE_ENABLED_BY_DEFAULT);
+const base::FeatureParam<
+    partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option
+    kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
+        {partition_alloc::StraightenLargerSlotSpanFreeListsMode::
+             kOnlyWhenUnprovisioning,
+         "only-when-unprovisioning"},
+        {partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
+         "always"},
+};
+const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
+    kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
+        &kPartitionAllocStraightenLargerSlotSpanFreeLists,
+        "mode",
+        partition_alloc::StraightenLargerSlotSpanFreeListsMode::
+            kOnlyWhenUnprovisioning,
+        &kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption,
+};
+
+// Whether to sort free lists for smaller slot spans in PurgeMemory().
+BASE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists,
+             "PartitionAllocSortSmallerSlotSpanFreeLists",
+             FEATURE_ENABLED_BY_DEFAULT);
+
 // Whether to sort the active slot spans in PurgeMemory().
 BASE_FEATURE(kPartitionAllocSortActiveSlotSpans,
              "PartitionAllocSortActiveSlotSpans",
@@ -287,7 +339,7 @@
              FEATURE_DISABLED_BY_DEFAULT);
 #endif
 
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
 // A parameter to exclude or not exclude PartitionAllocSupport from
 // PartialLowModeOnMidRangeDevices. This is used to see how it affects
 // renderer performances, e.g. blink_perf.parser benchmark.
@@ -299,5 +351,89 @@
     false};
 #endif
 
+BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
+             "EnableConfigurableThreadCacheMultiplier",
+             base::FEATURE_DISABLED_BY_DEFAULT);
+
+MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplier,
+                             kEnableConfigurableThreadCacheMultiplier,
+                             "ThreadCacheMultiplier",
+                             2.)
+
+MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
+                             kEnableConfigurableThreadCacheMultiplier,
+                             "ThreadCacheMultiplierForAndroid",
+                             1.)
+
+constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
+    base::TimeDelta time_delta) {
+  return partition_alloc::internal::base::Microseconds(
+      time_delta.InMicroseconds());
+}
+
+constexpr base::TimeDelta FromPartitionAllocTimeDelta(
+    partition_alloc::internal::base::TimeDelta time_delta) {
+  return base::Microseconds(time_delta.InMicroseconds());
+}
+
+BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
+             "EnableConfigurableThreadCachePurgeInterval",
+             base::FEATURE_DISABLED_BY_DEFAULT);
+
+MIRACLE_PARAMETER_FOR_TIME_DELTA(
+    GetThreadCacheMinPurgeIntervalValue,
+    kEnableConfigurableThreadCachePurgeInterval,
+    "ThreadCacheMinPurgeInterval",
+    FromPartitionAllocTimeDelta(partition_alloc::kMinPurgeInterval))
+
+MIRACLE_PARAMETER_FOR_TIME_DELTA(
+    GetThreadCacheMaxPurgeIntervalValue,
+    kEnableConfigurableThreadCachePurgeInterval,
+    "ThreadCacheMaxPurgeInterval",
+    FromPartitionAllocTimeDelta(partition_alloc::kMaxPurgeInterval))
+
+MIRACLE_PARAMETER_FOR_TIME_DELTA(
+    GetThreadCacheDefaultPurgeIntervalValue,
+    kEnableConfigurableThreadCachePurgeInterval,
+    "ThreadCacheDefaultPurgeInterval",
+    FromPartitionAllocTimeDelta(partition_alloc::kDefaultPurgeInterval))
+
+const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMinPurgeInterval() {
+  return ToPartitionAllocTimeDelta(GetThreadCacheMinPurgeIntervalValue());
+}
+
+const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMaxPurgeInterval() {
+  return ToPartitionAllocTimeDelta(GetThreadCacheMaxPurgeIntervalValue());
+}
+
+const partition_alloc::internal::base::TimeDelta
+GetThreadCacheDefaultPurgeInterval() {
+  return ToPartitionAllocTimeDelta(GetThreadCacheDefaultPurgeIntervalValue());
+}
+
+BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
+             "EnableConfigurableThreadCacheMinCachedMemoryForPurging",
+             base::FEATURE_DISABLED_BY_DEFAULT);
+
+MIRACLE_PARAMETER_FOR_INT(
+    GetThreadCacheMinCachedMemoryForPurgingBytes,
+    kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
+    "ThreadCacheMinCachedMemoryForPurgingBytes",
+    partition_alloc::kMinCachedMemoryForPurgingBytes)
+
+// An apparent quarantine leak in the buffer partition unacceptably
+// bloats memory when MiraclePtr is enabled in the renderer process.
+// We believe we have found and patched the leak, but out of an
+// abundance of caution, we provide this toggle that allows us to
+// wholly disable MiraclePtr in the buffer partition, if necessary.
+//
+// TODO(crbug.com/1444624): this is unneeded once
+// MiraclePtr-for-Renderer launches.
+BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
+             "PartitionAllocDisableBRPInBufferPartition",
+             FEATURE_DISABLED_BY_DEFAULT);
+
 }  // namespace features
 }  // namespace base
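
PartitionAlloc carries its own minimal copy of base, so partition_alloc::internal::base::TimeDelta is a distinct type from base::TimeDelta; the two constexpr helpers added above bridge them by copying the microsecond count, letting the PA-side purge-interval defaults flow through the base::TimeDelta-typed miracle parameters and back. A standalone sketch of that round trip, using hypothetical stand-in structs rather than the real types:

  #include <cassert>
  #include <cstdint>

  // Stand-ins for the two TimeDelta types; both carry a microsecond count,
  // which is exactly what the conversion helpers copy.
  struct ChromeDelta { int64_t us; };
  struct PaDelta { int64_t us; };

  PaDelta ToPa(ChromeDelta d) { return {d.us}; }    // cf. ToPartitionAllocTimeDelta
  ChromeDelta FromPa(PaDelta d) { return {d.us}; }  // cf. FromPartitionAllocTimeDelta

  int main() {
    PaDelta default_interval{15 * 1000 * 1000};  // arbitrary example value
    // A PA default is converted to base::TimeDelta (so Finch can override it)
    // and back; the microsecond count is preserved exactly.
    assert(ToPa(FromPa(default_interval)).us == default_interval.us);
  }
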
diff --git a/base/allocator/partition_alloc_features.h b/base/allocator/partition_alloc_features.h
index d404ed6..88786d5 100644
--- a/base/allocator/partition_alloc_features.h
+++ b/base/allocator/partition_alloc_features.h
@@ -5,12 +5,15 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/feature_list.h"
 #include "base/metrics/field_trial_params.h"
 #include "base/strings/string_piece.h"
+#include "base/time/time.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -62,8 +65,15 @@
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
+
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
+BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
+BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();
+
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
+extern const BASE_EXPORT base::FeatureParam<int>
+    kPartitionAllocSchedulerLoopQuarantineCapacity;
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
 
 enum class BackupRefPtrEnabledProcesses {
@@ -147,6 +157,7 @@
 // Kill switch for memory tagging. Skips any code related to memory tagging when
 // enabled.
 BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
 extern const BASE_EXPORT base::FeatureParam<bool>
     kBackupRefPtrAsanEnableDereferenceCheckParam;
 extern const BASE_EXPORT base::FeatureParam<bool>
@@ -164,18 +175,24 @@
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
-BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);
 
 BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
 extern const BASE_EXPORT base::FeatureParam<TimeDelta>
     kPartitionAllocMemoryReclaimerInterval;
+BASE_EXPORT BASE_DECLARE_FEATURE(
+    kPartitionAllocStraightenLargerSlotSpanFreeLists);
+extern const BASE_EXPORT
+    base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
+        kPartitionAllocStraightenLargerSlotSpanFreeListsMode;
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
 
 #if BUILDFLAG(IS_WIN)
 BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
 #endif
 
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
 extern const base::FeatureParam<bool>
     kPartialLowEndModeExcludePartitionAllocSupport;
 #endif
@@ -185,6 +202,24 @@
 inline constexpr base::StringPiece kRendererLiveBRPSyntheticTrialName =
     "BackupRefPtrRendererLive";
 
+BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
+BASE_EXPORT double GetThreadCacheMultiplier();
+BASE_EXPORT double GetThreadCacheMultiplierForAndroid();
+
+BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCachePurgeInterval);
+extern const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMinPurgeInterval();
+extern const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMaxPurgeInterval();
+extern const partition_alloc::internal::base::TimeDelta
+GetThreadCacheDefaultPurgeInterval();
+
+BASE_EXPORT BASE_DECLARE_FEATURE(
+    kEnableConfigurableThreadCacheMinCachedMemoryForPurging);
+BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();
+
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
+
 }  // namespace features
 }  // namespace base
 
diff --git a/base/allocator/partition_alloc_support.cc b/base/allocator/partition_alloc_support.cc
index 832e077..d0e9bda 100644
--- a/base/allocator/partition_alloc_support.cc
+++ b/base/allocator/partition_alloc_support.cc
@@ -11,20 +11,21 @@
 #include <string>
 
 #include "base/allocator/partition_alloc_features.h"
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
 #include "base/at_exit.h"
 #include "base/check.h"
 #include "base/cpu.h"
@@ -55,12 +56,12 @@
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
 #if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/shim/nonscannable_allocator.h"
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-#include "base/allocator/partition_allocator/starscan/stats_collector.h"
-#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h"
 #endif  // BUILDFLAG(USE_STARSCAN)
 
 #if BUILDFLAG(IS_ANDROID)
@@ -68,7 +69,7 @@
 #endif
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
 #endif
 
 namespace base::allocator {
@@ -414,6 +415,9 @@
       "internal::RawPtrBackupRefImpl<false, false>::ReleaseInternal",
       "internal::RawPtrBackupRefImpl<false, true>::ReleaseInternal",
 
+      // ChromeOS signatures
+      "base::allocator::dispatcher::internal::DispatcherImpl<>::FreeFn()",
+
       // Task traces are prefixed with "Task trace:" in
       // |TaskTrace::OutputToStream|
       "Task trace:",
@@ -1097,6 +1101,10 @@
       break;
   }
 
+  const size_t scheduler_loop_quarantine_capacity_in_bytes =
+      static_cast<size_t>(
+          base::features::kPartitionAllocSchedulerLoopQuarantineCapacity.Get());
+
   bool enable_memory_tagging = false;
   partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
       partition_alloc::TagViolationReportingMode::kUndefined;
@@ -1127,6 +1135,8 @@
                 partition_alloc::TagViolationReportingMode::kAsynchronous;
             break;
         }
+        partition_alloc::PermissiveMte::SetEnabled(base::FeatureList::IsEnabled(
+            base::features::kPartitionAllocPermissiveMte));
         partition_alloc::internal::
             ChangeMemoryTaggingModeForAllThreadsPerProcess(
                 memory_tagging_reporting_mode);
@@ -1168,7 +1178,8 @@
                                          enable_memory_tagging),
       allocator_shim::UseDedicatedAlignedPartition(
           brp_config.use_dedicated_aligned_partition),
-      brp_config.ref_count_size, bucket_distribution);
+      brp_config.ref_count_size, bucket_distribution,
+      scheduler_loop_quarantine_capacity_in_bytes);
 
   const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();
   // As per description, extras are optional and are expected not to
@@ -1277,16 +1288,38 @@
   // initialized later.
   DCHECK(process_type != switches::kZygoteProcess);
 
+  partition_alloc::ThreadCacheRegistry::Instance().SetPurgingConfiguration(
+      base::features::GetThreadCacheMinPurgeInterval(),
+      base::features::GetThreadCacheMaxPurgeInterval(),
+      base::features::GetThreadCacheDefaultPurgeInterval(),
+      size_t(base::features::GetThreadCacheMinCachedMemoryForPurgingBytes()));
+
   base::allocator::StartThreadCachePeriodicPurge();
 
+  if (base::FeatureList::IsEnabled(
+          base::features::kEnableConfigurableThreadCacheMultiplier)) {
+    // If kEnableConfigurableThreadCacheMultiplier is enabled, override the
+    // multiplier value with the corresponding feature param.
 #if BUILDFLAG(IS_ANDROID)
-  // Lower thread cache limits to avoid stranding too much memory in the caches.
-  if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
-          features::kPartialLowEndModeExcludePartitionAllocSupport)) {
     ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-        ::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
-  }
+        base::features::GetThreadCacheMultiplierForAndroid());
+#else   // BUILDFLAG(IS_ANDROID)
+    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+        base::features::GetThreadCacheMultiplier());
 #endif  // BUILDFLAG(IS_ANDROID)
+  } else {
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+    // If kEnableConfigurableThreadCacheMultiplier is not enabled, lower the
+    // thread cache limits on low-end devices to avoid stranding too much
+    // memory in the caches.
+    if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
+            features::kPartialLowEndModeExcludePartitionAllocSupport)) {
+      ::partition_alloc::ThreadCacheRegistry::Instance()
+          .SetThreadCacheMultiplier(
+              ::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
+    }
+#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+  }
 
   // Renderer processes are more performance-sensitive, increase thread cache
   // limits.
@@ -1294,16 +1327,19 @@
       base::FeatureList::IsEnabled(
           base::features::kPartitionAllocLargeThreadCacheSize)) {
     largest_cached_size_ =
-        ::partition_alloc::ThreadCacheLimits::kLargeSizeThreshold;
+        size_t(base::features::GetPartitionAllocLargeThreadCacheSizeValue());
 
-#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
+#if BUILDFLAG(IS_ANDROID)
+    // Use an appropriately lower amount for Android devices with 3GB or less.
     // Devices almost always report less physical memory than what they actually
-    // have, so anything above 3GiB will catch 4GiB and above.
-    if (base::SysInfo::AmountOfPhysicalMemoryMB() <= 3500) {
-      largest_cached_size_ =
-          ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold;
+    // have, so use 3.2GB (a threshold commonly used throughout the code) to avoid
+    // accidentally catching devices advertised as 4GB.
+    if (base::SysInfo::AmountOfPhysicalMemoryMB() < 3.2 * 1024) {
+      largest_cached_size_ = size_t(
+          base::features::
+              GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid());
     }
-#endif  // BUILDFLAG(IS_ANDROID) && !defined(ARCH_CPU_64_BITS)
+#endif  // BUILDFLAG(IS_ANDROID)
 
     ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
   }
@@ -1329,10 +1365,17 @@
       base::SingleThreadTaskRunner::GetCurrentDefault());
 #endif
 
-  if (base::FeatureList::IsEnabled(
-          base::features::kPartitionAllocSortActiveSlotSpans)) {
-    partition_alloc::PartitionRoot::EnableSortActiveSlotSpans();
-  }
+  partition_alloc::PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
+      base::FeatureList::IsEnabled(
+          base::features::kPartitionAllocStraightenLargerSlotSpanFreeLists)
+          ? features::kPartitionAllocStraightenLargerSlotSpanFreeListsMode.Get()
+          : partition_alloc::StraightenLargerSlotSpanFreeListsMode::kNever);
+  partition_alloc::PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(
+      base::FeatureList::IsEnabled(
+          base::features::kPartitionAllocSortSmallerSlotSpanFreeLists));
+  partition_alloc::PartitionRoot::SetSortActiveSlotSpansEnabled(
+      base::FeatureList::IsEnabled(
+          base::features::kPartitionAllocSortActiveSlotSpans));
 }
 
 void PartitionAllocSupport::OnForegrounded(bool has_main_frame) {
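
The hunk above replaces the old 32-bit-Android-only cut-off of `<= 3500` MB with a `< 3.2 * 1024` MB check on all Android builds, and takes both cache-size values from the new miracle parameters instead of the hard-coded ThreadCacheLimits constants. A minimal sketch of the resulting decision when kPartitionAllocLargeThreadCacheSize is enabled; the helper and its parameter names are illustrative only, and the low-RAM branch applies only on Android in the actual code:

  #include <cstddef>

  size_t ChooseLargestCachedSize(int physical_memory_mb,
                                 size_t large_value,      // large-cache param value
                                 size_t low_ram_value) {  // low-RAM Android value
    // Devices tend to report less memory than advertised, so anything below
    // 3.2GB is treated as a 3GB-class device, while devices sold as 4GB keep
    // the larger threshold.
    return physical_memory_mb < 3.2 * 1024 ? low_ram_value : large_value;
  }
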
diff --git a/base/allocator/partition_alloc_support.h b/base/allocator/partition_alloc_support.h
index 9ebc0b8..9e00edd 100644
--- a/base/allocator/partition_alloc_support.h
+++ b/base/allocator/partition_alloc_support.h
@@ -8,9 +8,9 @@
 #include <map>
 #include <string>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
 #include "base/base_export.h"
 #include "base/memory/scoped_refptr.h"
 #include "base/synchronization/lock.h"
diff --git a/base/allocator/partition_alloc_support_unittest.cc b/base/allocator/partition_alloc_support_unittest.cc
index 3c65483..1f3e05d 100644
--- a/base/allocator/partition_alloc_support_unittest.cc
+++ b/base/allocator/partition_alloc_support_unittest.cc
@@ -10,9 +10,9 @@
 #include <vector>
 
 #include "base/allocator/partition_alloc_features.h"
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/test/gtest_util.h"
 #include "base/test/scoped_feature_list.h"
 #include "base/test/task_environment.h"
diff --git a/base/allocator/partition_allocator/BUILD.gn b/base/allocator/partition_allocator/BUILD.gn
index 9a26e0a..6d6fdd7 100644
--- a/base/allocator/partition_allocator/BUILD.gn
+++ b/base/allocator/partition_allocator/BUILD.gn
@@ -1,760 +1,15 @@
-# Copyright 2022 The Chromium Authors
+# Copyright 2024 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//base/allocator/partition_allocator/partition_alloc.gni")
-import("//build/buildflag_header.gni")
-import("//build/config/android/config.gni")
-import("//build/config/chromecast_build.gni")
-import("//build/config/chromeos/ui_mode.gni")
-import("//build/config/compiler/compiler.gni")
-import("//build/config/dcheck_always_on.gni")
-import("//build/config/logging.gni")
-
-# Add partition_alloc.gni and import it for partition_alloc configs.
-
-config("partition_alloc_implementation") {
-  # See also: `partition_alloc_base/component_export.h`
-  defines = [ "IS_PARTITION_ALLOC_IMPL" ]
+group("raw_ptr") {
+  public_deps = [ "src/partition_alloc:raw_ptr" ]
 }
 
-config("memory_tagging") {
-  if (current_cpu == "arm64" && is_clang &&
-      (is_linux || is_chromeos || is_android || is_fuchsia)) {
-    # base/ has access to the MTE intrinsics because it needs to use them,
-    # but they're not backwards compatible. Use base::CPU::has_mte()
-    # beforehand to confirm or use indirect functions (ifuncs) to select
-    # an MTE-specific implementation at dynamic link-time.
-    cflags = [
-      "-Xclang",
-      "-target-feature",
-      "-Xclang",
-      "+mte",
-    ]
-  }
-}
-
-# Used to shim malloc symbols on Android. see //base/allocator/README.md.
-config("wrap_malloc_symbols") {
-  ldflags = [
-    "-Wl,-wrap,calloc",
-    "-Wl,-wrap,free",
-    "-Wl,-wrap,malloc",
-    "-Wl,-wrap,memalign",
-    "-Wl,-wrap,posix_memalign",
-    "-Wl,-wrap,pvalloc",
-    "-Wl,-wrap,realloc",
-    "-Wl,-wrap,valloc",
-
-    # Not allocating memory, but part of the API
-    "-Wl,-wrap,malloc_usable_size",
-
-    # <stdlib.h> functions
-    "-Wl,-wrap,realpath",
-
-    # <string.h> functions
-    "-Wl,-wrap,strdup",
-    "-Wl,-wrap,strndup",
-
-    # <unistd.h> functions
-    "-Wl,-wrap,getcwd",
-
-    # <stdio.h> functions
-    "-Wl,-wrap,asprintf",
-    "-Wl,-wrap,vasprintf",
-  ]
-}
-
-config("mac_no_default_new_delete_symbols") {
-  if (!is_component_build) {
-    # This is already set when we compile libc++, see
-    # buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as
-    # well, since the shim defines the symbols, to prevent them being exported.
-    cflags = [ "-fvisibility-global-new-delete-hidden" ]
-  }
-}
-
-if (is_fuchsia) {
-  config("fuchsia_sync_lib") {
-    libs = [
-      "sync",  # Used by spinning_mutex.h.
-    ]
-  }
-}
-
-if (enable_pkeys && is_debug) {
-  config("no_stack_protector") {
-    cflags = [ "-fno-stack-protector" ]
-  }
-}
-
-_remove_configs = []
-_add_configs = []
-if (!is_debug || partition_alloc_optimized_debug) {
-  _remove_configs += [ "//build/config/compiler:default_optimization" ]
-
-  # Partition alloc is relatively hot (>1% of cycles for users of CrOS).
-  # Use speed-focused optimizations for it.
-  _add_configs += [ "//build/config/compiler:optimize_speed" ]
-} else {
-  _remove_configs += [ "//build/config/compiler:default_optimization" ]
-  _add_configs += [ "//build/config/compiler:no_optimize" ]
-}
-
-component("partition_alloc") {
-  public_deps = [
-    ":allocator_base",
-    ":allocator_core",
-    ":allocator_shim",
-  ]
-}
-
-# Changes the freelist implementation to use pointer offsets in lieu
-# of full-on pointers. Defaults to false, which implies the use of
-# `EncodedPartitionFreelistEntryPtr`.
-#
-# Only usable when pointers are 64-bit.
-use_freelist_pool_offsets = has_64_bit_pointers && false
-
-source_set("allocator_core") {
-  visibility = [ ":*" ]
-
-  sources = [
-    "address_pool_manager.cc",
-    "address_pool_manager.h",
-    "address_pool_manager_bitmap.cc",
-    "address_pool_manager_bitmap.h",
-    "address_pool_manager_types.h",
-    "address_space_randomization.cc",
-    "address_space_randomization.h",
-    "address_space_stats.h",
-    "allocation_guard.cc",
-    "allocation_guard.h",
-    "compressed_pointer.cc",
-    "compressed_pointer.h",
-    "dangling_raw_ptr_checks.cc",
-    "dangling_raw_ptr_checks.h",
-    "freeslot_bitmap.h",
-    "freeslot_bitmap_constants.h",
-    "gwp_asan_support.cc",
-    "gwp_asan_support.h",
-    "memory_reclaimer.cc",
-    "memory_reclaimer.h",
-    "oom.cc",
-    "oom.h",
-    "oom_callback.cc",
-    "oom_callback.h",
-    "page_allocator.cc",
-    "page_allocator.h",
-    "page_allocator_constants.h",
-    "page_allocator_internal.h",
-    "partition_address_space.cc",
-    "partition_address_space.h",
-    "partition_alloc-inl.h",
-    "partition_alloc.cc",
-    "partition_alloc.h",
-    "partition_alloc_allocation_data.h",
-    "partition_alloc_check.h",
-    "partition_alloc_config.h",
-    "partition_alloc_constants.h",
-    "partition_alloc_forward.h",
-    "partition_alloc_hooks.cc",
-    "partition_alloc_hooks.h",
-    "partition_bucket.cc",
-    "partition_bucket.h",
-    "partition_bucket_lookup.h",
-    "partition_cookie.h",
-    "partition_dcheck_helper.cc",
-    "partition_dcheck_helper.h",
-    "partition_direct_map_extent.h",
-    "partition_freelist_entry.cc",
-    "partition_freelist_entry.h",
-    "partition_lock.h",
-    "partition_oom.cc",
-    "partition_oom.h",
-    "partition_page.cc",
-    "partition_page.h",
-    "partition_page_constants.h",
-    "partition_ref_count.h",
-    "partition_root.cc",
-    "partition_root.h",
-    "partition_stats.cc",
-    "partition_stats.h",
-    "partition_superpage_extent_entry.h",
-    "partition_tls.h",
-    "random.cc",
-    "random.h",
-    "reservation_offset_table.cc",
-    "reservation_offset_table.h",
-    "reverse_bytes.h",
-    "spinning_mutex.cc",
-    "spinning_mutex.h",
-    "tagging.cc",
-    "tagging.h",
-    "thread_cache.cc",
-    "thread_cache.h",
-    "thread_isolation/alignment.h",
-    "thread_isolation/pkey.cc",
-    "thread_isolation/pkey.h",
-    "thread_isolation/thread_isolation.cc",
-    "thread_isolation/thread_isolation.h",
-    "yield_processor.h",
-  ]
-
-  if (use_starscan) {
-    sources += [
-      "starscan/logging.h",
-      "starscan/metadata_allocator.cc",
-      "starscan/metadata_allocator.h",
-      "starscan/pcscan.cc",
-      "starscan/pcscan.h",
-      "starscan/pcscan_internal.cc",
-      "starscan/pcscan_internal.h",
-      "starscan/pcscan_scheduling.cc",
-      "starscan/pcscan_scheduling.h",
-      "starscan/raceful_worklist.h",
-      "starscan/scan_loop.h",
-      "starscan/snapshot.cc",
-      "starscan/snapshot.h",
-      "starscan/stack/stack.cc",
-      "starscan/stack/stack.h",
-      "starscan/starscan_fwd.h",
-      "starscan/state_bitmap.h",
-      "starscan/stats_collector.cc",
-      "starscan/stats_collector.h",
-      "starscan/stats_reporter.h",
-      "starscan/write_protector.cc",
-      "starscan/write_protector.h",
-    ]
-  }
-
-  defines = []
-  if (is_win) {
-    sources += [
-      "page_allocator_internals_win.h",
-      "partition_tls_win.cc",
-    ]
-  } else if (is_posix) {
-    sources += [
-      "page_allocator_internals_posix.cc",
-      "page_allocator_internals_posix.h",
-    ]
-  } else if (is_fuchsia) {
-    sources += [ "page_allocator_internals_fuchsia.h" ]
-  }
-  if (is_android) {
-    # The Android NDK supports PR_MTE_* macros as of NDK r23.
-    if (android_ndk_major_version >= 23) {
-      defines += [ "HAS_PR_MTE_MACROS" ]
-    }
-  }
-  if (use_starscan) {
-    if (current_cpu == "x64") {
-      assert(pcscan_stack_supported)
-      sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
-    } else if (current_cpu == "x86") {
-      assert(pcscan_stack_supported)
-      sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
-    } else if (current_cpu == "arm") {
-      assert(pcscan_stack_supported)
-      sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
-    } else if (current_cpu == "arm64") {
-      assert(pcscan_stack_supported)
-      sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
-    } else if (current_cpu == "riscv64") {
-      assert(pcscan_stack_supported)
-      sources += [ "starscan/stack/asm/riscv64/push_registers_asm.cc" ]
-    } else {
-      # To support a trampoline for another arch, please refer to v8/src/heap/base.
-      assert(!pcscan_stack_supported)
-    }
-  }
-  if (use_freelist_pool_offsets) {
-    # Freelist built from pool offsets header goes here.
-  } else {
-    sources += [ "encoded_freelist.h" ]
-  }
-
-  public_deps = [
-    ":chromecast_buildflags",
-    ":chromeos_buildflags",
-    ":debugging_buildflags",
-    ":partition_alloc_buildflags",
-  ]
-
-  configs += [
-    ":partition_alloc_implementation",
-    ":memory_tagging",
-  ]
-  deps = [ ":allocator_base" ]
-  public_configs = []
-  if (is_android) {
-    # tagging.cc requires __arm_mte_set_* functions.
-    deps += [ "//third_party/cpu_features:ndk_compat" ]
-  }
-  if (is_fuchsia) {
-    deps += [
-      "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
-      "//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp",
-    ]
-    public_deps += [
-      "//third_party/fuchsia-sdk/sdk/pkg/sync",
-      "//third_party/fuchsia-sdk/sdk/pkg/zx",
-    ]
-
-    # Needed for users of spinning_mutex.h, which for performance reasons,
-    # contains inlined calls to `libsync` inside the header file.
-    # It appends an entry to the "libs" section of the dependent target.
-    public_configs += [ ":fuchsia_sync_lib" ]
-  }
-
-  frameworks = []
-  if (is_mac) {
-    # SecTaskGetCodeSignStatus needs:
-    frameworks += [ "Security.framework" ]
-  }
-
-  if (is_apple) {
-    frameworks += [
-      "CoreFoundation.framework",
-      "Foundation.framework",
-    ]
-  }
-
-  configs += [ "//build/config/compiler:wexit_time_destructors" ]
-  configs -= _remove_configs
-  configs += _add_configs
-
-  # We want to be able to test pkey mode without access to the default pkey.
-  # This is incompatible with stack protectors since the TLS won't be pkey-tagged.
-  if (enable_pkeys && is_debug) {
-    configs += [ ":no_stack_protector" ]
-  }
-}
-
-source_set("allocator_base") {
-  visibility = [ ":*" ]
-
-  sources = [
-    "partition_alloc_base/atomic_ref_count.h",
-    "partition_alloc_base/augmentations/compiler_specific.h",
-    "partition_alloc_base/bit_cast.h",
-    "partition_alloc_base/bits.h",
-    "partition_alloc_base/check.cc",
-    "partition_alloc_base/check.h",
-    "partition_alloc_base/compiler_specific.h",
-    "partition_alloc_base/component_export.h",
-    "partition_alloc_base/cpu.cc",
-    "partition_alloc_base/cpu.h",
-    "partition_alloc_base/cxx20_is_constant_evaluated.h",
-    "partition_alloc_base/debug/alias.cc",
-    "partition_alloc_base/debug/alias.h",
-    "partition_alloc_base/export_template.h",
-    "partition_alloc_base/gtest_prod_util.h",
-    "partition_alloc_base/immediate_crash.h",
-    "partition_alloc_base/logging.cc",
-    "partition_alloc_base/logging.h",
-    "partition_alloc_base/memory/page_size.h",
-    "partition_alloc_base/memory/ref_counted.cc",
-    "partition_alloc_base/memory/ref_counted.h",
-    "partition_alloc_base/memory/scoped_policy.h",
-    "partition_alloc_base/memory/scoped_refptr.h",
-    "partition_alloc_base/no_destructor.h",
-    "partition_alloc_base/notreached.h",
-    "partition_alloc_base/numerics/checked_math.h",
-    "partition_alloc_base/numerics/checked_math_impl.h",
-    "partition_alloc_base/numerics/clamped_math.h",
-    "partition_alloc_base/numerics/clamped_math_impl.h",
-    "partition_alloc_base/numerics/safe_conversions.h",
-    "partition_alloc_base/numerics/safe_conversions_arm_impl.h",
-    "partition_alloc_base/numerics/safe_conversions_impl.h",
-    "partition_alloc_base/numerics/safe_math.h",
-    "partition_alloc_base/numerics/safe_math_arm_impl.h",
-    "partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
-    "partition_alloc_base/numerics/safe_math_shared_impl.h",
-    "partition_alloc_base/posix/eintr_wrapper.h",
-    "partition_alloc_base/rand_util.cc",
-    "partition_alloc_base/rand_util.h",
-    "partition_alloc_base/scoped_clear_last_error.h",
-    "partition_alloc_base/strings/safe_sprintf.cc",
-    "partition_alloc_base/strings/safe_sprintf.h",
-    "partition_alloc_base/strings/stringprintf.cc",
-    "partition_alloc_base/strings/stringprintf.h",
-    "partition_alloc_base/system/sys_info.h",
-    "partition_alloc_base/thread_annotations.h",
-    "partition_alloc_base/threading/platform_thread.cc",
-    "partition_alloc_base/threading/platform_thread.h",
-    "partition_alloc_base/threading/platform_thread_ref.h",
-    "partition_alloc_base/time/time.cc",
-    "partition_alloc_base/time/time.h",
-    "partition_alloc_base/time/time_override.cc",
-    "partition_alloc_base/time/time_override.h",
-    "partition_alloc_base/types/strong_alias.h",
-    "partition_alloc_base/win/win_handle_types.h",
-    "partition_alloc_base/win/win_handle_types_list.inc",
-    "partition_alloc_base/win/windows_types.h",
-  ]
-
-  if (is_win) {
-    sources += [
-      "partition_alloc_base/memory/page_size_win.cc",
-      "partition_alloc_base/rand_util_win.cc",
-      "partition_alloc_base/scoped_clear_last_error_win.cc",
-      "partition_alloc_base/threading/platform_thread_win.cc",
-      "partition_alloc_base/time/time_win.cc",
-    ]
-  } else if (is_posix) {
-    sources += [
-      "partition_alloc_base/files/file_util.h",
-      "partition_alloc_base/files/file_util_posix.cc",
-      "partition_alloc_base/memory/page_size_posix.cc",
-      "partition_alloc_base/posix/safe_strerror.cc",
-      "partition_alloc_base/posix/safe_strerror.h",
-      "partition_alloc_base/rand_util_posix.cc",
-      "partition_alloc_base/threading/platform_thread_internal_posix.h",
-      "partition_alloc_base/threading/platform_thread_posix.cc",
-      "partition_alloc_base/time/time_conversion_posix.cc",
-    ]
-
-    if (is_android || is_chromeos_ash) {
-      sources += [ "partition_alloc_base/time/time_android.cc" ]
-    }
-    if (is_apple) {
-      sources += [ "partition_alloc_base/time/time_mac.mm" ]
-    } else {
-      sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
-    }
-  } else if (is_fuchsia) {
-    sources += [
-      "partition_alloc_base/fuchsia/fuchsia_logging.cc",
-      "partition_alloc_base/fuchsia/fuchsia_logging.h",
-      "partition_alloc_base/memory/page_size_posix.cc",
-      "partition_alloc_base/posix/safe_strerror.cc",
-      "partition_alloc_base/posix/safe_strerror.h",
-      "partition_alloc_base/rand_util_fuchsia.cc",
-      "partition_alloc_base/threading/platform_thread_internal_posix.h",
-      "partition_alloc_base/threading/platform_thread_posix.cc",
-      "partition_alloc_base/time/time_conversion_posix.cc",
-      "partition_alloc_base/time/time_fuchsia.cc",
-    ]
-  }
-  if (is_android) {
-    # Only android build requires native_library, and native_library depends
-    # on file_path. So file_path is added if is_android = true.
-    sources += [
-      "partition_alloc_base/files/file_path.cc",
-      "partition_alloc_base/files/file_path.h",
-      "partition_alloc_base/native_library.cc",
-      "partition_alloc_base/native_library.h",
-      "partition_alloc_base/native_library_posix.cc",
-    ]
-  }
-  if (is_apple) {
-    # Apple-specific utilities
-    sources += [
-      "partition_alloc_base/mac/foundation_util.h",
-      "partition_alloc_base/mac/foundation_util.mm",
-      "partition_alloc_base/mac/mach_logging.cc",
-      "partition_alloc_base/mac/mach_logging.h",
-      "partition_alloc_base/mac/scoped_cftyperef.h",
-      "partition_alloc_base/mac/scoped_typeref.h",
-    ]
-    if (is_ios) {
-      sources += [
-        "partition_alloc_base/ios/ios_util.h",
-        "partition_alloc_base/ios/ios_util.mm",
-        "partition_alloc_base/system/sys_info_ios.mm",
-      ]
-    }
-    if (is_mac) {
-      sources += [
-        "partition_alloc_base/mac/mac_util.h",
-        "partition_alloc_base/mac/mac_util.mm",
-        "partition_alloc_base/system/sys_info_mac.mm",
-      ]
-    }
-  }
-
-  public_deps = [
-    ":chromecast_buildflags",
-    ":chromeos_buildflags",
-    ":debugging_buildflags",
-    ":partition_alloc_buildflags",
-  ]
-
-  configs += [ ":partition_alloc_implementation" ]
-
-  deps = []
-  if (is_fuchsia) {
-    public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
-  }
-
-  frameworks = []
-  if (is_apple) {
-    frameworks += [
-      "CoreFoundation.framework",
-      "Foundation.framework",
-    ]
-  }
-
-  configs -= _remove_configs
-  configs += _add_configs
-}
-
-source_set("allocator_shim") {
-  visibility = [ ":*" ]
-
-  sources = []
-  deps = [ ":allocator_base" ]
-  all_dependent_configs = []
-  configs += [ ":partition_alloc_implementation" ]
-
-  configs -= _remove_configs
-  configs += _add_configs
-
-  if (use_allocator_shim) {
-    sources += [
-      "shim/allocator_shim.cc",
-      "shim/allocator_shim.h",
-      "shim/allocator_shim_internals.h",
-    ]
-    if (use_partition_alloc) {
-      sources += [
-        "shim/allocator_shim_default_dispatch_to_partition_alloc.cc",
-        "shim/allocator_shim_default_dispatch_to_partition_alloc.h",
-        "shim/nonscannable_allocator.cc",
-        "shim/nonscannable_allocator.h",
-      ]
-    }
-    if (is_android) {
-      sources += [
-        "shim/allocator_shim_override_cpp_symbols.h",
-        "shim/allocator_shim_override_linker_wrapped_symbols.h",
-      ]
-      all_dependent_configs += [ ":wrap_malloc_symbols" ]
-    }
-    if (is_apple) {
-      sources += [
-        "shim/allocator_shim_override_mac_default_zone.h",
-        "shim/allocator_shim_override_mac_symbols.h",
-        "shim/early_zone_registration_constants.h",
-      ]
-      configs += [ ":mac_no_default_new_delete_symbols" ]
-    }
-    if (is_chromeos || is_linux) {
-      sources += [
-        "shim/allocator_shim_override_cpp_symbols.h",
-        "shim/allocator_shim_override_glibc_weak_symbols.h",
-        "shim/allocator_shim_override_libc_symbols.h",
-      ]
-    }
-    if (is_win) {
-      sources += [
-        "shim/allocator_shim_override_ucrt_symbols_win.h",
-        "shim/winheap_stubs_win.cc",
-        "shim/winheap_stubs_win.h",
-      ]
-    }
-
-    if (!use_partition_alloc_as_malloc) {
-      if (is_android) {
-        sources += [
-          "shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
-        ]
-      }
-      if (is_apple) {
-        sources +=
-            [ "shim/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc" ]
-      }
-      if (is_chromeos || is_linux) {
-        sources += [ "shim/allocator_shim_default_dispatch_to_glibc.cc" ]
-      }
-      if (is_win) {
-        sources += [ "shim/allocator_shim_default_dispatch_to_winheap.cc" ]
-      }
-    }
-
-    deps += [
-      ":allocator_base",
-      ":allocator_core",
-      ":buildflags",
-    ]
-  }
-
-  if (is_apple) {
-    sources += [
-      "shim/allocator_interception_mac.h",
-      "shim/allocator_interception_mac.mm",
-      "shim/malloc_zone_functions_mac.cc",
-      "shim/malloc_zone_functions_mac.h",
-    ]
-
-    # Do not compile with ARC because this target has to interface with
-    # low-level Objective-C and having ARC would interfere.
-    configs -= [ "//build/config/compiler:enable_arc" ]
-    deps += [
-      ":allocator_base",
-      ":allocator_core",
-      ":buildflags",
-    ]
-  }
-}
-
-source_set("raw_ptr") {
-  # `gn check` is unhappy with most `#includes` when PA isn't
-  # actually built.
-  check_includes = use_partition_alloc
-  public = [
-    "pointers/raw_ptr.h",
-    "pointers/raw_ptr_cast.h",
-    "pointers/raw_ptr_exclusion.h",
-    "pointers/raw_ref.h",
-  ]
-  sources = []
-  if (enable_backup_ref_ptr_support) {
-    sources += [
-      "pointers/raw_ptr_backup_ref_impl.cc",
-      "pointers/raw_ptr_backup_ref_impl.h",
-    ]
-  } else if (use_hookable_raw_ptr) {
-    sources += [
-      "pointers/raw_ptr_hookable_impl.cc",
-      "pointers/raw_ptr_hookable_impl.h",
-    ]
-  } else if (use_asan_unowned_ptr) {
-    sources += [
-      "pointers/raw_ptr_asan_unowned_impl.cc",
-      "pointers/raw_ptr_asan_unowned_impl.h",
-    ]
-  } else {
-    sources += [ "pointers/raw_ptr_noop_impl.h" ]
-  }
-  if (use_partition_alloc) {
-    public_deps = [ ":partition_alloc" ]
-  }
-  deps = [ ":buildflags" ]
-
-  # See also: `partition_alloc_base/component_export.h`
-  defines = [ "IS_RAW_PTR_IMPL" ]
-
-  configs -= _remove_configs
-  configs += _add_configs
-}
-
-buildflag_header("partition_alloc_buildflags") {
-  header = "partition_alloc_buildflags.h"
-
-  _record_alloc_info = false
-
-  # GWP-ASan is tied to BRP's "refcount in previous slot" mode, whose
-  # enablement is already gated on BRP enablement.
-  _enable_gwp_asan_support = put_ref_count_in_previous_slot
-
-  # Pools are a logical concept when address space is 32-bit.
-  _glue_core_pools = glue_core_pools && has_64_bit_pointers
-
-  # Pointer compression requires 64-bit pointers.
-  _enable_pointer_compression =
-      enable_pointer_compression_support && has_64_bit_pointers
-
-  # Force-enable live BRP in all processes, ignoring the canonical
-  # experiment state of `PartitionAllocBackupRefPtr`.
-  #
-  # This is not exposed as a GN arg as it is not meant to be used by
-  # developers - it is simply a compile-time hinge that should be
-  # set in the experimental build and then reverted immediately.
-  _force_all_process_brp = false
-
-  # TODO(crbug.com/1151236): Need to refactor the following buildflags.
-  # The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
-  # partition alloc. For partition alloc,
-  # gen/base/allocator/partition_allocator/partition_alloc_buildflags.h
-  # defines and partition alloc includes the header file. For chrome,
-  # gen/base/allocator/buildflags.h defines and chrome includes.
-  flags = [
-    "HAS_64_BIT_POINTERS=$has_64_bit_pointers",
-
-    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
-    "USE_PARTITION_ALLOC=$use_partition_alloc",
-    "USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
-
-    "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
-    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
-    "ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
-    "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
-    "ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
-    "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
-    "ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
-    "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
-    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
-    "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
-    "USE_ASAN_UNOWNED_PTR=$use_asan_unowned_ptr",
-    "USE_HOOKABLE_RAW_PTR=$use_hookable_raw_ptr",
-    "ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
-    "FORCIBLY_ENABLE_BACKUP_REF_PTR_IN_ALL_PROCESSES=$_force_all_process_brp",
-
-    "FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
-
-    "RECORD_ALLOC_INFO=$_record_alloc_info",
-    "USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
-    "GLUE_CORE_POOLS=$_glue_core_pools",
-    "ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
-    "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
-    "USE_FREELIST_POOL_OFFSETS=$use_freelist_pool_offsets",
-
-    "USE_STARSCAN=$use_starscan",
-    "PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",
-
-    "ENABLE_PKEYS=$enable_pkeys",
-    "ENABLE_THREAD_ISOLATION=$enable_pkeys",
-  ]
-
-  if (is_apple) {
-    # TODO(crbug.com/1414153): once TimeTicks::Now behavior is unified on iOS,
-    # this should be removed.
-    flags += [ "PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS=" +
-               "$partition_alloc_enable_mach_absolute_time_ticks" ]
-  }
-}
-
-buildflag_header("chromecast_buildflags") {
-  header = "chromecast_buildflags.h"
-
-  flags = [
-    "PA_IS_CAST_ANDROID=$is_cast_android",
-    "PA_IS_CASTOS=$is_castos",
-  ]
-}
-
-buildflag_header("chromeos_buildflags") {
-  header = "chromeos_buildflags.h"
-
-  flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
-}
-
-buildflag_header("debugging_buildflags") {
-  header = "debugging_buildflags.h"
-  header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
-
-  # Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
-  # but avails it as a buildflag.
-  _dcheck_is_on = is_debug || dcheck_always_on
-
-  flags = [
-    "PA_DCHECK_IS_ON=$_dcheck_is_on",
-    "PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
-    "PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
-  ]
+group("partition_alloc") {
+  public_deps = [ "src/partition_alloc:partition_alloc" ]
 }
 
 group("buildflags") {
-  public_deps = [
-    ":chromecast_buildflags",
-    ":chromeos_buildflags",
-    ":debugging_buildflags",
-    ":partition_alloc_buildflags",
-  ]
+  public_deps = [ "src/partition_alloc:buildflags" ]
 }
-# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
-# move test code here. i.e. test("partition_alloc_tests") { ... } and
-# test("partition_alloc_perftests").
diff --git a/base/allocator/partition_allocator/PartitionAlloc.md b/base/allocator/partition_allocator/PartitionAlloc.md
index f8390f2..00b813f 100644
--- a/base/allocator/partition_allocator/PartitionAlloc.md
+++ b/base/allocator/partition_allocator/PartitionAlloc.md
@@ -81,7 +81,7 @@
 64-bit systems, and 8B on 32-bit).
 
 PartitionAlloc also supports higher levels of alignment, which can be requested
-via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
+via `PartitionAlloc::AlignedAlloc()` or platform-specific APIs (such as
 `posix_memalign()`). The requested
 alignment has to be a power of two. PartitionAlloc reserves the right to round
 up the requested size to the nearest power of two, greater than or equal to the
@@ -197,7 +197,7 @@
 be on the active list, and an empty span can only be on the active or empty
 list.
 
-[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
-[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
-[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
-[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/partition_page.h;l=454
+[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
+[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
+[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
+[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=454
diff --git a/base/allocator/partition_allocator/address_pool_manager.cc b/base/allocator/partition_allocator/address_pool_manager.cc
deleted file mode 100644
index 0254e27..0000000
--- a/base/allocator/partition_allocator/address_pool_manager.cc
+++ /dev/null
@@ -1,571 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-
-#include <algorithm>
-#include <atomic>
-#include <cstdint>
-#include <limits>
-
-#include "base/allocator/partition_allocator/address_space_stats.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/thread_isolation/alignment.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
-#include <sys/mman.h>
-#endif
-
-namespace partition_alloc::internal {
-
-AddressPoolManager AddressPoolManager::singleton_;
-
-// static
-AddressPoolManager& AddressPoolManager::GetInstance() {
-  return singleton_;
-}
-
-namespace {
-// Allocations are all performed on behalf of PartitionAlloc.
-constexpr PageTag kPageTag = PageTag::kPartitionAlloc;
-
-}  // namespace
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-
-namespace {
-
-// This will crash if the range cannot be decommitted.
-void DecommitPages(uintptr_t address, size_t size) {
-  // Callers rely on the pages being zero-initialized when recommitting them.
-  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
-  // particular on macOS, but |DecommitAndZeroSystemPages| does.
-  DecommitAndZeroSystemPages(address, size, kPageTag);
-}
-
-}  // namespace
-
-void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
-  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
-  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
-  PA_CHECK(handle > 0 && handle <= std::size(pools_));
-
-  Pool* pool = GetPool(handle);
-  PA_CHECK(!pool->IsInitialized());
-  pool->Initialize(ptr, length);
-}
-
-void AddressPoolManager::GetPoolUsedSuperPages(
-    pool_handle handle,
-    std::bitset<kMaxSuperPagesInPool>& used) {
-  Pool* pool = GetPool(handle);
-  if (!pool) {
-    return;
-  }
-
-  pool->GetUsedSuperPages(used);
-}
-
-uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
-  Pool* pool = GetPool(handle);
-  if (!pool) {
-    return 0;
-  }
-
-  return pool->GetBaseAddress();
-}
-
-void AddressPoolManager::ResetForTesting() {
-  for (size_t i = 0; i < std::size(pools_); ++i) {
-    pools_[i].Reset();
-  }
-}
-
-void AddressPoolManager::Remove(pool_handle handle) {
-  Pool* pool = GetPool(handle);
-  PA_DCHECK(pool->IsInitialized());
-  pool->Reset();
-}
-
-uintptr_t AddressPoolManager::Reserve(pool_handle handle,
-                                      uintptr_t requested_address,
-                                      size_t length) {
-  Pool* pool = GetPool(handle);
-  if (!requested_address) {
-    return pool->FindChunk(length);
-  }
-  const bool is_available = pool->TryReserveChunk(requested_address, length);
-  if (is_available) {
-    return requested_address;
-  }
-  return pool->FindChunk(length);
-}
-
-void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
-                                              uintptr_t address,
-                                              size_t length) {
-  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
-  Pool* pool = GetPool(handle);
-  PA_DCHECK(pool->IsInitialized());
-  DecommitPages(address, length);
-  pool->FreeChunk(address, length);
-}
-
-void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
-  PA_CHECK(ptr != 0);
-  PA_CHECK(!(ptr & kSuperPageOffsetMask));
-  PA_CHECK(!(length & kSuperPageOffsetMask));
-  address_begin_ = ptr;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  address_end_ = ptr + length;
-  PA_DCHECK(address_begin_ < address_end_);
-#endif
-
-  total_bits_ = length / kSuperPageSize;
-  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);
-
-  ScopedGuard scoped_lock(lock_);
-  alloc_bitset_.reset();
-  bit_hint_ = 0;
-}
-
-bool AddressPoolManager::Pool::IsInitialized() {
-  return address_begin_ != 0;
-}
-
-void AddressPoolManager::Pool::Reset() {
-  address_begin_ = 0;
-}
-
-void AddressPoolManager::Pool::GetUsedSuperPages(
-    std::bitset<kMaxSuperPagesInPool>& used) {
-  ScopedGuard scoped_lock(lock_);
-
-  PA_DCHECK(IsInitialized());
-  used = alloc_bitset_;
-}
-
-uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
-  PA_DCHECK(IsInitialized());
-  return address_begin_;
-}
-
-uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
-  ScopedGuard scoped_lock(lock_);
-
-  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
-  const size_t need_bits = requested_size >> kSuperPageShift;
-
-  // Use first-fit policy to find an available chunk from free chunks. Start
-  // from |bit_hint_|, because we know there are no free chunks before.
-  size_t beg_bit = bit_hint_;
-  size_t curr_bit = bit_hint_;
-  while (true) {
-    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
-    // |total_bits_|, return 0 to signal that no free chunk was found.
-    size_t end_bit = beg_bit + need_bits;
-    if (end_bit > total_bits_) {
-      return 0;
-    }
-
-    bool found = true;
-    for (; curr_bit < end_bit; ++curr_bit) {
-      if (alloc_bitset_.test(curr_bit)) {
-        // The bit was set, so this chunk isn't entirely free. Set |found=false|
-        // to ensure the outer loop continues. However, continue the inner loop
-        // to set |beg_bit| just past the last set bit in the investigated
-        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
-        // next outer loop pass from checking the same bits.
-        beg_bit = curr_bit + 1;
-        found = false;
-        if (bit_hint_ == curr_bit) {
-          ++bit_hint_;
-        }
-      }
-    }
-
-    // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
-    // mark as allocated) and return the allocated address.
-    if (found) {
-      for (size_t i = beg_bit; i < end_bit; ++i) {
-        PA_DCHECK(!alloc_bitset_.test(i));
-        alloc_bitset_.set(i);
-      }
-      if (bit_hint_ == beg_bit) {
-        bit_hint_ = end_bit;
-      }
-      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-      PA_DCHECK(address + requested_size <= address_end_);
-#endif
-      return address;
-    }
-  }
-
-  PA_NOTREACHED();
-}
-
-bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
-                                               size_t requested_size) {
-  ScopedGuard scoped_lock(lock_);
-  PA_DCHECK(!(address & kSuperPageOffsetMask));
-  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
-  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
-  const size_t need_bits = requested_size / kSuperPageSize;
-  const size_t end_bit = begin_bit + need_bits;
-  // Check that requested address is not too high.
-  if (end_bit > total_bits_) {
-    return false;
-  }
-  // Check if any bit of the requested region is set already.
-  for (size_t i = begin_bit; i < end_bit; ++i) {
-    if (alloc_bitset_.test(i)) {
-      return false;
-    }
-  }
-  // Otherwise, set the bits.
-  for (size_t i = begin_bit; i < end_bit; ++i) {
-    alloc_bitset_.set(i);
-  }
-  return true;
-}
-
-void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
-  ScopedGuard scoped_lock(lock_);
-
-  PA_DCHECK(!(address & kSuperPageOffsetMask));
-  PA_DCHECK(!(free_size & kSuperPageOffsetMask));
-
-  PA_DCHECK(address_begin_ <= address);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  PA_DCHECK(address + free_size <= address_end_);
-#endif
-
-  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
-  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
-  for (size_t i = beg_bit; i < end_bit; ++i) {
-    PA_DCHECK(alloc_bitset_.test(i));
-    alloc_bitset_.reset(i);
-  }
-  bit_hint_ = std::min(bit_hint_, beg_bit);
-}
-
-void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
-  std::bitset<kMaxSuperPagesInPool> pages;
-  size_t i;
-  {
-    ScopedGuard scoped_lock(lock_);
-    pages = alloc_bitset_;
-    i = bit_hint_;
-  }
-
-  stats->usage = pages.count();
-
-  size_t largest_run = 0;
-  size_t current_run = 0;
-  for (; i < total_bits_; ++i) {
-    if (!pages[i]) {
-      current_run += 1;
-      continue;
-    } else if (current_run > largest_run) {
-      largest_run = current_run;
-    }
-    current_run = 0;
-  }
-
-  // Fell out of the loop with last bit being zero. Check once more.
-  if (current_run > largest_run) {
-    largest_run = current_run;
-  }
-  stats->largest_available_reservation = largest_run;
-}
-
-void AddressPoolManager::GetPoolStats(const pool_handle handle,
-                                      PoolStats* stats) {
-  Pool* pool = GetPool(handle);
-  if (!pool->IsInitialized()) {
-    return;
-  }
-  pool->GetStats(stats);
-}
-
-bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
-  // Get 64-bit pool stats.
-  GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (IsConfigurablePoolAvailable()) {
-    GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
-  }
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats);
-#endif
-  return true;
-}
-
-#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-static_assert(
-    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
-        0,
-    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
-static_assert(
-    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
-    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
-static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
-                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
-              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
-              "kGuardOffsetOfBRPPoolBitmap.");
-
-template <size_t bitsize>
-void SetBitmap(std::bitset<bitsize>& bitmap,
-               size_t start_bit,
-               size_t bit_length) {
-  const size_t end_bit = start_bit + bit_length;
-  PA_DCHECK(start_bit <= bitsize);
-  PA_DCHECK(end_bit <= bitsize);
-
-  for (size_t i = start_bit; i < end_bit; ++i) {
-    PA_DCHECK(!bitmap.test(i));
-    bitmap.set(i);
-  }
-}
-
-template <size_t bitsize>
-void ResetBitmap(std::bitset<bitsize>& bitmap,
-                 size_t start_bit,
-                 size_t bit_length) {
-  const size_t end_bit = start_bit + bit_length;
-  PA_DCHECK(start_bit <= bitsize);
-  PA_DCHECK(end_bit <= bitsize);
-
-  for (size_t i = start_bit; i < end_bit; ++i) {
-    PA_DCHECK(bitmap.test(i));
-    bitmap.reset(i);
-  }
-}
-
-uintptr_t AddressPoolManager::Reserve(pool_handle handle,
-                                      uintptr_t requested_address,
-                                      size_t length) {
-  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
-  uintptr_t address =
-      AllocPages(requested_address, length, kSuperPageSize,
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 kPageTag);
-  return address;
-}
-
-void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
-                                              uintptr_t address,
-                                              size_t length) {
-  PA_DCHECK(!(address & kSuperPageOffsetMask));
-  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
-  FreePages(address, length);
-}
-
-void AddressPoolManager::MarkUsed(pool_handle handle,
-                                  uintptr_t address,
-                                  size_t length) {
-  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
-  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (handle == kBRPPoolHandle) {
-    PA_DCHECK(
-        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
-
-    // Make IsManagedByBRPPool() return false when an address inside the
-    // first or the last PartitionPageSize()-bytes block is given:
-    //
-    //          ------+---+---------------+---+----
-    // memory   ..... | B | managed by PA | B | ...
-    // regions  ------+---+---------------+---+----
-    //
-    // B: PartitionPageSize()-bytes block. This is used internally by the
-    // allocator and is not available for callers.
-    //
-    // This is required to avoid crash caused by the following code:
-    //   {
-    //     // Assume this allocation happens outside of PartitionAlloc.
-    //     raw_ptr<T> ptr = new T[20];
-    //     for (size_t i = 0; i < 20; i ++) { ptr++; }
-    //     // |ptr| may point to an address inside 'B'.
-    //   }
-    //
-    // Suppose that |ptr| points to an address inside B after the loop. If
-    // IsManagedByBRPPoolPool(ptr) were to return true, ~raw_ptr<T>() would
-    // crash, since the memory is not allocated by PartitionAlloc.
-    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
-              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
-                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
-              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
-                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
-  } else
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  {
-    PA_DCHECK(handle == kRegularPoolHandle);
-    PA_DCHECK(
-        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
-        0);
-    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
-              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
-              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
-  }
-}
-
-void AddressPoolManager::MarkUnused(pool_handle handle,
-                                    uintptr_t address,
-                                    size_t length) {
-  // Address regions allocated for normal buckets are never released, so this
-  // function can only be called for direct map. However, do not DCHECK on
-  // IsManagedByDirectMap(address), because many tests test this function using
-  // small allocations.
-
-  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
-  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (handle == kBRPPoolHandle) {
-    PA_DCHECK(
-        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
-
-    // Make IsManagedByBRPPool() return false when an address inside the
-    // first or the last PartitionPageSize()-bytes block is given.
-    // (See MarkUsed comment)
-    ResetBitmap(
-        AddressPoolManagerBitmap::brp_pool_bits_,
-        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
-            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
-        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
-            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
-  } else
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  {
-    PA_DCHECK(handle == kRegularPoolHandle);
-    PA_DCHECK(
-        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
-        0);
-    ResetBitmap(
-        AddressPoolManagerBitmap::regular_pool_bits_,
-        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
-        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
-  }
-}
-
-void AddressPoolManager::ResetForTesting() {
-  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
-  AddressPoolManagerBitmap::regular_pool_bits_.reset();
-  AddressPoolManagerBitmap::brp_pool_bits_.reset();
-}
-
-namespace {
-
-// Counts super pages in use represented by `bitmap`.
-template <size_t bitsize>
-size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
-                           const size_t bits_per_super_page) {
-  size_t count = 0;
-  size_t bit_index = 0;
-
-  // Stride over super pages.
-  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
-    // Stride over the bits comprising the super page.
-    for (bit_index = super_page_index * bits_per_super_page;
-         bit_index < (super_page_index + 1) * bits_per_super_page &&
-         bit_index < bitsize;
-         ++bit_index) {
-      if (bitmap[bit_index]) {
-        count += 1;
-        // Move on to the next super page.
-        break;
-      }
-    }
-  }
-  return count;
-}
-
-}  // namespace
-
-bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
-  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
-  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
-  {
-    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
-    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
-    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
-  }  // scoped_lock
-
-  // Pool usage is read out from the address pool bitmaps.
-  // The output stats are sized in super pages, so we interpret
-  // the bitmaps into super page usage.
-  static_assert(
-      kSuperPageSize %
-              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
-          0,
-      "information loss when calculating metrics");
-  constexpr size_t kRegularPoolBitsPerSuperPage =
-      kSuperPageSize /
-      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
-
-  // Get 32-bit pool usage.
-  stats->regular_pool_stats.usage =
-      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  static_assert(
-      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
-          0,
-      "information loss when calculating metrics");
-  constexpr size_t kBRPPoolBitsPerSuperPage =
-      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
-  stats->brp_pool_stats.usage =
-      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);
-
-  // Get blocklist size.
-  for (const auto& blocked :
-       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
-    if (blocked.load(std::memory_order_relaxed)) {
-      stats->blocklist_size += 1;
-    }
-  }
-
-  // Count failures in finding non-blocklisted addresses.
-  stats->blocklist_hit_count =
-      AddressPoolManagerBitmap::blocklist_hit_count_.load(
-          std::memory_order_relaxed);
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  return true;
-}
-
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
-  AddressSpaceStats stats{};
-  if (GetStats(&stats)) {
-    dumper->DumpStats(&stats);
-  }
-}
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-// This function just exists to static_assert the layout of the private fields
-// in Pool.
-void AddressPoolManager::AssertThreadIsolatedLayout() {
-  constexpr size_t last_pool_offset =
-      offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);
-  constexpr size_t alloc_bitset_offset =
-      last_pool_offset + offsetof(Pool, alloc_bitset_);
-  static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
-  static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
-}
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/address_pool_manager.h b/base/allocator/partition_allocator/address_pool_manager.h
deleted file mode 100644
index aeaa299..0000000
--- a/base/allocator/partition_allocator/address_pool_manager.h
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
-
-#include <bitset>
-#include <limits>
-
-#include "base/allocator/partition_allocator/address_pool_manager_types.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/thread_isolation/alignment.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
-#endif
-
-namespace partition_alloc {
-
-class AddressSpaceStatsDumper;
-struct AddressSpaceStats;
-struct PoolStats;
-
-}  // namespace partition_alloc
-
-namespace partition_alloc::internal {
-
-// (64bit version)
-// AddressPoolManager takes a reserved virtual address space and manages address
-// space allocation.
-//
-// AddressPoolManager (currently) supports up to 4 pools. Each pool manages a
-// contiguous reserved address space. Alloc() takes a pool_handle and returns
-// address regions from the specified pool. Free() also takes a pool_handle and
-// returns the address region back to the manager.
-//
-// (32bit version)
-// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
-// address regions using bitmaps. IsManagedByPartitionAlloc*Pool use the bitmaps
-// to judge whether a given address is in a pool that supports BackupRefPtr or
-// in a pool that doesn't. All PartitionAlloc allocations must be in either of
-// the pools.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-    PA_THREAD_ISOLATED_ALIGN AddressPoolManager {
- public:
-  static AddressPoolManager& GetInstance();
-
-  AddressPoolManager(const AddressPoolManager&) = delete;
-  AddressPoolManager& operator=(const AddressPoolManager&) = delete;
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  void Add(pool_handle handle, uintptr_t address, size_t length);
-  void Remove(pool_handle handle);
-
-  // Populate a |used| bitset of superpages currently in use.
-  void GetPoolUsedSuperPages(pool_handle handle,
-                             std::bitset<kMaxSuperPagesInPool>& used);
-
-  // Return the base address of a pool.
-  uintptr_t GetPoolBaseAddress(pool_handle handle);
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  // Reserves address space from the pool.
-  uintptr_t Reserve(pool_handle handle,
-                    uintptr_t requested_address,
-                    size_t length);
-
-  // Frees address space back to the pool and decommits underlying system pages.
-  void UnreserveAndDecommit(pool_handle handle,
-                            uintptr_t address,
-                            size_t length);
-  void ResetForTesting();
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
-  void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
-
-  static bool IsManagedByRegularPool(uintptr_t address) {
-    return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
-  }
-
-  static bool IsManagedByBRPPool(uintptr_t address) {
-    return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
-  }
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  void DumpStats(AddressSpaceStatsDumper* dumper);
-
- private:
-  friend class AddressPoolManagerForTesting;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // If we use a thread isolated pool, we need to write-protect its metadata.
-  // Allow the function to get access to the pool pointer.
-  friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
-#endif
-
-  constexpr AddressPoolManager() = default;
-  ~AddressPoolManager() = default;
-
-  // Populates `stats` if applicable.
-  // Returns whether `stats` was populated. (They might not be, e.g.
-  // if PartitionAlloc is wholly unused in this process.)
-  bool GetStats(AddressSpaceStats* stats);
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  static void AssertThreadIsolatedLayout();
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  class Pool {
-   public:
-    constexpr Pool() = default;
-    ~Pool() = default;
-
-    Pool(const Pool&) = delete;
-    Pool& operator=(const Pool&) = delete;
-
-    void Initialize(uintptr_t ptr, size_t length);
-    bool IsInitialized();
-    void Reset();
-
-    uintptr_t FindChunk(size_t size);
-    void FreeChunk(uintptr_t address, size_t size);
-
-    bool TryReserveChunk(uintptr_t address, size_t size);
-
-    void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
-    uintptr_t GetBaseAddress();
-
-    void GetStats(PoolStats* stats);
-
-   private:
-    // The lock needs to be the first field in this class.
-    // We write-protect the pool in the ThreadIsolated case, except that the
-    // lock can be used without acquiring write-permission first (via
-    // DumpStats()). So instead of protecting the whole variable, we only
-    // protect the memory after the lock.
-    // See the alignment of `pad_` below.
-    Lock lock_;
-
-    // The bitset stores the allocation state of the address pool. 1 bit per
-    // super-page: 1 = allocated, 0 = free.
-    std::bitset<kMaxSuperPagesInPool> alloc_bitset_ PA_GUARDED_BY(lock_);
-
-    // An index of a bit in the bitset before which we know for sure all bits
-    // are 1s. This is a best-effort hint in the sense that there still may be lots
-    // of 1s after this index, but at least we know there is no point in
-    // starting the search before it.
-    size_t bit_hint_ PA_GUARDED_BY(lock_) = 0;
-
-    size_t total_bits_ = 0;
-    uintptr_t address_begin_ = 0;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    uintptr_t address_end_ = 0;
-#endif
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    friend class AddressPoolManager;
-    friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  };
-
-  PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
-    PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
-    return &pools_[handle - 1];
-  }
-
-  // Gets the stats for the pool identified by `handle`, if
-  // initialized.
-  void GetPoolStats(pool_handle handle, PoolStats* stats);
-
-  // If thread isolation support is enabled, we need to write-protect the
-  // isolated pool (which needs to be last). For this, we need to add padding in
-  // front of the pools so that the isolated one starts on a page boundary.
-  // We also skip the Lock at the beginning of the pool since it needs to be
-  // used in contexts where we didn't enable write access to the pool memory.
-  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(
-      Pool,
-      kNumPools,
-      offsetof(Pool, alloc_bitset_))] = {};
-  Pool pools_[kNumPools];
-
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  static PA_CONSTINIT AddressPoolManager singleton_;
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
diff --git a/base/allocator/partition_allocator/address_pool_manager_bitmap.cc b/base/allocator/partition_allocator/address_pool_manager_bitmap.cc
deleted file mode 100644
index 92fab78..0000000
--- a/base/allocator/partition_allocator/address_pool_manager_bitmap.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-
-namespace partition_alloc::internal {
-
-namespace {
-
-Lock g_lock;
-
-}  // namespace
-
-Lock& AddressPoolManagerBitmap::GetLock() {
-  return g_lock;
-}
-
-std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
-    AddressPoolManagerBitmap::regular_pool_bits_;  // GUARDED_BY(GetLock())
-std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
-    AddressPoolManagerBitmap::brp_pool_bits_;  // GUARDED_BY(GetLock())
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-std::array<std::atomic_bool,
-           AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
-    AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
-std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-}  // namespace partition_alloc::internal
-
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
diff --git a/base/allocator/partition_allocator/address_pool_manager_bitmap.h b/base/allocator/partition_allocator/address_pool_manager_bitmap.h
deleted file mode 100644
index e0f75ae..0000000
--- a/base/allocator/partition_allocator/address_pool_manager_bitmap.h
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
-
-#include <array>
-#include <atomic>
-#include <bitset>
-#include <limits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-
-namespace partition_alloc {
-
-namespace internal {
-
-// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
-// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
-// support it. All PartitionAlloc allocations must be in either of the pools.
-//
-// This code is specific to 32-bit systems.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
- public:
-  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
-  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;
-
-  // For BRP pool, we use partition page granularity to eliminate the guard
-  // pages from the bitmap at the ends:
-  // - Eliminating the guard page at the beginning is needed so that pointers
-  //   to the end of an allocation that immediately precede a super page in BRP
-  //   pool don't accidentally fall into that pool.
-  // - Eliminating the guard page at the end is to ensure that the last page
-  //   of the address space isn't in the BRP pool. This allows using sentinels
-  //   like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
-  //   on an invalid address. (Note, 64-bit systems don't have this problem as
-  //   the upper half of the address space always belongs to the OS.)
-  //
-  // Note, direct map allocations also belong to this pool. The same logic as
-  // above applies. It is important to note, however, that the granularity used
-  // here has to be a minimum of partition page size and direct map allocation
-  // granularity. Since DirectMapAllocationGranularity() is no smaller than
-  // PageAllocationGranularity(), we don't need to decrease the bitmap
-  // granularity any further.
-  static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
-  static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
-  static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
-                "");
-  static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
-  static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
-  static constexpr size_t kBRPPoolBits =
-      kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
-
-  // Regular pool may include both normal bucket and direct map allocations, so
-  // the bitmap granularity has to be at least as small as
-  // DirectMapAllocationGranularity(). No need to eliminate guard pages at the
-  // ends, as this is a BackupRefPtr-specific concern, hence no need to lower
-  // the granularity to partition page size.
-  static constexpr size_t kBitShiftOfRegularPoolBitmap =
-      DirectMapAllocationGranularityShift();
-  static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
-      DirectMapAllocationGranularity();
-  static_assert(kBytesPer1BitOfRegularPoolBitmap ==
-                    1 << kBitShiftOfRegularPoolBitmap,
-                "");
-  static constexpr size_t kRegularPoolBits =
-      kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
-
-  // Returns false for nullptr.
-  static bool IsManagedByRegularPool(uintptr_t address) {
-    static_assert(
-        std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
-            regular_pool_bits_.size(),
-        "The bitmap is too small, will result in unchecked out of bounds "
-        "accesses.");
-    // It is safe to read |regular_pool_bits_| without a lock since the caller
-    // is responsible for guaranteeing that the address is inside a valid
-    // allocation and the deallocation call won't race with this call.
-    return PA_TS_UNCHECKED_READ(
-        regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
-  }
-
-  // Returns false for nullptr.
-  static bool IsManagedByBRPPool(uintptr_t address) {
-    static_assert(std::numeric_limits<uintptr_t>::max() >>
-                      kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
-                  "The bitmap is too small, will result in unchecked out of "
-                  "bounds accesses.");
-    // It is safe to read |brp_pool_bits_| without a lock since the caller
-    // is responsible for guaranteeing that the address is inside a valid
-    // allocation and the deallocation call won't race with this call.
-    return PA_TS_UNCHECKED_READ(
-        brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
-  }
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  static void BanSuperPageFromBRPPool(uintptr_t address) {
-    brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
-        true, std::memory_order_relaxed);
-  }
-
-  static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
-    // The only potentially dangerous scenario, in which this check is used, is
-    // when the assignment of the first raw_ptr<T> object for an address
-    // allocated outside the BRP pool is racing with the allocation of a new
-    // super page at the same address. We assume that if raw_ptr<T> is being
-    // initialized with a raw pointer, the associated allocation is "alive";
-    // otherwise, the issue should be fixed by rewriting the raw pointer
-    // variable as raw_ptr<T>. In the worst case, when such a fix is
-    // impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite of
-    // the problematic field. If the above assumption holds, the existing
-    // allocation will prevent us from reserving the super-page region and,
-    // thus, having the race condition.  Since we rely on that external
-    // synchronization, the relaxed memory ordering should be sufficient.
-    return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
-        std::memory_order_relaxed);
-  }
-
-  static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
- private:
-  friend class AddressPoolManager;
-
-  static Lock& GetLock();
-
-  static std::bitset<kRegularPoolBits> regular_pool_bits_
-      PA_GUARDED_BY(GetLock());
-  static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
-      brp_forbidden_super_page_map_;
-  static std::atomic_size_t blocklist_hit_count_;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-};
-
-}  // namespace internal
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
-  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
-  // No need to add IsManagedByConfigurablePool, because Configurable Pool
-  // doesn't exist on 32-bit.
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
-#endif
-  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-         || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
-#endif
-      ;
-}
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
-  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
-}
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
-  return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
-}
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
-    uintptr_t address) {
-  // The Configurable Pool is only available on 64-bit builds.
-  return false;
-}
-
-PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
-  // The Configurable Pool is only available on 64-bit builds.
-  return false;
-}
-
-}  // namespace partition_alloc
-
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
diff --git a/base/allocator/partition_allocator/address_pool_manager_types.h b/base/allocator/partition_allocator/address_pool_manager_types.h
deleted file mode 100644
index 1c343d3..0000000
--- a/base/allocator/partition_allocator/address_pool_manager_types.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
-
-namespace partition_alloc::internal {
-
-enum pool_handle : unsigned;
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
diff --git a/base/allocator/partition_allocator/address_pool_manager_unittest.cc b/base/allocator/partition_allocator/address_pool_manager_unittest.cc
deleted file mode 100644
index c020c08..0000000
--- a/base/allocator/partition_allocator/address_pool_manager_unittest.cc
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/address_space_stats.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal {
-
-class AddressSpaceStatsDumperForTesting final : public AddressSpaceStatsDumper {
- public:
-  AddressSpaceStatsDumperForTesting() = default;
-  ~AddressSpaceStatsDumperForTesting() final = default;
-
-  void DumpStats(
-      const partition_alloc::AddressSpaceStats* address_space_stats) override {
-    regular_pool_usage_ = address_space_stats->regular_pool_stats.usage;
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-    regular_pool_largest_reservation_ =
-        address_space_stats->regular_pool_stats.largest_available_reservation;
-#endif
-#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    blocklist_size_ = address_space_stats->blocklist_size;
-#endif
-  }
-
-  size_t regular_pool_usage_ = 0;
-  size_t regular_pool_largest_reservation_ = 0;
-  size_t blocklist_size_ = 0;
-};
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-
-class AddressPoolManagerForTesting : public AddressPoolManager {
- public:
-  AddressPoolManagerForTesting() = default;
-  ~AddressPoolManagerForTesting() = default;
-};
-
-class PartitionAllocAddressPoolManagerTest : public testing::Test {
- protected:
-  PartitionAllocAddressPoolManagerTest() = default;
-  ~PartitionAllocAddressPoolManagerTest() override = default;
-
-  void SetUp() override {
-    manager_ = std::make_unique<AddressPoolManagerForTesting>();
-    base_address_ =
-        AllocPages(kPoolSize, kSuperPageSize,
-                   PageAccessibilityConfiguration(
-                       PageAccessibilityConfiguration::kInaccessible),
-                   PageTag::kPartitionAlloc);
-    ASSERT_TRUE(base_address_);
-    manager_->Add(kRegularPoolHandle, base_address_, kPoolSize);
-    pool_ = kRegularPoolHandle;
-  }
-
-  void TearDown() override {
-    manager_->Remove(pool_);
-    FreePages(base_address_, kPoolSize);
-    manager_.reset();
-  }
-
-  AddressPoolManager* GetAddressPoolManager() { return manager_.get(); }
-
-  static constexpr size_t kPoolSize = kPoolMaxSize;
-  static constexpr size_t kPageCnt = kPoolSize / kSuperPageSize;
-
-  std::unique_ptr<AddressPoolManagerForTesting> manager_;
-  uintptr_t base_address_;
-  pool_handle pool_;
-};
-
-TEST_F(PartitionAllocAddressPoolManagerTest, TooLargePool) {
-  uintptr_t base_addr = 0x4200000;
-  const pool_handle extra_pool = static_cast<pool_handle>(2u);
-  static_assert(kNumPools >= 2);
-
-  EXPECT_DEATH_IF_SUPPORTED(
-      GetAddressPoolManager()->Add(extra_pool, base_addr,
-                                   kPoolSize + kSuperPageSize),
-      "");
-}
-
-TEST_F(PartitionAllocAddressPoolManagerTest, ManyPages) {
-  EXPECT_EQ(
-      GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize),
-      base_address_);
-  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_,
-                                                kPageCnt * kSuperPageSize);
-
-  EXPECT_EQ(
-      GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize),
-      base_address_);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_,
-                                                kPageCnt * kSuperPageSize);
-}
-
-TEST_F(PartitionAllocAddressPoolManagerTest, PagesFragmented) {
-  uintptr_t addrs[kPageCnt];
-  for (size_t i = 0; i < kPageCnt; ++i) {
-    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
-    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
-  }
-  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
-  // Free every other super page, so that we have plenty of free space, but none
-  // of the empty spaces can fit 2 super pages.
-  for (size_t i = 1; i < kPageCnt; i += 2) {
-    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
-                                                  kSuperPageSize);
-  }
-  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u);
-  // Reserve freed super pages back, so that there are no free ones.
-  for (size_t i = 1; i < kPageCnt; i += 2) {
-    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
-    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
-  }
-  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
-  // Lastly, clean up.
-  for (uintptr_t addr : addrs) {
-    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addr, kSuperPageSize);
-  }
-}
-
-TEST_F(PartitionAllocAddressPoolManagerTest, GetUsedSuperpages) {
-  uintptr_t addrs[kPageCnt];
-  for (size_t i = 0; i < kPageCnt; ++i) {
-    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
-    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
-  }
-  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
-
-  std::bitset<kMaxSuperPagesInPool> used_super_pages;
-  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);
-
-  // We expect every bit to be set.
-  for (size_t i = 0; i < kPageCnt; ++i) {
-    ASSERT_TRUE(used_super_pages.test(i));
-  }
-
-  // Free every other super page, so that we have plenty of free space, but none
-  // of the empty spaces can fit 2 super pages.
-  for (size_t i = 1; i < kPageCnt; i += 2) {
-    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
-                                                  kSuperPageSize);
-  }
-
-  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u);
-
-  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);
-
-  // We expect every other bit to be set.
-  for (size_t i = 0; i < kPageCnt; i++) {
-    if (i % 2 == 0) {
-      ASSERT_TRUE(used_super_pages.test(i));
-    } else {
-      ASSERT_FALSE(used_super_pages.test(i));
-    }
-  }
-
-  // Free the even numbered super pages.
-  for (size_t i = 0; i < kPageCnt; i += 2) {
-    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
-                                                  kSuperPageSize);
-  }
-
-  // Finally check to make sure all bits are zero in the used superpage bitset.
-  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);
-
-  for (size_t i = 0; i < kPageCnt; i++) {
-    ASSERT_FALSE(used_super_pages.test(i));
-  }
-}
-
-TEST_F(PartitionAllocAddressPoolManagerTest, IrregularPattern) {
-  uintptr_t a1 = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
-  EXPECT_EQ(a1, base_address_);
-  uintptr_t a2 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize);
-  EXPECT_EQ(a2, base_address_ + 1 * kSuperPageSize);
-  uintptr_t a3 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize);
-  EXPECT_EQ(a3, base_address_ + 3 * kSuperPageSize);
-  uintptr_t a4 = GetAddressPoolManager()->Reserve(pool_, 0, 4 * kSuperPageSize);
-  EXPECT_EQ(a4, base_address_ + 6 * kSuperPageSize);
-  uintptr_t a5 = GetAddressPoolManager()->Reserve(pool_, 0, 5 * kSuperPageSize);
-  EXPECT_EQ(a5, base_address_ + 10 * kSuperPageSize);
-
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a4, 4 * kSuperPageSize);
-  uintptr_t a6 = GetAddressPoolManager()->Reserve(pool_, 0, 6 * kSuperPageSize);
-  EXPECT_EQ(a6, base_address_ + 15 * kSuperPageSize);
-
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a5, 5 * kSuperPageSize);
-  uintptr_t a7 = GetAddressPoolManager()->Reserve(pool_, 0, 7 * kSuperPageSize);
-  EXPECT_EQ(a7, base_address_ + 6 * kSuperPageSize);
-  uintptr_t a8 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize);
-  EXPECT_EQ(a8, base_address_ + 21 * kSuperPageSize);
-  uintptr_t a9 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize);
-  EXPECT_EQ(a9, base_address_ + 13 * kSuperPageSize);
-
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a7, 7 * kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a9, 2 * kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a6, 6 * kSuperPageSize);
-  uintptr_t a10 =
-      GetAddressPoolManager()->Reserve(pool_, 0, 15 * kSuperPageSize);
-  EXPECT_EQ(a10, base_address_ + 6 * kSuperPageSize);
-
-  // Clean up.
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a1, kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a2, 2 * kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a3, 3 * kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a8, 3 * kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a10,
-                                                15 * kSuperPageSize);
-}
-
-TEST_F(PartitionAllocAddressPoolManagerTest, DecommittedDataIsErased) {
-  uintptr_t address =
-      GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
-  ASSERT_TRUE(address);
-  RecommitSystemPages(address, kSuperPageSize,
-                      PageAccessibilityConfiguration(
-                          PageAccessibilityConfiguration::kReadWrite),
-                      PageAccessibilityDisposition::kRequireUpdate);
-
-  memset(reinterpret_cast<void*>(address), 42, kSuperPageSize);
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, address, kSuperPageSize);
-
-  uintptr_t address2 =
-      GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
-  ASSERT_EQ(address, address2);
-  RecommitSystemPages(address2, kSuperPageSize,
-                      PageAccessibilityConfiguration(
-                          PageAccessibilityConfiguration::kReadWrite),
-                      PageAccessibilityDisposition::kRequireUpdate);
-
-  uint32_t sum = 0;
-  for (size_t i = 0; i < kSuperPageSize; i++) {
-    sum += reinterpret_cast<uint8_t*>(address2)[i];
-  }
-  EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed";
-
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, address2,
-                                                kSuperPageSize);
-}
-
-TEST_F(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
-  AddressSpaceStatsDumperForTesting dumper{};
-
-  GetAddressPoolManager()->DumpStats(&dumper);
-  ASSERT_EQ(dumper.regular_pool_usage_, 0ull);
-  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt);
-
-  // Bisect the pool by reserving a super page in the middle.
-  const uintptr_t midpoint_address =
-      base_address_ + (kPageCnt / 2) * kSuperPageSize;
-  ASSERT_EQ(
-      GetAddressPoolManager()->Reserve(pool_, midpoint_address, kSuperPageSize),
-      midpoint_address);
-
-  GetAddressPoolManager()->DumpStats(&dumper);
-  ASSERT_EQ(dumper.regular_pool_usage_, 1ull);
-  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt / 2);
-
-  GetAddressPoolManager()->UnreserveAndDecommit(pool_, midpoint_address,
-                                                kSuperPageSize);
-
-  GetAddressPoolManager()->DumpStats(&dumper);
-  ASSERT_EQ(dumper.regular_pool_usage_, 0ull);
-  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt);
-}
-
-#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-TEST(PartitionAllocAddressPoolManagerTest, IsManagedByRegularPool) {
-  constexpr size_t kAllocCount = 8;
-  static const size_t kNumPages[kAllocCount] = {1, 4, 7, 8, 13, 16, 31, 60};
-  uintptr_t addrs[kAllocCount];
-  for (size_t i = 0; i < kAllocCount; ++i) {
-    addrs[i] = AddressPoolManager::GetInstance().Reserve(
-        kRegularPoolHandle, 0,
-        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
-            kNumPages[i]);
-    EXPECT_TRUE(addrs[i]);
-    EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
-    AddressPoolManager::GetInstance().MarkUsed(
-        kRegularPoolHandle, addrs[i],
-        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
-            kNumPages[i]);
-  }
-  for (size_t i = 0; i < kAllocCount; ++i) {
-    uintptr_t address = addrs[i];
-    size_t num_pages =
-        base::bits::AlignUp(
-            kNumPages[i] *
-                AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap,
-            kSuperPageSize) /
-        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
-    for (size_t j = 0; j < num_pages; ++j) {
-      if (j < kNumPages[i]) {
-        EXPECT_TRUE(AddressPoolManager::IsManagedByRegularPool(address));
-      } else {
-        EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
-      }
-      EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
-      address += AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
-    }
-  }
-  for (size_t i = 0; i < kAllocCount; ++i) {
-    AddressPoolManager::GetInstance().MarkUnused(
-        kRegularPoolHandle, addrs[i],
-        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
-            kNumPages[i]);
-    AddressPoolManager::GetInstance().UnreserveAndDecommit(
-        kRegularPoolHandle, addrs[i],
-        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
-            kNumPages[i]);
-    EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
-    EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
-  }
-}
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-TEST(PartitionAllocAddressPoolManagerTest, IsManagedByBRPPool) {
-  constexpr size_t kAllocCount = 4;
-  // In total, (1 + 3 + 7 + 11) * 2MB = 44MB is allocated.
-  static const size_t kNumPages[kAllocCount] = {1, 3, 7, 11};
-  uintptr_t addrs[kAllocCount];
-  for (size_t i = 0; i < kAllocCount; ++i) {
-    addrs[i] = AddressPoolManager::GetInstance().Reserve(
-        kBRPPoolHandle, 0, kSuperPageSize * kNumPages[i]);
-    EXPECT_TRUE(addrs[i]);
-    EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
-    AddressPoolManager::GetInstance().MarkUsed(kBRPPoolHandle, addrs[i],
-                                               kSuperPageSize * kNumPages[i]);
-  }
-
-  constexpr size_t first_guard_size =
-      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
-      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
-  constexpr size_t last_guard_size =
-      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
-      (AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap -
-       AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap);
-
-  for (size_t i = 0; i < kAllocCount; ++i) {
-    uintptr_t address = addrs[i];
-    size_t num_allocated_size = kNumPages[i] * kSuperPageSize;
-    size_t num_system_pages = num_allocated_size / SystemPageSize();
-    for (size_t j = 0; j < num_system_pages; ++j) {
-      size_t offset = address - addrs[i];
-      if (offset < first_guard_size ||
-          offset >= (num_allocated_size - last_guard_size)) {
-        EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
-      } else {
-        EXPECT_TRUE(AddressPoolManager::IsManagedByBRPPool(address));
-      }
-      EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
-      address += SystemPageSize();
-    }
-  }
-  for (size_t i = 0; i < kAllocCount; ++i) {
-    AddressPoolManager::GetInstance().MarkUnused(kBRPPoolHandle, addrs[i],
-                                                 kSuperPageSize * kNumPages[i]);
-    AddressPoolManager::GetInstance().UnreserveAndDecommit(
-        kBRPPoolHandle, addrs[i], kSuperPageSize * kNumPages[i]);
-    EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
-    EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
-  }
-}
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-TEST(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
-  AddressSpaceStatsDumperForTesting dumper{};
-  AddressPoolManager::GetInstance().DumpStats(&dumper);
-  const size_t usage_before = dumper.regular_pool_usage_;
-
-  const uintptr_t address = AddressPoolManager::GetInstance().Reserve(
-      kRegularPoolHandle, 0, kSuperPageSize);
-  ASSERT_TRUE(address);
-  AddressPoolManager::GetInstance().MarkUsed(kRegularPoolHandle, address,
-                                             kSuperPageSize);
-
-  AddressPoolManager::GetInstance().DumpStats(&dumper);
-  EXPECT_GT(dumper.regular_pool_usage_, usage_before);
-
-  AddressPoolManager::GetInstance().MarkUnused(kRegularPoolHandle, address,
-                                               kSuperPageSize);
-  AddressPoolManager::GetInstance().UnreserveAndDecommit(
-      kRegularPoolHandle, address, kSuperPageSize);
-
-  AddressPoolManager::GetInstance().DumpStats(&dumper);
-  EXPECT_EQ(dumper.regular_pool_usage_, usage_before);
-}
-
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-}  // namespace partition_alloc::internal
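
Editor's note: for reference, a minimal sketch of the reserve/release pattern the deleted tests exercise, assuming a 64-bit build with the pools already initialized. The include paths follow the pre-move layout used throughout this diff, and ReserveAndReleaseOneSuperPage is a hypothetical helper name.

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"

namespace partition_alloc::internal {

void ReserveAndReleaseOneSuperPage() {
  // A requested address of 0 lets the pool pick the first available slot.
  uintptr_t address = AddressPoolManager::GetInstance().Reserve(
      kRegularPoolHandle, 0, kSuperPageSize);
  if (!address) {
    return;  // Pool exhausted.
  }
  // Returns the reservation and decommits the backing pages.
  AddressPoolManager::GetInstance().UnreserveAndDecommit(
      kRegularPoolHandle, address, kSuperPageSize);
}

}  // namespace partition_alloc::internal
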
diff --git a/base/allocator/partition_allocator/address_space_randomization.cc b/base/allocator/partition_allocator/address_space_randomization.cc
deleted file mode 100644
index 60fcf94..0000000
--- a/base/allocator/partition_allocator/address_space_randomization.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/address_space_randomization.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/random.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#endif
-
-namespace partition_alloc {
-
-uintptr_t GetRandomPageBase() {
-  uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  random <<= 32ULL;
-  random |= static_cast<uintptr_t>(internal::RandomValue());
-
-  // The ASLRMask() and ASLROffset() constants will be suitable for the
-  // OS and build configuration.
-  random &= internal::ASLRMask();
-  random += internal::ASLROffset();
-#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
-#if BUILDFLAG(IS_WIN)
-  // On win32 host systems the randomization plus huge alignment causes
-  // excessive fragmentation. Plus most of these systems lack ASLR, so the
-  // randomization isn't buying anything. In that case we just skip it.
-  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
-  static BOOL is_wow64 = -1;
-  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) {
-    is_wow64 = FALSE;
-  }
-  if (!is_wow64) {
-    return 0;
-  }
-#endif  // BUILDFLAG(IS_WIN)
-  random &= internal::ASLRMask();
-  random += internal::ASLROffset();
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
-  return random;
-}
-
-}  // namespace partition_alloc
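
Editor's note: a hypothetical sketch of the arithmetic in GetRandomPageBase() above, with stand-in constants in place of ASLRMask()/ASLROffset(); it only illustrates how two 32-bit draws are combined and then confined to [offset, offset + mask] at page granularity.

#include <cstdint>

// Stand-ins for ASLRMask()/ASLROffset(): roughly AslrMask(46) with an assumed
// 4 KiB allocation granularity, and a zero offset (x86-64 Linux style).
constexpr uint64_t kExampleMask = ((uint64_t{1} << 46) - 1) & ~uint64_t{0xFFF};
constexpr uint64_t kExampleOffset = 0;

uint64_t ExampleRandomPageBase(uint32_t hi, uint32_t lo) {
  uint64_t random = (uint64_t{hi} << 32) | lo;  // combine two 32-bit draws
  random &= kExampleMask;    // keep only the allowed, page-aligned bits
  random += kExampleOffset;  // move the range away from reserved regions
  return random;             // low 12 bits are zero, so the hint is page-aligned
}
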
diff --git a/base/allocator/partition_allocator/address_space_randomization.h b/base/allocator/partition_allocator/address_space_randomization.h
deleted file mode 100644
index cc69f0d..0000000
--- a/base/allocator/partition_allocator/address_space_randomization.h
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-// Calculates a random preferred mapping address. In calculating an address, we
-// balance good ASLR against not fragmenting the address space too badly.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
-
-namespace internal {
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-AslrAddress(uintptr_t mask) {
-  return mask & PageAllocationGranularityBaseMask();
-}
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-AslrMask(uintptr_t bits) {
-  return AslrAddress((1ULL << bits) - 1ULL);
-}
-
-// Turn off formatting, because the thicket of nested ifdefs below is
-// incomprehensible without indentation. It is also incomprehensible with
-// indentation, but the only other option is a combinatorial explosion of
-// *_{win,linux,mac,foo}_{32,64}.h files.
-//
-// clang-format off
-
-#if defined(ARCH_CPU_64_BITS)
-
-  #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-    // We shouldn't allocate system pages at all for sanitizer builds. However,
-    // we do, and if random hint addresses interfere with address ranges
-    // hard-coded in those tools, bad things happen. This address range is
-    // copied from TSAN source but works with all tools. See
-    // https://crbug.com/539863.
-    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-    ASLRMask() {
-      return AslrAddress(0x007fffffffffULL);
-    }
-    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-    ASLROffset() {
-      return AslrAddress(0x7e8000000000ULL);
-    }
-
-  #elif BUILDFLAG(IS_WIN)
-
-    // Windows 8.1 and newer support the full 48-bit address range. Since
-    // ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
-    // http://www.alex-ionescu.com/?p=246
-    PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-      return AslrMask(47);
-    }
-    // Try not to map pages into the range where Windows loads DLLs by default.
-    PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-      return 0x80000000ULL;
-    }
-
-  #elif BUILDFLAG(IS_APPLE)
-
-    // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
-    // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
-    // is destroyed. Using a virtual address space that is too large causes a
-    // leak of about 1 wired [can never be paged out] page per call to mmap. The
-    // page is only reclaimed when the process is killed. Confine the hint to a
-    // 39-bit section of the virtual address space.
-    //
-    // This implementation adapted from
-    // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
-    // is that here we clamp to 39 bits, not 32.
-    //
-    // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
-    // changes.
-    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-    ASLRMask() {
-      return AslrMask(38);
-    }
-    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-    ASLROffset() {
-      // Be careful, there is a zone where macOS will not map memory, at least
-      // on ARM64. From an ARM64 machine running 12.3, the range seems to be
-      // [0x1000000000, 0x7000000000). Make sure that the range we use is
-      // outside these bounds. In 12.3, there is a reserved area between
-      // MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
-      // which is reserved on ARM64. See these constants in XNU's source code
-      // for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
-      return AslrAddress(0x10000000000ULL);
-    }
-
-  #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-
-    #if defined(ARCH_CPU_X86_64)
-
-      // Linux (and macOS) support the full 47-bit user space of x64 processors.
-      // Use only 46 to allow the kernel a chance to fulfill the request.
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-        return AslrMask(46);
-      }
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-        return AslrAddress(0);
-      }
-
-    #elif defined(ARCH_CPU_ARM64)
-
-      #if BUILDFLAG(IS_ANDROID)
-
-      // Restrict the address range on Android to avoid a large performance
-      // regression in single-process WebViews. See https://crbug.com/837640.
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-        return AslrMask(30);
-      }
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-        return AslrAddress(0x20000000ULL);
-      }
-
-      #elif BUILDFLAG(IS_LINUX)
-
-      // Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
-      // page size and number of levels of translation pages used. We use
-      // a 39-bit base, as all setups should support it, lowered to 38 bits
-      // because ASLROffset() could cause a carry.
-      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-      ASLRMask() {
-        return AslrMask(38);
-      }
-      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-      ASLROffset() {
-        return AslrAddress(0x1000000000ULL);
-      }
-
-      #else
-
-      // ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
-      // could cause a carry.
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-        return AslrMask(38);
-      }
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-        return AslrAddress(0x1000000000ULL);
-      }
-
-      #endif
-
-    #elif defined(ARCH_CPU_PPC64)
-
-      #if BUILDFLAG(IS_AIX)
-
-        // AIX has 64 bits of virtual addressing, but we limit the address range
-        // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
-        // extra address space to isolate the mmap regions.
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-          return AslrMask(30);
-        }
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-          return AslrAddress(0x400000000000ULL);
-        }
-
-      #elif defined(ARCH_CPU_BIG_ENDIAN)
-
-        // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-          return AslrMask(42);
-        }
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-          return AslrAddress(0);
-        }
-
-      #else  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
-
-        // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-          return AslrMask(46);
-        }
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-          return AslrAddress(0);
-        }
-
-      #endif  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
-
-    #elif defined(ARCH_CPU_S390X)
-
-      // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
-      // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
-      // chance to fulfill the request.
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-        return AslrMask(40);
-      }
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-        return AslrAddress(0);
-      }
-
-    #elif defined(ARCH_CPU_S390)
-
-      // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
-      // a chance to fulfill the request.
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-        return AslrMask(29);
-      }
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-        return AslrAddress(0);
-      }
-
-    #else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
-           // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
-
-      // For all other POSIX variants, use 30 bits.
-      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-        return AslrMask(30);
-      }
-
-      #if BUILDFLAG(IS_SOLARIS)
-
-        // For our Solaris/illumos mmap hint, we pick a random address in the
-        // bottom half of the top half of the address space (that is, the third
-        // quarter). Because we do not MAP_FIXED, this will be treated only as a
-        // hint -- the system will not fail to mmap because something else
-        // happens to already be mapped at our random address. We deliberately
-        // set the hint high enough to get well above the system's break (that
-        // is, the heap); Solaris and illumos will try the hint and if that
-        // fails allocate as if there were no hint at all. The high hint
-        // prevents the break from getting hemmed in at low values, ceding half
-        // of the address space to the system heap.
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-          return AslrAddress(0x80000000ULL);
-        }
-
-      #elif BUILDFLAG(IS_AIX)
-
-        // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
-        // upper range.
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-          return AslrAddress(0x90000000ULL);
-        }
-
-      #else  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
-
-        // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-        // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
-        // 10.6 and 10.7.
-        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-          return AslrAddress(0x20000000ULL);
-        }
-
-      #endif  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
-
-    #endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
-            // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
-
-  #endif  // BUILDFLAG(IS_POSIX)
-
-#elif defined(ARCH_CPU_32_BITS)
-
-  // This is a good range on 32-bit Windows and Android (the only platforms on
-  // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
-  // is no issue with carries here.
-  PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
-    return AslrMask(30);
-  }
-  PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
-    return AslrAddress(0x20000000ULL);
-  }
-
-#else
-
-  #error Please tell us about your exotic hardware! Sounds interesting.
-
-#endif  // defined(ARCH_CPU_32_BITS)
-
-// clang-format on
-
-}  // namespace internal
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
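
Editor's note: a small constexpr check of the 32-bit claim above (hints land in the 0.5 GiB to 1.5 GiB region). AslrAddress()/AslrMask() are restated locally with an assumed 4 KiB granularity purely for illustration.

#include <cstdint>

constexpr uint64_t kGranularityBaseMask = ~uint64_t{0xFFF};  // assumed 4 KiB pages
constexpr uint64_t ExampleAslrAddress(uint64_t mask) {
  return mask & kGranularityBaseMask;
}
constexpr uint64_t ExampleAslrMask(uint64_t bits) {
  return ExampleAslrAddress((uint64_t{1} << bits) - 1);
}

constexpr uint64_t kOffset = ExampleAslrAddress(0x20000000);
static_assert(kOffset == 0x20000000, "lower bound is 0.5 GiB");
static_assert(kOffset + ExampleAslrMask(30) < 0x60000000,
              "upper bound stays below 1.5 GiB");
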
diff --git a/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/base/allocator/partition_allocator/address_space_randomization_unittest.cc
deleted file mode 100644
index 9361c38..0000000
--- a/base/allocator/partition_allocator/address_space_randomization_unittest.cc
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/address_space_randomization.h"
-
-#include <cstdint>
-#include <vector>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/random.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#include "base/win/windows_version.h"
-#endif
-
-namespace partition_alloc {
-
-namespace {
-
-uintptr_t GetMask() {
-  uintptr_t mask = internal::ASLRMask();
-#if defined(ARCH_CPU_64_BITS)
-#elif defined(ARCH_CPU_32_BITS)
-#if BUILDFLAG(IS_WIN)
-  BOOL is_wow64 = FALSE;
-  if (!IsWow64Process(GetCurrentProcess(), &is_wow64)) {
-    is_wow64 = FALSE;
-  }
-  if (!is_wow64) {
-    mask = 0;
-  }
-#endif  // BUILDFLAG(IS_WIN)
-#endif  // defined(ARCH_CPU_32_BITS)
-  return mask;
-}
-
-const size_t kSamples = 100;
-
-uintptr_t GetAddressBits() {
-  return GetRandomPageBase();
-}
-
-uintptr_t GetRandomBits() {
-  return GetAddressBits() - internal::ASLROffset();
-}
-
-}  // namespace
-
-// Configurations without ASLR are tested here.
-TEST(PartitionAllocAddressSpaceRandomizationTest, DisabledASLR) {
-  uintptr_t mask = GetMask();
-  if (!mask) {
-#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_32_BITS)
-    // ASLR should be turned off on 32-bit Windows.
-    EXPECT_EQ(0u, GetRandomPageBase());
-#else
-    // Otherwise, 0 is very unexpected.
-    EXPECT_NE(0u, GetRandomPageBase());
-#endif
-  }
-}
-
-TEST(PartitionAllocAddressSpaceRandomizationTest, Alignment) {
-  uintptr_t mask = GetMask();
-  if (!mask) {
-    return;
-  }
-
-  for (size_t i = 0; i < kSamples; ++i) {
-    uintptr_t address = GetAddressBits();
-    EXPECT_EQ(0ULL,
-              (address & internal::PageAllocationGranularityOffsetMask()));
-  }
-}
-
-TEST(PartitionAllocAddressSpaceRandomizationTest, Range) {
-  uintptr_t mask = GetMask();
-  if (!mask) {
-    return;
-  }
-
-  uintptr_t min = internal::ASLROffset();
-  uintptr_t max = internal::ASLROffset() + internal::ASLRMask();
-  for (size_t i = 0; i < kSamples; ++i) {
-    uintptr_t address = GetAddressBits();
-    EXPECT_LE(min, address);
-    EXPECT_GE(max + mask, address);
-  }
-}
-
-TEST(PartitionAllocAddressSpaceRandomizationTest, Predictable) {
-  uintptr_t mask = GetMask();
-  if (!mask) {
-    return;
-  }
-
-  const uint64_t kInitialSeed = 0xfeed5eedULL;
-  SetMmapSeedForTesting(kInitialSeed);
-
-  std::vector<uintptr_t> sequence;
-  for (size_t i = 0; i < kSamples; ++i) {
-    sequence.push_back(GetRandomPageBase());
-  }
-
-  SetMmapSeedForTesting(kInitialSeed);
-
-  for (size_t i = 0; i < kSamples; ++i) {
-    EXPECT_EQ(GetRandomPageBase(), sequence[i]);
-  }
-}
-
-// This randomness test is adapted from V8's PRNG tests.
-
-// Chi squared for getting m 0s out of n bits.
-double ChiSquared(int m, int n) {
-  double ys_minus_np1 = (m - n / 2.0);
-  double chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n;
-  double ys_minus_np2 = ((n - m) - n / 2.0);
-  double chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n;
-  return chi_squared_1 + chi_squared_2;
-}
-
-// Test for correlations between recent bits from the PRNG, or bits that are
-// biased.
-void RandomBitCorrelation(int random_bit) {
-  uintptr_t mask = GetMask();
-  if ((mask & (1ULL << random_bit)) == 0) {
-    return;  // bit is always 0.
-  }
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Do fewer checks when BUILDFLAG(PA_DCHECK_IS_ON). Exercised code only
-  // changes when the random number generator does, which should be almost
-  // never. However it's expensive to run all the tests. So keep iterations
-  // faster for local development builds, while having the stricter version run
-  // on official build testers.
-  constexpr int kHistory = 2;
-  constexpr int kRepeats = 1000;
-#else
-  constexpr int kHistory = 8;
-  constexpr int kRepeats = 10000;
-#endif
-  constexpr int kPointerBits = 8 * sizeof(void*);
-  uintptr_t history[kHistory];
-  // The predictor bit is either constant 0 or 1, or one of the bits from the
-  // history.
-  for (int predictor_bit = -2; predictor_bit < kPointerBits; predictor_bit++) {
-    // The predicted bit is one of the bits from the PRNG.
-    for (int ago = 0; ago < kHistory; ago++) {
-      // We don't want to check whether each bit predicts itself.
-      if (ago == 0 && predictor_bit == random_bit) {
-        continue;
-      }
-
-      // Enter the new random value into the history.
-      for (int i = ago; i >= 0; i--) {
-        history[i] = GetRandomBits();
-      }
-
-      // Find out how many of the bits are the same as the prediction bit.
-      int m = 0;
-      for (int i = 0; i < kRepeats; i++) {
-        uintptr_t random = GetRandomBits();
-        for (int j = ago - 1; j >= 0; j--) {
-          history[j + 1] = history[j];
-        }
-        history[0] = random;
-
-        int predicted;
-        if (predictor_bit >= 0) {
-          predicted = (history[ago] >> predictor_bit) & 1;
-        } else {
-          predicted = predictor_bit == -2 ? 0 : 1;
-        }
-        int bit = (random >> random_bit) & 1;
-        if (bit == predicted) {
-          m++;
-        }
-      }
-
-      // Chi squared analysis for k = 2 (2 states: same/not-same) and one
-      // degree of freedom (k - 1).
-      double chi_squared = ChiSquared(m, kRepeats);
-      // For k=2, the probability of Chi^2 > 35 is p=3.338e-9. This condition is
-      // tested ~19000 times, so probability of it failing randomly per one
-      // base_unittests run is (1 - (1 - p) ^ 19000) ~= 6e-5.
-      PA_CHECK(chi_squared <= 35.0);
-      // If the predictor bit is a fixed 0 or 1 then it makes no sense to
-      // repeat the test with a different age.
-      if (predictor_bit < 0) {
-        break;
-      }
-    }
-  }
-}
-
-// Tests are fairly slow, so give each random bit its own test.
-#define TEST_RANDOM_BIT(BIT)                        \
-  TEST(PartitionAllocAddressSpaceRandomizationTest, \
-       RandomBitCorrelations##BIT) {                \
-    RandomBitCorrelation(BIT);                      \
-  }
-
-// The first 12 bits on all platforms are always 0.
-TEST_RANDOM_BIT(12)
-TEST_RANDOM_BIT(13)
-TEST_RANDOM_BIT(14)
-TEST_RANDOM_BIT(15)
-TEST_RANDOM_BIT(16)
-TEST_RANDOM_BIT(17)
-TEST_RANDOM_BIT(18)
-TEST_RANDOM_BIT(19)
-TEST_RANDOM_BIT(20)
-TEST_RANDOM_BIT(21)
-TEST_RANDOM_BIT(22)
-TEST_RANDOM_BIT(23)
-TEST_RANDOM_BIT(24)
-TEST_RANDOM_BIT(25)
-TEST_RANDOM_BIT(26)
-TEST_RANDOM_BIT(27)
-TEST_RANDOM_BIT(28)
-TEST_RANDOM_BIT(29)
-TEST_RANDOM_BIT(30)
-TEST_RANDOM_BIT(31)
-#if defined(ARCH_CPU_64_BITS)
-TEST_RANDOM_BIT(32)
-TEST_RANDOM_BIT(33)
-TEST_RANDOM_BIT(34)
-TEST_RANDOM_BIT(35)
-TEST_RANDOM_BIT(36)
-TEST_RANDOM_BIT(37)
-TEST_RANDOM_BIT(38)
-TEST_RANDOM_BIT(39)
-TEST_RANDOM_BIT(40)
-TEST_RANDOM_BIT(41)
-TEST_RANDOM_BIT(42)
-TEST_RANDOM_BIT(43)
-TEST_RANDOM_BIT(44)
-TEST_RANDOM_BIT(45)
-TEST_RANDOM_BIT(46)
-TEST_RANDOM_BIT(47)
-TEST_RANDOM_BIT(48)
-// No platforms have more than 48 address bits.
-#endif  // defined(ARCH_CPU_64_BITS)
-
-#undef TEST_RANDOM_BIT
-
-// Checks that we can actually map memory in the requested range.
-// TODO(crbug.com/1318466): Extend to all operating systems once they are fixed.
-#if BUILDFLAG(IS_MAC)
-TEST(PartitionAllocAddressSpaceRandomizationTest, CanMapInAslrRange) {
-  int tries = 0;
-  // This is overly generous, but we really don't want to make the test flaky.
-  constexpr int kMaxTries = 1000;
-
-  for (tries = 0; tries < kMaxTries; tries++) {
-    uintptr_t requested_address = GetRandomPageBase();
-    size_t size = internal::PageAllocationGranularity();
-
-    uintptr_t address = AllocPages(
-        requested_address, size, internal::PageAllocationGranularity(),
-        PageAccessibilityConfiguration(
-            PageAccessibilityConfiguration::kReadWrite),
-        PageTag::kPartitionAlloc);
-    ASSERT_NE(address, 0u);
-    FreePages(address, size);
-
-    if (address == requested_address) {
-      break;
-    }
-  }
-
-  EXPECT_LT(tries, kMaxTries);
-}
-#endif  // BUILDFLAG(IS_MAC)
-
-}  // namespace partition_alloc
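
Editor's note: worked numbers for the chi-squared bound used by RandomBitCorrelation() above (threshold 35 with kRepeats = 10000); ChiSquared() is restated here only to show the arithmetic.

double ChiSquared(int m, int n) {
  double d1 = m - n / 2.0;        // deviation of matches from n/2
  double d2 = (n - m) - n / 2.0;  // deviation of mismatches from n/2
  return d1 * d1 * 2.0 / n + d2 * d2 * 2.0 / n;
}

// ChiSquared(5000, 10000) == 0.0    -> perfectly balanced, passes.
// ChiSquared(5100, 10000) == 4.0    -> mild imbalance, still under 35.
// ChiSquared(5500, 10000) == 100.0  -> strong bias, trips PA_CHECK(chi_squared <= 35.0).
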
diff --git a/base/allocator/partition_allocator/address_space_stats.h b/base/allocator/partition_allocator/address_space_stats.h
deleted file mode 100644
index 0c3c205..0000000
--- a/base/allocator/partition_allocator/address_space_stats.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
-
-#include <cstddef>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-namespace partition_alloc {
-
-// All members are measured in super pages.
-struct PoolStats {
-  size_t usage = 0;
-
-  // On 32-bit, pools are mainly logical entities, intermingled with
-  // allocations not managed by PartitionAlloc. The "largest available
-  // reservation" is not possible to measure in that case.
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  size_t largest_available_reservation = 0;
-#endif
-};
-
-struct AddressSpaceStats {
-  PoolStats regular_pool_stats;
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  PoolStats brp_pool_stats;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  PoolStats configurable_pool_stats;
-#else
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  size_t blocklist_size;  // measured in super pages
-  size_t blocklist_hit_count;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  PoolStats thread_isolated_pool_stats;
-#endif
-};
-
-// Interface passed to `AddressPoolManager::DumpStats()` to relay results
-// to `AddressSpaceDumpProvider`.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
- public:
-  virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
-  virtual ~AddressSpaceStatsDumper() = default;
-};
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
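
Editor's note: a minimal sketch of an AddressSpaceStatsDumper implementation, along the lines of the AddressSpaceStatsDumperForTesting used by the deleted address-pool tests; the class and member names are illustrative assumptions, and the include path follows the pre-move layout.

#include <cstddef>

#include "base/allocator/partition_allocator/address_space_stats.h"

namespace partition_alloc {

class SimpleStatsDumper final : public AddressSpaceStatsDumper {
 public:
  void DumpStats(const AddressSpaceStats* stats) override {
    regular_pool_usage_ = stats->regular_pool_stats.usage;
#if BUILDFLAG(HAS_64_BIT_POINTERS)
    regular_pool_largest_reservation_ =
        stats->regular_pool_stats.largest_available_reservation;
#endif
  }

  size_t regular_pool_usage_ = 0;
  size_t regular_pool_largest_reservation_ = 0;
};

}  // namespace partition_alloc
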
diff --git a/base/allocator/partition_allocator/allocation_guard.cc b/base/allocator/partition_allocator/allocation_guard.cc
deleted file mode 100644
index a87ee27..0000000
--- a/base/allocator/partition_allocator/allocation_guard.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-
-#if PA_CONFIG(HAS_ALLOCATION_GUARD)
-
-namespace partition_alloc {
-
-namespace {
-thread_local bool g_disallow_allocations;
-}  // namespace
-
-ScopedDisallowAllocations::ScopedDisallowAllocations() {
-  if (g_disallow_allocations) {
-    PA_IMMEDIATE_CRASH();
-  }
-
-  g_disallow_allocations = true;
-}
-
-ScopedDisallowAllocations::~ScopedDisallowAllocations() {
-  g_disallow_allocations = false;
-}
-
-ScopedAllowAllocations::ScopedAllowAllocations() {
-  // Save the previous value, as ScopedAllowAllocations is used in all
-  // partitions, not just the malloc() one(s).
-  saved_value_ = g_disallow_allocations;
-  g_disallow_allocations = false;
-}
-
-ScopedAllowAllocations::~ScopedAllowAllocations() {
-  g_disallow_allocations = saved_value_;
-}
-
-}  // namespace partition_alloc
-
-#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
diff --git a/base/allocator/partition_allocator/allocation_guard.h b/base/allocator/partition_allocator/allocation_guard.h
deleted file mode 100644
index 756c158..0000000
--- a/base/allocator/partition_allocator/allocation_guard.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-#if PA_CONFIG(HAS_ALLOCATION_GUARD)
-
-// Disallow allocations in the scope. Does not nest.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations {
- public:
-  ScopedDisallowAllocations();
-  ~ScopedDisallowAllocations();
-};
-
-// Re-allow allocations in the scope; the previous state is restored on exit.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
- public:
-  ScopedAllowAllocations();
-  ~ScopedAllowAllocations();
-
- private:
-  bool saved_value_;
-};
-
-#else
-
-struct [[maybe_unused]] ScopedDisallowAllocations {};
-struct [[maybe_unused]] ScopedAllowAllocations {};
-
-#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
-
-}  // namespace partition_alloc
-
-namespace base::internal {
-
-using ::partition_alloc::ScopedAllowAllocations;
-using ::partition_alloc::ScopedDisallowAllocations;
-
-}  // namespace base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
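
Editor's note: a usage sketch for the scoped guards declared above, assuming PA_CONFIG(HAS_ALLOCATION_GUARD) is enabled and the pre-move include path: allocations are forbidden for the outer scope, temporarily re-allowed around a callout, and forbidden again afterwards.

#include "base/allocator/partition_allocator/allocation_guard.h"

void CriticalAllocatorPath() {
  partition_alloc::ScopedDisallowAllocations no_alloc;
  // ... allocator bookkeeping that must not re-enter the allocator ...
  {
    // Saves the "disallowed" state and restores it when leaving the scope.
    partition_alloc::ScopedAllowAllocations allow;
    // ... call into code that is known to allocate ...
  }
  // Allocations are disallowed again here, until no_alloc goes out of scope.
}
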
diff --git a/base/allocator/partition_allocator/arm_bti_test_functions.h b/base/allocator/partition_allocator/arm_bti_test_functions.h
deleted file mode 100644
index 485a67b..0000000
--- a/base/allocator/partition_allocator/arm_bti_test_functions.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
-
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_ARM64)
-extern "C" {
-/**
- * A valid BTI function. Jumping to this function should not cause any problem in
- * a BTI-enabled environment.
- **/
-int64_t arm_bti_test_function(int64_t);
-
-/**
- * A function without a proper BTI landing pad. Jumping here should crash the
- * program on systems which support BTI.
- **/
-int64_t arm_bti_test_function_invalid_offset(int64_t);
-
-/**
- * A simple function which immediately returns to sender.
- **/
-void arm_bti_test_function_end(void);
-}
-#endif  // defined(ARCH_CPU_ARM64)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
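
Editor's note: a hedged sketch of how these symbols might be exercised, assuming an arm64 build with BTI enforcement; a real test would wrap the invalid-offset call in a death test, since it is expected to fault.

#if defined(ARCH_CPU_ARM64)
#include <cstdint>

int64_t CallValidBtiFunction(int64_t value) {
  // Lands on a proper BTI landing pad; behaves like a normal call.
  return arm_bti_test_function(value);
}

int64_t CallInvalidBtiFunction(int64_t value) {
  // No landing pad at this offset: on BTI-enforcing hardware this crashes.
  return arm_bti_test_function_invalid_offset(value);
}
#endif  // defined(ARCH_CPU_ARM64)
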
diff --git a/base/allocator/partition_allocator/build_config.md b/base/allocator/partition_allocator/build_config.md
index 03223a5..84951d5 100644
--- a/base/allocator/partition_allocator/build_config.md
+++ b/base/allocator/partition_allocator/build_config.md
@@ -10,7 +10,7 @@
 
 * [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
 * Everything else ending in `.gn` or `.gni` in
-  `//base/allocator/partition_allocator/`,
+  `//base/allocator/partition_allocator/src/partition_alloc/`,
 * [`allocator.gni`][allocator-gni],
 * [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and
 * [`//base/BUILD.gn`][base-build-gn].
@@ -97,7 +97,7 @@
 [allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
 [base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
 [base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
-[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_config.h
+[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h
 [pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview
 [miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview
 [pa-ee-crbug]: https://crbug.com/1151236
diff --git a/base/allocator/partition_allocator/build_overrides/partition_alloc.gni b/base/allocator/partition_allocator/build_overrides/partition_alloc.gni
index 19fd2de..ad51cf5 100644
--- a/base/allocator/partition_allocator/build_overrides/partition_alloc.gni
+++ b/base/allocator/partition_allocator/build_overrides/partition_alloc.gni
@@ -9,10 +9,14 @@
 # If embedders want to use PartitionAlloc, they need to create their own
 # //build_overrides/partition_alloc.gni and define their own PartitionAlloc
 # configuration.
-
 use_partition_alloc_as_malloc_default = false
 use_allocator_shim_default = false
 enable_backup_ref_ptr_support_default = false
 put_ref_count_in_previous_slot_default = true
 enable_backup_ref_ptr_slow_checks_default = false
 enable_dangling_raw_ptr_checks_default = false
+
+# This is the default build configuration for pointers/raw_ptr*.
+raw_ptr_zero_on_construct_default = true
+raw_ptr_zero_on_move_default = true
+raw_ptr_zero_on_destruct_default = false
diff --git a/base/allocator/partition_allocator/compressed_pointer.cc b/base/allocator/partition_allocator/compressed_pointer.cc
deleted file mode 100644
index a9e9853..0000000
--- a/base/allocator/partition_allocator/compressed_pointer.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/compressed_pointer.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-namespace partition_alloc::internal {
-
-// We keep the useful part in |g_base_| as 1s to speed up decompression.
-alignas(kPartitionCachelineSize)
-    PA_COMPONENT_EXPORT(PARTITION_ALLOC) CompressedPointerBaseGlobal::Base
-    CompressedPointerBaseGlobal::g_base_ = {.base = kUsefulBitsMask};
-
-void CompressedPointerBaseGlobal::SetBase(uintptr_t base) {
-  PA_DCHECK(!IsSet());
-  PA_DCHECK((base & kUsefulBitsMask) == 0);
-  g_base_.base = base | kUsefulBitsMask;
-}
-
-void CompressedPointerBaseGlobal::ResetBaseForTesting() {
-  g_base_.base = kUsefulBitsMask;
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
diff --git a/base/allocator/partition_allocator/compressed_pointer.h b/base/allocator/partition_allocator/compressed_pointer.h
deleted file mode 100644
index bbfad0b..0000000
--- a/base/allocator/partition_allocator/compressed_pointer.h
+++ /dev/null
@@ -1,668 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
-
-#include <climits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-#if !BUILDFLAG(GLUE_CORE_POOLS)
-#error "Pointer compression only works with glued pools"
-#endif
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-#error "Pointer compression currently supports constant pool size"
-#endif
-
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-namespace partition_alloc {
-
-namespace internal {
-
-template <typename T1, typename T2>
-constexpr bool IsDecayedSame =
-    std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-// Pointer compression works by storing only the 'useful' 32-bit part of the
-// pointer. The other half (the base) is stored in a global variable
-// (CompressedPointerBaseGlobal::g_base_), which is used on decompression. To
-// support fast branchless decompression of nullptr, we use the most significant
-// bit in the compressed pointer to leverage sign-extension (for non-nullptr
-// pointers, the most significant bit is set, whereas for nullptr it's not).
-// Using this bit and supporting heaps larger than 4GB relies on having
-// alignment bits in pointers. Assuming that all pointers point to objects
-// aligned to at least 8 bytes, pointer compression can support heaps of size <=
-// 16GB.
-// ((3 alignment bits) = (1 bit for sign-extension) + (2 bits for 16GB heap)).
-//
-// Example: heap base: 0x4b0'ffffffff
-//  - g_base: 0x4b3'ffffffff (lower 34 bits set)
-//  - normal pointer: 0x4b2'a08b6480
-//    - compression:
-//      - shift right by 3:        0x96'54116c90
-//      - truncate:                   0x54116c90
-//      - mark MSB:                   0xd4116c90
-//    - decompression:
-//      - sign-extend:       0xffffffff'd4116c90
-//      - shift left by 3:   0xfffffffe'a08b6480
-//      - 'and' with g_base: 0x000004b2'a08b6480
-//
-//  - nullptr: 0x00000000'00000000
-//    - compression:
-//      - shift right by 3:  0x00000000'00000000
-//      - truncate:                   0x00000000
-//      - (don't mark MSB for nullptr)
-//    - decompression:
-//      - sign-extend:       0x00000000'00000000
-//      - shift left by 3:   0x00000000'00000000
-//      - 'and' with g_base: 0x00000000'00000000
-//
-// Pointer compression relies on having both the regular and the BRP pool (core
-// pools) 'glued', so that the same base could be used for both. For simplicity,
-// the configurations with dynamically selected pool size are not supported.
-// However, they could be supported at the cost of performing an extra load of
-// the core-pools shift size on both compression and decompression.
-
-class CompressedPointerBaseGlobal final {
- public:
-  static constexpr size_t kUsefulBits =
-      base::bits::CountTrailingZeroBits(PartitionAddressSpace::CorePoolsSize());
-  static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
-  static constexpr size_t kBitsToShift =
-      kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
-
-  CompressedPointerBaseGlobal() = delete;
-
-  // Attribute const allows the compiler to assume that
-  // CompressedPointerBaseGlobal::g_base_ doesn't change (e.g. across calls) and
-  // thereby avoid redundant loads.
-  PA_ALWAYS_INLINE __attribute__((const)) static uintptr_t Get() {
-    PA_DCHECK(IsBaseConsistent());
-    return g_base_.base;
-  }
-
-  PA_ALWAYS_INLINE static bool IsSet() {
-    PA_DCHECK(IsBaseConsistent());
-    return (g_base_.base & ~kUsefulBitsMask) != 0;
-  }
-
- private:
-  static constexpr uintptr_t kUsefulBitsMask =
-      PartitionAddressSpace::CorePoolsSize() - 1;
-
-  static union alignas(kPartitionCachelineSize)
-      PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
-    uintptr_t base;
-    char cache_line[kPartitionCachelineSize];
-  } g_base_ PA_CONSTINIT;
-
-  PA_ALWAYS_INLINE static bool IsBaseConsistent() {
-    return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
-  }
-
-  static void SetBase(uintptr_t base);
-  static void ResetBaseForTesting();
-
-  friend class PartitionAddressSpace;
-};
-
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-}  // namespace internal
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-template <typename T>
-class PA_TRIVIAL_ABI CompressedPointer final {
- public:
-  using UnderlyingType = uint32_t;
-
-  PA_ALWAYS_INLINE constexpr CompressedPointer() = default;
-  PA_ALWAYS_INLINE explicit CompressedPointer(T* ptr) : value_(Compress(ptr)) {}
-  PA_ALWAYS_INLINE constexpr explicit CompressedPointer(std::nullptr_t)
-      : value_(0u) {}
-
-  PA_ALWAYS_INLINE constexpr CompressedPointer(const CompressedPointer&) =
-      default;
-  PA_ALWAYS_INLINE constexpr CompressedPointer(
-      CompressedPointer&& other) noexcept = default;
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr CompressedPointer(
-      const CompressedPointer<U>& other) {
-    if constexpr (internal::IsDecayedSame<T, U>) {
-      // When pointers have the same type modulo constness, avoid the
-      // compress-decompress round.
-      value_ = other.value_;
-    } else {
-      // When the types are different, perform the round, because the pointer
-      // may need to be adjusted.
-      // TODO(1376980): Avoid the cycle here.
-      value_ = Compress(other.get());
-    }
-  }
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr CompressedPointer(
-      CompressedPointer<U>&& other) noexcept
-      : CompressedPointer(other) {}
-
-  ~CompressedPointer() = default;
-
-  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
-      const CompressedPointer&) = default;
-  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
-      CompressedPointer&& other) noexcept = default;
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
-      const CompressedPointer<U>& other) {
-    CompressedPointer copy(other);
-    value_ = copy.value_;
-    return *this;
-  }
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
-      CompressedPointer<U>&& other) noexcept {
-    *this = other;
-    return *this;
-  }
-
-  // Don't perform compression when assigning to nullptr.
-  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(std::nullptr_t) {
-    value_ = 0u;
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE T* get() const { return Decompress(value_); }
-
-  PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return value_; }
-
-  PA_ALWAYS_INLINE constexpr UnderlyingType GetAsIntegral() const {
-    return value_;
-  }
-
-  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
-    return is_nonnull();
-  }
-
-  template <typename U = T,
-            std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
-  PA_ALWAYS_INLINE U& operator*() const {
-    PA_DCHECK(is_nonnull());
-    return *get();
-  }
-
-  PA_ALWAYS_INLINE T* operator->() const {
-    PA_DCHECK(is_nonnull());
-    return get();
-  }
-
-  PA_ALWAYS_INLINE constexpr void swap(CompressedPointer& other) {
-    std::swap(value_, other.value_);
-  }
-
- private:
-  template <typename>
-  friend class CompressedPointer;
-
-  static constexpr size_t kBitsForSignExtension = 1;
-  static constexpr size_t kOverallBitsToShift =
-      internal::CompressedPointerBaseGlobal::kBitsToShift +
-      kBitsForSignExtension;
-
-  PA_ALWAYS_INLINE static UnderlyingType Compress(T* ptr) {
-    static constexpr size_t kMinimalRequiredAlignment = 8;
-    static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
-              0);
-    PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
-
-    const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
-    static constexpr size_t kCorePoolsBaseMask =
-        ~(internal::PartitionAddressSpace::CorePoolsSize() - 1);
-    PA_DCHECK(!ptr ||
-              (base & kCorePoolsBaseMask) ==
-                  (reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-    const auto uptr = reinterpret_cast<uintptr_t>(ptr);
-    // Shift the pointer and truncate.
-    auto compressed = static_cast<UnderlyingType>(uptr >> kOverallBitsToShift);
-    // If the pointer is non-null, mark the most-significant-bit to sign-extend
-    // it on decompression. Assuming compression is a significantly less
-    // frequent operation, we accept more work here in favor of faster
-    // decompression.
-    // TODO(1376980): Avoid this by overreserving the heap.
-    if (compressed) {
-      compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
-    }
-
-    return compressed;
-  }
-
-  PA_ALWAYS_INLINE static T* Decompress(UnderlyingType ptr) {
-    PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
-    const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
-    // Treat compressed pointer as signed and cast it to uint64_t, which will
-    // sign-extend it. Then, shift the result by one. It's important to shift
-    // the already unsigned value, as otherwise it would result in undefined
-    // behavior.
-    const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr))
-                          << (kOverallBitsToShift);
-    return reinterpret_cast<T*>(mask & base);
-  }
-
-  UnderlyingType value_;
-};
-
-template <typename T>
-PA_ALWAYS_INLINE constexpr void swap(CompressedPointer<T>& a,
-                                     CompressedPointer<T>& b) {
-  a.swap(b);
-}
-
-// operators==.
-template <typename T, typename U>
-PA_ALWAYS_INLINE bool operator==(CompressedPointer<T> a,
-                                 CompressedPointer<U> b) {
-  if constexpr (internal::IsDecayedSame<T, U>) {
-    // When pointers have the same type modulo constness, simply compare
-    // compressed values.
-    return a.GetAsIntegral() == b.GetAsIntegral();
-  } else {
-    // When the types are different, compare decompressed pointers, because the
-    // pointers may need to be adjusted.
-    // TODO(1376980): Avoid decompression here.
-    return a.get() == b.get();
-  }
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a, U* b) {
-  // Do compression, since it is less expensive.
-  return a == static_cast<CompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(T* a, CompressedPointer<U> b) {
-  return b == a;
-}
-
-template <typename T>
-PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a,
-                                           std::nullptr_t) {
-  return !a.is_nonnull();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
-                                           CompressedPointer<U> b) {
-  return b == nullptr;
-}
-
-// operators!=.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
-                                           CompressedPointer<U> b) {
-  return !(a == b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a, U* b) {
-  // Do compression, since it is less expensive.
-  return a != static_cast<CompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(T* a, CompressedPointer<U> b) {
-  return b != a;
-}
-
-template <typename T>
-PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
-                                           std::nullptr_t) {
-  return a.is_nonnull();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
-                                           CompressedPointer<U> b) {
-  return b != nullptr;
-}
-
-// operators<.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a,
-                                          CompressedPointer<U> b) {
-  if constexpr (internal::IsDecayedSame<T, U>) {
-    // When pointers have the same type modulo constness, simply compare
-    // compressed values.
-    return a.GetAsIntegral() < b.GetAsIntegral();
-  } else {
-    // When the types are different, compare decompressed pointers, because the
-    // pointers may need to be adjusted.
-    // TODO(1376980): Avoid decompression here.
-    return a.get() < b.get();
-  }
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a, U* b) {
-  // Do compression, since it is less expensive.
-  return a < static_cast<CompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<(T* a, CompressedPointer<U> b) {
-  // Do compression, since it is less expensive.
-  return static_cast<CompressedPointer<T>>(a) < b;
-}
-
-// operators<=.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a,
-                                           CompressedPointer<U> b) {
-  if constexpr (internal::IsDecayedSame<T, U>) {
-    // When pointers have the same type modulo constness, simply compare
-    // compressed values.
-    return a.GetAsIntegral() <= b.GetAsIntegral();
-  } else {
-    // When the types are different, compare decompressed pointers, because the
-    // pointers may need to be adjusted.
-    // TODO(1376980): Avoid decompression here.
-    return a.get() <= b.get();
-  }
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a, U* b) {
-  // Do compression, since it is less expensive.
-  return a <= static_cast<CompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<=(T* a, CompressedPointer<U> b) {
-  // Do compression, since it is less expensive.
-  return static_cast<CompressedPointer<T>>(a) <= b;
-}
-
-// operators>.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a,
-                                          CompressedPointer<U> b) {
-  return !(a <= b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a, U* b) {
-  // Do compression, since it is less expensive.
-  return a > static_cast<CompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>(T* a, CompressedPointer<U> b) {
-  // Do compression, since it is less expensive.
-  return static_cast<CompressedPointer<T>>(a) > b;
-}
-
-// operators>=.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a,
-                                           CompressedPointer<U> b) {
-  return !(a < b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a, U* b) {
-  // Do compression, since it is less expensive.
-  return a >= static_cast<CompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>=(T* a, CompressedPointer<U> b) {
-  // Do compression, since it is less expensive.
-  return static_cast<CompressedPointer<T>>(a) >= b;
-}
-
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-// Simple wrapper over the raw pointer.
-template <typename T>
-class PA_TRIVIAL_ABI UncompressedPointer final {
- public:
-  PA_ALWAYS_INLINE constexpr UncompressedPointer() = default;
-  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(T* ptr) : ptr_(ptr) {}
-  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(std::nullptr_t)
-      : ptr_(nullptr) {}
-
-  PA_ALWAYS_INLINE constexpr UncompressedPointer(const UncompressedPointer&) =
-      default;
-  PA_ALWAYS_INLINE constexpr UncompressedPointer(
-      UncompressedPointer&& other) noexcept = default;
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
-      const UncompressedPointer<U>& other)
-      : ptr_(other.ptr_) {}
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
-      UncompressedPointer<U>&& other) noexcept
-      : ptr_(std::move(other.ptr_)) {}
-
-  ~UncompressedPointer() = default;
-
-  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
-      const UncompressedPointer&) = default;
-  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
-      UncompressedPointer&& other) noexcept = default;
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
-      const UncompressedPointer<U>& other) {
-    ptr_ = other.ptr_;
-    return *this;
-  }
-
-  template <typename U,
-            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
-      UncompressedPointer<U>&& other) noexcept {
-    ptr_ = std::move(other.ptr_);
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(std::nullptr_t) {
-    ptr_ = nullptr;
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE constexpr T* get() const { return ptr_; }
-
-  PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return ptr_; }
-
-  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
-    return is_nonnull();
-  }
-
-  template <typename U = T,
-            std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
-  PA_ALWAYS_INLINE constexpr U& operator*() const {
-    PA_DCHECK(is_nonnull());
-    return *get();
-  }
-
-  PA_ALWAYS_INLINE constexpr T* operator->() const {
-    PA_DCHECK(is_nonnull());
-    return get();
-  }
-
-  PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer& other) {
-    std::swap(ptr_, other.ptr_);
-  }
-
- private:
-  template <typename>
-  friend class UncompressedPointer;
-
-  T* ptr_;
-};
-
-template <typename T>
-PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer<T>& a,
-                                     UncompressedPointer<T>& b) {
-  a.swap(b);
-}
-
-// operators==.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
-                                           UncompressedPointer<U> b) {
-  return a.get() == b.get();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a, U* b) {
-  return a == static_cast<UncompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(T* a, UncompressedPointer<U> b) {
-  return b == a;
-}
-
-template <typename T>
-PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
-                                           std::nullptr_t) {
-  return !a.is_nonnull();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
-                                           UncompressedPointer<U> b) {
-  return b == nullptr;
-}
-
-// operators!=.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
-                                           UncompressedPointer<U> b) {
-  return !(a == b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a, U* b) {
-  return a != static_cast<UncompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(T* a, UncompressedPointer<U> b) {
-  return b != a;
-}
-
-template <typename T>
-PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
-                                           std::nullptr_t) {
-  return a.is_nonnull();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
-                                           UncompressedPointer<U> b) {
-  return b != nullptr;
-}
-
-// operators<.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a,
-                                          UncompressedPointer<U> b) {
-  return a.get() < b.get();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a, U* b) {
-  return a < static_cast<UncompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<(T* a, UncompressedPointer<U> b) {
-  return static_cast<UncompressedPointer<T>>(a) < b;
-}
-
-// operators<=.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a,
-                                           UncompressedPointer<U> b) {
-  return a.get() <= b.get();
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a, U* b) {
-  return a <= static_cast<UncompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator<=(T* a, UncompressedPointer<U> b) {
-  return static_cast<UncompressedPointer<T>>(a) <= b;
-}
-
-// operators>.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a,
-                                          UncompressedPointer<U> b) {
-  return !(a <= b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a, U* b) {
-  return a > static_cast<UncompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>(T* a, UncompressedPointer<U> b) {
-  return static_cast<UncompressedPointer<T>>(a) > b;
-}
-
-// operators>=.
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a,
-                                           UncompressedPointer<U> b) {
-  return !(a < b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a, U* b) {
-  return a >= static_cast<UncompressedPointer<U>>(b);
-}
-
-template <typename T, typename U>
-PA_ALWAYS_INLINE constexpr bool operator>=(T* a, UncompressedPointer<U> b) {
-  return static_cast<UncompressedPointer<T>>(a) >= b;
-}
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
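The heterogeneous comparison operators above deliberately decompress when T and U differ, because an upcast can change the raw pointer value. The following standalone sketch (an editorial illustration only, reusing the Base/Mixin/DerivedWithMixin shapes from the unit test below) shows why comparing the compressed bit patterns of pointers with different static types would be wrong:

#include <cstdio>

struct Base { double a; };
struct Mixin { double c; };
struct DerivedWithMixin : Base, Mixin { double d; };

int main() {
  DerivedWithMixin object;
  Base* as_base = &object;    // no adjustment: Base is the first base subobject
  Mixin* as_mixin = &object;  // adjusted by the offset of the Mixin subobject
  std::printf("as_base=%p as_mixin=%p\n", static_cast<void*>(as_base),
              static_cast<void*>(as_mixin));
  // The two addresses differ, so the compressed representations would differ
  // too; only the decompressed (and adjusted) pointers compare meaningfully.
  return 0;
}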
diff --git a/base/allocator/partition_allocator/compressed_pointer_unittest.cc b/base/allocator/partition_allocator/compressed_pointer_unittest.cc
deleted file mode 100644
index d2734d6..0000000
--- a/base/allocator/partition_allocator/compressed_pointer_unittest.cc
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/compressed_pointer.h"
-
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc {
-
-namespace {
-
-struct Base {
-  double a;
-};
-struct Derived : Base {
-  double b;
-};
-struct Mixin {
-  double c;
-};
-struct DerivedWithMixin : Base, Mixin {
-  double d;
-};
-
-struct PADeleter final {
-  void operator()(void* ptr) const { allocator_.root()->Free(ptr); }
-  PartitionAllocator& allocator_;
-};
-
-template <typename T, typename... Args>
-std::unique_ptr<T, PADeleter> make_pa_unique(PartitionAllocator& alloc,
-                                             Args&&... args) {
-  T* result = new (alloc.root()->Alloc(sizeof(T), nullptr))
-      T(std::forward<Args>(args)...);
-  return std::unique_ptr<T, PADeleter>(result, PADeleter{alloc});
-}
-
-template <typename T>
-std::unique_ptr<T[], PADeleter> make_pa_array_unique(PartitionAllocator& alloc,
-                                                     size_t num) {
-  T* result = new (alloc.root()->Alloc(sizeof(T) * num, nullptr)) T();
-  return std::unique_ptr<T[], PADeleter>(result, PADeleter{alloc});
-}
-
-// Test that pointer types are trivial.
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-static_assert(
-    std::is_trivially_default_constructible_v<CompressedPointer<Base>>);
-static_assert(std::is_trivially_copy_constructible_v<CompressedPointer<Base>>);
-static_assert(std::is_trivially_move_constructible_v<CompressedPointer<Base>>);
-static_assert(std::is_trivially_copy_assignable_v<CompressedPointer<Base>>);
-static_assert(std::is_trivially_move_assignable_v<CompressedPointer<Base>>);
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-static_assert(
-    std::is_trivially_default_constructible_v<UncompressedPointer<Base>>);
-static_assert(
-    std::is_trivially_copy_constructible_v<UncompressedPointer<Base>>);
-static_assert(
-    std::is_trivially_move_constructible_v<UncompressedPointer<Base>>);
-static_assert(std::is_trivially_copy_assignable_v<UncompressedPointer<Base>>);
-static_assert(std::is_trivially_move_assignable_v<UncompressedPointer<Base>>);
-
-}  // namespace
-
-struct UncompressedTypeTag {};
-struct CompressedTypeTag {};
-
-template <typename TagType>
-class CompressedPointerTest : public ::testing::Test {
- public:
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-  template <typename T>
-  using PointerType =
-      std::conditional_t<std::is_same_v<TagType, CompressedTypeTag>,
-                         CompressedPointer<T>,
-                         UncompressedPointer<T>>;
-#else   // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-  template <typename T>
-  using PointerType = UncompressedPointer<T>;
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-  CompressedPointerTest() : allocator_(PartitionOptions{}) {}
-
- protected:
-  PartitionAllocator allocator_;
-};
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-using ObjectTypes = ::testing::Types<UncompressedTypeTag, CompressedTypeTag>;
-#else
-using ObjectTypes = ::testing::Types<UncompressedTypeTag>;
-#endif
-
-TYPED_TEST_SUITE(CompressedPointerTest, ObjectTypes);
-
-TYPED_TEST(CompressedPointerTest, NullConstruction) {
-  using DoublePointer = typename TestFixture::template PointerType<double>;
-  {
-    DoublePointer p = static_cast<DoublePointer>(nullptr);
-    EXPECT_FALSE(p.is_nonnull());
-    EXPECT_FALSE(p.get());
-    EXPECT_EQ(p, nullptr);
-  }
-  {
-    DoublePointer p1 = static_cast<DoublePointer>(nullptr);
-    DoublePointer p2 = p1;
-    EXPECT_FALSE(p2.is_nonnull());
-    EXPECT_FALSE(p2.get());
-    EXPECT_EQ(p2, nullptr);
-  }
-  {
-    DoublePointer p1 = static_cast<DoublePointer>(nullptr);
-    DoublePointer p2 = std::move(p1);
-    EXPECT_FALSE(p2.is_nonnull());
-    EXPECT_FALSE(p2.get());
-    EXPECT_EQ(p2, nullptr);
-  }
-}
-
-TYPED_TEST(CompressedPointerTest, NullAssignment) {
-  using DoublePointer = typename TestFixture::template PointerType<double>;
-  {
-    DoublePointer p;
-    p = static_cast<DoublePointer>(nullptr);
-    EXPECT_FALSE(p.is_nonnull());
-    EXPECT_FALSE(p.get());
-    EXPECT_EQ(p.get(), nullptr);
-    EXPECT_EQ(p, nullptr);
-  }
-  {
-    DoublePointer p1 = DoublePointer(nullptr), p2;
-    p2 = p1;
-    EXPECT_FALSE(p2.is_nonnull());
-    EXPECT_FALSE(p2.get());
-    EXPECT_EQ(p2.get(), nullptr);
-    EXPECT_EQ(p2, nullptr);
-  }
-  {
-    DoublePointer p1 = DoublePointer(nullptr), p2;
-    p2 = std::move(p1);
-    EXPECT_FALSE(p2.is_nonnull());
-    EXPECT_FALSE(p2.get());
-    EXPECT_EQ(p2.get(), nullptr);
-    EXPECT_EQ(p2, nullptr);
-  }
-}
-
-TYPED_TEST(CompressedPointerTest, SameTypeValueConstruction) {
-  using DoublePointer = typename TestFixture::template PointerType<double>;
-  auto d = make_pa_unique<double>(this->allocator_);
-  {
-    DoublePointer p = static_cast<DoublePointer>(d.get());
-    EXPECT_TRUE(p.is_nonnull());
-    EXPECT_EQ(p.get(), d.get());
-    EXPECT_EQ(p, d.get());
-  }
-  {
-    DoublePointer p1 = static_cast<DoublePointer>(d.get());
-    DoublePointer p2 = p1;
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, p1);
-    EXPECT_EQ(p2, d.get());
-  }
-  {
-    DoublePointer p1 = static_cast<DoublePointer>(d.get());
-    DoublePointer p2 = std::move(p1);
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, d.get());
-  }
-}
-
-TYPED_TEST(CompressedPointerTest, SameTypeValueAssignment) {
-  using DoublePointer = typename TestFixture::template PointerType<double>;
-  auto d = make_pa_unique<double>(this->allocator_);
-  {
-    DoublePointer p;
-    p = static_cast<DoublePointer>(d.get());
-    EXPECT_TRUE(p.is_nonnull());
-    EXPECT_EQ(p.get(), d.get());
-    EXPECT_EQ(p, d.get());
-  }
-  {
-    DoublePointer p1 = static_cast<DoublePointer>(d.get());
-    DoublePointer p2;
-    p2 = p1;
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, p1);
-    EXPECT_EQ(p2, d.get());
-  }
-  {
-    DoublePointer p1 = static_cast<DoublePointer>(d.get());
-    DoublePointer p2;
-    p2 = std::move(p1);
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, d.get());
-  }
-}
-
-TYPED_TEST(CompressedPointerTest,
-           HeterogeneousValueConstructionSamePointerValue) {
-  using BasePointer = typename TestFixture::template PointerType<Base>;
-  auto d = make_pa_unique<Derived>(this->allocator_);
-  {
-    BasePointer p = static_cast<BasePointer>(d.get());
-    EXPECT_TRUE(p.is_nonnull());
-    EXPECT_EQ(p.get(), d.get());
-  }
-  {
-    BasePointer p1 = static_cast<BasePointer>(d.get());
-    BasePointer p2 = p1;
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, p1);
-    EXPECT_EQ(p2, d.get());
-  }
-  {
-    BasePointer p1 = static_cast<BasePointer>(d.get());
-    BasePointer p2 = std::move(p1);
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, d.get());
-  }
-}
-
-TYPED_TEST(CompressedPointerTest,
-           HeterogeneousValueAssignmentSamePointerValue) {
-  using BasePointer = typename TestFixture::template PointerType<Base>;
-  auto d = make_pa_unique<Derived>(this->allocator_);
-  {
-    BasePointer p;
-    p = static_cast<BasePointer>(d.get());
-    EXPECT_TRUE(p.is_nonnull());
-    EXPECT_EQ(p.get(), d.get());
-  }
-  {
-    BasePointer p1 = static_cast<BasePointer>(d.get());
-    BasePointer p2;
-    p2 = p1;
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, p1);
-    EXPECT_EQ(p2, d.get());
-  }
-  {
-    BasePointer p1 = static_cast<BasePointer>(d.get());
-    BasePointer p2;
-    p2 = std::move(p1);
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, d.get());
-  }
-}
-
-TYPED_TEST(CompressedPointerTest,
-           HeterogeneousValueConstructionDifferentPointerValues) {
-  using MixinPointer = typename TestFixture::template PointerType<Mixin>;
-  auto d = make_pa_unique<DerivedWithMixin>(this->allocator_);
-  {
-    MixinPointer p = static_cast<MixinPointer>(d.get());
-    ASSERT_NE(static_cast<void*>(p.get()), static_cast<void*>(d.get()));
-  }
-  {
-    MixinPointer p = static_cast<MixinPointer>(d.get());
-    EXPECT_TRUE(p.is_nonnull());
-    EXPECT_EQ(p.get(), d.get());
-  }
-  {
-    MixinPointer p1 = static_cast<MixinPointer>(d.get());
-    MixinPointer p2 = p1;
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, p1);
-    EXPECT_EQ(p2, d.get());
-  }
-  {
-    MixinPointer p1 = static_cast<MixinPointer>(d.get());
-    MixinPointer p2 = std::move(p1);
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, d.get());
-  }
-}
-
-TYPED_TEST(CompressedPointerTest,
-           HeterogeneousValueAssignmentDifferentPointerValue) {
-  using MixinPointer = typename TestFixture::template PointerType<Mixin>;
-  auto d = make_pa_unique<DerivedWithMixin>(this->allocator_);
-  {
-    MixinPointer p;
-    p = static_cast<MixinPointer>(d.get());
-    ASSERT_NE(static_cast<void*>(p.get()), static_cast<void*>(d.get()));
-  }
-  {
-    MixinPointer p;
-    p = static_cast<MixinPointer>(d.get());
-    EXPECT_TRUE(p.is_nonnull());
-    EXPECT_EQ(p.get(), d.get());
-  }
-  {
-    MixinPointer p1 = static_cast<MixinPointer>(d.get());
-    MixinPointer p2;
-    p2 = p1;
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, p1);
-    EXPECT_EQ(p2, d.get());
-  }
-  {
-    MixinPointer p1 = static_cast<MixinPointer>(d.get());
-    MixinPointer p2;
-    p2 = std::move(p1);
-    EXPECT_TRUE(p2.is_nonnull());
-    EXPECT_EQ(p2.get(), d.get());
-    EXPECT_EQ(p2, d.get());
-  }
-}
-
-namespace {
-
-template <template <typename> class PointerType,
-          typename T1,
-          typename T2,
-          typename U>
-void EqualityTest(U* raw) {
-  PointerType<T1> p1 = static_cast<PointerType<T1>>(raw);
-  PointerType<T2> p2 = static_cast<PointerType<T2>>(raw);
-  EXPECT_EQ(p1, raw);
-  EXPECT_EQ(p2, raw);
-  EXPECT_EQ(raw, p1);
-  EXPECT_EQ(raw, p2);
-  EXPECT_EQ(p1, p2);
-}
-
-template <template <typename> class PointerType,
-          typename T1,
-          typename T2,
-          typename U>
-void CompareTest(U* array) {
-  PointerType<T1> p0 = static_cast<PointerType<T1>>(&array[0]);
-  PointerType<T2> p1 = static_cast<PointerType<T2>>(&array[1]);
-  {
-    EXPECT_NE(p0, &array[1]);
-    EXPECT_NE(p0, p1);
-    EXPECT_NE(p1, &array[0]);
-    EXPECT_NE(p1, p0);
-  }
-  {
-    EXPECT_LT(p0, &array[1]);
-    EXPECT_LT(&array[0], p1);
-    EXPECT_LT(p0, p1);
-  }
-  {
-    EXPECT_LE(p0, &array[0]);
-    EXPECT_LE(p0, &array[1]);
-    EXPECT_LE(&array[0], p0);
-
-    EXPECT_LE(&array[1], p1);
-    EXPECT_LE(p1, &array[1]);
-
-    auto p2 = p0;
-    EXPECT_LE(p0, p2);
-    EXPECT_LE(p2, p1);
-  }
-  {
-    EXPECT_GT(&array[1], p0);
-    EXPECT_GT(p1, &array[0]);
-    EXPECT_GT(p1, p0);
-  }
-  {
-    EXPECT_GE(&array[0], p0);
-    EXPECT_GE(&array[1], p0);
-    EXPECT_GE(p0, &array[0]);
-
-    EXPECT_GE(p1, &array[1]);
-    EXPECT_GE(&array[1], p1);
-
-    auto p2 = p1;
-    EXPECT_GE(p1, p2);
-    EXPECT_GE(p1, p0);
-  }
-}
-
-}  // namespace
-
-TYPED_TEST(CompressedPointerTest, EqualitySamePointerValue) {
-  auto d = make_pa_unique<Derived>(this->allocator_);
-  EqualityTest<TestFixture::template PointerType, Base, Base>(d.get());
-  EqualityTest<TestFixture::template PointerType, Base, Derived>(d.get());
-  EqualityTest<TestFixture::template PointerType, Derived, Base>(d.get());
-  EqualityTest<TestFixture::template PointerType, Derived, Derived>(d.get());
-}
-
-TYPED_TEST(CompressedPointerTest, EqualityDifferentPointerValues) {
-  auto d = make_pa_unique<DerivedWithMixin>(this->allocator_);
-  EqualityTest<TestFixture::template PointerType, Mixin, Mixin>(d.get());
-  EqualityTest<TestFixture::template PointerType, Mixin, DerivedWithMixin>(
-      d.get());
-  EqualityTest<TestFixture::template PointerType, DerivedWithMixin, Mixin>(
-      d.get());
-  EqualityTest<TestFixture::template PointerType, DerivedWithMixin,
-               DerivedWithMixin>(d.get());
-}
-
-TYPED_TEST(CompressedPointerTest, CompareSamePointerValue) {
-  auto d = make_pa_array_unique<Derived>(this->allocator_, 2);
-  CompareTest<TestFixture::template PointerType, Base, Base>(d.get());
-  CompareTest<TestFixture::template PointerType, Base, Derived>(d.get());
-  CompareTest<TestFixture::template PointerType, Derived, Base>(d.get());
-  CompareTest<TestFixture::template PointerType, Derived, Derived>(d.get());
-}
-
-TYPED_TEST(CompressedPointerTest, CompareDifferentPointerValues) {
-  auto d = make_pa_array_unique<DerivedWithMixin>(this->allocator_, 2);
-  CompareTest<TestFixture::template PointerType, Mixin, Mixin>(d.get());
-  CompareTest<TestFixture::template PointerType, Mixin, DerivedWithMixin>(
-      d.get());
-  CompareTest<TestFixture::template PointerType, DerivedWithMixin, Mixin>(
-      d.get());
-  CompareTest<TestFixture::template PointerType, DerivedWithMixin,
-              DerivedWithMixin>(d.get());
-}
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/dangling_raw_ptr_checks.cc b/base/allocator/partition_allocator/dangling_raw_ptr_checks.cc
deleted file mode 100644
index f58bb80..0000000
--- a/base/allocator/partition_allocator/dangling_raw_ptr_checks.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc {
-
-namespace {
-DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
-DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
-DanglingRawPtrDetectedFn* g_unretained_dangling_raw_ptr_detected_fn =
-    [](uintptr_t) {};
-bool g_unretained_dangling_raw_ptr_check_enabled = false;
-}  // namespace
-
-DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
-  PA_DCHECK(g_dangling_raw_ptr_detected_fn);
-  return g_dangling_raw_ptr_detected_fn;
-}
-
-DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn() {
-  PA_DCHECK(g_dangling_raw_ptr_released_fn);
-  return g_dangling_raw_ptr_released_fn;
-}
-
-void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
-  PA_DCHECK(fn);
-  g_dangling_raw_ptr_detected_fn = fn;
-}
-
-void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
-  PA_DCHECK(fn);
-  g_dangling_raw_ptr_released_fn = fn;
-}
-
-DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn() {
-  return g_unretained_dangling_raw_ptr_detected_fn;
-}
-
-void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn* fn) {
-  PA_DCHECK(fn);
-  g_unretained_dangling_raw_ptr_detected_fn = fn;
-}
-
-bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled) {
-  bool old = g_unretained_dangling_raw_ptr_check_enabled;
-  g_unretained_dangling_raw_ptr_check_enabled = enabled;
-  return old;
-}
-
-namespace internal {
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
-  g_dangling_raw_ptr_detected_fn(id);
-}
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
-  g_dangling_raw_ptr_released_fn(id);
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void UnretainedDanglingRawPtrDetected(uintptr_t id) {
-  g_unretained_dangling_raw_ptr_detected_fn(id);
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool IsUnretainedDanglingRawPtrCheckEnabled() {
-  return g_unretained_dangling_raw_ptr_check_enabled;
-}
-
-}  // namespace internal
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/dangling_raw_ptr_checks.h b/base/allocator/partition_allocator/dangling_raw_ptr_checks.h
deleted file mode 100644
index 5c2d305..0000000
--- a/base/allocator/partition_allocator/dangling_raw_ptr_checks.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-// When compiled with the `enable_dangling_raw_ptr_checks` build flag, dangling
-// raw_ptr occurrences are reported. The reporting behavior is configured here.
-//
-// Purpose of this level of indirection:
-// - Ease testing.
-// - Keep partition_alloc/ independent from base/. In most cases, when a
-//   dangling raw_ptr is detected/released, this involves recording a
-//   base::debug::StackTrace, which isn't desirable inside partition_alloc/.
-// - Be able (potentially) to turn this feature on/off at runtime based on
-//   the dependent's flags.
-namespace partition_alloc {
-
-// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
-// memory region and the allocator is asked to release it.
-//
-// It won't be called again with the same `id` until (if ever)
-// DanglingRawPtrReleased(`id`) is called.
-//
-// This function is called from within the allocator, and is not allowed to
-// allocate memory.
-using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn*);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled);
-
-// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
-// last dangling raw_ptr stops referencing the memory region.
-//
-// This function is allowed to allocate memory.
-using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);
-
-namespace internal {
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void UnretainedDanglingRawPtrDetected(uintptr_t id);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool IsUnretainedDanglingRawPtrCheckEnabled();
-
-}  // namespace internal
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
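The removed header exposes these hooks as plain function pointers that an embedder installs process-wide. A minimal sketch of such an installation, assuming a hypothetical pair of logging handlers (the real embedder records a base::debug::StackTrace instead, as the header comment notes):

#include <cstdint>
#include <cstdio>

#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"

// Hypothetical handlers, for illustration only.
void OnDanglingDetected(uintptr_t id) {
  std::fprintf(stderr, "dangling raw_ptr detected, id=%llu\n",
               static_cast<unsigned long long>(id));
}
void OnDanglingReleased(uintptr_t id) {
  std::fprintf(stderr, "dangling raw_ptr released, id=%llu\n",
               static_cast<unsigned long long>(id));
}

void InstallDanglingRawPtrHooks() {
  partition_alloc::SetDanglingRawPtrDetectedFn(OnDanglingDetected);
  partition_alloc::SetDanglingRawPtrReleasedFn(OnDanglingReleased);
}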
diff --git a/base/allocator/partition_allocator/dot/address-space.dot b/base/allocator/partition_allocator/dot/address-space.dot
deleted file mode 100644
index 456794f..0000000
--- a/base/allocator/partition_allocator/dot/address-space.dot
+++ /dev/null
@@ -1,34 +0,0 @@
-digraph {
-  node[shape=box]
-  edge[dir=both]
-  compound = true
-  bgcolor = transparent
-  dpi = 192
-  nodesep = 0.91
-  // Allows aligning nodes in different subgraphs.
-  newrank = true
-
-  subgraph cluster_0 {
-    label = "Address Space"
-    reg[label="Regular Pool"]
-    brp[label="BRP Pool"]
-    add[label="Additional Pools"]
-    reg->brp->add[style=invis]
-  }
-
-  manager[label="AddressPoolManager"]
-  manager->reg[constraint=false]
-  manager->brp
-  manager->add[constraint=false]
-
-  subgraph cluster_1 {
-    label = "PartitionRoots"
-    pae[label="PA-E Root"]
-    blink[label="Blink Roots"]
-    etc[style=dotted, label="Other Roots"]
-    pae->blink->etc[style=invis]
-  }
-
-  manager->blink[lhead=cluster_1]
-  {rank=same manager brp blink}
-}
diff --git a/base/allocator/partition_allocator/dot/address-space.png b/base/allocator/partition_allocator/dot/address-space.png
deleted file mode 100644
index 07b45ba..0000000
--- a/base/allocator/partition_allocator/dot/address-space.png
+++ /dev/null
Binary files differ
diff --git a/base/allocator/partition_allocator/dot/bucket.dot b/base/allocator/partition_allocator/dot/bucket.dot
deleted file mode 100644
index c4d6006..0000000
--- a/base/allocator/partition_allocator/dot/bucket.dot
+++ /dev/null
@@ -1,60 +0,0 @@
-digraph {
-  graph[bgcolor=transparent]
-  node[shape=plaintext]
-  edge[style=dashed, color=crimson]
-
-  page1[label=<
-    <table border="0" cellborder="1" cellspacing="0"><tr>
-      <!-- head partition page -->
-      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="160"></td>
-      <!-- a slot span in this bucket -->
-      <td port="slotspan" bgcolor="crimson" width="80"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="320"></td>
-      <!-- tail partition page -->
-      <td bgcolor="darkgrey" width="40"></td>
-    </tr></table>
-  >]
-  page2[label=<
-    <table border="0" cellborder="1" cellspacing="0"><tr>
-      <!-- head partition page -->
-      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="280"></td>
-      <!-- a slot span in this bucket -->
-      <td port="slotspan" bgcolor="crimson" width="80"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="200"></td>
-      <!-- tail partition page -->
-      <td bgcolor="darkgrey" width="40"></td>
-    </tr></table>
-  >]
-  page3[label=<
-    <table border="0" cellborder="1" cellspacing="0"><tr>
-      <!-- head partition page -->
-      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="40"></td>
-      <!-- a slot span in this bucket -->
-      <td port="slotspan1" bgcolor="crimson" width="80"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="120"></td>
-      <!-- a slot span in this bucket -->
-      <td port="slotspan2" bgcolor="crimson" width="80"></td>
-      <!-- bucket-external memory - not depicted -->
-      <td width="240"></td>
-      <!-- tail partition page -->
-      <td bgcolor="darkgrey" width="40"></td>
-    </tr></table>
-  >]
-
-  // Invisibly link the head partition pages to force alignment.
-  page1:head->page2:head->page3:head[style=invis]
-
-  // Inter-super-page links disable constraints so to let the above
-  // fully control alignment.
-  page1:slotspan->page2:slotspan->page3:slotspan1[constraint=false]
-  page3:slotspan1:s->page3:slotspan2:sw
-}
diff --git a/base/allocator/partition_allocator/dot/bucket.png b/base/allocator/partition_allocator/dot/bucket.png
deleted file mode 100644
index bf7374b..0000000
--- a/base/allocator/partition_allocator/dot/bucket.png
+++ /dev/null
Binary files differ
diff --git a/base/allocator/partition_allocator/dot/layers.dot b/base/allocator/partition_allocator/dot/layers.dot
deleted file mode 100644
index 27ea7c6..0000000
--- a/base/allocator/partition_allocator/dot/layers.dot
+++ /dev/null
@@ -1,23 +0,0 @@
-digraph G {
-  graph[bgcolor=transparent]
-  node[shape=box,style="filled,rounded",color=deepskyblue]
-
-  subgraph cluster_tc {
-    label = "Thread Cache"
-    rankdir = LR
-    {rank=same;TLS1,TLS2,TLSn}
-    TLS1->TLS2[style=invisible,dir=none]
-    TLS2->TLSn[style=dotted,dir=none]
-  }
-
-  subgraph cluster_central {
-    label = "Central Allocator (per-partition lock)"
-    fast[label="slot span freelists (fast path)"]
-    slow[label="slot span management (slow path)"]
-    # Forces slow path node beneath fast path node.
-    fast->slow[style=invisible,dir=none]
-  }
-
-  # Forces thread-external subgraph beneath thread cache subgraph.
-  TLS2->fast[style=invisible,dir=none]
-}
diff --git a/base/allocator/partition_allocator/dot/layers.png b/base/allocator/partition_allocator/dot/layers.png
deleted file mode 100644
index 80c78e2..0000000
--- a/base/allocator/partition_allocator/dot/layers.png
+++ /dev/null
Binary files differ
diff --git a/base/allocator/partition_allocator/dot/super-page.dot b/base/allocator/partition_allocator/dot/super-page.dot
deleted file mode 100644
index 068392d..0000000
--- a/base/allocator/partition_allocator/dot/super-page.dot
+++ /dev/null
@@ -1,95 +0,0 @@
-digraph G {
-  graph[bgcolor=transparent]
-  node[shape=plaintext]
-  edge[style=dashed]
-
-  invisible_a[label=<
-    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
-      <TR>
-        <TD PORT="red" WIDTH="100"></TD>
-        <TD PORT="green" WIDTH="20"></TD>
-        <TD PORT="blue" WIDTH="40"></TD>
-        <TD PORT="gold" WIDTH="300"></TD>
-        <TD PORT="pink" WIDTH="60"></TD>
-      </TR>
-    </TABLE>
-  >]
-  superpage[xlabel="Super Page",label=<
-    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
-      <TR>
-        <!-- Head Partition Page -->
-        <TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
-        <TD PORT="metadata"></TD>
-        <TD BGCOLOR="darkgrey" WIDTH="18"></TD>
-        <!-- Bitmaps -->
-        <TD WIDTH="100">Bitmaps(?)</TD>
-        <!-- Several Slot Spans -->
-        <TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
-        <TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
-        <TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
-        <TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
-        <TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
-        <TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
-        <TD WIDTH="79">...</TD>
-        <!-- Tail Partition Page -->
-        <TD BGCOLOR="darkgrey" WIDTH="39"></TD>
-      </TR>
-    </TABLE>
-  >]
-  invisible_b[label=<
-    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
-      <TR>
-        <TD PORT="green" WIDTH="30"></TD>
-        <TD PORT="blue" WIDTH="60"></TD>
-        <TD PORT="gold" WIDTH="180"></TD>
-        <TD PORT="red" WIDTH="90"></TD>
-        <TD PORT="pink" WIDTH="90"></TD>
-      </TR>
-    </TABLE>
-  >]
-  metadata_page[xlabel="Metadata",label=<
-    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
-      <TR>
-        <!-- Guard Page Metadata -->
-        <TD BGCOLOR="darkgrey"> </TD>
-        <!-- Bitmaps Offset -->
-        <TD> B? </TD>
-        <!-- Red Slot Span Metadata -->
-        <TD BGCOLOR="crimson">v</TD>
-        <TD BGCOLOR="crimson">+</TD>
-        <TD BGCOLOR="crimson">+</TD>
-        <!-- Green Slot Span Metadata -->
-        <TD BGCOLOR="palegreen">v</TD>
-        <!-- Blue Slot Span Metadata -->
-        <TD BGCOLOR="cornflowerblue">v</TD>
-        <TD BGCOLOR="cornflowerblue">+</TD>
-        <!-- Gold Slot Span Metadata -->
-        <TD BGCOLOR="gold">v</TD>
-        <TD BGCOLOR="gold">+</TD>
-        <TD BGCOLOR="gold">+</TD>
-        <TD BGCOLOR="gold">+</TD>
-        <TD BGCOLOR="gold">+</TD>
-        <TD BGCOLOR="gold">+</TD>
-        <!-- Red Slot Span Metadata -->
-        <TD BGCOLOR="crimson">v</TD>
-        <TD BGCOLOR="crimson">+</TD>
-        <TD BGCOLOR="crimson">+</TD>
-        <!-- Pink Slot Span Metadata -->
-        <TD BGCOLOR="deeppink">v</TD>
-        <!-- etc. -->
-        <TD WIDTH="64">...</TD>
-        <!-- Guard Page Metadata -->
-        <TD BGCOLOR="darkgrey"> </TD>
-      </TR>
-    </TABLE>
-  >]
-
-  invisible_a:red->superpage:red->superpage:red2[color=crimson]
-  superpage:red2->invisible_b:red[color=crimson]
-  invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
-  invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
-  invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
-  invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]
-
-  superpage:metadata->metadata_page[style="",arrowhead=odot]
-}
diff --git a/base/allocator/partition_allocator/dot/super-page.png b/base/allocator/partition_allocator/dot/super-page.png
deleted file mode 100644
index 0bfd69a..0000000
--- a/base/allocator/partition_allocator/dot/super-page.png
+++ /dev/null
Binary files differ
diff --git a/base/allocator/partition_allocator/encoded_freelist.h b/base/allocator/partition_allocator/encoded_freelist.h
deleted file mode 100644
index 71211e5..0000000
--- a/base/allocator/partition_allocator/encoded_freelist.h
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ENCODED_FREELIST_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ENCODED_FREELIST_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "build/build_config.h"
-
-#if !defined(ARCH_CPU_BIG_ENDIAN)
-#include "base/allocator/partition_allocator/reverse_bytes.h"
-#endif  // !defined(ARCH_CPU_BIG_ENDIAN)
-
-namespace partition_alloc::internal {
-
-class PartitionFreelistEntry;
-
-class EncodedPartitionFreelistEntryPtr {
- private:
-  PA_ALWAYS_INLINE constexpr explicit EncodedPartitionFreelistEntryPtr(
-      std::nullptr_t)
-      : encoded_(Transform(0)) {}
-  PA_ALWAYS_INLINE explicit EncodedPartitionFreelistEntryPtr(void* ptr)
-      // The encoded pointer stays MTE-tagged.
-      : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
-
-  PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const {
-    return reinterpret_cast<PartitionFreelistEntry*>(Transform(encoded_));
-  }
-
-  PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }
-
-  PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
-    encoded_ = encoded;
-  }
-
-  PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }
-
-  // Transform() works the same in both directions, so can be used for
-  // encoding and decoding.
-  PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
-    // We use bswap on little endian as a fast transformation for two reasons:
-    // 1) On 64 bit architectures, the pointer is very unlikely to be a
-    //    canonical address. Therefore, if an object is freed and its vtable is
-    //    used where the attacker doesn't get the chance to run allocations
-    //    between the free and use, the vtable dereference is likely to fault.
-    // 2) If the attacker has a linear buffer overflow and elects to try and
-    //    corrupt a freelist pointer, partial pointer overwrite attacks are
-    //    thwarted.
-    // For big endian, similar guarantees are arrived at with a negation.
-#if defined(ARCH_CPU_BIG_ENDIAN)
-    uintptr_t transformed = ~address;
-#else
-    uintptr_t transformed = ReverseBytes(address);
-#endif
-    return transformed;
-  }
-
-  uintptr_t encoded_;
-
-  friend PartitionFreelistEntry;
-};
-
-// Freelist entries are encoded for security reasons. See
-// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
-// the rationale and mechanism, respectively.
-class PartitionFreelistEntry {
- private:
-  constexpr explicit PartitionFreelistEntry(std::nullptr_t)
-      : encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-        ,
-        shadow_(encoded_next_.Inverted())
-#endif
-  {
-  }
-  explicit PartitionFreelistEntry(PartitionFreelistEntry* next)
-      : encoded_next_(EncodedPartitionFreelistEntryPtr(next))
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-        ,
-        shadow_(encoded_next_.Inverted())
-#endif
-  {
-  }
-  // For testing only.
-  PartitionFreelistEntry(void* next, bool make_shadow_match)
-      : encoded_next_(EncodedPartitionFreelistEntryPtr(next))
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-        ,
-        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
-#endif
-  {
-  }
-
- public:
-  ~PartitionFreelistEntry() = delete;
-
-  // Emplaces the freelist entry at the beginning of the given slot span, and
-  // initializes it as null-terminated.
-  PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
-      void* slot_start_tagged) {
-    // |slot_start_tagged| is MTE-tagged.
-    auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
-    return entry;
-  }
-  PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
-      uintptr_t slot_start) {
-    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
-  }
-
-  // Emplaces the freelist entry at the beginning of the given slot span, and
-  // initializes it with the given |next| pointer, but encoded.
-  //
-  // This freelist is built for the purpose of thread-cache. This means that we
-  // can't perform a check that this and the next pointer belong to the same
-  // super page, as thread-cache spans may chain slots across super pages.
-  PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitForThreadCache(
-      uintptr_t slot_start,
-      PartitionFreelistEntry* next) {
-    auto* entry =
-        new (SlotStartAddr2Ptr(slot_start)) PartitionFreelistEntry(next);
-    return entry;
-  }
-
-  // Emplaces the freelist entry at the beginning of the given slot span, and
-  // initializes it with the given |next| pointer.
-  //
-  // This is for testing purposes only! |make_shadow_match| allows you to
-  // choose whether the shadow matches the next pointer properly or holds a
-  // garbage value.
-  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
-                                                     void* next,
-                                                     bool make_shadow_match) {
-    new (SlotStartAddr2Ptr(slot_start))
-        PartitionFreelistEntry(next, make_shadow_match);
-  }
-
-  void CorruptNextForTesting(uintptr_t v) {
-    // We just need a value that can never be a valid pointer here.
-    encoded_next_.Override(EncodedPartitionFreelistEntryPtr::Transform(v));
-  }
-
-  // Puts `slot_size` on the stack before crashing in case of memory
-  // corruption. Meant to be used to report the failed allocation size.
-  template <bool crash_on_corruption>
-  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextForThreadCache(
-      size_t slot_size) const;
-  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t slot_size) const;
-
-  PA_NOINLINE void CheckFreeList(size_t slot_size) const {
-    for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
-      // |GetNext()| checks freelist integrity.
-    }
-  }
-
-  PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
-    for (auto* entry = this; entry;
-         entry = entry->GetNextForThreadCache<true>(slot_size)) {
-      // |GetNextForThreadCache()| checks freelist integrity.
-    }
-  }
-
-  PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* entry) {
-    // SetNext() is called either on the freelist head (when provisioning new
-    // slots) or after GetNext() has already been called, so there is no need
-    // to pass the size here.
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    // Regular freelists always point to an entry within the same super page.
-    //
-    // This is most likely a PartitionAlloc bug if this triggers.
-    if (PA_UNLIKELY(entry &&
-                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
-                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
-      FreelistCorruptionDetected(0);
-    }
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-    encoded_next_ = EncodedPartitionFreelistEntryPtr(entry);
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-    shadow_ = encoded_next_.Inverted();
-#endif
-  }
-
-  // Zeroes out |this| before returning the slot. The pointer to this memory
-  // will be returned to the user (caller of Alloc()), thus can't have internal
-  // data.
-  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
-    encoded_next_.Override(0);
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-    shadow_ = 0;
-#endif
-    return SlotStartPtr2Addr(this);
-  }
-
-  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
-    return !encoded_next_;
-  }
-
- private:
-  template <bool crash_on_corruption>
-  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextInternal(
-      size_t slot_size,
-      bool for_thread_cache) const;
-
-  PA_ALWAYS_INLINE static bool IsSane(const PartitionFreelistEntry* here,
-                                      const PartitionFreelistEntry* next,
-                                      bool for_thread_cache) {
-    // Don't allow the freelist to be blindly followed to any location.
-    // Checks two constraints:
-    // - |here| and |next| must belong to the same superpage, unless this is in
-    //   the thread cache (outside the thread cache, they in fact always belong
-    //   to the same slot span).
-    // - |next| cannot point inside the metadata area.
-    //
-    // Also, the lightweight UaF detection (pointer shadow) is checked.
-
-    uintptr_t here_address = SlotStartPtr2Addr(here);
-    uintptr_t next_address = SlotStartPtr2Addr(next);
-
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
-#else
-    bool shadow_ptr_ok = true;
-#endif
-
-    bool same_superpage = (here_address & kSuperPageBaseMask) ==
-                          (next_address & kSuperPageBaseMask);
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-    bool marked_as_free_in_bitmap =
-        for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
-#else
-    bool marked_as_free_in_bitmap = true;
-#endif
-
-    // This is necessary but not sufficient when quarantine is enabled; see
-    // SuperPagePayloadBegin() in partition_page.h. However, we don't want to
-    // fetch anything from the root in this function.
-    bool not_in_metadata =
-        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();
-
-    if (for_thread_cache) {
-      return shadow_ptr_ok & not_in_metadata;
-    } else {
-      return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
-             not_in_metadata;
-    }
-  }
-
-  EncodedPartitionFreelistEntryPtr encoded_next_;
-  // This is intended to detect unintentional corruptions of the freelist.
-  // These can happen due to a Use-after-Free, or overflow of the previous
-  // allocation in the slot span.
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-  uintptr_t shadow_;
-#endif
-};
-
-static_assert(kSmallestBucket >= sizeof(PartitionFreelistEntry),
-              "Need enough space for freelist entries in the smallest slot");
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-// The smallest bucket actually used. Note that the smallest request is 1 (if
-// it's 0, it gets patched to 1), and ref-count gets added to it.
-namespace {
-constexpr size_t kSmallestUsedBucket =
-    base::bits::AlignUp(1 + sizeof(PartitionRefCount), kSmallestBucket);
-}
-static_assert(kSmallestUsedBucket >=
-                  sizeof(PartitionFreelistEntry) + sizeof(PartitionRefCount),
-              "Need enough space for freelist entries and the ref-count in the "
-              "smallest *used* slot");
-#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-
-template <bool crash_on_corruption>
-PA_ALWAYS_INLINE PartitionFreelistEntry*
-PartitionFreelistEntry::GetNextInternal(size_t slot_size,
-                                        bool for_thread_cache) const {
-  // GetNext() can be called on discarded memory, in which case |encoded_next_|
-  // is 0, and none of the checks apply. Don't prefetch nullptr either.
-  if (IsEncodedNextPtrZero()) {
-    return nullptr;
-  }
-
-  auto* ret = encoded_next_.Decode();
-  // We rely on constant propagation to remove the branches coming from
-  // |for_thread_cache|, since the argument is always a compile-time constant.
-  if (PA_UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
-    if constexpr (crash_on_corruption) {
-      // Put the corrupted data on the stack, it may give us more information
-      // about what kind of corruption that was.
-      PA_DEBUG_DATA_ON_STACK("first",
-                             static_cast<size_t>(encoded_next_.encoded_));
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-      PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
-#endif
-      FreelistCorruptionDetected(slot_size);
-    } else {
-      return nullptr;
-    }
-  }
-
-  // In real-world profiles, the load of |encoded_next_| above is responsible
-  // for a large fraction of the allocation cost. However, we cannot prefetch
-  // it early enough, since its address is only known right before the access.
-  //
-  // For repeated allocations, though, the *next* allocation will touch *ret,
-  // so prefetch it now.
-  PA_PREFETCH(ret);
-
-  return ret;
-}
-
-template <bool crash_on_corruption>
-PA_ALWAYS_INLINE PartitionFreelistEntry*
-PartitionFreelistEntry::GetNextForThreadCache(size_t slot_size) const {
-  return GetNextInternal<crash_on_corruption>(slot_size, true);
-}
-
-PA_ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
-    size_t slot_size) const {
-  return GetNextInternal<true>(slot_size, false);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ENCODED_FREELIST_H_
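Transform() is an involution: applying it twice recovers the original address. A small standalone sketch of the little-endian variant (reimplemented here with a compiler byte-swap builtin purely for illustration; the removed header uses partition_alloc's ReverseBytes()):

#include <cassert>
#include <cstdint>

// Stand-in for the little-endian Transform() above, for illustration only.
uint64_t Transform(uint64_t address) {
  return __builtin_bswap64(address);  // GCC/Clang byte-swap builtin
}

int main() {
  const uint64_t slot = 0x00007f1234567890ull;  // a plausible user-space address
  const uint64_t encoded = Transform(slot);

  // Applying the transform twice is a no-op, so it serves as both the encode
  // and the decode step.
  assert(Transform(encoded) == slot);

  // The low bytes of the address land in the high bytes of the encoded value:
  // a partial overwrite of the stored word therefore corrupts the most
  // significant bytes of the decoded pointer, yielding a non-canonical address
  // that faults rather than a nearby valid one.
  assert((encoded >> 56) == (slot & 0xff));
  return 0;
}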
diff --git a/base/allocator/partition_allocator/extended_api.cc b/base/allocator/partition_allocator/extended_api.cc
deleted file mode 100644
index b7fc1d4..0000000
--- a/base/allocator/partition_allocator/extended_api.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/extended_api.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
-
-namespace partition_alloc::internal {
-
-#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-namespace {
-
-void DisableThreadCacheForRootIfEnabled(PartitionRoot* root) {
-  // Some platforms don't have a thread cache, or it could already have been
-  // disabled.
-  if (!root || !root->settings.with_thread_cache) {
-    return;
-  }
-
-  ThreadCacheRegistry::Instance().PurgeAll();
-  root->settings.with_thread_cache = false;
-  // Doesn't destroy the thread cache object(s). For background threads, they
-  // will be collected (and their cached memory freed) at thread destruction
-  // time. For the main thread, we leak it.
-}
-
-void EnablePartitionAllocThreadCacheForRootIfDisabled(PartitionRoot* root) {
-  if (!root) {
-    return;
-  }
-  root->settings.with_thread_cache = true;
-}
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-void DisablePartitionAllocThreadCacheForProcess() {
-  PA_CHECK(allocator_shim::internal::PartitionAllocMalloc::
-               AllocatorConfigurationFinalized());
-  auto* regular_allocator =
-      allocator_shim::internal::PartitionAllocMalloc::Allocator();
-  auto* aligned_allocator =
-      allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
-  DisableThreadCacheForRootIfEnabled(regular_allocator);
-  if (aligned_allocator != regular_allocator) {
-    DisableThreadCacheForRootIfEnabled(aligned_allocator);
-  }
-  DisableThreadCacheForRootIfEnabled(
-      allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
-}
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-}  // namespace
-
-#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-ThreadAllocStats GetAllocStatsForCurrentThread() {
-  ThreadCache* thread_cache = ThreadCache::Get();
-  if (ThreadCache::IsValid(thread_cache)) {
-    return thread_cache->thread_alloc_stats();
-  }
-  return {};
-}
-
-#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
-ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
-    PartitionRoot* root)
-    : root_(root) {
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  auto* regular_allocator =
-      allocator_shim::internal::PartitionAllocMalloc::Allocator();
-  regular_was_enabled_ =
-      regular_allocator && regular_allocator->settings.with_thread_cache;
-
-  if (root_ != regular_allocator) {
-    // A different root is currently the ThreadCache's PartitionRoot. Disable
-    // the thread cache for the whole process before switching it to |root_|.
-    DisablePartitionAllocThreadCacheForProcess();
-    EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
-    // Replace ThreadCache's PartitionRoot.
-    ThreadCache::SwapForTesting(root_);
-  } else {
-    if (!regular_was_enabled_) {
-      EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
-      ThreadCache::SwapForTesting(root_);
-    }
-  }
-#else
-  PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
-  EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
-  ThreadCache::SwapForTesting(root_);
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-  PA_CHECK(ThreadCache::Get());
-}
-
-ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  auto* regular_allocator =
-      allocator_shim::internal::PartitionAllocMalloc::Allocator();
-  bool regular_enabled =
-      regular_allocator && regular_allocator->settings.with_thread_cache;
-
-  if (regular_was_enabled_) {
-    if (!regular_enabled) {
-      // Need to re-enable ThreadCache for the process.
-      EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
-      // In this case, |regular_allocator| must be the ThreadCache's root.
-      ThreadCache::SwapForTesting(regular_allocator);
-    } else {
-      // ThreadCache is enabled for the process, but we need to be
-      // careful about ThreadCache's PartitionRoot. If it is different from
-      // |regular_allocator|, we need to invoke SwapForTesting().
-      if (regular_allocator != root_) {
-        ThreadCache::SwapForTesting(regular_allocator);
-      }
-    }
-  } else {
-    // The ThreadCache was disabled for the whole process.
-    DisableThreadCacheForRootIfEnabled(regular_allocator);
-    ThreadCache::SwapForTesting(nullptr);
-  }
-#else
-  // First, disable the test thread cache we have.
-  DisableThreadCacheForRootIfEnabled(root_);
-
-  ThreadCache::SwapForTesting(nullptr);
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-}
-#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/extended_api.h b/base/allocator/partition_allocator/extended_api.h
deleted file mode 100644
index a99677d..0000000
--- a/base/allocator/partition_allocator/extended_api.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/partition_stats.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
-
-namespace partition_alloc::internal {
-// Get allocation stats for the thread cache partition on the current
-// thread. See the documentation of ThreadAllocStats for details.
-ThreadAllocStats GetAllocStatsForCurrentThread();
-
-// Creates a scope for testing which:
-// - if the given |root| is a default malloc root for the entire process,
-//   enables the thread cache for the entire process.
-//   (This may happen if UsePartitionAllocAsMalloc is enabled.)
-// - otherwise, disables the thread cache for the entire process, and
-//   replaces it with a thread cache for |root|.
-// This class is unsafe to use if more than one thread is running in the
-// process.
-class ThreadCacheProcessScopeForTesting {
- public:
-  explicit ThreadCacheProcessScopeForTesting(PartitionRoot* root);
-  ~ThreadCacheProcessScopeForTesting();
-
-  ThreadCacheProcessScopeForTesting() = delete;
-
- private:
-  PartitionRoot* root_ = nullptr;
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  bool regular_was_enabled_ = false;
-#endif
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
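A hedged sketch of how a test might use this scope together with GetAllocStatsForCurrentThread(); per the class comment it must run while only one thread is active, and the Alloc/Free calls mirror the usage in the unit test above:

#include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/partition_allocator/partition_root.h"

// Illustration only: exercises |root|'s thread cache while the scope is alive.
void ExerciseThreadCacheForRoot(partition_alloc::PartitionRoot* root) {
  // Swaps the process-wide thread cache over to |root| for the scope's
  // lifetime (or enables it process-wide if |root| is the malloc() root).
  partition_alloc::internal::ThreadCacheProcessScopeForTesting scope(root);

  void* ptr = root->Alloc(64, nullptr);
  root->Free(ptr);

  // Per-thread allocation counters accumulated while the cache was active.
  auto stats = partition_alloc::internal::GetAllocStatsForCurrentThread();
  (void)stats;
}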
diff --git a/base/allocator/partition_allocator/freeslot_bitmap.h b/base/allocator/partition_allocator/freeslot_bitmap.h
deleted file mode 100644
index f86ef59..0000000
--- a/base/allocator/partition_allocator/freeslot_bitmap.h
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_H_
-
-#include <climits>
-#include <cstdint>
-#include <utility>
-
-#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-
-namespace partition_alloc::internal {
-
-PA_ALWAYS_INLINE uintptr_t GetFreeSlotBitmapAddressForPointer(uintptr_t ptr) {
-  uintptr_t super_page = ptr & kSuperPageBaseMask;
-  return SuperPageFreeSlotBitmapAddr(super_page);
-}
-
-// Calculates the cell address and the offset inside the cell corresponding to
-// the |slot_start|.
-PA_ALWAYS_INLINE std::pair<FreeSlotBitmapCellType*, size_t>
-GetFreeSlotBitmapCellPtrAndBitIndex(uintptr_t slot_start) {
-  uintptr_t slot_superpage_offset = slot_start & kSuperPageOffsetMask;
-  uintptr_t superpage_bitmap_start =
-      GetFreeSlotBitmapAddressForPointer(slot_start);
-  uintptr_t cell_addr = base::bits::AlignDown(
-      superpage_bitmap_start +
-          (slot_superpage_offset / kSmallestBucket) / CHAR_BIT,
-      sizeof(FreeSlotBitmapCellType));
-  PA_DCHECK(cell_addr < superpage_bitmap_start + kFreeSlotBitmapSize);
-  size_t bit_index =
-      (slot_superpage_offset / kSmallestBucket) & kFreeSlotBitmapOffsetMask;
-  PA_DCHECK(bit_index < kFreeSlotBitmapBitsPerCell);
-  return {reinterpret_cast<FreeSlotBitmapCellType*>(cell_addr), bit_index};
-}
-
-// This bitmap marks a used slot as 0 and a free one as 1. This is because we
-// would like to set all the slots as "used" by default, to prevent allocating
-// a used slot when the freelist entry is overwritten. The state of the bitmap
-// is expected to be synced with the freelist (i.e. the bitmap is set to 1 if
-// and only if the slot is in the freelist).
-
-PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithAOne(size_t n) {
-  return static_cast<FreeSlotBitmapCellType>(1) << n;
-}
-
-PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithTrailingOnes(size_t n) {
-  return (static_cast<FreeSlotBitmapCellType>(1) << n) -
-         static_cast<FreeSlotBitmapCellType>(1);
-}
-
-// Returns true if the bit corresponding to |slot_start| is used (= 0).
-PA_ALWAYS_INLINE bool FreeSlotBitmapSlotIsUsed(uintptr_t slot_start) {
-  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
-  return (*cell & CellWithAOne(bit_index)) == 0;
-}
-
-// Mark the bit corresponding to |slot_start| as used (= 0).
-PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsUsed(uintptr_t slot_start) {
-  PA_CHECK(!FreeSlotBitmapSlotIsUsed(slot_start));
-  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
-  *cell &= ~CellWithAOne(bit_index);
-}
-
-// Mark the bit corresponding to |slot_start| as free (= 1).
-PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsFree(uintptr_t slot_start) {
-  PA_CHECK(FreeSlotBitmapSlotIsUsed(slot_start));
-  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
-  *cell |= CellWithAOne(bit_index);
-}
-
-// Resets (= set to 0) all the bits corresponding to the slot-start addresses
-// within [begin_addr, end_addr). |begin_addr| has to be the beginning of a
-// slot, but |end_addr| need not be.
-PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
-                                          uintptr_t end_addr,
-                                          uintptr_t slot_size) {
-  PA_DCHECK(begin_addr <= end_addr);
-  // |end_addr| has to be kSmallestBucket-aligned.
-  PA_DCHECK((end_addr & (kSmallestBucket - 1)) == 0u);
-  for (uintptr_t slot_start = begin_addr; slot_start < end_addr;
-       slot_start += slot_size) {
-    auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
-    *cell &= ~CellWithAOne(bit_index);
-  }
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Checks if the cells that are meant to contain only unset bits are really 0.
-  auto [begin_cell, begin_bit_index] =
-      GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
-  auto [end_cell, end_bit_index] =
-      GetFreeSlotBitmapCellPtrAndBitIndex(end_addr);
-
-  // The bits that should be set to 0 range from the |begin_bit_index|-th bit
-  // of |begin_cell| to the |end_bit_index - 1|-th bit of |end_cell|. We verify
-  // that all the bits are 0 for the cells in [begin_cell + 1, end_cell).
-  // |begin_cell| and |end_cell| have to be handled separately to check only
-  // the partial bits.
-  // | begin_cell |     |...|     | end_cell |
-  // |11...100...0|0...0|...|0...0|0...01...1|
-  //        ^                           ^
-  //        |                           |
-  //    begin_addr                   end_addr
-
-  if (begin_cell == end_cell) {
-    PA_DCHECK((*begin_cell & (~CellWithTrailingOnes(begin_bit_index) &
-                              CellWithTrailingOnes(end_bit_index))) == 0u);
-  }
-
-  if (begin_bit_index != 0) {
-    // Checks that the bits in [begin_bit_index, kFreeSlotBitmapBitsPerCell) of
-    // |begin_cell| are 0.
-    PA_DCHECK((*begin_cell & ~CellWithTrailingOnes(begin_bit_index)) == 0u);
-    ++begin_cell;
-  }
-
-  if (end_bit_index != 0) {
-    // Checks that the bits in [0, end_bit_index) of |end_cell| are 0.
-    PA_DCHECK((*end_cell & CellWithTrailingOnes(end_bit_index)) == 0u);
-  }
-
-  for (FreeSlotBitmapCellType* cell = begin_cell; cell != end_cell; ++cell) {
-    PA_DCHECK(*cell == 0u);
-  }
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_H_
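
The indexing arithmetic in GetFreeSlotBitmapCellPtrAndBitIndex() is easier to
follow with concrete numbers. The standalone sketch below mirrors the math
without touching real bitmap memory; the 16-byte smallest bucket is an
illustrative assumption, while the 64-bit cell type matches
FreeSlotBitmapCellType.

  #include <climits>
  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <utility>

  constexpr size_t kSmallestBucket = 16;  // assumed smallest slot size
  using CellType = uint64_t;              // matches FreeSlotBitmapCellType
  constexpr size_t kBitsPerCell = sizeof(CellType) * CHAR_BIT;

  // Maps a slot's byte offset within a super page to (cell index, bit index),
  // one bit per kSmallestBucket-sized slot.
  std::pair<size_t, size_t> CellAndBitForOffset(size_t slot_offset) {
    size_t slot_number = slot_offset / kSmallestBucket;
    return {slot_number / kBitsPerCell, slot_number % kBitsPerCell};
  }

  int main() {
    // Slot 0 is bit 0 of cell 0; slot 64 (offset 1024) starts cell 1.
    for (size_t offset : {size_t{0}, size_t{16}, size_t{1024}, size_t{1040}}) {
      auto [cell, bit] = CellAndBitForOffset(offset);
      std::printf("offset %zu -> cell %zu, bit %zu\n", offset, cell, bit);
    }
  }
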
diff --git a/base/allocator/partition_allocator/freeslot_bitmap_constants.h b/base/allocator/partition_allocator/freeslot_bitmap_constants.h
deleted file mode 100644
index 99768d7..0000000
--- a/base/allocator/partition_allocator/freeslot_bitmap_constants.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-
-namespace partition_alloc::internal {
-
-using FreeSlotBitmapCellType = uint64_t;
-constexpr size_t kFreeSlotBitmapBitsPerCell =
-    sizeof(FreeSlotBitmapCellType) * CHAR_BIT;
-constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;
-
-// The bitmap needs one bit per slot in a super page. kFreeSlotBitmapSize is
-// that bit count converted to bytes.
-constexpr size_t kFreeSlotBitmapSize =
-    (kSuperPageSize / kSmallestBucket) / CHAR_BIT;
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-ReservedFreeSlotBitmapSize() {
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
-#else
-  return 0;
-#endif
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-CommittedFreeSlotBitmapSize() {
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
-#else
-  return 0;
-#endif
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-NumPartitionPagesPerFreeSlotBitmap() {
-  return ReservedFreeSlotBitmapSize() / PartitionPageSize();
-}
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
-  PA_DCHECK(!(super_page % kSuperPageAlignment));
-  return super_page + PartitionPageSize();
-}
-#endif
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
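
A quick sanity check of kFreeSlotBitmapSize with assumed constants (2 MiB
super pages, 16-byte smallest bucket, 16 KiB partition pages; none of these
concrete values are taken from this patch, only the formula is):

  #include <climits>
  #include <cstddef>

  int main() {
    constexpr size_t kSuperPageSize = 2 * 1024 * 1024;  // assumed
    constexpr size_t kSmallestBucket = 16;              // assumed
    constexpr size_t kPartitionPageSize = 16 * 1024;    // assumed

    // One bit per potential slot, expressed in bytes: 16 KiB here.
    constexpr size_t kFreeSlotBitmapSize =
        (kSuperPageSize / kSmallestBucket) / CHAR_BIT;
    static_assert(kFreeSlotBitmapSize == 16 * 1024, "");

    // ReservedFreeSlotBitmapSize() rounds up to a whole partition page, so
    // under these assumptions the bitmap occupies exactly one partition page.
    constexpr size_t kReserved =
        (kFreeSlotBitmapSize + kPartitionPageSize - 1) / kPartitionPageSize *
        kPartitionPageSize;
    static_assert(kReserved == kPartitionPageSize, "");
    return 0;
  }
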
diff --git a/base/allocator/partition_allocator/freeslot_bitmap_unittest.cc b/base/allocator/partition_allocator/freeslot_bitmap_unittest.cc
deleted file mode 100644
index bd69204..0000000
--- a/base/allocator/partition_allocator/freeslot_bitmap_unittest.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-
-#include <cstdint>
-#include <limits>
-
-#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// This test is disabled when MEMORY_TOOL_REPLACES_ALLOCATOR is defined because
-// we cannot locate the freeslot bitmap address in that case.
-#if BUILDFLAG(USE_FREESLOT_BITMAP) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-namespace partition_alloc::internal {
-
-namespace {
-
-class PartitionAllocFreeSlotBitmapTest : public ::testing::Test {
- protected:
-  static constexpr FreeSlotBitmapCellType kAllUsed = 0u;
-  static constexpr FreeSlotBitmapCellType kAllFree =
-      std::numeric_limits<FreeSlotBitmapCellType>::max();
-
-  void SetUp() override {
-    // Allocates memory and creates a pseudo superpage in it. We need to
-    // allocate |2 * kSuperPageSize| so that a whole superpage is contained in
-    // the allocated region.
-    allocator_.init(PartitionOptions{});
-    allocated_ptr_ = reinterpret_cast<uintptr_t>(
-        allocator_.root()->Alloc(2 * kSuperPageSize, ""));
-    super_page_ = (allocated_ptr_ + kSuperPageSize) & kSuperPageBaseMask;
-
-    // Checks that the whole superpage is in the allocated region.
-    PA_DCHECK(super_page_ + kSuperPageSize <=
-              allocated_ptr_ + 2 * kSuperPageSize);
-  }
-
-  void TearDown() override {
-    allocator_.root()->Free(reinterpret_cast<void*>(allocated_ptr_));
-  }
-
-  // Returns the |index|-th slot address in the virtual superpage. It assumes
-  // that there are no slot spans and the superpage is filled only with slots
-  // of size |kSmallestBucket|.
-  uintptr_t SlotAddr(size_t index) {
-    return SuperPagePayloadBegin(super_page_, false) + index * kSmallestBucket;
-  }
-
-  // Returns the last slot address in the virtual superpage. It assumes that
-  // there are no slot spans and the superpage is filled only with slots of
-  // size |kSmallestBucket|.
-  uintptr_t LastSlotAddr() {
-    return super_page_ + kSuperPageSize - PartitionPageSize() - kSmallestBucket;
-  }
-
- private:
-  uintptr_t allocated_ptr_;
-  uintptr_t super_page_;
-  PartitionAllocator allocator_;
-};
-
-}  // namespace
-
-TEST_F(PartitionAllocFreeSlotBitmapTest, MarkFirstSlotAsUsed) {
-  uintptr_t slot_addr = SlotAddr(0);
-  FreeSlotBitmapMarkSlotAsFree(slot_addr);
-  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_addr));
-
-  FreeSlotBitmapMarkSlotAsUsed(slot_addr);
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_addr));
-}
-
-TEST_F(PartitionAllocFreeSlotBitmapTest, MarkFirstSlotAsFree) {
-  uintptr_t slot_addr = SlotAddr(0);
-  // All slots are set to "used" by default.
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_addr));
-
-  FreeSlotBitmapMarkSlotAsFree(slot_addr);
-  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_addr));
-}
-
-TEST_F(PartitionAllocFreeSlotBitmapTest, MarkAllBitsInCellAsUsed) {
-  const size_t kFirstSlotAddr = SlotAddr(0);
-  const size_t kLastSlotAddr = SlotAddr(kFreeSlotBitmapBitsPerCell);
-
-  auto [cell_first_slot, bit_index_first_slot] =
-      GetFreeSlotBitmapCellPtrAndBitIndex(kFirstSlotAddr);
-  auto [cell_last_slot, bit_index_last_slot] =
-      GetFreeSlotBitmapCellPtrAndBitIndex(kLastSlotAddr);
-
-  // Check that the bit corresponding to |kFirstSlotAddr| is the first bit in
-  // some cell (= |cell_first_slot|), and the bit for |kLastSlotAddr| is the
-  // first bit in the next cell. This means that we are manipulating all the
-  // bits in |cell_first_slot| in this test.
-  EXPECT_EQ(0u, bit_index_first_slot);
-  EXPECT_EQ(0u, bit_index_last_slot);
-  EXPECT_NE(cell_first_slot, cell_last_slot);
-
-  for (size_t slot_addr = kFirstSlotAddr; slot_addr < kLastSlotAddr;
-       slot_addr += kSmallestBucket) {
-    FreeSlotBitmapMarkSlotAsFree(slot_addr);
-  }
-
-  // Check all the bits in |cell_first_slot| are 1 (= free).
-  EXPECT_EQ(kAllFree, *cell_first_slot);
-
-  for (size_t slot_addr = kFirstSlotAddr; slot_addr < kLastSlotAddr;
-       slot_addr += kSmallestBucket) {
-    FreeSlotBitmapMarkSlotAsUsed(slot_addr);
-  }
-
-  // Check all the bits in |cell_first_slot| are 0 (= used).
-  EXPECT_EQ(kAllUsed, *cell_first_slot);
-}
-
-TEST_F(PartitionAllocFreeSlotBitmapTest, MarkLastSlotAsUsed) {
-  uintptr_t last_slot_addr = LastSlotAddr();
-  FreeSlotBitmapMarkSlotAsFree(last_slot_addr);
-  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(last_slot_addr));
-
-  FreeSlotBitmapMarkSlotAsUsed(last_slot_addr);
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(last_slot_addr));
-}
-
-TEST_F(PartitionAllocFreeSlotBitmapTest, ResetBitmap) {
-  const size_t kNumSlots = 3 * kFreeSlotBitmapBitsPerCell;
-  for (size_t i = 0; i < kNumSlots; ++i) {
-    FreeSlotBitmapMarkSlotAsFree(SlotAddr(i));
-  }
-
-  auto [cell_first_slot, bit_index_first_slot] =
-      GetFreeSlotBitmapCellPtrAndBitIndex(SlotAddr(0));
-  EXPECT_EQ(0u, bit_index_first_slot);
-  EXPECT_EQ(kAllFree, *cell_first_slot);
-  EXPECT_EQ(kAllFree, *(cell_first_slot + 1));
-  EXPECT_EQ(kAllFree, *(cell_first_slot + 2));
-
-  FreeSlotBitmapReset(SlotAddr(kFreeSlotBitmapBitsPerCell),
-                      SlotAddr(2 * kFreeSlotBitmapBitsPerCell),
-                      kSmallestBucket);
-  EXPECT_EQ(kAllFree, *cell_first_slot);
-  EXPECT_EQ(kAllUsed, *(cell_first_slot + 1));
-  EXPECT_EQ(kAllFree, *(cell_first_slot + 2));
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(USE_FREESLOT_BITMAP) &&
-        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/glossary.md b/base/allocator/partition_allocator/glossary.md
index 763164c..d3c8c09 100644
--- a/base/allocator/partition_allocator/glossary.md
+++ b/base/allocator/partition_allocator/glossary.md
@@ -162,7 +162,7 @@
 to the entire-ish codebase (exclusions apply). This was done by intercepting
 `malloc()`, `free()`, `realloc()`, aforementioned `posix_memalign()`, etc. and
 routing them into PartitionAlloc. The shim located in
-`base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h` is
+`base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h` is
 responsible for intercepting. For more details, see
 [base/allocator/README.md](../../../base/allocator/README.md).
 
@@ -180,7 +180,7 @@
 * macOS
 * Fuchsia
 
-[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
-[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
+[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
+[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/thread_cache.h
 [v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview#
 [v8-cfi]: https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/preview#
diff --git a/base/allocator/partition_allocator/gwp_asan_support.cc b/base/allocator/partition_allocator/gwp_asan_support.cc
deleted file mode 100644
index 99bde65..0000000
--- a/base/allocator/partition_allocator/gwp_asan_support.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/gwp_asan_support.h"
-
-#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
-
-#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-// static
-void* GwpAsanSupport::MapRegion(size_t slot_count,
-                                std::vector<uint16_t>& free_list) {
-  PA_CHECK(slot_count > 0);
-
-  constexpr PartitionOptions kConfig{
-      .backup_ref_ptr = PartitionOptions::BackupRefPtr::kEnabled,
-  };
-  static internal::base::NoDestructor<PartitionRoot> root(kConfig);
-
-  const size_t kSlotSize = 2 * internal::SystemPageSize();
-  uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
-      kSlotSize, root->GetBucketDistribution());
-  auto* bucket = root->buckets + bucket_index;
-
-  const size_t kSuperPagePayloadStartOffset =
-      internal::SuperPagePayloadStartOffset(
-          /* is_managed_by_normal_buckets = */ true,
-          /* with_quarantine = */ false);
-  PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
-  const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
-      kSuperPagePayloadStartOffset;
-  const size_t kSuperPageGwpAsanSlotAreaEndOffset =
-      internal::SuperPagePayloadEndOffset();
-  const size_t kSuperPageGwpAsanSlotAreaSize =
-      kSuperPageGwpAsanSlotAreaEndOffset - kSuperPageGwpAsanSlotAreaBeginOffset;
-  const size_t kSlotsPerSlotSpan = bucket->get_bytes_per_span() / kSlotSize;
-  const size_t kSlotsPerSuperPage =
-      kSuperPageGwpAsanSlotAreaSize / (kSlotsPerSlotSpan * kSlotSize);
-
-  size_t super_page_count = 1 + ((slot_count - 1) / kSlotsPerSuperPage);
-  PA_CHECK(super_page_count <=
-           std::numeric_limits<size_t>::max() / kSuperPageSize);
-  uintptr_t super_page_span_start;
-  {
-    internal::ScopedGuard locker{internal::PartitionRootLock(root.get())};
-    super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
-        root.get(), super_page_count, 0);
-
-    if (!super_page_span_start) {
-      return nullptr;
-    }
-
-#if defined(ARCH_CPU_64_BITS)
-    // Mapping the GWP-ASan region into the lower 32 bits of address space
-    // makes it much more likely that a bad pointer dereference points into
-    // our region and triggers a false positive report. We rely on the fact
-    // that PA address pools are never allocated in the first 4GB due to
-    // their alignment requirements.
-    PA_CHECK(super_page_span_start >= (1ULL << 32));
-#endif  // defined(ARCH_CPU_64_BITS)
-
-    uintptr_t super_page_span_end =
-        super_page_span_start + super_page_count * kSuperPageSize;
-    PA_CHECK(super_page_span_start < super_page_span_end);
-
-    for (uintptr_t super_page = super_page_span_start;
-         super_page < super_page_span_end; super_page += kSuperPageSize) {
-      auto* page_metadata =
-          internal::PartitionSuperPageToMetadataArea(super_page);
-
-      // Index 0 is invalid because it is the super page extent metadata.
-      for (size_t partition_page_idx =
-               1 + internal::NumPartitionPagesPerFreeSlotBitmap();
-           partition_page_idx + bucket->get_pages_per_slot_span() <
-           internal::NumPartitionPagesPerSuperPage();
-           partition_page_idx += bucket->get_pages_per_slot_span()) {
-        auto* slot_span_metadata =
-            &page_metadata[partition_page_idx].slot_span_metadata;
-        bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
-        auto slot_span_start =
-            internal::SlotSpanMetadata::ToSlotSpanStart(slot_span_metadata);
-
-        for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
-          auto slot_start = slot_span_start + slot_idx * kSlotSize;
-          internal::PartitionRefCountPointer(slot_start)->InitalizeForGwpAsan();
-          size_t global_slot_idx = (slot_start - super_page_span_start -
-                                    kSuperPageGwpAsanSlotAreaBeginOffset) /
-                                   kSlotSize;
-          PA_DCHECK(global_slot_idx < std::numeric_limits<uint16_t>::max());
-          free_list.push_back(global_slot_idx);
-          if (free_list.size() == slot_count) {
-            return reinterpret_cast<void*>(
-                super_page_span_start + kSuperPageGwpAsanSlotAreaBeginOffset -
-                internal::SystemPageSize());  // Depends on the PA guard region
-                                              // in front of the super page
-                                              // payload area.
-          }
-        }
-      }
-    }
-  }
-
-  PA_NOTREACHED();
-}
-
-// static
-bool GwpAsanSupport::CanReuse(uintptr_t slot_start) {
-  return internal::PartitionRefCountPointer(slot_start)->CanBeReusedByGwpAsan();
-}
-
-}  // namespace partition_alloc
-
-#endif  // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
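
The sizing logic at the top of MapRegion() is a ceiling division of the
requested slot count by the number of usable slots per super page. A small
isolated sketch of that step follows; the 960 slots-per-super-page figure is
purely illustrative, not derived from the real bucket geometry.

  #include <cstddef>
  #include <cstdio>

  // Same form as in MapRegion(): 1 + ((slot_count - 1) / slots_per_super_page),
  // valid because slot_count is checked to be non-zero.
  size_t SuperPagesFor(size_t slot_count, size_t slots_per_super_page) {
    return 1 + ((slot_count - 1) / slots_per_super_page);
  }

  int main() {
    const size_t kSlotsPerSuperPage = 960;  // illustrative only
    std::printf("%zu\n", SuperPagesFor(1, kSlotsPerSuperPage));    // 1
    std::printf("%zu\n", SuperPagesFor(960, kSlotsPerSuperPage));  // 1
    std::printf("%zu\n", SuperPagesFor(961, kSlotsPerSuperPage));  // 2
  }
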
diff --git a/base/allocator/partition_allocator/gwp_asan_support.h b/base/allocator/partition_allocator/gwp_asan_support.h
deleted file mode 100644
index dd1f96e..0000000
--- a/base/allocator/partition_allocator/gwp_asan_support.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_GWP_ASAN_SUPPORT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_GWP_ASAN_SUPPORT_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
-
-#include <cstddef>
-#include <cstdint>
-#include <vector>
-
-namespace partition_alloc {
-
-// This class allows GWP-ASan allocations to be backed by PartitionAlloc and,
-// consequently, protected by MiraclePtr.
-//
-// GWP-ASan mainly operates at the system memory page granularity. During
-// process startup, it reserves a certain number of consecutive system pages.
-//
-// The standard layout is as follows:
-//
-//   +-------------------+--------
-//   |                   | ▲   ▲
-//   |   system page 0   |(a) (c)
-//   |                   | ▼   ▼
-//   +-------------------+--------
-//   |                   | ▲   ▲
-//   |   system page 1   |(b)  |
-//   |                   | ▼   |
-//   +-------------------+--- (d)    (a) inaccessible
-//   |                   | ▲   |     (b) accessible
-//   |   system page 2   |(a)  |     (c) initial guard page
-//   |                   | ▼   ▼     (d) allocation slot
-//   +-------------------+--------
-//   |                   | ▲   ▲
-//   |   system page 3   |(b)  |
-//   |                   | ▼   |
-//   +-------------------+--- (d)
-//   |                   | ▲   |
-//   |   system page 4   |(a)  |
-//   |                   | ▼   ▼
-//   |-------------------|--------
-//   |                   | ▲   ▲
-//   |        ...        |(a) (d)
-//
-// Unfortunately, PartitionAlloc can't provide GWP-ASan an arbitrary number of
-// consecutive allocation slots. Allocations need to be grouped into 2MB super
-// pages so that the allocation metadata can be easily located.
-//
-// Below is the new layout:
-//
-//   +-----------------------------------
-//   |                   |         ▲   ▲
-//   |   system page 0   |         |   |
-//   |                   |         |   |
-//   +-------------------+         |   |
-//   |                   |         |   |
-//   |        ...        |        (e)  |
-//   |                   |         |   |
-//   +-------------------+-------  |   |
-//   |                   | ▲   ▲   |   |
-//   |  system page k-1  |(a) (c)  |   |
-//   |                   | ▼   ▼   ▼   |
-//   +-------------------+----------- (f)
-//   |                   | ▲   ▲       |
-//   |   system page k   |(b)  |       |
-//   |                   | ▼   |       |
-//   +-------------------+--- (d)      |
-//   |                   | ▲   |       |
-//   |  system page k+1  |(a)  |       |
-//   |                   | ▼   ▼       |
-//   +-------------------+-----------  |
-//   |                   |             |    (a) inaccessible
-//   |        ...        |             |    (b) accessible
-//   |                   |             ▼    (c) initial guard page
-//   +-----------------------------------   (d) allocation slot
-//   |                   |         ▲   ▲    (e) super page metadata
-//   |   system page m   |         |   |    (f) super page
-//   |                   |         |   |    (g) pseudo allocation slot
-//   +-------------------+-------  |   |
-//   |                   |     ▲   |   |
-//   |        ...        |     |  (e)  |
-//   |                   |     |   |   |
-//   +-------------------+--- (g)  |   |
-//   |                   | ▲   |   |   |
-//   | system page m+k-1 |(a)  |   |   |
-//   |                   | ▼   ▼   ▼   |
-//   +-------------------+----------- (f)
-//   |                   | ▲   ▲       |
-//   |  system page m+k  |(b)  |       |
-//   |                   | ▼   |       |
-//   +-------------------+--- (d)      |
-//   |                   | ▲   |       |
-//   | system page m+k+1 |(a)  |       |
-//   |                   | ▼   ▼       |
-//   +-------------------+-----------  |
-//   |                   |             |
-//   |        ...        |             |
-//   |                   |             ▼
-//   +-------------------+---------------
-//
-// This means some allocation slots will be reserved to hold PA
-// metadata. We exclude these pseudo slots from the GWP-ASan free list so that
-// they are never used for anything other than storing the metadata.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) GwpAsanSupport {
- public:
-  static void* MapRegion(size_t slot_count, std::vector<uint16_t>& free_list);
-  static bool CanReuse(uintptr_t slot_start);
-};
-
-}  // namespace partition_alloc
-
-#endif  // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_GWP_ASAN_SUPPORT_H_
diff --git a/base/allocator/partition_allocator/hardening_unittest.cc b/base/allocator/partition_allocator/hardening_unittest.cc
deleted file mode 100644
index c618ce5..0000000
--- a/base/allocator/partition_allocator/hardening_unittest.cc
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstdint>
-#include <string>
-#include <vector>
-
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_freelist_entry.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// With *SAN, PartitionAlloc is rerouted to malloc().
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-namespace partition_alloc::internal {
-namespace {
-
-// Death tests misbehave on Android, crbug.com/1240184
-#if !BUILDFLAG(IS_ANDROID) && defined(GTEST_HAS_DEATH_TEST) && \
-    PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-
-TEST(HardeningTest, PartialCorruption) {
-  std::string important_data("very important");
-  char* to_corrupt = const_cast<char*>(important_data.c_str());
-
-  PartitionRoot root(PartitionOptions{
-      .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-  });
-  root.UncapEmptySlotSpanMemoryForTesting();
-
-  const size_t kAllocSize = 100;
-  void* data = root.Alloc(kAllocSize, "");
-  void* data2 = root.Alloc(kAllocSize, "");
-  root.Free(data2);
-  root.Free(data);
-
-  // root->bucket->active_slot_span_head->freelist_head points to data, next_
-  // points to data2. We can corrupt *data to overwrite the next_ pointer.
-  // Even if it looks reasonable (valid encoded pointer), freelist corruption
-  // detection will make the code crash, because shadow_ doesn't match
-  // encoded_next_.
-  PartitionFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
-                                                to_corrupt, false);
-  EXPECT_DEATH(root.Alloc(kAllocSize, ""), "");
-}
-
-TEST(HardeningTest, OffHeapPointerCrashing) {
-  std::string important_data("very important");
-  char* to_corrupt = const_cast<char*>(important_data.c_str());
-
-  PartitionRoot root(PartitionOptions{
-      .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-  });
-  root.UncapEmptySlotSpanMemoryForTesting();
-
-  const size_t kAllocSize = 100;
-  void* data = root.Alloc(kAllocSize, "");
-  void* data2 = root.Alloc(kAllocSize, "");
-  root.Free(data2);
-  root.Free(data);
-
-  // See "PartialCorruption" above for details. This time, make shadow_
-  // consistent.
-  PartitionFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
-                                                to_corrupt, true);
-
-  // Crashes, because |to_corrupt| is not on the same superpage as data.
-  EXPECT_DEATH(root.Alloc(kAllocSize, ""), "");
-}
-
-TEST(HardeningTest, MetadataPointerCrashing) {
-  PartitionRoot root(PartitionOptions{
-      .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-  });
-  root.UncapEmptySlotSpanMemoryForTesting();
-
-  const size_t kAllocSize = 100;
-  void* data = root.Alloc(kAllocSize, "");
-  void* data2 = root.Alloc(kAllocSize, "");
-  root.Free(data2);
-  root.Free(data);
-
-  uintptr_t slot_start = root.ObjectToSlotStart(data);
-  auto* metadata = SlotSpanMetadata::FromSlotStart(slot_start);
-  PartitionFreelistEntry::EmplaceAndInitForTest(slot_start, metadata, true);
-
-  // Crashes, because |metadata| points inside the metadata area.
-  EXPECT_DEATH(root.Alloc(kAllocSize, ""), "");
-}
-#endif  // !BUILDFLAG(IS_ANDROID) && defined(GTEST_HAS_DEATH_TEST) &&
-        // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-
-// The test below also misbehaves on Android; as above, death tests don't
-// quite work (crbug.com/1240184), and having free slot bitmaps enabled
-// forces the expectations below to crash.
-#if !BUILDFLAG(IS_ANDROID)
-
-TEST(HardeningTest, SuccessfulCorruption) {
-  PartitionRoot root(PartitionOptions{
-      .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-  });
-  root.UncapEmptySlotSpanMemoryForTesting();
-
-  uintptr_t* zero_vector = reinterpret_cast<uintptr_t*>(
-      root.AllocWithFlags(AllocFlags::kZeroFill, 100 * sizeof(uintptr_t), ""));
-  ASSERT_TRUE(zero_vector);
-  // Pointer to the middle of an existing allocation.
-  uintptr_t* to_corrupt = zero_vector + 20;
-
-  const size_t kAllocSize = 100;
-  void* data = root.Alloc(kAllocSize, "");
-  void* data2 = root.Alloc(kAllocSize, "");
-  root.Free(data2);
-  root.Free(data);
-
-  PartitionFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
-                                                to_corrupt, true);
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  // This part crashes with the freeslot bitmap because it detects freelist
-  // corruption, which is rather desirable behavior.
-  EXPECT_DEATH_IF_SUPPORTED(root.Alloc(kAllocSize, ""), "");
-#else
-  // Next allocation is what was in
-  // root->bucket->active_slot_span_head->freelist_head, so not the corrupted
-  // pointer.
-  void* new_data = root.Alloc(kAllocSize, "");
-  ASSERT_EQ(new_data, data);
-
-  // Not crashing, because a zeroed area is a "valid" freelist entry.
-  void* new_data2 = root.Alloc(kAllocSize, "");
-  // Now we have a pointer to the middle of an existing allocation.
-  EXPECT_EQ(new_data2, to_corrupt);
-#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)
-}
-#endif  // !BUILDFLAG(IS_ANDROID)
-
-}  // namespace
-}  // namespace partition_alloc::internal
-
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
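
The freelist corruption checks exercised above hinge on the shadow entry: each
freelist link carries a second word derived from the encoded next pointer, and
a mismatch when the entry is popped is treated as corruption. The toy model
below uses a simple bitwise complement as the shadow; the real encoding in
partition_freelist_entry.h differs, and this is only meant to show the shape
of the check.

  #include <cstdint>
  #include <cstdlib>

  // Toy freelist node: the next pointer plus a shadow copy used as a checksum.
  struct ToyFreelistEntry {
    uintptr_t encoded_next;
    uintptr_t shadow;  // derived from encoded_next; here simply its complement

    static ToyFreelistEntry Make(uintptr_t next) { return {next, ~next}; }

    // Mirrors the "shadow_ doesn't match encoded_next_" failure mode: an
    // overwrite that only touches encoded_next (e.g. a linear overflow) is
    // caught before the bogus pointer is handed out.
    uintptr_t PopNextOrDie() const {
      if (shadow != ~encoded_next) {
        std::abort();  // freelist corruption detected
      }
      return encoded_next;
    }
  };
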
diff --git a/base/allocator/partition_allocator/memory_reclaimer.cc b/base/allocator/partition_allocator/memory_reclaimer.cc
deleted file mode 100644
index dabfeae..0000000
--- a/base/allocator/partition_allocator/memory_reclaimer.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
-
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-
-namespace partition_alloc {
-
-// static
-MemoryReclaimer* MemoryReclaimer::Instance() {
-  static internal::base::NoDestructor<MemoryReclaimer> instance;
-  return instance.get();
-}
-
-void MemoryReclaimer::RegisterPartition(PartitionRoot* partition) {
-  internal::ScopedGuard lock(lock_);
-  PA_DCHECK(partition);
-  auto it_and_whether_inserted = partitions_.insert(partition);
-  PA_DCHECK(it_and_whether_inserted.second);
-}
-
-void MemoryReclaimer::UnregisterPartition(PartitionRoot* partition) {
-  internal::ScopedGuard lock(lock_);
-  PA_DCHECK(partition);
-  size_t erased_count = partitions_.erase(partition);
-  PA_DCHECK(erased_count == 1u);
-}
-
-MemoryReclaimer::MemoryReclaimer() = default;
-MemoryReclaimer::~MemoryReclaimer() = default;
-
-void MemoryReclaimer::ReclaimAll() {
-  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
-                         PurgeFlags::kDiscardUnusedSystemPages |
-                         PurgeFlags::kAggressiveReclaim;
-  Reclaim(kFlags);
-}
-
-void MemoryReclaimer::ReclaimNormal() {
-  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
-                         PurgeFlags::kDiscardUnusedSystemPages;
-  Reclaim(kFlags);
-}
-
-void MemoryReclaimer::Reclaim(int flags) {
-  internal::ScopedGuard lock(
-      lock_);  // Has to protect from concurrent (Un)Register calls.
-
-  // PCScan quarantines freed slots. Trigger the scan first to let it call
-  // FreeNoHooksImmediate on slots that pass the quarantine.
-  //
-  // In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
-  // so that the slots are actually freed. (This is done synchronously only for
-  // the current thread.)
-  //
-  // Lastly, decommit empty slot spans and then try to discard unused pages at
-  // the end of the remaining active slot spans.
-#if PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) && BUILDFLAG(USE_STARSCAN)
-  {
-    using PCScan = internal::PCScan;
-    const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
-                                     ? PCScan::InvocationMode::kForcedBlocking
-                                     : PCScan::InvocationMode::kBlocking;
-    PCScan::PerformScanIfNeeded(invocation_mode);
-  }
-#endif  // PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) &&
-        // BUILDFLAG(USE_STARSCAN)
-
-#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
-  // Don't completely empty the thread cache outside of low memory situations,
-  // as there is a periodic purge which makes sure that it doesn't take too much
-  // space.
-  if (flags & PurgeFlags::kAggressiveReclaim) {
-    ThreadCacheRegistry::Instance().PurgeAll();
-  }
-#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-  for (auto* partition : partitions_) {
-    partition->PurgeMemory(flags);
-  }
-}
-
-void MemoryReclaimer::ResetForTesting() {
-  internal::ScopedGuard lock(lock_);
-  partitions_.clear();
-}
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/memory_reclaimer.h b/base/allocator/partition_allocator/memory_reclaimer.h
deleted file mode 100644
index 4d90ccb..0000000
--- a/base/allocator/partition_allocator/memory_reclaimer.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
-
-#include <memory>
-#include <set>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-namespace partition_alloc {
-
-// Posts and handles memory reclaim tasks for PartitionAlloc.
-//
-// PartitionAlloc users are responsible for scheduling and calling the
-// reclamation methods with their own timers / event loops.
-//
-// Singleton as this runs as long as the process is alive, and
-// having multiple instances would be wasteful.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
- public:
-  static MemoryReclaimer* Instance();
-
-  MemoryReclaimer(const MemoryReclaimer&) = delete;
-  MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;
-
-  // Internal. Do not use.
-  // Registers a partition to be tracked by the reclaimer.
-  void RegisterPartition(PartitionRoot* partition);
-  // Internal. Do not use.
-  // Unregisters a partition to be tracked by the reclaimer.
-  void UnregisterPartition(PartitionRoot* partition);
-
-  // Triggers an explicit reclaim now to reclaim as much free memory as
-  // possible. API callers need to invoke this method periodically
-  // if they want to use the memory reclaimer.
-  // See also GetRecommendedReclaimIntervalInMicroseconds()'s comment.
-  void ReclaimNormal();
-
-  // Returns a recommended interval to invoke ReclaimNormal.
-  int64_t GetRecommendedReclaimIntervalInMicroseconds() {
-    return internal::base::Seconds(4).InMicroseconds();
-  }
-
-  // Triggers an explicit reclaim now, reclaiming all free memory.
-  void ReclaimAll();
-
- private:
-  MemoryReclaimer();
-  ~MemoryReclaimer();
-  // |flags| is an OR of PurgeFlags values.
-  void Reclaim(int flags);
-  void ResetForTesting();
-
-  internal::Lock lock_;
-  std::set<PartitionRoot*> partitions_ PA_GUARDED_BY(lock_);
-
-  friend class internal::base::NoDestructor<MemoryReclaimer>;
-  friend class MemoryReclaimerTest;
-};
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
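
MemoryReclaimer schedules nothing on its own, so the embedder has to drive it.
A minimal, hypothetical driver loop follows; only Instance(), ReclaimNormal(),
ReclaimAll() and GetRecommendedReclaimIntervalInMicroseconds() come from the
header above, while the thread and stop flag are the embedder's own plumbing.

  #include <atomic>
  #include <chrono>
  #include <thread>

  #include "base/allocator/partition_allocator/memory_reclaimer.h"

  // Runs ReclaimNormal() at the recommended cadence until |stop| is set, then
  // performs one aggressive ReclaimAll() before exiting.
  void ReclaimLoop(std::atomic<bool>& stop) {
    auto* reclaimer = partition_alloc::MemoryReclaimer::Instance();
    const auto interval = std::chrono::microseconds(
        reclaimer->GetRecommendedReclaimIntervalInMicroseconds());
    while (!stop.load(std::memory_order_relaxed)) {
      std::this_thread::sleep_for(interval);
      reclaimer->ReclaimNormal();
    }
    reclaimer->ReclaimAll();
  }
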
diff --git a/base/allocator/partition_allocator/memory_reclaimer_unittest.cc b/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
deleted file mode 100644
index 7625215..0000000
--- a/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
-
-#include <memory>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
-    PA_CONFIG(THREAD_CACHE_SUPPORTED)
-#include "base/allocator/partition_allocator/extended_api.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
-#endif
-
-// Otherwise, PartitionAlloc doesn't allocate any memory, and the tests are
-// meaningless.
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-namespace partition_alloc {
-
-namespace {
-
-void HandleOOM(size_t unused_size) {
-  PA_LOG(FATAL) << "Out of memory";
-}
-
-}  // namespace
-
-class MemoryReclaimerTest : public ::testing::Test {
- public:
-  MemoryReclaimerTest() {
-    // Since MemoryReclaimer::ResetForTesting() clears partitions_,
-    // we need to create the PartitionAllocator after this ResetForTesting().
-    // Otherwise, no PartitionAllocator would be registered.
-    MemoryReclaimer::Instance()->ResetForTesting();
-
-    allocator_ =
-        std::make_unique<PartitionAllocatorForTesting>(PartitionOptions{
-            .star_scan_quarantine =
-                PartitionOptions::StarScanQuarantine::kAllowed,
-        });
-    allocator_->root()->UncapEmptySlotSpanMemoryForTesting();
-    PartitionAllocGlobalInit(HandleOOM);
-  }
-
-  ~MemoryReclaimerTest() override {
-    // Since MemoryReclaimer::UnregisterPartition() checks whether
-    // the given partition is managed by MemoryReclaimer, we need to
-    // destroy |allocator_| before ResetForTesting().
-    allocator_ = nullptr;
-    PartitionAllocGlobalUninitForTesting();
-  }
-
-  void Reclaim() { MemoryReclaimer::Instance()->ReclaimNormal(); }
-
-  void AllocateAndFree() {
-    void* data = allocator_->root()->Alloc(1, "");
-    allocator_->root()->Free(data);
-  }
-
-  std::unique_ptr<PartitionAllocatorForTesting> allocator_;
-};
-
-TEST_F(MemoryReclaimerTest, FreesMemory) {
-  PartitionRoot* root = allocator_->root();
-
-  size_t committed_initially = root->get_total_size_of_committed_pages();
-  AllocateAndFree();
-  size_t committed_before = root->get_total_size_of_committed_pages();
-
-  EXPECT_GT(committed_before, committed_initially);
-
-  Reclaim();
-  size_t committed_after = root->get_total_size_of_committed_pages();
-  EXPECT_LT(committed_after, committed_before);
-  EXPECT_LE(committed_initially, committed_after);
-}
-
-TEST_F(MemoryReclaimerTest, Reclaim) {
-  PartitionRoot* root = allocator_->root();
-  size_t committed_initially = root->get_total_size_of_committed_pages();
-
-  {
-    AllocateAndFree();
-
-    size_t committed_before = root->get_total_size_of_committed_pages();
-    EXPECT_GT(committed_before, committed_initially);
-    MemoryReclaimer::Instance()->ReclaimAll();
-    size_t committed_after = root->get_total_size_of_committed_pages();
-
-    EXPECT_LT(committed_after, committed_before);
-    EXPECT_LE(committed_initially, committed_after);
-  }
-}
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
-    PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-namespace {
-// malloc() / free() pairs can be removed by the compiler; this is enough (for
-// now) to prevent that.
-PA_NOINLINE void FreeForTest(void* data) {
-  free(data);
-}
-}  // namespace
-
-TEST_F(MemoryReclaimerTest, DoNotAlwaysPurgeThreadCache) {
-  // Make sure the thread cache is enabled in the main partition.
-  internal::ThreadCacheProcessScopeForTesting scope(
-      allocator_shim::internal::PartitionAllocMalloc::Allocator());
-
-  for (size_t i = 0; i < ThreadCache::kDefaultSizeThreshold; i++) {
-    void* data = malloc(i);
-    FreeForTest(data);
-  }
-
-  auto* tcache = ThreadCache::Get();
-  ASSERT_TRUE(tcache);
-  size_t cached_size = tcache->CachedMemory();
-
-  Reclaim();
-
-  // No thread cache purging during periodic purge, but with ReclaimAll().
-  //
-  // Cannot assert on the exact size of the thread cache, since it can shrink
-  // when a buffer is overfull, and this may happen through other malloc()
-  // allocations in the test harness.
-  EXPECT_GT(tcache->CachedMemory(), cached_size / 2);
-
-  Reclaim();
-  EXPECT_GT(tcache->CachedMemory(), cached_size / 2);
-
-  MemoryReclaimer::Instance()->ReclaimAll();
-  EXPECT_LT(tcache->CachedMemory(), cached_size / 2);
-}
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
-        // PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-}  // namespace partition_alloc
-
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/oom.cc b/base/allocator/partition_allocator/oom.cc
deleted file mode 100644
index e007390..0000000
--- a/base/allocator/partition_allocator/oom.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/oom.h"
-
-#include "base/allocator/partition_allocator/oom_callback.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-
-#include <stdlib.h>
-
-#include <array>
-#endif  // BUILDFLAG(IS_WIN)
-
-namespace partition_alloc {
-
-size_t g_oom_size = 0U;
-
-namespace internal {
-
-// Crash server classifies base::internal::OnNoMemoryInternal as OOM.
-// TODO(crbug.com/1151236): Update to
-// partition_alloc::internal::base::internal::OnNoMemoryInternal
-PA_NOINLINE void OnNoMemoryInternal(size_t size) {
-  g_oom_size = size;
-#if BUILDFLAG(IS_WIN)
-  // Kill the process. This is important for security since most code
-  // does not check the result of memory allocation.
-  // https://msdn.microsoft.com/en-us/library/het71c37.aspx
-  // Pass the size of the failed request in an exception argument.
-  ULONG_PTR exception_args[] = {size};
-  ::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
-                   std::size(exception_args), exception_args);
-
-  // Safety check, make sure process exits here.
-  _exit(win::kOomExceptionCode);
-#else
-  size_t tmp_size = size;
-  internal::base::debug::Alias(&tmp_size);
-
-  // Note: Don't add anything that may allocate here. Depending on the
-  // allocator, this may be called from within the allocator (e.g. with
-  // PartitionAlloc), and would deadlock as our locks are not recursive.
-  //
-  // Additionally, this is unlikely to work, since allocating from an OOM
-  // handler is likely to fail.
-  //
-  // Use PA_IMMEDIATE_CRASH() so that the top frame in the crash is our code,
-  // rather than using abort() or similar; this avoids the crash server needing
-  // to be able to successfully unwind through libc to get to the correct
-  // address, which is particularly an issue on Android.
-  PA_IMMEDIATE_CRASH();
-#endif  // BUILDFLAG(IS_WIN)
-}
-
-}  // namespace internal
-
-void TerminateBecauseOutOfMemory(size_t size) {
-  internal::OnNoMemoryInternal(size);
-}
-
-namespace internal {
-
-// The crash is generated in a PA_NOINLINE function so that we can classify the
-// crash as an OOM solely by analyzing the stack trace. It is tagged as
-// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
-[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemory(size_t size) {
-  RunPartitionAllocOomCallback();
-  TerminateBecauseOutOfMemory(size);
-  PA_IMMEDIATE_CRASH();
-}
-
-}  // namespace internal
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/oom.h b/base/allocator/partition_allocator/oom.h
deleted file mode 100644
index 493dff9..0000000
--- a/base/allocator/partition_allocator/oom.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
-
-#include <cstddef>
-
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
-#endif
-
-namespace partition_alloc {
-
-// Terminates process. Should be called only for out of memory errors.
-// |size| is the size of the failed allocation, or 0 if not known.
-// Crash reporting classifies such crashes as OOM.
-// Must be allocation-safe.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void TerminateBecauseOutOfMemory(size_t size);
-
-// Records the size of the allocation that caused the current OOM crash, for
-// consumption by Breakpad.
-// TODO: this can be removed when Breakpad is no longer supported.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size;
-
-#if BUILDFLAG(IS_WIN)
-namespace win {
-
-// Custom Windows exception code chosen to indicate an out of memory error.
-// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
-// "To make sure that you do not define a code that conflicts with an existing
-// exception code" ... "The resulting error code should therefore have the
-// highest four bits set to hexadecimal E."
-// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
-const DWORD kOomExceptionCode = 0xe0000008;
-
-}  // namespace win
-#endif
-
-namespace internal {
-
-// The crash is generated in a PA_NOINLINE function so that we can classify the
-// crash as an OOM solely by analyzing the stack trace. It is tagged as
-// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
-[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT(
-    PARTITION_ALLOC) void OnNoMemory(size_t size);
-
-// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
-// exception on Windows to signal this is OOM and not a normal assert.
-// OOM_CRASH(size) is called by users of PageAllocator (including
-// PartitionAlloc) to signify an allocation failure from the platform.
-#define OOM_CRASH(size)                                     \
-  do {                                                      \
-    /* Raising an exception might allocate, allow that.  */ \
-    ::partition_alloc::ScopedAllowAllocations guard{};      \
-    ::partition_alloc::internal::OnNoMemory(size);          \
-  } while (0)
-
-}  // namespace internal
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
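
OOM_CRASH() is the funnel every platform allocation failure is routed through.
A hypothetical call site might look like the following; DieOnNull() is an
invented helper, not part of the deleted file.

  #include <cstddef>

  #include "base/allocator/partition_allocator/oom.h"

  // Callers pass the result of a raw platform allocation (mmap, VirtualAlloc);
  // a null result is treated as OOM and never returned.
  void* DieOnNull(void* maybe_null, size_t requested_size) {
    if (!maybe_null) {
      // Records |requested_size| in g_oom_size and terminates through the
      // OOM-classified crash path (custom exception code 0xe0000008 on
      // Windows).
      OOM_CRASH(requested_size);
    }
    return maybe_null;
  }
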
diff --git a/base/allocator/partition_allocator/oom_callback.cc b/base/allocator/partition_allocator/oom_callback.cc
deleted file mode 100644
index 75877ea..0000000
--- a/base/allocator/partition_allocator/oom_callback.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/oom_callback.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc {
-
-namespace {
-PartitionAllocOomCallback g_oom_callback;
-}  // namespace
-
-void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
-  PA_DCHECK(!g_oom_callback);
-  g_oom_callback = callback;
-}
-
-namespace internal {
-void RunPartitionAllocOomCallback() {
-  if (g_oom_callback) {
-    g_oom_callback();
-  }
-}
-}  // namespace internal
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/oom_callback.h b/base/allocator/partition_allocator/oom_callback.h
deleted file mode 100644
index f3730ec..0000000
--- a/base/allocator/partition_allocator/oom_callback.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc {
-
-using PartitionAllocOomCallback = void (*)();
-
-// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
-// invoked by users of PageAllocator (including PartitionAlloc) to signify an
-// allocation failure from the platform.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback);
-
-namespace internal {
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RunPartitionAllocOomCallback();
-}  // namespace internal
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
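
The callback registration removed above is a one-shot hook invoked from the
OOM crash path just before the process terminates. A small sketch of an
embedder wiring it up; the logging body is illustrative and deliberately
avoids allocating.

  #include <cstdio>

  #include "base/allocator/partition_allocator/oom_callback.h"

  // Runs inside the OOM crash path, so it must not allocate.
  void OnPartitionAllocOom() {
    std::fputs("PartitionAlloc out of memory\n", stderr);
  }

  void InstallOomHook() {
    // SetPartitionAllocOomCallback() DCHECKs that no callback was registered
    // before, so call this exactly once at startup.
    partition_alloc::SetPartitionAllocOomCallback(&OnPartitionAllocOom);
  }
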
diff --git a/base/allocator/partition_allocator/page_allocator.cc b/base/allocator/partition_allocator/page_allocator.cc
deleted file mode 100644
index 6268ffa..0000000
--- a/base/allocator/partition_allocator/page_allocator.cc
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-
-#include <atomic>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/address_space_randomization.h"
-#include "base/allocator/partition_allocator/page_allocator_internal.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#endif
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
-#elif BUILDFLAG(IS_POSIX)
-#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
-#elif BUILDFLAG(IS_FUCHSIA)
-#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
-#else
-#error Platform not supported.
-#endif
-
-namespace partition_alloc {
-
-namespace {
-
-internal::Lock g_reserve_lock;
-
-// We may reserve/release address space on different threads.
-internal::Lock& GetReserveLock() {
-  return g_reserve_lock;
-}
-
-std::atomic<size_t> g_total_mapped_address_space;
-
-// We only support a single block of reserved address space.
-uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
-size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
-
-uintptr_t AllocPagesIncludingReserved(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    int file_descriptor_for_shared_alloc = -1) {
-  uintptr_t ret =
-      internal::SystemAllocPages(address, length, accessibility, page_tag,
-                                 file_descriptor_for_shared_alloc);
-  if (!ret) {
-    const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
-    if (cant_alloc_length) {
-      // The system cannot allocate |length| bytes. Release any reserved address
-      // space and try once more.
-      ReleaseReservation();
-      ret = internal::SystemAllocPages(address, length, accessibility, page_tag,
-                                       file_descriptor_for_shared_alloc);
-    }
-  }
-  return ret;
-}
-
-// Trims memory at |base_address| to the given |trim_length| and |alignment|.
-//
-// On failure, on Windows, this function returns 0 and frees memory at
-// |base_address|.
-uintptr_t TrimMapping(uintptr_t base_address,
-                      size_t base_length,
-                      size_t trim_length,
-                      uintptr_t alignment,
-                      uintptr_t alignment_offset,
-                      PageAccessibilityConfiguration accessibility) {
-  PA_DCHECK(base_length >= trim_length);
-  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
-  PA_DCHECK(alignment_offset < alignment);
-  uintptr_t new_base =
-      NextAlignedWithOffset(base_address, alignment, alignment_offset);
-  PA_DCHECK(new_base >= base_address);
-  size_t pre_slack = new_base - base_address;
-  size_t post_slack = base_length - pre_slack - trim_length;
-  PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
-  PA_DCHECK(pre_slack < base_length);
-  PA_DCHECK(post_slack < base_length);
-  return internal::TrimMappingInternal(base_address, base_length, trim_length,
-                                       accessibility, pre_slack, post_slack);
-}
-
-}  // namespace
-
-// Align |address| up to the closest, non-smaller address, that gives
-// |requested_offset| remainder modulo |alignment|.
-//
-// Examples for alignment=1024 and requested_offset=64:
-//   64 -> 64
-//   65 -> 1088
-//   1024 -> 1088
-//   1088 -> 1088
-//   1089 -> 2112
-//   2048 -> 2112
-uintptr_t NextAlignedWithOffset(uintptr_t address,
-                                uintptr_t alignment,
-                                uintptr_t requested_offset) {
-  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
-  PA_DCHECK(requested_offset < alignment);
-
-  uintptr_t actual_offset = address & (alignment - 1);
-  uintptr_t new_address;
-  if (actual_offset <= requested_offset) {
-    new_address = address + requested_offset - actual_offset;
-  } else {
-    new_address = address + alignment + requested_offset - actual_offset;
-  }
-  PA_DCHECK(new_address >= address);
-  PA_DCHECK(new_address - address < alignment);
-  PA_DCHECK(new_address % alignment == requested_offset);
-
-  return new_address;
-}
-
-namespace internal {
-
-uintptr_t SystemAllocPages(uintptr_t hint,
-                           size_t length,
-                           PageAccessibilityConfiguration accessibility,
-                           PageTag page_tag,
-                           int file_descriptor_for_shared_alloc) {
-  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
-  PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
-  uintptr_t ret = internal::SystemAllocPagesInternal(
-      hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
-  if (ret) {
-    g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
-  }
-
-  return ret;
-}
-
-}  // namespace internal
-
-uintptr_t AllocPages(size_t length,
-                     size_t align,
-                     PageAccessibilityConfiguration accessibility,
-                     PageTag page_tag,
-                     int file_descriptor_for_shared_alloc) {
-  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility, page_tag,
-                                   file_descriptor_for_shared_alloc);
-}
-uintptr_t AllocPages(uintptr_t address,
-                     size_t length,
-                     size_t align,
-                     PageAccessibilityConfiguration accessibility,
-                     PageTag page_tag) {
-  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
-                                   page_tag);
-}
-void* AllocPages(void* address,
-                 size_t length,
-                 size_t align,
-                 PageAccessibilityConfiguration accessibility,
-                 PageTag page_tag) {
-  return reinterpret_cast<void*>(
-      AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
-                 accessibility, page_tag));
-}
-
-uintptr_t AllocPagesWithAlignOffset(
-    uintptr_t address,
-    size_t length,
-    size_t align,
-    size_t align_offset,
-    PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    int file_descriptor_for_shared_alloc) {
-  PA_DCHECK(length >= internal::PageAllocationGranularity());
-  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
-  PA_DCHECK(align >= internal::PageAllocationGranularity());
-  // Alignment must be power of 2 for masking math to work.
-  PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
-  PA_DCHECK(align_offset < align);
-  PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
-  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
-  uintptr_t align_offset_mask = align - 1;
-  uintptr_t align_base_mask = ~align_offset_mask;
-  PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
-
-  // If the client passed null as the address, choose a good one.
-  if (!address) {
-    address = (GetRandomPageBase() & align_base_mask) + align_offset;
-  }
-
-  // First try to force an exact-size, aligned allocation from our random base.
-#if defined(ARCH_CPU_32_BITS)
-  // On 32 bit systems, first try one random aligned address, and then try an
-  // aligned address derived from the value of |ret|.
-  constexpr int kExactSizeTries = 2;
-#else
-  // On 64 bit systems, try 3 random aligned addresses.
-  constexpr int kExactSizeTries = 3;
-#endif
-
-  for (int i = 0; i < kExactSizeTries; ++i) {
-    uintptr_t ret =
-        AllocPagesIncludingReserved(address, length, accessibility, page_tag,
-                                    file_descriptor_for_shared_alloc);
-    if (ret) {
-      // If the alignment is to our liking, we're done.
-      if ((ret & align_offset_mask) == align_offset) {
-        return ret;
-      }
-      // Free the memory and try again.
-      FreePages(ret, length);
-    } else {
-      // |ret| is null; if this try was unhinted, we're OOM.
-      if (internal::kHintIsAdvisory || !address) {
-        return 0;
-      }
-    }
-
-#if defined(ARCH_CPU_32_BITS)
-    // For small address spaces, try the first aligned address >= |ret|. Note
-    // |ret| may be null, in which case |address| becomes null. If
-    // |align_offset| is non-zero, this calculation may get us not the first,
-    // but the next matching address.
-    address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
-#else  // defined(ARCH_CPU_64_BITS)
-    // Keep trying random addresses on systems that have a large address space.
-    address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
-#endif
-  }
-
-  // Make a larger allocation so we can force alignment.
-  size_t try_length = length + (align - internal::PageAllocationGranularity());
-  PA_CHECK(try_length >= length);
-  uintptr_t ret;
-
-  do {
-    // Continue randomizing only on POSIX.
-    address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
-    ret =
-        AllocPagesIncludingReserved(address, try_length, accessibility,
-                                    page_tag, file_descriptor_for_shared_alloc);
-    // The retries are for Windows, where a race can steal our mapping on
-    // resize.
-  } while (ret && (ret = TrimMapping(ret, try_length, length, align,
-                                     align_offset, accessibility)) == 0);
-
-  return ret;
-}
-
-void FreePages(uintptr_t address, size_t length) {
-  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
-  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
-  internal::FreePagesInternal(address, length);
-  PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
-  g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
-}
-void FreePages(void* address, size_t length) {
-  FreePages(reinterpret_cast<uintptr_t>(address), length);
-}
-
-bool TrySetSystemPagesAccess(uintptr_t address,
-                             size_t length,
-                             PageAccessibilityConfiguration accessibility) {
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  return internal::TrySetSystemPagesAccessInternal(address, length,
-                                                   accessibility);
-}
-bool TrySetSystemPagesAccess(void* address,
-                             size_t length,
-                             PageAccessibilityConfiguration accessibility) {
-  return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
-                                 accessibility);
-}
-
-void SetSystemPagesAccess(uintptr_t address,
-                          size_t length,
-                          PageAccessibilityConfiguration accessibility) {
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  internal::SetSystemPagesAccessInternal(address, length, accessibility);
-}
-
-void DecommitSystemPages(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition) {
-  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  internal::DecommitSystemPagesInternal(address, length,
-                                        accessibility_disposition);
-}
-void DecommitSystemPages(
-    void* address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition) {
-  DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
-                      accessibility_disposition);
-}
-
-void DecommitAndZeroSystemPages(uintptr_t address,
-                                size_t length,
-                                PageTag page_tag) {
-  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  internal::DecommitAndZeroSystemPagesInternal(address, length, page_tag);
-}
-
-void DecommitAndZeroSystemPages(void* address,
-                                size_t length,
-                                PageTag page_tag) {
-  DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length,
-                             page_tag);
-}
-
-void RecommitSystemPages(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  PA_DCHECK(accessibility.permissions !=
-            PageAccessibilityConfiguration::kInaccessible);
-  internal::RecommitSystemPagesInternal(address, length, accessibility,
-                                        accessibility_disposition);
-}
-
-bool TryRecommitSystemPages(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // Duplicated because we want errors to be reported at a lower level in the
-  // crashing case.
-  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  PA_DCHECK(accessibility.permissions !=
-            PageAccessibilityConfiguration::kInaccessible);
-  return internal::TryRecommitSystemPagesInternal(
-      address, length, accessibility, accessibility_disposition);
-}
-
-void DiscardSystemPages(uintptr_t address, size_t length) {
-  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  internal::DiscardSystemPagesInternal(address, length);
-}
-void DiscardSystemPages(void* address, size_t length) {
-  DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
-}
-
-bool ReserveAddressSpace(size_t size) {
-  // To avoid deadlock, call only SystemAllocPages.
-  internal::ScopedGuard guard(GetReserveLock());
-  if (!s_reservation_address) {
-    uintptr_t mem = internal::SystemAllocPages(
-        0, size,
-        PageAccessibilityConfiguration(
-            PageAccessibilityConfiguration::kInaccessible),
-        PageTag::kChromium);
-    if (mem) {
-      // We guarantee this alignment when reserving address space.
-      PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
-      s_reservation_address = mem;
-      s_reservation_size = size;
-      return true;
-    }
-  }
-  return false;
-}
-
-bool ReleaseReservation() {
-  // To avoid deadlock, call only FreePages.
-  internal::ScopedGuard guard(GetReserveLock());
-  if (!s_reservation_address) {
-    return false;
-  }
-
-  FreePages(s_reservation_address, s_reservation_size);
-  s_reservation_address = 0;
-  s_reservation_size = 0;
-  return true;
-}
-
-bool HasReservationForTesting() {
-  internal::ScopedGuard guard(GetReserveLock());
-  return s_reservation_address;
-}
-
-uint32_t GetAllocPageErrorCode() {
-  return internal::s_allocPageErrorCode;
-}
-
-size_t GetTotalMappedSize() {
-  return g_total_mapped_address_space;
-}
-
-#if BUILDFLAG(IS_WIN)
-namespace {
-bool g_retry_on_commit_failure = false;
-}
-
-void SetRetryOnCommitFailure(bool retry_on_commit_failure) {
-  g_retry_on_commit_failure = retry_on_commit_failure;
-}
-
-bool GetRetryOnCommitFailure() {
-  return g_retry_on_commit_failure;
-}
-#endif
-
-}  // namespace partition_alloc
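The alignment arithmetic in NextAlignedWithOffset() in the file removed above can be checked in isolation. A minimal standalone sketch (plain C++, outside the PartitionAlloc tree; function name chosen here only for illustration) reproducing the documented examples for alignment=1024, requested_offset=64:

#include <cassert>
#include <cstdint>

// Same computation as partition_alloc::NextAlignedWithOffset(): the smallest
// address >= |address| whose remainder modulo |alignment| equals
// |requested_offset|. |alignment| must be a power of two.
uintptr_t NextAlignedWithOffsetSketch(uintptr_t address,
                                      uintptr_t alignment,
                                      uintptr_t requested_offset) {
  uintptr_t actual_offset = address & (alignment - 1);
  if (actual_offset <= requested_offset) {
    return address + requested_offset - actual_offset;
  }
  return address + alignment + requested_offset - actual_offset;
}

int main() {
  // The examples from the function's comment block.
  assert(NextAlignedWithOffsetSketch(64, 1024, 64) == 64);
  assert(NextAlignedWithOffsetSketch(65, 1024, 64) == 1088);
  assert(NextAlignedWithOffsetSketch(1024, 1024, 64) == 1088);
  assert(NextAlignedWithOffsetSketch(1088, 1024, 64) == 1088);
  assert(NextAlignedWithOffsetSketch(1089, 1024, 64) == 2112);
  assert(NextAlignedWithOffsetSketch(2048, 1024, 64) == 2112);
  return 0;
}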
diff --git a/base/allocator/partition_allocator/page_allocator.h b/base/allocator/partition_allocator/page_allocator.h
deleted file mode 100644
index 8e366cc..0000000
--- a/base/allocator/partition_allocator/page_allocator.h
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-struct PageAccessibilityConfiguration {
-  enum Permissions {
-    kInaccessible,
-    // This flag is valid only with AllocPages(), where it creates kInaccessible
-    // pages that may later be re-mapped as executable, on platforms which
-    // distinguish never-executable and maybe-executable pages.
-    kInaccessibleWillJitLater,
-    kRead,
-    kReadWrite,
-    // This flag is mapped to kReadWrite on systems that
-    // don't support MTE.
-    kReadWriteTagged,
-    // This flag is mapped to kReadExecute on systems
-    // that don't support Arm's BTI.
-    kReadExecuteProtected,
-    kReadExecute,
-    // This flag is deprecated and will go away soon.
-    // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
-    kReadWriteExecute,
-  };
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
-      : permissions(permissions) {}
-  constexpr PageAccessibilityConfiguration(
-      Permissions permissions,
-      ThreadIsolationOption thread_isolation)
-      : permissions(permissions), thread_isolation(thread_isolation) {}
-#else
-  constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
-      : permissions(permissions) {}
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-  Permissions permissions;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // Tag the page with a Memory Protection Key. Use 0 for none.
-  ThreadIsolationOption thread_isolation;
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-};
-
-// Use for De/RecommitSystemPages API.
-enum class PageAccessibilityDisposition {
-  // Enforces permission update (Decommit will set to
-  // PageAccessibilityConfiguration::kInaccessible;
-  // Recommit will set to whatever was requested, other than
-  // PageAccessibilityConfiguration::kInaccessible).
-  kRequireUpdate,
-  // Will not update permissions if the platform supports that (POSIX & Fuchsia
-  // only).
-  kAllowKeepForPerf,
-};
-
-// Some platforms (including macOS and some Linux-based ones) support tagged
-// memory regions, to help in debugging. On Android, these tags are used to name
-// anonymous mappings.
-//
-// kChromium is the default value, used to distinguish general
-// Chromium-originated allocations from other ones (e.g. from platform
-// libraries).
-enum class PageTag {
-  kSimulation = 251,      // Memory simulator tool.
-  kBlinkGC = 252,         // Blink GC pages.
-  kPartitionAlloc = 253,  // PartitionAlloc, no matter the partition.
-  kChromium = 254,        // Chromium page.
-  kV8 = 255,              // V8 heap pages.
-
-  kFirst = kSimulation,  // Minimum tag value.
-  kLast = kV8            // Maximum tag value.
-};
-
-// See
-// https://github.com/apple-oss-distributions/xnu/blob/5c2921b07a2480ab43ec66f5b9e41cb872bc554f/osfmk/mach/vm_statistics.h#L687
-static_assert(static_cast<int>(PageTag::kLast) >= 240,
-              "The first application-reserved tag on macOS is 240, see "
-              "vm_statistics.h in XNU.");
-static_assert(
-    static_cast<int>(PageTag::kLast) < 256,
-    "Tags are only 1 byte long on macOS, see vm_statistics.h in XNU.");
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-uintptr_t NextAlignedWithOffset(uintptr_t ptr,
-                                uintptr_t alignment,
-                                uintptr_t requested_offset);
-
-// Allocates one or more pages.
-//
-// The requested |address| is just a hint; the actual address returned may
-// differ. The returned address will be aligned to |align_offset| modulo |align|
-// bytes.
-//
-// |length|, |align| and |align_offset| are in bytes, and must be a multiple of
-// |PageAllocationGranularity()|. |length| and |align| must be non-zero.
-// |align_offset| must be less than |align|. |align| must be a power of two.
-//
-// If |address| is 0/nullptr, then a suitable and randomized address will be
-// chosen automatically.
-//
-// |accessibility| controls the permission of the allocated pages.
-// PageAccessibilityConfiguration::kInaccessible means uncommitted.
-//
-// |page_tag| is used on some platforms to identify the source of the
-// allocation.
-//
-// |file_descriptor_for_shared_alloc| is only used in mapping the shadow
-// pools to the same physical address as the real one in
-// PartitionAddressSpace::Init(). It should be ignored in other cases.
-//
-// This call will return 0/nullptr if the allocation cannot be satisfied.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-uintptr_t AllocPages(size_t length,
-                     size_t align,
-                     PageAccessibilityConfiguration accessibility,
-                     PageTag page_tag = PageTag::kChromium,
-                     int file_descriptor_for_shared_alloc = -1);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-uintptr_t AllocPages(uintptr_t address,
-                     size_t length,
-                     size_t align,
-                     PageAccessibilityConfiguration accessibility,
-                     PageTag page_tag = PageTag::kChromium);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* AllocPages(void* address,
-                 size_t length,
-                 size_t align,
-                 PageAccessibilityConfiguration accessibility,
-                 PageTag page_tag = PageTag::kChromium);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-uintptr_t AllocPagesWithAlignOffset(
-    uintptr_t address,
-    size_t length,
-    size_t align,
-    size_t align_offset,
-    PageAccessibilityConfiguration page_accessibility,
-    PageTag page_tag = PageTag::kChromium,
-    int file_descriptor_for_shared_alloc = -1);
-
-// Frees one or more pages starting at |address| and continuing for |length|
-// bytes.
-//
-// |address| and |length| must match a previous call to |AllocPages|. Therefore,
-// |address| must be aligned to |PageAllocationGranularity()| bytes, and
-// |length| must be a multiple of |PageAllocationGranularity()|.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void FreePages(uintptr_t address, size_t length);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void FreePages(void* address, size_t length);
-
-// Marks one or more system pages, starting at |address| with the given
-// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
-// bytes.
-//
-// Returns true if the permission change succeeded. In most cases you must
-// |CHECK| the result.
-[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration page_accessibility);
-[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
-    void* address,
-    size_t length,
-    PageAccessibilityConfiguration page_accessibility);
-
-// Marks one or more system pages, starting at |address| with the given
-// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
-// bytes.
-//
-// Performs a CHECK that the operation succeeds.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetSystemPagesAccess(uintptr_t address,
-                          size_t length,
-                          PageAccessibilityConfiguration page_accessibility);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetSystemPagesAccess(void* address,
-                          size_t length,
-                          PageAccessibilityConfiguration page_accessibility);
-
-// Decommits one or more system pages starting at |address| and continuing for
-// |length| bytes. |address| and |length| must be aligned to a system page
-// boundary.
-//
-// This API will crash if the operation cannot be performed!
-//
-// If disposition is PageAccessibilityDisposition::kRequireUpdate (recommended),
-// the decommitted pages will be made inaccessible before the call returns.
-// While it is always a programming error to access decommitted pages without
-// first recommitting them, callers may use
-// PageAccessibilityDisposition::kAllowKeepForPerf to allow the implementation
-// to skip changing permissions (use with care), for performance reasons (see
-// crrev.com/c/2567282 and crrev.com/c/2563038 for perf regressions encountered
-// in the past). Implementations may choose to always modify permissions, hence
-// accessing those pages may or may not trigger a fault.
-//
-// Decommitting means that physical resources (RAM or swap/pagefile) backing the
-// allocated virtual address range may be released back to the system, but the
-// address space is still allocated to the process (possibly using up page table
-// entries or other accounting resources). There is no guarantee that the pages
-// are zeroed, unless |DecommittedMemoryIsAlwaysZeroed()| is true.
-//
-// This operation may not be atomic on some platforms.
-//
-// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
-// processes will not fault when touching a committed memory region. There is
-// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
-// best-effort allocated resources on the first touch. If
-// PageAccessibilityDisposition::kRequireUpdate disposition is used, this API
-// behaves in a platform-agnostic way by simulating the Windows "decommit" state
-// by both discarding the region (allowing the OS to avoid swap operations)
-// *and* changing the page protections so accesses fault.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DecommitSystemPages(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DecommitSystemPages(
-    void* address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition);
-
-// Decommits one or more system pages starting at |address| and continuing for
-// |length| bytes. |address| and |length| must be aligned to a system page
-// boundary.
-//
-// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
-// zeroed and will always mark the region as inaccessible (the equivalent of
-// setting them to PageAccessibilityConfiguration::kInaccessible).
-//
-// This API will crash if the operation cannot be performed.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DecommitAndZeroSystemPages(uintptr_t address,
-                                size_t length,
-                                PageTag page_tag = PageTag::kChromium);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DecommitAndZeroSystemPages(void* address,
-                                size_t length,
-                                PageTag page_tag = PageTag::kChromium);
-
-// Whether decommitted memory is guaranteed to be zeroed when it is
-// recommitted. Do not assume that this will not change over time.
-constexpr PA_COMPONENT_EXPORT(
-    PARTITION_ALLOC) bool DecommittedMemoryIsAlwaysZeroed() {
-#if BUILDFLAG(IS_APPLE)
-  return false;
-#else
-  return true;
-#endif
-}
-
-// (Re)Commits one or more system pages, starting at |address| and continuing
-// for |length| bytes with the given |page_accessibility| (must not be
-// PageAccessibilityConfiguration::kInaccessible). |address| and |length|
-// must be aligned to a system page boundary.
-//
-// This API will crash if the operation cannot be performed!
-//
-// If disposition is PageAccessibilityDisposition::kRequireUpdate, the call
-// updates the pages to |page_accessibility|. This can be used regardless of
-// what disposition was used to decommit the pages.
-// PageAccessibilityDisposition::kAllowKeepForPerf allows the implementation
-// to leave the page permissions unchanged, if that improves performance. This
-// option can only be used if the pages were previously accessible and
-// decommitted with that same option.
-//
-// The memory will be zeroed when it is committed for the first time. However,
-// there is no such guarantee when memory is recommitted, unless
-// |DecommittedMemoryIsAlwaysZeroed()| is true.
-//
-// This operation may not be atomic on some platforms.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void RecommitSystemPages(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration page_accessibility,
-    PageAccessibilityDisposition accessibility_disposition);
-
-// Like RecommitSystemPages(), but returns false instead of crashing.
-[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TryRecommitSystemPages(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration page_accessibility,
-    PageAccessibilityDisposition accessibility_disposition);
-
-// Discard one or more system pages starting at |address| and continuing for
-// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
-//
-// Discarding is a hint to the system that the page is no longer required. The
-// hint may:
-//   - Do nothing.
-//   - Discard the page immediately, freeing up physical pages.
-//   - Discard the page at some time in the future in response to memory
-//   pressure.
-//
-// Only committed pages should be discarded. Discarding a page does not decommit
-// it, and it is valid to discard an already-discarded page. A read or write to
-// a discarded page will not fault.
-//
-// Reading from a discarded page may return the original page content, or a page
-// full of zeroes.
-//
-// Writing to a discarded page is the only guaranteed way to tell the system
-// that the page is required again. Once written to, the content of the page is
-// guaranteed stable once more. After being written to, the page content may be
-// based on the original page content, or a page of zeroes.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DiscardSystemPages(uintptr_t address, size_t length);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DiscardSystemPages(void* address, size_t length);
-
-// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
-// 0 for an |address| of 0.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-RoundUpToSystemPage(uintptr_t address) {
-  return (address + internal::SystemPageOffsetMask()) &
-         internal::SystemPageBaseMask();
-}
-
-// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
-// 0 for an |address| of 0.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-RoundDownToSystemPage(uintptr_t address) {
-  return address & internal::SystemPageBaseMask();
-}
-
-// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
-// Returns 0 for an |address| of 0.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-RoundUpToPageAllocationGranularity(uintptr_t address) {
-  return (address + internal::PageAllocationGranularityOffsetMask()) &
-         internal::PageAllocationGranularityBaseMask();
-}
-
-// Rounds down |address| to the previous multiple of
-// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
-RoundDownToPageAllocationGranularity(uintptr_t address) {
-  return address & internal::PageAllocationGranularityBaseMask();
-}
-
-// Reserves (at least) |size| bytes of address space, aligned to
-// |PageAllocationGranularity()|. This can be called early on to make it more
-// likely that large allocations will succeed. Returns true if the reservation
-// succeeded, false if the reservation failed or a reservation was already made.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReserveAddressSpace(size_t size);
-
-// Releases any reserved address space. |AllocPages| calls this automatically on
-// an allocation failure. External allocators may also call this on failure.
-//
-// Returns true when an existing reservation was released.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReleaseReservation();
-
-// Returns true if there is currently an address space reservation.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool HasReservationForTesting();
-
-// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
-// (POSIX) or |VirtualAlloc| (Windows) fails.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetAllocPageErrorCode();
-
-// Returns the total amount of mapped pages from all clients of
-// PageAllocator. These pages may or may not be committed. This is mostly useful
-// to assess address space pressure.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();
-
-#if BUILDFLAG(IS_WIN)
-// Sets whether to retry the allocation of pages when a commit failure
-// happens. This doesn't cover cases where the system is out of address space,
-// or reaches another limit.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetRetryOnCommitFailure(bool retry_on_commit_failure);
-bool GetRetryOnCommitFailure();
-#endif  // BUILDFLAG(IS_WIN)
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
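A minimal usage sketch of the API declared in the header removed above, assuming an in-tree build at this revision where these headers still exist; the helper function name is purely illustrative and most error handling is omitted. It allocates one allocation granule read/write, decommits it, recommits it, and frees it.

#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"

namespace {

void AllocateDecommitRecommitFree() {
  using partition_alloc::PageAccessibilityConfiguration;
  using partition_alloc::PageAccessibilityDisposition;

  const size_t granularity =
      partition_alloc::internal::PageAllocationGranularity();

  // One granule of read/write memory at a randomized address.
  uintptr_t region = partition_alloc::AllocPages(
      granularity, granularity,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadWrite),
      partition_alloc::PageTag::kChromium);
  if (!region) {
    return;  // Allocation failed.
  }

  // Release the physical backing but keep the address range reserved.
  partition_alloc::DecommitSystemPages(
      region, granularity, PageAccessibilityDisposition::kRequireUpdate);

  // Make the pages usable again before touching them.
  partition_alloc::RecommitSystemPages(
      region, granularity,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadWrite),
      PageAccessibilityDisposition::kRequireUpdate);

  partition_alloc::FreePages(region, granularity);
}

}  // namespace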
diff --git a/base/allocator/partition_allocator/page_allocator_constants.h b/base/allocator/partition_allocator/page_allocator_constants.h
deleted file mode 100644
index 380042d..0000000
--- a/base/allocator/partition_allocator/page_allocator_constants.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
-
-#include <mach/vm_page_size.h>
-
-// Although page allocator constants are not constexpr, they are run-time
-// constant. Because the underlying variables they access, such as vm_page_size,
-// are not marked const, the compiler normally has no way to know that they
-// don’t change and must obtain their values whenever it can't prove that they
-// haven't been modified, even if they had already been obtained previously.
-// Attaching __attribute__((const)) to these declarations allows these redundant
-// accesses to be omitted under optimization such as common subexpression
-// elimination.
-#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
-
-#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-// This should work for all POSIX (if needed), but currently all other
-// supported OS/architecture combinations use either hard-coded values
-// (such as x86) or have means to determine these values without needing
-// atomics (such as macOS on arm64).
-
-// Page allocator constants are run-time constant
-#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
-
-#include <unistd.h>
-#include <atomic>
-
-namespace partition_alloc::internal {
-
-// Holds the current page size and shift, where size = 1 << shift
-// Use PageAllocationGranularity(), PageAllocationGranularityShift()
-// to initialize and retrieve these values safely.
-struct PageCharacteristics {
-  std::atomic<size_t> size;
-  std::atomic<size_t> shift;
-};
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-extern PageCharacteristics page_characteristics;
-
-}  // namespace partition_alloc::internal
-
-#else
-
-// When defined, page size constants are fixed at compile time. When not
-// defined, they may vary at run time.
-#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
-
-// Use this macro to declare a function as constexpr or not based on whether
-// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
-#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
-
-#endif
-
-// Ability to name anonymous VMAs is available on some, but not all Linux-based
-// systems.
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
-#include <sys/prctl.h>
-
-#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
-#define LINUX_NAME_REGION 1
-#endif
-
-#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
-
-namespace partition_alloc::internal {
-
-// Forward declaration, implementation below
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PageAllocationGranularity();
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PageAllocationGranularityShift() {
-#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
-  // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
-  // sizes.  Since 64kB is the de facto standard on the platform and binaries
-  // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
-  // here.
-  return 16;  // 64kB
-#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64)
-  return 14;  // 16kB
-#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
-  return static_cast<size_t>(vm_page_shift);
-#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-  // arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
-  // page sizes. Retrieve from or initialize cache.
-  size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
-  if (PA_UNLIKELY(shift == 0)) {
-    shift = static_cast<size_t>(
-        __builtin_ctz((unsigned int)PageAllocationGranularity()));
-    page_characteristics.shift.store(shift, std::memory_order_relaxed);
-  }
-  return shift;
-#else
-  return 12;  // 4kB
-#endif
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PageAllocationGranularity() {
-#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
-  // This is literally equivalent to |1 << PageAllocationGranularityShift()|
-  // below, but was separated out for IS_APPLE to avoid << on a non-constexpr.
-  return vm_page_size;
-#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-  // arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
-  // initialize cache.
-  size_t size = page_characteristics.size.load(std::memory_order_relaxed);
-  if (PA_UNLIKELY(size == 0)) {
-    size = static_cast<size_t>(getpagesize());
-    page_characteristics.size.store(size, std::memory_order_relaxed);
-  }
-  return size;
-#else
-  return 1 << PageAllocationGranularityShift();
-#endif
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PageAllocationGranularityOffsetMask() {
-  return PageAllocationGranularity() - 1;
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PageAllocationGranularityBaseMask() {
-  return ~PageAllocationGranularityOffsetMask();
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-SystemPageShift() {
-  // On Windows allocation granularity is higher than the page size. This comes
-  // into play when reserving address space range (allocation granularity),
-  // compared to committing pages into memory (system page granularity).
-#if BUILDFLAG(IS_WIN)
-  return 12;  // 4096=1<<12
-#else
-  return PageAllocationGranularityShift();
-#endif
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-SystemPageSize() {
-#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
-    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
-  // This is literally equivalent to |1 << SystemPageShift()| below, but was
-  // separated out for 64-bit IS_APPLE and arm64 on Linux to avoid << on a
-  // non-constexpr.
-  return PageAllocationGranularity();
-#else
-  return 1 << SystemPageShift();
-#endif
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-SystemPageOffsetMask() {
-  return SystemPageSize() - 1;
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-SystemPageBaseMask() {
-  return ~SystemPageOffsetMask();
-}
-
-constexpr size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
-constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
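The offset/base mask helpers removed above are plain power-of-two bit arithmetic. A standalone sketch, assuming a fixed 4 kB system page (shift = 12) purely for illustration:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uintptr_t kSystemPageSize = uintptr_t{1} << 12;  // SystemPageSize()
  constexpr uintptr_t kOffsetMask = kSystemPageSize - 1;     // SystemPageOffsetMask()
  constexpr uintptr_t kBaseMask = ~kOffsetMask;              // SystemPageBaseMask()

  // Rounding up/down to a page boundary with the two masks.
  assert(((uintptr_t{1} + kOffsetMask) & kBaseMask) == 4096u);
  assert(((uintptr_t{4096} + kOffsetMask) & kBaseMask) == 4096u);
  assert((uintptr_t{8191} & kBaseMask) == 4096u);
  return 0;
}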
diff --git a/base/allocator/partition_allocator/page_allocator_internal.h b/base/allocator/partition_allocator/page_allocator_internal.h
deleted file mode 100644
index 1ae26c5..0000000
--- a/base/allocator/partition_allocator/page_allocator_internal.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-
-namespace partition_alloc::internal {
-
-uintptr_t SystemAllocPages(uintptr_t hint,
-                           size_t length,
-                           PageAccessibilityConfiguration accessibility,
-                           PageTag page_tag,
-                           int file_descriptor_for_shared_alloc = -1);
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
diff --git a/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h b/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
deleted file mode 100644
index 7662895..0000000
--- a/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// This file implements memory allocation primitives for PageAllocator using
-// Fuchsia's VMOs (Virtual Memory Objects). VMO API is documented in
-// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
-// object that corresponds to a set of memory pages. VMO pages may be mapped
-// to an address space. The code below creates VMOs for each memory allocations
-// and maps them to the default address space of the current process.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
-
-#include <fidl/fuchsia.kernel/cpp/fidl.h>
-#include <lib/component/incoming/cpp/protocol.h>
-#include <lib/zx/resource.h>
-#include <lib/zx/vmar.h>
-#include <lib/zx/vmo.h>
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc::internal {
-
-namespace {
-
-zx::resource GetVmexResource() {
-  auto vmex_resource_client =
-      component::Connect<fuchsia_kernel::VmexResource>();
-  if (vmex_resource_client.is_error()) {
-    PA_LOG(ERROR) << "Connect(VmexResource):"
-                  << vmex_resource_client.status_string();
-    return {};
-  }
-
-  fidl::SyncClient sync_vmex_resource_client(
-      std::move(vmex_resource_client.value()));
-  auto result = sync_vmex_resource_client->Get();
-  if (result.is_error()) {
-    PA_LOG(ERROR) << "VmexResource.Get():"
-                  << result.error_value().FormatDescription();
-    return {};
-  }
-
-  return std::move(result->resource());
-}
-
-const zx::resource& VmexResource() {
-  static base::NoDestructor<zx::resource> vmex_resource(GetVmexResource());
-  return *vmex_resource;
-}
-
-// Returns VMO name for a PageTag.
-const char* PageTagToName(PageTag tag) {
-  switch (tag) {
-    case PageTag::kBlinkGC:
-      return "cr_blink_gc";
-    case PageTag::kPartitionAlloc:
-      return "cr_partition_alloc";
-    case PageTag::kChromium:
-      return "cr_chromium";
-    case PageTag::kV8:
-      return "cr_v8";
-    case PageTag::kSimulation:
-      PA_NOTREACHED();
-  }
-  PA_NOTREACHED();
-}
-
-zx_vm_option_t PageAccessibilityToZxVmOptions(
-    PageAccessibilityConfiguration accessibility) {
-  switch (accessibility.permissions) {
-    case PageAccessibilityConfiguration::kRead:
-      return ZX_VM_PERM_READ;
-    case PageAccessibilityConfiguration::kReadWrite:
-    case PageAccessibilityConfiguration::kReadWriteTagged:
-      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
-    case PageAccessibilityConfiguration::kReadExecuteProtected:
-    case PageAccessibilityConfiguration::kReadExecute:
-      return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
-    case PageAccessibilityConfiguration::kReadWriteExecute:
-      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
-    case PageAccessibilityConfiguration::kInaccessible:
-    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
-      return 0;
-  };
-  PA_NOTREACHED();
-}
-
-}  // namespace
-
-// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
-// |hint| is not advisory.
-constexpr bool kHintIsAdvisory = false;
-
-std::atomic<int32_t> s_allocPageErrorCode{0};
-
-uintptr_t SystemAllocPagesInternal(
-    uintptr_t hint,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    [[maybe_unused]] int file_descriptor_for_shared_alloc) {
-  zx::vmo vmo;
-  zx_status_t status = zx::vmo::create(length, 0, &vmo);
-  if (status != ZX_OK) {
-    PA_ZX_DLOG(INFO, status) << "zx_vmo_create";
-    return 0;
-  }
-
-  const char* vmo_name = PageTagToName(page_tag);
-  status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));
-
-  // VMO names are used only for debugging, so failure to set a name is not
-  // fatal.
-  PA_ZX_DCHECK(status == ZX_OK, status);
-
-  if (accessibility.permissions ==
-          PageAccessibilityConfiguration::kInaccessibleWillJitLater ||
-      accessibility.permissions ==
-          PageAccessibilityConfiguration::kReadWriteExecute) {
-    // V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
-    // in the new VMO.
-    status = vmo.replace_as_executable(VmexResource(), &vmo);
-    if (status != ZX_OK) {
-      PA_ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
-      return 0;
-    }
-  }
-
-  zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);
-
-  uint64_t vmar_offset = 0;
-  if (hint) {
-    vmar_offset = hint;
-    options |= ZX_VM_SPECIFIC;
-  }
-
-  uint64_t address;
-  status = zx::vmar::root_self()->map(options, vmar_offset, vmo,
-                                      /*vmo_offset=*/0, length, &address);
-  if (status != ZX_OK) {
-    // map() is expected to fail if |hint| is set to an already-in-use location.
-    if (!hint) {
-      PA_ZX_DLOG(ERROR, status) << "zx_vmar_map";
-    }
-    return 0;
-  }
-
-  return address;
-}
-
-uintptr_t TrimMappingInternal(uintptr_t base_address,
-                              size_t base_length,
-                              size_t trim_length,
-                              PageAccessibilityConfiguration accessibility,
-                              size_t pre_slack,
-                              size_t post_slack) {
-  PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
-
-  // Unmap head if necessary.
-  if (pre_slack) {
-    zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
-    PA_ZX_CHECK(status == ZX_OK, status);
-  }
-
-  // Unmap tail if necessary.
-  if (post_slack) {
-    zx_status_t status = zx::vmar::root_self()->unmap(
-        base_address + pre_slack + trim_length, post_slack);
-    PA_ZX_CHECK(status == ZX_OK, status);
-  }
-
-  return base_address + pre_slack;
-}
-
-bool TrySetSystemPagesAccessInternal(
-    uint64_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility) {
-  zx_status_t status = zx::vmar::root_self()->protect(
-      PageAccessibilityToZxVmOptions(accessibility), address, length);
-  return status == ZX_OK;
-}
-
-void SetSystemPagesAccessInternal(
-    uint64_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility) {
-  zx_status_t status = zx::vmar::root_self()->protect(
-      PageAccessibilityToZxVmOptions(accessibility), address, length);
-  PA_ZX_CHECK(status == ZX_OK, status);
-}
-
-void FreePagesInternal(uint64_t address, size_t length) {
-  zx_status_t status = zx::vmar::root_self()->unmap(address, length);
-  PA_ZX_CHECK(status == ZX_OK, status);
-}
-
-void DiscardSystemPagesInternal(uint64_t address, size_t length) {
-  zx_status_t status = zx::vmar::root_self()->op_range(
-      ZX_VMO_OP_DECOMMIT, address, length, nullptr, 0);
-  PA_ZX_CHECK(status == ZX_OK, status);
-}
-
-void DecommitSystemPagesInternal(
-    uint64_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition) {
-  if (accessibility_disposition ==
-      PageAccessibilityDisposition::kRequireUpdate) {
-    SetSystemPagesAccess(address, length,
-                         PageAccessibilityConfiguration(
-                             PageAccessibilityConfiguration::kInaccessible));
-  }
-
-  DiscardSystemPagesInternal(address, length);
-}
-
-void DecommitAndZeroSystemPagesInternal(uintptr_t address,
-                                        size_t length,
-                                        PageTag page_tag) {
-  SetSystemPagesAccess(address, length,
-                       PageAccessibilityConfiguration(
-                           PageAccessibilityConfiguration::kInaccessible));
-
-  DiscardSystemPagesInternal(address, length);
-}
-
-void RecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // On Fuchsia systems, the caller needs to simply read the memory to recommit
-  // it. However, if decommit changed the permissions, recommit has to change
-  // them back.
-  if (accessibility_disposition ==
-      PageAccessibilityDisposition::kRequireUpdate) {
-    SetSystemPagesAccess(address, length, accessibility);
-  }
-}
-
-bool TryRecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // On Fuchsia systems, the caller needs to simply read the memory to recommit
-  // it. However, if decommit changed the permissions, recommit has to change
-  // them back.
-  if (accessibility_disposition ==
-      PageAccessibilityDisposition::kRequireUpdate) {
-    return TrySetSystemPagesAccess(address, length, accessibility);
-  }
-  return true;
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
diff --git a/base/allocator/partition_allocator/page_allocator_internals_posix.cc b/base/allocator/partition_allocator/page_allocator_internals_posix.cc
deleted file mode 100644
index d84f155..0000000
--- a/base/allocator/partition_allocator/page_allocator_internals_posix.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-
-#include <sys/mman.h>
-
-// PA_PROT_BTI requests a page that supports BTI landing pads.
-#define PA_PROT_BTI 0x10
-// PA_PROT_MTE requests a page that's suitable for memory tagging.
-#define PA_PROT_MTE 0x20
-
-namespace partition_alloc::internal {
-
-int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
-  switch (accessibility.permissions) {
-    case PageAccessibilityConfiguration::kRead:
-      return PROT_READ;
-    case PageAccessibilityConfiguration::kReadWriteTagged:
-#if defined(ARCH_CPU_ARM64)
-      return PROT_READ | PROT_WRITE |
-             (base::CPU::GetInstanceNoAllocation().has_mte() ? PA_PROT_MTE : 0);
-#else
-      [[fallthrough]];
-#endif
-    case PageAccessibilityConfiguration::kReadWrite:
-      return PROT_READ | PROT_WRITE;
-    case PageAccessibilityConfiguration::kReadExecuteProtected:
-      return PROT_READ | PROT_EXEC |
-             (base::CPU::GetInstanceNoAllocation().has_bti() ? PA_PROT_BTI : 0);
-    case PageAccessibilityConfiguration::kReadExecute:
-      return PROT_READ | PROT_EXEC;
-    case PageAccessibilityConfiguration::kReadWriteExecute:
-      return PROT_READ | PROT_WRITE | PROT_EXEC;
-    case PageAccessibilityConfiguration::kInaccessible:
-    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
-      return PROT_NONE;
-  }
-}
-
-}  // namespace partition_alloc::internal
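The POSIX backend removed above ultimately reduces to mmap() and mprotect(). A standalone POSIX sketch (not Chromium code) of the reserve-inaccessible-then-commit pattern used for kInaccessible pages:

#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>

int main() {
  const size_t length = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  // Reserve one page of address space without committing it (the moral
  // equivalent of PageAccessibilityConfiguration::kInaccessible).
  void* p =
      mmap(nullptr, length, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (p == MAP_FAILED) {
    return 1;
  }

  // "Commit" by making the page readable and writable, then touch it.
  if (mprotect(p, length, PROT_READ | PROT_WRITE) != 0) {
    munmap(p, length);
    return 1;
  }
  static_cast<char*>(p)[0] = 42;

  munmap(p, length);
  return 0;
}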
diff --git a/base/allocator/partition_allocator/page_allocator_internals_posix.h b/base/allocator/partition_allocator/page_allocator_internals_posix.h
deleted file mode 100644
index 425f3ef..0000000
--- a/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
-
-#include <algorithm>
-#include <atomic>
-#include <cerrno>
-#include <cstdint>
-#include <cstring>
-
-#include <sys/mman.h>
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE)
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h"
-#if BUILDFLAG(IS_IOS)
-#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
-#elif BUILDFLAG(IS_MAC)
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-#else
-#error "Unknown platform"
-#endif
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h"
-
-#include <Availability.h>
-#include <Security/Security.h>
-#include <mach/mach.h>
-#endif
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
-#include <sys/prctl.h>
-#endif
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-#include <sys/resource.h>
-#endif
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#if BUILDFLAG(IS_MAC)
-
-// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it’s
-// available on iOS and other Apple operating systems. It is, in fact, present
-// on the system since macOS 10.12.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wavailability"
-uint32_t SecTaskGetCodeSignStatus(SecTaskRef task) API_AVAILABLE(macos(10.12));
-#pragma clang diagnostic pop
-
-#endif  // BUILDFLAG(IS_MAC)
-
-namespace partition_alloc::internal {
-
-namespace {
-
-#if defined(LINUX_NAME_REGION)
-
-void NameRegion(void* start, size_t length, PageTag page_tag) {
-  // Important: All the names should be string literals. As per prctl.h in
-  // //third_party/android_toolchain/ndk the kernel keeps a pointer to the name
-  // instead of copying it.
-  //
-  // Having the name in .rodata ensures that the pointer remains valid as
-  // long as the mapping is alive.
-  const char* name = nullptr;
-  switch (page_tag) {
-    case PageTag::kSimulation:
-      name = "simulation";
-      break;
-    case PageTag::kBlinkGC:
-      name = "blink_gc";
-      break;
-    case PageTag::kPartitionAlloc:
-      name = "partition_alloc";
-      break;
-    case PageTag::kChromium:
-      name = "chromium";
-      break;
-    case PageTag::kV8:
-      name = "v8";
-      break;
-    default:
-      PA_NOTREACHED();
-      break;
-  }
-
-  // No error checking on purpose, testing only.
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, length, name);
-}
-
-#endif  // defined(LINUX_NAME_REGION)
-
-#if BUILDFLAG(IS_MAC)
-// Tests whether the version of macOS supports the MAP_JIT flag and if the
-// current process is signed with the hardened runtime and the allow-jit
-// entitlement, returning whether MAP_JIT should be used to allocate regions
-// that will contain JIT-compiled executable code.
-bool UseMapJit() {
-  // Until determining that the hardened runtime is enabled, early returns will
-  // return true, so that MAP_JIT will be used. This is important on arm64,
-  // which only allows pages to be simultaneously writable and executable when
-  // in a region allocated with MAP_JIT, regardless of code signing options. On
-  // arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
-  // executable fails with EPERM. Although this is not enforced on x86_64,
-  // MAP_JIT is harmless in that case.
-
-  base::ScopedCFTypeRef<SecTaskRef> task(
-      SecTaskCreateFromSelf(kCFAllocatorDefault));
-  if (!task) {
-    return true;
-  }
-
-  uint32_t flags = SecTaskGetCodeSignStatus(task);
-  if (!(flags & kSecCodeSignatureRuntime)) {
-    // The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
-    // == CS_RUNTIME.
-    return true;
-  }
-
-  // The hardened runtime is enabled. From this point on, early returns must
-  // return false, indicating that MAP_JIT is not to be used. It’s an error
-  // (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
-  // entitlement is specified.
-
-  base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
-      SecTaskCopyValueForEntitlement(
-          task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
-  if (!jit_entitlement) {
-    return false;
-  }
-
-  return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
-         kCFBooleanTrue;
-}
-#elif BUILDFLAG(IS_IOS)
-bool UseMapJit() {
-// Always enable MAP_JIT in simulator as it is supported unconditionally.
-#if TARGET_IPHONE_SIMULATOR
-  return true;
-#else
-  // TODO(https://crbug.com/1413818): Fill this out when the API is
-  // available.
-  return false;
-#endif  // TARGET_IPHONE_SIMULATOR
-}
-#endif  // BUILDFLAG(IS_IOS)
-}  // namespace
-
-// |mmap| uses a nearby address if the hint address is blocked.
-constexpr bool kHintIsAdvisory = true;
-std::atomic<int32_t> s_allocPageErrorCode{0};
-
-int GetAccessFlags(PageAccessibilityConfiguration accessibility);
-
-uintptr_t SystemAllocPagesInternal(uintptr_t hint,
-                                   size_t length,
-                                   PageAccessibilityConfiguration accessibility,
-                                   PageTag page_tag,
-                                   int file_descriptor_for_shared_alloc) {
-#if BUILDFLAG(IS_APPLE)
-  // Use a custom tag to make it easier to distinguish Partition Alloc regions
-  // in vmmap(1). Tags between 240-255 are supported.
-  int fd = file_descriptor_for_shared_alloc == -1
-               ? VM_MAKE_TAG(static_cast<int>(page_tag))
-               : file_descriptor_for_shared_alloc;
-#else
-  int fd = file_descriptor_for_shared_alloc;
-#endif
-
-  int access_flag = GetAccessFlags(accessibility);
-  int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
-
-#if BUILDFLAG(IS_APPLE)
-  // On macOS, executables that are code signed with the "runtime" option cannot
-  // execute writable memory by default. They can opt into this capability by
-  // specifying the "com.apple.security.cs.allow-jit" code signing entitlement
-  // and allocating the region with the MAP_JIT flag.
-  static const bool kUseMapJit = UseMapJit();
-  if (accessibility.permissions ==
-          PageAccessibilityConfiguration::kInaccessibleWillJitLater &&
-      kUseMapJit) {
-    map_flags |= MAP_JIT;
-  }
-#endif
-
-  void* ret = mmap(reinterpret_cast<void*>(hint), length, access_flag,
-                   map_flags, fd, 0);
-  if (ret == MAP_FAILED) {
-    s_allocPageErrorCode = errno;
-    ret = nullptr;
-  }
-
-#if defined(LINUX_NAME_REGION)
-  if (ret) {
-    NameRegion(ret, length, page_tag);
-  }
-#endif
-
-  return reinterpret_cast<uintptr_t>(ret);
-}
-
-bool TrySetSystemPagesAccessInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility) {
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  if (accessibility.thread_isolation.enabled) {
-    return 0 == MprotectWithThreadIsolation(reinterpret_cast<void*>(address),
-                                            length,
-                                            GetAccessFlags(accessibility),
-                                            accessibility.thread_isolation);
-  }
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
-                                       GetAccessFlags(accessibility)));
-}
-
-void SetSystemPagesAccessInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility) {
-  int access_flags = GetAccessFlags(accessibility);
-  int ret;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  if (accessibility.thread_isolation.enabled) {
-    ret = MprotectWithThreadIsolation(reinterpret_cast<void*>(address), length,
-                                      GetAccessFlags(accessibility),
-                                      accessibility.thread_isolation);
-  } else
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  {
-    ret = PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
-                                   GetAccessFlags(accessibility)));
-  }
-
-  // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
-  // kernel data structures cannot be allocated, (2) the address range is
-  // invalid, or (3) this would split an existing mapping in a way that would
-  // exceed the maximum number of allowed mappings.
-  //
-  // None of these is very likely, but we still get a lot of crashes here.
-  // This is because setrlimit(RLIMIT_DATA)'s limit is checked and enforced
-  // here if the access flags match a "data" mapping, which in our case would
-  // be MAP_PRIVATE | MAP_ANONYMOUS, and PROT_WRITE. See the call to
-  // may_expand_vm() in mm/mprotect.c in the kernel for details.
-  //
-  // In this case, we are almost certainly bumping into the sandbox limit, mark
-  // the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
-  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE)) {
-    OOM_CRASH(length);
-  }
-
-  PA_PCHECK(0 == ret);
-}
-
-void FreePagesInternal(uintptr_t address, size_t length) {
-  PA_PCHECK(0 == munmap(reinterpret_cast<void*>(address), length));
-}
-
-uintptr_t TrimMappingInternal(uintptr_t base_address,
-                              size_t base_length,
-                              size_t trim_length,
-                              PageAccessibilityConfiguration accessibility,
-                              size_t pre_slack,
-                              size_t post_slack) {
-  uintptr_t ret = base_address;
-  // We can resize the allocation run. Release unneeded memory before and after
-  // the aligned range.
-  if (pre_slack) {
-    FreePages(base_address, pre_slack);
-    ret = base_address + pre_slack;
-  }
-  if (post_slack) {
-    FreePages(ret + trim_length, post_slack);
-  }
-  return ret;
-}
-
-void DecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // In POSIX, there is no decommit concept. Discarding is an effective way of
-  // implementing the Windows semantics where the OS is allowed to not swap the
-  // pages in the region.
-  DiscardSystemPages(address, length);
-
-  bool change_permissions =
-      accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // This is not guaranteed; show that we're serious.
-  //
-  // More specifically, several callers have had issues with assuming that
-  // memory is zeroed; this would hopefully make these bugs more visible. We
-  // don't memset() everything, because ranges can be very large, and doing it
-  // over the entire range could make Chrome unusable with
-  // BUILDFLAG(PA_DCHECK_IS_ON).
-  //
-  // Only do it when we are about to change the permissions, since we don't know
-  // the previous permissions, and cannot restore them.
-  if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
-    // Memory may not be writable.
-    size_t size = std::min(length, 2 * SystemPageSize());
-    void* ptr = reinterpret_cast<void*>(address);
-    PA_CHECK(mprotect(ptr, size, PROT_WRITE) == 0);
-    memset(ptr, 0xcc, size);
-  }
-#endif
-
-  // Make pages inaccessible, unless the caller requested to keep permissions.
-  //
-  // Note, there is a small window between these calls when the pages can be
-  // incorrectly touched and brought back to memory. Not ideal, but doing those
-  // operations in the opposite order resulted in PMF regression on Mac (see
-  // crbug.com/1153021).
-  if (change_permissions) {
-    SetSystemPagesAccess(address, length,
-                         PageAccessibilityConfiguration(
-                             PageAccessibilityConfiguration::kInaccessible));
-  }
-}
-
-void DecommitAndZeroSystemPagesInternal(uintptr_t address,
-                                        size_t length,
-                                        PageTag page_tag) {
-  int fd = -1;
-#if BUILDFLAG(IS_APPLE)
-  fd = VM_MAKE_TAG(static_cast<int>(page_tag));
-#endif
-
-  // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
-  // a MAP_FIXED request is successful, then any previous mappings [...] for
-  // those whole pages containing any part of the address range [pa,pa+len)
-  // shall be removed, as if by an appropriate call to munmap(), before the
-  // new mapping is established." As a consequence, the memory will be
-  // zero-initialized on next access.
-  void* ptr = reinterpret_cast<void*>(address);
-  void* ret = mmap(ptr, length, PROT_NONE,
-                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
-  PA_CHECK(ptr == ret);
-  // Since we just remapped the region, we need to set its name again.
-#if defined(LINUX_NAME_REGION)
-  NameRegion(ret, length, page_tag);
-#endif
-}
-
-void RecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // On POSIX systems, the caller needs to simply read the memory to recommit
-  // it. However, if decommit changed the permissions, recommit has to change
-  // them back.
-  if (accessibility_disposition ==
-      PageAccessibilityDisposition::kRequireUpdate) {
-    SetSystemPagesAccess(address, length, accessibility);
-  }
-
-#if BUILDFLAG(IS_APPLE)
-  // On macOS, to update accounting, we need to make another syscall. For more
-  // details, see https://crbug.com/823915.
-  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
-#endif
-}
-
-bool TryRecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // On POSIX systems, the caller needs to simply read the memory to recommit
-  // it. However, if decommit changed the permissions, recommit has to change
-  // them back.
-  if (accessibility_disposition ==
-      PageAccessibilityDisposition::kRequireUpdate) {
-    bool ok = TrySetSystemPagesAccess(address, length, accessibility);
-    if (!ok) {
-      return false;
-    }
-  }
-
-#if BUILDFLAG(IS_APPLE)
-  // On macOS, to update accounting, we need to make another syscall. For more
-  // details, see https://crbug.com/823915.
-  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
-#endif
-
-  return true;
-}
-
-void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
-  void* ptr = reinterpret_cast<void*>(address);
-#if BUILDFLAG(IS_APPLE)
-  int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
-  if (ret) {
-    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
-    ret = madvise(ptr, length, MADV_DONTNEED);
-  }
-  PA_PCHECK(ret == 0);
-#else   // BUILDFLAG(IS_APPLE)
-  // We have experimented with other flags, but with suboptimal results.
-  //
-  // MADV_FREE (Linux): Makes our memory measurements less predictable;
-  // performance benefits unclear.
-  //
-  // Therefore, we just do the simple thing: MADV_DONTNEED.
-  PA_PCHECK(0 == madvise(ptr, length, MADV_DONTNEED));
-#endif  // BUILDFLAG(IS_APPLE)
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
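Editor's note: the deleted POSIX header above only declares GetAccessFlags(); its definition lives elsewhere in PartitionAlloc. For orientation, here is a minimal sketch of what such a mapping could look like, assuming the PageAccessibilityConfiguration permission enumerators visible in this diff and the standard <sys/mman.h> PROT_* flags; the authoritative upstream mapping (e.g. MTE/BTI-specific flags on arm64) may differ.

// Hedged sketch only: a plausible POSIX permission mapping, not the
// authoritative PartitionAlloc implementation. Assumes page_allocator.h for
// the PageAccessibilityConfiguration type.
#include <sys/mman.h>

int GetAccessFlagsSketch(PageAccessibilityConfiguration accessibility) {
  switch (accessibility.permissions) {
    case PageAccessibilityConfiguration::kRead:
      return PROT_READ;
    case PageAccessibilityConfiguration::kReadWrite:
    case PageAccessibilityConfiguration::kReadWriteTagged:
      return PROT_READ | PROT_WRITE;
    case PageAccessibilityConfiguration::kReadExecute:
    case PageAccessibilityConfiguration::kReadExecuteProtected:
      return PROT_READ | PROT_EXEC;
    case PageAccessibilityConfiguration::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    case PageAccessibilityConfiguration::kInaccessible:
    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
      return PROT_NONE;
  }
  return PROT_NONE;  // Unreachable for valid enumerator values.
}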
diff --git a/base/allocator/partition_allocator/page_allocator_internals_win.h b/base/allocator/partition_allocator/page_allocator_internals_win.h
deleted file mode 100644
index 1ac45fb..0000000
--- a/base/allocator/partition_allocator/page_allocator_internals_win.h
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_internal.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc::internal {
-
-// |VirtualAlloc| will fail if allocation at the hint address is blocked.
-constexpr bool kHintIsAdvisory = false;
-std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
-
-bool IsOutOfMemory(DWORD error) {
-  // From
-  // https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
-  switch (error) {
-    // Page file is being extended.
-    case ERROR_COMMITMENT_MINIMUM:
-      // Page file is too small.
-    case ERROR_COMMITMENT_LIMIT:
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-    // Not enough memory resources are available to process this command.
-    //
-    // It is not entirely clear whether this error pertains to out of address
-    // space errors, or the kernel being out of memory. Only include it for 64
-    // bit architectures, since address space issues are unlikely there.
-    case ERROR_NOT_ENOUGH_MEMORY:
-#endif
-    // Insufficient quota to complete the requested service.
-    case ERROR_PAGEFILE_QUOTA:
-      return true;
-    default:
-      return false;
-  }
-}
-
-void* VirtualAllocWithRetry(void* address,
-                            size_t size,
-                            DWORD type_flags,
-                            DWORD access_flags) {
-  void* ret = nullptr;
-  // Failure to commit memory can be temporary, in at least two cases:
-  // - The page file is getting extended.
-  // - Another process terminates (most likely because of OOM)
-  //
-  // Wait and retry, since the alternative is crashing. Note that if we
-  // selectively apply this... hum... beautiful hack to some process types only,
-  // "some process crashing" may very well be one of ours, which may be
-  // desirable (e.g. some processes like the browser are more important than
-  // others).
-  //
-  // This approach has been shown to be effective for Firefox, see
-  // crbug.com/1392738 for context. Constants below are accordingly taken from
-  // Firefox as well.
-  constexpr int kMaxTries = 10;
-  constexpr int kDelayMs = 50;
-
-  bool should_retry = GetRetryOnCommitFailure() && (type_flags & MEM_COMMIT) &&
-                      (access_flags != PAGE_NOACCESS);
-  for (int tries = 0; tries < kMaxTries; tries++) {
-    ret = VirtualAlloc(address, size, type_flags, access_flags);
-    // Only retry for commit failures. If this is an address space problem
-    // (e.g. caller asked for an address which is not available), this is
-    // unlikely to be resolved by waiting.
-    if (ret || !should_retry || !IsOutOfMemory(GetLastError())) {
-      break;
-    }
-
-    Sleep(kDelayMs);
-  }
-  return ret;
-}
-
-int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
-  switch (accessibility.permissions) {
-    case PageAccessibilityConfiguration::kRead:
-      return PAGE_READONLY;
-    case PageAccessibilityConfiguration::kReadWrite:
-    case PageAccessibilityConfiguration::kReadWriteTagged:
-      return PAGE_READWRITE;
-    case PageAccessibilityConfiguration::kReadExecute:
-    case PageAccessibilityConfiguration::kReadExecuteProtected:
-      return PAGE_EXECUTE_READ;
-    case PageAccessibilityConfiguration::kReadWriteExecute:
-      return PAGE_EXECUTE_READWRITE;
-    case PageAccessibilityConfiguration::kInaccessible:
-    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
-      return PAGE_NOACCESS;
-  }
-  PA_NOTREACHED();
-}
-
-uintptr_t SystemAllocPagesInternal(
-    uintptr_t hint,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    [[maybe_unused]] int file_descriptor_for_shared_alloc) {
-  const DWORD access_flag = GetAccessFlags(accessibility);
-  const DWORD type_flags =
-      (access_flag == PAGE_NOACCESS) ? MEM_RESERVE : (MEM_RESERVE | MEM_COMMIT);
-  void* ret = VirtualAllocWithRetry(reinterpret_cast<void*>(hint), length,
-                                    type_flags, access_flag);
-  if (ret == nullptr) {
-    s_allocPageErrorCode = GetLastError();
-  }
-  return reinterpret_cast<uintptr_t>(ret);
-}
-
-uintptr_t TrimMappingInternal(uintptr_t base_address,
-                              size_t base_length,
-                              size_t trim_length,
-                              PageAccessibilityConfiguration accessibility,
-                              size_t pre_slack,
-                              size_t post_slack) {
-  uintptr_t ret = base_address;
-  if (pre_slack || post_slack) {
-    // We cannot resize the allocation run. Free it and retry at the aligned
-    // address within the freed range.
-    ret = base_address + pre_slack;
-    FreePages(base_address, base_length);
-    ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium);
-  }
-  return ret;
-}
-
-bool TrySetSystemPagesAccessInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility) {
-  void* ptr = reinterpret_cast<void*>(address);
-  if (GetAccessFlags(accessibility) == PAGE_NOACCESS) {
-    return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
-  }
-  // Call the retry path even though this function can fail, because callers of
-  // this are likely to crash the process when this function fails, and we don't
-  // want that for transient failures.
-  return nullptr != VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
-                                          GetAccessFlags(accessibility));
-}
-
-void SetSystemPagesAccessInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility) {
-  void* ptr = reinterpret_cast<void*>(address);
-  const DWORD access_flag = GetAccessFlags(accessibility);
-  if (access_flag == PAGE_NOACCESS) {
-    if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
-      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
-      // report we get the error number.
-      PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
-    }
-  } else {
-    if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT, access_flag)) {
-      int32_t error = GetLastError();
-      if (error == ERROR_COMMITMENT_LIMIT) {
-        OOM_CRASH(length);
-      }
-      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
-      // report we get the error number.
-      PA_CHECK(ERROR_SUCCESS == error);
-    }
-  }
-}
-
-void FreePagesInternal(uintptr_t address, size_t length) {
-  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), 0, MEM_RELEASE));
-}
-
-void DecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // Ignore accessibility_disposition, because decommitting is equivalent to
-  // making pages inaccessible.
-  SetSystemPagesAccess(address, length,
-                       PageAccessibilityConfiguration(
-                           PageAccessibilityConfiguration::kInaccessible));
-}
-
-void DecommitAndZeroSystemPagesInternal(uintptr_t address,
-                                        size_t length,
-                                        PageTag page_tag) {
-  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
-  // "If a page is decommitted but not released, its state changes to reserved.
-  // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
-  // release it. Attempts to read from or write to a reserved page results in an
-  // access violation exception."
-  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
-  // for MEM_COMMIT: "The function also guarantees that when the caller later
-  // initially accesses the memory, the contents will be zero."
-  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), length, MEM_DECOMMIT));
-}
-
-void RecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // Ignore accessibility_disposition, because decommitting is equivalent to
-  // making pages inaccessible.
-  SetSystemPagesAccess(address, length, accessibility);
-}
-
-bool TryRecommitSystemPagesInternal(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageAccessibilityDisposition accessibility_disposition) {
-  // Ignore accessibility_disposition, because decommitting is equivalent to
-  // making pages inaccessible.
-  return TrySetSystemPagesAccess(address, length, accessibility);
-}
-
-void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
-  void* ptr = reinterpret_cast<void*>(address);
-  // Use DiscardVirtualMemory when available because it releases faster than
-  // MEM_RESET.
-  DWORD ret = DiscardVirtualMemory(ptr, length);
-  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
-  // failure.
-  if (ret) {
-    PA_CHECK(VirtualAllocWithRetry(ptr, length, MEM_RESET, PAGE_READWRITE));
-  }
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
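Editor's note: the two platform headers above disagree on kHintIsAdvisory. POSIX mmap() treats the hint as advisory and may place the mapping nearby, while Windows VirtualAlloc() fails outright if the hinted range is unavailable. A minimal sketch of how a caller could paper over that difference follows; AllocPagesWithHint and the single unhinted retry are hypothetical illustrations, not part of this diff.

// Hedged sketch: hint handling on top of the platform internals above. The
// helper name and retry policy are illustrative assumptions only.
uintptr_t AllocPagesWithHint(uintptr_t hint,
                             size_t length,
                             PageAccessibilityConfiguration accessibility,
                             PageTag tag) {
  uintptr_t ret =
      SystemAllocPagesInternal(hint, length, accessibility, tag,
                               /*file_descriptor_for_shared_alloc=*/-1);
  if (ret || kHintIsAdvisory) {
    // With an advisory hint (POSIX), the kernel already picked a nearby
    // address, so a null result is a genuine failure.
    return ret;
  }
  // With a binding hint (Windows), retry once without a hint so the OS is
  // free to choose any available address.
  return SystemAllocPagesInternal(0, length, accessibility, tag,
                                  /*file_descriptor_for_shared_alloc=*/-1);
}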
diff --git a/base/allocator/partition_allocator/page_allocator_unittest.cc b/base/allocator/partition_allocator/page_allocator_unittest.cc
deleted file mode 100644
index d8d9c1c..0000000
--- a/base/allocator/partition_allocator/page_allocator_unittest.cc
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <algorithm>
-#include <cstdint>
-#include <string>
-#include <vector>
-
-#include "base/allocator/partition_allocator/address_space_randomization.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if defined(LINUX_NAME_REGION)
-#include "base/debug/proc_maps_linux.h"
-#endif
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(IS_POSIX)
-#include <setjmp.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#endif  // BUILDFLAG(IS_POSIX)
-
-#include "base/allocator/partition_allocator/arm_bti_test_functions.h"
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-#include <arm_acle.h>
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
-#define MTE_KILLED_BY_SIGNAL_AVAILABLE
-#endif
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-namespace partition_alloc::internal {
-
-namespace {
-
-// Any number of bytes that can be allocated with no trouble.
-size_t EasyAllocSize() {
-  return (1024 * 1024) & ~(PageAllocationGranularity() - 1);
-}
-
-// A huge amount of memory, greater than or equal to the ASLR space.
-size_t HugeMemoryAmount() {
-  return std::max(::partition_alloc::internal::ASLRMask(),
-                  std::size_t{2} * ::partition_alloc::internal::ASLRMask());
-}
-
-}  // namespace
-
-TEST(PartitionAllocPageAllocatorTest, Rounding) {
-  EXPECT_EQ(0u, RoundUpToSystemPage(0u));
-  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(1));
-  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize() - 1));
-  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize()));
-  EXPECT_EQ(2 * SystemPageSize(), RoundUpToSystemPage(SystemPageSize() + 1));
-  EXPECT_EQ(0u, RoundDownToSystemPage(0u));
-  EXPECT_EQ(0u, RoundDownToSystemPage(SystemPageSize() - 1));
-  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize()));
-  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize() + 1));
-  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(2 * SystemPageSize() - 1));
-  EXPECT_EQ(0u, RoundUpToPageAllocationGranularity(0u));
-  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(1));
-  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(
-                                             PageAllocationGranularity() - 1));
-  EXPECT_EQ(PageAllocationGranularity(),
-            RoundUpToPageAllocationGranularity(PageAllocationGranularity()));
-  EXPECT_EQ(
-      2 * PageAllocationGranularity(),
-      RoundUpToPageAllocationGranularity(PageAllocationGranularity() + 1));
-  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(0u));
-  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(
-                    PageAllocationGranularity() - 1));
-  EXPECT_EQ(PageAllocationGranularity(),
-            RoundDownToPageAllocationGranularity(PageAllocationGranularity()));
-  EXPECT_EQ(PageAllocationGranularity(), RoundDownToPageAllocationGranularity(
-                                             PageAllocationGranularity() + 1));
-  EXPECT_EQ(PageAllocationGranularity(),
-            RoundDownToPageAllocationGranularity(
-                2 * PageAllocationGranularity() - 1));
-}
-
-TEST(PartitionAllocPageAllocatorTest, NextAlignedWithOffset) {
-  EXPECT_EQ(1024u, NextAlignedWithOffset(1024, 1, 0));
-  EXPECT_EQ(2024u, NextAlignedWithOffset(1024, 1024, 1000));
-  EXPECT_EQ(2024u, NextAlignedWithOffset(2024, 1024, 1000));
-  EXPECT_EQ(3048u, NextAlignedWithOffset(2025, 1024, 1000));
-  EXPECT_EQ(2048u, NextAlignedWithOffset(1024, 2048, 0));
-  EXPECT_EQ(2148u, NextAlignedWithOffset(1024, 2048, 100));
-  EXPECT_EQ(2000u, NextAlignedWithOffset(1024, 2048, 2000));
-}
-
-// Test that failed page allocations invoke base::ReleaseReservation().
-// We detect this by making a reservation and ensuring that after failure, we
-// can make a new reservation.
-TEST(PartitionAllocPageAllocatorTest, AllocFailure) {
-  // Release any reservation made by another test.
-  ReleaseReservation();
-
-  // We can make a reservation.
-  EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
-
-  // We can't make another reservation until we trigger an allocation failure.
-  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
-
-  size_t size = HugeMemoryAmount();
-  // Skip the test for sanitizers and platforms with ASLR turned off.
-  if (size == 0) {
-    return;
-  }
-
-  uintptr_t result =
-      AllocPages(size, PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kChromium);
-  if (!result) {
-    // We triggered allocation failure. Our reservation should have been
-    // released, and we should be able to make a new reservation.
-    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
-    ReleaseReservation();
-    return;
-  }
-  // We couldn't fail. Make sure reservation is still there.
-  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
-}
-
-// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
-#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
-#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
-#else
-#define MAYBE_ReserveAddressSpace ReserveAddressSpace
-#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
-
-// Test that reserving address space can fail.
-TEST(PartitionAllocPageAllocatorTest, MAYBE_ReserveAddressSpace) {
-  // Release any reservation made by another test.
-  ReleaseReservation();
-
-  size_t size = HugeMemoryAmount();
-  // Skip the test for sanitizers and platforms with ASLR turned off.
-  if (size == 0) {
-    return;
-  }
-
-  bool success = ReserveAddressSpace(size);
-  if (!success) {
-    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
-    return;
-  }
-  // We couldn't fail. Make sure reservation is still there.
-  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
-}
-
-TEST(PartitionAllocPageAllocatorTest, AllocAndFreePages) {
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWrite),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  *buffer0 = 42;
-  EXPECT_EQ(42, *buffer0);
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocPageAllocatorTest, AllocPagesAligned) {
-  size_t alignment = 8 * PageAllocationGranularity();
-  size_t sizes[] = {PageAllocationGranularity(),
-                    alignment - PageAllocationGranularity(), alignment,
-                    alignment + PageAllocationGranularity(), alignment * 4};
-  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
-                      alignment - PageAllocationGranularity()};
-  for (size_t size : sizes) {
-    for (size_t offset : offsets) {
-      uintptr_t buffer = AllocPagesWithAlignOffset(
-          0, size, alignment, offset,
-          PageAccessibilityConfiguration(
-              PageAccessibilityConfiguration::kReadWrite),
-          PageTag::kChromium);
-      EXPECT_TRUE(buffer);
-      EXPECT_EQ(buffer % alignment, offset);
-      FreePages(buffer, size);
-    }
-  }
-}
-
-TEST(PartitionAllocPageAllocatorTest,
-     AllocAndFreePagesWithPageReadWriteTagged) {
-  // This test checks that a page allocated with
-  // PageAccessibilityConfiguration::kReadWriteTagged is safe to use on all
-  // systems (even those which don't support MTE).
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  *buffer0 = 42;
-  EXPECT_EQ(42, *buffer0);
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocPageAllocatorTest,
-     AllocAndFreePagesWithPageReadExecuteConfirmCFI) {
-  // This test checks that indirect branches to anything other than a valid
-  // branch target in a PageAccessibilityConfiguration::kReadExecute-mapped
-  // page crash on systems which support the Armv8.5 Branch Target
-  // Identification extension.
-  base::CPU cpu;
-  if (!cpu.has_bti()) {
-#if BUILDFLAG(IS_IOS)
-    // Workaround for incorrectly failed iOS tests with GTEST_SKIP,
-    // see crbug.com/912138 for details.
-    return;
-#else
-    GTEST_SKIP();
-#endif
-  }
-#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
-  // Next, map some read-write memory and copy the BTI-enabled function there.
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWrite),
-                 PageTag::kChromium);
-  ptrdiff_t function_range =
-      reinterpret_cast<char*>(arm_bti_test_function_end) -
-      reinterpret_cast<char*>(arm_bti_test_function);
-  ptrdiff_t invalid_offset =
-      reinterpret_cast<char*>(arm_bti_test_function_invalid_offset) -
-      reinterpret_cast<char*>(arm_bti_test_function);
-  memcpy(reinterpret_cast<void*>(buffer),
-         reinterpret_cast<void*>(arm_bti_test_function), function_range);
-
-  // Next re-protect the page.
-  SetSystemPagesAccess(
-      buffer, PageAllocationGranularity(),
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kReadExecuteProtected));
-
-  using BTITestFunction = int64_t (*)(int64_t);
-
-  // Attempt to call the function through the BTI-enabled entrypoint. Confirm
-  // that it works.
-  BTITestFunction bti_enabled_fn = reinterpret_cast<BTITestFunction>(buffer);
-  BTITestFunction bti_invalid_fn =
-      reinterpret_cast<BTITestFunction>(buffer + invalid_offset);
-  EXPECT_EQ(bti_enabled_fn(15), 18);
-  // Next, attempt to call the function without the entrypoint.
-  EXPECT_EXIT({ bti_invalid_fn(15); }, testing::KilledBySignal(SIGILL),
-              "");  // Should crash with SIGILL.
-  FreePages(buffer, PageAllocationGranularity());
-#else
-  PA_NOTREACHED();
-#endif
-}
-
-TEST(PartitionAllocPageAllocatorTest,
-     AllocAndFreePagesWithPageReadWriteTaggedSynchronous) {
-  // This test checks that a page allocated with
-  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
-  // if allocated on a system which supports the
-  // Armv8.5 Memory Tagging Extension.
-  base::CPU cpu;
-  if (!cpu.has_mte()) {
-    // Skip this test if there's no MTE.
-#if BUILDFLAG(IS_IOS)
-    return;
-#else
-    GTEST_SKIP();
-#endif
-  }
-
-#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  // Assign an 0x1 tag to the first granule of buffer.
-  int* buffer1 = __arm_mte_increment_tag(buffer0, 0x1);
-  EXPECT_NE(buffer0, buffer1);
-  __arm_mte_set_tag(buffer1);
-  // Retrieve the tag to ensure that it's set.
-  buffer1 = __arm_mte_get_tag(buffer0);
-  // Prove that the tag is different (if they're the same, the test won't work).
-  ASSERT_NE(buffer0, buffer1);
-  TagViolationReportingMode parent_tagging_mode =
-      GetMemoryTaggingModeForCurrentThread();
-  EXPECT_EXIT(
-      {
-  // Switch to synchronous mode.
-#if BUILDFLAG(IS_ANDROID)
-        ChangeMemoryTaggingModeForAllThreadsPerProcess(
-            TagViolationReportingMode::kSynchronous);
-#else
-        ChangeMemoryTaggingModeForCurrentThread(
-            TagViolationReportingMode::kSynchronous);
-#endif  // BUILDFLAG(IS_ANDROID)
-        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-                  TagViolationReportingMode::kSynchronous);
-        // Write to the buffer using its previous tag. A segmentation fault
-        // should be delivered.
-        *buffer0 = 42;
-      },
-      testing::KilledBySignal(SIGSEGV), "");
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
-  FreePages(buffer, PageAllocationGranularity());
-#else
-  PA_NOTREACHED();
-#endif
-}
-
-TEST(PartitionAllocPageAllocatorTest,
-     AllocAndFreePagesWithPageReadWriteTaggedAsynchronous) {
-  // This test checks that a page allocated with
-  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
-  // if allocated on a system which supports MTE.
-  base::CPU cpu;
-  if (!cpu.has_mte()) {
-    // Skip this test if there's no MTE.
-#if BUILDFLAG(IS_IOS)
-    return;
-#else
-    GTEST_SKIP();
-#endif
-  }
-
-#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  __arm_mte_set_tag(__arm_mte_increment_tag(buffer0, 0x1));
-  int* buffer1 = __arm_mte_get_tag(buffer0);
-  EXPECT_NE(buffer0, buffer1);
-  TagViolationReportingMode parent_tagging_mode =
-      GetMemoryTaggingModeForCurrentThread();
-  EXPECT_EXIT(
-      {
-  // Switch to asynchronous mode.
-#if BUILDFLAG(IS_ANDROID)
-        ChangeMemoryTaggingModeForAllThreadsPerProcess(
-            TagViolationReportingMode::kAsynchronous);
-#else
-        ChangeMemoryTaggingModeForCurrentThread(
-            TagViolationReportingMode::kAsynchronous);
-#endif  // BUILDFLAG(IS_ANDROID)
-        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-                  TagViolationReportingMode::kAsynchronous);
-        // Write to the buffer using its previous tag. A fault should be
-        // generated at this point but we may not notice straight away...
-        *buffer0 = 42;
-        EXPECT_EQ(42, *buffer0);
-        PA_LOG(ERROR) << "=";  // Until we receive control back from the kernel
-                               // (e.g. on a system call).
-      },
-      testing::KilledBySignal(SIGSEGV), "");
-  FreePages(buffer, PageAllocationGranularity());
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
-#else
-  PA_NOTREACHED();
-#endif
-}
-
-// Test permission setting on POSIX, where we can set a trap handler.
-#if BUILDFLAG(IS_POSIX)
-
-namespace {
-sigjmp_buf g_continuation;
-
-void SignalHandler(int signal, siginfo_t* info, void*) {
-  siglongjmp(g_continuation, 1);
-}
-}  // namespace
-
-// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
-#if BUILDFLAG(IS_APPLE)
-#define EXTRA_FAULT_BEGIN_ACTION() \
-  struct sigaction old_bus_action; \
-  sigaction(SIGBUS, &action, &old_bus_action);
-#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
-#else
-#define EXTRA_FAULT_BEGIN_ACTION()
-#define EXTRA_FAULT_END_ACTION()
-#endif
-
-// Install a signal handler so we can catch the fault we're about to trigger.
-#define FAULT_TEST_BEGIN()                  \
-  struct sigaction action = {};             \
-  struct sigaction old_action = {};         \
-  action.sa_sigaction = SignalHandler;      \
-  sigemptyset(&action.sa_mask);             \
-  action.sa_flags = SA_SIGINFO;             \
-  sigaction(SIGSEGV, &action, &old_action); \
-  EXTRA_FAULT_BEGIN_ACTION();               \
-  int const save_sigs = 1;                  \
-  if (!sigsetjmp(g_continuation, save_sigs)) {
-// Fault generating code goes here...
-
-// Handle when sigsetjmp returns nonzero (we are returning from our handler).
-#define FAULT_TEST_END()                      \
-  }                                           \
-  else {                                      \
-    sigaction(SIGSEGV, &old_action, nullptr); \
-    EXTRA_FAULT_END_ACTION();                 \
-  }
-
-TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-
-  FAULT_TEST_BEGIN()
-
-  // Reading from buffer should fault.
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  int buffer0_contents = *buffer0;
-  EXPECT_EQ(buffer0_contents, *buffer0);
-  EXPECT_TRUE(false);
-
-  FAULT_TEST_END()
-
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-// TODO(crbug.com/1291888): Understand why we can't read from Read-Execute pages
-// on iOS.
-#if BUILDFLAG(IS_IOS)
-#define MAYBE_ReadExecutePages DISABLED_ReadExecutePages
-#else
-#define MAYBE_ReadExecutePages ReadExecutePages
-#endif  // BUILDFLAG(IS_IOS)
-TEST(PartitionAllocPageAllocatorTest, MAYBE_ReadExecutePages) {
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadExecute),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  // Reading from buffer should succeed.
-  int buffer0_contents = *buffer0;
-
-  FAULT_TEST_BEGIN()
-
-  // Writing to buffer should fault.
-  *buffer0 = ~buffer0_contents;
-  EXPECT_TRUE(false);
-
-  FAULT_TEST_END()
-
-  // Make sure no write occurred.
-  EXPECT_EQ(buffer0_contents, *buffer0);
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-#endif  // BUILDFLAG(IS_POSIX)
-
-#if defined(LINUX_NAME_REGION)
-TEST(PartitionAllocPageAllocatorTest, PageTagging) {
-  size_t size = PageAllocationGranularity();
-  uintptr_t buffer =
-      AllocPages(size, PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kChromium);
-  ASSERT_TRUE(buffer);
-
-  auto is_region_named = [](uintptr_t start_address) {
-    std::string proc_maps;
-    EXPECT_TRUE(::base::debug::ReadProcMaps(&proc_maps));
-    std::vector<::base::debug::MappedMemoryRegion> regions;
-    EXPECT_TRUE(::base::debug::ParseProcMaps(proc_maps, &regions));
-
-    bool found = false;
-    for (const auto& region : regions) {
-      if (region.start == start_address) {
-        found = true;
-        return "[anon:chromium]" == region.path;
-      }
-    }
-    EXPECT_TRUE(found);
-    return false;
-  };
-
-  bool before = is_region_named(buffer);
-  DecommitAndZeroSystemPages(buffer, size);
-  bool after = is_region_named(buffer);
-
-#if BUILDFLAG(IS_ANDROID)
-  EXPECT_TRUE(before) << "VMA tagging should always work on Android";
-#endif
-  // When not running on Android, the prctl() command may be defined in the
-  // headers, but not be implemented by the host kernel.
-  EXPECT_EQ(before, after);
-
-  FreePages(buffer, size);
-}
-#endif  // defined(LINUX_NAME_REGION)
-
-TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
-  if (!DecommittedMemoryIsAlwaysZeroed()) {
-    return;
-  }
-
-  size_t size = PageAllocationGranularity();
-  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
-                                PageAccessibilityConfiguration(
-                                    PageAccessibilityConfiguration::kReadWrite),
-                                PageTag::kChromium);
-  ASSERT_TRUE(buffer);
-
-  memset(reinterpret_cast<void*>(buffer), 42, size);
-
-  DecommitSystemPages(buffer, size,
-                      PageAccessibilityDisposition::kAllowKeepForPerf);
-  RecommitSystemPages(buffer, size,
-                      PageAccessibilityConfiguration(
-                          PageAccessibilityConfiguration::kReadWrite),
-                      PageAccessibilityDisposition::kAllowKeepForPerf);
-
-  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
-  uint32_t sum = 0;
-  for (size_t i = 0; i < size; i++) {
-    sum += recommitted_buffer[i];
-  }
-  EXPECT_EQ(0u, sum) << "Data was not erased";
-
-  FreePages(buffer, size);
-}
-
-TEST(PartitionAllocPageAllocatorTest, DecommitAndZero) {
-  size_t size = PageAllocationGranularity();
-  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
-                                PageAccessibilityConfiguration(
-                                    PageAccessibilityConfiguration::kReadWrite),
-                                PageTag::kChromium);
-  ASSERT_TRUE(buffer);
-
-  memset(reinterpret_cast<void*>(buffer), 42, size);
-
-  DecommitAndZeroSystemPages(buffer, size);
-
-// Test permission setting on POSIX, where we can set a trap handler.
-#if BUILDFLAG(IS_POSIX)
-
-  FAULT_TEST_BEGIN()
-
-  // Reading from buffer should now fault.
-  int* buffer0 = reinterpret_cast<int*>(buffer);
-  int buffer0_contents = *buffer0;
-  EXPECT_EQ(buffer0_contents, *buffer0);
-  EXPECT_TRUE(false);
-
-  FAULT_TEST_END()
-
-#endif
-
-  // Clients of the DecommitAndZero API (in particular, V8) currently just
-  // call SetSystemPagesAccess to mark the region as accessible again, so we
-  // use that here as well.
-  SetSystemPagesAccess(buffer, size,
-                       PageAccessibilityConfiguration(
-                           PageAccessibilityConfiguration::kReadWrite));
-
-  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
-  uint32_t sum = 0;
-  for (size_t i = 0; i < size; i++) {
-    sum += recommitted_buffer[i];
-  }
-  EXPECT_EQ(0u, sum) << "Data was not erased";
-
-  FreePages(buffer, size);
-}
-
-TEST(PartitionAllocPageAllocatorTest, MappedPagesAccounting) {
-  size_t size = PageAllocationGranularity();
-  // Ask for a large alignment to make sure that trimming doesn't change the
-  // accounting.
-  size_t alignment = 128 * PageAllocationGranularity();
-  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
-                      alignment - PageAllocationGranularity()};
-
-  size_t mapped_size_before = GetTotalMappedSize();
-
-  for (size_t offset : offsets) {
-    uintptr_t data = AllocPagesWithAlignOffset(
-        0, size, alignment, offset,
-        PageAccessibilityConfiguration(
-            PageAccessibilityConfiguration::kInaccessible),
-        PageTag::kChromium);
-    ASSERT_TRUE(data);
-
-    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());
-
-    DecommitSystemPages(data, size,
-                        PageAccessibilityDisposition::kAllowKeepForPerf);
-    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());
-
-    FreePages(data, size);
-    EXPECT_EQ(mapped_size_before, GetTotalMappedSize());
-  }
-}
-
-TEST(PartitionAllocPageAllocatorTest, AllocInaccessibleWillJitLater) {
-  // Verify that kInaccessibleWillJitLater allows read/write and read/execute
-  // permissions to be set.
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessibleWillJitLater),
-                 PageTag::kChromium);
-  EXPECT_TRUE(
-      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
-                              PageAccessibilityConfiguration(
-                                  PageAccessibilityConfiguration::kReadWrite)));
-  EXPECT_TRUE(TrySetSystemPagesAccess(
-      buffer, PageAllocationGranularity(),
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kReadExecute)));
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-#if BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
-// TODO(crbug.com/1452151): Fix test to GTEST_SKIP() if MAP_JIT is in-use,
-// or to be run otherwise, since kReadWriteExecute is used in some other
-// configurations.
-#define MAYBE_AllocReadWriteExecute DISABLED_AllocReadWriteExecute
-#else
-#define MAYBE_AllocReadWriteExecute AllocReadWriteExecute
-#endif  // BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
-TEST(PartitionAllocPageAllocatorTest, MAYBE_AllocReadWriteExecute) {
-  // Verify that kReadWriteExecute is similarly functional.
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteExecute),
-                 PageTag::kChromium);
-  EXPECT_TRUE(
-      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
-                              PageAccessibilityConfiguration(
-                                  PageAccessibilityConfiguration::kReadWrite)));
-  EXPECT_TRUE(TrySetSystemPagesAccess(
-      buffer, PageAllocationGranularity(),
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kReadExecute)));
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/partition_address_space.cc b/base/allocator/partition_allocator/partition_address_space.cc
deleted file mode 100644
index fe5ebd5..0000000
--- a/base/allocator/partition_allocator/partition_address_space.cc
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_address_space.h"
-
-#include <array>
-#include <cstddef>
-#include <cstdint>
-#include <ostream>
-#include <string>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/compressed_pointer.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_IOS)
-#include <mach-o/dyld.h>
-#endif
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#endif  // BUILDFLAG(IS_WIN)
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
-#include <sys/mman.h>
-#endif
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-
-namespace {
-
-#if BUILDFLAG(IS_WIN)
-
-PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
-  PA_NO_CODE_FOLDING();
-  PA_CHECK(false);
-}
-
-PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
-  PA_NO_CODE_FOLDING();
-  PA_CHECK(false);
-}
-#endif  // BUILDFLAG(IS_WIN)
-
-PA_NOINLINE void HandlePoolAllocFailure() {
-  PA_NO_CODE_FOLDING();
-  uint32_t alloc_page_error_code = GetAllocPageErrorCode();
-  PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
-  // It's important to easily differentiate these two failures on Windows, so
-  // crash with different stacks.
-#if BUILDFLAG(IS_WIN)
-  if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
-    // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
-    // it must be VA space exhaustion.
-    HandlePoolAllocFailureOutOfVASpace();
-  } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
-    // Should not happen, since as of Windows 8.1+, reserving address space
-    // should not be charged against the commit limit, aside from a very small
-    // amount per 64kiB block. Keep this path anyway, to check in crash reports.
-    HandlePoolAllocFailureOutOfCommitCharge();
-  } else
-#endif  // BUILDFLAG(IS_WIN)
-  {
-    PA_CHECK(false);
-  }
-}
-
-}  // namespace
-
-PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
-std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
-#endif
-
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-#if !BUILDFLAG(IS_IOS)
-#error Dynamic pool size is only supported on iOS.
-#endif
-
-namespace {
-bool IsIOSTestProcess() {
-  // On iOS, only applications with the extended virtual addressing entitlement
-  // can use a large address space. Since Earl Grey test runner apps cannot get
-  // entitlements, they must use a much smaller pool size. Similarly,
-  // integration tests for ChromeWebView end up with two PartitionRoots since
-  // both the integration tests and ChromeWebView have a copy of base/. Even
-  // with the entitlement, there is insufficient address space for two
-  // PartitionRoots, so a smaller pool size is needed.
-
-  // Use a fixed buffer size to avoid allocation inside the allocator.
-  constexpr size_t path_buffer_size = 8192;
-  char executable_path[path_buffer_size];
-
-  uint32_t executable_length = path_buffer_size;
-  int rv = _NSGetExecutablePath(executable_path, &executable_length);
-  PA_CHECK(!rv);
-  size_t executable_path_length =
-      std::char_traits<char>::length(executable_path);
-
-  auto has_suffix = [&](const char* suffix) -> bool {
-    size_t suffix_length = std::char_traits<char>::length(suffix);
-    if (executable_path_length < suffix_length) {
-      return false;
-    }
-    return std::char_traits<char>::compare(
-               executable_path + (executable_path_length - suffix_length),
-               suffix, suffix_length) == 0;
-  };
-
-  return has_suffix("Runner") || has_suffix("ios_web_view_inttests");
-}
-}  // namespace
-
-PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
-  return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
-                            : kRegularPoolSize;
-}
-PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
-  return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
-}
-#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-
-void PartitionAddressSpace::Init() {
-  if (IsInitialized()) {
-    return;
-  }
-
-  size_t regular_pool_size = RegularPoolSize();
-  size_t brp_pool_size = BRPPoolSize();
-
-#if BUILDFLAG(GLUE_CORE_POOLS)
-  // Gluing core pools (regular & BRP) makes sense only when both pools are of
-  // the same size. This is the only way we can check belonging to either of
-  // the two with a single bitmask operation.
-  PA_CHECK(regular_pool_size == brp_pool_size);
-
-  // TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
-  int pools_fd = -1;
-
-  size_t glued_pool_sizes = regular_pool_size * 2;
-  // Note, the BRP pool must be preceded by a "forbidden zone", which is
-  // conveniently taken care of by the last guard page of the regular pool.
-  setup_.regular_pool_base_address_ =
-      AllocPages(glued_pool_sizes, glued_pool_sizes,
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kPartitionAlloc, pools_fd);
-  if (!setup_.regular_pool_base_address_) {
-    HandlePoolAllocFailure();
-  }
-  setup_.brp_pool_base_address_ =
-      setup_.regular_pool_base_address_ + regular_pool_size;
-#else  // BUILDFLAG(GLUE_CORE_POOLS)
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-  int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
-#else
-  int regular_pool_fd = -1;
-#endif
-  setup_.regular_pool_base_address_ =
-      AllocPages(regular_pool_size, regular_pool_size,
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kPartitionAlloc, regular_pool_fd);
-  if (!setup_.regular_pool_base_address_) {
-    HandlePoolAllocFailure();
-  }
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-  int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
-#else
-  int brp_pool_fd = -1;
-#endif
-  // Reserve an extra allocation granularity unit before the BRP pool, but keep
-  // the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
-  // is a valid pointer, and having a "forbidden zone" before the BRP pool
-  // prevents such a pointer from "sneaking into" the pool.
-  const size_t kForbiddenZoneSize = PageAllocationGranularity();
-  uintptr_t base_address = AllocPagesWithAlignOffset(
-      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
-      brp_pool_size - kForbiddenZoneSize,
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kInaccessible),
-      PageTag::kPartitionAlloc, brp_pool_fd);
-  if (!base_address) {
-    HandlePoolAllocFailure();
-  }
-  setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
-#endif  // BUILDFLAG(GLUE_CORE_POOLS)
-
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
-  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
-#if BUILDFLAG(GLUE_CORE_POOLS)
-  // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
-  // regular pool, effectively forming one virtual pool of twice the size.
-  // Adjust the mask appropriately.
-  setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
-  PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
-#endif
-#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-
-  AddressPoolManager::GetInstance().Add(
-      kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
-  AddressPoolManager::GetInstance().Add(
-      kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
-
-  // Sanity check pool alignment.
-  PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
-  PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
-#if BUILDFLAG(GLUE_CORE_POOLS)
-  PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
-#endif
-
-  // Sanity check pool belonging.
-  PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
-  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
-  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
-                            regular_pool_size - 1));
-  PA_DCHECK(
-      !IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
-  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
-  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
-  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
-  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
-#if BUILDFLAG(GLUE_CORE_POOLS)
-  PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
-  PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
-  PA_DCHECK(
-      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size - 1));
-  PA_DCHECK(
-      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
-  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
-  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
-  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
-  PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
-#endif  // BUILDFLAG(GLUE_CORE_POOLS)
-
-#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  // Reserve memory for PCScan quarantine card table.
-  uintptr_t requested_address = setup_.regular_pool_base_address_;
-  uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
-      kRegularPoolHandle, requested_address, kSuperPageSize);
-  PA_CHECK(requested_address == actual_address)
-      << "QuarantineCardTable is required to be allocated at the beginning of "
-         "the regular pool";
-#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-  // Reserve memory for the shadow pools.
-  uintptr_t regular_pool_shadow_address =
-      AllocPages(regular_pool_size, regular_pool_size,
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kPartitionAlloc, regular_pool_fd);
-  regular_pool_shadow_offset_ =
-      regular_pool_shadow_address - setup_.regular_pool_base_address_;
-
-  uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
-      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
-      brp_pool_size - kForbiddenZoneSize,
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kInaccessible),
-      PageTag::kPartitionAlloc, brp_pool_fd);
-  brp_pool_shadow_offset_ =
-      brp_pool_shadow_address - setup_.brp_pool_base_address_;
-#endif
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-  CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-}
-
-void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
-                                                 size_t size) {
-  // The ConfigurablePool must only be initialized once.
-  PA_CHECK(!IsConfigurablePoolInitialized());
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // It's possible that the thread isolated pool has been initialized first, in
-  // which case the setup_ memory has been made read-only. Remove the protection
-  // temporarily.
-  if (IsThreadIsolatedPoolInitialized()) {
-    UnprotectThreadIsolatedGlobals();
-  }
-#endif
-
-  PA_CHECK(pool_base);
-  PA_CHECK(size <= kConfigurablePoolMaxSize);
-  PA_CHECK(size >= kConfigurablePoolMinSize);
-  PA_CHECK(base::bits::IsPowerOfTwo(size));
-  PA_CHECK(pool_base % size == 0);
-
-  setup_.configurable_pool_base_address_ = pool_base;
-  setup_.configurable_pool_base_mask_ = ~(size - 1);
-
-  AddressPoolManager::GetInstance().Add(
-      kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // Put the metadata protection back in place.
-  if (IsThreadIsolatedPoolInitialized()) {
-    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
-  }
-#endif
-}
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-void PartitionAddressSpace::InitThreadIsolatedPool(
-    ThreadIsolationOption thread_isolation) {
-  // The ThreadIsolated pool can't be initialized with conflicting settings.
-  if (IsThreadIsolatedPoolInitialized()) {
-    PA_CHECK(setup_.thread_isolation_ == thread_isolation);
-    return;
-  }
-
-  size_t pool_size = ThreadIsolatedPoolSize();
-  setup_.thread_isolated_pool_base_address_ =
-      AllocPages(pool_size, pool_size,
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kPartitionAlloc);
-  if (!setup_.thread_isolated_pool_base_address_) {
-    HandlePoolAllocFailure();
-  }
-
-  PA_DCHECK(!(setup_.thread_isolated_pool_base_address_ & (pool_size - 1)));
-  setup_.thread_isolation_ = thread_isolation;
-  AddressPoolManager::GetInstance().Add(
-      kThreadIsolatedPoolHandle, setup_.thread_isolated_pool_base_address_,
-      pool_size);
-
-  PA_DCHECK(
-      !IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ - 1));
-  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_));
-  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
-                                   pool_size - 1));
-  PA_DCHECK(!IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
-                                    pool_size));
-
-  // TODO(1362969): support PA_ENABLE_SHADOW_METADATA
-}
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-void PartitionAddressSpace::UninitForTesting() {
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  UninitThreadIsolatedPoolForTesting();  // IN-TEST
-#endif
-#if BUILDFLAG(GLUE_CORE_POOLS)
-  // The core pools (regular & BRP) were allocated using a single allocation of
-  // double size.
-  FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
-#else   // BUILDFLAG(GLUE_CORE_POOLS)
-  FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
-  // For BRP pool, the allocation region includes a "forbidden zone" before the
-  // pool.
-  const size_t kForbiddenZoneSize = PageAllocationGranularity();
-  FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
-            BRPPoolSize() + kForbiddenZoneSize);
-#endif  // BUILDFLAG(GLUE_CORE_POOLS)
-  // Do not free pages for the configurable pool, because its memory is owned
-  // by someone else, but deinitialize it nonetheless.
-  setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
-  setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
-  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
-  setup_.configurable_pool_base_mask_ = 0;
-  AddressPoolManager::GetInstance().ResetForTesting();
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-  CompressedPointerBaseGlobal::ResetBaseForTesting();
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-}
-
-void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // It's possible that the thread isolated pool has been initialized first, in
-  // which case the setup_ memory has been made read-only. Remove the protection
-  // temporarily.
-  if (IsThreadIsolatedPoolInitialized()) {
-    UnprotectThreadIsolatedGlobals();
-  }
-#endif
-  AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
-  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
-  setup_.configurable_pool_base_mask_ = 0;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // Put the metadata protection back in place.
-  if (IsThreadIsolatedPoolInitialized()) {
-    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
-  }
-#endif
-}
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
-  if (IsThreadIsolatedPoolInitialized()) {
-    UnprotectThreadIsolatedGlobals();
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    ThreadIsolationSettings::settings.enabled = false;
-#endif
-
-    FreePages(setup_.thread_isolated_pool_base_address_,
-              ThreadIsolatedPoolSize());
-    AddressPoolManager::GetInstance().Remove(kThreadIsolatedPoolHandle);
-    setup_.thread_isolated_pool_base_address_ = kUninitializedPoolBaseAddress;
-    setup_.thread_isolation_.enabled = false;
-  }
-}
-#endif
-
-#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-
-PageCharacteristics page_characteristics;
-
-#endif  // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-}  // namespace partition_alloc::internal
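The forbidden-zone bookkeeping above is easy to misread. Below is a minimal, self-contained sketch with toy constants, assuming (as the AllocPagesWithAlignOffset() call above relies on) that the returned base satisfies base % alignment == align_offset; it shows why the BRP pool ends up aligned at its own size even though an extra granularity unit is reserved in front of it.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t pool_size = uintptr_t{1} << 20;      // stand-in for BRPPoolSize()
  const uintptr_t forbidden_zone = uintptr_t{1} << 12;  // stand-in for the granularity
  // Suppose the allocator returned a base with base % pool_size == align_offset:
  const uintptr_t base = 7 * pool_size + (pool_size - forbidden_zone);
  assert(base % pool_size == pool_size - forbidden_zone);
  // The pool itself starts one forbidden zone past the reservation...
  const uintptr_t pool_base = base + forbidden_zone;
  // ...and is therefore aligned at the pool size, as the DCHECKs above verify.
  assert(pool_base % pool_size == 0);
  return 0;
}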
diff --git a/base/allocator/partition_allocator/partition_address_space.h b/base/allocator/partition_allocator/partition_address_space.h
deleted file mode 100644
index 1cad98f..0000000
--- a/base/allocator/partition_allocator/partition_address_space.h
+++ /dev/null
@@ -1,453 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
-
-#include <cstddef>
-#include <utility>
-
-#include "base/allocator/partition_allocator/address_pool_manager_types.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_isolation/alignment.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#endif
-
-// The feature is not applicable to 32-bit address space.
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-
-namespace partition_alloc {
-
-namespace internal {
-
-// Manages PartitionAlloc address space, which is split into pools.
-// See `glossary.md`.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
- public:
-  // Represents pool-specific information about a given address.
-  struct PoolInfo {
-    pool_handle handle;
-    uintptr_t offset;
-  };
-
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-  PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
-    return setup_.regular_pool_base_mask_;
-  }
-#else
-  PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
-    return kRegularPoolBaseMask;
-  }
-#endif
-
-  PA_ALWAYS_INLINE static PoolInfo GetPoolAndOffset(uintptr_t address) {
-    // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    PA_DCHECK(!IsInBRPPool(address));
-#endif
-    pool_handle pool = kNullPoolHandle;
-    uintptr_t base = 0;
-    if (IsInRegularPool(address)) {
-      pool = kRegularPoolHandle;
-      base = setup_.regular_pool_base_address_;
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    } else if (IsInBRPPool(address)) {
-      pool = kBRPPoolHandle;
-      base = setup_.brp_pool_base_address_;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    } else if (IsInConfigurablePool(address)) {
-      PA_DCHECK(IsConfigurablePoolInitialized());
-      pool = kConfigurablePoolHandle;
-      base = setup_.configurable_pool_base_address_;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    } else if (IsInThreadIsolatedPool(address)) {
-      pool = kThreadIsolatedPoolHandle;
-      base = setup_.thread_isolated_pool_base_address_;
-#endif
-    } else {
-      PA_NOTREACHED();
-    }
-    return PoolInfo{.handle = pool, .offset = address - base};
-  }
-  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() {
-    return kConfigurablePoolMaxSize;
-  }
-  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() {
-    return kConfigurablePoolMinSize;
-  }
-
-  // Initialize pools (except for the configurable one).
-  //
-  // This function must only be called from the main thread.
-  static void Init();
-  // Initialize the ConfigurablePool at the given address |pool_base|. It must
-  // be aligned to the size of the pool. The size must be a power of two and
-  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
-  //
-  // This function must only be called from the main thread.
-  static void InitConfigurablePool(uintptr_t pool_base, size_t size);
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  static void InitThreadIsolatedPool(ThreadIsolationOption thread_isolation);
-  static void UninitThreadIsolatedPoolForTesting();
-#endif
-  static void UninitForTesting();
-  static void UninitConfigurablePoolForTesting();
-
-  PA_ALWAYS_INLINE static bool IsInitialized() {
-    // Either both the regular and BRP pools are initialized, or neither is.
-    // The configurable and thread isolated pools are initialized separately.
-    if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
-      PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
-      return true;
-    }
-
-    PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
-    return false;
-  }
-
-  PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() {
-    return setup_.configurable_pool_base_address_ !=
-           kUninitializedPoolBaseAddress;
-  }
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  PA_ALWAYS_INLINE static bool IsThreadIsolatedPoolInitialized() {
-    return setup_.thread_isolated_pool_base_address_ !=
-           kUninitializedPoolBaseAddress;
-  }
-#endif
-
-  // Returns false for nullptr.
-  PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-    const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
-#else
-    constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
-#endif
-    return (address & regular_pool_base_mask) ==
-           setup_.regular_pool_base_address_;
-  }
-
-  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
-    return setup_.regular_pool_base_address_;
-  }
-
-  // Returns false for nullptr.
-  PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-    const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
-#else
-    constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
-#endif
-    return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
-  }
-
-#if BUILDFLAG(GLUE_CORE_POOLS)
-  // Checks whether the address belongs to either regular or BRP pool.
-  // Returns false for nullptr.
-  PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-    const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
-#else
-    // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
-    // regular pool, effectively forming one virtual pool of twice the size.
-    // Adjust the mask appropriately.
-    constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
-#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-    bool ret =
-        (address & core_pools_base_mask) == setup_.regular_pool_base_address_;
-    PA_DCHECK(ret == (IsInRegularPool(address) || IsInBRPPool(address)));
-    return ret;
-  }
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-  PA_ALWAYS_INLINE static size_t CorePoolsSize() {
-    return RegularPoolSize() * 2;
-  }
-#else
-  PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
-    return RegularPoolSize() * 2;
-  }
-#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-#endif  // BUILDFLAG(GLUE_CORE_POOLS)
-
-  PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
-    PA_DCHECK(IsInBRPPool(address));
-    return address - setup_.brp_pool_base_address_;
-  }
-
-  // Returns false for nullptr.
-  PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) {
-    return (address & setup_.configurable_pool_base_mask_) ==
-           setup_.configurable_pool_base_address_;
-  }
-
-  PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() {
-    return setup_.configurable_pool_base_address_;
-  }
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // Returns false for nullptr.
-  PA_ALWAYS_INLINE static bool IsInThreadIsolatedPool(uintptr_t address) {
-    return (address & kThreadIsolatedPoolBaseMask) ==
-           setup_.thread_isolated_pool_base_address_;
-  }
-#endif
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-  PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
-    if (pool == kRegularPoolHandle) {
-      return regular_pool_shadow_offset_;
-    } else if (pool == kBRPPoolHandle) {
-      return brp_pool_shadow_offset_;
-    } else {
-      // TODO(crbug.com/1362969): Add shadow for configurable pool as well.
-      // Shadow is not created for ConfigurablePool for now, so this part should
-      // be unreachable.
-      PA_NOTREACHED();
-    }
-  }
-#endif
-
-  // PartitionAddressSpace is a static-only class.
-  PartitionAddressSpace() = delete;
-  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
-  void* operator new(size_t) = delete;
-  void* operator new(size_t, void*) = delete;
-
- private:
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-  PA_ALWAYS_INLINE static size_t RegularPoolSize();
-  PA_ALWAYS_INLINE static size_t BRPPoolSize();
-#else
-  // The pool sizes should be as large as the maximum whenever possible.
-  PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
-    return kRegularPoolSize;
-  }
-  PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
-    return kBRPPoolSize;
-  }
-#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  PA_ALWAYS_INLINE static constexpr size_t ThreadIsolatedPoolSize() {
-    return kThreadIsolatedPoolSize;
-  }
-#endif
-
-  // On 64-bit systems, PA allocates from several contiguous, mutually disjoint
-  // pools. The BRP pool is where all allocations have a BRP ref-count, thus
-  // pointers pointing there can use a BRP protection against UaF. Allocations
-  // in the other pools don't have that.
-  //
-  // Pool sizes have to be a power of two. Each pool will be aligned at its
-  // own size boundary.
-  //
-  // NOTE! The BRP pool must be preceded by an inaccessible region. This is to
-  // prevent a pointer to the end of a non-BRP-pool allocation from falling into
-  // the BRP pool, thus triggering BRP mechanism and likely crashing. This
-  // "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
-  // allocation granularity unit.
-  //
-  // The ConfigurablePool is an optional Pool that can be created inside an
-  // existing mapping provided by the embedder. This Pool can be used when
-  // certain PA allocations must be located inside a given virtual address
-  // region. One use case for this Pool is V8 Sandbox, which requires that
-  // ArrayBuffers be located inside of it.
-  static constexpr size_t kRegularPoolSize = kPoolMaxSize;
-  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
-  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize));
-  static_assert(base::bits::IsPowerOfTwo(kBRPPoolSize));
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  static constexpr size_t kThreadIsolatedPoolSize = kGiB / 4;
-  static_assert(base::bits::IsPowerOfTwo(kThreadIsolatedPoolSize));
-#endif
-  static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
-  static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
-  static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
-  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize));
-  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));
-
-#if BUILDFLAG(IS_IOS)
-
-#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-#error iOS is only supported with a dynamically sized GigaCage.
-#endif
-
-  // We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
-  // since the test process cannot use an extended virtual address space (see
-  // crbug.com/1250788).
-  static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
-  static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
-  static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
-  static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
-  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess));
-  static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
-#endif  // BUILDFLAG(IS_IOS)
-
-#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-  // Masks used to easily determine whether an address belongs to a pool.
-  static constexpr uintptr_t kRegularPoolOffsetMask =
-      static_cast<uintptr_t>(kRegularPoolSize) - 1;
-  static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
-  static constexpr uintptr_t kBRPPoolOffsetMask =
-      static_cast<uintptr_t>(kBRPPoolSize) - 1;
-  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
-#endif  // !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  static constexpr uintptr_t kThreadIsolatedPoolOffsetMask =
-      static_cast<uintptr_t>(kThreadIsolatedPoolSize) - 1;
-  static constexpr uintptr_t kThreadIsolatedPoolBaseMask =
-      ~kThreadIsolatedPoolOffsetMask;
-#endif
-
-  // This must be set to such a value that IsIn*Pool() always returns false when
-  // the pool isn't initialized.
-  static constexpr uintptr_t kUninitializedPoolBaseAddress =
-      static_cast<uintptr_t>(-1);
-
-  struct alignas(kPartitionCachelineSize) PA_THREAD_ISOLATED_ALIGN PoolSetup {
-    // Before PartitionAddressSpace::Init(), no allocations are made from a
-    // reserved address space. Therefore, set *_pool_base_address_ initially to
-    // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
-    constexpr PoolSetup() = default;
-
-    // Using a struct to enforce alignment and padding
-    uintptr_t regular_pool_base_address_ = kUninitializedPoolBaseAddress;
-    uintptr_t brp_pool_base_address_ = kUninitializedPoolBaseAddress;
-    uintptr_t configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    uintptr_t thread_isolated_pool_base_address_ =
-        kUninitializedPoolBaseAddress;
-#endif
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-    uintptr_t regular_pool_base_mask_ = 0;
-    uintptr_t brp_pool_base_mask_ = 0;
-#if BUILDFLAG(GLUE_CORE_POOLS)
-    uintptr_t core_pools_base_mask_ = 0;
-#endif
-#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-    uintptr_t configurable_pool_base_mask_ = 0;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    ThreadIsolationOption thread_isolation_;
-#endif
-  };
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  static_assert(sizeof(PoolSetup) % SystemPageSize() == 0,
-                "PoolSetup has to fill a page(s)");
-#else
-  static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
-                "PoolSetup has to fill a cacheline(s)");
-#endif
-
-  // See the comment describing the address layout above.
-  //
-  // These are write-once fields, frequently accessed thereafter. Make sure they
-  // don't share a cacheline with other, potentially writeable data, through
-  // alignment and padding.
-  static PoolSetup setup_ PA_CONSTINIT;
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-  static std::ptrdiff_t regular_pool_shadow_offset_;
-  static std::ptrdiff_t brp_pool_shadow_offset_;
-#endif
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // If we use thread isolation, we need to write-protect its metadata.
-  // Allow the function to get access to the PoolSetup.
-  friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
-#endif
-};
-
-PA_ALWAYS_INLINE PartitionAddressSpace::PoolInfo GetPoolAndOffset(
-    uintptr_t address) {
-  return PartitionAddressSpace::GetPoolAndOffset(address);
-}
-
-PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
-  return GetPoolAndOffset(address).handle;
-}
-
-PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
-  return PartitionAddressSpace::OffsetInBRPPool(address);
-}
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
-  return PartitionAddressSpace::ShadowPoolOffset(pool);
-}
-#endif
-
-}  // namespace internal
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
-  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
-#endif
-  return internal::PartitionAddressSpace::IsInRegularPool(address)
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-         || internal::PartitionAddressSpace::IsInBRPPool(address)
-#endif
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-         || internal::PartitionAddressSpace::IsInThreadIsolatedPool(address)
-#endif
-         || internal::PartitionAddressSpace::IsInConfigurablePool(address);
-}
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
-  return internal::PartitionAddressSpace::IsInRegularPool(address);
-}
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
-  return internal::PartitionAddressSpace::IsInBRPPool(address);
-}
-
-#if BUILDFLAG(GLUE_CORE_POOLS)
-// Checks whether the address belongs to either regular or BRP pool.
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
-  return internal::PartitionAddressSpace::IsInCorePools(address);
-}
-#endif  // BUILDFLAG(GLUE_CORE_POOLS)
-
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
-    uintptr_t address) {
-  return internal::PartitionAddressSpace::IsInConfigurablePool(address);
-}
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-// Returns false for nullptr.
-PA_ALWAYS_INLINE bool IsManagedByPartitionAllocThreadIsolatedPool(
-    uintptr_t address) {
-  return internal::PartitionAddressSpace::IsInThreadIsolatedPool(address);
-}
-#endif
-
-PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
-  return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
-}
-
-}  // namespace partition_alloc
-
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
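For reference, the mask-based membership test that the header above and InitConfigurablePool()'s alignment checks rely on can be sketched with toy values. The assumptions are exactly what the PA_CHECKs enforce: a power-of-two pool size and a pool base aligned to that size, so clearing the offset bits of an address recovers the pool base.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t pool_size = uintptr_t{1} << 20;   // must be a power of two
  const uintptr_t pool_base = uintptr_t{16} << 20;  // must be size-aligned
  const uintptr_t base_mask = ~(pool_size - 1);

  const uintptr_t inside = pool_base + 12345;
  const uintptr_t outside = pool_base + pool_size;  // one past the end
  assert((inside & base_mask) == pool_base);   // offset bits cleared
  assert((outside & base_mask) != pool_base);  // falls into the next region
  return 0;
}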
diff --git a/base/allocator/partition_allocator/partition_alloc-inl.h b/base/allocator/partition_allocator/partition_alloc-inl.h
deleted file mode 100644
index 4dfa4d2..0000000
--- a/base/allocator/partition_allocator/partition_alloc-inl.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
-
-#include <algorithm>
-#include <cstring>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "base/allocator/partition_allocator/random.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-// Prefetch *x into memory.
-#if defined(__clang__) || defined(COMPILER_GCC)
-#define PA_PREFETCH(x) __builtin_prefetch(x)
-#else
-#define PA_PREFETCH(x)
-#endif
-
-namespace partition_alloc::internal {
-
-// This is a `memset` that resists being optimized away. Adapted from
-// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
-// depend on //third_party, and this is small enough.)
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-// MSVC only supports inline assembly on x86. This preprocessor directive
-// is intended as a replacement for it.
-//
-// TODO(crbug.com/1351310): Make sure inlining doesn't degrade this into
-// a no-op or similar. The documentation doesn't say.
-#pragma optimize("", off)
-#endif
-PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
-  memset(ptr, value, size);
-
-#if !PA_CONFIG(IS_NONCLANG_MSVC)
-  // As best as we can tell, this is sufficient to break any optimisations that
-  // might try to eliminate "superfluous" memsets. If there's an easy way to
-  // detect memset_s, it would be better to use that.
-  __asm__ __volatile__("" : : "r"(ptr) : "memory");
-#endif  // !PA_CONFIG(IS_NONCLANG_MSVC)
-}
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-#pragma optimize("", on)
-#endif
-
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-// Used to memset() memory for debugging purposes only.
-PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
-  // Only set the first 512kiB of the allocation. This is enough to detect uses
-  // of uninitialized / freed memory, and makes tests run significantly
-  // faster. Note that for direct-mapped allocations, memory is decommitted at
-  // free() time, so use of freed memory cannot happen.
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  LiftThreadIsolationScope lift_thread_isolation_restrictions;
-#endif
-  size_t size_to_memset = std::min(size, size_t{1} << 19);
-  memset(ptr, value, size_to_memset);
-}
-#endif  // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-
-// Returns true if we've hit the end of a random-length period. We don't want to
-// invoke `RandomValue` too often, because we call this function in a hot spot
-// (`Free`), and `RandomValue` incurs the cost of atomics.
-#if !BUILDFLAG(PA_DCHECK_IS_ON)
-PA_ALWAYS_INLINE bool RandomPeriod() {
-  static thread_local uint8_t counter = 0;
-  if (PA_UNLIKELY(counter == 0)) {
-    // It's OK to truncate this value.
-    counter = static_cast<uint8_t>(RandomValue());
-  }
-  // If `counter` is 0, this will wrap. That is intentional and OK.
-  counter--;
-  return counter == 0;
-}
-#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)
-
-PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
-  return UntagPtr(ptr);
-}
-PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(const void* object) {
-  // TODO(bartekn): Check that |object| is indeed an object start.
-  return ObjectInnerPtr2Addr(object);
-}
-PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
-  // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-  return TagAddr(slot_start);
-}
-PA_ALWAYS_INLINE uintptr_t SlotStartPtr2Addr(const void* slot_start) {
-  // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-  return UntagPtr(slot_start);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
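The empty-asm trick used by SecureMemset() above is a general pattern worth spelling out. A minimal sketch of the same idea for GCC/Clang-style compilers follows; the function and buffer names are illustrative and not part of PartitionAlloc.

#include <cstddef>
#include <cstring>

void ScrubSecret(void* buffer, size_t size) {
  memset(buffer, 0, size);
  // The empty asm statement takes `buffer` as an input and clobbers memory,
  // so the compiler must assume the preceding memset is observable and
  // cannot eliminate it as a dead store.
  __asm__ __volatile__("" : : "r"(buffer) : "memory");
}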
diff --git a/base/allocator/partition_allocator/partition_alloc.cc b/base/allocator/partition_allocator/partition_alloc.cc
deleted file mode 100644
index 5c44e10..0000000
--- a/base/allocator/partition_allocator/partition_alloc.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc.h"
-
-#include <string.h>
-
-#include <cstdint>
-#include <memory>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/partition_stats.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-
-namespace partition_alloc {
-
-void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
-  // This is from page_allocator_constants.h and doesn't really fit here, but
-  // there isn't a centralized initialization function in page_allocator.cc, so
-  // there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
-  STATIC_ASSERT_OR_PA_CHECK(
-      (internal::SystemPageSize() & internal::SystemPageOffsetMask()) == 0,
-      "SystemPageSize() must be power of 2");
-
-  // Two partition pages are used as guard / metadata pages, so make sure the
-  // super page size is bigger.
-  STATIC_ASSERT_OR_PA_CHECK(
-      internal::PartitionPageSize() * 4 <= internal::kSuperPageSize,
-      "ok super page size");
-  STATIC_ASSERT_OR_PA_CHECK(
-      (internal::kSuperPageSize & internal::SystemPageOffsetMask()) == 0,
-      "ok super page multiple");
-  // Four system pages give us room to hack out a still-guard-paged piece
-  // of metadata in the middle of a guard partition page.
-  STATIC_ASSERT_OR_PA_CHECK(
-      internal::SystemPageSize() * 4 <= internal::PartitionPageSize(),
-      "ok partition page size");
-  STATIC_ASSERT_OR_PA_CHECK(
-      (internal::PartitionPageSize() & internal::SystemPageOffsetMask()) == 0,
-      "ok partition page multiple");
-  static_assert(sizeof(internal::PartitionPage) <= internal::kPageMetadataSize,
-                "PartitionPage should not be too big");
-  STATIC_ASSERT_OR_PA_CHECK(
-      internal::kPageMetadataSize * internal::NumPartitionPagesPerSuperPage() <=
-          internal::SystemPageSize(),
-      "page metadata fits in hole");
-
-  // Limit to prevent callers accidentally overflowing an int size.
-  STATIC_ASSERT_OR_PA_CHECK(
-      internal::MaxDirectMapped() <=
-          (1UL << 31) + internal::DirectMapAllocationGranularity(),
-      "maximum direct mapped allocation");
-
-  // Check that some of our zanier calculations worked out as expected.
-  static_assert(internal::kSmallestBucket == internal::kAlignment,
-                "generic smallest bucket");
-  static_assert(internal::kMaxBucketed == 983040, "generic max bucketed");
-  STATIC_ASSERT_OR_PA_CHECK(
-      internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
-      "System pages per slot span must be no greater than 16.");
-
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-  STATIC_ASSERT_OR_PA_CHECK(
-      internal::GetPartitionRefCountIndexMultiplierShift() <
-          std::numeric_limits<size_t>::max() / 2,
-      "Calculation in GetPartitionRefCountIndexMultiplierShift() must not "
-      "underflow.");
-  // Check that the GetPartitionRefCountIndexMultiplierShift() calculation is
-  // correct.
-  STATIC_ASSERT_OR_PA_CHECK(
-      (1 << internal::GetPartitionRefCountIndexMultiplierShift()) ==
-          (internal::SystemPageSize() /
-           (sizeof(internal::PartitionRefCount) *
-            (internal::kSuperPageSize / internal::SystemPageSize()))),
-      "Bitshift must match the intended multiplication.");
-  STATIC_ASSERT_OR_PA_CHECK(
-      ((sizeof(internal::PartitionRefCount) *
-        (internal::kSuperPageSize / internal::SystemPageSize()))
-       << internal::GetPartitionRefCountIndexMultiplierShift()) <=
-          internal::SystemPageSize(),
-      "PartitionRefCount Bitmap size must be smaller than or equal to "
-      "<= SystemPageSize().");
-#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-
-  PA_DCHECK(on_out_of_memory);
-  internal::g_oom_handling_function = on_out_of_memory;
-}
-
-void PartitionAllocGlobalUninitForTesting() {
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  internal::PartitionAddressSpace::UninitThreadIsolatedPoolForTesting();
-#endif
-  internal::g_oom_handling_function = nullptr;
-}
-
-PartitionAllocator::PartitionAllocator() = default;
-
-PartitionAllocator::~PartitionAllocator() {
-  MemoryReclaimer::Instance()->UnregisterPartition(&partition_root_);
-}
-
-void PartitionAllocator::init(PartitionOptions opts) {
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
-      << "Cannot use a thread cache when PartitionAlloc is malloc().";
-#endif
-  partition_root_.Init(opts);
-  MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
-}
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/partition_alloc.gni b/base/allocator/partition_allocator/partition_alloc.gni
index dbbde01..1471292 100644
--- a/base/allocator/partition_allocator/partition_alloc.gni
+++ b/base/allocator/partition_allocator/partition_alloc.gni
@@ -24,10 +24,11 @@
   assert(false, "Unknown CPU: $current_cpu")
 }
 
-if (use_partition_alloc_as_malloc_default) {
-  _default_allocator = "partition"
-} else {
-  _default_allocator = "none"
+declare_args() {
+  # Causes all the allocations to be routed via allocator_shim.cc. Usually,
+  # the allocator shim will, in turn, route them to PartitionAlloc, but
+  # other allocators are also supported by the allocator shim.
+  use_allocator_shim = use_allocator_shim_default
 }
 
 declare_args() {
@@ -64,13 +65,8 @@
 
   # PartitionAlloc-Everywhere (PA-E). Causes allocator_shim.cc to route
   # calls to PartitionAlloc, rather than some other platform allocator.
-  use_partition_alloc_as_malloc =
-      use_partition_alloc && use_partition_alloc_as_malloc_default
-}
-
-declare_args() {
-  # Causes all the allocations to be routed via allocator_shim.cc.
-  use_allocator_shim = use_allocator_shim_default
+  use_partition_alloc_as_malloc = use_partition_alloc && use_allocator_shim &&
+                                  use_partition_alloc_as_malloc_default
 }
 
 assert(!use_allocator_shim || (is_android || is_apple || is_chromeos ||
@@ -104,6 +100,11 @@
   # Enables a bounds check when two pointers (at least one being raw_ptr) are
   # subtracted (if supported by the underlying implementation).
   enable_pointer_subtraction_check = false
+
+  # Enables a compile-time check that all raw_ptrs to which arithmetic
+  # operations are to be applied are annotated with the AllowPtrArithmetic
+  # trait.
+  enable_pointer_arithmetic_trait_check = true
 }
 
 declare_args() {
@@ -116,7 +117,7 @@
   #
   # Note that |enable_backup_ref_ptr_support = true| doesn't necessarily enable
   # BRP protection. It'll be enabled only for partition created with
-  # partition_alloc::PartitionOptions::BackupRefPtr::kEnabled.
+  # partition_alloc::PartitionOptions::kEnabled.
   enable_backup_ref_ptr_support =
       use_partition_alloc && enable_backup_ref_ptr_support_default
 
@@ -203,12 +204,6 @@
   # Shadow metadata is still under development and only supports Linux
   # for now.
   enable_shadow_metadata = false
-
-  if (is_apple) {
-    # use_blink currently assumes mach absolute ticks (eg, to ensure trace
-    # events cohere).
-    partition_alloc_enable_mach_absolute_time_ticks = is_mac || use_blink
-  }
 }
 
 # *Scan is currently only used by Chromium, and supports only 64-bit.
@@ -311,14 +306,6 @@
 assert(!use_asan_unowned_ptr || is_asan,
        "AsanUnownedPtr requires AddressSanitizer")
 
-if (is_apple) {
-  assert(!use_blink || partition_alloc_enable_mach_absolute_time_ticks,
-         "use_blink requires partition_alloc_enable_mach_absolute_time_ticks")
-
-  assert(!is_mac || partition_alloc_enable_mach_absolute_time_ticks,
-         "mac requires partition_alloc_enable_mach_absolute_time_ticks")
-}
-
 # AsanBackupRefPtr is not supported outside Chromium. The implementation is
 # entangled with `//base`. The code is only physically located with the rest of
 # `raw_ptr` to keep it together.
@@ -336,3 +323,34 @@
 }
 assert(!enable_pkeys || (is_linux && target_cpu == "x64"),
        "Pkeys are only supported on x64 linux")
+
+# Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when
+# constructing, destructing or moving out of a pointer. When using these
+# implementations, raw_ptrs<> will always be zeroed, no matter what
+# GN args or flags are present.
+#
+# Other implementations of raw_ptr<>, like NoOpImpl, don't require zeroing
+# and do not do so by default. This can lead to subtle bugs when testing
+# against one of the zeroing impls and then deploying on a platform that is
+# using a non-zeroing implementation. Setting the following GN args to
+# true triggers zeroing even for implementations that don't require it.
+# This provides consistency with the other impls. This is the recommended
+# setting.
+#
+# Setting these to false will make raw_ptr<> behave more like raw C++ pointer
+# `T*`, making NoOpImpl act like an actual no-op, so use it if you're worried
+# about performance of your project. Use at your own risk, as it's unsupported
+# and untested within Chromium.
+#
+# Even when these are set to true, the raw_ptr trait AllowUninitialized
+# provides a finer-grained mechanism for opting out of initialization on a
+# pointer by pointer basis when using a non-zeroing implementation.
+#
+# Caveat: _zero_on_move and _on_destruct will prevent the type from being
+# trivially copyable; _zero_on_construct and _on_destruct will prevent the
+# type from being trivially default constructible.
+declare_args() {
+  raw_ptr_zero_on_construct = raw_ptr_zero_on_construct_default
+  raw_ptr_zero_on_move = raw_ptr_zero_on_move_default
+  raw_ptr_zero_on_destruct = raw_ptr_zero_on_destruct_default
+}
diff --git a/base/allocator/partition_allocator/partition_alloc.h b/base/allocator/partition_allocator/partition_alloc.h
deleted file mode 100644
index b9287d2..0000000
--- a/base/allocator/partition_allocator/partition_alloc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-
-// *** HOUSEKEEPING RULES ***
-//
-// Throughout PartitionAlloc code, we avoid using generic variable names like
-// |ptr| or |address|, and prefer names like |object|, |slot_start|, instead.
-// This helps emphasize that terms like "object" and "slot" represent two
-// different worlds. "Slot" is an indivisible allocation unit, internal to
-// PartitionAlloc. It is generally represented as an address (uintptr_t), since
-// arithmetic operations on it aren't uncommon, and for that reason it isn't
-// MTE-tagged either. "Object" is the allocated memory that the app is given via
-// interfaces like Alloc(), Free(), etc. An object is fully contained within a
-// slot, and may be surrounded by internal PartitionAlloc structures or empty
-// space. Is is generally represented as a pointer to its beginning (most
-// commonly void*), and is MTE-tagged so it's safe to access.
-//
-// The best way to transition between these two worlds is via
-// PartitionRoot::ObjectToSlotStart() and ::SlotStartToObject(). These take care
-// of shifting between slot/object start, MTE-tagging/untagging and the cast for
-// you. There are cases where these functions are insufficient. Internal
-// PartitionAlloc structures, like free-list pointers, BRP ref-count, cookie,
-// etc. are located in-slot, thus accessing them requires an MTE tag.
-// SlotStartPtr2Addr() and SlotStartAddr2Ptr() take care of this.
-// There are cases where we have to do pointer arithmetic on an object pointer
-// (like check belonging to a pool, etc.), in which case we want to strip MTE
-// tag. ObjectInnerPtr2Addr() and ObjectPtr2Addr() take care of that.
-//
-// Avoid using UntagPtr/Addr() and TagPtr/Addr() directly, if possible. And
-// definitely avoid using reinterpret_cast between uintptr_t and pointer worlds.
-// When you do, add a comment explaining why it's safe from the point of MTE
-// tagging.
-
-namespace partition_alloc {
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void PartitionAllocGlobalUninitForTesting();
-
-struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocator {
-  PartitionAllocator();
-  explicit PartitionAllocator(PartitionOptions opts) { init(opts); }
-  ~PartitionAllocator();
-
-  void init(PartitionOptions);
-
-  PA_ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
-  PA_ALWAYS_INLINE const PartitionRoot* root() const {
-    return &partition_root_;
-  }
-
- private:
-  PartitionRoot partition_root_;
-};
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
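Going by the struct definition above, typical usage would look roughly like the sketch below. This is hedged: it assumes PartitionOptions is default-constructible and says nothing about how the root is subsequently used.

#include "base/allocator/partition_allocator/partition_alloc.h"

void Example() {
  partition_alloc::PartitionOptions opts;               // defaults only
  partition_alloc::PartitionAllocator allocator(opts);  // calls init(opts)
  partition_alloc::PartitionRoot* root = allocator.root();
  (void)root;  // hand `root` to code that allocates from this partition
}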
diff --git a/base/allocator/partition_allocator/partition_alloc_allocation_data.h b/base/allocator/partition_allocator/partition_alloc_allocation_data.h
deleted file mode 100644
index 3d23b41..0000000
--- a/base/allocator/partition_allocator/partition_alloc_allocation_data.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_ALLOCATION_DATA_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_ALLOCATION_DATA_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/tagging.h"
-
-namespace partition_alloc {
-
-// Definitions of various parameters of override and observer hooks. Allocation
-// and free paths differ from each other in that the allocation override provides
-// data to the caller (we have an out parameter there), whereas the free
-// override just consumes the data.
-
-// AllocationNotificationData is the in-parameter of an allocation observer
-// hook.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AllocationNotificationData {
- public:
-  AllocationNotificationData(void* address, size_t size, const char* type_name)
-      : address_(address), size_(size), type_name_(type_name) {}
-
-  void* address() const { return address_; }
-  size_t size() const { return size_; }
-  const char* type_name() const { return type_name_; }
-
-  // In the allocation observer path, it's interesting which reporting mode is
-  // enabled.
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  AllocationNotificationData& SetMteReportingMode(
-      TagViolationReportingMode mode) {
-    mte_reporting_mode_ = mode;
-    return *this;
-  }
-
-  TagViolationReportingMode mte_reporting_mode() const {
-    return mte_reporting_mode_;
-  }
-#else
-  constexpr TagViolationReportingMode mte_reporting_mode() const {
-    return TagViolationReportingMode::kUndefined;
-  }
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
- private:
-  void* address_ = nullptr;
-  size_t size_ = 0;
-  const char* type_name_ = nullptr;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  TagViolationReportingMode mte_reporting_mode_ =
-      TagViolationReportingMode::kUndefined;
-#endif
-};
-
-// FreeNotificationData is the in-parameter of a free observer hook.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) FreeNotificationData {
- public:
-  constexpr explicit FreeNotificationData(void* address) : address_(address) {}
-
-  void* address() const { return address_; }
-
-  // In the free observer path, it's interesting which reporting mode is
-  // enabled.
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  FreeNotificationData& SetMteReportingMode(TagViolationReportingMode mode) {
-    mte_reporting_mode_ = mode;
-    return *this;
-  }
-
-  TagViolationReportingMode mte_reporting_mode() const {
-    return mte_reporting_mode_;
-  }
-#else
-  constexpr TagViolationReportingMode mte_reporting_mode() const {
-    return TagViolationReportingMode::kUndefined;
-  }
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
- private:
-  void* address_ = nullptr;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  TagViolationReportingMode mte_reporting_mode_ =
-      TagViolationReportingMode::kUndefined;
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-};
-
-}  // namespace partition_alloc
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_ALLOCATION_DATA_H_
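A short sketch of how an observer hook might consume the notification data defined above. The observer function itself is hypothetical; only the accessors shown in the class are used.

#include <cstddef>

void OnAllocationObserved(
    const partition_alloc::AllocationNotificationData& data) {
  void* address = data.address();       // start of the new object
  size_t size = data.size();            // requested size
  const char* type = data.type_name();  // may be used for attribution
  (void)address;
  (void)size;
  (void)type;
}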
diff --git a/base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h b/base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h
deleted file mode 100644
index 9743a64..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a low level implementation of atomic semantics for reference
-// counting.  Please use base/memory/ref_counted.h directly instead.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
-
-#include <atomic>
-
-namespace partition_alloc::internal::base {
-
-class AtomicRefCount {
- public:
-  constexpr AtomicRefCount() : ref_count_(0) {}
-  explicit constexpr AtomicRefCount(int initial_value)
-      : ref_count_(initial_value) {}
-
-  // Increment a reference count.
-  // Returns the previous value of the count.
-  int Increment() { return Increment(1); }
-
-  // Increment a reference count by "increment", which must exceed 0.
-  // Returns the previous value of the count.
-  int Increment(int increment) {
-    return ref_count_.fetch_add(increment, std::memory_order_relaxed);
-  }
-
-  // Decrement a reference count, and return whether the result is non-zero.
-  // Insert barriers to ensure that state written before the reference count
-  // became zero will be visible to a thread that has just made the count zero.
-  bool Decrement() {
-    // TODO(jbroman): Technically this doesn't need to be an acquire operation
-    // unless the result is 1 (i.e., the ref count did indeed reach zero).
-    // However, there are toolchain issues that make that not work as well at
-    // present (notably TSAN doesn't like it).
-    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
-  }
-
-  // Return whether the reference count is one.  If the reference count is used
-  // in the conventional way, a reference count of 1 implies that the current
-  // thread owns the reference and no other thread shares it.  This call
-  // performs the test for a reference count of one, and performs the memory
-  // barrier needed for the owning thread to act on the object, knowing that it
-  // has exclusive access to the object.
-  bool IsOne() const { return ref_count_.load(std::memory_order_acquire) == 1; }
-
-  // Return whether the reference count is zero.  With conventional object
-  // reference counting, the object will be destroyed, so the reference count
-  // should never be zero.  Hence this is generally used for a debug check.
-  bool IsZero() const {
-    return ref_count_.load(std::memory_order_acquire) == 0;
-  }
-
-  // Returns the current reference count (with no barriers). This is subtle, and
-  // should be used only for debugging.
-  int SubtleRefCountForDebug() const {
-    return ref_count_.load(std::memory_order_relaxed);
-  }
-
- private:
-  std::atomic_int ref_count_;
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
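A sketch of the documented contract (Increment/Decrement), assuming the class above is available; the AddRef/Release names are illustrative, not part of the API.

namespace {

partition_alloc::internal::base::AtomicRefCount g_ref_count(1);

void AddRef() {
  g_ref_count.Increment();
}

// Returns true when the caller should destroy the shared object: Decrement()
// returns false exactly when the count has just reached zero.
bool Release() {
  return !g_ref_count.Decrement();
}

}  // namespace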
diff --git a/base/allocator/partition_allocator/partition_alloc_base/augmentations/compiler_specific.h b/base/allocator/partition_allocator/partition_alloc_base/augmentations/compiler_specific.h
deleted file mode 100644
index 8ebc003..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/augmentations/compiler_specific.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
-
-// Extensions for PA's copy of `//base/compiler_specific.h`.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-
-// PA_ATTRIBUTE_RETURNS_NONNULL
-//
-// Tells the compiler that a function never returns a null pointer.
-// Sourced from Abseil's `attributes.h`.
-#if PA_HAS_ATTRIBUTE(returns_nonnull)
-#define PA_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
-#else
-#define PA_ATTRIBUTE_RETURNS_NONNULL
-#endif
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/bit_cast.h b/base/allocator/partition_allocator/partition_alloc_base/bit_cast.h
deleted file mode 100644
index a6f5cd7..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/bit_cast.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BIT_CAST_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BIT_CAST_H_
-
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-
-#if !PA_HAS_BUILTIN(__builtin_bit_cast)
-#include <string.h>  // memcpy
-#endif
-
-namespace partition_alloc::internal::base {
-
-// This is C++20's std::bit_cast<>().
-// It morally does what `*reinterpret_cast<Dest*>(&source)` does, but the
-// cast/deref pair is undefined behavior, while bit_cast<>() isn't.
-template <class Dest, class Source>
-#if PA_HAS_BUILTIN(__builtin_bit_cast)
-constexpr
-#else
-inline
-#endif
-    Dest
-    bit_cast(const Source& source) {
-#if PA_HAS_BUILTIN(__builtin_bit_cast)
-  // TODO(thakis): Keep only this codepath once nacl is gone or updated.
-  return __builtin_bit_cast(Dest, source);
-#else
-  static_assert(sizeof(Dest) == sizeof(Source),
-                "bit_cast requires source and destination to be the same size");
-  static_assert(std::is_trivially_copyable_v<Dest>,
-                "bit_cast requires the destination type to be copyable");
-  static_assert(std::is_trivially_copyable_v<Source>,
-                "bit_cast requires the source type to be copyable");
-
-  Dest dest;
-  memcpy(&dest, &source, sizeof(dest));
-  return dest;
-#endif
-}
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BIT_CAST_H_
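For illustration, the usual type-punning example with the bit_cast above, assuming IEEE-754 doubles; both types are the same size and trivially copyable, so the fallback path's static_asserts hold.

#include <cstdint>

uint64_t DoubleBits(double value) {
  // No undefined behavior, unlike *reinterpret_cast<uint64_t*>(&value).
  return partition_alloc::internal::base::bit_cast<uint64_t>(value);
}
// DoubleBits(1.0) == 0x3FF0000000000000 on IEEE-754 platforms.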
diff --git a/base/allocator/partition_allocator/partition_alloc_base/bits.h b/base/allocator/partition_allocator/partition_alloc_base/bits.h
deleted file mode 100644
index 58e48f8..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/bits.h
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file defines some bit utilities.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_
-
-#include <cstddef>
-#include <cstdint>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base::bits {
-
-// Returns true iff |value| is a power of 2.
-template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
-constexpr bool IsPowerOfTwo(T value) {
-  // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
-  //
-  // Only positive integers with a single bit set are powers of two. If only one
-  // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set
-  // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence
-  // |x & (x-1)| is 0 iff x is a power of two.
-  return value > 0 && (value & (value - 1)) == 0;
-}
-
-// Round down |size| to a multiple of alignment, which must be a power of two.
-inline constexpr size_t AlignDown(size_t size, size_t alignment) {
-  PA_BASE_DCHECK(IsPowerOfTwo(alignment));
-  return size & ~(alignment - 1);
-}
-
-// Move |ptr| back to the previous multiple of alignment, which must be a power
-// of two. Defined for types where sizeof(T) is one byte.
-template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
-inline T* AlignDown(T* ptr, size_t alignment) {
-  return reinterpret_cast<T*>(
-      AlignDown(reinterpret_cast<size_t>(ptr), alignment));
-}
-
-// Round up |size| to a multiple of alignment, which must be a power of two.
-inline constexpr size_t AlignUp(size_t size, size_t alignment) {
-  PA_BASE_DCHECK(IsPowerOfTwo(alignment));
-  return (size + alignment - 1) & ~(alignment - 1);
-}
-
-// Advance |ptr| to the next multiple of alignment, which must be a power of
-// two. Defined for types where sizeof(T) is one byte.
-template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
-inline T* AlignUp(T* ptr, size_t alignment) {
-  return reinterpret_cast<T*>(
-      AlignUp(reinterpret_cast<size_t>(ptr), alignment));
-}
-
-// CountLeadingZeroBits(value) returns the number of consecutive zero bits
-// above (more significant than) the most significant 1 bit in |value| if
-// |value| is non-zero, otherwise it returns {sizeof(T) * 8}.
-// Example: 00100010 -> 2
-//
-// CountTrailingZeroBits(value) returns the number of consecutive zero bits
-// below (less significant than) the least significant 1 bit in |value| if
-// |value| is non-zero, otherwise it returns {sizeof(T) * 8}.
-// Example: 00100010 -> 1
-//
-// C does not have an operator to do this, but fortunately the various
-// compilers have built-ins that map to fast underlying processor instructions.
-// __builtin_clz has undefined behaviour for an input of 0, even though there's
-// clearly a return value that makes sense, and even though some processor clz
-// instructions have defined behaviour for 0. We could drop to raw __asm__ to
-// do better, but we'll avoid doing that unless we see proof that we need to.
-template <typename T, int bits = sizeof(T) * 8>
-PA_ALWAYS_INLINE constexpr
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
-                            int>::type
-    CountLeadingZeroBits(T value) {
-  static_assert(bits > 0, "invalid instantiation");
-#if defined(COMPILER_MSVC) && !defined(__clang__)
-  // We would prefer to use the _BitScanReverse(64) intrinsics, but they
-  // aren't constexpr and thus unusable here.
-  if (PA_LIKELY(value)) {
-    int leading_zeros = 0;
-    constexpr T kMostSignificantBitMask = 1ull << (bits - 1);
-    for (; !(value & kMostSignificantBitMask); value <<= 1, ++leading_zeros) {
-    }
-    return leading_zeros;
-  }
-  return bits;
-#else
-  return PA_LIKELY(value)
-             ? bits == 64
-                   ? __builtin_clzll(static_cast<uint64_t>(value))
-                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
-             : bits;
-#endif  // defined(COMPILER_MSVC) && !defined(__clang__)
-}
-
-template <typename T, int bits = sizeof(T) * 8>
-PA_ALWAYS_INLINE constexpr
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
-                            int>::type
-    CountTrailingZeroBits(T value) {
-#if defined(COMPILER_MSVC) && !defined(__clang__)
-  // We would prefer to use the _BitScanForward(64) intrinsics, but they
-  // aren't constexpr and thus unusable here.
-  if (PA_LIKELY(value)) {
-    int trailing_zeros = 0;
-    constexpr T kLeastSignificantBitMask = 1ull;
-    for (; !(value & kLeastSignificantBitMask); value >>= 1, ++trailing_zeros) {
-    }
-    return trailing_zeros;
-  }
-  return bits;
-
-#else
-  return PA_LIKELY(value) ? bits == 64
-                                ? __builtin_ctzll(static_cast<uint64_t>(value))
-                                : __builtin_ctz(static_cast<uint32_t>(value))
-                          : bits;
-#endif  // defined(COMPILER_MSVC) && !defined(__clang__)
-}
-
-// Returns the integer i such that 2^i <= n < 2^(i+1).
-//
-// There is a common `BitLength` function, which returns the number of bits
-// required to represent a value. Rather than implement that function,
-// use `Log2Floor` and add 1 to the result.
-constexpr int Log2Floor(uint32_t n) {
-  return 31 - CountLeadingZeroBits(n);
-}
-
-// Returns the integer i such that 2^(i-1) < n <= 2^i.
-constexpr int Log2Ceiling(uint32_t n) {
-  // We want the function to return -1 when n == 0. In that case (n - 1)
-  // underflows to 0xFFFFFFFF, which is why the statement below starts with
-  // (n ? 32 : -1).
-  return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
-}
-
-// Returns a value of type T with a single bit set in the left-most position.
-// Can be used instead of manually shifting a 1 to the left.
-template <typename T>
-constexpr T LeftmostBit() {
-  static_assert(std::is_integral<T>::value,
-                "This function can only be used with integral types.");
-  T one(1u);
-  return one << (8 * sizeof(T) - 1);
-}
-
-}  // namespace partition_alloc::internal::base::bits
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_
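The deleted bits.h bundles a few small bit-arithmetic idioms: the x & (x - 1) power-of-two test, mask-based alignment, and log2 via leading-zero counts. The self-contained sketch below restates the alignment arithmetic with standard C++ only; the helper names are illustrative, not the removed PartitionAlloc identifiers.

#include <cassert>
#include <cstddef>

// Same trick as the deleted IsPowerOfTwo(): x & (x - 1) clears the lowest set
// bit, so the result is zero only for powers of two (zero itself is excluded).
constexpr bool IsPow2(size_t x) {
  return x > 0 && (x & (x - 1)) == 0;
}

// Round down/up to a multiple of a power-of-two alignment by masking the low
// bits, exactly as AlignDown()/AlignUp() did.
constexpr size_t AlignDownTo(size_t n, size_t a) {
  return n & ~(a - 1);
}
constexpr size_t AlignUpTo(size_t n, size_t a) {
  return (n + a - 1) & ~(a - 1);
}

static_assert(IsPow2(4096) && !IsPow2(4095), "power-of-two test");
static_assert(AlignUpTo(4097, 4096) == 8192, "round up to the next page");
static_assert(AlignDownTo(4097, 4096) == 4096, "round down to the page start");

int main() {
  assert(AlignUpTo(1, 16) == 16);
  return 0;
}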
diff --git a/base/allocator/partition_allocator/partition_alloc_base/bits_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/bits_pa_unittest.cc
deleted file mode 100644
index 6fc44c6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/bits_pa_unittest.cc
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2009 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains the unit tests for the bit utilities.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-
-#include <cstddef>
-#include <limits>
-
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::base::bits {
-
-TEST(BitsTestPA, Log2Floor) {
-  EXPECT_EQ(-1, Log2Floor(0));
-  EXPECT_EQ(0, Log2Floor(1));
-  EXPECT_EQ(1, Log2Floor(2));
-  EXPECT_EQ(1, Log2Floor(3));
-  EXPECT_EQ(2, Log2Floor(4));
-  for (int i = 3; i < 31; ++i) {
-    unsigned int value = 1U << i;
-    EXPECT_EQ(i, Log2Floor(value));
-    EXPECT_EQ(i, Log2Floor(value + 1));
-    EXPECT_EQ(i, Log2Floor(value + 2));
-    EXPECT_EQ(i - 1, Log2Floor(value - 1));
-    EXPECT_EQ(i - 1, Log2Floor(value - 2));
-  }
-  EXPECT_EQ(31, Log2Floor(0xffffffffU));
-}
-
-TEST(BitsTestPA, Log2Ceiling) {
-  EXPECT_EQ(-1, Log2Ceiling(0));
-  EXPECT_EQ(0, Log2Ceiling(1));
-  EXPECT_EQ(1, Log2Ceiling(2));
-  EXPECT_EQ(2, Log2Ceiling(3));
-  EXPECT_EQ(2, Log2Ceiling(4));
-  for (int i = 3; i < 31; ++i) {
-    unsigned int value = 1U << i;
-    EXPECT_EQ(i, Log2Ceiling(value));
-    EXPECT_EQ(i + 1, Log2Ceiling(value + 1));
-    EXPECT_EQ(i + 1, Log2Ceiling(value + 2));
-    EXPECT_EQ(i, Log2Ceiling(value - 1));
-    EXPECT_EQ(i, Log2Ceiling(value - 2));
-  }
-  EXPECT_EQ(32, Log2Ceiling(0xffffffffU));
-}
-
-TEST(BitsTestPA, AlignUp) {
-  static constexpr size_t kSizeTMax = std::numeric_limits<size_t>::max();
-  EXPECT_EQ(0ul, AlignUp(0, 4));
-  EXPECT_EQ(4ul, AlignUp(1, 4));
-  EXPECT_EQ(4096ul, AlignUp(1, 4096));
-  EXPECT_EQ(4096ul, AlignUp(4096, 4096));
-  EXPECT_EQ(4096ul, AlignUp(4095, 4096));
-  EXPECT_EQ(8192ul, AlignUp(4097, 4096));
-  EXPECT_EQ(kSizeTMax - 31, AlignUp(kSizeTMax - 62, 32));
-  EXPECT_EQ(kSizeTMax / 2 + 1, AlignUp(1, kSizeTMax / 2 + 1));
-}
-
-TEST(BitsTestPA, AlignUpPointer) {
-  static constexpr uintptr_t kUintPtrTMax =
-      std::numeric_limits<uintptr_t>::max();
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
-            AlignUp(reinterpret_cast<uint8_t*>(0), 4));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(4),
-            AlignUp(reinterpret_cast<uint8_t*>(1), 4));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
-            AlignUp(reinterpret_cast<uint8_t*>(1), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
-            AlignUp(reinterpret_cast<uint8_t*>(4096), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
-            AlignUp(reinterpret_cast<uint8_t*>(4095), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(8192),
-            AlignUp(reinterpret_cast<uint8_t*>(4097), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax - 31),
-            AlignUp(reinterpret_cast<uint8_t*>(kUintPtrTMax - 62), 32));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax / 2 + 1),
-            AlignUp(reinterpret_cast<uint8_t*>(1), kUintPtrTMax / 2 + 1));
-}
-
-TEST(BitsTestPA, AlignDown) {
-  static constexpr size_t kSizeTMax = std::numeric_limits<size_t>::max();
-  EXPECT_EQ(0ul, AlignDown(0, 4));
-  EXPECT_EQ(0ul, AlignDown(1, 4));
-  EXPECT_EQ(0ul, AlignDown(1, 4096));
-  EXPECT_EQ(4096ul, AlignDown(4096, 4096));
-  EXPECT_EQ(0ul, AlignDown(4095, 4096));
-  EXPECT_EQ(4096ul, AlignDown(4097, 4096));
-  EXPECT_EQ(kSizeTMax - 63, AlignDown(kSizeTMax - 62, 32));
-  EXPECT_EQ(kSizeTMax - 31, AlignDown(kSizeTMax, 32));
-  EXPECT_EQ(0ul, AlignDown(1, kSizeTMax / 2 + 1));
-}
-
-TEST(BitsTestPA, AlignDownPointer) {
-  static constexpr uintptr_t kUintPtrTMax =
-      std::numeric_limits<uintptr_t>::max();
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
-            AlignDown(reinterpret_cast<uint8_t*>(0), 4));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
-            AlignDown(reinterpret_cast<uint8_t*>(1), 4));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
-            AlignDown(reinterpret_cast<uint8_t*>(1), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
-            AlignDown(reinterpret_cast<uint8_t*>(4096), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
-            AlignDown(reinterpret_cast<uint8_t*>(4095), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
-            AlignDown(reinterpret_cast<uint8_t*>(4097), 4096));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax - 63),
-            AlignDown(reinterpret_cast<uint8_t*>(kUintPtrTMax - 62), 32));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax - 31),
-            AlignDown(reinterpret_cast<uint8_t*>(kUintPtrTMax), 32));
-  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
-            AlignDown(reinterpret_cast<uint8_t*>(1), kUintPtrTMax / 2 + 1));
-}
-
-TEST(BitsTestPA, CountLeadingZeroBits8) {
-  EXPECT_EQ(8, CountLeadingZeroBits(uint8_t{0}));
-  EXPECT_EQ(7, CountLeadingZeroBits(uint8_t{1}));
-  for (int shift = 0; shift <= 7; ++shift) {
-    EXPECT_EQ(7 - shift,
-              CountLeadingZeroBits(static_cast<uint8_t>(1 << shift)));
-  }
-  EXPECT_EQ(4, CountLeadingZeroBits(uint8_t{0x0f}));
-}
-
-TEST(BitsTestPA, CountLeadingZeroBits16) {
-  EXPECT_EQ(16, CountLeadingZeroBits(uint16_t{0}));
-  EXPECT_EQ(15, CountLeadingZeroBits(uint16_t{1}));
-  for (int shift = 0; shift <= 15; ++shift) {
-    EXPECT_EQ(15 - shift,
-              CountLeadingZeroBits(static_cast<uint16_t>(1 << shift)));
-  }
-  EXPECT_EQ(4, CountLeadingZeroBits(uint16_t{0x0f0f}));
-}
-
-TEST(BitsTestPA, CountLeadingZeroBits32) {
-  EXPECT_EQ(32, CountLeadingZeroBits(uint32_t{0}));
-  EXPECT_EQ(31, CountLeadingZeroBits(uint32_t{1}));
-  for (int shift = 0; shift <= 31; ++shift) {
-    EXPECT_EQ(31 - shift, CountLeadingZeroBits(uint32_t{1} << shift));
-  }
-  EXPECT_EQ(4, CountLeadingZeroBits(uint32_t{0x0f0f0f0f}));
-}
-
-TEST(BitsTestPA, CountTrailingZeroBits8) {
-  EXPECT_EQ(8, CountTrailingZeroBits(uint8_t{0}));
-  EXPECT_EQ(7, CountTrailingZeroBits(uint8_t{128}));
-  for (int shift = 0; shift <= 7; ++shift) {
-    EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint8_t>(1 << shift)));
-  }
-  EXPECT_EQ(4, CountTrailingZeroBits(uint8_t{0xf0}));
-}
-
-TEST(BitsTestPA, CountTrailingZeroBits16) {
-  EXPECT_EQ(16, CountTrailingZeroBits(uint16_t{0}));
-  EXPECT_EQ(15, CountTrailingZeroBits(uint16_t{32768}));
-  for (int shift = 0; shift <= 15; ++shift) {
-    EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint16_t>(1 << shift)));
-  }
-  EXPECT_EQ(4, CountTrailingZeroBits(uint16_t{0xf0f0}));
-}
-
-TEST(BitsTestPA, CountTrailingZeroBits32) {
-  EXPECT_EQ(32, CountTrailingZeroBits(uint32_t{0}));
-  EXPECT_EQ(31, CountTrailingZeroBits(uint32_t{1} << 31));
-  for (int shift = 0; shift <= 31; ++shift) {
-    EXPECT_EQ(shift, CountTrailingZeroBits(uint32_t{1} << shift));
-  }
-  EXPECT_EQ(4, CountTrailingZeroBits(uint32_t{0xf0f0f0f0}));
-}
-
-TEST(BitsTestPA, CountLeadingZeroBits64) {
-  EXPECT_EQ(64, CountLeadingZeroBits(uint64_t{0}));
-  EXPECT_EQ(63, CountLeadingZeroBits(uint64_t{1}));
-  for (int shift = 0; shift <= 63; ++shift) {
-    EXPECT_EQ(63 - shift, CountLeadingZeroBits(uint64_t{1} << shift));
-  }
-  EXPECT_EQ(4, CountLeadingZeroBits(uint64_t{0x0f0f0f0f0f0f0f0f}));
-}
-
-TEST(BitsTestPA, CountTrailingZeroBits64) {
-  EXPECT_EQ(64, CountTrailingZeroBits(uint64_t{0}));
-  EXPECT_EQ(63, CountTrailingZeroBits(uint64_t{1} << 63));
-  for (int shift = 0; shift <= 31; ++shift) {
-    EXPECT_EQ(shift, CountTrailingZeroBits(uint64_t{1} << shift));
-  }
-  EXPECT_EQ(4, CountTrailingZeroBits(uint64_t{0xf0f0f0f0f0f0f0f0}));
-}
-
-TEST(BitsTestPA, CountLeadingZeroBitsSizeT) {
-#if defined(ARCH_CPU_64_BITS)
-  EXPECT_EQ(64, CountLeadingZeroBits(size_t{0}));
-  EXPECT_EQ(63, CountLeadingZeroBits(size_t{1}));
-  EXPECT_EQ(32, CountLeadingZeroBits(size_t{1} << 31));
-  EXPECT_EQ(1, CountLeadingZeroBits(size_t{1} << 62));
-  EXPECT_EQ(0, CountLeadingZeroBits(size_t{1} << 63));
-#else
-  EXPECT_EQ(32, CountLeadingZeroBits(size_t{0}));
-  EXPECT_EQ(31, CountLeadingZeroBits(size_t{1}));
-  EXPECT_EQ(1, CountLeadingZeroBits(size_t{1} << 30));
-  EXPECT_EQ(0, CountLeadingZeroBits(size_t{1} << 31));
-#endif  // ARCH_CPU_64_BITS
-}
-
-TEST(BitsTestPA, CountTrailingZeroBitsSizeT) {
-#if defined(ARCH_CPU_64_BITS)
-  EXPECT_EQ(64, CountTrailingZeroBits(size_t{0}));
-  EXPECT_EQ(63, CountTrailingZeroBits(size_t{1} << 63));
-  EXPECT_EQ(31, CountTrailingZeroBits(size_t{1} << 31));
-  EXPECT_EQ(1, CountTrailingZeroBits(size_t{2}));
-  EXPECT_EQ(0, CountTrailingZeroBits(size_t{1}));
-#else
-  EXPECT_EQ(32, CountTrailingZeroBits(size_t{0}));
-  EXPECT_EQ(31, CountTrailingZeroBits(size_t{1} << 31));
-  EXPECT_EQ(1, CountTrailingZeroBits(size_t{2}));
-  EXPECT_EQ(0, CountTrailingZeroBits(size_t{1}));
-#endif  // ARCH_CPU_64_BITS
-}
-
-TEST(BitsTestPA, PowerOfTwo) {
-  EXPECT_FALSE(IsPowerOfTwo(-1));
-  EXPECT_FALSE(IsPowerOfTwo(0));
-  EXPECT_TRUE(IsPowerOfTwo(1));
-  EXPECT_TRUE(IsPowerOfTwo(2));
-  // Unsigned 64 bit cases.
-  for (uint32_t i = 2; i < 64; i++) {
-    const uint64_t val = uint64_t{1} << i;
-    EXPECT_FALSE(IsPowerOfTwo(val - 1));
-    EXPECT_TRUE(IsPowerOfTwo(val));
-    EXPECT_FALSE(IsPowerOfTwo(val + 1));
-  }
-  // Signed 64 bit cases.
-  for (uint32_t i = 2; i < 63; i++) {
-    const int64_t val = int64_t{1} << i;
-    EXPECT_FALSE(IsPowerOfTwo(val - 1));
-    EXPECT_TRUE(IsPowerOfTwo(val));
-    EXPECT_FALSE(IsPowerOfTwo(val + 1));
-  }
-  // Signed integers with only the most significant bit set are negative, not
-  // powers of two.
-  EXPECT_FALSE(IsPowerOfTwo(int64_t{1} << 63));
-}
-
-TEST(BitsTestPA, LeftMostBit) {
-  // Construction of a signed type from an unsigned one of the same width
-  // preserves all bits. Explicitly confirming this behavior here to illustrate
-  // correctness of reusing unsigned literals to test behavior of signed types.
-  // Using signed literals does not work with EXPECT_EQ.
-  static_assert(
-      static_cast<int64_t>(0xFFFFFFFFFFFFFFFFu) == 0xFFFFFFFFFFFFFFFFl,
-      "Comparing signed with unsigned literals compares bits.");
-  static_assert((0xFFFFFFFFFFFFFFFFu ^ 0xFFFFFFFFFFFFFFFFl) == 0,
-                "Signed and unsigned literals have the same bits set");
-
-  uint64_t unsigned_long_long_value = 0x8000000000000000u;
-  EXPECT_EQ(LeftmostBit<uint64_t>(), unsigned_long_long_value);
-  EXPECT_EQ(LeftmostBit<int64_t>(), int64_t(unsigned_long_long_value));
-
-  uint32_t unsigned_long_value = 0x80000000u;
-  EXPECT_EQ(LeftmostBit<uint32_t>(), unsigned_long_value);
-  EXPECT_EQ(LeftmostBit<int32_t>(), int32_t(unsigned_long_value));
-
-  uint16_t unsigned_short_value = 0x8000u;
-  EXPECT_EQ(LeftmostBit<uint16_t>(), unsigned_short_value);
-  EXPECT_EQ(LeftmostBit<int16_t>(), int16_t(unsigned_short_value));
-
-  uint8_t unsigned_byte_value = 0x80u;
-  EXPECT_EQ(LeftmostBit<uint8_t>(), unsigned_byte_value);
-  EXPECT_EQ(LeftmostBit<int8_t>(), int8_t(unsigned_byte_value));
-}
-
-}  // namespace partition_alloc::internal::base::bits
diff --git a/base/allocator/partition_allocator/partition_alloc_base/check.cc b/base/allocator/partition_allocator/partition_alloc_base/check.cc
deleted file mode 100644
index 2ea3952..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/check.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-
-namespace partition_alloc::internal::logging {
-
-// TODO(1151236): Make CheckError not allocate memory, so that CHECK() can be
-// used inside PartitionAllocator when PartitionAllocator-Everywhere is
-// enabled. (LogMessage also needs to be modified.)
-CheckError CheckError::Check(const char* file,
-                             int line,
-                             const char* condition) {
-  CheckError check_error(new LogMessage(file, line, LOGGING_FATAL));
-  check_error.stream() << "Check failed: " << condition << ". ";
-  return check_error;
-}
-
-CheckError CheckError::DCheck(const char* file,
-                              int line,
-                              const char* condition) {
-  CheckError check_error(new LogMessage(file, line, LOGGING_DCHECK));
-  check_error.stream() << "Check failed: " << condition << ". ";
-  return check_error;
-}
-
-CheckError CheckError::PCheck(const char* file,
-                              int line,
-                              const char* condition) {
-  SystemErrorCode err_code = logging::GetLastSystemErrorCode();
-#if BUILDFLAG(IS_WIN)
-  CheckError check_error(
-      new Win32ErrorLogMessage(file, line, LOGGING_FATAL, err_code));
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  CheckError check_error(
-      new ErrnoLogMessage(file, line, LOGGING_FATAL, err_code));
-#endif
-  check_error.stream() << "Check failed: " << condition << ". ";
-  return check_error;
-}
-
-CheckError CheckError::PCheck(const char* file, int line) {
-  return PCheck(file, line, "");
-}
-
-CheckError CheckError::DPCheck(const char* file,
-                               int line,
-                               const char* condition) {
-  SystemErrorCode err_code = logging::GetLastSystemErrorCode();
-#if BUILDFLAG(IS_WIN)
-  CheckError check_error(
-      new Win32ErrorLogMessage(file, line, LOGGING_DCHECK, err_code));
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  CheckError check_error(
-      new ErrnoLogMessage(file, line, LOGGING_DCHECK, err_code));
-#endif
-  check_error.stream() << "Check failed: " << condition << ". ";
-  return check_error;
-}
-
-CheckError CheckError::NotImplemented(const char* file,
-                                      int line,
-                                      const char* function) {
-  CheckError check_error(new LogMessage(file, line, LOGGING_ERROR));
-  check_error.stream() << "Not implemented reached in " << function;
-  return check_error;
-}
-
-std::ostream& CheckError::stream() {
-  return log_message_->stream();
-}
-
-CheckError::~CheckError() {
-  // Note: This function ends up in crash stack traces. If its full name
-  // changes, the crash server's magic signature logic needs to be updated.
-  // See cl/306632920.
-  delete log_message_;
-}
-
-CheckError::CheckError(LogMessage* log_message) : log_message_(log_message) {}
-
-void RawCheckFailure(const char* message) {
-  RawLog(LOGGING_FATAL, message);
-  __builtin_unreachable();
-}
-
-}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/partition_alloc_base/check.h b/base/allocator/partition_allocator/partition_alloc_base/check.h
deleted file mode 100644
index 67437e3..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/check.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_
-
-#include <iosfwd>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-
-#define PA_STRINGIFY_IMPL(s) #s
-#define PA_STRINGIFY(s) PA_STRINGIFY_IMPL(s)
-
-// This header defines the CHECK, DCHECK, and DPCHECK macros.
-//
-// CHECK dies with a fatal error if its condition is not true. It is not
-// controlled by NDEBUG, so the check will be executed regardless of compilation
-// mode.
-//
-// DCHECK, the "debug mode" check, is enabled depending on NDEBUG and
-// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE.
-//
-// (D)PCHECK is like (D)CHECK, but includes the system error code (c.f.
-// perror(3)).
-//
-// Additional information can be streamed to these macros and will be included
-// in the log output if the condition doesn't hold (you may need to include
-// <ostream>):
-//
-//   CHECK(condition) << "Additional info.";
-//
-// The condition is evaluated exactly once. Even in build modes where e.g.
-// DCHECK is disabled, the condition and any stream arguments are still
-// referenced to avoid warnings about unused variables and functions.
-//
-// For the (D)CHECK_EQ, etc. macros, see base/check_op.h. However, that header
-// is *significantly* larger than check.h, so try to avoid including it in
-// header files.
-
-namespace partition_alloc::internal::logging {
-
-// Class used to explicitly ignore an ostream, and optionally a boolean value.
-class VoidifyStream {
- public:
-  VoidifyStream() = default;
-  explicit VoidifyStream(bool ignored) {}
-
-  // This operator has lower precedence than << but higher than ?:
-  void operator&(std::ostream&) {}
-};
-
-// Helper macro which avoids evaluating the arguments to a stream if the
-// condition is false.
-#define PA_LAZY_CHECK_STREAM(stream, condition) \
-  !(condition)                                  \
-      ? (void)0                                 \
-      : ::partition_alloc::internal::logging::VoidifyStream() & (stream)
-
-// Macro which uses but does not evaluate expr and any stream parameters.
-#define PA_EAT_CHECK_STREAM_PARAMS(expr)                             \
-  true ? (void)0                                                     \
-       : ::partition_alloc::internal::logging::VoidifyStream(expr) & \
-             (*::partition_alloc::internal::logging::g_swallow_stream)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern std::ostream* g_swallow_stream;
-
-class LogMessage;
-
-// Class used for raising a check error upon destruction.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) CheckError {
- public:
-  static CheckError Check(const char* file, int line, const char* condition);
-
-  static CheckError DCheck(const char* file, int line, const char* condition);
-
-  static CheckError PCheck(const char* file, int line, const char* condition);
-  static CheckError PCheck(const char* file, int line);
-
-  static CheckError DPCheck(const char* file, int line, const char* condition);
-
-  static CheckError NotImplemented(const char* file,
-                                   int line,
-                                   const char* function);
-
-  // Stream for adding optional details to the error message.
-  std::ostream& stream();
-
-  PA_NOMERGE ~CheckError();
-
-  CheckError(const CheckError& other) = delete;
-  CheckError& operator=(const CheckError& other) = delete;
-  CheckError(CheckError&& other) = default;
-  CheckError& operator=(CheckError&& other) = default;
-
- private:
-  explicit CheckError(LogMessage* log_message);
-
-  LogMessage* log_message_;
-};
-
-#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
-#error "Debug builds are not expected to be optimized as official builds."
-#endif  // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
-
-#if defined(OFFICIAL_BUILD) && !BUILDFLAG(PA_DCHECK_IS_ON)
-
-// Discard log strings to reduce code bloat.
-//
-// This does not call BreakDebugger since it is called frequently, and calling
-// an out-of-line function instead of a noreturn inline macro prevents compiler
-// optimizations.
-#define PA_BASE_CHECK(condition)                   \
-  PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
-                            : PA_EAT_CHECK_STREAM_PARAMS()
-
-#define PA_BASE_CHECK_WILL_STREAM() false
-
-#define PA_BASE_PCHECK(condition)                                        \
-  PA_LAZY_CHECK_STREAM(                                                  \
-      ::partition_alloc::internal::logging::CheckError::PCheck(__FILE__, \
-                                                               __LINE__) \
-          .stream(),                                                     \
-      PA_UNLIKELY(!(condition)))
-
-#else
-
-#define PA_BASE_CHECK(condition)                               \
-  PA_LAZY_CHECK_STREAM(                                        \
-      ::partition_alloc::internal::logging::CheckError::Check( \
-          __FILE__, __LINE__, #condition)                      \
-          .stream(),                                           \
-      !PA_ANALYZER_ASSUME_TRUE(condition))
-
-#define PA_BASE_CHECK_WILL_STREAM() true
-
-#define PA_BASE_PCHECK(condition)                               \
-  PA_LAZY_CHECK_STREAM(                                         \
-      ::partition_alloc::internal::logging::CheckError::PCheck( \
-          __FILE__, __LINE__, #condition)                       \
-          .stream(),                                            \
-      !PA_ANALYZER_ASSUME_TRUE(condition))
-
-#endif
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_BASE_DCHECK(condition)                               \
-  PA_LAZY_CHECK_STREAM(                                         \
-      ::partition_alloc::internal::logging::CheckError::DCheck( \
-          __FILE__, __LINE__, #condition)                       \
-          .stream(),                                            \
-      !PA_ANALYZER_ASSUME_TRUE(condition))
-
-#define PA_BASE_DPCHECK(condition)                               \
-  PA_LAZY_CHECK_STREAM(                                          \
-      ::partition_alloc::internal::logging::CheckError::DPCheck( \
-          __FILE__, __LINE__, #condition)                        \
-          .stream(),                                             \
-      !PA_ANALYZER_ASSUME_TRUE(condition))
-
-#else
-
-#define PA_BASE_DCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
-#define PA_BASE_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
-
-#endif
-
-// Async signal safe checking mechanism.
-[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RawCheckFailure(
-    const char* message);
-#define PA_RAW_CHECK(condition)                              \
-  do {                                                       \
-    if (!(condition))                                        \
-      ::partition_alloc::internal::logging::RawCheckFailure( \
-          "Check failed: " #condition "\n");                 \
-  } while (0)
-
-}  // namespace partition_alloc::internal::logging
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_
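The deleted check.h relies on a precedence trick: VoidifyStream::operator& binds more loosely than operator<< but more tightly than ?:, so the details streamed after a CHECK are only evaluated on the failing branch. The standalone sketch below recreates that trick with illustrative names (Voidify, SKETCH_CHECK) and logs to std::cerr instead of crashing.

#include <iostream>

// operator& swallows the fully built stream expression; because << binds
// tighter than &, everything streamed after the macro ends up inside the
// false branch of the conditional and is skipped when the condition holds.
class Voidify {
 public:
  void operator&(std::ostream&) {}
};

#define SKETCH_CHECK(cond) \
  (cond) ? (void)0 : Voidify() & (std::cerr << "Check failed: " #cond ". ")

int main() {
  int requested = 3;
  SKETCH_CHECK(requested > 0) << "requested=" << requested << "\n";  // Silent.
  SKETCH_CHECK(requested < 0) << "requested=" << requested << "\n";  // Logged.
  return 0;
}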
diff --git a/base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h b/base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h
deleted file mode 100644
index 8a8095c..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
-
-#include "build/build_config.h"
-
-// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
-#if defined(__has_attribute)
-#define PA_HAS_ATTRIBUTE(x) __has_attribute(x)
-#else
-#define PA_HAS_ATTRIBUTE(x) 0
-#endif
-
-// A wrapper around `__has_builtin`, similar to HAS_CPP_ATTRIBUTE.
-#if defined(__has_builtin)
-#define PA_HAS_BUILTIN(x) __has_builtin(x)
-#else
-#define PA_HAS_BUILTIN(x) 0
-#endif
-
-// Annotate a function indicating it should not be inlined.
-// Use like:
-//   NOINLINE void DoStuff() { ... }
-#if defined(__clang__) && PA_HAS_ATTRIBUTE(noinline)
-#define PA_NOINLINE [[clang::noinline]]
-#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(noinline)
-#define PA_NOINLINE __attribute__((noinline))
-#elif defined(COMPILER_MSVC)
-#define PA_NOINLINE __declspec(noinline)
-#else
-#define PA_NOINLINE
-#endif
-
-#if defined(__clang__) && defined(NDEBUG) && PA_HAS_ATTRIBUTE(always_inline)
-#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
-#elif defined(COMPILER_GCC) && defined(NDEBUG) && \
-    PA_HAS_ATTRIBUTE(always_inline)
-#define PA_ALWAYS_INLINE inline __attribute__((__always_inline__))
-#elif defined(COMPILER_MSVC) && defined(NDEBUG)
-#define PA_ALWAYS_INLINE __forceinline
-#else
-#define PA_ALWAYS_INLINE inline
-#endif
-
-// Annotate a function indicating it should never be tail called. Useful to make
-// sure callers of the annotated function are never omitted from call-stacks.
-// To provide the complementary behavior (prevent the annotated function from
-// being omitted) look at NOINLINE. Also note that this doesn't prevent code
-// folding of multiple identical caller functions into a single signature. To
-// prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
-// Use like:
-//   void NOT_TAIL_CALLED FooBar();
-#if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called)
-#define PA_NOT_TAIL_CALLED [[clang::not_tail_called]]
-#else
-#define PA_NOT_TAIL_CALLED
-#endif
-
-// Specify memory alignment for structs, classes, etc.
-// Use like:
-//   class PA_ALIGNAS(16) MyClass { ... }
-//   PA_ALIGNAS(16) int array[4];
-//
-// In most places you can use the C++11 keyword "alignas", which is preferred.
-//
-// Historically, compilers had trouble mixing __attribute__((...)) syntax with
-// alignas(...) syntax. However, at least Clang is very accepting nowadays. It
-// may be that this macro can be removed entirely.
-#if defined(__clang__)
-#define PA_ALIGNAS(byte_alignment) alignas(byte_alignment)
-#elif defined(COMPILER_MSVC)
-#define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
-#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(aligned)
-#define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
-#endif
-
-// Tells the compiler a function is using a printf-style format string.
-// |format_param| is the one-based index of the format string parameter;
-// |dots_param| is the one-based index of the "..." parameter.
-// For v*printf functions (which take a va_list), pass 0 for dots_param.
-// (This is undocumented but matches what the system C headers do.)
-// For member functions, the implicit this parameter counts as index 1.
-#if (defined(COMPILER_GCC) || defined(__clang__)) && PA_HAS_ATTRIBUTE(format)
-#define PA_PRINTF_FORMAT(format_param, dots_param) \
-  __attribute__((format(printf, format_param, dots_param)))
-#else
-#define PA_PRINTF_FORMAT(format_param, dots_param)
-#endif
-
-// Sanitizers annotations.
-#if PA_HAS_ATTRIBUTE(no_sanitize)
-#define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
-#endif
-#if !defined(PA_NO_SANITIZE)
-#define PA_NO_SANITIZE(what)
-#endif
-
-// MemorySanitizer annotations.
-#if defined(MEMORY_SANITIZER)
-#include <sanitizer/msan_interface.h>
-
-// Mark a memory region fully initialized.
-// Use this to annotate code that deliberately reads uninitialized data, for
-// example a GC scavenging root set pointers from the stack.
-#define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
-#else  // MEMORY_SANITIZER
-#define PA_MSAN_UNPOISON(p, size)
-#endif  // MEMORY_SANITIZER
-
-// Macro for hinting that an expression is likely to be false.
-#if !defined(PA_UNLIKELY)
-#if defined(COMPILER_GCC) || defined(__clang__)
-#define PA_UNLIKELY(x) __builtin_expect(!!(x), 0)
-#else
-#define PA_UNLIKELY(x) (x)
-#endif  // defined(COMPILER_GCC)
-#endif  // !defined(PA_UNLIKELY)
-
-#if !defined(PA_LIKELY)
-#if defined(COMPILER_GCC) || defined(__clang__)
-#define PA_LIKELY(x) __builtin_expect(!!(x), 1)
-#else
-#define PA_LIKELY(x) (x)
-#endif  // defined(COMPILER_GCC)
-#endif  // !defined(PA_LIKELY)
-
-#if !defined(PA_CPU_ARM_NEON)
-#if defined(__arm__)
-#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
-    !defined(__VFP_FP__) && !defined(_WIN32_WCE) && !defined(ANDROID)
-#error Chromium does not support middle endian architecture
-#endif
-#if defined(__ARM_NEON__)
-#define PA_CPU_ARM_NEON 1
-#endif
-#endif  // defined(__arm__)
-#endif  // !defined(PA_CPU_ARM_NEON)
-
-#if !defined(PA_HAVE_MIPS_MSA_INTRINSICS)
-#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
-#define PA_HAVE_MIPS_MSA_INTRINSICS 1
-#endif
-#endif
-
-// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
-// to Clang which control what code paths are statically analyzed,
-// and is meant to be used in conjunction with assert & assert-like functions.
-// The expression is passed straight through if analysis isn't enabled.
-//
-// ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current
-// codepath and any other branching codepaths that might follow.
-#if defined(__clang_analyzer__)
-
-namespace partition_alloc::internal {
-
-inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
-  return false;
-}
-
-inline constexpr bool AnalyzerAssumeTrue(bool arg) {
-  // PartitionAllocAnalyzerNoReturn() is invoked and analysis is terminated if
-  // |arg| is false.
-  return arg || AnalyzerNoReturn();
-}
-
-}  // namespace partition_alloc::internal
-
-#define PA_ANALYZER_ASSUME_TRUE(arg) \
-  ::partition_alloc::internal::AnalyzerAssumeTrue(!!(arg))
-#define PA_ANALYZER_SKIP_THIS_PATH() \
-  static_cast<void>(::partition_alloc::internal::AnalyzerNoReturn())
-
-#else  // !defined(__clang_analyzer__)
-
-#define PA_ANALYZER_ASSUME_TRUE(arg) (arg)
-#define PA_ANALYZER_SKIP_THIS_PATH()
-
-#endif  // defined(__clang_analyzer__)
-
-// Use the nomerge attribute to prevent the optimizer from merging multiple
-// identical calls.
-#if defined(__clang__) && PA_HAS_ATTRIBUTE(nomerge)
-#define PA_NOMERGE [[clang::nomerge]]
-#else
-#define PA_NOMERGE
-#endif
-
-// Marks a type as being eligible for the "trivial" ABI despite having a
-// non-trivial destructor or copy/move constructor. Such types can be relocated
-// after construction by simply copying their memory, which makes them eligible
-// to be passed in registers. The canonical example is std::unique_ptr.
-//
-// Use with caution; this has some subtle effects on constructor/destructor
-// ordering and will be very incorrect if the type relies on its address
-// remaining constant. When used as a function argument (by value), the value
-// may be constructed in the caller's stack frame, passed in a register, and
-// then used and destructed in the callee's stack frame. A similar thing can
-// occur when values are returned.
-//
-// TRIVIAL_ABI is not needed for types which have a trivial destructor and
-// copy/move constructors, such as base::TimeTicks and other POD.
-//
-// It is also not likely to be effective on types too large to be passed in one
-// or two registers on typical target ABIs.
-//
-// See also:
-//   https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
-//   https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html
-#if defined(__clang__) && PA_HAS_ATTRIBUTE(trivial_abi)
-#define PA_TRIVIAL_ABI [[clang::trivial_abi]]
-#else
-#define PA_TRIVIAL_ABI
-#endif
-
-// Requires constant initialization. See constinit in C++20. Allows relying on
-// a variable being initialized before execution, without requiring a global
-// constructor.
-#if PA_HAS_ATTRIBUTE(require_constant_initialization)
-#define PA_CONSTINIT __attribute__((require_constant_initialization))
-#endif
-#if !defined(PA_CONSTINIT)
-#define PA_CONSTINIT
-#endif
-
-#if defined(__clang__)
-#define PA_GSL_POINTER [[gsl::Pointer]]
-#else
-#define PA_GSL_POINTER
-#endif
-
-// Constexpr destructors were introduced in C++20. PartitionAlloc's minimum
-// supported C++ version is C++17.
-#if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
-#define PA_CONSTEXPR_DTOR constexpr
-#else
-#define PA_CONSTEXPR_DTOR
-#endif
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
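Most of the deleted compiler_specific.h follows a single pattern: probe the toolchain with __has_attribute / __has_builtin, define the annotation when it is supported, and otherwise fall back to a no-op so call sites can use it unconditionally. A small sketch of that pattern with illustrative SKETCH_* names:

// Probe for attribute support; expands to 0 on compilers without the probe.
#if defined(__has_attribute)
#define SKETCH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
#define SKETCH_HAS_ATTRIBUTE(x) 0
#endif

// Prefer the C++ attribute spelling on Clang, the GNU spelling on GCC, and
// degrade to nothing elsewhere.
#if defined(__clang__) && SKETCH_HAS_ATTRIBUTE(noinline)
#define SKETCH_NOINLINE [[clang::noinline]]
#elif defined(__GNUC__) && SKETCH_HAS_ATTRIBUTE(noinline)
#define SKETCH_NOINLINE __attribute__((noinline))
#else
#define SKETCH_NOINLINE
#endif

SKETCH_NOINLINE int AddSlowly(int a, int b) {
  return a + b;
}

int main() {
  return AddSlowly(2, 2) == 4 ? 0 : 1;
}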
diff --git a/base/allocator/partition_allocator/partition_alloc_base/component_export.h b/base/allocator/partition_allocator/partition_alloc_base/component_export.h
deleted file mode 100644
index d396aa2..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/component_export.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
-
-// Used to annotate symbols which are exported by the component named
-// |component|. Note that this only does the right thing if the corresponding
-// component target's sources are compiled with |IS_$component_IMPL| defined
-// as 1. For example:
-//
-//   class PA_COMPONENT_EXPORT(FOO) Bar {};
-//
-// If IS_FOO_IMPL=1 at compile time, then Bar will be annotated using the
-// PA_COMPONENT_EXPORT_ANNOTATION macro defined below. Otherwise it will be
-// annotated using the PA_COMPONENT_IMPORT_ANNOTATION macro.
-#define PA_COMPONENT_EXPORT(component)                            \
-  PA_COMPONENT_MACRO_CONDITIONAL_(IS_##component##_IMPL,          \
-                                  PA_COMPONENT_EXPORT_ANNOTATION, \
-                                  PA_COMPONENT_IMPORT_ANNOTATION)
-
-// Indicates whether the current compilation unit is being compiled as part of
-// the implementation of the component named |component|. Expands to |1| if
-// |IS_$component_IMPL| is defined as |1|; expands to |0| otherwise.
-//
-// Note in particular that if |IS_$component_IMPL| is not defined at all, it is
-// still fine to test PA_INSIDE_COMPONENT_IMPL(component), which expands to |0|
-// as expected.
-#define PA_INSIDE_COMPONENT_IMPL(component) \
-  PA_COMPONENT_MACRO_CONDITIONAL_(IS_##component##_IMPL, 1, 0)
-
-// Compiler-specific macros to annotate for export or import of a symbol. No-op
-// in non-component builds. These should not see much if any direct use.
-// Instead use the PA_COMPONENT_EXPORT macro defined above.
-#if defined(COMPONENT_BUILD)
-#if defined(WIN32)
-#define PA_COMPONENT_EXPORT_ANNOTATION __declspec(dllexport)
-#define PA_COMPONENT_IMPORT_ANNOTATION __declspec(dllimport)
-#else  // defined(WIN32)
-#define PA_COMPONENT_EXPORT_ANNOTATION __attribute__((visibility("default")))
-#define PA_COMPONENT_IMPORT_ANNOTATION
-#endif  // defined(WIN32)
-#else   // defined(COMPONENT_BUILD)
-#define PA_COMPONENT_EXPORT_ANNOTATION
-#define PA_COMPONENT_IMPORT_ANNOTATION
-#endif  // defined(COMPONENT_BUILD)
-
-// Below this point are several internal utility macros used for the
-// implementation of the above macros. Not intended for external use.
-
-// Helper for conditional expansion to one of two token strings. If |condition|
-// expands to |1| then this macro expands to |consequent|; otherwise it expands
-// to |alternate|.
-#define PA_COMPONENT_MACRO_CONDITIONAL_(condition, consequent, alternate) \
-  PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_(                              \
-      PA_COMPONENT_MACRO_CONDITIONAL_COMMA_(condition), consequent, alternate)
-
-// MSVC workaround for __VA_ARGS__ expanding into one expression.
-#define PA_MSVC_EXPAND_ARG(arg) arg
-
-// Expands to a comma (,) iff its first argument expands to |1|. Used in
-// conjunction with |PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_()|, as the
-// presence or absence of an extra comma can be used to conditionally shift
-// subsequent argument positions and thus influence which argument is selected.
-#define PA_COMPONENT_MACRO_CONDITIONAL_COMMA_(...) \
-  PA_COMPONENT_MACRO_CONDITIONAL_COMMA_IMPL_(__VA_ARGS__, )
-#define PA_COMPONENT_MACRO_CONDITIONAL_COMMA_IMPL_(x, ...) \
-  PA_COMPONENT_MACRO_CONDITIONAL_COMMA_##x##_
-#define PA_COMPONENT_MACRO_CONDITIONAL_COMMA_1_ ,
-
-// Helper which simply selects its third argument. Used in conjunction with
-// |PA_COMPONENT_MACRO_CONDITIONAL_COMMA_()| above to implement conditional
-// macro expansion.
-#define PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_(...) \
-  PA_MSVC_EXPAND_ARG(                                  \
-      PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_IMPL_(__VA_ARGS__))
-#define PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_IMPL_(a, b, c, ...) c
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
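The conditional-comma machinery deleted above is easiest to follow with a concrete expansion. The sketch below reimplements it with shortened SK_* names and hypothetical IS_FOO_IMPL / IS_BAR_IMPL flags, then checks both branches at compile time; it targets Clang/GCC and omits the MSVC __VA_ARGS__ expansion workaround.

#define SK_SELECT_THIRD_IMPL(a, b, c, ...) c
#define SK_SELECT_THIRD(...) SK_SELECT_THIRD_IMPL(__VA_ARGS__)
#define SK_COMMA_IF_1_IMPL(x, ...) SK_COMMA_##x##_
#define SK_COMMA_IF_1(...) SK_COMMA_IF_1_IMPL(__VA_ARGS__, )
#define SK_COMMA_1_ ,
#define SK_CONDITIONAL(condition, consequent, alternate) \
  SK_SELECT_THIRD(SK_COMMA_IF_1(condition), consequent, alternate)

#define IS_FOO_IMPL 1  // Pretend this translation unit implements FOO.

// When the condition expands to 1, SK_COMMA_IF_1 yields an extra comma, which
// shifts `consequent` into the third argument slot; for any other value
// (including an undefined flag) `alternate` stays third.
static_assert(SK_CONDITIONAL(IS_FOO_IMPL, 10, 20) == 10, "inside the impl");
static_assert(SK_CONDITIONAL(IS_BAR_IMPL, 10, 20) == 20, "outside the impl");

int main() {
  return 0;
}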
diff --git a/base/allocator/partition_allocator/partition_alloc_base/component_export_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/component_export_pa_unittest.cc
deleted file mode 100644
index c9c53d7..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/component_export_pa_unittest.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::base {
-namespace {
-
-using ComponentExportTestPA = testing::Test;
-
-#define IS_TEST_COMPONENT_A_IMPL 1
-#define IS_TEST_COMPONENT_B_IMPL
-#define IS_TEST_COMPONENT_C_IMPL 0
-#define IS_TEST_COMPONENT_D_IMPL 2
-#define IS_TEST_COMPONENT_E_IMPL xyz
-
-TEST(ComponentExportTestPA, ImportExport) {
-  // Defined as 1. Treat as export.
-  EXPECT_EQ(1, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_A));
-
-  // Defined, but empty. Treat as import.
-  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_B));
-
-  // Defined, but 0. Treat as import.
-  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_C));
-
-  // Defined, but some other arbitrary thing that isn't 1. Treat as import.
-  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_D));
-  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_E));
-
-  // Undefined. Treat as import.
-  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_F));
-
-  // And just for good measure, ensure that the macros evaluate properly in the
-  // context of preprocessor #if blocks.
-#if PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_A)
-  EXPECT_TRUE(true);
-#else
-  EXPECT_TRUE(false);
-#endif
-
-#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_B)
-  EXPECT_TRUE(true);
-#else
-  EXPECT_TRUE(false);
-#endif
-
-#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_C)
-  EXPECT_TRUE(true);
-#else
-  EXPECT_TRUE(false);
-#endif
-
-#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_D)
-  EXPECT_TRUE(true);
-#else
-  EXPECT_TRUE(false);
-#endif
-
-#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_E)
-  EXPECT_TRUE(true);
-#else
-  EXPECT_TRUE(false);
-#endif
-
-#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_F)
-  EXPECT_TRUE(true);
-#else
-  EXPECT_TRUE(false);
-#endif
-}
-
-#undef IS_TEST_COMPONENT_A_IMPL
-#undef IS_TEST_COMPONENT_B_IMPL
-#undef IS_TEST_COMPONENT_C_IMPL
-#undef IS_TEST_COMPONENT_D_IMPL
-#undef IS_TEST_COMPONENT_E_IMPL
-
-}  // namespace
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/cpu.cc b/base/allocator/partition_allocator/partition_alloc_base/cpu.cc
deleted file mode 100644
index 97bf240..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/cpu.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-
-#include <inttypes.h>
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
-
-#include <algorithm>
-#include <sstream>
-#include <utility>
-
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_ARM_FAMILY) && \
-    (BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS))
-#include <asm/hwcap.h>
-#include <sys/auxv.h>
-
-// Temporary definitions until a new hwcap.h is pulled in everywhere.
-// https://crbug.com/1265965
-#ifndef HWCAP2_MTE
-#define HWCAP2_MTE (1 << 18)
-#define HWCAP2_BTI (1 << 17)
-#endif
-#endif
-
-#if defined(ARCH_CPU_X86_FAMILY)
-#if defined(COMPILER_MSVC)
-#include <immintrin.h>  // For _xgetbv()
-#include <intrin.h>
-#endif
-#endif
-
-namespace partition_alloc::internal::base {
-
-CPU::CPU() {
-  Initialize();
-}
-CPU::CPU(CPU&&) = default;
-
-namespace {
-
-#if defined(ARCH_CPU_X86_FAMILY)
-#if !defined(COMPILER_MSVC)
-
-#if defined(__pic__) && defined(__i386__)
-
-void __cpuid(int cpu_info[4], int info_type) {
-  __asm__ volatile(
-      "mov %%ebx, %%edi\n"
-      "cpuid\n"
-      "xchg %%edi, %%ebx\n"
-      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
-        "=d"(cpu_info[3])
-      : "a"(info_type), "c"(0));
-}
-
-#else
-
-void __cpuid(int cpu_info[4], int info_type) {
-  __asm__ volatile("cpuid\n"
-                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
-                     "=d"(cpu_info[3])
-                   : "a"(info_type), "c"(0));
-}
-
-#endif
-#endif  // !defined(COMPILER_MSVC)
-
-// xgetbv returns the value of an Intel Extended Control Register (XCR).
-// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
-uint64_t xgetbv(uint32_t xcr) {
-#if defined(COMPILER_MSVC)
-  return _xgetbv(xcr);
-#else
-  uint32_t eax, edx;
-
-  __asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
-  return (static_cast<uint64_t>(edx) << 32) | eax;
-#endif  // defined(COMPILER_MSVC)
-}
-
-#endif  // ARCH_CPU_X86_FAMILY
-
-}  // namespace
-
-void CPU::Initialize() {
-#if defined(ARCH_CPU_X86_FAMILY)
-  int cpu_info[4] = {-1};
-
-  // __cpuid with an InfoType argument of 0 returns the number of
-  // valid Ids in CPUInfo[0] and the CPU identification string in
-  // the other three array elements. The CPU identification string is
-  // not in linear order. The code below arranges the information
-  // in a human readable form. The human readable order is CPUInfo[1] |
-  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
-  // before using memcpy() to copy these three array elements to |cpu_string|.
-  __cpuid(cpu_info, 0);
-  int num_ids = cpu_info[0];
-  std::swap(cpu_info[2], cpu_info[3]);
-
-  // Interpret CPU feature information.
-  if (num_ids > 0) {
-    int cpu_info7[4] = {0};
-    __cpuid(cpu_info, 1);
-    if (num_ids >= 7) {
-      __cpuid(cpu_info7, 7);
-    }
-    signature_ = cpu_info[0];
-    stepping_ = cpu_info[0] & 0xf;
-    type_ = (cpu_info[0] >> 12) & 0x3;
-    has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
-    has_sse_ = (cpu_info[3] & 0x02000000) != 0;
-    has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
-    has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
-    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
-    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
-    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
-    has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
-
-    // "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1."
-    // See https://lwn.net/Articles/301888/
-    // This checks for any hypervisor. Hypervisors may choose not to announce
-    // themselves. Hypervisors trap CPUID and sometimes return different
-    // results than the underlying hardware would.
-    is_running_in_vm_ = (cpu_info[2] & 0x80000000) != 0;
-
-    // AVX instructions will generate an illegal instruction exception unless
-    //   a) they are supported by the CPU,
-    //   b) XSAVE is supported by the CPU and
-    //   c) XSAVE is enabled by the kernel.
-    // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
-    //
-    // In addition, we have observed some crashes with the xgetbv instruction
-    // even after following Intel's example code. (See crbug.com/375968.)
-    // Because of that, we also test the XSAVE bit because its description in
-    // the CPUID documentation suggests that it signals xgetbv support.
-    has_avx_ = (cpu_info[2] & 0x10000000) != 0 &&
-               (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
-               (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
-               (xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
-    has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
-    has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
-    has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
-
-    has_pku_ = (cpu_info7[2] & 0x00000010) != 0;
-  }
-
-  // Get the brand string of the cpu.
-  __cpuid(cpu_info, 0x80000000);
-  const int max_parameter = cpu_info[0];
-
-  static constexpr int kParameterContainingNonStopTimeStampCounter = 0x80000007;
-  if (max_parameter >= kParameterContainingNonStopTimeStampCounter) {
-    __cpuid(cpu_info, kParameterContainingNonStopTimeStampCounter);
-    has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
-  }
-
-  if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) {
-    int cpu_info_hv[4] = {};
-    __cpuid(cpu_info_hv, 0x40000000);
-    if (cpu_info_hv[1] == 0x7263694D &&  // Micr
-        cpu_info_hv[2] == 0x666F736F &&  // osof
-        cpu_info_hv[3] == 0x76482074) {  // t Hv
-      // If CPUID says we have a variant TSC and a hypervisor has identified
-      // itself and the hypervisor says it is Microsoft Hyper-V, then treat
-      // TSC as invariant.
-      //
-      // Microsoft Hyper-V hypervisor reports variant TSC as there are some
-      // scenarios (eg. VM live migration) where the TSC is variant, but for
-      // our purposes we can treat it as invariant.
-      has_non_stop_time_stamp_counter_ = true;
-    }
-  }
-#elif defined(ARCH_CPU_ARM_FAMILY)
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-#if defined(ARCH_CPU_ARM64)
-  // Check for Armv8.5-A BTI/MTE support, exposed via HWCAP2
-  unsigned long hwcap2 = getauxval(AT_HWCAP2);
-  has_mte_ = hwcap2 & HWCAP2_MTE;
-  has_bti_ = hwcap2 & HWCAP2_BTI;
-#endif
-
-#elif BUILDFLAG(IS_WIN)
-  // Windows makes high-resolution thread timing information available in
-  // user-space.
-  has_non_stop_time_stamp_counter_ = true;
-#endif
-#endif
-}
-
-const CPU& CPU::GetInstanceNoAllocation() {
-  static const CPU cpu;
-  return cpu;
-}
-
-}  // namespace partition_alloc::internal::base
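The Hyper-V detection in the deleted Initialize() compares the output of CPUID leaf 0x40000000 against three magic integers. The sketch below shows how those constants decode to the hypervisor vendor string; it assumes a little-endian host, which is the case for the x86 targets this code path covers.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // EBX, ECX and EDX of CPUID leaf 0x40000000 hold twelve ASCII bytes of the
  // hypervisor vendor signature, packed little-endian.
  const uint32_t regs[3] = {0x7263694D, 0x666F736F, 0x76482074};
  char vendor[13];
  std::memcpy(vendor, regs, sizeof(regs));
  vendor[12] = '\0';
  std::printf("%s\n", vendor);  // Prints "Microsoft Hv".
  return 0;
}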
diff --git a/base/allocator/partition_allocator/partition_alloc_base/cpu.h b/base/allocator/partition_allocator/partition_alloc_base/cpu.h
deleted file mode 100644
index 570a2aa..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/cpu.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-// Query information about the processor.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) CPU final {
- public:
-  CPU();
-  CPU(CPU&&);
-  CPU(const CPU&) = delete;
-
-  // Get a preallocated instance of CPU.
-  // This can be used in very early application startup. The instance of CPU is
-  // created without branding, see CPU(bool requires_branding) for details and
-  // implications.
-  static const CPU& GetInstanceNoAllocation();
-
-  enum IntelMicroArchitecture {
-    PENTIUM = 0,
-    SSE = 1,
-    SSE2 = 2,
-    SSE3 = 3,
-    SSSE3 = 4,
-    SSE41 = 5,
-    SSE42 = 6,
-    AVX = 7,
-    AVX2 = 8,
-    FMA3 = 9,
-    MAX_INTEL_MICRO_ARCHITECTURE = 10
-  };
-
-  // Accessors for CPU information.
-  int signature() const { return signature_; }
-  int stepping() const { return stepping_; }
-  int type() const { return type_; }
-  bool has_mmx() const { return has_mmx_; }
-  bool has_sse() const { return has_sse_; }
-  bool has_sse2() const { return has_sse2_; }
-  bool has_sse3() const { return has_sse3_; }
-  bool has_ssse3() const { return has_ssse3_; }
-  bool has_sse41() const { return has_sse41_; }
-  bool has_sse42() const { return has_sse42_; }
-  bool has_popcnt() const { return has_popcnt_; }
-  bool has_avx() const { return has_avx_; }
-  bool has_fma3() const { return has_fma3_; }
-  bool has_avx2() const { return has_avx2_; }
-  bool has_aesni() const { return has_aesni_; }
-  bool has_non_stop_time_stamp_counter() const {
-    return has_non_stop_time_stamp_counter_;
-  }
-  bool is_running_in_vm() const { return is_running_in_vm_; }
-
-  // Armv8.5-A extensions for control flow and memory safety.
-#if defined(ARCH_CPU_ARM_FAMILY)
-  bool has_mte() const { return has_mte_; }
-  bool has_bti() const { return has_bti_; }
-#else
-  constexpr bool has_mte() const { return false; }
-  constexpr bool has_bti() const { return false; }
-#endif
-
-#if defined(ARCH_CPU_X86_FAMILY)
-  // Memory protection key support for user-mode pages
-  bool has_pku() const { return has_pku_; }
-#else
-  constexpr bool has_pku() const { return false; }
-#endif
-
- private:
-  // Query the processor for CPUID information.
-  void Initialize();
-
-  int signature_ = 0;  // raw form of type, family, model, and stepping
-  int type_ = 0;       // process type
-  int stepping_ = 0;   // processor revision number
-  bool has_mmx_ = false;
-  bool has_sse_ = false;
-  bool has_sse2_ = false;
-  bool has_sse3_ = false;
-  bool has_ssse3_ = false;
-  bool has_sse41_ = false;
-  bool has_sse42_ = false;
-  bool has_popcnt_ = false;
-  bool has_avx_ = false;
-  bool has_fma3_ = false;
-  bool has_avx2_ = false;
-  bool has_aesni_ = false;
-#if defined(ARCH_CPU_ARM_FAMILY)
-  bool has_mte_ = false;  // Armv8.5-A MTE (Memory Tagging Extension)
-  bool has_bti_ = false;  // Armv8.5-A BTI (Branch Target Identification)
-#endif
-#if defined(ARCH_CPU_X86_FAMILY)
-  bool has_pku_ = false;
-#endif
-  bool has_non_stop_time_stamp_counter_ = false;
-  bool is_running_in_vm_ = false;
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
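A sketch of how the deleted CPU class was typically consumed: query the preallocated instance once, then branch on the feature accessors. It assumes the header as it existed before this removal; FastPathAvx2() and SlowPath() are hypothetical dispatch targets.

#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"

namespace {

void FastPathAvx2() { /* AVX2-optimized work. */ }
void SlowPath() { /* Portable fallback. */ }

}  // namespace

void DoWork() {
  const partition_alloc::internal::base::CPU& cpu =
      partition_alloc::internal::base::CPU::GetInstanceNoAllocation();
  if (cpu.has_avx2()) {
    FastPathAvx2();
  } else {
    SlowPath();
  }
}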
diff --git a/base/allocator/partition_allocator/partition_alloc_base/cpu_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/cpu_pa_unittest.cc
deleted file mode 100644
index c4c557b..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/cpu_pa_unittest.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc {
-
-// Tests whether we can run extended instructions represented by the CPU
-// information. This test actually executes some extended instructions (such as
-// MMX, SSE, etc.) supported by the CPU and verifies that they run without
-// "undefined instruction" exceptions. That is, the test succeeds if it
-// finishes without crashing.
-TEST(CPUPA, RunExtendedInstructions) {
-  // Retrieve the CPU information.
-  internal::base::CPU cpu;
-#if defined(ARCH_CPU_X86_FAMILY)
-
-  ASSERT_TRUE(cpu.has_mmx());
-  ASSERT_TRUE(cpu.has_sse());
-  ASSERT_TRUE(cpu.has_sse2());
-  ASSERT_TRUE(cpu.has_sse3());
-
-// GCC and clang instruction test.
-#if defined(COMPILER_GCC)
-  // Execute an MMX instruction.
-  __asm__ __volatile__("emms\n" : : : "mm0");
-
-  // Execute an SSE instruction.
-  __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");
-
-  // Execute an SSE 2 instruction.
-  __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");
-
-  // Execute an SSE 3 instruction.
-  __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");
-
-  if (cpu.has_ssse3()) {
-    // Execute a Supplemental SSE 3 instruction.
-    __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
-  }
-
-  if (cpu.has_sse41()) {
-    // Execute an SSE 4.1 instruction.
-    __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
-  }
-
-  if (cpu.has_sse42()) {
-    // Execute an SSE 4.2 instruction.
-    __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
-  }
-
-  if (cpu.has_popcnt()) {
-    // Execute a POPCNT instruction.
-    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
-  }
-
-  if (cpu.has_avx()) {
-    // Execute an AVX instruction.
-    __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
-  }
-
-  if (cpu.has_fma3()) {
-    // Execute a FMA3 instruction.
-    __asm__ __volatile__("vfmadd132ps %%xmm0, %%xmm0, %%xmm0\n" : : : "xmm0");
-  }
-
-  if (cpu.has_avx2()) {
-    // Execute an AVX 2 instruction.
-    __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
-  }
-
-  if (cpu.has_pku()) {
-    // rdpkru
-    uint32_t pkru;
-    __asm__ __volatile__(".byte 0x0f,0x01,0xee\n"
-                         : "=a"(pkru)
-                         : "c"(0), "d"(0));
-  }
-// Visual C 32 bit and ClangCL 32/64 bit test.
-#elif defined(COMPILER_MSVC) &&   \
-    (defined(ARCH_CPU_32_BITS) || \
-     (defined(ARCH_CPU_64_BITS) && defined(__clang__)))
-
-  // Execute an MMX instruction.
-  __asm emms;
-
-  // Execute an SSE instruction.
-  __asm xorps xmm0, xmm0;
-
-  // Execute an SSE 2 instruction.
-  __asm psrldq xmm0, 0;
-
-  // Execute an SSE 3 instruction.
-  __asm addsubpd xmm0, xmm0;
-
-  if (cpu.has_ssse3()) {
-    // Execute a Supplemental SSE 3 instruction.
-    __asm psignb xmm0, xmm0;
-  }
-
-  if (cpu.has_sse41()) {
-    // Execute an SSE 4.1 instruction.
-    __asm pmuldq xmm0, xmm0;
-  }
-
-  if (cpu.has_sse42()) {
-    // Execute an SSE 4.2 instruction.
-    __asm crc32 eax, eax;
-  }
-
-  if (cpu.has_popcnt()) {
-    // Execute a POPCNT instruction.
-    __asm popcnt eax, eax;
-  }
-
-  if (cpu.has_avx()) {
-    // Execute an AVX instruction.
-    __asm vzeroupper;
-  }
-
-  if (cpu.has_fma3()) {
-    // Execute a FMA3 instruction.
-    __asm vfmadd132ps xmm0, xmm0, xmm0;
-  }
-
-  if (cpu.has_avx2()) {
-    // Execute an AVX 2 instruction.
-    __asm vpunpcklbw ymm0, ymm0, ymm0
-  }
-#endif  // defined(COMPILER_GCC)
-#endif  // defined(ARCH_CPU_X86_FAMILY)
-
-#if defined(ARCH_CPU_ARM64)
-  // Check that the CPU is correctly reporting support for the Armv8.5-A memory
-  // tagging extension. The new MTE instructions aren't encoded in NOP space
-  // like BTI/Pointer Authentication and will crash older cores with a SIGILL if
-  // used incorrectly. This test demonstrates how it should be done and that
-  // this approach works.
-  if (cpu.has_mte()) {
-#if !defined(__ARM_FEATURE_MEMORY_TAGGING)
-    // In this section, we're running on an MTE-compatible core, but we're
-    // building this file without MTE support. Fail this test to indicate that
-    // there's a problem with the base/ build configuration.
-    GTEST_FAIL()
-        << "MTE support detected (but base/ built without MTE support)";
-#else
-    char ptr[32];
-    uint64_t val;
-    // Execute a trivial MTE instruction. Normally, MTE should be used via the
-    // intrinsics documented at
-    // https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics,
-    // this test uses the irg (Insert Random Tag) instruction directly to make
-    // sure that it's not optimized out by the compiler.
-    __asm__ __volatile__("irg %0, %1" : "=r"(val) : "r"(ptr));
-#endif  // __ARM_FEATURE_MEMORY_TAGGING
-  }
-#endif  // ARCH_CPU_ARM64
-}
-
-}  // namespace partition_alloc
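The deleted test above also documents the intended usage pattern: construct a CPU object once and gate every optional instruction-set path on the matching accessor. Below is a minimal sketch of that pattern under the same GCC/x86 guards the test uses; PopCount and PopCountScalar are hypothetical helpers, not PartitionAlloc symbols.

#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "build/build_config.h"

namespace {

// Portable fallback used when POPCNT is unavailable.
uint32_t PopCountScalar(uint32_t v) {
  uint32_t n = 0;
  for (; v != 0; v &= v - 1)
    ++n;
  return n;
}

uint32_t PopCount(uint32_t v) {
#if defined(ARCH_CPU_X86_FAMILY) && defined(COMPILER_GCC)
  // CPUID is queried once; subsequent accessor calls are plain bool reads.
  static const partition_alloc::internal::base::CPU cpu;
  if (cpu.has_popcnt()) {
    uint32_t out;
    // Only reached on cores that report POPCNT, mirroring the test above.
    __asm__ __volatile__("popcnt %1, %0" : "=r"(out) : "r"(v));
    return out;
  }
#endif
  return PopCountScalar(v);
}

}  // namespace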
diff --git a/base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h b/base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h
deleted file mode 100644
index a0bb2e4..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
-
-namespace partition_alloc::internal::base {
-
-// std::is_constant_evaluated was introduced in C++20. PartitionAlloc's minimum
-// supported C++ version is C++17.
-#if defined(__cpp_lib_is_constant_evaluated) && \
-    __cpp_lib_is_constant_evaluated >= 201811L
-
-#include <type_traits>
-using std::is_constant_evaluated;
-
-#else
-
-// Implementation of C++20's std::is_constant_evaluated.
-//
-// References:
-// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
-// - https://wg21.link/meta.const.eval
-constexpr bool is_constant_evaluated() noexcept {
-  return __builtin_is_constant_evaluated();
-}
-
-#endif
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
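Either branch above exposes a single base::is_constant_evaluated() entry point. A minimal sketch of the usual pattern it enables, splitting a constexpr function into compile-time and runtime paths, follows; Strlen is a hypothetical helper, not a PartitionAlloc symbol.

#include <cstddef>
#include <cstring>

#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"

namespace {

constexpr size_t Strlen(const char* s) {
  if (partition_alloc::internal::base::is_constant_evaluated()) {
    // Constant evaluation: no library calls allowed, walk the string by hand.
    size_t n = 0;
    while (s[n] != '\0')
      ++n;
    return n;
  }
  // Runtime: defer to the (typically vectorized) libc implementation.
  return std::strlen(s);
}

static_assert(Strlen("abc") == 3, "usable in constant expressions");

}  // namespace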
diff --git a/base/allocator/partition_allocator/partition_alloc_base/debug/alias.cc b/base/allocator/partition_allocator/partition_alloc_base/debug/alias.cc
deleted file mode 100644
index 8f6229c..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/debug/alias.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-
-namespace partition_alloc::internal::base::debug {
-
-// This file/function should be excluded from LTO/LTCG to ensure that the
-// compiler can't see this function's implementation when compiling calls to it.
-PA_NOINLINE void Alias(const void* var) {}
-
-}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/partition_alloc_base/debug/alias.h b/base/allocator/partition_allocator/partition_alloc_base/debug/alias.h
deleted file mode 100644
index ae4e0c6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/debug/alias.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base::debug {
-
-// Make the optimizer think that |var| is aliased. This can be used to inhibit
-// three different kinds of optimizations:
-//
-// Case #1: Prevent a local variable from being optimized out if it would not
-// otherwise be live at the point of a potential crash. This can only be done
-// with local variables, not globals, object members, or function return values
-// - these must be copied to locals if you want to ensure they are recorded in
-// crash dumps. Function arguments are fine to use since the
-// base::debug::Alias() call on them will make sure they are copied to the stack
-// even if they were passed in a register. Note that if the local variable is a
-// pointer then its value will be retained but the memory that it points to will
-// probably not be saved in the crash dump - by default only stack memory is
-// saved. Therefore the aliasing technique is usually only worthwhile with
-// non-pointer variables. If you have a pointer to an object and you want to
-// retain the object's state you need to copy the object or its fields to local
-// variables.
-//
-// Example usage:
-//   int last_error = err_;
-//   base::debug::Alias(&last_error);
-//   char name_copy[16];
-//   strncpy(name_copy, p->name, sizeof(name_copy)-1);
-//   name_copy[sizeof(name_copy)-1] = '\0';
-//   base::debug::Alias(name_copy);
-//   CHECK(false);
-//
-// Case #2: Prevent a tail call into a function. This is useful to make sure the
-// function containing the call to base::debug::Alias() will be present in the
-// call stack. In this case there is no memory that needs to be on
-// the stack so we can use nullptr. The call to base::debug::Alias() needs to
-// happen after the call that is suspected to be tail called. Note: This
-// technique will prevent tail calls at the specific call site only. To prevent
-// them for all invocations of a function look at PA_NOT_TAIL_CALLED.
-//
-// Example usage:
-//   PA_NOINLINE void Foo(){
-//     ... code ...
-//
-//     Bar();
-//     base::debug::Alias(nullptr);
-//   }
-//
-// Case #3: Prevent code folding of a non-unique function. Code folding can
-// cause the same address to be assigned to different functions if they are
-// identical. If finding the precise signature of a function in the call-stack
-// is important and it's suspected the function is identical to other functions
-// it can be made unique using PA_NO_CODE_FOLDING which is a wrapper around
-// base::debug::Alias();
-//
-// Example usage:
-//   PA_NOINLINE void Foo(){
-//     PA_NO_CODE_FOLDING();
-//     Bar();
-//   }
-//
-// Finally please note that these effects compound. This means that saving a
-// stack variable (case #1) using base::debug::Alias() will also inhibit
-// tail calls for calls in earlier lines and prevent code folding.
-
-void PA_COMPONENT_EXPORT(PARTITION_ALLOC) Alias(const void* var);
-
-}  // namespace partition_alloc::internal::base::debug
-
-// Code folding is a linker optimization whereby the linker identifies functions
-// that are bit-identical and overlays them. This saves space but it leads to
-// confusing call stacks because multiple symbols are at the same address and
-// it is unpredictable which one will be displayed. Disabling of code folding is
-// particularly useful when function names are used as signatures in crashes.
-// This macro doesn't guarantee that code folding will be prevented but it
-// greatly reduces the odds and always prevents it within one source file.
-// If using in a function that terminates the process it is safest to put the
-// PA_NO_CODE_FOLDING macro at the top of the function.
-// Use like:
-//   void FooBarFailure(size_t size) { PA_NO_CODE_FOLDING(); OOM_CRASH(size); }
-#define PA_NO_CODE_FOLDING()        \
-  const int line_number = __LINE__; \
-  ::partition_alloc::internal::base::debug::Alias(&line_number)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
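A condensed sketch that combines the three cases documented above in one function; ReportFatalState, Bar(), and the 16-byte name buffer are illustrative only.

#include <string.h>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"

namespace pa_base = partition_alloc::internal::base;

void Bar() {}  // Stand-in for a callee suspected of being tail-called.

PA_NOINLINE void ReportFatalState(int err, const char* name) {
  // Case #3: keep this function from being folded with bit-identical ones.
  PA_NO_CODE_FOLDING();

  // Case #1: copy interesting state into locals so it lands in the crash dump.
  int last_error = err;
  pa_base::debug::Alias(&last_error);
  char name_copy[16];
  strncpy(name_copy, name, sizeof(name_copy) - 1);
  name_copy[sizeof(name_copy) - 1] = '\0';
  pa_base::debug::Alias(name_copy);

  Bar();
  // Case #2: keep ReportFatalState on the stack above Bar() (no tail call).
  pa_base::debug::Alias(nullptr);
}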
diff --git a/base/allocator/partition_allocator/partition_alloc_base/export_template.h b/base/allocator/partition_allocator/partition_alloc_base/export_template.h
deleted file mode 100644
index d728e57..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/export_template.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_EXPORT_TEMPLATE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_EXPORT_TEMPLATE_H_
-
-// Synopsis
-//
-// This header provides macros for using PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-// macros with explicit template instantiation declarations and definitions.
-// Generally, the PA_COMPONENT_EXPORT(PARTITION_ALLOC) macros are used at
-// declarations, and GCC requires them to be used at explicit instantiation
-// declarations, but MSVC requires __declspec(dllexport) to be used at the
-// explicit instantiation definitions instead.
-
-// Usage
-//
-// In a header file, write:
-//
-//   extern template class
-//   PA_EXPORT_TEMPLATE_DECLARE(PA_COMPONENT_EXPORT(PARTITION_ALLOC)) foo<bar>;
-//
-// In a source file, write:
-//
-//   template class
-//   PA_EXPORT_TEMPLATE_DEFINE(PA_COMPONENT_EXPORT(PARTITION_ALLOC)) foo<bar>;
-
-// Implementation notes
-//
-// On Windows, when PA_COMPONENT_EXPORT(PARTITION_ALLOC) expands to
-// __declspec(dllexport), we want the two lines to expand to:
-//
-//     extern template class foo<bar>;
-//     template class PA_COMPONENT_EXPORT(PARTITION_ALLOC) foo<bar>;
-//
-// In all other cases (non-Windows, and Windows when
-// PA_COMPONENT_EXPORT(PARTITION_ALLOC) expands to
-// __declspec(dllimport)), we want:
-//
-//     extern template class PA_COMPONENT_EXPORT(PARTITION_ALLOC) foo<bar>;
-//     template class foo<bar>;
-//
-// The implementation of this header uses some subtle macro semantics to
-// detect what the provided PA_COMPONENT_EXPORT(PARTITION_ALLOC) value was
-// defined as and then to dispatch to appropriate macro definitions.
-// Unfortunately, MSVC's C preprocessor is rather non-compliant and requires
-// special care to make it work.
-//
-// Issue 1.
-//
-//   #define F(x)
-//   F()
-//
-// MSVC emits warning C4003 ("not enough actual parameters for macro
-// 'F'"), even though it's a valid macro invocation.  This affects the
-// macros below that take just an "export" parameter, because export
-// may be empty.
-//
-// As a workaround, we can add a dummy parameter and arguments:
-//
-//   #define F(x,_)
-//   F(,)
-//
-// Issue 2.
-//
-//   #define F(x) G##x
-//   #define Gj() ok
-//   F(j())
-//
-// The correct replacement for "F(j())" is "ok", but MSVC replaces it
-// with "Gj()".  As a workaround, we can pass the result to an
-// identity macro to force MSVC to look for replacements again.  (This
-// is why PA_EXPORT_TEMPLATE_STYLE_3 exists.)
-
-#define PA_EXPORT_TEMPLATE_DECLARE(export)                               \
-  PA_EXPORT_TEMPLATE_INVOKE(DECLARE, PA_EXPORT_TEMPLATE_STYLE(export, ), \
-                            export)  // NOLINT
-#define PA_EXPORT_TEMPLATE_DEFINE(export)                               \
-  PA_EXPORT_TEMPLATE_INVOKE(DEFINE, PA_EXPORT_TEMPLATE_STYLE(export, ), \
-                            export)  // NOLINT
-
-// INVOKE is an internal helper macro to perform parameter replacements
-// and token pasting to chain invoke another macro.  E.g.,
-//     PA_EXPORT_TEMPLATE_INVOKE(DECLARE, DEFAULT, PA_EXPORT)
-// will expand to call
-//     PA_EXPORT_TEMPLATE_DECLARE_DEFAULT(PA_EXPORT, )
-// (but with PA_COMPONENT_EXPORT(PARTITION_ALLOC) expanded too).
-#define PA_EXPORT_TEMPLATE_INVOKE(which, style, export) \
-  PA_EXPORT_TEMPLATE_INVOKE_2(which, style, export)
-#define PA_EXPORT_TEMPLATE_INVOKE_2(which, style, export) \
-  PA_EXPORT_TEMPLATE_##which##_##style(export, )
-
-// Default style is to apply the PA_COMPONENT_EXPORT(PARTITION_ALLOC) macro at
-// declaration sites.
-#define PA_EXPORT_TEMPLATE_DECLARE_DEFAULT(export, _) export
-#define PA_EXPORT_TEMPLATE_DEFINE_DEFAULT(export, _)
-
-// The "MSVC hack" style is used when PA_COMPONENT_EXPORT(PARTITION_ALLOC) is
-// defined as __declspec(dllexport), which MSVC requires to be used at
-// definition sites instead.
-#define PA_EXPORT_TEMPLATE_DECLARE_EXPORT_DLLEXPORT(export, _)
-#define PA_EXPORT_TEMPLATE_DEFINE_EXPORT_DLLEXPORT(export, _) export
-
-// PA_EXPORT_TEMPLATE_STYLE is an internal helper macro that identifies which
-// export style needs to be used for the provided
-// PA_COMPONENT_EXPORT(PARTITION_ALLOC) macro definition.
-// "", "__attribute__(...)", and "__declspec(dllimport)" are mapped
-// to "DEFAULT"; while "__declspec(dllexport)" is mapped to "MSVC_HACK".
-//
-// It's implemented with token pasting to transform the __attribute__ and
-// __declspec annotations into macro invocations.  E.g., if
-// PA_COMPONENT_EXPORT(PARTITION_ALLOC) is defined as "__declspec(dllimport)",
-// it undergoes the following sequence of macro substitutions:
-//     PA_EXPORT_TEMPLATE_STYLE(PA_EXPORT,)
-//     PA_EXPORT_TEMPLATE_STYLE_2(__declspec(dllimport),)
-//     PA_EXPORT_TEMPLATE_STYLE_3(
-//         PA_EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport))
-//     PA_EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport)
-//     PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport
-//     DEFAULT
-#define PA_EXPORT_TEMPLATE_STYLE(export, _) PA_EXPORT_TEMPLATE_STYLE_2(export, )
-#define PA_EXPORT_TEMPLATE_STYLE_2(export, _) \
-  PA_EXPORT_TEMPLATE_STYLE_3(                 \
-      PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA##export)
-#define PA_EXPORT_TEMPLATE_STYLE_3(style) style
-
-// Internal helper macros for PA_EXPORT_TEMPLATE_STYLE.
-//
-// XXX: C++ reserves all identifiers containing "__" for the implementation,
-// but "__attribute__" and "__declspec" already contain "__" and the token-paste
-// operator can only add characters; not remove them.  To minimize the risk of
-// conflict with implementations, we include "foj3FJo5StF0OvIzl7oMxA" (a random
-// 128-bit string, encoded in Base64) in the macro name.
-#define PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA DEFAULT
-#define PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__attribute__( \
-    ...)                                                                    \
-  DEFAULT
-#define PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__declspec(arg) \
-  PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_##arg
-
-// Internal helper macros for PA_EXPORT_TEMPLATE_STYLE.
-#define PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllexport EXPORT_DLLEXPORT
-#define PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport DEFAULT
-
-// Sanity checks.
-//
-// PA_EXPORT_TEMPLATE_TEST uses the same macro invocation pattern as
-// PA_EXPORT_TEMPLATE_DECLARE and PA_EXPORT_TEMPLATE_DEFINE do to check that
-// they're working correctly. When they're working correctly, the sequence of
-// macro replacements should go something like:
-//
-//     PA_EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
-//
-//     static_assert(PA_EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
-//         PA_EXPORT_TEMPLATE_STYLE(__declspec(dllimport), ),
-//         __declspec(dllimport)), "__declspec(dllimport)");
-//
-//     static_assert(PA_EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
-//         DEFAULT, __declspec(dllimport)), "__declspec(dllimport)");
-//
-//     static_assert(PA_EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(
-//         __declspec(dllimport)), "__declspec(dllimport)");
-//
-//     static_assert(true, "__declspec(dllimport)");
-//
-// When they're not working correctly, a syntax error should occur instead.
-#define PA_EXPORT_TEMPLATE_TEST(want, export)                                 \
-  static_assert(PA_EXPORT_TEMPLATE_INVOKE(                                    \
-                    TEST_##want, PA_EXPORT_TEMPLATE_STYLE(export, ), export), \
-                #export)  // NOLINT
-#define PA_EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(...) true
-#define PA_EXPORT_TEMPLATE_TEST_EXPORT_DLLEXPORT_EXPORT_DLLEXPORT(...) true
-
-PA_EXPORT_TEMPLATE_TEST(DEFAULT, );  // NOLINT
-PA_EXPORT_TEMPLATE_TEST(DEFAULT, __attribute__((visibility("default"))));
-PA_EXPORT_TEMPLATE_TEST(EXPORT_DLLEXPORT, __declspec(dllexport));
-PA_EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
-
-#undef PA_EXPORT_TEMPLATE_TEST
-#undef PA_EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT
-#undef PA_EXPORT_TEMPLATE_TEST_EXPORT_DLLEXPORT_EXPORT_DLLEXPORT
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_EXPORT_TEMPLATE_H_
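Putting the two usage lines from the synopsis together for a concrete instantiation; MyList, my_list.h, and my_list.cc are hypothetical, not PartitionAlloc names.

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/export_template.h"

// In the header (hypothetical my_list.h): define the template and declare the
// shared instantiation so other translation units do not instantiate it again.
template <typename T>
class MyList {
 public:
  void push_back(const T&) {}
};

extern template class PA_EXPORT_TEMPLATE_DECLARE(
    PA_COMPONENT_EXPORT(PARTITION_ALLOC)) MyList<int>;

// In exactly one source file (hypothetical my_list.cc): the exported
// definition. On Windows component builds this is where __declspec(dllexport)
// ends up; everywhere else the annotation stays on the declaration above.
template class PA_EXPORT_TEMPLATE_DEFINE(
    PA_COMPONENT_EXPORT(PARTITION_ALLOC)) MyList<int>;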
diff --git a/base/allocator/partition_allocator/partition_alloc_base/files/file_path.cc b/base/allocator/partition_allocator/partition_alloc_base/files/file_path.cc
deleted file mode 100644
index 7a11fdd..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/files/file_path.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
-
-#include <string.h>
-#include <algorithm>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#elif BUILDFLAG(IS_APPLE)
-#include <CoreFoundation/CoreFoundation.h>
-#endif
-
-namespace partition_alloc::internal::base {
-
-using StringType = FilePath::StringType;
-const FilePath::CharType kStringTerminator = PA_FILE_PATH_LITERAL('\0');
-
-// If this FilePath contains a drive letter specification, returns the
-// position of the last character of the drive letter specification,
-// otherwise returns npos.  This can only be true on Windows, when a pathname
-// begins with a letter followed by a colon.  On other platforms, this always
-// returns npos.
-StringType::size_type FindDriveLetter(const StringType& path) {
-#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
-  // This is dependent on an ASCII-based character set, but that's a
-  // reasonable assumption.  iswalpha can be too inclusive here.
-  if (path.length() >= 2 && path[1] == L':' &&
-      ((path[0] >= L'A' && path[0] <= L'Z') ||
-       (path[0] >= L'a' && path[0] <= L'z'))) {
-    return 1;
-  }
-#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
-  return StringType::npos;
-}
-
-bool IsPathAbsolute(const StringType& path) {
-#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
-  StringType::size_type letter = FindDriveLetter(path);
-  if (letter != StringType::npos) {
-    // Look for a separator right after the drive specification.
-    return path.length() > letter + 1 &&
-           FilePath::IsSeparator(path[letter + 1]);
-  }
-  // Look for a pair of leading separators.
-  return path.length() > 1 && FilePath::IsSeparator(path[0]) &&
-         FilePath::IsSeparator(path[1]);
-#else   // PA_FILE_PATH_USES_DRIVE_LETTERS
-  // Look for a separator in the first position.
-  return path.length() > 0 && FilePath::IsSeparator(path[0]);
-#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
-}
-
-FilePath::FilePath() = default;
-
-FilePath::FilePath(const FilePath& that) = default;
-FilePath::FilePath(FilePath&& that) noexcept = default;
-
-FilePath::FilePath(const StringType& path) : path_(path) {
-  StringType::size_type nul_pos = path_.find(kStringTerminator);
-  if (nul_pos != StringType::npos)
-    path_.erase(nul_pos, StringType::npos);
-}
-
-FilePath::~FilePath() = default;
-
-FilePath& FilePath::operator=(const FilePath& that) = default;
-
-FilePath& FilePath::operator=(FilePath&& that) noexcept = default;
-
-// static
-bool FilePath::IsSeparator(CharType character) {
-  for (size_t i = 0; i < kSeparatorsLength - 1; ++i) {
-    if (character == kSeparators[i]) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-FilePath FilePath::Append(const StringType& component) const {
-  StringType appended = component;
-  StringType without_nuls;
-
-  StringType::size_type nul_pos = component.find(kStringTerminator);
-  if (nul_pos != StringType::npos) {
-    without_nuls = component.substr(0, nul_pos);
-    appended = without_nuls;
-  }
-
-  PA_BASE_DCHECK(!IsPathAbsolute(appended));
-
-  if (path_.compare(kCurrentDirectory) == 0 && !appended.empty()) {
-    // Append normally doesn't do any normalization, but as a special case,
-    // when appending to kCurrentDirectory, just return a new path for the
-    // component argument.  Appending component to kCurrentDirectory would
-    // serve no purpose other than needlessly lengthening the path, and
-    // it's likely in practice to wind up with FilePath objects containing
-    // only kCurrentDirectory when calling DirName on a single relative path
-    // component.
-    return FilePath(appended);
-  }
-
-  FilePath new_path(path_);
-  new_path.StripTrailingSeparatorsInternal();
-
-  // Don't append a separator if the path is empty (indicating the current
-  // directory) or if the path component is empty (indicating nothing to
-  // append).
-  if (!appended.empty() && !new_path.path_.empty()) {
-    // Don't append a separator if the path still ends with a trailing
-    // separator after stripping (indicating the root directory).
-    if (!IsSeparator(new_path.path_.back())) {
-      // Don't append a separator if the path is just a drive letter.
-      if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
-        new_path.path_.append(1, kSeparators[0]);
-      }
-    }
-  }
-
-  new_path.path_.append(appended);
-  return new_path;
-}
-
-FilePath FilePath::Append(const FilePath& component) const {
-  return Append(component.value());
-}
-
-void FilePath::StripTrailingSeparatorsInternal() {
-  // If there is no drive letter, start will be 1, which will prevent stripping
-  // the leading separator if there is only one separator.  If there is a drive
-  // letter, start will be set appropriately to prevent stripping the first
-  // separator following the drive letter, if a separator immediately follows
-  // the drive letter.
-  StringType::size_type start = FindDriveLetter(path_) + 2;
-
-  StringType::size_type last_stripped = StringType::npos;
-  for (StringType::size_type pos = path_.length();
-       pos > start && IsSeparator(path_[pos - 1]); --pos) {
-    // If the string only has two separators and they're at the beginning,
-    // don't strip them, unless the string began with more than two separators.
-    if (pos != start + 1 || last_stripped == start + 2 ||
-        !IsSeparator(path_[start - 1])) {
-      path_.resize(pos - 1);
-      last_stripped = pos;
-    }
-  }
-}
-
-}  // namespace partition_alloc::internal::base
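A short sketch of the Append() behavior implemented above, assuming a POSIX-style build where the canonical separator is '/'; the expected values mirror the unit tests removed later in this change, and AppendExamples is illustrative only.

#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"

namespace pa_base = partition_alloc::internal::base;

void AppendExamples() {
  // Appending to kCurrentDirectory returns just the component, per the special
  // case in Append() above: value() == "ff".
  pa_base::FilePath dot(PA_FILE_PATH_LITERAL("."));
  pa_base::FilePath ff = dot.Append(PA_FILE_PATH_LITERAL("ff"));

  // Trailing separators are stripped before the canonical one is re-added:
  // value() == "/aa/bb/cc".
  pa_base::FilePath dir(PA_FILE_PATH_LITERAL("/aa/bb/"));
  pa_base::FilePath cc = dir.Append(PA_FILE_PATH_LITERAL("cc"));

  // Embedded NULs are truncated, as in the constructor: value() == "/aa/bb/cc".
  pa_base::FilePath nul = dir.Append(
      pa_base::FilePath::StringType(PA_FILE_PATH_LITERAL("cc\0dd"), 5));
}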
diff --git a/base/allocator/partition_allocator/partition_alloc_base/files/file_path.h b/base/allocator/partition_allocator/partition_alloc_base/files/file_path.h
deleted file mode 100644
index 3fe2e81..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/files/file_path.h
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// FilePath is a container for pathnames stored in a platform's native string
-// type, providing containers for manipulation in accordance with the
-// platform's conventions for pathnames.  It supports the following path
-// types:
-//
-//                   POSIX            Windows
-//                   ---------------  ----------------------------------
-// Fundamental type  char[]           wchar_t[]
-// Encoding          unspecified*     UTF-16
-// Separator         /                \, tolerant of /
-// Drive letters     no               case-insensitive A-Z followed by :
-// Alternate root    // (surprise!)   \\ (2 Separators), for UNC paths
-//
-// * The encoding need not be specified on POSIX systems, although some
-//   POSIX-compliant systems do specify an encoding.  Mac OS X uses UTF-8.
-//   Chrome OS also uses UTF-8.
-//   Linux does not specify an encoding, but in practice, the locale's
-//   character set may be used.
-//
-// For more arcane bits of path trivia, see below.
-//
-// FilePath objects are intended to be used anywhere paths are.  An
-// application may pass FilePath objects around internally, masking the
-// underlying differences between systems, only differing in implementation
-// where interfacing directly with the system.  For example, a single
-// OpenFile(const FilePath &) function may be made available, allowing all
-// callers to operate without regard to the underlying implementation.  On
-// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
-// wrap _wfopen_s, perhaps both by calling file_path.value().c_str().  This
-// allows each platform to pass pathnames around without requiring conversions
-// between encodings, which has an impact on performance, but more importantly,
-// has an impact on correctness on platforms that do not have well-defined
-// encodings for pathnames.
-//
-// Several methods are available to perform common operations on a FilePath
-// object, such as determining the parent directory (DirName), isolating the
-// final path component (BaseName), and appending a relative pathname string
-// to an existing FilePath object (Append).  These methods are highly
-// recommended over attempting to split and concatenate strings directly.
-// These methods are based purely on string manipulation and knowledge of
-// platform-specific pathname conventions, and do not consult the filesystem
-// at all, making them safe to use without fear of blocking on I/O operations.
-// These methods do not function as mutators but instead return distinct
-// instances of FilePath objects, and are therefore safe to use on const
-// objects.  The objects themselves are safe to share between threads.
-//
-// To aid in initialization of FilePath objects from string literals, a
-// FILE_PATH_LITERAL macro is provided, which accounts for the difference
-// between char[]-based pathnames on POSIX systems and wchar_t[]-based
-// pathnames on Windows.
-//
-// As a precaution against premature truncation, paths can't contain NULs.
-//
-// Because a FilePath object should not be instantiated at the global scope,
-// use a FilePath::CharType[] instead and initialize it with
-// FILE_PATH_LITERAL.  At runtime, a FilePath object can be created from the
-// character array.  Example:
-//
-// | const FilePath::CharType kLogFileName[] = FILE_PATH_LITERAL("log.txt");
-// |
-// | void Function() {
-// |   FilePath log_file_path(kLogFileName);
-// |   [...]
-// | }
-//
-// WARNING: FilePaths should ALWAYS be displayed with LTR directionality, even
-// when the UI language is RTL. This means you always need to pass filepaths
-// through base::i18n::WrapPathWithLTRFormatting() before displaying them in the
-// RTL UI.
-//
-// This is a very common source of bugs, please try to keep this in mind.
-//
-// ARCANE BITS OF PATH TRIVIA
-//
-//  - A double leading slash is actually part of the POSIX standard.  Systems
-//    are allowed to treat // as an alternate root, as Windows does for UNC
-//    (network share) paths.  Most POSIX systems don't do anything special
-//    with two leading slashes, but FilePath handles this case properly
-//    in case it ever comes across such a system.  FilePath needs this support
-//    for Windows UNC paths, anyway.
-//    References:
-//    The Open Group Base Specifications Issue 7, sections 3.267 ("Pathname")
-//    and 4.12 ("Pathname Resolution"), available at:
-//    http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_267
-//    http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
-//
-//  - Windows treats c:\\ the same way it treats \\.  This was intended to
-//    allow older applications that require drive letters to support UNC paths
-//    like \\server\share\path, by permitting c:\\server\share\path as an
-//    equivalent.  Since the OS treats these paths specially, FilePath needs
-//    to do the same.  Since Windows can use either / or \ as the separator,
-//    FilePath treats c://, c:\\, //, and \\ all equivalently.
-//    Reference:
-//    The Old New Thing, "Why is a drive letter permitted in front of UNC
-//    paths (sometimes)?", available at:
-//    http://blogs.msdn.com/oldnewthing/archive/2005/11/22/495740.aspx
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
-
-#include <cstddef>
-#include <iosfwd>
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-// Windows-style drive letter support and pathname separator characters can be
-// enabled and disabled independently, to aid testing.  These #defines are
-// here so that the same setting can be used in both the implementation and
-// in the unit test.
-#if BUILDFLAG(IS_WIN)
-#define PA_FILE_PATH_USES_DRIVE_LETTERS
-#define PA_FILE_PATH_USES_WIN_SEPARATORS
-#endif  // BUILDFLAG(IS_WIN)
-
-// Macros for string literal initialization of FilePath::CharType[].
-#if BUILDFLAG(IS_WIN)
-#define PA_FILE_PATH_LITERAL(x) L##x
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#define PA_FILE_PATH_LITERAL(x) x
-#endif  // BUILDFLAG(IS_WIN)
-
-namespace partition_alloc::internal::base {
-
-// An abstraction to isolate users from the differences between native
-// pathnames on different platforms.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) FilePath {
- public:
-#if BUILDFLAG(IS_WIN)
-  // On Windows, for Unicode-aware applications, native pathnames are wchar_t
-  // arrays encoded in UTF-16.
-  typedef std::wstring StringType;
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  // On most platforms, native pathnames are char arrays, and the encoding
-  // may or may not be specified.  On Mac OS X, native pathnames are encoded
-  // in UTF-8.
-  typedef std::string StringType;
-#endif  // BUILDFLAG(IS_WIN)
-
-  typedef StringType::value_type CharType;
-
-  // Null-terminated array of separators used to separate components in paths.
-  // Each character in this array is a valid separator, but kSeparators[0] is
-  // treated as the canonical separator and is used when composing pathnames.
-  static constexpr CharType kSeparators[] =
-#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
-      PA_FILE_PATH_LITERAL("\\/");
-#else   // PA_FILE_PATH_USES_WIN_SEPARATORS
-      PA_FILE_PATH_LITERAL("/");
-#endif  // PA_FILE_PATH_USES_WIN_SEPARATORS
-
-  // std::size(kSeparators), i.e., the number of separators in kSeparators plus
-  // one (the null terminator at the end of kSeparators).
-  static constexpr size_t kSeparatorsLength = std::size(kSeparators);
-
-  // The special path component meaning "this directory."
-  static constexpr CharType kCurrentDirectory[] = PA_FILE_PATH_LITERAL(".");
-
-  // The special path component meaning "the parent directory."
-  static constexpr CharType kParentDirectory[] = PA_FILE_PATH_LITERAL("..");
-
-  // The character used to identify a file extension.
-  static constexpr CharType kExtensionSeparator = PA_FILE_PATH_LITERAL('.');
-
-  FilePath();
-  FilePath(const FilePath& that);
-  explicit FilePath(const StringType& that);
-  ~FilePath();
-  FilePath& operator=(const FilePath& that);
-
-  // Constructs FilePath with the contents of |that|, which is left in valid but
-  // unspecified state.
-  FilePath(FilePath&& that) noexcept;
-  // Replaces the contents with those of |that|, which is left in valid but
-  // unspecified state.
-  FilePath& operator=(FilePath&& that) noexcept;
-
-  // Required for some STL containers and operations
-  bool operator<(const FilePath& that) const { return path_ < that.path_; }
-
-  const StringType& value() const { return path_; }
-
-  [[nodiscard]] bool empty() const { return path_.empty(); }
-
-  void clear() { path_.clear(); }
-
-  // Returns true if |character| is in kSeparators.
-  static bool IsSeparator(CharType character);
-
-  // Returns a FilePath by appending a separator and the supplied path
-  // component to this object's path.  Append takes care to avoid adding
-  // excessive separators if this object's path already ends with a separator.
-  // If this object's path is kCurrentDirectory, a new FilePath corresponding
-  // only to |component| is returned.  |component| must be a relative path;
-  // it is an error to pass an absolute path.
-  [[nodiscard]] FilePath Append(const FilePath& component) const;
-  [[nodiscard]] FilePath Append(const StringType& component) const;
-
- private:
-  // Remove trailing separators from this object.  If the path is absolute, it
-  // will never be stripped any more than to refer to the absolute root
-  // directory, so "////" will become "/", not "".  A leading pair of
-  // separators is never stripped, to support alternate roots.  This is used to
-  // support UNC paths on Windows.
-  void StripTrailingSeparatorsInternal();
-
-  StringType path_;
-};
-
-}  // namespace partition_alloc::internal::base
-
-namespace std {
-
-template <>
-struct hash<::partition_alloc::internal::base::FilePath> {
-  typedef ::partition_alloc::internal::base::FilePath argument_type;
-  typedef std::size_t result_type;
-  result_type operator()(argument_type const& f) const {
-    return hash<::partition_alloc::internal::base::FilePath::StringType>()(
-        f.value());
-  }
-};
-
-}  // namespace std
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/files/file_path_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/files/file_path_pa_unittest.cc
deleted file mode 100644
index 5542132..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/files/file_path_pa_unittest.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
-
-#include <stddef.h>
-
-#include <sstream>
-
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// This macro helps avoid wrapped lines in the test structs.
-#define FPL(x) PA_FILE_PATH_LITERAL(x)
-
-// This macro constructs strings which can contain NULs.
-#define FPS(x) FilePath::StringType(FPL(x), std::size(FPL(x)) - 1)
-
-namespace partition_alloc::internal::base {
-
-struct UnaryTestData {
-  FilePath::StringType input;
-  FilePath::StringType expected;
-};
-
-struct UnaryBooleanTestData {
-  FilePath::StringType input;
-  bool expected;
-};
-
-struct BinaryTestData {
-  FilePath::StringType inputs[2];
-  FilePath::StringType expected;
-};
-
-struct BinaryBooleanTestData {
-  FilePath::StringType inputs[2];
-  bool expected;
-};
-
-struct BinaryIntTestData {
-  FilePath::StringType inputs[2];
-  int expected;
-};
-
-TEST(PartitionAllocBaseFilePathTest, Append) {
-  const struct BinaryTestData cases[] = {
-    {{FPL(""), FPL("cc")}, FPL("cc")},
-    {{FPL("."), FPL("ff")}, FPL("ff")},
-    {{FPL("."), FPL("")}, FPL(".")},
-    {{FPL("/"), FPL("cc")}, FPL("/cc")},
-    {{FPL("/aa"), FPL("")}, FPL("/aa")},
-    {{FPL("/aa/"), FPL("")}, FPL("/aa")},
-    {{FPL("//aa"), FPL("")}, FPL("//aa")},
-    {{FPL("//aa/"), FPL("")}, FPL("//aa")},
-    {{FPL("//"), FPL("aa")}, FPL("//aa")},
-#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
-    {{FPL("c:"), FPL("a")}, FPL("c:a")},
-    {{FPL("c:"), FPL("")}, FPL("c:")},
-    {{FPL("c:/"), FPL("a")}, FPL("c:/a")},
-    {{FPL("c://"), FPL("a")}, FPL("c://a")},
-    {{FPL("c:///"), FPL("a")}, FPL("c:/a")},
-#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
-#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
-    // Append introduces the default separator character, so these test cases
-    // need to be defined with different expected results on platforms that use
-    // different default separator characters.
-    {{FPL("\\"), FPL("cc")}, FPL("\\cc")},
-    {{FPL("\\aa"), FPL("")}, FPL("\\aa")},
-    {{FPL("\\aa\\"), FPL("")}, FPL("\\aa")},
-    {{FPL("\\\\aa"), FPL("")}, FPL("\\\\aa")},
-    {{FPL("\\\\aa\\"), FPL("")}, FPL("\\\\aa")},
-    {{FPL("\\\\"), FPL("aa")}, FPL("\\\\aa")},
-    {{FPL("/aa/bb"), FPL("cc")}, FPL("/aa/bb\\cc")},
-    {{FPL("/aa/bb/"), FPL("cc")}, FPL("/aa/bb\\cc")},
-    {{FPL("aa/bb/"), FPL("cc")}, FPL("aa/bb\\cc")},
-    {{FPL("aa/bb"), FPL("cc")}, FPL("aa/bb\\cc")},
-    {{FPL("a/b"), FPL("c")}, FPL("a/b\\c")},
-    {{FPL("a/b/"), FPL("c")}, FPL("a/b\\c")},
-    {{FPL("//aa"), FPL("bb")}, FPL("//aa\\bb")},
-    {{FPL("//aa/"), FPL("bb")}, FPL("//aa\\bb")},
-    {{FPL("\\aa\\bb"), FPL("cc")}, FPL("\\aa\\bb\\cc")},
-    {{FPL("\\aa\\bb\\"), FPL("cc")}, FPL("\\aa\\bb\\cc")},
-    {{FPL("aa\\bb\\"), FPL("cc")}, FPL("aa\\bb\\cc")},
-    {{FPL("aa\\bb"), FPL("cc")}, FPL("aa\\bb\\cc")},
-    {{FPL("a\\b"), FPL("c")}, FPL("a\\b\\c")},
-    {{FPL("a\\b\\"), FPL("c")}, FPL("a\\b\\c")},
-    {{FPL("\\\\aa"), FPL("bb")}, FPL("\\\\aa\\bb")},
-    {{FPL("\\\\aa\\"), FPL("bb")}, FPL("\\\\aa\\bb")},
-#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
-    {{FPL("c:\\"), FPL("a")}, FPL("c:\\a")},
-    {{FPL("c:\\\\"), FPL("a")}, FPL("c:\\\\a")},
-    {{FPL("c:\\\\\\"), FPL("a")}, FPL("c:\\a")},
-    {{FPL("c:\\"), FPL("")}, FPL("c:\\")},
-    {{FPL("c:\\a"), FPL("b")}, FPL("c:\\a\\b")},
-    {{FPL("c:\\a\\"), FPL("b")}, FPL("c:\\a\\b")},
-#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
-#else   // PA_FILE_PATH_USES_WIN_SEPARATORS
-    {{FPL("/aa/bb"), FPL("cc")}, FPL("/aa/bb/cc")},
-    {{FPL("/aa/bb/"), FPL("cc")}, FPL("/aa/bb/cc")},
-    {{FPL("aa/bb/"), FPL("cc")}, FPL("aa/bb/cc")},
-    {{FPL("aa/bb"), FPL("cc")}, FPL("aa/bb/cc")},
-    {{FPL("a/b"), FPL("c")}, FPL("a/b/c")},
-    {{FPL("a/b/"), FPL("c")}, FPL("a/b/c")},
-    {{FPL("//aa"), FPL("bb")}, FPL("//aa/bb")},
-    {{FPL("//aa/"), FPL("bb")}, FPL("//aa/bb")},
-#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
-    {{FPL("c:/"), FPL("a")}, FPL("c:/a")},
-    {{FPL("c:/"), FPL("")}, FPL("c:/")},
-    {{FPL("c:/a"), FPL("b")}, FPL("c:/a/b")},
-    {{FPL("c:/a/"), FPL("b")}, FPL("c:/a/b")},
-#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
-#endif  // PA_FILE_PATH_USES_WIN_SEPARATORS
-  };
-
-  for (size_t i = 0; i < std::size(cases); ++i) {
-    FilePath root(cases[i].inputs[0]);
-    FilePath::StringType leaf(cases[i].inputs[1]);
-    FilePath observed_str = root.Append(leaf);
-    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_str.value())
-        << "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
-    FilePath observed_path = root.Append(FilePath(leaf));
-    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_path.value())
-        << "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
-  }
-}
-
-TEST(PartitionAllocBaseFilePathTest, ConstructWithNUL) {
-  // Assert FPS() works.
-  ASSERT_EQ(3U, FPS("a\0b").length());
-
-  // Test constructor strips '\0'
-  FilePath path(FPS("a\0b"));
-  EXPECT_EQ(1U, path.value().length());
-  EXPECT_EQ(FPL("a"), path.value());
-}
-
-TEST(PartitionAllocBaseFilePathTest, AppendWithNUL) {
-  // Assert FPS() works.
-  ASSERT_EQ(3U, FPS("b\0b").length());
-
-  // Test Append() strips '\0'
-  FilePath path(FPL("a"));
-  path = path.Append(FPS("b\0b"));
-  EXPECT_EQ(3U, path.value().length());
-#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
-  EXPECT_EQ(FPL("a\\b"), path.value());
-#else
-  EXPECT_EQ(FPL("a/b"), path.value());
-#endif
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/files/file_util.h b/base/allocator/partition_allocator/partition_alloc_base/files/file_util.h
deleted file mode 100644
index 4bc5bd6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/files/file_util.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains utility functions for dealing with the local
-// filesystem.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#include <sys/stat.h>
-#include <unistd.h>
-#endif
-
-namespace partition_alloc::internal::base {
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-
-// Read exactly |bytes| bytes from file descriptor |fd|, storing the result
-// in |buffer|. This function is protected against EINTR and partial reads.
-// Returns true iff |bytes| bytes have been successfully read from |fd|.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool ReadFromFD(int fd, char* buffer, size_t bytes);
-
-#endif  // BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
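A hedged sketch of the all-or-nothing contract described above; ReadHeader and the 16-byte header size are made up for illustration, and the snippet is POSIX-only, like ReadFromFD itself.

#include <fcntl.h>
#include <unistd.h>

#include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"

namespace pa_base = partition_alloc::internal::base;

bool ReadHeader(const char* path, char (&header)[16]) {
  int fd = open(path, O_RDONLY);
  if (fd < 0)
    return false;
  // True only if all 16 bytes were read; EINTR and short reads are retried
  // internally, so the caller never has to loop.
  bool ok = pa_base::ReadFromFD(fd, header, sizeof(header));
  close(fd);
  return ok;
}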
diff --git a/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc
deleted file mode 100644
index 4853ff3..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
-
-namespace partition_alloc::internal::base {
-
-bool ReadFromFD(int fd, char* buffer, size_t bytes) {
-  size_t total_read = 0;
-  while (total_read < bytes) {
-    ssize_t bytes_read =
-        PA_HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
-    if (bytes_read <= 0)
-      break;
-    total_read += bytes_read;
-  }
-  return total_read == bytes;
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.cc b/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.cc
deleted file mode 100644
index 84663a9..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h"
-
-#include <zircon/status.h>
-
-#include <iomanip>
-
-namespace partition_alloc::internal::logging {
-
-ZxLogMessage::ZxLogMessage(const char* file_path,
-                           int line,
-                           LogSeverity severity,
-                           zx_status_t zx_err)
-    : LogMessage(file_path, line, severity), zx_err_(zx_err) {}
-
-ZxLogMessage::~ZxLogMessage() {
-  // zx_status_t error values are negative, so log the numeric version as
-  // decimal rather than hex. This is also useful to match zircon/errors.h for
-  // grepping.
-  stream() << ": " << zx_status_get_string(zx_err_) << " (" << zx_err_ << ")";
-}
-
-}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h b/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h
deleted file mode 100644
index c0cf1b7..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
-
-#include <lib/fit/function.h>
-#include <zircon/types.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "build/build_config.h"
-
-// Use the PA_ZX_LOG family of macros along with a zx_status_t containing a
-// Zircon error. The error value will be decoded so that logged messages explain
-// the error.
-
-namespace partition_alloc::internal::logging {
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ZxLogMessage
-    : public logging::LogMessage {
- public:
-  ZxLogMessage(const char* file_path,
-               int line,
-               LogSeverity severity,
-               zx_status_t zx_err);
-
-  ZxLogMessage(const ZxLogMessage&) = delete;
-  ZxLogMessage& operator=(const ZxLogMessage&) = delete;
-
-  ~ZxLogMessage() override;
-
- private:
-  zx_status_t zx_err_;
-};
-
-}  // namespace partition_alloc::internal::logging
-
-#define PA_ZX_LOG_STREAM(severity, zx_err) \
-  PA_COMPACT_GOOGLE_LOG_EX_##severity(ZxLogMessage, zx_err).stream()
-
-#define PA_ZX_LOG(severity, zx_err) \
-  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), PA_LOG_IS_ON(severity))
-#define PA_ZX_LOG_IF(severity, condition, zx_err)    \
-  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), \
-                 PA_LOG_IS_ON(severity) && (condition))
-
-#define PA_ZX_CHECK(condition, zx_err)                          \
-  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(FATAL, zx_err), !(condition)) \
-      << "Check failed: " #condition << ". "
-
-#define PA_ZX_DLOG(severity, zx_err) \
-  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), PA_DLOG_IS_ON(severity))
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#define PA_ZX_DLOG_IF(severity, condition, zx_err)   \
-  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), \
-                 PA_DLOG_IS_ON(severity) && (condition))
-#else  // BUILDFLAG(PA_DCHECK_IS_ON)
-#define PA_ZX_DLOG_IF(severity, condition, zx_err) PA_EAT_STREAM_PARAMETERS
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_ZX_DCHECK(condition, zx_err)                      \
-  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(DCHECK, zx_err),           \
-                 BUILDFLAG(PA_DCHECK_IS_ON) && !(condition)) \
-      << "Check failed: " #condition << ". "
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
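A minimal sketch of the intended usage, assuming the status comes from some Zircon call made by the caller; LogChannelWriteResult is a hypothetical helper.

#include <zircon/errors.h>
#include <zircon/types.h>

#include "base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h"

void LogChannelWriteResult(zx_status_t status) {
  // ZxLogMessage's destructor appends ": <symbolic name> (<decimal value>)",
  // so the log line explains the error without a manual lookup.
  PA_ZX_LOG_IF(ERROR, status != ZX_OK, status) << "zx_channel_write failed";

  // For calls that must succeed, PA_ZX_CHECK crashes with the decoded status.
  PA_ZX_CHECK(status == ZX_OK, status);
}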
diff --git a/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc
deleted file mode 100644
index 9996b21..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h"
-
-#include <fuchsia/logger/cpp/fidl.h>
-#include <lib/fidl/cpp/binding.h>
-#include <lib/sys/cpp/component_context.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::base {
-
-namespace {
-
-class MockLogSource {
- public:
-  MOCK_METHOD0(Log, const char*());
-};
-
-}  // namespace
-
-// Verifies the Fuchsia-specific PA_ZX_*() logging macros.
-TEST(FuchsiaLoggingTestPA, FuchsiaLogging) {
-  MockLogSource mock_log_source;
-  constexpr int kTimes =
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-      2;
-#else
-      1;
-#endif
-  EXPECT_CALL(mock_log_source, Log())
-      .Times(kTimes)
-      .WillRepeatedly(testing::Return("log message"));
-
-  logging::SetMinLogLevel(logging::LOGGING_INFO);
-
-  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
-  EXPECT_EQ(BUILDFLAG(PA_DCHECK_IS_ON), PA_DLOG_IS_ON(INFO));
-
-  PA_ZX_LOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
-  PA_ZX_DLOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
-
-  PA_ZX_CHECK(true, ZX_ERR_INTERNAL);
-  PA_ZX_DCHECK(true, ZX_ERR_INTERNAL);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h b/base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h
deleted file mode 100644
index c0065f6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
-
-#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
-
-// This is a wrapper for gtest's FRIEND_TEST macro that friends the
-// test with all possible prefixes. This is very helpful when changing the test
-// prefix, because the friend declarations don't need to be updated.
-//
-// Example usage:
-//
-// class MyClass {
-//  private:
-//   void MyMethod();
-//   PA_FRIEND_TEST_ALL_PREFIXES(MyClassTest, MyMethod);
-// };
-#define PA_FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
-  FRIEND_TEST(test_case_name, test_name);                      \
-  FRIEND_TEST(test_case_name, DISABLED_##test_name);           \
-  FRIEND_TEST(test_case_name, FLAKY_##test_name)
-
-// C++ compilers will refuse to compile the following code:
-//
-// namespace foo {
-// class MyClass {
-//  private:
-//   PA_FRIEND_TEST_ALL_PREFIXES(MyClassTest, TestMethod);
-//   bool private_var;
-// };
-// }  // namespace foo
-//
-// void MyClassTest::TestMethod() {
-//   foo::MyClass foo_class;
-//   foo_class.private_var = true;
-// }
-//
-// Unless you forward declare MyClassTest::TestMethod outside of namespace foo.
-// Use PA_FORWARD_DECLARE_TEST to do so for all possible prefixes.
-//
-// Example usage:
-//
-// PA_FORWARD_DECLARE_TEST(MyClassTest, TestMethod);
-//
-// namespace foo {
-// class MyClass {
-//  private:
-//   PA_FRIEND_TEST_ALL_PREFIXES(::MyClassTest, TestMethod);  // NOTE use of ::
-//   bool private_var;
-// };
-// }  // namespace foo
-//
-// void MyClassTest::TestMethod() {
-//   foo::MyClass foo_class;
-//   foo_class.private_var = true;
-// }
-
-#define PA_FORWARD_DECLARE_TEST(test_case_name, test_name) \
-  class test_case_name##_##test_name##_Test;               \
-  class test_case_name##_##DISABLED_##test_name##_Test;    \
-  class test_case_name##_##FLAKY_##test_name##_Test
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h b/base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h
deleted file mode 100644
index 58f8cb6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
-
-#include "build/build_config.h"
-
-// Crashes in the fastest possible way with no attempt at logging.
-// There are several constraints; see http://crbug.com/664209 for more context.
-//
-// - PA_TRAP_SEQUENCE_() must be fatal. It should not be possible to ignore the
-//   resulting exception or simply hit 'continue' to skip over it in a debugger.
-// - Different instances of PA_TRAP_SEQUENCE_() must not be folded together, to
-//   ensure crash reports are debuggable. Unlike __builtin_trap(), asm volatile
-//   blocks will not be folded together.
-//   Note: PA_TRAP_SEQUENCE_() previously required an instruction with a unique
-//   nonce since unlike clang, GCC folds together identical asm volatile
-//   blocks.
-// - PA_TRAP_SEQUENCE_() must produce a signal that is distinct from an invalid
-//   memory access.
-// - PA_TRAP_SEQUENCE_() must be treated as a set of noreturn instructions.
-//   __builtin_unreachable() is used to provide that hint here. clang also uses
-//   this as a heuristic to pack the instructions in the function epilogue to
-//   improve code density.
-//
-// Additional properties that are nice to have:
-// - PA_TRAP_SEQUENCE_() should be as compact as possible.
-// - The first instruction of PA_TRAP_SEQUENCE_() should not change, to avoid
-//   shifting crash reporting clusters. As a consequence of this, explicit
-//   assembly is preferred over intrinsics.
-//   Note: this last bullet point may no longer be true, and may be removed in
-//   the future.
-
-// Note: PA_TRAP_SEQUENCE is currently split into two macro helpers due to the
-// fact that clang emits an actual instruction for __builtin_unreachable() on
-// certain platforms (see https://crbug.com/958675). In addition, the
-// int3/bkpt/brk will be removed in followups, so splitting it up like this now
-// makes it easy to land the followups.
-
-#if defined(COMPILER_GCC)
-
-#if defined(ARCH_CPU_X86_FAMILY)
-
-// TODO(https://crbug.com/958675): In theory, it should be possible to use just
-// int3. However, there are a number of crashes with SIGILL as the exception
-// code, so it seems likely that there's a signal handler that allows execution
-// to continue after SIGTRAP.
-#define PA_TRAP_SEQUENCE1_() asm volatile("int3")
-
-#if BUILDFLAG(IS_APPLE)
-// Intentionally empty: __builtin_unreachable() is always part of the sequence
-// (see PA_IMMEDIATE_CRASH below) and already emits a ud2 on Mac.
-#define PA_TRAP_SEQUENCE2_() asm volatile("")
-#else
-#define PA_TRAP_SEQUENCE2_() asm volatile("ud2")
-#endif  // BUILDFLAG(IS_APPLE)
-
-#elif defined(ARCH_CPU_ARMEL)
-
-// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
-// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
-// cause a SIGTRAP from userspace without using a syscall (which would be a
-// problem for sandboxing).
-// TODO(https://crbug.com/958675): Remove bkpt from this sequence.
-#define PA_TRAP_SEQUENCE1_() asm volatile("bkpt #0")
-#define PA_TRAP_SEQUENCE2_() asm volatile("udf #0")
-
-#elif defined(ARCH_CPU_ARM64)
-
-// This will always generate a SIGTRAP on arm64.
-// TODO(https://crbug.com/958675): Remove brk from this sequence.
-#define PA_TRAP_SEQUENCE1_() asm volatile("brk #0")
-#define PA_TRAP_SEQUENCE2_() asm volatile("hlt #0")
-
-#else
-
-// Crash report accuracy will not be guaranteed on other architectures, but at
-// least this will crash as expected.
-#define PA_TRAP_SEQUENCE1_() __builtin_trap()
-#define PA_TRAP_SEQUENCE2_() asm volatile("")
-
-#endif  // ARCH_CPU_*
-
-#elif defined(COMPILER_MSVC)
-
-#if !defined(__clang__)
-
-// MSVC x64 doesn't support inline asm, so use the MSVC intrinsic.
-#define PA_TRAP_SEQUENCE1_() __debugbreak()
-#define PA_TRAP_SEQUENCE2_()
-
-#elif defined(ARCH_CPU_ARM64)
-
-// Windows ARM64 uses "BRK #F000" as its breakpoint instruction, and
-// __debugbreak() generates that in both VC++ and clang.
-#define PA_TRAP_SEQUENCE1_() __debugbreak()
-// Intentionally empty: __builtin_unreachable() is always part of the sequence
-// (see PA_IMMEDIATE_CRASH below) and already emits a ud2 on Win64,
-// https://crbug.com/958373
-#define PA_TRAP_SEQUENCE2_() __asm volatile("")
-
-#else
-
-#define PA_TRAP_SEQUENCE1_() asm volatile("int3")
-#define PA_TRAP_SEQUENCE2_() asm volatile("ud2")
-
-#endif  // __clang__
-
-#else
-
-#error No supported trap sequence!
-
-#endif  // COMPILER_GCC
-
-#define PA_TRAP_SEQUENCE_() \
-  do {                      \
-    PA_TRAP_SEQUENCE1_();   \
-    PA_TRAP_SEQUENCE2_();   \
-  } while (false)
-
-// CHECK() and the trap sequence can be invoked from a constexpr function.
-// This could make compilation fail on GCC, as it forbids directly using inline
-// asm inside a constexpr function. However, it allows calling a lambda
-// expression including the same asm.
-// The side effect is that the top of the stacktrace will not point to the
-// calling function, but to this anonymous lambda. This is still useful as the
-// full name of the lambda will typically include the name of the function that
-// calls CHECK() and the debugger will still break at the right line of code.
-#if !defined(COMPILER_GCC) || defined(__clang__)
-
-#define PA_WRAPPED_TRAP_SEQUENCE_() PA_TRAP_SEQUENCE_()
-
-#else
-
-#define PA_WRAPPED_TRAP_SEQUENCE_() \
-  do {                              \
-    [] { PA_TRAP_SEQUENCE_(); }();  \
-  } while (false)
-
-#endif  // !defined(COMPILER_GCC) || defined(__clang__)
-
-#if defined(__clang__) || defined(COMPILER_GCC)
-
-// __builtin_unreachable() hints to the compiler that this is noreturn and can
-// be packed in the function epilogue.
-#define PA_IMMEDIATE_CRASH()     \
-  ({                             \
-    PA_WRAPPED_TRAP_SEQUENCE_(); \
-    __builtin_unreachable();     \
-  })
-
-#else
-
-// This is supporting non-chromium user of logging.h to build with MSVC, like
-// pdfium. On MSVC there is no __builtin_unreachable().
-#define PA_IMMEDIATE_CRASH() PA_WRAPPED_TRAP_SEQUENCE_()
-
-#endif  // defined(__clang__) || defined(COMPILER_GCC)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
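Editorial aside (not part of the diff): a sketch of how the deleted macros composed at a call site. On a clang x86_64 build, PA_IMMEDIATE_CRASH() expanded through PA_WRAPPED_TRAP_SEQUENCE_() and PA_TRAP_SEQUENCE_() to "int3; ud2" followed by __builtin_unreachable(); the caller below is hypothetical.

// Hypothetical caller, for illustration only.
[[noreturn]] void DieOnBadMetadata() {
  PA_IMMEDIATE_CRASH();  // Fatal trap; control never reaches the next line.
}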
diff --git a/base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h b/base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h
deleted file mode 100644
index d855866..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
-
-#include <stdint.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base::ios {
-
-// Returns whether the operating system is iOS 12 or later.
-// TODO(crbug.com/1129482): Remove once minimum supported version is at least 12
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS12OrLater();
-
-// Returns whether the operating system is iOS 13 or later.
-// TODO(crbug.com/1129483): Remove once minimum supported version is at least 13
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS13OrLater();
-
-// Returns whether the operating system is iOS 14 or later.
-// TODO(crbug.com/1129484): Remove once minimum supported version is at least 14
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS14OrLater();
-
-// Returns whether the operating system is iOS 15 or later.
-// TODO(crbug.com/1227419): Remove once minimum supported version is at least 15
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS15OrLater();
-
-// Returns whether the operating system is at the given version or later.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix);
-
-}  // namespace partition_alloc::internal::base::ios
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
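Editorial aside (not part of the diff): the deleted helpers were typically used to gate version-specific code paths; the branch bodies below are hypothetical.

// Illustrative only: gate an iOS-14-specific path behind the deleted helper.
if (partition_alloc::internal::base::ios::IsRunningOnIOS14OrLater()) {
  // Take the path that requires iOS 14 or newer (hypothetical).
} else {
  // Fall back for iOS 12/13 (hypothetical).
}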
diff --git a/base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.mm b/base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.mm
deleted file mode 100644
index d5d607d..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.mm
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
-
-#include <array>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
-
-namespace partition_alloc::internal::base::ios {
-
-bool IsRunningOnIOS12OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(12, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnIOS13OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(13, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnIOS14OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(14, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnIOS15OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(15, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
-  static const class OSVersion {
-   public:
-    OSVersion() {
-      SysInfo::OperatingSystemVersionNumbers(
-          &current_version_[0], &current_version_[1], &current_version_[2]);
-    }
-
-    bool IsRunningOnOrLater(int32_t version[3]) const {
-      for (size_t i = 0; i < std::size(current_version_); ++i) {
-        if (current_version_[i] != version[i])
-          return current_version_[i] > version[i];
-      }
-      return true;
-    }
-
-   private:
-    int32_t current_version_[3];
-  } kOSVersion;
-
-  int32_t version[3] = {major, minor, bug_fix};
-  return kOSVersion.IsRunningOnOrLater(version);
-}
-
-}  // namespace partition_alloc::internal::base::ios
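Editorial aside (not part of the diff): a standalone restatement of the lexicographic check the deleted OSVersion helper performed — fields are compared most-significant first, and the first field that differs decides the answer.

// Illustrative sketch of the same comparison, self-contained:
#include <cstddef>
#include <cstdint>

bool VersionAtLeast(const int32_t current[3], const int32_t wanted[3]) {
  for (size_t i = 0; i < 3; ++i) {
    if (current[i] != wanted[i])
      return current[i] > wanted[i];  // First differing field decides.
  }
  return true;  // Identical versions count as "at least".
}
// e.g. with current = {14, 2, 0}: VersionAtLeast(current, (int32_t[3]){14, 3, 0})
// is false, because the minor field 2 is below the requested 3.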
diff --git a/base/allocator/partition_allocator/partition_alloc_base/logging.cc b/base/allocator/partition_allocator/partition_alloc_base/logging.cc
deleted file mode 100644
index 464a352..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/logging.cc
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-
-// TODO(1151236): After finishing copying //base files to PA library, remove
-// defined(BASE_CHECK_H_) from here.
-#if defined(                                                             \
-    BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_) || \
-    defined(BASE_CHECK_H_) ||                                            \
-    defined(BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_)
-#error "logging.h should not include check.h"
-#endif
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-
-#include <io.h>
-#include <windows.h>
-// Windows warns on using write().  It prefers _write().
-#define write(fd, buf, count) _write(fd, buf, static_cast<unsigned int>(count))
-// Windows doesn't define STDERR_FILENO.  Define it here.
-#define STDERR_FILENO 2
-
-#endif
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#endif
-
-#include <cstring>
-#include <ostream>
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h"
-#endif
-
-namespace partition_alloc::internal::logging {
-
-namespace {
-
-const char* const log_severity_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};
-static_assert(LOGGING_NUM_SEVERITIES == std::size(log_severity_names),
-              "Incorrect number of log_severity_names");
-
-const char* log_severity_name(int severity) {
-  if (severity >= 0 && severity < LOGGING_NUM_SEVERITIES)
-    return log_severity_names[severity];
-  return "UNKNOWN";
-}
-
-int g_min_log_level = 0;
-
-// A log message handler that gets notified of every log message we process.
-LogMessageHandlerFunction g_log_message_handler = nullptr;
-
-#if !BUILDFLAG(IS_WIN)
-void WriteToStderr(const char* data, size_t length) {
-  size_t bytes_written = 0;
-  int rv;
-  while (bytes_written < length) {
-    rv = PA_HANDLE_EINTR(
-        write(STDERR_FILENO, data + bytes_written, length - bytes_written));
-    if (rv < 0) {
-      // Give up, nothing we can do now.
-      break;
-    }
-    bytes_written += rv;
-  }
-}
-#else   // !BUILDFLAG(IS_WIN)
-void WriteToStderr(const char* data, size_t length) {
-  HANDLE handle = ::GetStdHandle(STD_ERROR_HANDLE);
-  const char* ptr = data;
-  const char* ptr_end = data + length;
-  while (ptr < ptr_end) {
-    DWORD bytes_written = 0;
-    if (!::WriteFile(handle, ptr, ptr_end - ptr, &bytes_written, nullptr) ||
-        bytes_written == 0) {
-      // Give up, nothing we can do now.
-      break;
-    }
-    ptr += bytes_written;
-  }
-}
-#endif  // !BUILDFLAG(IS_WIN)
-
-}  // namespace
-
-#if BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
-// In DCHECK-enabled Chrome builds, allow the meaning of LOGGING_DCHECK to be
-// determined at run-time. We default it to INFO, to avoid it triggering
-// crashes before the run-time has explicitly chosen the behaviour.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-logging::LogSeverity LOGGING_DCHECK = LOGGING_INFO;
-#endif  // BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
-
-// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
-// an object of the correct type on the LHS of the unused part of the ternary
-// operator.
-std::ostream* g_swallow_stream;
-
-void SetMinLogLevel(int level) {
-  g_min_log_level = std::min(LOGGING_FATAL, level);
-}
-
-int GetMinLogLevel() {
-  return g_min_log_level;
-}
-
-bool ShouldCreateLogMessage(int severity) {
-  if (severity < g_min_log_level)
-    return false;
-
-  // Return true here unless we know ~LogMessage won't do anything.
-  return true;
-}
-
-int GetVlogVerbosity() {
-  return std::max(-1, LOG_INFO - GetMinLogLevel());
-}
-
-void SetLogMessageHandler(LogMessageHandlerFunction handler) {
-  g_log_message_handler = handler;
-}
-
-LogMessageHandlerFunction GetLogMessageHandler() {
-  return g_log_message_handler;
-}
-
-LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
-    : severity_(severity), file_(file), line_(line) {
-  Init(file, line);
-}
-
-LogMessage::LogMessage(const char* file, int line, const char* condition)
-    : severity_(LOGGING_FATAL), file_(file), line_(line) {
-  Init(file, line);
-  stream_ << "Check failed: " << condition << ". ";
-}
-
-LogMessage::~LogMessage() {
-  stream_ << std::endl;
-  std::string str_newline(stream_.str());
-
-  // Give any log message handler first dibs on the message.
-  if (g_log_message_handler &&
-      g_log_message_handler(severity_, file_, line_, message_start_,
-                            str_newline)) {
-    // The handler took care of it, no further processing.
-    return;
-  }
-
-  // Always use RawLog() if g_log_message_handler doesn't filter messages.
-  RawLog(severity_, str_newline.c_str());
-}
-
-// writes the common header info to the stream
-void LogMessage::Init(const char* file, int line) {
-  std::string filename(file);
-  size_t last_slash_pos = filename.find_last_of("\\/");
-  if (last_slash_pos != std::string::npos)
-    filename.erase(0, last_slash_pos + 1);
-
-  {
-    // TODO(darin): It might be nice if the columns were fixed width.
-    stream_ << '[';
-    // TODO(1151236): show process id, thread id, timestamp and so on
-    // if needed.
-    if (severity_ >= 0) {
-      stream_ << log_severity_name(severity_);
-    } else {
-      stream_ << "VERBOSE" << -severity_;
-    }
-    stream_ << ":" << filename << "(" << line << ")] ";
-  }
-  message_start_ = stream_.str().length();
-}
-
-#if BUILDFLAG(IS_WIN)
-// This has already been defined in the header, but defining it again as DWORD
-// ensures that the type used in the header is equivalent to DWORD. If not,
-// the redefinition is a compile error.
-typedef DWORD SystemErrorCode;
-#endif
-
-SystemErrorCode GetLastSystemErrorCode() {
-#if BUILDFLAG(IS_WIN)
-  return ::GetLastError();
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  return errno;
-#endif
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-std::string SystemErrorCodeToString(SystemErrorCode error_code) {
-#if BUILDFLAG(IS_WIN)
-  const int kErrorMessageBufferSize = 256;
-  char msgbuf[kErrorMessageBufferSize];
-  DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
-  DWORD len = FormatMessageA(flags, nullptr, error_code, 0, msgbuf,
-                             std::size(msgbuf), nullptr);
-  if (len) {
-    // Messages returned by system end with line breaks.
-    std::string message(msgbuf);
-    size_t whitespace_pos = message.find_last_not_of("\n\r ");
-    if (whitespace_pos != std::string::npos)
-      message.erase(whitespace_pos + 1);
-    return message + base::TruncatingStringPrintf(" (0x%lX)", error_code);
-  }
-  return base::TruncatingStringPrintf(
-      "Error (0x%lX) while retrieving error. (0x%lX)", GetLastError(),
-      error_code);
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  return base::safe_strerror(error_code) +
-         base::TruncatingStringPrintf(" (%d)", error_code);
-#endif  // BUILDFLAG(IS_WIN)
-}
-
-#if BUILDFLAG(IS_WIN)
-Win32ErrorLogMessage::Win32ErrorLogMessage(const char* file,
-                                           int line,
-                                           LogSeverity severity,
-                                           SystemErrorCode err)
-    : LogMessage(file, line, severity), err_(err) {}
-
-Win32ErrorLogMessage::~Win32ErrorLogMessage() {
-  stream() << ": " << SystemErrorCodeToString(err_);
-  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
-  // field) and use Alias in hopes that it makes it into crash dumps.
-  DWORD last_error = err_;
-  base::debug::Alias(&last_error);
-}
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-ErrnoLogMessage::ErrnoLogMessage(const char* file,
-                                 int line,
-                                 LogSeverity severity,
-                                 SystemErrorCode err)
-    : LogMessage(file, line, severity), err_(err) {}
-
-ErrnoLogMessage::~ErrnoLogMessage() {
-  stream() << ": " << SystemErrorCodeToString(err_);
-  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
-  // field) and use Alias in hopes that it makes it into crash dumps.
-  int last_error = err_;
-  base::debug::Alias(&last_error);
-}
-#endif  // BUILDFLAG(IS_WIN)
-
-void RawLog(int level, const char* message) {
-  if (level >= g_min_log_level && message) {
-#if !BUILDFLAG(IS_WIN)
-    const size_t message_len = strlen(message);
-#else   // !BUILDFLAG(IS_WIN)
-    const size_t message_len = ::lstrlenA(message);
-#endif  // !BUILDFLAG(IS_WIN)
-    WriteToStderr(message, message_len);
-
-    if (message_len > 0 && message[message_len - 1] != '\n') {
-      WriteToStderr("\n", 1);
-    }
-  }
-
-  if (level == LOGGING_FATAL)
-    PA_IMMEDIATE_CRASH();
-}
-
-// This was defined at the beginning of this file.
-#undef write
-
-}  // namespace partition_alloc::internal::logging
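Editorial aside (not part of the diff): a sketch of how the deleted handler hook was used. A handler matching LogMessageHandlerFunction gets first look at every message; returning true swallows it, otherwise ~LogMessage falls through to RawLog() and stderr. The handler below is hypothetical.

// Illustrative handler, matching the deleted LogMessageHandlerFunction shape.
bool SwallowInfoMessages(int severity, const char* /*file*/, int /*line*/,
                         size_t /*message_start*/, const std::string& /*str*/) {
  // Returning true means "handled": the message never reaches stderr.
  return severity <= partition_alloc::internal::logging::LOGGING_INFO;
}

// Installed once at startup (hypothetical call site):
//   partition_alloc::internal::logging::SetLogMessageHandler(&SwallowInfoMessages);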
diff --git a/base/allocator/partition_allocator/partition_alloc_base/logging.h b/base/allocator/partition_allocator/partition_alloc_base/logging.h
deleted file mode 100644
index 270d770..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/logging.h
+++ /dev/null
@@ -1,522 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOGGING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOGGING_H_
-
-#include <stddef.h>
-
-#include <cassert>
-#include <cstdint>
-#include <sstream>
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
-#include "build/build_config.h"
-
-// TODO(1151236): Need to update the description, because logging for PA
-// standalone library was minimized.
-//
-// Optional message capabilities
-// -----------------------------
-// Assertion failed messages and fatal errors are displayed in a dialog box
-// before the application exits. However, running this UI creates a message
-// loop, which causes application messages to be processed and potentially
-// dispatched to existing application windows. Since the application is in a
-// bad state when this assertion dialog is displayed, these messages may not
-// get processed and hang the dialog, or the application might go crazy.
-//
-// Therefore, it can be beneficial to display the error dialog in a separate
-// process from the main application. When the logging system needs to display
-// a fatal error dialog box, it will look for a program called
-// "DebugMessage.exe" in the same directory as the application executable. It
-// will run this application with the message as the command line, and will
-// not include the name of the application as is traditional for easier
-// parsing.
-//
-// The code for DebugMessage.exe is only one line. In WinMain, do:
-//   MessageBox(NULL, GetCommandLineW(), L"Fatal Error", 0);
-//
-// If DebugMessage.exe is not found, the logging code will use a normal
-// MessageBox, potentially causing the problems discussed above.
-
-// Instructions
-// ------------
-//
-// Make a bunch of macros for logging.  The way to log things is to stream
-// things to PA_LOG(<a particular severity level>).  E.g.,
-//
-//   PA_LOG(INFO) << "Found " << num_cookies << " cookies";
-//
-// You can also do conditional logging:
-//
-//   PA_LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
-//
-// The CHECK(condition) macro is active in both debug and release builds and
-// effectively performs a PA_LOG(FATAL) which terminates the process and
-// generates a crashdump unless a debugger is attached.
-//
-// There are also "debug mode" logging macros like the ones above:
-//
-//   PA_DLOG(INFO) << "Found cookies";
-//
-//   PA_DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
-//
-// All "debug mode" logging is compiled away to nothing for non-debug mode
-// compiles.  PA_LOG_IF and development flags also work well together
-// because the code can be compiled away sometimes.
-//
-// We also have
-//
-//   PA_LOG_ASSERT(assertion);
-//   PA_DLOG_ASSERT(assertion);
-//
-// which is syntactic sugar for PA_{,D}LOG_IF(FATAL, assert fails) << assertion;
-//
-// There are "verbose level" logging macros.  They look like
-//
-//   PA_VLOG(1) << "I'm printed when you run the program with --v=1 or more";
-//   PA_VLOG(2) << "I'm printed when you run the program with --v=2 or more";
-//
-// These always log at the INFO log level (when they log at all).
-//
-// There's also PA_VLOG_IS_ON(n) "verbose level" condition macro. To be used as
-//
-//   if (PA_VLOG_IS_ON(2)) {
-//     // do some logging preparation and logging
-//     // that can't be accomplished with just PA_VLOG(2) << ...;
-//   }
-//
-// There is also a PA_VLOG_IF "verbose level" condition macro for sample
-// cases, when some extra computation and preparation for logs is not
-// needed.
-//
-//   PA_VLOG_IF(1, (size > 1024))
-//      << "I'm printed when size is more than 1024 and when you run the "
-//         "program with --v=1 or more";
-//
-// We also override the standard 'assert' to use 'PA_DLOG_ASSERT'.
-//
-// Lastly, there is:
-//
-//   PA_PLOG(ERROR) << "Couldn't do foo";
-//   PA_DPLOG(ERROR) << "Couldn't do foo";
-//   PA_PLOG_IF(ERROR, cond) << "Couldn't do foo";
-//   PA_DPLOG_IF(ERROR, cond) << "Couldn't do foo";
-//   PA_PCHECK(condition) << "Couldn't do foo";
-//   PA_DPCHECK(condition) << "Couldn't do foo";
-//
-// which append the last system error to the message in string form (taken from
-// GetLastError() on Windows and errno on POSIX).
-//
-// The supported severity levels for macros that allow you to specify one
-// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
-//
-// Very important: logging a message at the FATAL severity level causes
-// the program to terminate (after the message is logged).
-//
-// There is the special severity of DFATAL, which logs FATAL in DCHECK-enabled
-// builds, ERROR in normal mode.
-//
-// Output is formatted as per the following example:
-// [VERBOSE1:drm_device_handle.cc(90)] Succeeded
-// authenticating /dev/dri/card0 in 0 ms with 1 attempt(s)
-//
-// The colon separated fields inside the brackets are as follows:
-// 1. The log level
-// 2. The filename and line number where the log was instantiated
-//
-// Additional logging-related information can be found here:
-// https://chromium.googlesource.com/chromium/src/+/main/docs/linux/debugging.md#Logging
-
-namespace partition_alloc::internal::logging {
-
-// Sets the log level. Anything at or above this level will be written to the
-// log file/displayed to the user (if applicable). Anything below this level
-// will be silently ignored. The log level defaults to 0 (everything is logged
-// up to level INFO) if this function is not called.
-// Note that log messages for VLOG(x) are logged at level -x, so setting
-// the min log level to negative values enables verbose logging.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SetMinLogLevel(int level);
-
-// Gets the current log level.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) int GetMinLogLevel();
-
-// Used by PA_LOG_IS_ON to lazy-evaluate stream arguments.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ShouldCreateLogMessage(int severity);
-
-// Gets the PA_VLOG default verbosity level.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) int GetVlogVerbosity();
-
-// Sets the Log Message Handler that gets passed every log message before
-// it's sent to other log destinations (if any).
-// Returns true to signal that it handled the message and the message
-// should not be sent to other log destinations.
-typedef bool (*LogMessageHandlerFunction)(int severity,
-                                          const char* file,
-                                          int line,
-                                          size_t message_start,
-                                          const std::string& str);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetLogMessageHandler(LogMessageHandlerFunction handler);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-LogMessageHandlerFunction GetLogMessageHandler();
-
-using LogSeverity = int;
-constexpr LogSeverity LOGGING_VERBOSE = -1;  // This is level 1 verbosity
-// Note: the log severities are used to index into the array of names,
-// see log_severity_names.
-constexpr LogSeverity LOGGING_INFO = 0;
-constexpr LogSeverity LOGGING_WARNING = 1;
-constexpr LogSeverity LOGGING_ERROR = 2;
-constexpr LogSeverity LOGGING_FATAL = 3;
-constexpr LogSeverity LOGGING_NUM_SEVERITIES = 4;
-
-// LOGGING_DFATAL is LOGGING_FATAL in DCHECK-enabled builds, ERROR in normal
-// mode.
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-constexpr LogSeverity LOGGING_DFATAL = LOGGING_FATAL;
-#else
-constexpr LogSeverity LOGGING_DFATAL = LOGGING_ERROR;
-#endif
-
-// This block duplicates the above entries to facilitate incremental conversion
-// from LOG_FOO to LOGGING_FOO.
-// TODO(thestig): Convert existing users to LOGGING_FOO and remove this block.
-constexpr LogSeverity LOG_VERBOSE = LOGGING_VERBOSE;
-constexpr LogSeverity LOG_INFO = LOGGING_INFO;
-constexpr LogSeverity LOG_WARNING = LOGGING_WARNING;
-constexpr LogSeverity LOG_ERROR = LOGGING_ERROR;
-constexpr LogSeverity LOG_FATAL = LOGGING_FATAL;
-constexpr LogSeverity LOG_DFATAL = LOGGING_DFATAL;
-
-// A few definitions of macros that don't generate much code. These are used
-// by PA_LOG() and LOG_IF, etc. Since these are used all over our code, it's
-// better to have compact code for these operations.
-#define PA_COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...)                         \
-  ::partition_alloc::internal::logging::ClassName(                            \
-      __FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_INFO, \
-      ##__VA_ARGS__)
-#define PA_COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
-  ::partition_alloc::internal::logging::ClassName(       \
-      __FILE__, __LINE__,                                \
-      ::partition_alloc::internal::logging::LOGGING_WARNING, ##__VA_ARGS__)
-#define PA_COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...)                         \
-  ::partition_alloc::internal::logging::ClassName(                             \
-      __FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_ERROR, \
-      ##__VA_ARGS__)
-#define PA_COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...)                         \
-  ::partition_alloc::internal::logging::ClassName(                             \
-      __FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_FATAL, \
-      ##__VA_ARGS__)
-#define PA_COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
-  ::partition_alloc::internal::logging::ClassName(      \
-      __FILE__, __LINE__,                               \
-      ::partition_alloc::internal::logging::LOGGING_DFATAL, ##__VA_ARGS__)
-#define PA_COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
-  ::partition_alloc::internal::logging::ClassName(      \
-      __FILE__, __LINE__,                               \
-      ::partition_alloc::internal::logging::LOGGING_DCHECK, ##__VA_ARGS__)
-
-#define PA_COMPACT_GOOGLE_LOG_INFO PA_COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
-#define PA_COMPACT_GOOGLE_LOG_WARNING \
-  PA_COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
-#define PA_COMPACT_GOOGLE_LOG_ERROR PA_COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
-#define PA_COMPACT_GOOGLE_LOG_FATAL PA_COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
-#define PA_COMPACT_GOOGLE_LOG_DFATAL PA_COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
-#define PA_COMPACT_GOOGLE_LOG_DCHECK PA_COMPACT_GOOGLE_LOG_EX_DCHECK(LogMessage)
-
-#if BUILDFLAG(IS_WIN)
-// wingdi.h defines ERROR to be 0. When we call PA_LOG(ERROR), it gets
-// substituted with 0, and it expands to PA_COMPACT_GOOGLE_LOG_0. To allow us
-// to keep using this syntax, we define this macro to do the same thing
-// as PA_COMPACT_GOOGLE_LOG_ERROR, and also define ERROR the same way that
-// the Windows SDK does for consistency.
-#define PA_ERROR 0
-#define PA_COMPACT_GOOGLE_LOG_EX_0(ClassName, ...) \
-  PA_COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ##__VA_ARGS__)
-#define PA_COMPACT_GOOGLE_LOG_0 PA_COMPACT_GOOGLE_LOG_ERROR
-// Needed for LOG_IS_ON(ERROR).
-constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
-#endif
-
-// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
-// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
-// always fire if they fail.
-#define PA_LOG_IS_ON(severity)                                   \
-  (::partition_alloc::internal::logging::ShouldCreateLogMessage( \
-      ::partition_alloc::internal::logging::LOGGING_##severity))
-
-// We don't do any caching tricks with VLOG_IS_ON() like the
-// google-glog version since it increases binary size.  This means
-// that using the v-logging functions in conjunction with --vmodule
-// may be slow.
-#define PA_VLOG_IS_ON(verboselevel) \
-  ((verboselevel) <= ::partition_alloc::internal::logging::GetVlogVerbosity())
-
-// Helper macro which avoids evaluating the arguments to a stream if
-// the condition doesn't hold. Condition is evaluated once and only once.
-#define PA_LAZY_STREAM(stream, condition) \
-  !(condition)                            \
-      ? (void)0                           \
-      : ::partition_alloc::internal::logging::LogMessageVoidify() & (stream)
-
-// We use the preprocessor's merging operator, "##", so that, e.g.,
-// PA_LOG(INFO) becomes the token PA_COMPACT_GOOGLE_LOG_INFO.  There's some
-// funny subtle difference between ostream member streaming functions (e.g.,
-// ostream::operator<<(int) and ostream non-member streaming functions
-// (e.g., ::operator<<(ostream&, string&): it turns out that it's
-// impossible to stream something like a string directly to an unnamed
-// ostream. We employ a neat hack by calling the stream() member
-// function of LogMessage which seems to avoid the problem.
-#define PA_LOG_STREAM(severity) PA_COMPACT_GOOGLE_LOG_##severity.stream()
-
-#define PA_LOG(severity) \
-  PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_LOG_IS_ON(severity))
-#define PA_LOG_IF(severity, condition) \
-  PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_LOG_IS_ON(severity) && (condition))
-
-// The VLOG macros log with negative verbosities.
-#define PA_VLOG_STREAM(verbose_level)                                  \
-  ::partition_alloc::internal::logging::LogMessage(__FILE__, __LINE__, \
-                                                   -(verbose_level))   \
-      .stream()
-
-#define PA_VLOG(verbose_level) \
-  PA_LAZY_STREAM(PA_VLOG_STREAM(verbose_level), PA_VLOG_IS_ON(verbose_level))
-
-#define PA_VLOG_IF(verbose_level, condition)    \
-  PA_LAZY_STREAM(PA_VLOG_STREAM(verbose_level), \
-                 PA_VLOG_IS_ON(verbose_level) && (condition))
-
-#if BUILDFLAG(IS_WIN)
-#define PA_VPLOG_STREAM(verbose_level)                                \
-  ::partition_alloc::internal::logging::Win32ErrorLogMessage(         \
-      __FILE__, __LINE__, -(verbose_level),                           \
-      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
-      .stream()
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#define PA_VPLOG_STREAM(verbose_level)                                \
-  ::partition_alloc::internal::logging::ErrnoLogMessage(              \
-      __FILE__, __LINE__, -(verbose_level),                           \
-      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
-      .stream()
-#endif
-
-#define PA_VPLOG(verbose_level) \
-  PA_LAZY_STREAM(PA_VPLOG_STREAM(verbose_level), PA_VLOG_IS_ON(verbose_level))
-
-#define PA_VPLOG_IF(verbose_level, condition)    \
-  PA_LAZY_STREAM(PA_VPLOG_STREAM(verbose_level), \
-                 PA_VLOG_IS_ON(verbose_level) && (condition))
-
-// TODO(akalin): Add more VLOG variants, e.g. VPLOG.
-
-#define PA_LOG_ASSERT(condition)                          \
-  PA_LOG_IF(FATAL, !(PA_ANALYZER_ASSUME_TRUE(condition))) \
-      << "Assert failed: " #condition ". "
-
-#if BUILDFLAG(IS_WIN)
-#define PA_PLOG_STREAM(severity)                                      \
-  PA_COMPACT_GOOGLE_LOG_EX_##severity(                                \
-      Win32ErrorLogMessage,                                           \
-      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
-      .stream()
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#define PA_PLOG_STREAM(severity)                                      \
-  PA_COMPACT_GOOGLE_LOG_EX_##severity(                                \
-      ErrnoLogMessage,                                                \
-      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
-      .stream()
-#endif
-
-#define PA_PLOG(severity) \
-  PA_LAZY_STREAM(PA_PLOG_STREAM(severity), PA_LOG_IS_ON(severity))
-
-#define PA_PLOG_IF(severity, condition)    \
-  PA_LAZY_STREAM(PA_PLOG_STREAM(severity), \
-                 PA_LOG_IS_ON(severity) && (condition))
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern std::ostream* g_swallow_stream;
-
-// Note that g_swallow_stream is used instead of an arbitrary PA_LOG() stream to
-// avoid the creation of an object with a non-trivial destructor (LogMessage).
-// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
-// pointless instructions to be emitted even at full optimization level, even
-// though the : arm of the ternary operator is clearly never executed. Using a
-// simpler object to be &'d with Voidify() avoids these extra instructions.
-// Using a simpler POD object with a templated operator<< also works to avoid
-// these instructions. However, this causes warnings on statically defined
-// implementations of operator<<(std::ostream, ...) in some .cc files, because
-// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
-// ostream* also is not suitable, because some compilers warn of undefined
-// behavior.
-#define PA_EAT_STREAM_PARAMETERS                                     \
-  true ? (void)0                                                     \
-       : ::partition_alloc::internal::logging::LogMessageVoidify() & \
-             (*::partition_alloc::internal::logging::g_swallow_stream)
-
-// Definitions for DLOG et al.
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_DLOG_IS_ON(severity) PA_LOG_IS_ON(severity)
-#define PA_DLOG_IF(severity, condition) PA_LOG_IF(severity, condition)
-#define PA_DLOG_ASSERT(condition) PA_LOG_ASSERT(condition)
-#define PA_DPLOG_IF(severity, condition) PA_PLOG_IF(severity, condition)
-#define PA_DVLOG_IF(verboselevel, condition) PA_VLOG_IF(verboselevel, condition)
-#define PA_DVPLOG_IF(verboselevel, condition) \
-  PA_VPLOG_IF(verboselevel, condition)
-
-#else  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-// If !BUILDFLAG(PA_DCHECK_IS_ON), we want to avoid emitting any references to
-// |condition| (which may reference a variable defined only if
-// BUILDFLAG(PA_DCHECK_IS_ON)). Contrast this with DCHECK et al., which has
-// different behavior.
-
-#define PA_DLOG_IS_ON(severity) false
-#define PA_DLOG_IF(severity, condition) PA_EAT_STREAM_PARAMETERS
-#define PA_DLOG_ASSERT(condition) PA_EAT_STREAM_PARAMETERS
-#define PA_DPLOG_IF(severity, condition) PA_EAT_STREAM_PARAMETERS
-#define PA_DVLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
-#define PA_DVPLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_DLOG(severity) \
-  PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_DLOG_IS_ON(severity))
-
-#define PA_DPLOG(severity) \
-  PA_LAZY_STREAM(PA_PLOG_STREAM(severity), PA_DLOG_IS_ON(severity))
-
-#define PA_DVLOG(verboselevel) PA_DVLOG_IF(verboselevel, true)
-
-#define PA_DVPLOG(verboselevel) PA_DVPLOG_IF(verboselevel, true)
-
-// Definitions for DCHECK et al.
-
-#if BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern LogSeverity LOGGING_DCHECK;
-#else
-constexpr LogSeverity LOGGING_DCHECK = LOGGING_FATAL;
-#endif  // BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
-
-// Redefine the standard assert to use our nice log files
-#undef assert
-#define assert(x) PA_DLOG_ASSERT(x)
-
-// This class more or less represents a particular log message.  You
-// create an instance of LogMessage and then stream stuff to it.
-// When you finish streaming to it, ~LogMessage is called and the
-// full message gets streamed to the appropriate destination.
-//
-// You shouldn't actually use LogMessage's constructor to log things,
-// though.  You should use the PA_LOG() macro (and variants thereof)
-// above.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LogMessage {
- public:
-  // Used for PA_LOG(severity).
-  LogMessage(const char* file, int line, LogSeverity severity);
-
-  // Used for CHECK().  Implied severity = LOGGING_FATAL.
-  LogMessage(const char* file, int line, const char* condition);
-  LogMessage(const LogMessage&) = delete;
-  LogMessage& operator=(const LogMessage&) = delete;
-  virtual ~LogMessage();
-
-  std::ostream& stream() { return stream_; }
-
-  LogSeverity severity() { return severity_; }
-  std::string str() { return stream_.str(); }
-
- private:
-  void Init(const char* file, int line);
-
-  const LogSeverity severity_;
-  std::ostringstream stream_;
-  size_t message_start_;  // Offset of the start of the message (past prefix
-                          // info).
-  // The file and line information passed in to the constructor.
-  const char* const file_;
-  const int line_;
-
-  // This is useful since the LogMessage class uses a lot of Win32 calls
-  // that will lose the value of GLE and the code that called the log function
-  // will have lost the thread error value when the log call returns.
-  base::ScopedClearLastError last_error_;
-};
-
-// This class is used to explicitly ignore values in the conditional
-// logging macros.  This avoids compiler warnings like "value computed
-// is not used" and "statement has no effect".
-class LogMessageVoidify {
- public:
-  LogMessageVoidify() = default;
-  // This has to be an operator with a precedence lower than << but
-  // higher than ?:
-  void operator&(std::ostream&) {}
-};
-
-#if BUILDFLAG(IS_WIN)
-typedef unsigned long SystemErrorCode;
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-typedef int SystemErrorCode;
-#endif
-
-// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
-// pull in windows.h just for GetLastError() and DWORD.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) SystemErrorCode GetLastSystemErrorCode();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-std::string SystemErrorCodeToString(SystemErrorCode error_code);
-
-#if BUILDFLAG(IS_WIN)
-// Appends a formatted system message of the GetLastError() type.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) Win32ErrorLogMessage
-    : public LogMessage {
- public:
-  Win32ErrorLogMessage(const char* file,
-                       int line,
-                       LogSeverity severity,
-                       SystemErrorCode err);
-  Win32ErrorLogMessage(const Win32ErrorLogMessage&) = delete;
-  Win32ErrorLogMessage& operator=(const Win32ErrorLogMessage&) = delete;
-  // Appends the error message before destructing the encapsulated class.
-  ~Win32ErrorLogMessage() override;
-
- private:
-  SystemErrorCode err_;
-};
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-// Appends a formatted system message of the errno type
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ErrnoLogMessage : public LogMessage {
- public:
-  ErrnoLogMessage(const char* file,
-                  int line,
-                  LogSeverity severity,
-                  SystemErrorCode err);
-  ErrnoLogMessage(const ErrnoLogMessage&) = delete;
-  ErrnoLogMessage& operator=(const ErrnoLogMessage&) = delete;
-  // Appends the error message before destructing the encapsulated class.
-  ~ErrnoLogMessage() override;
-
- private:
-  SystemErrorCode err_;
-};
-#endif  // BUILDFLAG(IS_WIN)
-
-// Async signal safe logging mechanism.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void RawLog(int level, const char* message);
-
-#define PA_RAW_LOG(level, message)              \
-  ::partition_alloc::internal::logging::RawLog( \
-      ::partition_alloc::internal::logging::LOGGING_##level, message)
-
-}  // namespace partition_alloc::internal::logging
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOGGING_H_
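Editorial aside (not part of the diff): a sketch of the lazy-evaluation behaviour the deleted PA_LAZY_STREAM machinery provided — stream arguments are only evaluated when the severity is enabled. ExpensiveDump() and the wrapper function are hypothetical.

// Illustrative only.
void ConfigureQuietLogging() {
  namespace palog = partition_alloc::internal::logging;
  palog::SetMinLogLevel(palog::LOGGING_WARNING);

  // PA_LOG_IS_ON(INFO) is now false, so ExpensiveDump() is never evaluated:
  PA_LOG(INFO) << "state: " << ExpensiveDump();

  // WARNING and above still reach RawLog()/stderr:
  PA_LOG(WARNING) << "cache eviction took longer than expected";
}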
diff --git a/base/allocator/partition_allocator/partition_alloc_base/logging_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/logging_pa_unittest.cc
deleted file mode 100644
index 3cd2728..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/logging_pa_unittest.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <sstream>
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "build/build_config.h"
-
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::logging {
-
-namespace {
-
-using ::testing::_;
-using ::testing::Return;
-
-class MockLogSource {
- public:
-  MOCK_METHOD0(Log, const char*());
-};
-
-TEST(PALoggingTest, BasicLogging) {
-  MockLogSource mock_log_source;
-  constexpr int kTimes =
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-      16;
-#else
-      8;
-#endif
-  EXPECT_CALL(mock_log_source, Log())
-      .Times(kTimes)
-      .WillRepeatedly(Return("log message"));
-
-  SetMinLogLevel(LOGGING_INFO);
-
-  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
-  EXPECT_EQ(BUILDFLAG(PA_DCHECK_IS_ON), PA_DLOG_IS_ON(INFO));
-  EXPECT_TRUE(PA_VLOG_IS_ON(0));
-
-  PA_LOG(INFO) << mock_log_source.Log();
-  PA_LOG_IF(INFO, true) << mock_log_source.Log();
-  PA_PLOG(INFO) << mock_log_source.Log();
-  PA_PLOG_IF(INFO, true) << mock_log_source.Log();
-  PA_VLOG(0) << mock_log_source.Log();
-  PA_VLOG_IF(0, true) << mock_log_source.Log();
-  PA_VPLOG(0) << mock_log_source.Log();
-  PA_VPLOG_IF(0, true) << mock_log_source.Log();
-
-  PA_DLOG(INFO) << mock_log_source.Log();
-  PA_DLOG_IF(INFO, true) << mock_log_source.Log();
-  PA_DPLOG(INFO) << mock_log_source.Log();
-  PA_DPLOG_IF(INFO, true) << mock_log_source.Log();
-  PA_DVLOG(0) << mock_log_source.Log();
-  PA_DVLOG_IF(0, true) << mock_log_source.Log();
-  PA_DVPLOG(0) << mock_log_source.Log();
-  PA_DVPLOG_IF(0, true) << mock_log_source.Log();
-}
-
-TEST(PALoggingTest, LogIsOn) {
-  SetMinLogLevel(LOGGING_INFO);
-  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
-  EXPECT_TRUE(PA_LOG_IS_ON(WARNING));
-  EXPECT_TRUE(PA_LOG_IS_ON(ERROR));
-  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
-  EXPECT_TRUE(PA_LOG_IS_ON(DFATAL));
-
-  SetMinLogLevel(LOGGING_WARNING);
-  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
-  EXPECT_TRUE(PA_LOG_IS_ON(WARNING));
-  EXPECT_TRUE(PA_LOG_IS_ON(ERROR));
-  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
-  EXPECT_TRUE(PA_LOG_IS_ON(DFATAL));
-
-  SetMinLogLevel(LOGGING_ERROR);
-  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
-  EXPECT_FALSE(PA_LOG_IS_ON(WARNING));
-  EXPECT_TRUE(PA_LOG_IS_ON(ERROR));
-  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
-  EXPECT_TRUE(PA_LOG_IS_ON(DFATAL));
-
-  SetMinLogLevel(LOGGING_FATAL + 1);
-  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
-  EXPECT_FALSE(PA_LOG_IS_ON(WARNING));
-  EXPECT_FALSE(PA_LOG_IS_ON(ERROR));
-  // PA_LOG_IS_ON(FATAL) should always be true.
-  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
-  // If BUILDFLAG(PA_DCHECK_IS_ON) then DFATAL is FATAL.
-  EXPECT_EQ(BUILDFLAG(PA_DCHECK_IS_ON), PA_LOG_IS_ON(DFATAL));
-}
-
-TEST(PALoggingTest, LoggingIsLazyBySeverity) {
-  MockLogSource mock_log_source;
-  EXPECT_CALL(mock_log_source, Log()).Times(0);
-
-  SetMinLogLevel(LOGGING_WARNING);
-
-  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
-  EXPECT_FALSE(PA_DLOG_IS_ON(INFO));
-  EXPECT_FALSE(PA_VLOG_IS_ON(1));
-
-  PA_LOG(INFO) << mock_log_source.Log();
-  PA_LOG_IF(INFO, false) << mock_log_source.Log();
-  PA_PLOG(INFO) << mock_log_source.Log();
-  PA_PLOG_IF(INFO, false) << mock_log_source.Log();
-  PA_VLOG(1) << mock_log_source.Log();
-  PA_VLOG_IF(1, true) << mock_log_source.Log();
-  PA_VPLOG(1) << mock_log_source.Log();
-  PA_VPLOG_IF(1, true) << mock_log_source.Log();
-
-  PA_DLOG(INFO) << mock_log_source.Log();
-  PA_DLOG_IF(INFO, true) << mock_log_source.Log();
-  PA_DPLOG(INFO) << mock_log_source.Log();
-  PA_DPLOG_IF(INFO, true) << mock_log_source.Log();
-  PA_DVLOG(1) << mock_log_source.Log();
-  PA_DVLOG_IF(1, true) << mock_log_source.Log();
-  PA_DVPLOG(1) << mock_log_source.Log();
-  PA_DVPLOG_IF(1, true) << mock_log_source.Log();
-}
-
-// Always log-to-stderr(RawLog) if message handler is not assigned.
-TEST(PALoggingTest, LogIsAlwaysToStdErr) {
-  MockLogSource mock_log_source_stderr;
-  SetMinLogLevel(LOGGING_INFO);
-  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
-  EXPECT_CALL(mock_log_source_stderr, Log()).Times(1).WillOnce(Return("foo"));
-  PA_LOG(INFO) << mock_log_source_stderr.Log();
-}
-
-TEST(PALoggingTest, DebugLoggingReleaseBehavior) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  int debug_only_variable = 1;
-#endif
-  // These should avoid emitting references to |debug_only_variable|
-  // in release mode.
-  PA_DLOG_IF(INFO, debug_only_variable) << "test";
-  PA_DLOG_ASSERT(debug_only_variable) << "test";
-  PA_DPLOG_IF(INFO, debug_only_variable) << "test";
-  PA_DVLOG_IF(1, debug_only_variable) << "test";
-}
-
-}  // namespace
-
-}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h b/base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h
deleted file mode 100644
index 6960a3f..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_FOUNDATION_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_FOUNDATION_UTIL_H_
-
-#include <CoreFoundation/CoreFoundation.h>
-
-namespace partition_alloc::internal::base::mac {
-
-// CFCast<>() and CFCastStrict<>() cast a basic CFTypeRef to a more
-// specific CoreFoundation type. The compatibility of the passed
-// object is found by comparing its opaque type against the
-// requested type identifier. If the supplied object is not
-// compatible with the requested return type, CFCast<>() returns
-// NULL and CFCastStrict<>() will DCHECK. Providing a NULL pointer
-// to either variant results in NULL being returned without
-// triggering any DCHECK.
-//
-// Example usage:
-// CFNumberRef some_number = base::mac::CFCast<CFNumberRef>(
-//     CFArrayGetValueAtIndex(array, index));
-//
-// CFTypeRef hello = CFSTR("hello world");
-// CFStringRef some_string = base::mac::CFCastStrict<CFStringRef>(hello);
-
-template <typename T>
-T CFCast(const CFTypeRef& cf_val);
-
-template <typename T>
-T CFCastStrict(const CFTypeRef& cf_val);
-
-#define PA_CF_CAST_DECL(TypeCF)                             \
-  template <>                                               \
-  TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val); \
-                                                            \
-  template <>                                               \
-  TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val)
-
-PA_CF_CAST_DECL(CFArray);
-PA_CF_CAST_DECL(CFBag);
-PA_CF_CAST_DECL(CFBoolean);
-PA_CF_CAST_DECL(CFData);
-PA_CF_CAST_DECL(CFDate);
-PA_CF_CAST_DECL(CFDictionary);
-PA_CF_CAST_DECL(CFNull);
-PA_CF_CAST_DECL(CFNumber);
-PA_CF_CAST_DECL(CFSet);
-PA_CF_CAST_DECL(CFString);
-PA_CF_CAST_DECL(CFURL);
-PA_CF_CAST_DECL(CFUUID);
-
-#undef PA_CF_CAST_DECL
-
-}  // namespace partition_alloc::internal::base::mac
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_FOUNDATION_UTIL_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.mm b/base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.mm
deleted file mode 100644
index 7326db8..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.mm
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-namespace partition_alloc::internal::base::mac {
-
-#define PA_CF_CAST_DEFN(TypeCF)                                    \
-  template <>                                                      \
-  TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val) {       \
-    if (cf_val == NULL) {                                          \
-      return NULL;                                                 \
-    }                                                              \
-    if (CFGetTypeID(cf_val) == TypeCF##GetTypeID()) {              \
-      return (TypeCF##Ref)(cf_val);                                \
-    }                                                              \
-    return NULL;                                                   \
-  }                                                                \
-                                                                   \
-  template <>                                                      \
-  TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val) { \
-    TypeCF##Ref rv = CFCast<TypeCF##Ref>(cf_val);                  \
-    PA_BASE_DCHECK(cf_val == NULL || rv);                          \
-    return rv;                                                     \
-  }
-
-PA_CF_CAST_DEFN(CFArray)
-PA_CF_CAST_DEFN(CFBag)
-PA_CF_CAST_DEFN(CFBoolean)
-PA_CF_CAST_DEFN(CFData)
-PA_CF_CAST_DEFN(CFDate)
-PA_CF_CAST_DEFN(CFDictionary)
-PA_CF_CAST_DEFN(CFNull)
-PA_CF_CAST_DEFN(CFNumber)
-PA_CF_CAST_DEFN(CFSet)
-PA_CF_CAST_DEFN(CFString)
-PA_CF_CAST_DEFN(CFURL)
-PA_CF_CAST_DEFN(CFUUID)
-
-#undef PA_CF_CAST_DEFN
-
-}  // namespace partition_alloc::internal::base::mac
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h b/base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h
deleted file mode 100644
index f7e93f8..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_
-
-#include <AvailabilityMacros.h>
-#import <CoreGraphics/CoreGraphics.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base::mac {
-
-namespace internal {
-
-// Returns the system's macOS major and minor version numbers combined into an
-// integer value. For example, for macOS Sierra this returns 1012, and for macOS
-// Big Sur it returns 1100. Note that the accuracy returned by this function is
-// as granular as the major version number of Darwin.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) int MacOSVersion();
-
-}  // namespace internal
-
-// Run-time OS version checks. Prefer @available in Objective-C files. If that
-// is not possible, use these functions instead of
-// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "AtLeast" and
-// "AtMost" variants to those that check for a specific version, unless you know
-// for sure that you need to check for a specific version.
-
-#define PA_DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED(V, DEPLOYMENT_TARGET_TEST) \
-  inline bool IsOS10_##V() {                                                 \
-    DEPLOYMENT_TARGET_TEST(>, V, false)                                      \
-    return internal::MacOSVersion() == 1000 + V;                             \
-  }
-
-#define PA_DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(V, DEPLOYMENT_TARGET_TEST) \
-  inline bool IsOS##V() {                                                \
-    DEPLOYMENT_TARGET_TEST(>, V, false)                                  \
-    return internal::MacOSVersion() == V * 100;                          \
-  }
-
-#define PA_DEFINE_IS_OS_FUNCS(V, DEPLOYMENT_TARGET_TEST)           \
-  PA_DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(V, DEPLOYMENT_TARGET_TEST) \
-  inline bool IsAtLeastOS##V() {                                   \
-    DEPLOYMENT_TARGET_TEST(>=, V, true)                            \
-    return internal::MacOSVersion() >= V * 100;                    \
-  }                                                                \
-  inline bool IsAtMostOS##V() {                                    \
-    DEPLOYMENT_TARGET_TEST(>, V, false)                            \
-    return internal::MacOSVersion() <= V * 100;                    \
-  }
-
-#define PA_OLD_TEST_DEPLOYMENT_TARGET(OP, V, RET)               \
-  if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_X_VERSION_10_##V) \
-    return RET;
-#define PA_TEST_DEPLOYMENT_TARGET(OP, V, RET)                  \
-  if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_VERSION_##V##_0) \
-    return RET;
-#define PA_IGNORE_DEPLOYMENT_TARGET(OP, V, RET)
-
-// Notes:
-// - When bumping the minimum version of the macOS required by Chromium, remove
-//   lines from below corresponding to versions of the macOS no longer
-//   supported. Ensure that the minimum supported version uses the
-//   PA_DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED macro. When macOS 11.0 is the
-//   minimum required version, remove all the OLD versions of the macros.
-// - When bumping the minimum version of the macOS SDK required to build
-//   Chromium, remove the #ifdef that switches between
-//   PA_TEST_DEPLOYMENT_TARGET and PA_IGNORE_DEPLOYMENT_TARGET.
-
-// Versions of macOS supported at runtime but whose SDK is not supported for
-// building.
-PA_DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED(15, PA_OLD_TEST_DEPLOYMENT_TARGET)
-PA_DEFINE_IS_OS_FUNCS(11, PA_TEST_DEPLOYMENT_TARGET)
-PA_DEFINE_IS_OS_FUNCS(12, PA_TEST_DEPLOYMENT_TARGET)
-
-// Versions of macOS supported at runtime and whose SDK is supported for
-// building.
-#ifdef MAC_OS_VERSION_13_0
-PA_DEFINE_IS_OS_FUNCS(13, PA_TEST_DEPLOYMENT_TARGET)
-#else
-PA_DEFINE_IS_OS_FUNCS(13, PA_IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#ifdef MAC_OS_VERSION_14_0
-PA_DEFINE_IS_OS_FUNCS(14, PA_TEST_DEPLOYMENT_TARGET)
-#else
-PA_DEFINE_IS_OS_FUNCS(14, PA_IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#undef PA_DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED
-#undef PA_DEFINE_OLD_IS_OS_FUNCS
-#undef PA_DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED
-#undef PA_DEFINE_IS_OS_FUNCS
-#undef PA_OLD_TEST_DEPLOYMENT_TARGET
-#undef PA_TEST_DEPLOYMENT_TARGET
-#undef PA_IGNORE_DEPLOYMENT_TARGET
-
-}  // namespace partition_alloc::internal::base::mac
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_
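Editorial aside (not part of the diff): the approximate shape one invocation of the deleted macros generated, assuming an SDK where MAC_OS_VERSION_13_0 is defined. PA_DEFINE_IS_OS_FUNCS(13, PA_TEST_DEPLOYMENT_TARGET) expanded to roughly the three functions below; the deployment-target guard lets the compiler fold the answer to a constant whenever the minimum required macOS already decides it.

// Approximate expansion, for illustration:
inline bool IsOS13() {
  if (MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_VERSION_13_0) return false;
  return internal::MacOSVersion() == 1300;
}
inline bool IsAtLeastOS13() {
  if (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_13_0) return true;
  return internal::MacOSVersion() >= 1300;
}
inline bool IsAtMostOS13() {
  if (MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_VERSION_13_0) return false;
  return internal::MacOSVersion() <= 1300;
}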
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.mm b/base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.mm
deleted file mode 100644
index df15dd8..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.mm
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-
-#include <stddef.h>
-#include <string.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-#include <sys/utsname.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-
-namespace partition_alloc::internal::base::mac {
-
-namespace {
-
-// Returns the running system's Darwin major version. Don't call this, it's an
-// implementation detail and its result is meant to be cached by
-// MacOSVersionInternal().
-int DarwinMajorVersionInternal() {
-  // base::OperatingSystemVersionNumbers() at one time called Gestalt(), which
-  // was observed to be able to spawn threads (see https://crbug.com/53200).
-  // Nowadays that function calls -[NSProcessInfo operatingSystemVersion], whose
-  // current implementation does things like hit the file system, which is
-  // possibly a blocking operation. Either way, it's overkill for what needs to
-  // be done here.
-  //
-  // uname, on the other hand, is implemented as a simple series of sysctl
-  // system calls to obtain the relevant data from the kernel. The data is
-  // compiled right into the kernel, so no threads or blocking or other
-  // funny business is necessary.
-
-  struct utsname uname_info;
-  if (uname(&uname_info) != 0) {
-    PA_DPLOG(ERROR) << "uname";
-    return 0;
-  }
-
-  if (strcmp(uname_info.sysname, "Darwin") != 0) {
-    PA_DLOG(ERROR) << "unexpected uname sysname " << uname_info.sysname;
-    return 0;
-  }
-
-  const char* dot = strchr(uname_info.release, '.');
-  if (!dot || uname_info.release == dot ||
-      // Darwin version should be 1 or 2 digits, it's unlikely to be more than
-      // 4 digits.
-      dot - uname_info.release > 4) {
-    PA_DLOG(ERROR) << "could not parse uname release " << uname_info.release;
-    return 0;
-  }
-
-  int darwin_major_version = 0;
-  constexpr int base = 10;
-  for (const char* p = uname_info.release; p < dot; ++p) {
-    if (!('0' <= *p && *p < '0' + base)) {
-      PA_DLOG(ERROR) << "could not parse uname release " << uname_info.release;
-      return 0;
-    }
-
-    // Since we checked the number of digits is 4 at most (see above), there is
-    // no chance to overflow.
-    darwin_major_version *= base;
-    darwin_major_version += *p - '0';
-  }
-
-  return darwin_major_version;
-}
-
-// The implementation of MacOSVersion() as defined in the header. Don't call
-// this, it's an implementation detail and the result is meant to be cached by
-// MacOSVersion().
-int MacOSVersionInternal() {
-  int darwin_major_version = DarwinMajorVersionInternal();
-
-  // Darwin major versions 6 through 19 corresponded to macOS versions 10.2
-  // through 10.15.
-  PA_BASE_CHECK(darwin_major_version >= 6);
-  if (darwin_major_version <= 19)
-    return 1000 + darwin_major_version - 4;
-
-  // Darwin major version 20 corresponds to macOS version 11.0. Assume a
-  // correspondence between Darwin's major version numbers and macOS major
-  // version numbers.
-  int macos_major_version = darwin_major_version - 9;
-
-  return macos_major_version * 100;
-}
-
-}  // namespace
-
-namespace internal {
-
-int MacOSVersion() {
-  static int macos_version = MacOSVersionInternal();
-  return macos_version;
-}
-
-}  // namespace internal
-
-}  // namespace partition_alloc::internal::base::mac
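For reference, the version encoding removed above works out as follows (arithmetic only, assuming the MacOSVersionInternal() logic shown in the deleted file):

// Darwin 19 -> 1000 + 19 - 4 = 1015   (macOS 10.15)
// Darwin 20 -> (20 - 9) * 100 = 1100  (macOS 11)
// Darwin 22 -> (22 - 9) * 100 = 1300  (macOS 13)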
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.cc b/base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.cc
deleted file mode 100644
index 5ddca4f..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h"
-
-#include <iomanip>
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h"
-#include "build/build_config.h"
-
-namespace {
-
-std::string FormatMachErrorNumber(mach_error_t mach_err) {
-  // For the os/kern subsystem, give the error number in decimal as in
-  // <mach/kern_return.h>. Otherwise, give it in hexadecimal to make it easier
-  // to visualize the various bits. See <mach/error.h>.
-  if (mach_err >= 0 && mach_err < KERN_RETURN_MAX) {
-    return partition_alloc::internal::base::TruncatingStringPrintf(" (%d)",
-                                                                   mach_err);
-  }
-  return partition_alloc::internal::base::TruncatingStringPrintf(" (0x%08x)",
-                                                                 mach_err);
-}
-
-}  // namespace
-
-namespace partition_alloc::internal::logging {
-
-MachLogMessage::MachLogMessage(const char* file_path,
-                               int line,
-                               LogSeverity severity,
-                               mach_error_t mach_err)
-    : LogMessage(file_path, line, severity), mach_err_(mach_err) {}
-
-MachLogMessage::~MachLogMessage() {
-  stream() << ": " << mach_error_string(mach_err_)
-           << FormatMachErrorNumber(mach_err_);
-}
-
-}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h b/base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h
deleted file mode 100644
index 7c62724..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MACH_LOGGING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MACH_LOGGING_H_
-
-#include <mach/mach.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "build/build_config.h"
-
-// Use the PA_MACH_LOG family of macros along with a mach_error_t
-// (kern_return_t) containing a Mach error. The error value will be decoded so
-// that logged messages explain the error.
-//
-// Examples:
-//
-//   kern_return_t kr = mach_timebase_info(&info);
-//   if (kr != KERN_SUCCESS) {
-//     PA_MACH_LOG(ERROR, kr) << "mach_timebase_info";
-//   }
-//
-//   kr = vm_deallocate(task, address, size);
-//   PA_MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
-
-namespace partition_alloc::internal::logging {
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MachLogMessage
-    : public partition_alloc::internal::logging::LogMessage {
- public:
-  MachLogMessage(const char* file_path,
-                 int line,
-                 LogSeverity severity,
-                 mach_error_t mach_err);
-
-  MachLogMessage(const MachLogMessage&) = delete;
-  MachLogMessage& operator=(const MachLogMessage&) = delete;
-
-  ~MachLogMessage() override;
-
- private:
-  mach_error_t mach_err_;
-};
-
-}  // namespace partition_alloc::internal::logging
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#define PA_MACH_DVLOG_IS_ON(verbose_level) PA_VLOG_IS_ON(verbose_level)
-#else
-#define PA_MACH_DVLOG_IS_ON(verbose_level) 0
-#endif
-
-#define PA_MACH_LOG_STREAM(severity, mach_err) \
-  PA_COMPACT_GOOGLE_LOG_EX_##severity(MachLogMessage, mach_err).stream()
-#define PA_MACH_VLOG_STREAM(verbose_level, mach_err)    \
-  ::partition_alloc::internal::logging::MachLogMessage( \
-      __FILE__, __LINE__, -verbose_level, mach_err)     \
-      .stream()
-
-#define PA_MACH_LOG(severity, mach_err) \
-  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), PA_LOG_IS_ON(severity))
-#define PA_MACH_LOG_IF(severity, condition, mach_err)    \
-  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), \
-                 PA_LOG_IS_ON(severity) && (condition))
-
-#define PA_MACH_VLOG(verbose_level, mach_err)                  \
-  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
-                 PA_VLOG_IS_ON(verbose_level))
-#define PA_MACH_VLOG_IF(verbose_level, condition, mach_err)    \
-  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
-                 PA_VLOG_IS_ON(verbose_level) && (condition))
-
-#define PA_MACH_CHECK(condition, mach_err)                          \
-  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(FATAL, mach_err), !(condition)) \
-      << "Check failed: " #condition << ". "
-
-#define PA_MACH_DLOG(severity, mach_err)                 \
-  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), \
-                 PA_DLOG_IS_ON(severity))
-#define PA_MACH_DLOG_IF(severity, condition, mach_err)   \
-  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), \
-                 PA_DLOG_IS_ON(severity) && (condition))
-
-#define PA_MACH_DVLOG(verbose_level, mach_err)                 \
-  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
-                 PA_MACH_DVLOG_IS_ON(verbose_level))
-#define PA_MACH_DVLOG_IF(verbose_level, condition, mach_err)   \
-  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
-                 PA_MACH_DVLOG_IS_ON(verbose_level) && (condition))
-
-#define PA_MACH_DCHECK(condition, mach_err)                  \
-  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(FATAL, mach_err),        \
-                 BUILDFLAG(PA_DCHECK_IS_ON) && !(condition)) \
-      << "Check failed: " #condition << ". "
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MACH_LOGGING_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h b/base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h
deleted file mode 100644
index 60919d0..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_SCOPED_CFTYPEREF_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_SCOPED_CFTYPEREF_H_
-
-#include <CoreFoundation/CoreFoundation.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/scoped_typeref.h"
-
-namespace partition_alloc::internal::base {
-
-// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
-// ownership of a CoreFoundation object: any object that can be represented
-// as a CFTypeRef.  Style deviations here are solely for compatibility with
-// std::unique_ptr<>'s interface, with which everyone is already familiar.
-//
-// By default, ScopedCFTypeRef<> takes ownership of an object (in the
-// constructor or in reset()) by taking over the caller's existing ownership
-// claim.  The caller must own the object it gives to ScopedCFTypeRef<>, and
-// relinquishes an ownership claim to that object.  ScopedCFTypeRef<> does not
-// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
-// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
-// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
-// ownership is not changed.
-
-namespace internal {
-
-template <typename CFT>
-struct ScopedCFTypeRefTraits {
-  static CFT InvalidValue() { return nullptr; }
-  static CFT Retain(CFT object) {
-    CFRetain(object);
-    return object;
-  }
-  static void Release(CFT object) { CFRelease(object); }
-};
-
-}  // namespace internal
-
-template <typename CFT>
-using ScopedCFTypeRef =
-    ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_SCOPED_CFTYPEREF_H_
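The ASSUME/RETAIN policies described in the deleted scoped_cftyperef.h are easiest to see with a concrete CoreFoundation type; a minimal sketch (the CFStringRef values are hypothetical; CFStringCreateWithCString is the standard CoreFoundation create function):

// Create functions return a +1 reference, which ScopedCFTypeRef adopts (ASSUME, the default).
ScopedCFTypeRef<CFStringRef> owned(CFStringCreateWithCString(
    kCFAllocatorDefault, "hello", kCFStringEncodingUTF8));

// For a reference the caller does not own, RETAIN makes the wrapper take its own +1.
ScopedCFTypeRef<CFStringRef> shared(borrowed_string, base::scoped_policy::RETAIN);

// Both wrappers call CFRelease() when they go out of scope.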
diff --git a/base/allocator/partition_allocator/partition_alloc_base/mac/scoped_typeref.h b/base/allocator/partition_allocator/partition_alloc_base/mac/scoped_typeref.h
deleted file mode 100644
index 6d7c26f..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/mac/scoped_typeref.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_SCOPED_TYPEREF_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_SCOPED_TYPEREF_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_policy.h"
-
-namespace partition_alloc::internal::base {
-
-// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
-// of a reference to any type that is maintained by Retain and Release methods.
-//
-// The Traits structure must provide the Retain and Release methods for type T.
-// A default ScopedTypeRefTraits is used but not defined, and should be defined
-// for each type to use this interface. For example, an appropriate definition
-// of ScopedTypeRefTraits for CGLContextObj would be:
-//
-//   template<>
-//   struct ScopedTypeRefTraits<CGLContextObj> {
-//     static CGLContextObj InvalidValue() { return nullptr; }
-//     static CGLContextObj Retain(CGLContextObj object) {
-//       CGLContextRetain(object);
-//       return object;
-//     }
-//     static void Release(CGLContextObj object) { CGLContextRelease(object); }
-//   };
-//
-// For the many types that have pass-by-pointer create functions, the function
-// InitializeInto() is provided to allow direct initialization and assumption
-// of ownership of the object. For example, continuing to use the above
-// CGLContextObj specialization:
-//
-//   base::ScopedTypeRef<CGLContextObj> context;
-//   CGLCreateContext(pixel_format, share_group, context.InitializeInto());
-//
-// For initialization with an existing object, the caller may specify whether
-// the ScopedTypeRef<> being initialized is assuming the caller's existing
-// ownership of the object (and should not call Retain in initialization) or if
-// it should not assume this ownership and must create its own (by calling
-// Retain in initialization). This behavior is based on the |policy| parameter,
-// with |ASSUME| for the former and |RETAIN| for the latter. The default policy
-// is to |ASSUME|.
-
-template <typename T>
-struct ScopedTypeRefTraits;
-
-template <typename T, typename Traits = ScopedTypeRefTraits<T>>
-class ScopedTypeRef {
- public:
-  using element_type = T;
-
-  explicit constexpr ScopedTypeRef(
-      element_type object = Traits::InvalidValue(),
-      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
-      : object_(object) {
-    if (object_ && policy == base::scoped_policy::RETAIN)
-      object_ = Traits::Retain(object_);
-  }
-
-  ScopedTypeRef(const ScopedTypeRef<T, Traits>& that) : object_(that.object_) {
-    if (object_)
-      object_ = Traits::Retain(object_);
-  }
-
-  // This allows passing an object to a function that takes its superclass.
-  template <typename R, typename RTraits>
-  explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
-      : object_(that_as_subclass.get()) {
-    if (object_)
-      object_ = Traits::Retain(object_);
-  }
-
-  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
-    that.object_ = Traits::InvalidValue();
-  }
-
-  ~ScopedTypeRef() {
-    if (object_)
-      Traits::Release(object_);
-  }
-
-  ScopedTypeRef& operator=(const ScopedTypeRef<T, Traits>& that) {
-    reset(that.get(), base::scoped_policy::RETAIN);
-    return *this;
-  }
-
-  // This is to be used only to take ownership of objects that are created
-  // by pass-by-pointer create functions. To enforce this, require that the
-  // object be reset to NULL before this may be used.
-  [[nodiscard]] element_type* InitializeInto() {
-    PA_BASE_DCHECK(!object_);
-    return &object_;
-  }
-
-  void reset(const ScopedTypeRef<T, Traits>& that) {
-    reset(that.get(), base::scoped_policy::RETAIN);
-  }
-
-  void reset(element_type object = Traits::InvalidValue(),
-             base::scoped_policy::OwnershipPolicy policy =
-                 base::scoped_policy::ASSUME) {
-    if (object && policy == base::scoped_policy::RETAIN)
-      object = Traits::Retain(object);
-    if (object_)
-      Traits::Release(object_);
-    object_ = object;
-  }
-
-  bool operator==(const ScopedTypeRef& that) const {
-    return object_ == that.object_;
-  }
-
-  bool operator!=(const ScopedTypeRef& that) const {
-    return object_ != that.object_;
-  }
-
-  operator element_type() const { return object_; }
-
-  element_type get() const { return object_; }
-
-  void swap(ScopedTypeRef& that) {
-    element_type temp = that.object_;
-    that.object_ = object_;
-    object_ = temp;
-  }
-
-  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
-  // a wrapper for Release().  To force a ScopedTypeRef<> object to call
-  // Release(), use ScopedTypeRef<>::reset().
-  [[nodiscard]] element_type release() {
-    element_type temp = object_;
-    object_ = Traits::InvalidValue();
-    return temp;
-  }
-
- private:
-  element_type object_;
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_SCOPED_TYPEREF_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h b/base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h
deleted file mode 100644
index 19ce91a..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base {
-
-// Returns the number of bytes in a memory page. Do not use this to compute
-// the number of pages in a block of memory for calling mincore(). On some
-// platforms, e.g. iOS, mincore() uses a different page size from what is
-// returned by GetPageSize().
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetPageSize();
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/page_size_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/memory/page_size_posix.cc
deleted file mode 100644
index 68df92c..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/page_size_posix.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h"
-
-#include <unistd.h>
-
-namespace partition_alloc::internal::base {
-
-size_t GetPageSize() {
-  static const size_t pagesize = []() -> size_t {
-  // For more information see getpagesize(2). Portable applications should use
-  // sysconf(_SC_PAGESIZE) rather than getpagesize() if it's available.
-#if defined(_SC_PAGESIZE)
-    return static_cast<size_t>(sysconf(_SC_PAGESIZE));
-#else
-    return getpagesize();
-#endif
-  }();
-  return pagesize;
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/page_size_win.cc b/base/allocator/partition_allocator/partition_alloc_base/memory/page_size_win.cc
deleted file mode 100644
index e37372c..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/page_size_win.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h"
-
-namespace partition_alloc::internal::base {
-
-size_t GetPageSize() {
-  // System pagesize. This value remains constant on x86/64 architectures.
-  constexpr int PAGESIZE_KB = 4;
-  return PAGESIZE_KB * 1024;
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.cc b/base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.cc
deleted file mode 100644
index 515980f..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h"
-
-#include <limits>
-#include <ostream>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-
-namespace partition_alloc::internal::base::subtle {
-
-bool RefCountedThreadSafeBase::HasOneRef() const {
-  return ref_count_.IsOne();
-}
-
-bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
-  return !ref_count_.IsZero();
-}
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
-  PA_BASE_DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
-                              "calling Release()";
-}
-#endif
-
-// For security and correctness, we check the arithmetic on ref counts.
-//
-// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
-// these functions out-of-line. However, compilers are wily. Further testing may
-// show that `PA_NOINLINE` helps or hurts.
-//
-#if !defined(ARCH_CPU_X86_FAMILY)
-bool RefCountedThreadSafeBase::Release() const {
-  return ReleaseImpl();
-}
-void RefCountedThreadSafeBase::AddRef() const {
-  AddRefImpl();
-}
-void RefCountedThreadSafeBase::AddRefWithCheck() const {
-  AddRefWithCheckImpl();
-}
-#endif
-
-}  // namespace partition_alloc::internal::base::subtle
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h b/base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h
deleted file mode 100644
index 33a965b..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-namespace subtle {
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) RefCountedThreadSafeBase {
- public:
-  RefCountedThreadSafeBase(const RefCountedThreadSafeBase&) = delete;
-  RefCountedThreadSafeBase& operator=(const RefCountedThreadSafeBase&) = delete;
-
-  bool HasOneRef() const;
-  bool HasAtLeastOneRef() const;
-
- protected:
-  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
-  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
-      : ref_count_(1) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    needs_adopt_ref_ = true;
-#endif
-  }
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  ~RefCountedThreadSafeBase();
-#else
-  ~RefCountedThreadSafeBase() = default;
-#endif
-
-// Release and AddRef are suitable for inlining on X86 because they generate
-// very small code sequences. On other platforms (ARM), it causes a size
-// regression and is probably not worth it.
-#if defined(ARCH_CPU_X86_FAMILY)
-  // Returns true if the object should self-delete.
-  bool Release() const { return ReleaseImpl(); }
-  void AddRef() const { AddRefImpl(); }
-  void AddRefWithCheck() const { AddRefWithCheckImpl(); }
-#else
-  // Returns true if the object should self-delete.
-  bool Release() const;
-  void AddRef() const;
-  void AddRefWithCheck() const;
-#endif
-
- private:
-  template <typename U>
-  friend scoped_refptr<U> AdoptRef(U*);
-
-  void Adopted() const {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    PA_BASE_DCHECK(needs_adopt_ref_);
-    needs_adopt_ref_ = false;
-#endif
-  }
-
-  PA_ALWAYS_INLINE void AddRefImpl() const {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    PA_BASE_DCHECK(!in_dtor_);
-    // This RefCounted object is created with non-zero reference count.
-    // The first reference to such a object has to be made by AdoptRef or
-    // MakeRefCounted.
-    PA_BASE_DCHECK(!needs_adopt_ref_);
-#endif
-    ref_count_.Increment();
-  }
-
-  PA_ALWAYS_INLINE void AddRefWithCheckImpl() const {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    PA_BASE_DCHECK(!in_dtor_);
-    // This RefCounted object is created with non-zero reference count.
-    // The first reference to such a object has to be made by AdoptRef or
-    // MakeRefCounted.
-    PA_BASE_DCHECK(!needs_adopt_ref_);
-#endif
-    PA_BASE_CHECK(ref_count_.Increment() > 0);
-  }
-
-  PA_ALWAYS_INLINE bool ReleaseImpl() const {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    PA_BASE_DCHECK(!in_dtor_);
-    PA_BASE_DCHECK(!ref_count_.IsZero());
-#endif
-    if (!ref_count_.Decrement()) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-      in_dtor_ = true;
-#endif
-      return true;
-    }
-    return false;
-  }
-
-  mutable AtomicRefCount ref_count_{0};
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  mutable bool needs_adopt_ref_ = false;
-  mutable bool in_dtor_ = false;
-#endif
-};
-
-}  // namespace subtle
-
-// Forward declaration.
-template <class T, typename Traits>
-class RefCountedThreadSafe;
-
-// Default traits for RefCountedThreadSafe<T>.  Deletes the object when its ref
-// count reaches 0.  Overload to delete it on a different thread etc.
-template <typename T>
-struct DefaultRefCountedThreadSafeTraits {
-  static void Destruct(const T* x) {
-    // Delete through RefCountedThreadSafe to make child classes only need to be
-    // friend with RefCountedThreadSafe instead of this struct, which is an
-    // implementation detail.
-    RefCountedThreadSafe<T, DefaultRefCountedThreadSafeTraits>::DeleteInternal(
-        x);
-  }
-};
-
-//
-// A thread-safe variant of RefCounted<T>
-//
-//   class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
-//    ...
-//   };
-//
-// If you're using the default trait, then you should add compile time
-// asserts that no one else is deleting your object.  i.e.
-//    private:
-//     friend class base::RefCountedThreadSafe<MyFoo>;
-//     ~MyFoo();
-//
-// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
-// too. See the comment above the RefCounted definition for details.
-template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T>>
-class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
- public:
-  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
-      subtle::kStartRefCountFromZeroTag;
-
-  explicit RefCountedThreadSafe()
-      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
-
-  RefCountedThreadSafe(const RefCountedThreadSafe&) = delete;
-  RefCountedThreadSafe& operator=(const RefCountedThreadSafe&) = delete;
-
-  void AddRef() const { AddRefImpl(T::kRefCountPreference); }
-
-  void Release() const {
-    if (subtle::RefCountedThreadSafeBase::Release()) {
-      PA_ANALYZER_SKIP_THIS_PATH();
-      Traits::Destruct(static_cast<const T*>(this));
-    }
-  }
-
- protected:
-  ~RefCountedThreadSafe() = default;
-
- private:
-  friend struct DefaultRefCountedThreadSafeTraits<T>;
-  template <typename U>
-  static void DeleteInternal(const U* x) {
-    delete x;
-  }
-
-  void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
-    subtle::RefCountedThreadSafeBase::AddRef();
-  }
-
-  void AddRefImpl(subtle::StartRefCountFromOneTag) const {
-    subtle::RefCountedThreadSafeBase::AddRefWithCheck();
-  }
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
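The usage pattern documented piecemeal in the deleted ref_counted.h, assembled into one hypothetical class (MyFoo is illustrative only):

class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
 public:
  MyFoo() = default;
  void DoWork() {}

 private:
  // Only RefCountedThreadSafe may delete MyFoo, so callers cannot bypass the
  // reference count with a plain `delete`.
  friend class base::RefCountedThreadSafe<MyFoo>;
  ~MyFoo() = default;
};

// scoped_refptr<MyFoo> foo = base::MakeRefCounted<MyFoo>();
// foo->DoWork();  // The object is deleted when the last reference is released.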
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/scoped_policy.h b/base/allocator/partition_allocator/partition_alloc_base/memory/scoped_policy.h
deleted file mode 100644
index dbbe675..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/scoped_policy.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
-
-namespace partition_alloc::internal::base::scoped_policy {
-
-// Defines the ownership policy for a scoped object.
-enum OwnershipPolicy {
-  // The scoped object takes ownership of an object by taking over an existing
-  // ownership claim.
-  ASSUME,
-
-  // The scoped object will retain the object and any initial ownership is
-  // not changed.
-  RETAIN
-};
-
-}  // namespace partition_alloc::internal::base::scoped_policy
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h b/base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h
deleted file mode 100644
index 9382f1d..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
-
-#include <stddef.h>
-
-#include <iosfwd>
-#include <type_traits>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-
-namespace partition_alloc::internal {
-
-template <class T>
-class scoped_refptr;
-
-namespace base {
-
-template <class, typename>
-class RefCountedThreadSafe;
-
-template <typename T>
-scoped_refptr<T> AdoptRef(T* t);
-
-namespace subtle {
-
-enum AdoptRefTag { kAdoptRefTag };
-enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
-enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
-
-// scoped_refptr<T> is typically used with one of several RefCounted<T> base
-// classes or with custom AddRef and Release methods. These overloads dispatch
-// on which was used.
-
-template <typename T, typename U, typename V>
-constexpr bool IsRefCountPreferenceOverridden(
-    const T*,
-    const RefCountedThreadSafe<U, V>*) {
-  return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
-                       std::decay_t<decltype(U::kRefCountPreference)>>::value;
-}
-
-constexpr bool IsRefCountPreferenceOverridden(...) {
-  return false;
-}
-
-template <typename T, typename U, typename V>
-constexpr void AssertRefCountBaseMatches(const T*,
-                                         const RefCountedThreadSafe<U, V>*) {
-  static_assert(
-      std::is_base_of_v<U, T>,
-      "T implements RefCountedThreadSafe<U>, but U is not a base of T.");
-}
-
-constexpr void AssertRefCountBaseMatches(...) {}
-
-}  // namespace subtle
-
-// Creates a scoped_refptr from a raw pointer without incrementing the reference
-// count. Use this only for a newly created object whose reference count starts
-// from 1 instead of 0.
-template <typename T>
-scoped_refptr<T> AdoptRef(T* obj) {
-  using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
-  static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
-                "Use AdoptRef only if the reference count starts from one.");
-
-  PA_BASE_DCHECK(obj);
-  PA_BASE_DCHECK(obj->HasOneRef());
-  obj->Adopted();
-  return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
-}
-
-namespace subtle {
-
-template <typename T>
-scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
-  return scoped_refptr<T>(obj);
-}
-
-template <typename T>
-scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
-  return AdoptRef(obj);
-}
-
-}  // namespace subtle
-
-// Constructs an instance of T, which is a ref counted type, and wraps the
-// object into a scoped_refptr<T>.
-template <typename T, typename... Args>
-scoped_refptr<T> MakeRefCounted(Args&&... args) {
-  T* obj = new T(std::forward<Args>(args)...);
-  return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
-}
-
-// Takes an instance of T, which is a ref counted type, and wraps the object
-// into a scoped_refptr<T>.
-template <typename T>
-scoped_refptr<T> WrapRefCounted(T* t) {
-  return scoped_refptr<T>(t);
-}
-
-}  // namespace base
-
-//
-// A smart pointer class for reference counted objects.  Use this class instead
-// of calling AddRef and Release manually on a reference counted object to
-// avoid common memory leaks caused by forgetting to Release an object
-// reference.  Sample usage:
-//
-//   class MyFoo : public RefCounted<MyFoo> {
-//    ...
-//    private:
-//     friend class RefCounted<MyFoo>;  // Allow destruction by RefCounted<>.
-//     ~MyFoo();                        // Destructor must be private/protected.
-//   };
-//
-//   void some_function() {
-//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
-//     foo->Method(param);
-//     // |foo| is released when this function returns
-//   }
-//
-//   void some_other_function() {
-//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
-//     ...
-//     foo.reset();  // explicitly releases |foo|
-//     ...
-//     if (foo)
-//       foo->Method(param);
-//   }
-//
-// The above examples show how scoped_refptr<T> acts like a pointer to T.
-// Given two scoped_refptr<T> classes, it is also possible to exchange
-// references between the two objects, like so:
-//
-//   {
-//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
-//     scoped_refptr<MyFoo> b;
-//
-//     b.swap(a);
-//     // now, |b| references the MyFoo object, and |a| references nullptr.
-//   }
-//
-// To make both |a| and |b| in the above example reference the same MyFoo
-// object, simply use the assignment operator:
-//
-//   {
-//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
-//     scoped_refptr<MyFoo> b;
-//
-//     b = a;
-//     // now, |a| and |b| each own a reference to the same MyFoo object.
-//   }
-//
-// Also see Chromium's ownership and calling conventions:
-// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
-// Specifically:
-//   If the function (at least sometimes) takes a ref on a refcounted object,
-//   declare the param as scoped_refptr<T>. The caller can decide whether it
-//   wishes to transfer ownership (by calling std::move(t) when passing t) or
-//   retain its ref (by simply passing t directly).
-//   In other words, use scoped_refptr like you would a std::unique_ptr except
-//   in the odd case where it's required to hold on to a ref while handing one
-//   to another component (if a component merely needs to use t on the stack
-//   without keeping a ref: pass t as a raw T*).
-template <class T>
-class PA_TRIVIAL_ABI scoped_refptr {
- public:
-  typedef T element_type;
-
-  constexpr scoped_refptr() = default;
-
-  // Allow implicit construction from nullptr.
-  constexpr scoped_refptr(std::nullptr_t) {}
-
-  // Constructs from a raw pointer. Note that this constructor allows implicit
-  // conversion from T* to scoped_refptr<T> which is strongly discouraged. If
-  // you are creating a new ref-counted object please use
-  // base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
-  // should move or copy construct from an existing scoped_refptr<T> to the
-  // ref-counted object.
-  scoped_refptr(T* p) : ptr_(p) {
-    if (ptr_)
-      AddRef(ptr_);
-  }
-
-  // Copy constructor. This is required in addition to the copy conversion
-  // constructor below.
-  scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
-
-  // Copy conversion constructor.
-  template <typename U,
-            typename = typename std::enable_if<
-                std::is_convertible<U*, T*>::value>::type>
-  scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
-
-  // Move constructor. This is required in addition to the move conversion
-  // constructor below.
-  scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
-
-  // Move conversion constructor.
-  template <typename U,
-            typename = typename std::enable_if<
-                std::is_convertible<U*, T*>::value>::type>
-  scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
-    r.ptr_ = nullptr;
-  }
-
-  ~scoped_refptr() {
-    static_assert(!base::subtle::IsRefCountPreferenceOverridden(
-                      static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
-                  "It's unsafe to override the ref count preference."
-                  " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
-                  " from subclasses.");
-    if (ptr_)
-      Release(ptr_);
-  }
-
-  T* get() const { return ptr_; }
-
-  T& operator*() const {
-    PA_BASE_DCHECK(ptr_);
-    return *ptr_;
-  }
-
-  T* operator->() const {
-    PA_BASE_DCHECK(ptr_);
-    return ptr_;
-  }
-
-  scoped_refptr& operator=(std::nullptr_t) {
-    reset();
-    return *this;
-  }
-
-  scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
-
-  // Unified assignment operator.
-  scoped_refptr& operator=(scoped_refptr r) noexcept {
-    swap(r);
-    return *this;
-  }
-
-  // Sets managed object to null and releases reference to the previous managed
-  // object, if it existed.
-  void reset() { scoped_refptr().swap(*this); }
-
-  // Returns the owned pointer (if any), releasing ownership to the caller. The
-  // caller is responsible for managing the lifetime of the reference.
-  [[nodiscard]] T* release();
-
-  void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
-
-  explicit operator bool() const { return ptr_ != nullptr; }
-
-  template <typename U>
-  bool operator==(const scoped_refptr<U>& rhs) const {
-    return ptr_ == rhs.get();
-  }
-
-  template <typename U>
-  bool operator!=(const scoped_refptr<U>& rhs) const {
-    return !operator==(rhs);
-  }
-
-  template <typename U>
-  bool operator<(const scoped_refptr<U>& rhs) const {
-    return ptr_ < rhs.get();
-  }
-
- protected:
-  T* ptr_ = nullptr;
-
- private:
-  template <typename U>
-  friend scoped_refptr<U> base::AdoptRef(U*);
-
-  scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
-
-  // Friend required for move constructors that set r.ptr_ to null.
-  template <typename U>
-  friend class scoped_refptr;
-
-  // Non-inline helpers to allow:
-  //     class Opaque;
-  //     extern template class scoped_refptr<Opaque>;
-  // Otherwise the compiler will complain that Opaque is an incomplete type.
-  static void AddRef(T* ptr);
-  static void Release(T* ptr);
-};
-
-template <typename T>
-T* scoped_refptr<T>::release() {
-  T* ptr = ptr_;
-  ptr_ = nullptr;
-  return ptr;
-}
-
-// static
-template <typename T>
-void scoped_refptr<T>::AddRef(T* ptr) {
-  base::subtle::AssertRefCountBaseMatches(ptr, ptr);
-  ptr->AddRef();
-}
-
-// static
-template <typename T>
-void scoped_refptr<T>::Release(T* ptr) {
-  base::subtle::AssertRefCountBaseMatches(ptr, ptr);
-  ptr->Release();
-}
-
-template <typename T, typename U>
-bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
-  return lhs.get() == rhs;
-}
-
-template <typename T, typename U>
-bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
-  return lhs == rhs.get();
-}
-
-template <typename T>
-bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
-  return !static_cast<bool>(lhs);
-}
-
-template <typename T>
-bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
-  return !static_cast<bool>(rhs);
-}
-
-template <typename T, typename U>
-bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
-  return !operator==(lhs, rhs);
-}
-
-template <typename T, typename U>
-bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
-  return !operator==(lhs, rhs);
-}
-
-template <typename T>
-bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
-  return !operator==(lhs, null);
-}
-
-template <typename T>
-bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
-  return !operator==(null, rhs);
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
-  return out << p.get();
-}
-
-template <typename T>
-void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
-  lhs.swap(rhs);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
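The calling convention described near the top of the deleted scoped_refptr.h reads more clearly as code; a sketch with hypothetical functions (Consumer, Inspect) and a hypothetical ref-counted type MyFoo:

void Consumer(scoped_refptr<MyFoo> foo);  // May keep a reference.
void Inspect(MyFoo* foo);                 // Only uses the object on the stack.

void Caller() {
  scoped_refptr<MyFoo> foo = base::MakeRefCounted<MyFoo>();
  Inspect(foo.get());        // No ownership transferred; pass a raw pointer.
  Consumer(foo);             // Caller keeps its reference; the count is bumped.
  Consumer(std::move(foo));  // Caller hands over its reference; no extra bump.
}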
diff --git a/base/allocator/partition_allocator/partition_alloc_base/native_library.cc b/base/allocator/partition_allocator/partition_alloc_base/native_library.cc
deleted file mode 100644
index 6d3897f..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/native_library.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
-
-namespace partition_alloc::internal::base {
-
-NativeLibrary LoadNativeLibrary(const FilePath& library_path,
-                                NativeLibraryLoadError* error) {
-  return LoadNativeLibraryWithOptions(library_path, NativeLibraryOptions(),
-                                      error);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/native_library.h b/base/allocator/partition_allocator/partition_alloc_base/native_library.h
deleted file mode 100644
index 219d1c0..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/native_library.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
-
-// This file defines a cross-platform "NativeLibrary" type which represents
-// a loadable module.
-
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#elif BUILDFLAG(IS_APPLE)
-#import <CoreFoundation/CoreFoundation.h>
-#endif  // OS_*
-
-namespace partition_alloc::internal::base {
-
-#if BUILDFLAG(IS_WIN)
-using NativeLibrary = HMODULE;
-#elif BUILDFLAG(IS_APPLE)
-enum NativeLibraryType { BUNDLE, DYNAMIC_LIB };
-enum NativeLibraryObjCStatus {
-  OBJC_UNKNOWN,
-  OBJC_PRESENT,
-  OBJC_NOT_PRESENT,
-};
-struct NativeLibraryStruct {
-  NativeLibraryType type;
-  CFBundleRefNum bundle_resource_ref;
-  NativeLibraryObjCStatus objc_status;
-  union {
-    CFBundleRef bundle;
-    void* dylib;
-  };
-};
-using NativeLibrary = NativeLibraryStruct*;
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-using NativeLibrary = void*;
-#endif  // OS_*
-
-struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) NativeLibraryLoadError {
-#if BUILDFLAG(IS_WIN)
-  NativeLibraryLoadError() : code(0) {}
-#endif  // BUILDFLAG(IS_WIN)
-
-  // Returns a string representation of the load error.
-  std::string ToString() const;
-
-#if BUILDFLAG(IS_WIN)
-  DWORD code;
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  std::string message;
-#endif  // BUILDFLAG(IS_WIN)
-};
-
-struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) NativeLibraryOptions {
-  NativeLibraryOptions() = default;
-  NativeLibraryOptions(const NativeLibraryOptions& options) = default;
-
-  // If |true|, a loaded library is required to prefer local symbol resolution
-  // before considering global symbols. Note that this is already the default
-  // behavior on most systems. Setting this to |false| does not guarantee the
-  // inverse, i.e., it does not force a preference for global symbols over local
-  // ones.
-  bool prefer_own_symbols = false;
-};
-
-// Loads a native library from disk.  Release it with UnloadNativeLibrary when
-// you're done.  Returns NULL on failure.
-// If |error| is not NULL, it may be filled in on load error.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-NativeLibrary LoadNativeLibrary(const FilePath& library_path,
-                                NativeLibraryLoadError* error);
-
-// Loads a native library from disk.  Release it with UnloadNativeLibrary when
-// you're done.  Returns NULL on failure.
-// If |error| is not NULL, it may be filled in on load error.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
-                                           const NativeLibraryOptions& options,
-                                           NativeLibraryLoadError* error);
-
-// Gets a function pointer from a native library.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
-                                          const std::string& name);
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
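A minimal sketch of the loader API declared in the deleted native_library.h (the library name and symbol are hypothetical):

void CallExampleEntryPoint() {
  NativeLibraryLoadError error;
  NativeLibrary lib = LoadNativeLibrary(
      FilePath(PA_FILE_PATH_LITERAL("libexample.so")), &error);
  if (!lib) {
    return;  // error.ToString() describes why the load failed.
  }
  using EntryPoint = int (*)();
  auto entry = reinterpret_cast<EntryPoint>(
      GetFunctionPointerFromNativeLibrary(lib, "ExampleEntryPoint"));
  if (entry) {
    entry();
  }
}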
diff --git a/base/allocator/partition_allocator/partition_alloc_base/native_library_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/native_library_pa_unittest.cc
deleted file mode 100644
index a2ef975..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/native_library_pa_unittest.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::base {
-
-const FilePath::CharType kDummyLibraryPath[] =
-    PA_FILE_PATH_LITERAL("dummy_library");
-
-TEST(PartitionAllocBaseNativeLibraryTest, LoadFailure) {
-  NativeLibraryLoadError error;
-  EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), &error));
-  EXPECT_FALSE(error.ToString().empty());
-}
-
-// |error| is optional and can be null.
-TEST(PartitionAllocBaseNativeLibraryTest, LoadFailureWithNullError) {
-  EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), nullptr));
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/native_library_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/native_library_posix.cc
deleted file mode 100644
index 67fd20a..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/native_library_posix.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
-
-#include <dlfcn.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-std::string NativeLibraryLoadError::ToString() const {
-  return message;
-}
-
-NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
-                                           const NativeLibraryOptions& options,
-                                           NativeLibraryLoadError* error) {
-  // TODO(1151236): Temporarily disable this ScopedBlockingCall. After making
-  // partition_alloc ScopedBlockingCall() to see the same blocking_observer_
-  // in base's ScopedBlockingCall(), we will copy ScopedBlockingCall code and
-  // will enable this.
-
-  // dlopen() opens the file off disk.
-  // ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
-
-  // We deliberately do not use RTLD_DEEPBIND by default.  For the history why,
-  // please refer to the bug tracker.  Some useful bug reports to read include:
-  // http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
-  // and http://crbug.com/40794.
-  int flags = RTLD_LAZY;
-#if BUILDFLAG(IS_ANDROID) || !defined(RTLD_DEEPBIND)
-  // Certain platforms don't define RTLD_DEEPBIND. Android dlopen() requires
-  // further investigation, as it might vary across versions. Crash here to
-  // warn developers that they're trying to rely on uncertain behavior.
-  PA_BASE_CHECK(!options.prefer_own_symbols);
-#else
-  if (options.prefer_own_symbols)
-    flags |= RTLD_DEEPBIND;
-#endif
-  void* dl = dlopen(library_path.value().c_str(), flags);
-  if (!dl && error)
-    error->message = dlerror();
-
-  return dl;
-}
-
-void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
-                                          const std::string& name) {
-  return dlsym(library, name.c_str());
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/no_destructor.h b/base/allocator/partition_allocator/partition_alloc_base/no_destructor.h
deleted file mode 100644
index 98c2867..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/no_destructor.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
-
-#include <new>
-#include <type_traits>
-#include <utility>
-
-namespace partition_alloc::internal::base {
-
-// Helper type to create a function-local static variable of type `T` when `T`
-// has a non-trivial destructor. Storing a `T` in a `base::NoDestructor<T>` will
-// prevent `~T()` from running, even when the variable goes out of scope.
-//
-// Useful when a variable has static storage duration but its type has a
-// non-trivial destructor. Chromium bans global constructors and destructors:
-// using a function-local static variable prevents the former, while using
-// `base::NoDestructor<T>` prevents the latter.
-//
-// ## Caveats
-//
-// - Must only be used as a function-local static variable. Declaring a global
-//   variable of type `base::NoDestructor<T>` will still generate a global
-//   constructor; declaring a local or member variable will lead to memory leaks
-//   or other surprising and undesirable behaviour.
-//
-// - If the data is rarely used, consider creating it on demand rather than
-//   caching it for the lifetime of the program. Though `base::NoDestructor<T>`
-//   does not heap allocate, the compiler still reserves space in bss for
-//   storing `T`, which costs memory at runtime.
-//
-// - If `T` is trivially destructible, do not use `base::NoDestructor<T>`:
-//
-//     const uint64_t GetUnstableSessionSeed() {
-//       // No need to use `base::NoDestructor<T>` as `uint64_t` is trivially
-//       // destructible and does not require a global destructor.
-//       static const uint64_t kSessionSeed = base::RandUint64();
-//       return kSessionSeed;
-//     }
-//
-// ## Example Usage
-//
-// const std::string& GetDefaultText() {
-//   // Required since `static const std::string` requires a global destructor.
-//   static const base::NoDestructor<std::string> s("Hello world!");
-//   return *s;
-// }
-//
-// More complex initialization using a lambda:
-//
-// const std::string& GetRandomNonce() {
-//   // `nonce` is initialized with random data the first time this function is
-//   // called, but its value is fixed thereafter.
-//   static const base::NoDestructor<std::string> nonce([] {
-//     std::string s(16);
-//     crypto::RandString(s.data(), s.size());
-//     return s;
-//   }());
-//   return *nonce;
-// }
-//
-// ## Thread safety
-//
-// Initialisation of function-local static variables is thread-safe since C++11.
-// The standard guarantees that:
-//
-// - function-local static variables will be initialised the first time
-//   execution passes through the declaration.
-//
-// - if another thread's execution concurrently passes through the declaration
-//   in the middle of initialisation, that thread will wait for the in-progress
-//   initialisation to complete.
-template <typename T>
-class NoDestructor {
- public:
-  static_assert(
-      !std::is_trivially_destructible_v<T>,
-      "T is trivially destructible; please use a function-local static "
-      "of type T directly instead");
-
-  // Not constexpr; just write static constexpr T x = ...; if the value should
-  // be a constexpr.
-  template <typename... Args>
-  explicit NoDestructor(Args&&... args) {
-    new (storage_) T(std::forward<Args>(args)...);
-  }
-
-  // Allows copy and move construction of the contained type, to allow
-  // construction from an initializer list, e.g. for std::vector.
-  explicit NoDestructor(const T& x) { new (storage_) T(x); }
-  explicit NoDestructor(T&& x) { new (storage_) T(std::move(x)); }
-
-  NoDestructor(const NoDestructor&) = delete;
-  NoDestructor& operator=(const NoDestructor&) = delete;
-
-  ~NoDestructor() = default;
-
-  const T& operator*() const { return *get(); }
-  T& operator*() { return *get(); }
-
-  const T* operator->() const { return get(); }
-  T* operator->() { return get(); }
-
-  const T* get() const { return reinterpret_cast<const T*>(storage_); }
-  T* get() { return reinterpret_cast<T*>(storage_); }
-
- private:
-  alignas(T) char storage_[sizeof(T)];
-
-#if defined(LEAK_SANITIZER)
-  // TODO(https://crbug.com/812277): This is a hack to work around the fact
-  // that LSan doesn't seem to treat NoDestructor as a root for reachability
-  // analysis. This means that code like this:
-  //   static base::NoDestructor<std::vector<int>> v({1, 2, 3});
-  // is considered a leak. Using the standard leak sanitizer annotations to
-  // suppress leaks doesn't work: std::vector is implicitly constructed before
-  // calling the base::NoDestructor constructor.
-  //
-  // Unfortunately, I haven't been able to demonstrate this issue in simpler
-  // reproductions: until that's resolved, hold an explicit pointer to the
-  // placement-new'd object in leak sanitizer mode to help LSan realize that
-  // objects allocated by the contained type are still reachable.
-  T* storage_ptr_ = reinterpret_cast<T*>(storage_);
-#endif  // defined(LEAK_SANITIZER)
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/notreached.h b/base/allocator/partition_allocator/partition_alloc_base/notreached.h
deleted file mode 100644
index 5127222..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/notreached.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NOTREACHED_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NOTREACHED_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-// PA_NOTREACHED() annotates paths that are supposed to be unreachable. They
-// crash if they are ever hit.
-#if PA_BASE_CHECK_WILL_STREAM()
-// PartitionAlloc uses async-signal-safe RawCheckFailure() for error reporting.
-// Async-signal-safe functions are guaranteed to not allocate as otherwise they
-// could operate with inconsistent allocator state.
-#define PA_NOTREACHED()                                  \
-  ::partition_alloc::internal::logging::RawCheckFailure( \
-      __FILE__ "(" PA_STRINGIFY(__LINE__) ") PA_NOTREACHED() hit.")
-#else
-#define PA_NOTREACHED() PA_IMMEDIATE_CRASH()
-#endif  // PA_BASE_CHECK_WILL_STREAM()
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NOTREACHED_H_
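
A rough illustration of the intended use of the macro defined above (the enum and function are hypothetical, not taken from the deleted file): PA_NOTREACHED() marks branches that should be impossible to reach and crashes if one is ever taken.

    #include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"

    // Illustrative enum; any fully-handled switch works the same way.
    enum class BucketState { kEmpty, kActive, kDecommitted };

    const char* ToString(BucketState state) {
      switch (state) {
        case BucketState::kEmpty:
          return "empty";
        case BucketState::kActive:
          return "active";
        case BucketState::kDecommitted:
          return "decommitted";
      }
      // Every enumerator is handled above; reaching this line implies a bad
      // cast or memory corruption, so crash immediately.
      PA_NOTREACHED();
    }
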
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h
deleted file mode 100644
index 9f32938..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
-
-#include <stddef.h>
-
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math_impl.h"
-
-namespace partition_alloc::internal::base {
-namespace internal {
-
-template <typename T>
-class CheckedNumeric {
-  static_assert(std::is_arithmetic<T>::value,
-                "CheckedNumeric<T>: T must be a numeric type.");
-
- public:
-  template <typename Src>
-  friend class CheckedNumeric;
-
-  using type = T;
-
-  constexpr CheckedNumeric() = default;
-
-  // Copy constructor.
-  template <typename Src>
-  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
-      : state_(rhs.state_.value(), rhs.IsValid()) {}
-
-  // This is not an explicit constructor because we implicitly upgrade regular
-  // numerics to CheckedNumerics to make them easier to use.
-  template <typename Src>
-  constexpr CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
-      : state_(value) {
-    static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
-  }
-
-  // This is not an explicit constructor because we want a seamless conversion
-  // from StrictNumeric types.
-  template <typename Src>
-  constexpr CheckedNumeric(
-      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
-      : state_(static_cast<Src>(value)) {}
-
-  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
-  // A range checked destination type can be supplied using the Dst template
-  // parameter.
-  template <typename Dst = T>
-  constexpr bool IsValid() const {
-    return state_.is_valid() &&
-           IsValueInRangeForNumericType<Dst>(state_.value());
-  }
-
-  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
-  // and is within the range supported by the destination type. Returns true if
-  // successful and false otherwise.
-  template <typename Dst>
-#if defined(__clang__) || defined(__GNUC__)
-  __attribute__((warn_unused_result))
-#elif defined(_MSC_VER)
-  _Check_return_
-#endif
-  constexpr bool
-  AssignIfValid(Dst* result) const {
-    return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
-               ? ((*result = static_cast<Dst>(state_.value())), true)
-               : false;
-  }
-
-  // ValueOrDie() - The primary accessor for the underlying value. If the
-  // current state is not valid it will CHECK and crash.
-  // A range checked destination type can be supplied using the Dst template
-  // parameter, which will trigger a CHECK if the value is not in bounds for
-  // the destination.
-  // The CHECK behavior can be overridden by supplying a handler as a
-  // template parameter, for test code, etc. However, the handler cannot access
-  // the underlying value, and it is not available through other means.
-  template <typename Dst = T, class CheckHandler = CheckOnFailure>
-  constexpr StrictNumeric<Dst> ValueOrDie() const {
-    return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
-               ? static_cast<Dst>(state_.value())
-               : CheckHandler::template HandleFailure<Dst>();
-  }
-
-  // ValueOrDefault(T default_value) - A convenience method that returns the
-  // current value if the state is valid, and the supplied default_value for
-  // any other state.
-  // A range checked destination type can be supplied using the Dst template
-  // parameter. WARNING: This function may fail to compile or CHECK at runtime
-  // if the supplied default_value is not within range of the destination type.
-  template <typename Dst = T, typename Src>
-  constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
-    return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
-               ? static_cast<Dst>(state_.value())
-               : checked_cast<Dst>(default_value);
-  }
-
-  // Returns a checked numeric of the specified type, cast from the current
-  // CheckedNumeric. If the current state is invalid or the destination cannot
-  // represent the result then the returned CheckedNumeric will be invalid.
-  template <typename Dst>
-  constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
-    return *this;
-  }
-
-  // This friend method is available solely for providing more detailed logging
-  // in the tests. Do not implement it in production code, because the
-  // underlying values may change at any time.
-  template <typename U>
-  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
-
-  // Prototypes for the supported arithmetic operator overloads.
-  template <typename Src>
-  constexpr CheckedNumeric& operator+=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator-=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator*=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator/=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator%=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator<<=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator>>=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator&=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator|=(const Src rhs);
-  template <typename Src>
-  constexpr CheckedNumeric& operator^=(const Src rhs);
-
-  constexpr CheckedNumeric operator-() const {
-    // Use an optimized code path for a known run-time variable.
-    if (!PA_IsConstantEvaluated() && std::is_signed<T>::value &&
-        std::is_floating_point<T>::value) {
-      return FastRuntimeNegate();
-    }
-    // The negation of two's complement int min is int min.
-    const bool is_valid =
-        IsValid() &&
-        (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
-         NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
-    return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
-  }
-
-  constexpr CheckedNumeric operator~() const {
-    return CheckedNumeric<decltype(InvertWrapper(T()))>(
-        InvertWrapper(state_.value()), IsValid());
-  }
-
-  constexpr CheckedNumeric Abs() const {
-    return !IsValueNegative(state_.value()) ? *this : -*this;
-  }
-
-  template <typename U>
-  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
-      const U rhs) const {
-    return CheckMax(*this, rhs);
-  }
-
-  template <typename U>
-  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
-      const U rhs) const {
-    return CheckMin(*this, rhs);
-  }
-
-  // This function is available only for integral types. It returns an unsigned
-  // integer of the same width as the source type, containing the absolute value
-  // of the source, and properly handling signed min.
-  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
-  UnsignedAbs() const {
-    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
-        SafeUnsignedAbs(state_.value()), state_.is_valid());
-  }
-
-  constexpr CheckedNumeric& operator++() {
-    *this += 1;
-    return *this;
-  }
-
-  constexpr CheckedNumeric operator++(int) {
-    CheckedNumeric value = *this;
-    *this += 1;
-    return value;
-  }
-
-  constexpr CheckedNumeric& operator--() {
-    *this -= 1;
-    return *this;
-  }
-
-  constexpr CheckedNumeric operator--(int) {
-    // TODO(pkasting): Consider std::exchange() once it's constexpr in C++20.
-    const CheckedNumeric value = *this;
-    *this -= 1;
-    return value;
-  }
-
-  // These perform the actual math operations on the CheckedNumerics.
-  // Binary arithmetic operations.
-  template <template <typename, typename, typename> class M,
-            typename L,
-            typename R>
-  static constexpr CheckedNumeric MathOp(const L lhs, const R rhs) {
-    using Math = typename MathWrapper<M, L, R>::math;
-    T result = 0;
-    const bool is_valid =
-        Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
-        Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
-    return CheckedNumeric<T>(result, is_valid);
-  }
-
-  // Assignment arithmetic operations.
-  template <template <typename, typename, typename> class M, typename R>
-  constexpr CheckedNumeric& MathOp(const R rhs) {
-    using Math = typename MathWrapper<M, T, R>::math;
-    T result = 0;  // Using T as the destination saves a range check.
-    const bool is_valid =
-        state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
-        Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
-    *this = CheckedNumeric<T>(result, is_valid);
-    return *this;
-  }
-
- private:
-  CheckedNumericState<T> state_;
-
-  CheckedNumeric FastRuntimeNegate() const {
-    T result;
-    const bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
-    return CheckedNumeric<T>(result, IsValid() && success);
-  }
-
-  template <typename Src>
-  constexpr CheckedNumeric(Src value, bool is_valid)
-      : state_(value, is_valid) {}
-
-  // These wrappers allow us to handle state the same way for both
-  // CheckedNumeric and POD arithmetic types.
-  template <typename Src>
-  struct Wrapper {
-    static constexpr bool is_valid(Src) { return true; }
-    static constexpr Src value(Src value) { return value; }
-  };
-
-  template <typename Src>
-  struct Wrapper<CheckedNumeric<Src>> {
-    static constexpr bool is_valid(const CheckedNumeric<Src> v) {
-      return v.IsValid();
-    }
-    static constexpr Src value(const CheckedNumeric<Src> v) {
-      return v.state_.value();
-    }
-  };
-
-  template <typename Src>
-  struct Wrapper<StrictNumeric<Src>> {
-    static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
-    static constexpr Src value(const StrictNumeric<Src> v) {
-      return static_cast<Src>(v);
-    }
-  };
-};
-
-// Convenience functions to avoid the ugly template disambiguator syntax.
-template <typename Dst, typename Src>
-constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
-  return value.template IsValid<Dst>();
-}
-
-template <typename Dst, typename Src>
-constexpr StrictNumeric<Dst> ValueOrDieForType(
-    const CheckedNumeric<Src> value) {
-  return value.template ValueOrDie<Dst>();
-}
-
-template <typename Dst, typename Src, typename Default>
-constexpr StrictNumeric<Dst> ValueOrDefaultForType(
-    const CheckedNumeric<Src> value,
-    const Default default_value) {
-  return value.template ValueOrDefault<Dst>(default_value);
-}
-
-// Convenience wrapper to return a new CheckedNumeric from the provided
-// arithmetic or CheckedNumericType.
-template <typename T>
-constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
-    const T value) {
-  return value;
-}
-
-// These implement the variadic wrapper for the math operations.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R>
-constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(
-    const L lhs,
-    const R rhs) {
-  using Math = typename MathWrapper<M, L, R>::math;
-  return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
-                                                                        rhs);
-}
-
-// General purpose wrapper template for arithmetic operations.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R,
-          typename... Args>
-constexpr auto CheckMathOp(const L lhs, const R rhs, const Args... args) {
-  return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
-}
-
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
-PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
-PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
-
-// These are some extra StrictNumeric operators to support simple pointer
-// arithmetic with our result types. Since wrapping on a pointer is always
-// bad, we trigger the CHECK condition here.
-template <typename L, typename R>
-L* operator+(L* lhs, const StrictNumeric<R> rhs) {
-  const uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
-                                    CheckMul(sizeof(L), static_cast<R>(rhs)))
-                               .template ValueOrDie<uintptr_t>();
-  return reinterpret_cast<L*>(result);
-}
-
-template <typename L, typename R>
-L* operator-(L* lhs, const StrictNumeric<R> rhs) {
-  const uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
-                                    CheckMul(sizeof(L), static_cast<R>(rhs)))
-                               .template ValueOrDie<uintptr_t>();
-  return reinterpret_cast<L*>(result);
-}
-
-}  // namespace internal
-
-using internal::CheckAdd;
-using internal::CheckAnd;
-using internal::CheckDiv;
-using internal::CheckedNumeric;
-using internal::CheckLsh;
-using internal::CheckMax;
-using internal::CheckMin;
-using internal::CheckMod;
-using internal::CheckMul;
-using internal::CheckOr;
-using internal::CheckRsh;
-using internal::CheckSub;
-using internal::CheckXor;
-using internal::IsValidForType;
-using internal::MakeCheckedNum;
-using internal::ValueOrDefaultForType;
-using internal::ValueOrDieForType;
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
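
For reference, a short sketch of how the CheckedNumeric API declared above is meant to be used; the namespace alias, function names, and sizes are illustrative, not taken from the tree.

    #include <cstddef>

    #include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"

    namespace pa_base = partition_alloc::internal::base;

    // Computes count * element_size + header_size, failing cleanly on
    // overflow instead of silently wrapping.
    bool ComputeAllocationSize(size_t count,
                               size_t element_size,
                               size_t header_size,
                               size_t* out) {
      pa_base::CheckedNumeric<size_t> total = count;
      total *= element_size;
      total += header_size;
      // AssignIfValid() writes the result only if no intermediate step
      // overflowed and the value fits in the destination type.
      return total.AssignIfValid(out);
    }

    // ValueOrDefault() collapses the invalid state to a sentinel instead.
    size_t ComputeOrZero(size_t count, size_t element_size) {
      return (pa_base::CheckedNumeric<size_t>(count) * element_size)
          .ValueOrDefault(0u);
    }
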
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math_impl.h
deleted file mode 100644
index 3b1e95a..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math_impl.h
+++ /dev/null
@@ -1,593 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <climits>
-#include <cmath>
-#include <cstdlib>
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h"
-
-namespace partition_alloc::internal::base::internal {
-
-template <typename T>
-constexpr bool CheckedAddImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  // Since the value of x+y is undefined if we have a signed type, we compute
-  // it using the unsigned type of the same size.
-  using UnsignedDst = typename std::make_unsigned<T>::type;
-  using SignedDst = typename std::make_signed<T>::type;
-  const UnsignedDst ux = static_cast<UnsignedDst>(x);
-  const UnsignedDst uy = static_cast<UnsignedDst>(y);
-  const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
-  // Addition is valid if the sign of (x + y) is equal to either that of x or
-  // that of y.
-  if (std::is_signed<T>::value
-          ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
-          : uresult < uy)  // Unsigned is either valid or underflow.
-    return false;
-  *result = static_cast<T>(uresult);
-  return true;
-}
-
-template <typename T, typename U, class Enable = void>
-struct CheckedAddOp {};
-
-template <typename T, typename U>
-struct CheckedAddOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    if constexpr (CheckedAddFastOp<T, U>::is_supported)
-      return CheckedAddFastOp<T, U>::Do(x, y, result);
-
-    // Double the underlying type up to a full machine word.
-    using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
-    using Promotion =
-        typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
-                                   IntegerBitsPlusSign<intptr_t>::value),
-                                  typename BigEnoughPromotion<T, U>::type,
-                                  FastPromotion>::type;
-    // Fail if either operand is out of range for the promoted type.
-    // TODO(jschuh): This could be made to work for a broader range of values.
-    if (PA_BASE_NUMERICS_UNLIKELY(
-            !IsValueInRangeForNumericType<Promotion>(x) ||
-            !IsValueInRangeForNumericType<Promotion>(y))) {
-      return false;
-    }
-
-    Promotion presult = {};
-    bool is_valid = true;
-    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
-      presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
-    } else {
-      is_valid = CheckedAddImpl(static_cast<Promotion>(x),
-                                static_cast<Promotion>(y), &presult);
-    }
-    if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
-      return false;
-    *result = static_cast<V>(presult);
-    return true;
-  }
-};
-
-template <typename T>
-constexpr bool CheckedSubImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  // Since the value of x - y is undefined if we have a signed type, we compute
-  // it using the unsigned type of the same size.
-  using UnsignedDst = typename std::make_unsigned<T>::type;
-  using SignedDst = typename std::make_signed<T>::type;
-  const UnsignedDst ux = static_cast<UnsignedDst>(x);
-  const UnsignedDst uy = static_cast<UnsignedDst>(y);
-  const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
-  // Subtraction is valid if either x and y have the same sign, or (x-y) and x have
-  // the same sign.
-  if (std::is_signed<T>::value
-          ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0
-          : x < y)
-    return false;
-  *result = static_cast<T>(uresult);
-  return true;
-}
-
-template <typename T, typename U, class Enable = void>
-struct CheckedSubOp {};
-
-template <typename T, typename U>
-struct CheckedSubOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    if constexpr (CheckedSubFastOp<T, U>::is_supported)
-      return CheckedSubFastOp<T, U>::Do(x, y, result);
-
-    // Double the underlying type up to a full machine word.
-    using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
-    using Promotion =
-        typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
-                                   IntegerBitsPlusSign<intptr_t>::value),
-                                  typename BigEnoughPromotion<T, U>::type,
-                                  FastPromotion>::type;
-    // Fail if either operand is out of range for the promoted type.
-    // TODO(jschuh): This could be made to work for a broader range of values.
-    if (PA_BASE_NUMERICS_UNLIKELY(
-            !IsValueInRangeForNumericType<Promotion>(x) ||
-            !IsValueInRangeForNumericType<Promotion>(y))) {
-      return false;
-    }
-
-    Promotion presult = {};
-    bool is_valid = true;
-    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
-      presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
-    } else {
-      is_valid = CheckedSubImpl(static_cast<Promotion>(x),
-                                static_cast<Promotion>(y), &presult);
-    }
-    if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
-      return false;
-    *result = static_cast<V>(presult);
-    return true;
-  }
-};
-
-template <typename T>
-constexpr bool CheckedMulImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  // Since the value of x*y is potentially undefined if we have a signed type,
-  // we compute it using the unsigned type of the same size.
-  using UnsignedDst = typename std::make_unsigned<T>::type;
-  using SignedDst = typename std::make_signed<T>::type;
-  const UnsignedDst ux = SafeUnsignedAbs(x);
-  const UnsignedDst uy = SafeUnsignedAbs(y);
-  const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
-  const bool is_negative =
-      std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
-  // We have a fast out for unsigned identity or zero on the second operand.
-  // After that it's an unsigned overflow check on the absolute value, with
-  // a +1 bound for a negative result.
-  if (uy > UnsignedDst(!std::is_signed<T>::value || is_negative) &&
-      ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy)
-    return false;
-  *result = static_cast<T>(is_negative ? 0 - uresult : uresult);
-  return true;
-}
-
-template <typename T, typename U, class Enable = void>
-struct CheckedMulOp {};
-
-template <typename T, typename U>
-struct CheckedMulOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    if constexpr (CheckedMulFastOp<T, U>::is_supported)
-      return CheckedMulFastOp<T, U>::Do(x, y, result);
-
-    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
-    // Verify the destination type can hold the result (always true for 0).
-    if (PA_BASE_NUMERICS_UNLIKELY(
-            (!IsValueInRangeForNumericType<Promotion>(x) ||
-             !IsValueInRangeForNumericType<Promotion>(y)) &&
-            x && y)) {
-      return false;
-    }
-
-    Promotion presult = {};
-    bool is_valid = true;
-    if (CheckedMulFastOp<Promotion, Promotion>::is_supported) {
-      // The fast op may be available with the promoted type.
-      is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(
-          static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
-    } else if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
-      presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
-    } else {
-      is_valid = CheckedMulImpl(static_cast<Promotion>(x),
-                                static_cast<Promotion>(y), &presult);
-    }
-    if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
-      return false;
-    *result = static_cast<V>(presult);
-    return true;
-  }
-};
-
-// Division just requires a check for a zero denominator or an invalid negation
-// on signed min/-1.
-template <typename T, typename U, class Enable = void>
-struct CheckedDivOp {};
-
-template <typename T, typename U>
-struct CheckedDivOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    if (PA_BASE_NUMERICS_UNLIKELY(!y))
-      return false;
-
-    // The overflow check can be compiled away if we don't have the exact
-    // combination of types needed to trigger this case.
-    using Promotion = typename BigEnoughPromotion<T, U>::type;
-    if (PA_BASE_NUMERICS_UNLIKELY(
-            (std::is_signed<T>::value && std::is_signed<U>::value &&
-             IsTypeInRangeForNumericType<T, Promotion>::value &&
-             static_cast<Promotion>(x) ==
-                 std::numeric_limits<Promotion>::lowest() &&
-             y == static_cast<U>(-1)))) {
-      return false;
-    }
-
-    // This branch always compiles away if the above branch wasn't removed.
-    if (PA_BASE_NUMERICS_UNLIKELY(
-            (!IsValueInRangeForNumericType<Promotion>(x) ||
-             !IsValueInRangeForNumericType<Promotion>(y)) &&
-            x)) {
-      return false;
-    }
-
-    const Promotion presult = Promotion(x) / Promotion(y);
-    if (!IsValueInRangeForNumericType<V>(presult))
-      return false;
-    *result = static_cast<V>(presult);
-    return true;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedModOp {};
-
-template <typename T, typename U>
-struct CheckedModOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    if (PA_BASE_NUMERICS_UNLIKELY(!y))
-      return false;
-
-    using Promotion = typename BigEnoughPromotion<T, U>::type;
-    if (PA_BASE_NUMERICS_UNLIKELY(
-            (std::is_signed<T>::value && std::is_signed<U>::value &&
-             IsTypeInRangeForNumericType<T, Promotion>::value &&
-             static_cast<Promotion>(x) ==
-                 std::numeric_limits<Promotion>::lowest() &&
-             y == static_cast<U>(-1)))) {
-      *result = 0;
-      return true;
-    }
-
-    const Promotion presult =
-        static_cast<Promotion>(x) % static_cast<Promotion>(y);
-    if (!IsValueInRangeForNumericType<V>(presult))
-      return false;
-    *result = static_cast<Promotion>(presult);
-    return true;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedLshOp {};
-
-// Left shift. Shifts less than 0 or greater than or equal to the number
-// of bits in the promoted type are undefined. Shifts of negative values
-// are undefined. Otherwise it is defined when the result fits.
-template <typename T, typename U>
-struct CheckedLshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = T;
-  template <typename V>
-  static constexpr bool Do(T x, U shift, V* result) {
-    // Disallow negative numbers and verify the shift is in bounds.
-    if (PA_BASE_NUMERICS_LIKELY(
-            !IsValueNegative(x) &&
-            as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits))) {
-      // Shift as unsigned to avoid undefined behavior.
-      *result = static_cast<V>(as_unsigned(x) << shift);
-      // If the shift can be reversed, we know it was valid.
-      return *result >> shift == x;
-    }
-
-    // Handle the legal corner-case of a full-width signed shift of zero.
-    if (!std::is_signed<T>::value || x ||
-        as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits))
-      return false;
-    *result = 0;
-    return true;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedRshOp {};
-
-// Right shift. Shifts less than 0 or greater than or equal to the number
-// of bits in the promoted type are undefined. Otherwise, it is always defined,
-// but a right shift of a negative value is implementation-dependent.
-template <typename T, typename U>
-struct CheckedRshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = T;
-  template <typename V>
-  static constexpr bool Do(T x, U shift, V* result) {
-    // Use sign conversion to push negative values out of range.
-    if (PA_BASE_NUMERICS_UNLIKELY(as_unsigned(shift) >=
-                                  IntegerBitsPlusSign<T>::value)) {
-      return false;
-    }
-
-    const T tmp = x >> shift;
-    if (!IsValueInRangeForNumericType<V>(tmp))
-      return false;
-    *result = static_cast<V>(tmp);
-    return true;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedAndOp {};
-
-// For simplicity we support only unsigned integer results.
-template <typename T, typename U>
-struct CheckedAndOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    const result_type tmp =
-        static_cast<result_type>(x) & static_cast<result_type>(y);
-    if (!IsValueInRangeForNumericType<V>(tmp))
-      return false;
-    *result = static_cast<V>(tmp);
-    return true;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedOrOp {};
-
-// For simplicity we support only unsigned integers.
-template <typename T, typename U>
-struct CheckedOrOp<T,
-                   U,
-                   typename std::enable_if<std::is_integral<T>::value &&
-                                           std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    const result_type tmp =
-        static_cast<result_type>(x) | static_cast<result_type>(y);
-    if (!IsValueInRangeForNumericType<V>(tmp))
-      return false;
-    *result = static_cast<V>(tmp);
-    return true;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedXorOp {};
-
-// For simplicity we support only unsigned integers.
-template <typename T, typename U>
-struct CheckedXorOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    const result_type tmp =
-        static_cast<result_type>(x) ^ static_cast<result_type>(y);
-    if (!IsValueInRangeForNumericType<V>(tmp))
-      return false;
-    *result = static_cast<V>(tmp);
-    return true;
-  }
-};
-
-// Max doesn't really need to be implemented this way because it can't fail,
-// but it makes the code much cleaner to use the MathOp wrappers.
-template <typename T, typename U, class Enable = void>
-struct CheckedMaxOp {};
-
-template <typename T, typename U>
-struct CheckedMaxOp<
-    T,
-    U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    const result_type tmp = IsGreater<T, U>::Test(x, y)
-                                ? static_cast<result_type>(x)
-                                : static_cast<result_type>(y);
-    if (!IsValueInRangeForNumericType<V>(tmp))
-      return false;
-    *result = static_cast<V>(tmp);
-    return true;
-  }
-};
-
-// Min doesn't really need to be implemented this way because it can't fail,
-// but it makes the code much cleaner to use the MathOp wrappers.
-template <typename T, typename U, class Enable = void>
-struct CheckedMinOp {};
-
-template <typename T, typename U>
-struct CheckedMinOp<
-    T,
-    U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
-  using result_type = typename LowestValuePromotion<T, U>::type;
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    const result_type tmp = IsLess<T, U>::Test(x, y)
-                                ? static_cast<result_type>(x)
-                                : static_cast<result_type>(y);
-    if (!IsValueInRangeForNumericType<V>(tmp))
-      return false;
-    *result = static_cast<V>(tmp);
-    return true;
-  }
-};
-
-// This is just boilerplate that wraps the standard floating point arithmetic.
-// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define PA_BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                           \
-  template <typename T, typename U>                                      \
-  struct Checked##NAME##Op<                                              \
-      T, U,                                                              \
-      typename std::enable_if<std::is_floating_point<T>::value ||        \
-                              std::is_floating_point<U>::value>::type> { \
-    using result_type = typename MaxExponentPromotion<T, U>::type;       \
-    template <typename V>                                                \
-    static constexpr bool Do(T x, U y, V* result) {                      \
-      using Promotion = typename MaxExponentPromotion<T, U>::type;       \
-      const Promotion presult = x OP y;                                  \
-      if (!IsValueInRangeForNumericType<V>(presult))                     \
-        return false;                                                    \
-      *result = static_cast<V>(presult);                                 \
-      return true;                                                       \
-    }                                                                    \
-  };
-
-PA_BASE_FLOAT_ARITHMETIC_OPS(Add, +)
-PA_BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
-PA_BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
-PA_BASE_FLOAT_ARITHMETIC_OPS(Div, /)
-
-#undef PA_BASE_FLOAT_ARITHMETIC_OPS
-
-// Floats carry around their validity state with them, but integers do not. So,
-// we wrap the underlying value in a specialization in order to hide that detail
-// and expose an interface via accessors.
-enum NumericRepresentation {
-  NUMERIC_INTEGER,
-  NUMERIC_FLOATING,
-  NUMERIC_UNKNOWN
-};
-
-template <typename NumericType>
-struct GetNumericRepresentation {
-  static const NumericRepresentation value =
-      std::is_integral<NumericType>::value
-          ? NUMERIC_INTEGER
-          : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
-                                                        : NUMERIC_UNKNOWN);
-};
-
-template <typename T,
-          NumericRepresentation type = GetNumericRepresentation<T>::value>
-class CheckedNumericState {};
-
-// Integrals require quite a bit of additional housekeeping to manage state.
-template <typename T>
-class CheckedNumericState<T, NUMERIC_INTEGER> {
- public:
-  template <typename Src = int>
-  constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
-      : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
-        value_(WellDefinedConversionOrZero(value, is_valid_)) {
-    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
-  }
-
-  template <typename Src>
-  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
-      : CheckedNumericState(rhs.value(), rhs.is_valid()) {}
-
-  constexpr bool is_valid() const { return is_valid_; }
-
-  constexpr T value() const { return value_; }
-
- private:
-  // Ensures that a type conversion does not trigger undefined behavior.
-  template <typename Src>
-  static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid) {
-    using SrcType = typename internal::UnderlyingType<Src>::type;
-    return (std::is_integral<SrcType>::value || is_valid)
-               ? static_cast<T>(value)
-               : 0;
-  }
-
-  // is_valid_ precedes value_ because member initializers in the constructors
-  // are evaluated in field order, and is_valid_ must be read when initializing
-  // value_.
-  bool is_valid_;
-  T value_;
-};
-
-// Floating points maintain their own validity, but need translation wrappers.
-template <typename T>
-class CheckedNumericState<T, NUMERIC_FLOATING> {
- public:
-  template <typename Src = double>
-  constexpr explicit CheckedNumericState(Src value = 0.0, bool is_valid = true)
-      : value_(WellDefinedConversionOrNaN(
-            value,
-            is_valid && IsValueInRangeForNumericType<T>(value))) {}
-
-  template <typename Src>
-  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
-      : CheckedNumericState(rhs.value(), rhs.is_valid()) {}
-
-  constexpr bool is_valid() const {
-    // Written this way because std::isfinite is not reliably constexpr.
-    return PA_IsConstantEvaluated()
-               ? value_ <= std::numeric_limits<T>::max() &&
-                     value_ >= std::numeric_limits<T>::lowest()
-               : std::isfinite(value_);
-  }
-
-  constexpr T value() const { return value_; }
-
- private:
-  // Ensures that a type conversion does not trigger undefined behavior.
-  template <typename Src>
-  static constexpr T WellDefinedConversionOrNaN(Src value, bool is_valid) {
-    using SrcType = typename internal::UnderlyingType<Src>::type;
-    return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
-                NUMERIC_RANGE_CONTAINED ||
-            is_valid)
-               ? static_cast<T>(value)
-               : std::numeric_limits<T>::quiet_NaN();
-  }
-
-  T value_;
-};
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
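
The heart of CheckedAddImpl() above is that the sum is computed in the corresponding unsigned type, where wraparound is well defined, and overflow is then recovered from the sign bits. A standalone sketch of the signed 32-bit case, assuming two's complement int32_t:

    #include <cstdint>

    // Returns true and writes x + y to *result if the signed addition does
    // not overflow; mirrors the sign-bit test used by CheckedAddImpl().
    bool CheckedAddInt32(int32_t x, int32_t y, int32_t* result) {
      const uint32_t ux = static_cast<uint32_t>(x);
      const uint32_t uy = static_cast<uint32_t>(y);
      const uint32_t usum = ux + uy;  // Wraparound here is well defined.
      // Overflow occurred iff the sign of the sum differs from the sign of
      // both operands, i.e. (usum ^ ux) and (usum ^ uy) both have the sign
      // bit set.
      if (static_cast<int32_t>((usum ^ ux) & (usum ^ uy)) < 0)
        return false;
      *result = static_cast<int32_t>(usum);
      return true;
    }
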
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h
deleted file mode 100644
index ae3ea39..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
-
-#include <stddef.h>
-
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math_impl.h"
-
-namespace partition_alloc::internal::base {
-namespace internal {
-
-template <typename T>
-class ClampedNumeric {
-  static_assert(std::is_arithmetic<T>::value,
-                "ClampedNumeric<T>: T must be a numeric type.");
-
- public:
-  using type = T;
-
-  constexpr ClampedNumeric() : value_(0) {}
-
-  // Copy constructor.
-  template <typename Src>
-  constexpr ClampedNumeric(const ClampedNumeric<Src>& rhs)
-      : value_(saturated_cast<T>(rhs.value_)) {}
-
-  template <typename Src>
-  friend class ClampedNumeric;
-
-  // This is not an explicit constructor because we implicitly upgrade regular
-  // numerics to ClampedNumerics to make them easier to use.
-  template <typename Src>
-  constexpr ClampedNumeric(Src value)  // NOLINT(runtime/explicit)
-      : value_(saturated_cast<T>(value)) {
-    static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
-  }
-
-  // This is not an explicit constructor because we want a seamless conversion
-  // from StrictNumeric types.
-  template <typename Src>
-  constexpr ClampedNumeric(
-      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
-      : value_(saturated_cast<T>(static_cast<Src>(value))) {}
-
-  // Returns a ClampedNumeric of the specified type, cast from the current
-  // ClampedNumeric, and saturated to the destination type.
-  template <typename Dst>
-  constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
-    return *this;
-  }
-
-  // Prototypes for the supported arithmetic operator overloads.
-  template <typename Src>
-  constexpr ClampedNumeric& operator+=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator-=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator*=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator/=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator%=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator<<=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator>>=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator&=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator|=(const Src rhs);
-  template <typename Src>
-  constexpr ClampedNumeric& operator^=(const Src rhs);
-
-  constexpr ClampedNumeric operator-() const {
-    // The negation of two's complement int min is int min, so that's the
-    // only overflow case where we will saturate.
-    return ClampedNumeric<T>(SaturatedNegWrapper(value_));
-  }
-
-  constexpr ClampedNumeric operator~() const {
-    return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
-  }
-
-  constexpr ClampedNumeric Abs() const {
-    // The negation of two's complement int min is int min, so that's the
-    // only overflow case where we will saturate.
-    return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
-  }
-
-  template <typename U>
-  constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(
-      const U rhs) const {
-    using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
-    return ClampedNumeric<result_type>(
-        ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
-  }
-
-  template <typename U>
-  constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(
-      const U rhs) const {
-    using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
-    return ClampedNumeric<result_type>(
-        ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
-  }
-
-  // This function is available only for integral types. It returns an unsigned
-  // integer of the same width as the source type, containing the absolute value
-  // of the source, and properly handling signed min.
-  constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>
-  UnsignedAbs() const {
-    return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(
-        SafeUnsignedAbs(value_));
-  }
-
-  constexpr ClampedNumeric& operator++() {
-    *this += 1;
-    return *this;
-  }
-
-  constexpr ClampedNumeric operator++(int) {
-    ClampedNumeric value = *this;
-    *this += 1;
-    return value;
-  }
-
-  constexpr ClampedNumeric& operator--() {
-    *this -= 1;
-    return *this;
-  }
-
-  constexpr ClampedNumeric operator--(int) {
-    ClampedNumeric value = *this;
-    *this -= 1;
-    return value;
-  }
-
-  // These perform the actual math operations on the ClampedNumerics.
-  // Binary arithmetic operations.
-  template <template <typename, typename, typename> class M,
-            typename L,
-            typename R>
-  static constexpr ClampedNumeric MathOp(const L lhs, const R rhs) {
-    using Math = typename MathWrapper<M, L, R>::math;
-    return ClampedNumeric<T>(
-        Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
-  }
-
-  // Assignment arithmetic operations.
-  template <template <typename, typename, typename> class M, typename R>
-  constexpr ClampedNumeric& MathOp(const R rhs) {
-    using Math = typename MathWrapper<M, T, R>::math;
-    *this =
-        ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
-    return *this;
-  }
-
-  template <typename Dst>
-  constexpr operator Dst() const {
-    return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(
-        value_);
-  }
-
-  // This method extracts the raw integer value without saturating it to the
-  // destination type as the conversion operator does. This is useful when
-  // e.g. assigning to an auto type or passing as a deduced template parameter.
-  constexpr T RawValue() const { return value_; }
-
- private:
-  T value_;
-
-  // These wrappers allow us to handle state the same way for both
-  // ClampedNumeric and POD arithmetic types.
-  template <typename Src>
-  struct Wrapper {
-    static constexpr typename UnderlyingType<Src>::type value(Src value) {
-      return value;
-    }
-  };
-};
-
-// Convenience wrapper to return a new ClampedNumeric from the provided
-// arithmetic or ClampedNumericType.
-template <typename T>
-constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(
-    const T value) {
-  return value;
-}
-
-// These implement the variadic wrapper for the math operations.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R>
-constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(
-    const L lhs,
-    const R rhs) {
-  using Math = typename MathWrapper<M, L, R>::math;
-  return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
-                                                                        rhs);
-}
-
-// General purpose wrapper template for arithmetic operations.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R,
-          typename... Args>
-constexpr auto ClampMathOp(const L lhs, const R rhs, const Args... args) {
-  return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
-}
-
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
-PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
-PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
-PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)
-
-}  // namespace internal
-
-using internal::ClampAdd;
-using internal::ClampAnd;
-using internal::ClampDiv;
-using internal::ClampedNumeric;
-using internal::ClampLsh;
-using internal::ClampMax;
-using internal::ClampMin;
-using internal::ClampMod;
-using internal::ClampMul;
-using internal::ClampOr;
-using internal::ClampRsh;
-using internal::ClampSub;
-using internal::ClampXor;
-using internal::MakeClampedNum;
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
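
For reference, a brief sketch of the saturating behaviour this header provides; the values are made up and the namespace alias is only for readability. Out-of-range results clamp to the destination type's limits instead of wrapping or crashing.

    #include <cstdint>
    #include <limits>

    #include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"

    namespace pa_base = partition_alloc::internal::base;

    void ClampedExamples() {
      // Saturates at the maximum instead of wrapping around.
      uint8_t a = pa_base::ClampAdd(uint8_t{200}, uint8_t{100});  // a == 255

      // Underflow clamps to the minimum of the promoted type.
      int32_t b = pa_base::ClampSub(std::numeric_limits<int32_t>::min(), 1);
      // b == std::numeric_limits<int32_t>::min()

      // ClampedNumeric composes like a plain integer but never overflows.
      pa_base::ClampedNumeric<int16_t> c = 30000;
      c *= 10;  // c == 32767
      (void)a;
      (void)b;
      (void)c;
    }
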
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math_impl.h
deleted file mode 100644
index ab026cf..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math_impl.h
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <climits>
-#include <cmath>
-#include <cstdlib>
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h"
-
-namespace partition_alloc::internal::base::internal {
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value &&
-                                  std::is_signed<T>::value>::type* = nullptr>
-constexpr T SaturatedNegWrapper(T value) {
-  return PA_IsConstantEvaluated() || !ClampedNegFastOp<T>::is_supported
-             ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
-                    ? NegateWrapper(value)
-                    : std::numeric_limits<T>::max())
-             : ClampedNegFastOp<T>::Do(value);
-}
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value &&
-                                  !std::is_signed<T>::value>::type* = nullptr>
-constexpr T SaturatedNegWrapper(T value) {
-  return T(0);
-}
-
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-constexpr T SaturatedNegWrapper(T value) {
-  return -value;
-}
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr T SaturatedAbsWrapper(T value) {
-  // The calculation below is a static identity for unsigned types, but for
-  // signed integer types it provides a non-branching, saturated absolute value.
-  // This works because SafeUnsignedAbs() returns an unsigned type, which can
-  // represent the absolute value of all negative numbers of an equal-width
-  // integer type. The call to IsValueNegative() then detects overflow in the
-  // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
-  // a signed integer value. If it is the overflow case, we end up subtracting
-  // one from the unsigned result, thus saturating to numeric_limits<T>::max().
-  return static_cast<T>(
-      SafeUnsignedAbs(value) -
-      IsValueNegative<T>(static_cast<T>(SafeUnsignedAbs(value))));
-}
-
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-constexpr T SaturatedAbsWrapper(T value) {
-  return value < 0 ? -value : value;
-}
-
-template <typename T, typename U, class Enable = void>
-struct ClampedAddOp {};
-
-template <typename T, typename U>
-struct ClampedAddOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    if (!PA_IsConstantEvaluated() && ClampedAddFastOp<T, U>::is_supported)
-      return ClampedAddFastOp<T, U>::template Do<V>(x, y);
-
-    static_assert(std::is_same<V, result_type>::value ||
-                      IsTypeInRangeForNumericType<U, V>::value,
-                  "The saturation result cannot be determined from the "
-                  "provided types.");
-    const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
-    V result = {};
-    return PA_BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result)))
-               ? result
-               : saturated;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedSubOp {};
-
-template <typename T, typename U>
-struct ClampedSubOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    if (!PA_IsConstantEvaluated() && ClampedSubFastOp<T, U>::is_supported)
-      return ClampedSubFastOp<T, U>::template Do<V>(x, y);
-
-    static_assert(std::is_same<V, result_type>::value ||
-                      IsTypeInRangeForNumericType<U, V>::value,
-                  "The saturation result cannot be determined from the "
-                  "provided types.");
-    const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
-    V result = {};
-    return PA_BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result)))
-               ? result
-               : saturated;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedMulOp {};
-
-template <typename T, typename U>
-struct ClampedMulOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    if (!PA_IsConstantEvaluated() && ClampedMulFastOp<T, U>::is_supported)
-      return ClampedMulFastOp<T, U>::template Do<V>(x, y);
-
-    V result = {};
-    const V saturated =
-        CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
-    return PA_BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result)))
-               ? result
-               : saturated;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedDivOp {};
-
-template <typename T, typename U>
-struct ClampedDivOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    V result = {};
-    if (PA_BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result))))
-      return result;
-    // Saturation goes to max, min, or NaN (if x is zero).
-    return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
-             : SaturationDefaultLimits<V>::NaN();
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedModOp {};
-
-template <typename T, typename U>
-struct ClampedModOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    V result = {};
-    return PA_BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result)))
-               ? result
-               : x;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedLshOp {};
-
-// Left shift. Non-zero values saturate in the direction of the sign. A zero
-// shifted by any value always results in zero.
-template <typename T, typename U>
-struct ClampedLshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = T;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U shift) {
-    static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
-    if (PA_BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
-      // Shift as unsigned to avoid undefined behavior.
-      V result = static_cast<V>(as_unsigned(x) << shift);
-      // If the shift can be reversed, we know it was valid.
-      if (PA_BASE_NUMERICS_LIKELY(result >> shift == x))
-        return result;
-    }
-    return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedRshOp {};
-
-// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
-template <typename T, typename U>
-struct ClampedRshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = T;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U shift) {
-    static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
-    // Signed right shift is odd, because it saturates to -1 or 0.
-    const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
-    return PA_BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
-               ? saturated_cast<V>(x >> shift)
-               : saturated;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedAndOp {};
-
-template <typename T, typename U>
-struct ClampedAndOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V>
-  static constexpr V Do(T x, U y) {
-    return static_cast<result_type>(x) & static_cast<result_type>(y);
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedOrOp {};
-
-// For simplicity we promote to unsigned integers.
-template <typename T, typename U>
-struct ClampedOrOp<T,
-                   U,
-                   typename std::enable_if<std::is_integral<T>::value &&
-                                           std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V>
-  static constexpr V Do(T x, U y) {
-    return static_cast<result_type>(x) | static_cast<result_type>(y);
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedXorOp {};
-
-// For simplicity we promote to unsigned integers.
-template <typename T, typename U>
-struct ClampedXorOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V>
-  static constexpr V Do(T x, U y) {
-    return static_cast<result_type>(x) ^ static_cast<result_type>(y);
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedMaxOp {};
-
-template <typename T, typename U>
-struct ClampedMaxOp<
-    T,
-    U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x)
-                                       : saturated_cast<V>(y);
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct ClampedMinOp {};
-
-template <typename T, typename U>
-struct ClampedMinOp<
-    T,
-    U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
-  using result_type = typename LowestValuePromotion<T, U>::type;
-  template <typename V = result_type>
-  static constexpr V Do(T x, U y) {
-    return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x)
-                                    : saturated_cast<V>(y);
-  }
-};
-
-// This is just boilerplate that wraps the standard floating point arithmetic.
-// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define PA_BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                           \
-  template <typename T, typename U>                                      \
-  struct Clamped##NAME##Op<                                              \
-      T, U,                                                              \
-      typename std::enable_if<std::is_floating_point<T>::value ||        \
-                              std::is_floating_point<U>::value>::type> { \
-    using result_type = typename MaxExponentPromotion<T, U>::type;       \
-    template <typename V = result_type>                                  \
-    static constexpr V Do(T x, U y) {                                    \
-      return saturated_cast<V>(x OP y);                                  \
-    }                                                                    \
-  };
-
-PA_BASE_FLOAT_ARITHMETIC_OPS(Add, +)
-PA_BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
-PA_BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
-PA_BASE_FLOAT_ARITHMETIC_OPS(Div, /)
-
-#undef PA_BASE_FLOAT_ARITHMETIC_OPS
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
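The Clamped*Op specializations above all follow the same shape: take a platform fast path when one is available, otherwise run the corresponding Checked*Op and substitute the saturation limit (CommonMaxOrMin, keyed to the sign of the would-be result) when the checked operation reports overflow. A minimal standalone sketch of that pattern, assuming GCC/Clang for the __builtin_add_overflow intrinsic and plain int32_t instead of the promoted result types (SaturatingAdd is an illustrative name, not part of the header):

#include <cstdint>
#include <limits>

// Saturating signed add: try the checked operation, saturate on overflow.
// The overflow direction follows the sign of the addend, mirroring how
// ClampedAddOp picks CommonMaxOrMin from IsValueNegative(y).
int32_t SaturatingAdd(int32_t x, int32_t y) {
  int32_t result;
  if (!__builtin_add_overflow(x, y, &result))
    return result;
  return y < 0 ? std::numeric_limits<int32_t>::min()
               : std::numeric_limits<int32_t>::max();
}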
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h
deleted file mode 100644
index 9d65355..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
-
-#include <stddef.h>
-
-#include <cmath>
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h"
-
-#if defined(__ARMEL__) && !defined(__native_client__)
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_arm_impl.h"
-#define PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
-#else
-#define PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
-#endif
-
-#if !PA_BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
-#include <ostream>
-#endif
-
-namespace partition_alloc::internal::base {
-namespace internal {
-
-#if !PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
-template <typename Dst, typename Src>
-struct SaturateFastAsmOp {
-  static constexpr bool is_supported = false;
-  static constexpr Dst Do(Src) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<Dst>();
-  }
-};
-#endif  // PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
-#undef PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
-
-// The following special case a few specific integer conversions where we can
-// eke out better performance than range checking.
-template <typename Dst, typename Src, typename Enable = void>
-struct IsValueInRangeFastOp {
-  static constexpr bool is_supported = false;
-  static constexpr bool Do(Src value) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<bool>();
-  }
-};
-
-// Signed to signed range comparison.
-template <typename Dst, typename Src>
-struct IsValueInRangeFastOp<
-    Dst,
-    Src,
-    typename std::enable_if<
-        std::is_integral<Dst>::value && std::is_integral<Src>::value &&
-        std::is_signed<Dst>::value && std::is_signed<Src>::value &&
-        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
-  static constexpr bool is_supported = true;
-
-  static constexpr bool Do(Src value) {
-    // Just downcast to the smaller type, sign extend it back to the original
-    // type, and then see if it matches the original value.
-    return value == static_cast<Dst>(value);
-  }
-};
-
-// Signed to unsigned range comparison.
-template <typename Dst, typename Src>
-struct IsValueInRangeFastOp<
-    Dst,
-    Src,
-    typename std::enable_if<
-        std::is_integral<Dst>::value && std::is_integral<Src>::value &&
-        !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
-        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
-  static constexpr bool is_supported = true;
-
-  static constexpr bool Do(Src value) {
-    // We cast a signed as unsigned to overflow negative values to the top,
-    // then compare against whichever maximum is smaller, as our upper bound.
-    return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
-  }
-};
-
-// Convenience function that returns true if the supplied value is in range
-// for the destination type.
-template <typename Dst, typename Src>
-constexpr bool IsValueInRangeForNumericType(Src value) {
-  using SrcType = typename internal::UnderlyingType<Src>::type;
-  return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
-             ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
-                   static_cast<SrcType>(value))
-             : internal::DstRangeRelationToSrcRange<Dst>(
-                   static_cast<SrcType>(value))
-                   .IsValid();
-}
-
-// checked_cast<> is analogous to static_cast<> for numeric types,
-// except that it CHECKs that the specified numeric conversion will not
-// overflow or underflow. NaN source will always trigger a CHECK.
-template <typename Dst,
-          class CheckHandler = internal::CheckOnFailure,
-          typename Src>
-constexpr Dst checked_cast(Src value) {
-  // This throws a compile-time error on evaluating the constexpr if it can be
-  // determined at compile-time as failing, otherwise it will CHECK at runtime.
-  using SrcType = typename internal::UnderlyingType<Src>::type;
-  return PA_BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
-             ? static_cast<Dst>(static_cast<SrcType>(value))
-             : CheckHandler::template HandleFailure<Dst>();
-}
-
-// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
-// You may provide your own limits (e.g. to saturated_cast) so long as you
-// implement all of the static constexpr member functions in the class below.
-template <typename T>
-struct SaturationDefaultLimits : public std::numeric_limits<T> {
-  static constexpr T NaN() {
-    return std::numeric_limits<T>::has_quiet_NaN
-               ? std::numeric_limits<T>::quiet_NaN()
-               : T();
-  }
-  using std::numeric_limits<T>::max;
-  static constexpr T Overflow() {
-    return std::numeric_limits<T>::has_infinity
-               ? std::numeric_limits<T>::infinity()
-               : std::numeric_limits<T>::max();
-  }
-  using std::numeric_limits<T>::lowest;
-  static constexpr T Underflow() {
-    return std::numeric_limits<T>::has_infinity
-               ? std::numeric_limits<T>::infinity() * -1
-               : std::numeric_limits<T>::lowest();
-  }
-};
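As the comment above notes, saturated_cast accepts a custom limits handler. A hedged sketch of what such a handler could look like (CustomLimits is a hypothetical type, not part of this header); it needs NaN(), Overflow(), Underflow(), max(), and lowest(), which inheriting std::numeric_limits largely supplies:

// Hypothetical handler usable as saturated_cast<int, CustomLimits>(value).
template <typename T>
struct CustomLimits : std::numeric_limits<T> {
  static constexpr T NaN() { return T(); }
  static constexpr T Overflow() { return std::numeric_limits<T>::max(); }
  static constexpr T Underflow() { return std::numeric_limits<T>::lowest(); }
};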
-
-template <typename Dst, template <typename> class S, typename Src>
-constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
-  // For some reason clang generates much better code when the branch is
-  // structured exactly this way, rather than a sequence of checks.
-  return !constraint.IsOverflowFlagSet()
-             ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
-                                                 : S<Dst>::Underflow())
-             // Skip this check for integral Src, which cannot be NaN.
-             : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
-                    ? S<Dst>::Overflow()
-                    : S<Dst>::NaN());
-}
-
-// We can reduce the number of conditions and get slightly better performance
-// for normal signed and unsigned integer ranges. And in the specific case of
-// Arm, we can use the optimized saturation instructions.
-template <typename Dst, typename Src, typename Enable = void>
-struct SaturateFastOp {
-  static constexpr bool is_supported = false;
-  static constexpr Dst Do(Src value) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<Dst>();
-  }
-};
-
-template <typename Dst, typename Src>
-struct SaturateFastOp<
-    Dst,
-    Src,
-    typename std::enable_if<std::is_integral<Src>::value &&
-                            std::is_integral<Dst>::value &&
-                            SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
-  static constexpr bool is_supported = true;
-  static constexpr Dst Do(Src value) {
-    return SaturateFastAsmOp<Dst, Src>::Do(value);
-  }
-};
-
-template <typename Dst, typename Src>
-struct SaturateFastOp<
-    Dst,
-    Src,
-    typename std::enable_if<std::is_integral<Src>::value &&
-                            std::is_integral<Dst>::value &&
-                            !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
-  static constexpr bool is_supported = true;
-  static constexpr Dst Do(Src value) {
-    // The exact order of the following is structured to hit the correct
-    // optimization heuristics across compilers. Do not change without
-    // checking the emitted code.
-    const Dst saturated = CommonMaxOrMin<Dst, Src>(
-        IsMaxInRangeForNumericType<Dst, Src>() ||
-        (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
-    return PA_BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
-               ? static_cast<Dst>(value)
-               : saturated;
-  }
-};
-
-// saturated_cast<> is analogous to static_cast<> for numeric types, except
-// that the specified numeric conversion will saturate by default rather than
-// overflow or underflow, and NaN assignment to an integral will return 0.
-// All boundary condition behaviors can be overridden with a custom handler.
-template <typename Dst,
-          template <typename> class SaturationHandler = SaturationDefaultLimits,
-          typename Src>
-constexpr Dst saturated_cast(Src value) {
-  using SrcType = typename UnderlyingType<Src>::type;
-  return !PA_IsConstantEvaluated() &&
-                 SaturateFastOp<Dst, SrcType>::is_supported &&
-                 std::is_same<SaturationHandler<Dst>,
-                              SaturationDefaultLimits<Dst>>::value
-             ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
-             : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
-                   static_cast<SrcType>(value),
-                   DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
-                       static_cast<SrcType>(value)));
-}
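A hedged usage sketch of the default saturating behavior (assumes this header is included; saturated_cast is re-exported into partition_alloc::internal::base further below, and SaturatedCastExamples is an illustrative name):

#include <cassert>
#include <cmath>
#include <cstdint>

void SaturatedCastExamples() {
  using partition_alloc::internal::base::saturated_cast;
  assert(saturated_cast<uint8_t>(300) == 255);     // clamps to max
  assert(saturated_cast<uint8_t>(-5) == 0);        // clamps to lowest
  assert(saturated_cast<int>(std::nan("")) == 0);  // NaN -> 0 for integrals
}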
-
-// strict_cast<> is analogous to static_cast<> for numeric types, except that
-// it will cause a compile failure if the destination type is not large enough
-// to contain any value in the source type. It performs no runtime checking.
-template <typename Dst, typename Src>
-constexpr Dst strict_cast(Src value) {
-  using SrcType = typename UnderlyingType<Src>::type;
-  static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
-  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
-
-  // If you got here from a compiler error, it's because you tried to assign
-  // from a source type to a destination type that has insufficient range.
-  // The solution may be to change the destination type you're assigning to,
-  // and use one large enough to represent the source.
-  // Alternatively, you may be better served with the checked_cast<> or
-  // saturated_cast<> template functions for your particular use case.
-  static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
-                    NUMERIC_RANGE_CONTAINED,
-                "The source type is out of range for the destination type. "
-                "Please see strict_cast<> comments for more information.");
-
-  return static_cast<Dst>(static_cast<SrcType>(value));
-}
-
-// Some wrappers to statically check that a type is in range.
-template <typename Dst, typename Src, class Enable = void>
-struct IsNumericRangeContained {
-  static constexpr bool value = false;
-};
-
-template <typename Dst, typename Src>
-struct IsNumericRangeContained<
-    Dst,
-    Src,
-    typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
-                            ArithmeticOrUnderlyingEnum<Src>::value>::type> {
-  static constexpr bool value =
-      StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
-      NUMERIC_RANGE_CONTAINED;
-};
-
-// StrictNumeric implements compile time range checking between numeric types by
-// wrapping assignment operations in a strict_cast. This class is intended to be
-// used for function arguments and return types, to ensure the destination type
-// can always contain the source type. This is essentially the same as enforcing
-// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
-// incrementally at API boundaries, making it easier to convert code so that it
-// compiles cleanly with truncation warnings enabled.
-// This template should introduce no runtime overhead, but it also provides no
-// runtime checking of any of the associated mathematical operations. Use
-// CheckedNumeric for runtime range checks of the actual value being assigned.
-template <typename T>
-class StrictNumeric {
- public:
-  using type = T;
-
-  constexpr StrictNumeric() : value_(0) {}
-
-  // Copy constructor.
-  template <typename Src>
-  constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
-      : value_(strict_cast<T>(rhs.value_)) {}
-
-  // This is not an explicit constructor because we implicitly upgrade regular
-  // numerics to StrictNumerics to make them easier to use.
-  template <typename Src>
-  constexpr StrictNumeric(Src value)  // NOLINT(runtime/explicit)
-      : value_(strict_cast<T>(value)) {}
-
-  // If you got here from a compiler error, it's because you tried to assign
-  // from a source type to a destination type that has insufficient range.
-  // The solution may be to change the destination type you're assigning to,
-  // and use one large enough to represent the source.
-  // If you're assigning from a CheckedNumeric<> class, you may be able to use
-  // the AssignIfValid() member function, specify a narrower destination type to
-  // the member value functions (e.g. val.template ValueOrDie<Dst>()), or use
-  // one of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
-  // If you've encountered an _ambiguous overload_ you can use a static_cast<>
-  // to explicitly cast the result to the destination type.
-  // If none of that works, you may be better served with the checked_cast<> or
-  // saturated_cast<> template functions for your particular use case.
-  template <typename Dst,
-            typename std::enable_if<
-                IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
-  constexpr operator Dst() const {
-    return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
-  }
-
- private:
-  const T value_;
-};
-
-// Convenience wrapper returns a StrictNumeric from the provided arithmetic
-// type.
-template <typename T>
-constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
-    const T value) {
-  return value;
-}
-
-#define PA_BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)           \
-  template <typename L, typename R,                                     \
-            typename std::enable_if<                                    \
-                internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
-  constexpr bool operator OP(const L lhs, const R rhs) {                \
-    return SafeCompare<NAME, typename UnderlyingType<L>::type,          \
-                       typename UnderlyingType<R>::type>(lhs, rhs);     \
-  }
-
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
-PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
-
-}  // namespace internal
-
-using internal::as_signed;
-using internal::as_unsigned;
-using internal::checked_cast;
-using internal::IsTypeInRangeForNumericType;
-using internal::IsValueInRangeForNumericType;
-using internal::IsValueNegative;
-using internal::MakeStrictNum;
-using internal::SafeUnsignedAbs;
-using internal::saturated_cast;
-using internal::strict_cast;
-using internal::StrictNumeric;
-
-// Explicitly make a shorter size_t alias for convenience.
-using SizeT = StrictNumeric<size_t>;
-
-// floating -> integral conversions that saturate and thus can actually return
-// an integral type.  In most cases, these should be preferred over the std::
-// versions.
-template <typename Dst = int,
-          typename Src,
-          typename = std::enable_if_t<std::is_integral<Dst>::value &&
-                                      std::is_floating_point<Src>::value>>
-Dst ClampFloor(Src value) {
-  return saturated_cast<Dst>(std::floor(value));
-}
-template <typename Dst = int,
-          typename Src,
-          typename = std::enable_if_t<std::is_integral<Dst>::value &&
-                                      std::is_floating_point<Src>::value>>
-Dst ClampCeil(Src value) {
-  return saturated_cast<Dst>(std::ceil(value));
-}
-template <typename Dst = int,
-          typename Src,
-          typename = std::enable_if_t<std::is_integral<Dst>::value &&
-                                      std::is_floating_point<Src>::value>>
-Dst ClampRound(Src value) {
-  const Src rounded =
-      (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
-  return saturated_cast<Dst>(rounded);
-}
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
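The ClampFloor/ClampCeil/ClampRound helpers above combine the standard rounding functions with saturated_cast, so out-of-range results clamp rather than hitting an undefined float-to-int conversion. A brief usage sketch under the same namespace assumption as before (ClampExamples is an illustrative name):

#include <cassert>
#include <limits>

void ClampExamples() {
  using partition_alloc::internal::base::ClampCeil;
  using partition_alloc::internal::base::ClampFloor;
  using partition_alloc::internal::base::ClampRound;
  assert(ClampFloor(1.9) == 1);
  assert(ClampCeil(1.1) == 2);
  assert(ClampRound(-2.5) == -3);  // rounds half away from zero
  assert(ClampRound(1e30) == std::numeric_limits<int>::max());  // saturates
}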
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_arm_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_arm_impl.h
deleted file mode 100644
index b23e42f..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_arm_impl.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
-
-#include <cassert>
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h"
-
-namespace partition_alloc::internal::base::internal {
-
-// Fast saturation to a destination type.
-template <typename Dst, typename Src>
-struct SaturateFastAsmOp {
-  static constexpr bool is_supported =
-      kEnableAsmCode && std::is_signed<Src>::value &&
-      std::is_integral<Dst>::value && std::is_integral<Src>::value &&
-      IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
-      IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
-      !IsTypeInRangeForNumericType<Dst, Src>::value;
-
-  __attribute__((always_inline)) static Dst Do(Src value) {
-    int32_t src = value;
-    typename std::conditional<std::is_signed<Dst>::value, int32_t,
-                              uint32_t>::type result;
-    if (std::is_signed<Dst>::value) {
-      asm("ssat %[dst], %[shift], %[src]"
-          : [dst] "=r"(result)
-          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
-                                            ? IntegerBitsPlusSign<Dst>::value
-                                            : 32));
-    } else {
-      asm("usat %[dst], %[shift], %[src]"
-          : [dst] "=r"(result)
-          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
-                                            ? IntegerBitsPlusSign<Dst>::value
-                                            : 31));
-    }
-    return static_cast<Dst>(result);
-  }
-};
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
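The ssat/usat instructions used above saturate a 32-bit source into an N-bit signed or unsigned range in a single instruction. A portable sketch of the same effect in plain C++, offered only as an illustration of the semantics for narrower destination types, not the code path the header actually takes on Arm (PortableSaturate is a hypothetical name):

#include <algorithm>
#include <cstdint>
#include <limits>

// Clamp a 32-bit value into Dst's representable range, which is what
// ssat/usat do for destination types narrower than the source.
template <typename Dst>
Dst PortableSaturate(int32_t value) {
  const int64_t lo = std::numeric_limits<Dst>::min();
  const int64_t hi = std::numeric_limits<Dst>::max();
  return static_cast<Dst>(std::clamp<int64_t>(value, lo, hi));
}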
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h
deleted file mode 100644
index 3767074..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h
+++ /dev/null
@@ -1,845 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
-
-#include <stdint.h>
-
-#include <limits>
-#include <type_traits>
-
-#if defined(__GNUC__) || defined(__clang__)
-#define PA_BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
-#define PA_BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
-#else
-#define PA_BASE_NUMERICS_LIKELY(x) (x)
-#define PA_BASE_NUMERICS_UNLIKELY(x) (x)
-#endif
-
-namespace partition_alloc::internal::base::internal {
-
-// The std library doesn't provide a binary max_exponent for integers, however
-// we can compute an analog using std::numeric_limits<>::digits.
-template <typename NumericType>
-struct MaxExponent {
-  static const int value = std::is_floating_point<NumericType>::value
-                               ? std::numeric_limits<NumericType>::max_exponent
-                               : std::numeric_limits<NumericType>::digits + 1;
-};
-
-// The number of bits (including the sign) in an integer. Eliminates sizeof
-// hacks.
-template <typename NumericType>
-struct IntegerBitsPlusSign {
-  static const int value = std::numeric_limits<NumericType>::digits +
-                           std::is_signed<NumericType>::value;
-};
-
-// Helper templates for integer manipulations.
-
-template <typename Integer>
-struct PositionOfSignBit {
-  static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
-};
-
-// Determines if a numeric value is negative without throwing compiler
-// warnings on: unsigned(value) < 0.
-template <typename T,
-          typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
-constexpr bool IsValueNegative(T value) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
-  return value < 0;
-}
-
-template <typename T,
-          typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
-constexpr bool IsValueNegative(T) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
-  return false;
-}
-
-// This performs a fast negation, returning a signed value. It works on unsigned
-// arguments, but probably doesn't do what you want for any unsigned value
-// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
-template <typename T>
-constexpr typename std::make_signed<T>::type ConditionalNegate(
-    T x,
-    bool is_negative) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  using SignedT = typename std::make_signed<T>::type;
-  using UnsignedT = typename std::make_unsigned<T>::type;
-  return static_cast<SignedT>((static_cast<UnsignedT>(x) ^
-                               static_cast<UnsignedT>(-SignedT(is_negative))) +
-                              is_negative);
-}
-
-// This performs a safe, absolute value via unsigned overflow.
-template <typename T>
-constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  using UnsignedT = typename std::make_unsigned<T>::type;
-  return IsValueNegative(value)
-             ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
-             : static_cast<UnsignedT>(value);
-}
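Unlike taking std::abs of the most negative value, this stays well defined because the negation happens in the unsigned type, where wraparound is specified. A small compile-time check illustrating that (assumes only that this header is included):

#include <cstdint>
#include <limits>

static_assert(partition_alloc::internal::base::internal::SafeUnsignedAbs(
                  std::numeric_limits<int32_t>::min()) == 2147483648u,
              "abs of INT32_MIN fits in the unsigned counterpart");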
-
-// TODO(jschuh): Switch to std::is_constant_evaluated() once C++20 is supported.
-// Alternately, the usage could be restructured for "consteval if" in C++23.
-#define PA_IsConstantEvaluated() (__builtin_is_constant_evaluated())
-
-// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
-// some accelerated runtime paths to release builds until this can be forced
-// with consteval support in C++20 or C++23.
-#if defined(NDEBUG)
-constexpr bool kEnableAsmCode = true;
-#else
-constexpr bool kEnableAsmCode = false;
-#endif
-
-// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
-// Also used in a constexpr template to trigger a compilation failure on
-// an error condition.
-struct CheckOnFailure {
-  template <typename T>
-  static T HandleFailure() {
-#if defined(_MSC_VER)
-    __debugbreak();
-#elif defined(__GNUC__) || defined(__clang__)
-    __builtin_trap();
-#else
-    ((void)(*(volatile char*)0 = 0));
-#endif
-    return T();
-  }
-};
-
-enum IntegerRepresentation {
-  INTEGER_REPRESENTATION_UNSIGNED,
-  INTEGER_REPRESENTATION_SIGNED
-};
-
-// A range for a given numeric Src type is contained for a given numeric Dst
-// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
-// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
-// We implement this as template specializations rather than simple static
-// comparisons to ensure type correctness in our comparisons.
-enum NumericRangeRepresentation {
-  NUMERIC_RANGE_NOT_CONTAINED,
-  NUMERIC_RANGE_CONTAINED
-};
-
-// Helper templates to statically determine if our destination type can contain
-// maximum and minimum values represented by the source type.
-
-template <typename Dst,
-          typename Src,
-          IntegerRepresentation DstSign = std::is_signed<Dst>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED,
-          IntegerRepresentation SrcSign = std::is_signed<Src>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED>
-struct StaticDstRangeRelationToSrcRange;
-
-// Same sign: Dst is guaranteed to contain Src only if its range is equal or
-// larger.
-template <typename Dst, typename Src, IntegerRepresentation Sign>
-struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
-  static const NumericRangeRepresentation value =
-      MaxExponent<Dst>::value >= MaxExponent<Src>::value
-          ? NUMERIC_RANGE_CONTAINED
-          : NUMERIC_RANGE_NOT_CONTAINED;
-};
-
-// Unsigned to signed: Dst is guaranteed to contain source only if its range is
-// larger.
-template <typename Dst, typename Src>
-struct StaticDstRangeRelationToSrcRange<Dst,
-                                        Src,
-                                        INTEGER_REPRESENTATION_SIGNED,
-                                        INTEGER_REPRESENTATION_UNSIGNED> {
-  static const NumericRangeRepresentation value =
-      MaxExponent<Dst>::value > MaxExponent<Src>::value
-          ? NUMERIC_RANGE_CONTAINED
-          : NUMERIC_RANGE_NOT_CONTAINED;
-};
-
-// Signed to unsigned: Dst cannot be statically determined to contain Src.
-template <typename Dst, typename Src>
-struct StaticDstRangeRelationToSrcRange<Dst,
-                                        Src,
-                                        INTEGER_REPRESENTATION_UNSIGNED,
-                                        INTEGER_REPRESENTATION_SIGNED> {
-  static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
-};
-
-// This class wraps the range constraints as separate booleans so the compiler
-// can identify constants and eliminate unused code paths.
-class RangeCheck {
- public:
-  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
-      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
-  constexpr RangeCheck() : is_underflow_(false), is_overflow_(false) {}
-  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
-  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
-  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
-  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
-  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
-  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
-  constexpr bool operator==(const RangeCheck rhs) const {
-    return is_underflow_ == rhs.is_underflow_ &&
-           is_overflow_ == rhs.is_overflow_;
-  }
-  constexpr bool operator!=(const RangeCheck rhs) const {
-    return !(*this == rhs);
-  }
-
- private:
-  // Do not change the order of these member variables. The integral conversion
-  // optimization depends on this exact order.
-  const bool is_underflow_;
-  const bool is_overflow_;
-};
-
-// The following helper template addresses a corner case in range checks for
-// conversion from a floating-point type to an integral type of smaller range
-// but larger precision (e.g. float -> unsigned). The problem is as follows:
-//   1. Integral maximum is always one less than a power of two, so it must be
-//      truncated to fit the mantissa of the floating point. The direction of
-//      rounding is implementation defined, but by default it's always IEEE
-//      floats, which round to nearest and thus result in a value of larger
-//      magnitude than the integral value.
-//      Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
-//                                   // is 4294967295u.
-//   2. If the floating point value is equal to the promoted integral maximum
-//      value, a range check will erroneously pass.
-//      Example: (4294967296f <= 4294967295u) // This is true due to a precision
-//                                            // loss in rounding up to float.
-//   3. When the floating point value is then converted to an integral, the
-//      resulting value is out of range for the target integral type and
-//      thus is implementation defined.
-//      Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
-// To fix this bug we manually truncate the maximum value when the destination
-// type is an integral of larger precision than the source floating-point type,
-// such that the resulting maximum is represented exactly as a floating point.
-template <typename Dst, typename Src, template <typename> class Bounds>
-struct NarrowingRange {
-  using SrcLimits = std::numeric_limits<Src>;
-  using DstLimits = typename std::numeric_limits<Dst>;
-
-  // Computes the mask required to make an accurate comparison between types.
-  static const int kShift =
-      (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
-       SrcLimits::digits < DstLimits::digits)
-          ? (DstLimits::digits - SrcLimits::digits)
-          : 0;
-  // Masks out the integer bits that are beyond the precision of the
-  // intermediate type used for comparison.
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-  static constexpr T Adjust(T value) {
-    static_assert(std::is_same<T, Dst>::value, "");
-    static_assert(kShift < DstLimits::digits, "");
-    using UnsignedDst = typename std::make_unsigned_t<T>;
-    return static_cast<T>(ConditionalNegate(
-        SafeUnsignedAbs(value) & ~((UnsignedDst{1} << kShift) - UnsignedDst{1}),
-        IsValueNegative(value)));
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static constexpr T Adjust(T value) {
-    static_assert(std::is_same<T, Dst>::value, "");
-    static_assert(kShift == 0, "");
-    return value;
-  }
-
-  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
-  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
-};
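The float-rounding hazard described in the comment above is easy to reproduce. A minimal illustration, assuming IEEE-754 float with round-to-nearest (the same assumption the comment makes); the function name is illustrative only:

#include <cstdint>
#include <limits>

bool NaiveRangeCheckIsFooled() {
  // UINT32_MAX is not representable in a float's 24-bit mantissa, so it
  // rounds up to 4294967296.0f when converted.
  const float f = static_cast<float>(std::numeric_limits<uint32_t>::max());
  // The naive check promotes the integral bound to float as well, so the
  // comparison passes even though converting f back to uint32_t overflows.
  return f <= std::numeric_limits<uint32_t>::max();  // true
}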
-
-template <typename Dst,
-          typename Src,
-          template <typename>
-          class Bounds,
-          IntegerRepresentation DstSign = std::is_signed<Dst>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED,
-          IntegerRepresentation SrcSign = std::is_signed<Src>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED,
-          NumericRangeRepresentation DstRange =
-              StaticDstRangeRelationToSrcRange<Dst, Src>::value>
-struct DstRangeRelationToSrcRangeImpl;
-
-// The following templates are for ranges that must be verified at runtime. We
-// split them into checks based on signedness to avoid confusing casts and
-// compiler warnings on signed and unsigned comparisons.
-
-// Same sign narrowing: The range is contained for normal limits.
-template <typename Dst,
-          typename Src,
-          template <typename>
-          class Bounds,
-          IntegerRepresentation DstSign,
-          IntegerRepresentation SrcSign>
-struct DstRangeRelationToSrcRangeImpl<Dst,
-                                      Src,
-                                      Bounds,
-                                      DstSign,
-                                      SrcSign,
-                                      NUMERIC_RANGE_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using SrcLimits = std::numeric_limits<Src>;
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    return RangeCheck(
-        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
-            static_cast<Dst>(value) >= DstLimits::lowest(),
-        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
-            static_cast<Dst>(value) <= DstLimits::max());
-  }
-};
-
-// Signed to signed narrowing: Both the upper and lower boundaries may be
-// exceeded for standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
-struct DstRangeRelationToSrcRangeImpl<Dst,
-                                      Src,
-                                      Bounds,
-                                      INTEGER_REPRESENTATION_SIGNED,
-                                      INTEGER_REPRESENTATION_SIGNED,
-                                      NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
-  }
-};
-
-// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
-// standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
-struct DstRangeRelationToSrcRangeImpl<Dst,
-                                      Src,
-                                      Bounds,
-                                      INTEGER_REPRESENTATION_UNSIGNED,
-                                      INTEGER_REPRESENTATION_UNSIGNED,
-                                      NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    return RangeCheck(
-        DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
-        value <= DstLimits::max());
-  }
-};
-
-// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
-struct DstRangeRelationToSrcRangeImpl<Dst,
-                                      Src,
-                                      Bounds,
-                                      INTEGER_REPRESENTATION_SIGNED,
-                                      INTEGER_REPRESENTATION_UNSIGNED,
-                                      NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    using Promotion = decltype(Src() + Dst());
-    return RangeCheck(DstLimits::lowest() <= Dst(0) ||
-                          static_cast<Promotion>(value) >=
-                              static_cast<Promotion>(DstLimits::lowest()),
-                      static_cast<Promotion>(value) <=
-                          static_cast<Promotion>(DstLimits::max()));
-  }
-};
-
-// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
-// and any negative value exceeds the lower boundary for standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
-struct DstRangeRelationToSrcRangeImpl<Dst,
-                                      Src,
-                                      Bounds,
-                                      INTEGER_REPRESENTATION_UNSIGNED,
-                                      INTEGER_REPRESENTATION_SIGNED,
-                                      NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using SrcLimits = std::numeric_limits<Src>;
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    using Promotion = decltype(Src() + Dst());
-    bool ge_zero = false;
-    // Converting floating-point to integer will discard the fractional part,
-    // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
-    if (std::is_floating_point<Src>::value) {
-      ge_zero = value > Src(-1);
-    } else {
-      ge_zero = value >= Src(0);
-    }
-    return RangeCheck(
-        ge_zero && (DstLimits::lowest() == 0 ||
-                    static_cast<Dst>(value) >= DstLimits::lowest()),
-        static_cast<Promotion>(SrcLimits::max()) <=
-                static_cast<Promotion>(DstLimits::max()) ||
-            static_cast<Promotion>(value) <=
-                static_cast<Promotion>(DstLimits::max()));
-  }
-};
-
-// Simple wrapper for statically checking if a type's range is contained.
-template <typename Dst, typename Src>
-struct IsTypeInRangeForNumericType {
-  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
-                            NUMERIC_RANGE_CONTAINED;
-};
-
-template <typename Dst,
-          template <typename> class Bounds = std::numeric_limits,
-          typename Src>
-constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
-  static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
-  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
-  static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
-  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
-}
-
-// Integer promotion templates used by the portable checked integer arithmetic.
-template <size_t Size, bool IsSigned>
-struct IntegerForDigitsAndSign;
-
-#define PA_INTEGER_FOR_DIGITS_AND_SIGN(I)                       \
-  template <>                                                   \
-  struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
-                                 std::is_signed<I>::value> {    \
-    using type = I;                                             \
-  }
-
-PA_INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
-PA_INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
-#undef PA_INTEGER_FOR_DIGITS_AND_SIGN
-
-// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
-// support 128-bit math, then the ArithmeticPromotion template below will need
-// to be updated (or more likely replaced with a decltype expression).
-static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
-              "Max integer size not supported for this toolchain.");
-
-template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
-struct TwiceWiderInteger {
-  using type =
-      typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
-                                       IsSigned>::type;
-};
-
-enum ArithmeticPromotionCategory {
-  LEFT_PROMOTION,  // Use the type of the left-hand argument.
-  RIGHT_PROMOTION  // Use the type of the right-hand argument.
-};
-
-// Determines the type that can represent the largest positive value.
-template <typename Lhs,
-          typename Rhs,
-          ArithmeticPromotionCategory Promotion =
-              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
-                  ? LEFT_PROMOTION
-                  : RIGHT_PROMOTION>
-struct MaxExponentPromotion;
-
-template <typename Lhs, typename Rhs>
-struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
-  using type = Lhs;
-};
-
-template <typename Lhs, typename Rhs>
-struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
-  using type = Rhs;
-};
-
-// Determines the type that can represent the lowest arithmetic value.
-template <typename Lhs,
-          typename Rhs,
-          ArithmeticPromotionCategory Promotion =
-              std::is_signed<Lhs>::value
-                  ? (std::is_signed<Rhs>::value
-                         ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
-                                ? LEFT_PROMOTION
-                                : RIGHT_PROMOTION)
-                         : LEFT_PROMOTION)
-                  : (std::is_signed<Rhs>::value
-                         ? RIGHT_PROMOTION
-                         : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
-                                ? LEFT_PROMOTION
-                                : RIGHT_PROMOTION))>
-struct LowestValuePromotion;
-
-template <typename Lhs, typename Rhs>
-struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
-  using type = Lhs;
-};
-
-template <typename Lhs, typename Rhs>
-struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
-  using type = Rhs;
-};
-
-// Determines the type that is best able to represent an arithmetic result.
-template <
-    typename Lhs,
-    typename Rhs = Lhs,
-    bool is_intmax_type =
-        std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
-            IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
-                value == IntegerBitsPlusSign<intmax_t>::value,
-    bool is_max_exponent =
-        StaticDstRangeRelationToSrcRange<
-            typename MaxExponentPromotion<Lhs, Rhs>::type,
-            Lhs>::value ==
-        NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
-            typename MaxExponentPromotion<Lhs, Rhs>::type,
-            Rhs>::value == NUMERIC_RANGE_CONTAINED>
-struct BigEnoughPromotion;
-
-// The side with the max exponent is big enough.
-template <typename Lhs, typename Rhs, bool is_intmax_type>
-struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
-  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
-  static const bool is_contained = true;
-};
-
-// We can use a twice wider type to fit.
-template <typename Lhs, typename Rhs>
-struct BigEnoughPromotion<Lhs, Rhs, false, false> {
-  using type =
-      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
-                                 std::is_signed<Lhs>::value ||
-                                     std::is_signed<Rhs>::value>::type;
-  static const bool is_contained = true;
-};
-
-// No type is large enough.
-template <typename Lhs, typename Rhs>
-struct BigEnoughPromotion<Lhs, Rhs, true, false> {
-  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
-  static const bool is_contained = false;
-};
-
-// We can statically check if operations on the provided types can wrap, so we
-// can skip the checked operations if they're not needed. So, for an integer we
-// care if the destination type preserves the sign and is twice the width of
-// the source.
-template <typename T, typename Lhs, typename Rhs = Lhs>
-struct IsIntegerArithmeticSafe {
-  static const bool value =
-      !std::is_floating_point<T>::value &&
-      !std::is_floating_point<Lhs>::value &&
-      !std::is_floating_point<Rhs>::value &&
-      std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
-      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
-      std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
-      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
-};
-
-// Promotes to a type that can represent any possible result of a binary
-// arithmetic operation with the source types.
-template <typename Lhs,
-          typename Rhs,
-          bool is_promotion_possible = IsIntegerArithmeticSafe<
-              typename std::conditional<std::is_signed<Lhs>::value ||
-                                            std::is_signed<Rhs>::value,
-                                        intmax_t,
-                                        uintmax_t>::type,
-              typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
-struct FastIntegerArithmeticPromotion;
-
-template <typename Lhs, typename Rhs>
-struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
-  using type =
-      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
-                                 std::is_signed<Lhs>::value ||
-                                     std::is_signed<Rhs>::value>::type;
-  static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
-  static const bool is_contained = true;
-};
-
-template <typename Lhs, typename Rhs>
-struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
-  using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
-  static const bool is_contained = false;
-};
-
-// Extracts the underlying type from an enum.
-template <typename T, bool is_enum = std::is_enum<T>::value>
-struct ArithmeticOrUnderlyingEnum;
-
-template <typename T>
-struct ArithmeticOrUnderlyingEnum<T, true> {
-  using type = typename std::underlying_type<T>::type;
-  static const bool value = std::is_arithmetic<type>::value;
-};
-
-template <typename T>
-struct ArithmeticOrUnderlyingEnum<T, false> {
-  using type = T;
-  static const bool value = std::is_arithmetic<type>::value;
-};
-
-// The following are helper templates used in the CheckedNumeric class.
-template <typename T>
-class CheckedNumeric;
-
-template <typename T>
-class ClampedNumeric;
-
-template <typename T>
-class StrictNumeric;
-
-// Used to treat CheckedNumeric and arithmetic underlying types the same.
-template <typename T>
-struct UnderlyingType {
-  using type = typename ArithmeticOrUnderlyingEnum<T>::type;
-  static const bool is_numeric = std::is_arithmetic<type>::value;
-  static const bool is_checked = false;
-  static const bool is_clamped = false;
-  static const bool is_strict = false;
-};
-
-template <typename T>
-struct UnderlyingType<CheckedNumeric<T>> {
-  using type = T;
-  static const bool is_numeric = true;
-  static const bool is_checked = true;
-  static const bool is_clamped = false;
-  static const bool is_strict = false;
-};
-
-template <typename T>
-struct UnderlyingType<ClampedNumeric<T>> {
-  using type = T;
-  static const bool is_numeric = true;
-  static const bool is_checked = false;
-  static const bool is_clamped = true;
-  static const bool is_strict = false;
-};
-
-template <typename T>
-struct UnderlyingType<StrictNumeric<T>> {
-  using type = T;
-  static const bool is_numeric = true;
-  static const bool is_checked = false;
-  static const bool is_clamped = false;
-  static const bool is_strict = true;
-};
-
-template <typename L, typename R>
-struct IsCheckedOp {
-  static const bool value =
-      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
-      (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
-};
-
-template <typename L, typename R>
-struct IsClampedOp {
-  static const bool value =
-      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
-      (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
-      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
-};
-
-template <typename L, typename R>
-struct IsStrictOp {
-  static const bool value =
-      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
-      (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
-      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
-      !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
-};
-
-// as_signed<> returns the supplied integral value (or integral castable
-// Numeric template) cast as a signed integral of equivalent precision.
-// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
-template <typename Src>
-constexpr typename std::make_signed<
-    typename base::internal::UnderlyingType<Src>::type>::type
-as_signed(const Src value) {
-  static_assert(std::is_integral<decltype(as_signed(value))>::value,
-                "Argument must be a signed or unsigned integer type.");
-  return static_cast<decltype(as_signed(value))>(value);
-}
-
-// as_unsigned<> returns the supplied integral value (or integral castable
-// Numeric template) cast as an unsigned integral of equivalent precision.
-// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
-template <typename Src>
-constexpr typename std::make_unsigned<
-    typename base::internal::UnderlyingType<Src>::type>::type
-as_unsigned(const Src value) {
-  static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
-                "Argument must be a signed or unsigned integer type.");
-  return static_cast<decltype(as_unsigned(value))>(value);
-}
-
-template <typename L, typename R>
-constexpr bool IsLessImpl(const L lhs,
-                          const R rhs,
-                          const RangeCheck l_range,
-                          const RangeCheck r_range) {
-  return l_range.IsUnderflow() || r_range.IsOverflow() ||
-         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <
-                                    static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsLess {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                      DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-constexpr bool IsLessOrEqualImpl(const L lhs,
-                                 const R rhs,
-                                 const RangeCheck l_range,
-                                 const RangeCheck r_range) {
-  return l_range.IsUnderflow() || r_range.IsOverflow() ||
-         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <=
-                                    static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsLessOrEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                             DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-constexpr bool IsGreaterImpl(const L lhs,
-                             const R rhs,
-                             const RangeCheck l_range,
-                             const RangeCheck r_range) {
-  return l_range.IsOverflow() || r_range.IsUnderflow() ||
-         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >
-                                    static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsGreater {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                         DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-constexpr bool IsGreaterOrEqualImpl(const L lhs,
-                                    const R rhs,
-                                    const RangeCheck l_range,
-                                    const RangeCheck r_range) {
-  return l_range.IsOverflow() || r_range.IsUnderflow() ||
-         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >=
-                                    static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsGreaterOrEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                                DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-struct IsEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return DstRangeRelationToSrcRange<R>(lhs) ==
-               DstRangeRelationToSrcRange<L>(rhs) &&
-           static_cast<decltype(lhs + rhs)>(lhs) ==
-               static_cast<decltype(lhs + rhs)>(rhs);
-  }
-};
-
-template <typename L, typename R>
-struct IsNotEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return DstRangeRelationToSrcRange<R>(lhs) !=
-               DstRangeRelationToSrcRange<L>(rhs) ||
-           static_cast<decltype(lhs + rhs)>(lhs) !=
-               static_cast<decltype(lhs + rhs)>(rhs);
-  }
-};
-
-// SafeCompare() performs the actual comparison, promoting both operands to a
-// common type when one is available.
-template <template <typename, typename> class C, typename L, typename R>
-constexpr bool SafeCompare(const L lhs, const R rhs) {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  using Promotion = BigEnoughPromotion<L, R>;
-  using BigType = typename Promotion::type;
-  return Promotion::is_contained
-             // Force to a larger type for speed if both are contained.
-             ? C<BigType, BigType>::Test(
-                   static_cast<BigType>(static_cast<L>(lhs)),
-                   static_cast<BigType>(static_cast<R>(rhs)))
-             // Let the template functions figure it out for mixed types.
-             : C<L, R>::Test(lhs, rhs);
-}
-
-template <typename Dst, typename Src>
-constexpr bool IsMaxInRangeForNumericType() {
-  return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
-                                          std::numeric_limits<Src>::max());
-}
-
-template <typename Dst, typename Src>
-constexpr bool IsMinInRangeForNumericType() {
-  return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
-                                       std::numeric_limits<Src>::lowest());
-}
-
-template <typename Dst, typename Src>
-constexpr Dst CommonMax() {
-  return !IsMaxInRangeForNumericType<Dst, Src>()
-             ? Dst(std::numeric_limits<Dst>::max())
-             : Dst(std::numeric_limits<Src>::max());
-}
-
-template <typename Dst, typename Src>
-constexpr Dst CommonMin() {
-  return !IsMinInRangeForNumericType<Dst, Src>()
-             ? Dst(std::numeric_limits<Dst>::lowest())
-             : Dst(std::numeric_limits<Src>::lowest());
-}
-
-// This is a wrapper that returns the max or min for a supplied type.
-// If the argument is false, the returned value is the maximum. If true, the
-// returned value is the minimum.
-template <typename Dst, typename Src = Dst>
-constexpr Dst CommonMaxOrMin(bool is_min) {
-  return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
-}
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
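
The comparison templates removed above exist so that mixed signed/unsigned comparisons give the mathematically correct answer. As a rough standalone illustration only (assuming a C++20 toolchain; this is not the removed Chromium code), the standard std::cmp_less helper provides the same sign-correct semantics that IsLess<L, R>::Test() computes:

    // Standalone sketch: std::cmp_less avoids the usual signed/unsigned
    // promotion pitfall that the removed helpers guard against.
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    int main() {
      int32_t negative = -1;
      uint32_t big = 4294967295u;  // UINT32_MAX

      // Built-in operator<: -1 is converted to UINT32_MAX, so this prints 0.
      std::printf("%d\n", negative < big);
      // Sign-correct comparison: prints 1, matching mathematical ordering.
      std::printf("%d\n", std::cmp_less(negative, big));
      return 0;
    }
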
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math.h
deleted file mode 100644
index dfb59ca..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
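
The deleted umbrella header only aggregates the checked, clamped and conversion headers. For orientation, the two failure policies it bundles can be sketched in plain C++; this uses the Clang/GCC overflow builtins and hypothetical CheckedAdd/ClampedAdd names, not the removed CheckedNumeric/ClampedNumeric API:

    #include <cstdint>
    #include <limits>
    #include <optional>

    // "Checked": report failure instead of overflowing.
    std::optional<int32_t> CheckedAdd(int32_t x, int32_t y) {
      int32_t result;
      if (__builtin_add_overflow(x, y, &result))
        return std::nullopt;
      return result;
    }

    // "Clamped": saturate to the representable range instead of overflowing.
    int32_t ClampedAdd(int32_t x, int32_t y) {
      int32_t result;
      if (!__builtin_add_overflow(x, y, &result))
        return result;
      return y > 0 ? std::numeric_limits<int32_t>::max()
                   : std::numeric_limits<int32_t>::min();
    }
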
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_arm_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_arm_impl.h
deleted file mode 100644
index c284e3e..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_arm_impl.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
-
-#include <cassert>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-
-namespace partition_alloc::internal::base::internal {
-
-template <typename T, typename U>
-struct CheckedMulFastAsmOp {
-  static const bool is_supported =
-      kEnableAsmCode && FastIntegerArithmeticPromotion<T, U>::is_contained;
-
-  // The following is not an assembler routine and is thus constexpr safe; it
-  // just emits much more efficient code than the Clang and GCC builtins for
-  // performing overflow-checked multiplication when a twice wider type is
-  // available. The below compiles down to 2-3 instructions, depending on the
-  // width of the types in use.
-  // As an example, an int32_t multiply compiles to:
-  //    smull   r0, r1, r0, r1
-  //    cmp     r1, r1, asr #31
-  // And an int16_t multiply compiles to:
-  //    smulbb  r1, r1, r0
-  //    asr     r2, r1, #16
-  //    cmp     r2, r1, asr #15
-  template <typename V>
-  static constexpr bool Do(T x, U y, V* result) {
-    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
-    Promotion presult;
-
-    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
-    if (!IsValueInRangeForNumericType<V>(presult))
-      return false;
-    *result = static_cast<V>(presult);
-    return true;
-  }
-};
-
-template <typename T, typename U>
-struct ClampedAddFastAsmOp {
-  static const bool is_supported =
-      kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
-      IsTypeInRangeForNumericType<
-          int32_t,
-          typename BigEnoughPromotion<T, U>::type>::value;
-
-  template <typename V>
-  __attribute__((always_inline)) static V Do(T x, U y) {
-    // This will get promoted to an int, so let the compiler do whatever is
-    // clever and rely on the saturated cast to bounds check.
-    if (IsIntegerArithmeticSafe<int, T, U>::value)
-      return saturated_cast<V>(x + y);
-
-    int32_t result;
-    int32_t x_i32 = checked_cast<int32_t>(x);
-    int32_t y_i32 = checked_cast<int32_t>(y);
-
-    asm("qadd %[result], %[first], %[second]"
-        : [result] "=r"(result)
-        : [first] "r"(x_i32), [second] "r"(y_i32));
-    return saturated_cast<V>(result);
-  }
-};
-
-template <typename T, typename U>
-struct ClampedSubFastAsmOp {
-  static const bool is_supported =
-      kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
-      IsTypeInRangeForNumericType<
-          int32_t,
-          typename BigEnoughPromotion<T, U>::type>::value;
-
-  template <typename V>
-  __attribute__((always_inline)) static V Do(T x, U y) {
-    // This will get promoted to an int, so let the compiler do whatever is
-    // clever and rely on the saturated cast to bounds check.
-    if (IsIntegerArithmeticSafe<int, T, U>::value)
-      return saturated_cast<V>(x - y);
-
-    int32_t result;
-    int32_t x_i32 = checked_cast<int32_t>(x);
-    int32_t y_i32 = checked_cast<int32_t>(y);
-
-    asm("qsub %[result], %[first], %[second]"
-        : [result] "=r"(result)
-        : [first] "r"(x_i32), [second] "r"(y_i32));
-    return saturated_cast<V>(result);
-  }
-};
-
-template <typename T, typename U>
-struct ClampedMulFastAsmOp {
-  static const bool is_supported =
-      kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;
-
-  template <typename V>
-  __attribute__((always_inline)) static V Do(T x, U y) {
-    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
-    // it's fewer instructions than promoting and then saturating.
-    if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
-        !IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
-      V result;
-      return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
-                 ? result
-                 : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
-    }
-
-    assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
-    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
-    return saturated_cast<V>(static_cast<Promotion>(x) *
-                             static_cast<Promotion>(y));
-  }
-};
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
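
The removed ARM path leans on the qadd/qsub saturating instructions. A portable sketch of the same saturating-add behavior in plain C++ (hypothetical SaturatedAdd32 helper, not the removed assembler code):

    #include <cstdint>
    #include <limits>

    // Saturating 32-bit signed addition, i.e. what ARM "qadd" does in hardware.
    int32_t SaturatedAdd32(int32_t x, int32_t y) {
      const int64_t wide = static_cast<int64_t>(x) + static_cast<int64_t>(y);
      if (wide > std::numeric_limits<int32_t>::max())
        return std::numeric_limits<int32_t>::max();
      if (wide < std::numeric_limits<int32_t>::min())
        return std::numeric_limits<int32_t>::min();
      return static_cast<int32_t>(wide);
    }
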
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h
deleted file mode 100644
index 956a892..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
-
-#include <cassert>
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-
-#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_arm_impl.h"
-#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (1)
-#else
-#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (0)
-#endif
-
-namespace partition_alloc::internal::base::internal {
-
-// These are the non-functioning boilerplate implementations of the optimized
-// safe math routines.
-#if !PA_BASE_HAS_ASSEMBLER_SAFE_MATH
-template <typename T, typename U>
-struct CheckedMulFastAsmOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr bool Do(T, U, V*) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<bool>();
-  }
-};
-
-template <typename T, typename U>
-struct ClampedAddFastAsmOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr V Do(T, U) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<V>();
-  }
-};
-
-template <typename T, typename U>
-struct ClampedSubFastAsmOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr V Do(T, U) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<V>();
-  }
-};
-
-template <typename T, typename U>
-struct ClampedMulFastAsmOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr V Do(T, U) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<V>();
-  }
-};
-#endif  // PA_BASE_HAS_ASSEMBLER_SAFE_MATH
-#undef PA_BASE_HAS_ASSEMBLER_SAFE_MATH
-
-template <typename T, typename U>
-struct CheckedAddFastOp {
-  static const bool is_supported = true;
-  template <typename V>
-  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
-    return !__builtin_add_overflow(x, y, result);
-  }
-};
-
-template <typename T, typename U>
-struct CheckedSubFastOp {
-  static const bool is_supported = true;
-  template <typename V>
-  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
-    return !__builtin_sub_overflow(x, y, result);
-  }
-};
-
-template <typename T, typename U>
-struct CheckedMulFastOp {
-#if defined(__clang__)
-  // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
-  // support full-width, mixed-sign multiply builtins.
-  // https://crbug.com/613003
-  // We can support intptr_t, uintptr_t, or a smaller common type.
-  static const bool is_supported =
-      (IsTypeInRangeForNumericType<intptr_t, T>::value &&
-       IsTypeInRangeForNumericType<intptr_t, U>::value) ||
-      (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
-       IsTypeInRangeForNumericType<uintptr_t, U>::value);
-#else
-  static const bool is_supported = true;
-#endif
-  template <typename V>
-  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
-    return CheckedMulFastAsmOp<T, U>::is_supported
-               ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
-               : !__builtin_mul_overflow(x, y, result);
-  }
-};
-
-template <typename T, typename U>
-struct ClampedAddFastOp {
-  static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
-  template <typename V>
-  __attribute__((always_inline)) static V Do(T x, U y) {
-    return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
-  }
-};
-
-template <typename T, typename U>
-struct ClampedSubFastOp {
-  static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
-  template <typename V>
-  __attribute__((always_inline)) static V Do(T x, U y) {
-    return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
-  }
-};
-
-template <typename T, typename U>
-struct ClampedMulFastOp {
-  static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
-  template <typename V>
-  __attribute__((always_inline)) static V Do(T x, U y) {
-    return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
-  }
-};
-
-template <typename T>
-struct ClampedNegFastOp {
-  static const bool is_supported = std::is_signed<T>::value;
-  __attribute__((always_inline)) static T Do(T value) {
-    // Use this when there is no assembler path available.
-    if (!ClampedSubFastAsmOp<T, T>::is_supported) {
-      T result;
-      return !__builtin_sub_overflow(T(0), value, &result)
-                 ? result
-                 : std::numeric_limits<T>::max();
-    }
-
-    // Fall back to the normal subtraction path.
-    return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
-  }
-};
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
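
The fast paths above are thin wrappers over the Clang/GCC overflow builtins. A minimal standalone sketch of those builtins (they return true on overflow, which is why the wrappers negate the result):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t result;
      // INT32_MAX * 2 does not fit in int32_t, so this reports overflow.
      bool overflowed = __builtin_mul_overflow(INT32_MAX, 2, &result);
      std::printf("overflowed=%d\n", overflowed);  // overflowed=1

      // 1 + 2 fits, so the result is written and no overflow is reported.
      overflowed = __builtin_add_overflow(1, 2, &result);
      std::printf("overflowed=%d result=%d\n", overflowed, result);  // 0, 3
      return 0;
    }
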
diff --git a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h b/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h
deleted file mode 100644
index e644a67..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <cassert>
-#include <climits>
-#include <cmath>
-#include <cstdlib>
-#include <limits>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_ASMJS)
-// Optimized safe math instructions are incompatible with asmjs.
-#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (0)
-// Where available use builtin math overflow support on Clang and GCC.
-#elif !defined(__native_client__) &&                       \
-    ((defined(__clang__) &&                                \
-      ((__clang_major__ > 3) ||                            \
-       (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
-     (defined(__GNUC__) && __GNUC__ >= 5))
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h"
-#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (1)
-#else
-#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (0)
-#endif
-
-namespace partition_alloc::internal::base::internal {
-
-// These are the non-functioning boilerplate implementations of the optimized
-// safe math routines.
-#if !PA_BASE_HAS_OPTIMIZED_SAFE_MATH
-template <typename T, typename U>
-struct CheckedAddFastOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr bool Do(T, U, V*) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<bool>();
-  }
-};
-
-template <typename T, typename U>
-struct CheckedSubFastOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr bool Do(T, U, V*) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<bool>();
-  }
-};
-
-template <typename T, typename U>
-struct CheckedMulFastOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr bool Do(T, U, V*) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<bool>();
-  }
-};
-
-template <typename T, typename U>
-struct ClampedAddFastOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr V Do(T, U) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<V>();
-  }
-};
-
-template <typename T, typename U>
-struct ClampedSubFastOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr V Do(T, U) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<V>();
-  }
-};
-
-template <typename T, typename U>
-struct ClampedMulFastOp {
-  static const bool is_supported = false;
-  template <typename V>
-  static constexpr V Do(T, U) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<V>();
-  }
-};
-
-template <typename T>
-struct ClampedNegFastOp {
-  static const bool is_supported = false;
-  static constexpr T Do(T) {
-    // Force a compile failure if instantiated.
-    return CheckOnFailure::template HandleFailure<T>();
-  }
-};
-#endif  // PA_BASE_HAS_OPTIMIZED_SAFE_MATH
-#undef PA_BASE_HAS_OPTIMIZED_SAFE_MATH
-
-// This is used for UnsignedAbs, where we need to support floating-point
-// template instantiations even though we don't actually support the operations.
-// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
-// so the float versions will not compile.
-template <typename Numeric,
-          bool IsInteger = std::is_integral<Numeric>::value,
-          bool IsFloat = std::is_floating_point<Numeric>::value>
-struct UnsignedOrFloatForSize;
-
-template <typename Numeric>
-struct UnsignedOrFloatForSize<Numeric, true, false> {
-  using type = typename std::make_unsigned<Numeric>::type;
-};
-
-template <typename Numeric>
-struct UnsignedOrFloatForSize<Numeric, false, true> {
-  using type = Numeric;
-};
-
-// Wrap the unary operations to allow SFINAE when instantiating integrals versus
-// floating points. These don't perform any overflow checking. Rather, they
-// exhibit well-defined overflow semantics and rely on the caller to detect
-// if an overflow occurred.
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr T NegateWrapper(T value) {
-  using UnsignedT = typename std::make_unsigned<T>::type;
-  // This will compile to a NEG on Intel, and is normal negation on ARM.
-  return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
-}
-
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-constexpr T NegateWrapper(T value) {
-  return -value;
-}
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
-  return ~value;
-}
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr T AbsWrapper(T value) {
-  return static_cast<T>(SafeUnsignedAbs(value));
-}
-
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-constexpr T AbsWrapper(T value) {
-  return value < 0 ? -value : value;
-}
-
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R>
-struct MathWrapper {
-  using math = M<typename UnderlyingType<L>::type,
-                 typename UnderlyingType<R>::type,
-                 void>;
-  using type = typename math::result_type;
-};
-
-// The following macros are just boilerplate for the standard arithmetic
-// operator overloads and variadic function templates. A macro isn't the nicest
-// solution, but it beats rewriting these over and over again.
-#define PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)    \
-  template <typename L, typename R, typename... Args>                   \
-  constexpr auto CL_ABBR##OP_NAME(const L lhs, const R rhs,             \
-                                  const Args... args) {                 \
-    return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, \
-                                                              args...); \
-  }
-
-#define PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP,  \
-                                             CMP_OP)                       \
-  /* Binary arithmetic operator for all CLASS##Numeric operations. */      \
-  template <typename L, typename R,                                        \
-            typename std::enable_if<Is##CLASS##Op<L, R>::value>::type* =   \
-                nullptr>                                                   \
-  constexpr CLASS##Numeric<                                                \
-      typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type>                \
-  operator OP(const L lhs, const R rhs) {                                  \
-    return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs,  \
-                                                                     rhs); \
-  }                                                                        \
-  /* Assignment arithmetic operator implementation from CLASS##Numeric. */ \
-  template <typename L>                                                    \
-  template <typename R>                                                    \
-  constexpr CLASS##Numeric<L>& CLASS##Numeric<L>::operator CMP_OP(         \
-      const R rhs) {                                                       \
-    return MathOp<CLASS##OP_NAME##Op>(rhs);                                \
-  }                                                                        \
-  /* Variadic arithmetic functions that return CLASS##Numeric. */          \
-  PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
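
NegateWrapper above routes integer negation through the unsigned type so that negating the minimum value wraps instead of being undefined behavior. A standalone sketch of the same trick (hypothetical WrappingNegate helper, not the removed code):

    #include <cstdint>
    #include <cstdio>

    // Negate via the unsigned type: well-defined wraparound even for INT32_MIN.
    int32_t WrappingNegate(int32_t value) {
      return static_cast<int32_t>(0u - static_cast<uint32_t>(value));
    }

    int main() {
      std::printf("%d\n", WrappingNegate(5));          // -5
      std::printf("%d\n", WrappingNegate(INT32_MIN));  // wraps back to INT32_MIN
      return 0;
    }
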
diff --git a/base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h b/base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h
deleted file mode 100644
index ea55ee6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This provides a wrapper around system calls which may be interrupted by a
-// signal and return EINTR. See man 7 signal.
-// To prevent long-lasting loops (which would likely be a bug, such as a signal
-// that should be masked) from going unnoticed, there is a limit after which the
-// caller will nonetheless see an EINTR in Debug builds.
-//
-// On Windows and Fuchsia, this wrapper macro does nothing because there are no
-// signals.
-//
-// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
-// value of close is significant. See http://crbug.com/269623.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
-
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_POSIX)
-
-#include <errno.h>
-
-#if defined(NDEBUG)
-
-#define PA_HANDLE_EINTR(x)                                  \
-  ({                                                        \
-    decltype(x) eintr_wrapper_result;                       \
-    do {                                                    \
-      eintr_wrapper_result = (x);                           \
-    } while (eintr_wrapper_result == -1 && errno == EINTR); \
-    eintr_wrapper_result;                                   \
-  })
-
-#else
-
-#define PA_HANDLE_EINTR(x)                                   \
-  ({                                                         \
-    int eintr_wrapper_counter = 0;                           \
-    decltype(x) eintr_wrapper_result;                        \
-    do {                                                     \
-      eintr_wrapper_result = (x);                            \
-    } while (eintr_wrapper_result == -1 && errno == EINTR && \
-             eintr_wrapper_counter++ < 100);                 \
-    eintr_wrapper_result;                                    \
-  })
-
-#endif  // NDEBUG
-
-#else  // !BUILDFLAG(IS_POSIX)
-
-#define PA_HANDLE_EINTR(x) (x)
-
-#endif  // !BUILDFLAG(IS_POSIX)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
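
In release builds, PA_HANDLE_EINTR(read(fd, buf, len)) expands to a retry loop equivalent to the following sketch (POSIX read() used purely as an example call site):

    #include <cerrno>
    #include <cstddef>
    #include <unistd.h>

    // Retry the call as long as it fails with EINTR, returning the final result.
    ssize_t ReadRetryingOnEintr(int fd, void* buf, size_t len) {
      ssize_t result;
      do {
        result = read(fd, buf, len);
      } while (result == -1 && errno == EINTR);
      return result;
    }
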
diff --git a/base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.cc b/base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.cc
deleted file mode 100644
index 5c4de6e..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2006-2009 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h"
-
-#include <errno.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-#if defined(__GLIBC__)
-#define USE_HISTORICAL_STRERROR_R 1
-// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
-// is defined, but the symbol is renamed to __gnu_strerror_r which only exists
-// on those later versions. For parity, add the same condition as bionic.
-#elif defined(__BIONIC__) && defined(_GNU_SOURCE) && __ANDROID_API__ >= 23
-#define USE_HISTORICAL_STRERROR_R 1
-#else
-#define USE_HISTORICAL_STRERROR_R 0
-#endif
-
-#if USE_HISTORICAL_STRERROR_R
-// glibc has two strerror_r functions: a historical GNU-specific one that
-// returns type char *, and a POSIX.1-2001 compliant one available since 2.3.4
-// that returns int. This wraps the GNU-specific one.
-[[maybe_unused]] static void wrap_posix_strerror_r(
-    char* (*strerror_r_ptr)(int, char*, size_t),
-    int err,
-    char* buf,
-    size_t len) {
-  // GNU version.
-  char* rc = (*strerror_r_ptr)(err, buf, len);
-  if (rc != buf) {
-    // glibc did not use buf and returned a static string instead. Copy it
-    // into buf.
-    buf[0] = '\0';
-    strncat(buf, rc, len - 1);
-  }
-  // The GNU version never fails. Unknown errors get an "unknown error" message.
-  // The result is always null terminated.
-}
-#endif  // USE_HISTORICAL_STRERROR_R
-
-// Wrapper for strerror_r functions that implement the POSIX interface. POSIX
-// does not define the behaviour for some of the edge cases, so we wrap it to
-// guarantee that they are handled. This is compiled on all POSIX platforms, but
-// it will only be used on Linux if the POSIX strerror_r implementation is
-// being used (see below).
-[[maybe_unused]] static void wrap_posix_strerror_r(
-    int (*strerror_r_ptr)(int, char*, size_t),
-    int err,
-    char* buf,
-    size_t len) {
-  int old_errno = errno;
-  // Have to cast since otherwise we get an error if this is the GNU version
-  // (but in such a scenario this function is never called). Sadly we can't use
-  // C++-style casts because the appropriate one is reinterpret_cast but it's
-  // considered illegal to reinterpret_cast a type to itself, so we get an
-  // error in the opposite case.
-  int result = (*strerror_r_ptr)(err, buf, len);
-  if (result == 0) {
-    // POSIX is vague about whether the string will be terminated, although
-    // it indirectly implies that typically ERANGE will be returned, instead
-    // of truncating the string. We play it safe by always terminating the
-    // string explicitly.
-    buf[len - 1] = '\0';
-  } else {
-    // Error. POSIX is vague about whether the return value is itself a system
-    // error code or something else. On Linux currently it is -1 and errno is
-    // set. On BSD-derived systems it is a system error and errno is unchanged.
-    // We try and detect which case it is so as to put as much useful info as
-    // we can into our message.
-    int strerror_error;  // The error encountered in strerror
-    int new_errno = errno;
-    if (new_errno != old_errno) {
-      // errno was changed, so probably the return value is just -1 or something
-      // else that doesn't provide any info, and errno is the error.
-      strerror_error = new_errno;
-    } else {
-      // Either the error from strerror_r was the same as the previous value, or
-      // errno wasn't used. Assume the latter.
-      strerror_error = result;
-    }
-    // snprintf truncates and always null-terminates.
-    snprintf(buf, len, "Error %d while retrieving error %d", strerror_error,
-             err);
-  }
-  errno = old_errno;
-}
-
-void safe_strerror_r(int err, char* buf, size_t len) {
-  if (buf == nullptr || len <= 0) {
-    return;
-  }
-  // If using glibc (i.e., Linux), the compiler will automatically select the
-  // appropriate overloaded function based on the function type of strerror_r.
-  // The other one will be elided from the translation unit since both are
-  // static.
-  wrap_posix_strerror_r(&strerror_r, err, buf, len);
-}
-
-std::string safe_strerror(int err) {
-  const int buffer_size = 256;
-  char buf[buffer_size];
-  safe_strerror_r(err, buf, sizeof(buf));
-  return std::string(buf);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h b/base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h
deleted file mode 100644
index fe402ed..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
-
-#include <stddef.h>
-
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base {
-
-// BEFORE using anything from this file, first look at PLOG and friends in
-// logging.h and use them instead if applicable.
-//
-// This file declares safe, portable alternatives to the POSIX strerror()
-// function. strerror() is inherently unsafe in multi-threaded apps and should
-// never be used. Doing so can cause crashes. Additionally, the thread-safe
-// alternative strerror_r varies in semantics across platforms. Use these
-// functions instead.
-
-// Thread-safe strerror function with dependable semantics that never fails.
-// It will write the string form of error "err" to buffer buf of length len.
-// If there is an error calling the OS's strerror_r() function then a message to
-// that effect will be printed into buf, truncating if necessary. The final
-// result is always null-terminated. The value of errno is never changed.
-//
-// Use this instead of strerror_r().
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void safe_strerror_r(int err, char* buf, size_t len);
-
-// Calls safe_strerror_r with a buffer of suitable size and returns the result
-// in a C++ string.
-//
-// Use this instead of strerror(). Note though that safe_strerror_r will be
-// more robust in the case of heap corruption errors, since it doesn't need to
-// allocate a string.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) std::string safe_strerror(int err);
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
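
A hypothetical call site for the removed helpers, illustrative only and assuming the declaration above were still available to link against:

    #include <cerrno>
    #include <cstdio>
    #include <string>

    // Declaration as in the removed header (component-export macro omitted).
    namespace partition_alloc::internal::base {
    std::string safe_strerror(int err);
    }

    // Hypothetical helper: log why an open() call failed without touching the
    // thread-unsafe strerror().
    void LogOpenFailure() {
      std::string message = partition_alloc::internal::base::safe_strerror(errno);
      std::fprintf(stderr, "open failed: %s\n", message.c_str());
    }
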
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util.cc b/base/allocator/partition_allocator/partition_alloc_base/rand_util.cc
deleted file mode 100644
index b8e23a2..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-
-#include <limits.h>
-#include <math.h>
-#include <stdint.h>
-
-#include <limits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-namespace partition_alloc::internal::base {
-
-uint64_t RandUint64() {
-  uint64_t number;
-  RandBytes(&number, sizeof(number));
-  return number;
-}
-
-uint64_t RandGenerator(uint64_t range) {
-  PA_BASE_DCHECK(range > 0u);
-  // We must discard random results above this number, as they would
-  // make the random generator non-uniform (consider e.g. if
-  // MAX_UINT64 was 7 and |range| was 5, then a result of 1 would be twice
-  // as likely as a result of 3 or 4).
-  uint64_t max_acceptable_value =
-      (std::numeric_limits<uint64_t>::max() / range) * range - 1;
-
-  uint64_t value;
-  do {
-    value = base::RandUint64();
-  } while (value > max_acceptable_value);
-
-  return value % range;
-}
-
-InsecureRandomGenerator::InsecureRandomGenerator() {
-  a_ = base::RandUint64();
-  b_ = base::RandUint64();
-}
-
-void InsecureRandomGenerator::ReseedForTesting(uint64_t seed) {
-  a_ = seed;
-  b_ = seed;
-}
-
-uint64_t InsecureRandomGenerator::RandUint64() {
-  // Using XorShift128+, which is simple and widely used. See
-  // https://en.wikipedia.org/wiki/Xorshift#xorshift+ for details.
-  uint64_t t = a_;
-  const uint64_t s = b_;
-
-  a_ = s;
-  t ^= t << 23;
-  t ^= t >> 17;
-  t ^= s ^ (s >> 26);
-  b_ = t;
-
-  return t + s;
-}
-
-uint32_t InsecureRandomGenerator::RandUint32() {
-  // The generator returns a uint64_t; truncate it to 32 bits.
-  //
-  // It is noted in this paper (https://arxiv.org/abs/1810.05313) that the
-  // lowest 32 bits fail some statistical tests from the Big Crush
-  // suite. Use the higher ones instead.
-  return this->RandUint64() >> 32;
-}
-
-}  // namespace partition_alloc::internal::base
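
RandGenerator() above avoids modulo bias by rejection sampling. A standalone sketch of the same idea, seeded from std::mt19937_64 purely for illustration (UniformBelow is a hypothetical name, not the removed API):

    #include <cstdint>
    #include <limits>
    #include <random>

    // Returns a uniform value in [0, range). |range| must be non-zero.
    uint64_t UniformBelow(uint64_t range, std::mt19937_64& rng) {
      // Largest draw we may accept so that every residue class mod |range| is
      // equally likely; anything above it is discarded and redrawn.
      const uint64_t max_acceptable =
          (std::numeric_limits<uint64_t>::max() / range) * range - 1;
      uint64_t value;
      do {
        value = rng();
      } while (value > max_acceptable);
      return value % range;
    }
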
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util.h b/base/allocator/partition_allocator/partition_alloc_base/rand_util.h
deleted file mode 100644
index 3dc55df..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_RAND_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_RAND_UTIL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-class RandomGenerator;
-}  // namespace partition_alloc
-
-namespace partition_alloc::internal::base {
-
-// Returns a random number in range [0, UINT64_MAX]. Thread-safe.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint64_t RandUint64();
-
-// Returns a random number in range [0, range).  Thread-safe.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint64_t RandGenerator(uint64_t range);
-
-// Fills |output_length| bytes of |output| with random data. Thread-safe.
-//
-// Although implementations are required to use a cryptographically secure
-// random number source, code outside of base/ that relies on this should use
-// crypto::RandBytes instead to ensure the requirement is easily discoverable.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void RandBytes(void* output, size_t output_length);
-
-// Fast, insecure pseudo-random number generator.
-//
-// WARNING: This is not the generator you are looking for. This has significant
-// caveats:
-//   - It is non-cryptographic, so easy to misuse
-//   - It is neither fork() nor clone()-safe.
-//   - Synchronization is up to the client.
-//
-// Always prefer base::Rand*() above, unless you have a use case where its
-// overhead is too high, or system calls are disallowed.
-//
-// Performance: As of 2021, rough overhead on Linux on a desktop machine of
-// base::RandUint64() is ~800ns per call (it performs a system call). On Windows
-// it is lower. On the same machine, this generator's cost is ~2ns per call,
-// regardless of platform.
-//
-// This is different from |Rand*()| above as it is guaranteed to never make a
-// system call to generate a new number, except to seed it.  This should *never*
-// be used for cryptographic applications, and is not thread-safe.
-//
-// It is seeded using base::RandUint64() in the constructor, meaning that it
-// doesn't need to be seeded. It can be re-seeded though, with
-// ReseedForTesting(). Its period is long enough that it should not need to be
-// re-seeded during use.
-//
-// Uses the XorShift128+ generator under the hood.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InsecureRandomGenerator {
- public:
-  // Never use outside testing, not enough entropy.
-  void ReseedForTesting(uint64_t seed);
-
-  uint32_t RandUint32();
-  uint64_t RandUint64();
-
- private:
-  InsecureRandomGenerator();
-  // State.
-  uint64_t a_ = 0, b_ = 0;
-
-  // Before adding a new friend class, make sure that the overhead of
-  // base::Rand*() is too high, using something more representative than a
-  // microbenchmark.
-  //
-  // PartitionAlloc allocations should not take more than 40-50ns per
-  // malloc()/free() pair, otherwise high-level benchmarks regress. It also
-  // does not need a secure PRNG, as it's used for ASLR and zeroing some
-  // allocations at free() time.
-  friend class ::partition_alloc::RandomGenerator;
-
-  PA_FRIEND_TEST_ALL_PREFIXES(
-      PartitionAllocBaseRandUtilTest,
-      InsecureRandomGeneratorProducesBothValuesOfAllBits);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocBaseRandUtilTest,
-                              InsecureRandomGeneratorChiSquared);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocBaseRandUtilTest,
-                              InsecureRandomGeneratorRandDouble);
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_RAND_UTIL_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util_fuchsia.cc b/base/allocator/partition_allocator/partition_alloc_base/rand_util_fuchsia.cc
deleted file mode 100644
index 8839f8d..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util_fuchsia.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-
-#include <zircon/syscalls.h>
-
-namespace partition_alloc::internal::base {
-
-void RandBytes(void* output, size_t output_length) {
-  zx_cprng_draw(output, output_length);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/rand_util_pa_unittest.cc
deleted file mode 100644
index 51ed4ed..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util_pa_unittest.cc
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <algorithm>
-#include <cmath>
-#include <limits>
-#include <memory>
-#include <vector>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::base {
-
-TEST(PartitionAllocBaseRandUtilTest, RandBytes) {
-  const size_t buffer_size = 50;
-  char buffer[buffer_size];
-  memset(buffer, 0, buffer_size);
-  base::RandBytes(buffer, buffer_size);
-  std::sort(buffer, buffer + buffer_size);
-  // Probability of occurrence of less than 25 unique bytes in 50 random bytes
-  // is below 10^-25.
-  EXPECT_GT(std::unique(buffer, buffer + buffer_size) - buffer, 25);
-}
-
-// Verify that calling base::RandBytes with an empty buffer doesn't fail.
-TEST(PartitionAllocBaseRandUtilTest, RandBytes0) {
-  base::RandBytes(nullptr, 0);
-}
-
-// Make sure that it is still appropriate to use RandGenerator in conjunction
-// with std::random_shuffle().
-TEST(PartitionAllocBaseRandUtilTest, RandGeneratorForRandomShuffle) {
-  EXPECT_EQ(base::RandGenerator(1), 0U);
-  EXPECT_LE(std::numeric_limits<ptrdiff_t>::max(),
-            std::numeric_limits<int64_t>::max());
-}
-
-TEST(PartitionAllocBaseRandUtilTest, RandGeneratorIsUniform) {
-  // Verify that RandGenerator has a uniform distribution. This is a
-  // regression test that consistently failed when RandGenerator was
-  // implemented this way:
-  //
-  //   return base::RandUint64() % max;
-  //
-  // A degenerate case for such an implementation is e.g. a top of
-  // range that is 2/3rds of the way to MAX_UINT64, in which case the
-  // bottom half of the range would be twice as likely to occur as the
-  // top half. A bit of calculus care of jar@ shows that the largest
-  // measurable delta is when the top of the range is 3/4ths of the
-  // way, so that's what we use in the test.
-  constexpr uint64_t kTopOfRange =
-      (std::numeric_limits<uint64_t>::max() / 4ULL) * 3ULL;
-  constexpr double kExpectedAverage = static_cast<double>(kTopOfRange / 2);
-  constexpr double kAllowedVariance = kExpectedAverage / 50.0;  // +/- 2%
-  constexpr int kMinAttempts = 1000;
-  constexpr int kMaxAttempts = 1000000;
-
-  double cumulative_average = 0.0;
-  int count = 0;
-  while (count < kMaxAttempts) {
-    uint64_t value = base::RandGenerator(kTopOfRange);
-    cumulative_average = (count * cumulative_average + value) / (count + 1);
-
-    // Don't quit too quickly for things to start converging, or we may have
-    // a false positive.
-    if (count > kMinAttempts &&
-        kExpectedAverage - kAllowedVariance < cumulative_average &&
-        cumulative_average < kExpectedAverage + kAllowedVariance) {
-      break;
-    }
-
-    ++count;
-  }
-
-  ASSERT_LT(count, kMaxAttempts) << "Expected average was " << kExpectedAverage
-                                 << ", average ended at " << cumulative_average;
-}
-
-TEST(PartitionAllocBaseRandUtilTest, RandUint64ProducesBothValuesOfAllBits) {
-  // This tests to see that our underlying random generator is good
-  // enough, for some value of good enough.
-  uint64_t kAllZeros = 0ULL;
-  uint64_t kAllOnes = ~kAllZeros;
-  uint64_t found_ones = kAllZeros;
-  uint64_t found_zeros = kAllOnes;
-
-  for (size_t i = 0; i < 1000; ++i) {
-    uint64_t value = base::RandUint64();
-    found_ones |= value;
-    found_zeros &= value;
-
-    if (found_zeros == kAllZeros && found_ones == kAllOnes)
-      return;
-  }
-
-  FAIL() << "Didn't achieve all bit values in maximum number of tries.";
-}
-
-// Benchmark test for RandBytes().  Disabled since it's intentionally slow and
-// does not test anything that isn't already tested by the existing RandBytes()
-// tests.
-TEST(PartitionAllocBaseRandUtilTest, DISABLED_RandBytesPerf) {
-  // Benchmark the performance of |kTestIterations| of RandBytes() using a
-  // buffer size of |kTestBufferSize|.
-  const int kTestIterations = 10;
-  const size_t kTestBufferSize = 1 * 1024 * 1024;
-
-  std::unique_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
-  const TimeTicks now = TimeTicks::Now();
-  for (int i = 0; i < kTestIterations; ++i)
-    base::RandBytes(buffer.get(), kTestBufferSize);
-  const TimeTicks end = TimeTicks::Now();
-
-  PA_LOG(INFO) << "RandBytes(" << kTestBufferSize
-               << ") took: " << (end - now).InMicroseconds() << "µs";
-}
-
-TEST(PartitionAllocBaseRandUtilTest,
-     InsecureRandomGeneratorProducesBothValuesOfAllBits) {
-  // This tests to see that our underlying random generator is good
-  // enough, for some value of good enough.
-  uint64_t kAllZeros = 0ULL;
-  uint64_t kAllOnes = ~kAllZeros;
-  uint64_t found_ones = kAllZeros;
-  uint64_t found_zeros = kAllOnes;
-
-  InsecureRandomGenerator generator;
-
-  for (size_t i = 0; i < 1000; ++i) {
-    uint64_t value = generator.RandUint64();
-    found_ones |= value;
-    found_zeros &= value;
-
-    if (found_zeros == kAllZeros && found_ones == kAllOnes)
-      return;
-  }
-
-  FAIL() << "Didn't achieve all bit values in maximum number of tries.";
-}
-
-namespace {
-
-constexpr double kXp1Percent = -2.33;
-constexpr double kXp99Percent = 2.33;
-
-double ChiSquaredCriticalValue(double nu, double x_p) {
-  // From "The Art Of Computer Programming" (TAOCP), Volume 2, Section 3.3.1,
-  // Table 1. This is the asymptotic value for nu > 30, up to O(1 / sqrt(nu)).
-  return nu + sqrt(2. * nu) * x_p + 2. / 3. * (x_p * x_p) - 2. / 3.;
-}
-
-int ExtractBits(uint64_t value, int from_bit, int num_bits) {
-  return (value >> from_bit) & ((1 << num_bits) - 1);
-}
-
-// Performs a Chi-Squared test on a subset of |num_bits| extracted starting from
-// |from_bit| in the generated value.
-//
-// See TAOCP, Volume 2, Section 3.3.1, and
-// https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test for details.
-//
-// This is only one of the many, many random number generator tests we could
-// do, but they are cumbersome, as they are typically very slow and expected to
-// fail from time to time, due to their probabilistic nature.
-//
-// The generator we use has however been vetted with the BigCrush suite from
-// TestU01, so this should suffice as a smoke test to catch a broken
-// implementation.
-bool ChiSquaredTest(InsecureRandomGenerator& gen,
-                    size_t n,
-                    int from_bit,
-                    int num_bits) {
-  const int range = 1 << num_bits;
-  PA_BASE_CHECK(static_cast<int>(n % range) == 0)
-      << "Makes computations simpler";
-  std::vector<size_t> samples(range, 0);
-
-  // Count how many samples of each value are found. All buckets should be
-  // almost equal if the generator is suitably uniformly random.
-  for (size_t i = 0; i < n; i++) {
-    int sample = ExtractBits(gen.RandUint64(), from_bit, num_bits);
-    samples[sample] += 1;
-  }
-
-  // Compute the Chi-Squared statistic, which is:
-  // \Sum_{k=0}^{range-1} \frac{(count - expected)^2}{expected}
-  double chi_squared = 0.;
-  double expected_count = n / range;
-  for (size_t sample_count : samples) {
-    double deviation = sample_count - expected_count;
-    chi_squared += (deviation * deviation) / expected_count;
-  }
-
-  // The generator should produce numbers that are not too far off (chi_squared
-  // lower than a given quantile), but not too close to the ideal distribution
-  // either (chi_squared is too low).
-  //
-  // See The Art Of Computer Programming, Volume 2, Section 3.3.1 for details.
-  return chi_squared > ChiSquaredCriticalValue(range - 1, kXp1Percent) &&
-         chi_squared < ChiSquaredCriticalValue(range - 1, kXp99Percent);
-}
-
-}  // namespace
-
-TEST(PartitionAllocBaseRandUtilTest, InsecureRandomGeneratorChiSquared) {
-  constexpr int kIterations = 50;
-
-  // Specifically test the low bits, which are usually weaker in random number
-  // generators. We don't use them for the 32 bit number generation, but let's
-  // make sure they are still suitable.
-  for (int start_bit : {1, 2, 3, 8, 12, 20, 32, 48, 54}) {
-    int pass_count = 0;
-    for (int i = 0; i < kIterations; i++) {
-      size_t samples = 1 << 16;
-      InsecureRandomGenerator gen;
-      // Fix the seed to make the test non-flaky.
-      gen.ReseedForTesting(kIterations + 1);
-      bool pass = ChiSquaredTest(gen, samples, start_bit, 8);
-      pass_count += pass;
-    }
-
-    // We exclude 1% on each side, so we expect 98% of tests to pass, meaning 98
-    // * kIterations / 100. However this is asymptotic, so add a bit of leeway.
-    int expected_pass_count = (kIterations * 98) / 100;
-    EXPECT_GE(pass_count, expected_pass_count - ((kIterations * 2) / 100))
-        << "For start_bit = " << start_bit;
-  }
-}
-
-}  // namespace partition_alloc::internal::base
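
For a sense of scale, the critical-value approximation used by the test can be evaluated directly for the 8-bit case exercised above (256 buckets, so nu = 255 degrees of freedom). A quick standalone check:

    #include <cmath>
    #include <cstdio>

    // Same asymptotic approximation as in the removed test (TAOCP Vol. 2, 3.3.1).
    double ChiSquaredCriticalValue(double nu, double x_p) {
      return nu + std::sqrt(2. * nu) * x_p + 2. / 3. * (x_p * x_p) - 2. / 3.;
    }

    int main() {
      // 1% and 99% quantiles used by the test.
      std::printf("low  = %.1f\n", ChiSquaredCriticalValue(255, -2.33));  // ~205
      std::printf("high = %.1f\n", ChiSquaredCriticalValue(255, 2.33));   // ~311
      return 0;
    }
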
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc
deleted file mode 100644
index e0d7e54..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#include <sstream>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_MAC)
-// TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK.
-// (See below.)
-#include <sys/random.h>
-#endif
-
-namespace {
-
-#if BUILDFLAG(IS_AIX)
-// AIX has no 64-bit support for O_CLOEXEC.
-static constexpr int kOpenFlags = O_RDONLY;
-#else
-static constexpr int kOpenFlags = O_RDONLY | O_CLOEXEC;
-#endif
-
-// We keep the file descriptor for /dev/urandom around so we don't need to
-// reopen it (which is expensive), and since we may not even be able to reopen
-// it if we are later put in a sandbox. This class wraps the file descriptor so
-// we can use a static-local variable to handle opening it on the first access.
-class URandomFd {
- public:
-  URandomFd() : fd_(PA_HANDLE_EINTR(open("/dev/urandom", kOpenFlags))) {
-    PA_BASE_CHECK(fd_ >= 0) << "Cannot open /dev/urandom";
-  }
-
-  ~URandomFd() { close(fd_); }
-
-  int fd() const { return fd_; }
-
- private:
-  const int fd_;
-};
-
-int GetUrandomFD() {
-  static partition_alloc::internal::base::NoDestructor<URandomFd> urandom_fd;
-  return urandom_fd->fd();
-}
-
-}  // namespace
-
-namespace partition_alloc::internal::base {
-
-// NOTE: In an ideal future, all implementations of this function will just
-// wrap BoringSSL's `RAND_bytes`. TODO(crbug.com/995996): Figure out the
-// build/test/performance issues with dcheng's CL
-// (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land
-// it or some form of it.
-void RandBytes(void* output, size_t output_length) {
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-  // Use `syscall(__NR_getrandom...` to avoid a dependency on
-  // `third_party/linux_syscall_support.h`.
-  //
-  // Here in PartitionAlloc, we don't need to look before we leap
-  // because we know that both Linux and CrOS only support kernels
-  // that do have this syscall defined. This diverges from upstream
-  // `//base` behavior both here and below.
-  const ssize_t r =
-      PA_HANDLE_EINTR(syscall(__NR_getrandom, output, output_length, 0));
-
-  // Return success only on total success. In case errno == ENOSYS (or any other
-  // error), we'll fall through to reading from urandom below.
-  if (output_length == static_cast<size_t>(r)) {
-    PA_MSAN_UNPOISON(output, output_length);
-    return;
-  }
-#elif BUILDFLAG(IS_MAC)
-  // TODO(crbug.com/995996): Enable this on iOS too, when sys/random.h arrives
-  // in its SDK.
-  if (getentropy(output, output_length) == 0) {
-    return;
-  }
-#endif
-  // If getrandom(2) above returned with an error and the /dev/urandom fallback
-  // took place on Linux/ChromeOS bots, they would fail with a CHECK in
-  // nacl_helper. The latter assumes that the number of open file descriptors
-  // must be constant. The nacl_helper knows about the FD from
-  // //base/rand_utils, but is not aware of the urandom_fd from this file (see
-  // CheckForExpectedNumberOfOpenFds).
-  //
-  // *  On `linux_chromium_asan_rel_ng` in
-  //    `ContentBrowserTest.RendererCrashCallStack`:
-  //    ```
-  //    [FATAL:rand_util_posix.cc(45)] Check failed: fd_ >= 0. Cannot open
-  //    /dev/urandom
-  //    ```
-  // *  On `linux-lacros-rel` in
-  //    `NaClBrowserTestGLibc.CrashInCallback`:
-  //    ```
-  //    2023-07-03T11:31:13.115755Z FATAL nacl_helper:
-  //    [nacl_sandbox_linux.cc(178)] Check failed: expected_num_fds ==
-  //    sandbox::ProcUtil::CountOpenFds(proc_fd_.get()) (6 vs. 7)
-  //    ```
-  const int urandom_fd = GetUrandomFD();
-  const bool success =
-      ReadFromFD(urandom_fd, static_cast<char*>(output), output_length);
-  PA_BASE_CHECK(success);
-}
-
-}  // namespace partition_alloc::internal::base
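For reference, a minimal, hypothetical sketch of how a caller might use the RandBytes() implementation removed above; the RandomSeed() helper does not exist in the tree and only illustrates the fill-a-buffer contract, using the rand_util.h header that is deleted elsewhere in this change:

  #include <cstdint>
  #include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"

  // Hypothetical helper: draws 64 bits of cryptographically secure randomness.
  uint64_t RandomSeed() {
    uint64_t seed;
    // RandBytes() fills the buffer via getrandom()/getentropy() where
    // available and falls back to the cached /dev/urandom descriptor.
    partition_alloc::internal::base::RandBytes(&seed, sizeof(seed));
    return seed;
  }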
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util_win.cc b/base/allocator/partition_allocator/partition_alloc_base/rand_util_win.cc
deleted file mode 100644
index c6071fa..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util_win.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-
-#include <stddef.h>
-#include <stdint.h>
-#include <windows.h>
-
-#include <algorithm>
-#include <limits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-// Prototype for ProcessPrng.
-// See: https://learn.microsoft.com/en-us/windows/win32/seccng/processprng
-extern "C" {
-BOOL WINAPI ProcessPrng(PBYTE pbData, SIZE_T cbData);
-}
-
-namespace partition_alloc::internal::base {
-
-void RandBytes(void* output, size_t output_length) {
-  // Import bcryptprimitives directly rather than cryptbase to avoid opening a
-  // handle to \\Device\KsecDD in the renderer.
-  // Note: we cannot use a magic static here as PA runs too early in process
-  // startup, but this should be safe as the process will be single-threaded
-  // when this first runs.
-  static decltype(&ProcessPrng) process_prng_fn = nullptr;
-  if (!process_prng_fn) {
-    HMODULE hmod = LoadLibraryW(L"bcryptprimitives.dll");
-    PA_BASE_CHECK(hmod);
-    process_prng_fn = reinterpret_cast<decltype(&ProcessPrng)>(
-        GetProcAddress(hmod, "ProcessPrng"));
-    PA_BASE_CHECK(process_prng_fn);
-  }
-  BOOL success = process_prng_fn(static_cast<BYTE*>(output), output_length);
-  // ProcessPrng is documented to always return TRUE.
-  PA_BASE_CHECK(success);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h b/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h
deleted file mode 100644
index c180164..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
-
-#include <errno.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-// ScopedClearLastError stores and resets the value of thread local error codes
-// (errno, GetLastError()), and restores them in the destructor. This is useful
-// to avoid side effects on these values in instrumentation functions that
-// interact with the OS.
-
-// Common implementation of ScopedClearLastError for all platforms. Use
-// ScopedClearLastError instead.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedClearLastErrorBase {
- public:
-  ScopedClearLastErrorBase() : last_errno_(errno) { errno = 0; }
-  ScopedClearLastErrorBase(const ScopedClearLastErrorBase&) = delete;
-  ScopedClearLastErrorBase& operator=(const ScopedClearLastErrorBase&) = delete;
-  ~ScopedClearLastErrorBase() { errno = last_errno_; }
-
- private:
-  const int last_errno_;
-};
-
-#if BUILDFLAG(IS_WIN)
-
-// Windows specific implementation of ScopedClearLastError.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedClearLastError
-    : public ScopedClearLastErrorBase {
- public:
-  ScopedClearLastError();
-  ScopedClearLastError(const ScopedClearLastError&) = delete;
-  ScopedClearLastError& operator=(const ScopedClearLastError&) = delete;
-  ~ScopedClearLastError();
-
- private:
-  const unsigned long last_system_error_;
-};
-
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-
-using ScopedClearLastError = ScopedClearLastErrorBase;
-
-#endif  // BUILDFLAG(IS_WIN)
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
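A minimal sketch of the usage pattern the removed header describes: save and clear the thread-local error state on entry, restore it on scope exit. The InstrumentationHook() function is hypothetical and only stands in for the "instrumentation functions that interact with the OS" mentioned above:

  #include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"

  // Hypothetical hook that must not disturb the caller's errno/GetLastError().
  void InstrumentationHook() {
    partition_alloc::internal::base::ScopedClearLastError scoped_clear_error;
    // ... make OS calls that may overwrite errno (and, on Windows, the
    // last-error code) ...
  }  // Destructor restores the original error values here.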
diff --git a/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc
deleted file mode 100644
index 680a086..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#endif  // BUILDFLAG(IS_WIN)
-
-namespace partition_alloc::internal::base {
-
-TEST(PAScopedClearLastError, TestNoError) {
-  errno = 1;
-  {
-    ScopedClearLastError clear_error;
-    EXPECT_EQ(0, errno);
-  }
-  EXPECT_EQ(1, errno);
-}
-
-TEST(PAScopedClearLastError, TestError) {
-  errno = 1;
-  {
-    ScopedClearLastError clear_error;
-    errno = 2;
-  }
-  EXPECT_EQ(1, errno);
-}
-
-#if BUILDFLAG(IS_WIN)
-
-TEST(PAScopedClearLastError, TestNoErrorWin) {
-  ::SetLastError(1);
-  {
-    ScopedClearLastError clear_error;
-    EXPECT_EQ(logging::SystemErrorCode(0), ::GetLastError());
-  }
-  EXPECT_EQ(logging::SystemErrorCode(1), ::GetLastError());
-}
-
-TEST(PAScopedClearLastError, TestErrorWin) {
-  ::SetLastError(1);
-  {
-    ScopedClearLastError clear_error;
-    ::SetLastError(2);
-  }
-  EXPECT_EQ(logging::SystemErrorCode(1), ::GetLastError());
-}
-
-#endif  // BUILDFLAG(IS_WIN)
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_win.cc b/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_win.cc
deleted file mode 100644
index d3e8f8e..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error_win.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
-
-#include <windows.h>
-
-namespace partition_alloc::internal::base {
-
-ScopedClearLastError::ScopedClearLastError()
-    : ScopedClearLastErrorBase(), last_system_error_(GetLastError()) {
-  SetLastError(0);
-}
-
-ScopedClearLastError::~ScopedClearLastError() {
-  SetLastError(last_system_error_);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.cc b/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.cc
deleted file mode 100644
index c5d330d..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.cc
+++ /dev/null
@@ -1,712 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h"
-
-#include <errno.h>
-#include <string.h>
-
-#include <algorithm>
-#include <limits>
-
-#include "build/build_config.h"
-
-#if !defined(NDEBUG)
-// In debug builds, we use RAW_CHECK() to print useful error messages, if
-// SafeSPrintf() is called with broken arguments.
-// As our contract promises that SafeSPrintf() can be called from any
-// restricted run-time context, it is not actually safe to call logging
-// functions from it; and we only ever do so for debug builds and hope for the
-// best. We should _never_ call any logging function other than RAW_CHECK(),
-// and we should _never_ include any logging code that is active in production
-// builds. Most notably, we should not include these logging functions in
-// unofficial release builds, even though those builds would otherwise have
-// DCHECKS() enabled.
-// In other words: please do not remove the #ifdef around this #include.
-// Instead, in production builds we opt for returning a degraded result,
-// whenever an error is encountered.
-// E.g. The broken function call
-//        SafeSPrintf("errno = %d (%x)", errno, strerror(errno))
-//      will print something like
-//        errno = 13, (%x)
-//      instead of
-//        errno = 13 (Access denied)
-//      In most of the anticipated use cases, that's probably the preferred
-//      behavior.
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#define DEBUG_CHECK PA_RAW_CHECK
-#else
-#define DEBUG_CHECK(x) \
-  do {                 \
-    if (x) {           \
-    }                  \
-  } while (0)
-#endif
-
-namespace partition_alloc::internal::base::strings {
-
-// The code in this file is extremely careful to be async-signal-safe.
-//
-// Most obviously, we avoid calling any code that could dynamically allocate
-// memory. Doing so would almost certainly result in bugs and dead-locks.
-// We also avoid calling any other STL functions that could have unintended
-// side-effects involving memory allocation or access to other shared
-// resources.
-//
-// But on top of that, we also avoid calling other library functions, as many
-// of them have the side-effect of calling getenv() (in order to deal with
-// localization) or accessing errno. The latter sounds benign, but there are
-// several execution contexts where it isn't even possible to safely read let
-// alone write errno.
-//
-// The stated design goal of the SafeSPrintf() function is that it can be
-// called from any context that can safely call C or C++ code (i.e. anything
-// that doesn't require assembly code).
-//
-// For a brief overview of some but not all of the issues with async-signal-
-// safety, refer to:
-// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
-
-namespace {
-const size_t kSSizeMaxConst = ((size_t)(ssize_t)-1) >> 1;
-
-const char kUpCaseHexDigits[] = "0123456789ABCDEF";
-const char kDownCaseHexDigits[] = "0123456789abcdef";
-}  // namespace
-
-#if defined(NDEBUG)
-// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
-// but C++ doesn't allow us to do that for constants. Instead, we have to
-// use careful casting and shifting. We later use a static_assert to
-// verify that this worked correctly.
-namespace {
-const size_t kSSizeMax = kSSizeMaxConst;
-}
-#else   // defined(NDEBUG)
-// For efficiency, we really need kSSizeMax to be a constant. But for unit
-// tests, it should be adjustable. This allows us to verify edge cases without
-// having to fill the entire available address space. As a compromise, we make
-// kSSizeMax adjustable in debug builds, and then only compile that particular
-// part of the unit test in debug builds.
-namespace {
-static size_t kSSizeMax = kSSizeMaxConst;
-}
-
-namespace internal {
-void SetSafeSPrintfSSizeMaxForTest(size_t max) {
-  kSSizeMax = max;
-}
-
-size_t GetSafeSPrintfSSizeMaxForTest() {
-  return kSSizeMax;
-}
-}  // namespace internal
-#endif  // defined(NDEBUG)
-
-namespace {
-class Buffer {
- public:
-  // |buffer| is caller-allocated storage that SafeSPrintf() writes to. It
-  // has |size| bytes of writable storage. It is the caller's responsibility
-  // to ensure that the buffer is at least one byte in size, so that it fits
-  // the trailing NUL that will be added by the destructor. The buffer also
-  // must be smaller than or equal to kSSizeMax in size.
-  Buffer(char* buffer, size_t size)
-      : buffer_(buffer),
-        size_(size - 1),  // Account for trailing NUL byte
-        count_(0) {
-// MSVS2013's standard library doesn't mark max() as constexpr yet. cl.exe
-// supports static_cast but doesn't really implement constexpr yet, so it
-// doesn't complain; clang does.
-#if __cplusplus >= 201103 && !(defined(__clang__) && BUILDFLAG(IS_WIN))
-    static_assert(kSSizeMaxConst ==
-                      static_cast<size_t>(std::numeric_limits<ssize_t>::max()),
-                  "kSSizeMaxConst should be the max value of an ssize_t");
-#endif
-    DEBUG_CHECK(size > 0);
-    DEBUG_CHECK(size <= kSSizeMax);
-  }
-
-  Buffer(const Buffer&) = delete;
-  Buffer& operator=(const Buffer&) = delete;
-
-  ~Buffer() {
-    // The code calling the constructor guaranteed that there was enough space
-    // to store a trailing NUL -- and in debug builds, we are actually
-    // verifying this with DEBUG_CHECK()s in the constructor. So, we can
-    // always unconditionally write the NUL byte in the destructor.  We do not
-    // need to adjust the count_, as SafeSPrintf() mirrors snprintf() in not
-    // including the NUL byte in its return code.
-    *GetInsertionPoint() = '\000';
-  }
-
-  // Returns true, iff the buffer is filled all the way to |kSSizeMax-1|. The
-  // caller can now stop adding more data, as GetCount() has reached its
-  // maximum possible value.
-  inline bool OutOfAddressableSpace() const {
-    return count_ == static_cast<size_t>(kSSizeMax - 1);
-  }
-
-  // Returns the number of bytes that would have been emitted to |buffer_|
-  // if it was sized sufficiently large. This number can be larger than
-  // |size_|, if the caller provided an insufficiently large output buffer.
-  // But it will never be bigger than |kSSizeMax-1|.
-  inline ssize_t GetCount() const {
-    DEBUG_CHECK(count_ < kSSizeMax);
-    return static_cast<ssize_t>(count_);
-  }
-
-  // Emits one |ch| character into the |buffer_| and updates the |count_| of
-  // characters that are currently supposed to be in the buffer.
-  // Returns "false", iff the buffer was already full.
-  // N.B. |count_| increases even if no characters have been written. This is
-  // needed so that GetCount() can return the number of bytes that should
-  // have been allocated for the |buffer_|.
-  inline bool Out(char ch) {
-    if (size_ >= 1 && count_ < size_) {
-      buffer_[count_] = ch;
-      return IncrementCountByOne();
-    }
-    // |count_| still needs to be updated, even if the buffer has been
-    // filled completely. This allows SafeSPrintf() to return the number of
-    // bytes that should have been emitted.
-    IncrementCountByOne();
-    return false;
-  }
-
-  // Inserts |padding|-|len| bytes worth of padding into the |buffer_|.
-  // |count_| will also be incremented by the number of bytes that were meant
-  // to be emitted. The |pad| character is typically either a ' ' space
-  // or a '0' zero, but other non-NUL values are legal.
-  // Returns "false", iff the |buffer_| filled up (i.e. |count_|
-  // overflowed |size_|) at any time during padding.
-  inline bool Pad(char pad, size_t padding, size_t len) {
-    DEBUG_CHECK(pad);
-    DEBUG_CHECK(padding <= kSSizeMax);
-    for (; padding > len; --padding) {
-      if (!Out(pad)) {
-        if (--padding) {
-          IncrementCount(padding - len);
-        }
-        return false;
-      }
-    }
-    return true;
-  }
-
-  // POSIX doesn't define any async-signal-safe function for converting
-  // an integer to ASCII. Define our own version.
-  //
-  // This also gives us the ability to make the function a little more
-  // powerful and have it deal with |padding|, with truncation, and with
-  // predicting the length of the untruncated output.
-  //
-  // IToASCII() converts an integer |i| to ASCII.
-  //
-  // Unlike similar functions in the standard C library, it never appends a
-  // NUL character. This is left for the caller to do.
-  //
-  // While the function signature takes a signed int64_t, the code decides at
-  // run-time whether to treat the argument as signed (int64_t) or as unsigned
-  // (uint64_t) based on the value of |sign|.
-  //
-  // It supports |base|s 2 through 16. Only a |base| of 10 is allowed to have
-  // a |sign|. Otherwise, |i| is treated as unsigned.
-  //
-  // For bases larger than 10, |upcase| decides whether lower-case or upper-
-  // case letters should be used to designate digits greater than 10.
-  //
-  // Padding can be done with either '0' zeros or ' ' spaces. Padding has to
-  // be positive and will always be applied to the left of the output.
-  //
-  // Prepends a |prefix| to the number (e.g. "0x"). This prefix goes to
-  // the left of |padding|, if |pad| is '0'; and to the right of |padding|
-  // if |pad| is ' '.
-  //
-  // Returns "false", if the |buffer_| overflowed at any time.
-  bool IToASCII(bool sign,
-                bool upcase,
-                int64_t i,
-                size_t base,
-                char pad,
-                size_t padding,
-                const char* prefix);
-
- private:
-  // Increments |count_| by |inc| unless this would cause |count_| to
-  // overflow |kSSizeMax-1|. Returns "false", iff an overflow was detected;
-  // it then clamps |count_| to |kSSizeMax-1|.
-  inline bool IncrementCount(size_t inc) {
-    // "inc" is either 1 or a "padding" value. Padding is clamped at
-    // run-time to at most kSSizeMax-1. So, we know that "inc" is always in
-    // the range 1..kSSizeMax-1.
-    // This allows us to compute "kSSizeMax - 1 - inc" without incurring any
-    // integer overflows.
-    DEBUG_CHECK(inc <= kSSizeMax - 1);
-    if (count_ > kSSizeMax - 1 - inc) {
-      count_ = kSSizeMax - 1;
-      return false;
-    }
-    count_ += inc;
-    return true;
-  }
-
-  // Convenience method for the common case of incrementing |count_| by one.
-  inline bool IncrementCountByOne() { return IncrementCount(1); }
-
-  // Return the current insertion point into the buffer. This is typically
-  // at |buffer_| + |count_|, but could be before that if truncation
-  // happened. It always points to one byte past the last byte that was
-  // successfully placed into the |buffer_|.
-  inline char* GetInsertionPoint() const {
-    size_t idx = count_;
-    if (idx > size_) {
-      idx = size_;
-    }
-    return buffer_ + idx;
-  }
-
-  // User-provided buffer that will receive the fully formatted output string.
-  char* buffer_;
-
-  // Number of bytes that are available in the buffer excluding the trailing
-  // NUL byte that will be added by the destructor.
-  const size_t size_;
-
-  // Number of bytes that would have been emitted to the buffer, if the buffer
-  // was sufficiently big. This number always excludes the trailing NUL byte
-  // and it is guaranteed to never grow bigger than kSSizeMax-1.
-  size_t count_;
-};
-
-bool Buffer::IToASCII(bool sign,
-                      bool upcase,
-                      int64_t i,
-                      size_t base,
-                      char pad,
-                      size_t padding,
-                      const char* prefix) {
-  // Sanity check for parameters. None of these should ever fail, but see
-  // above for the rationale why we can't call CHECK().
-  DEBUG_CHECK(base >= 2);
-  DEBUG_CHECK(base <= 16);
-  DEBUG_CHECK(!sign || base == 10);
-  DEBUG_CHECK(pad == '0' || pad == ' ');
-  DEBUG_CHECK(padding <= kSSizeMax);
-  DEBUG_CHECK(!(sign && prefix && *prefix));
-
-  // Handle negative numbers, if the caller indicated that |i| should be
-  // treated as a signed number; otherwise treat |i| as unsigned (even if the
-  // MSB is set!)
-  // Details are tricky, because of limited data-types, but equivalent pseudo-
-  // code would look like:
-  //   if (sign && i < 0)
-  //     prefix = "-";
-  //   num = abs(i);
-  size_t minint = 0;
-  uint64_t num;
-  if (sign && i < 0) {
-    prefix = "-";
-
-    // Turn our number positive.
-    if (i == std::numeric_limits<int64_t>::min()) {
-      // The most negative integer needs special treatment.
-      minint = 1;
-      num = static_cast<uint64_t>(-(i + 1));
-    } else {
-      // "Normal" negative numbers are easy.
-      num = static_cast<uint64_t>(-i);
-    }
-  } else {
-    num = static_cast<uint64_t>(i);
-  }
-
-  // If padding with '0' zero, emit the prefix or '-' character now. Otherwise,
-  // make the prefix accessible in reverse order, so that we can later output
-  // it right between padding and the number.
-  // We cannot choose the easier approach of just reversing the number, as that
-  // fails in situations where we need to truncate numbers that have padding
-  // and/or prefixes.
-  const char* reverse_prefix = nullptr;
-  if (prefix && *prefix) {
-    if (pad == '0') {
-      while (*prefix) {
-        if (padding) {
-          --padding;
-        }
-        Out(*prefix++);
-      }
-      prefix = nullptr;
-    } else {
-      for (reverse_prefix = prefix; *reverse_prefix; ++reverse_prefix) {
-      }
-    }
-  } else {
-    prefix = nullptr;
-  }
-  const size_t prefix_length = static_cast<size_t>(reverse_prefix - prefix);
-
-  // Loop until we have converted the entire number. Output at least one
-  // character (i.e. '0').
-  size_t start = count_;
-  size_t discarded = 0;
-  bool started = false;
-  do {
-    // Make sure there is still enough space left in our output buffer.
-    if (count_ >= size_) {
-      if (start < size_) {
-        // It is rare that we need to output a partial number. But if asked
-        // to do so, we will still make sure we output the correct number of
-        // leading digits.
-        // Since we are generating the digits in reverse order, we actually
-        // have to discard digits in the order that we have already emitted
-        // them. This is essentially equivalent to:
-        //   memmove(buffer_ + start, buffer_ + start + 1, size_ - start - 1)
-        for (char *move = buffer_ + start, *end = buffer_ + size_ - 1;
-             move < end; ++move) {
-          *move = move[1];
-        }
-        ++discarded;
-        --count_;
-      } else if (count_ - size_ > 1) {
-        // Need to increment either |count_| or |discarded| to make progress.
-        // The latter is more efficient, as it eventually triggers fast
-        // handling of padding. But we have to ensure we don't accidentally
-        // change the overall state (i.e. switch the state-machine from
-        // discarding to non-discarding). |count_| needs to always stay
-        // bigger than |size_|.
-        --count_;
-        ++discarded;
-      }
-    }
-
-    // Output the next digit and (if necessary) compensate for the most
-    // negative integer needing special treatment. This works because,
-    // no matter the bit width of the integer, the lowest-most decimal
-    // integer always ends in 2, 4, 6, or 8.
-    if (!num && started) {
-      if (reverse_prefix > prefix) {
-        Out(*--reverse_prefix);
-      } else {
-        Out(pad);
-      }
-    } else {
-      started = true;
-      Out((upcase ? kUpCaseHexDigits
-                  : kDownCaseHexDigits)[num % base + minint]);
-    }
-
-    minint = 0;
-    num /= base;
-
-    // Add padding, if requested.
-    if (padding > 0) {
-      --padding;
-
-      // Performance optimization for when we are asked to output excessive
-      // padding, but our output buffer is limited in size.  Even if we output
-      // a 64bit number in binary, we would never write more than 64 plus
-      // prefix non-padding characters. So, once this limit has been passed,
-      // any further state change can be computed arithmetically; we know that
-      // by this time, our entire final output consists of padding characters
-      // that have all already been output.
-      if (discarded > 8 * sizeof(num) + prefix_length) {
-        IncrementCount(padding);
-        padding = 0;
-      }
-    }
-  } while (num || padding || (reverse_prefix > prefix));
-
-  if (start < size_) {
-    // Conversion to ASCII actually resulted in the digits being in reverse
-    // order. We can't easily generate them in forward order, as we can't tell
-    // the number of characters needed until we are done converting.
-    // So, now, we reverse the string (except for the possible '-' sign).
-    char* front = buffer_ + start;
-    char* back = GetInsertionPoint();
-    while (--back > front) {
-      char ch = *back;
-      *back = *front;
-      *front++ = ch;
-    }
-  }
-  IncrementCount(discarded);
-  return !discarded;
-}
-
-}  // anonymous namespace
-
-namespace internal {
-
-ssize_t SafeSNPrintf(char* buf,
-                     size_t sz,
-                     const char* fmt,
-                     const Arg* args,
-                     const size_t max_args) {
-  // Make sure that at least one NUL byte can be written, and that the buffer
-  // never overflows kSSizeMax. Not only does that use up most or all of the
-  // address space, it also would result in a return code that cannot be
-  // represented.
-  if (static_cast<ssize_t>(sz) < 1) {
-    return -1;
-  }
-  sz = std::min(sz, kSSizeMax);
-
-  // Iterate over format string and interpret '%' arguments as they are
-  // encountered.
-  Buffer buffer(buf, sz);
-  size_t padding;
-  char pad;
-  for (unsigned int cur_arg = 0; *fmt && !buffer.OutOfAddressableSpace();) {
-    if (*fmt++ == '%') {
-      padding = 0;
-      pad = ' ';
-      char ch = *fmt++;
-    format_character_found:
-      switch (ch) {
-        case '0':
-        case '1':
-        case '2':
-        case '3':
-        case '4':
-        case '5':
-        case '6':
-        case '7':
-        case '8':
-        case '9':
-          // Found a width parameter. Convert to an integer value and store in
-          // "padding". If the leading digit is a zero, change the padding
-          // character from a space ' ' to a zero '0'.
-          pad = ch == '0' ? '0' : ' ';
-          for (;;) {
-            const size_t digit = static_cast<size_t>(ch - '0');
-            // The maximum allowed padding fills all the available address
-            // space and leaves just enough space to insert the trailing NUL.
-            const size_t max_padding = kSSizeMax - 1;
-            if (padding > max_padding / 10 ||
-                10 * padding > max_padding - digit) {
-              DEBUG_CHECK(padding <= max_padding / 10 &&
-                          10 * padding <= max_padding - digit);
-              // Integer overflow detected. Skip the rest of the width until
-              // we find the format character, then do the normal error
-              // handling.
-            padding_overflow:
-              padding = max_padding;
-              while ((ch = *fmt++) >= '0' && ch <= '9') {
-              }
-              if (cur_arg < max_args) {
-                ++cur_arg;
-              }
-              goto fail_to_expand;
-            }
-            padding = 10 * padding + digit;
-            if (padding > max_padding) {
-              // This doesn't happen for "sane" values of kSSizeMax. But once
-              // kSSizeMax gets smaller than about 10, our earlier range checks
-              // are incomplete. Unittests do trigger this artificial corner
-              // case.
-              DEBUG_CHECK(padding <= max_padding);
-              goto padding_overflow;
-            }
-            ch = *fmt++;
-            if (ch < '0' || ch > '9') {
-              // Reached the end of the width parameter. This is where the
-              // format character is found.
-              goto format_character_found;
-            }
-          }
-        case 'c': {  // Output an ASCII character.
-          // Check that there are arguments left to be inserted.
-          if (cur_arg >= max_args) {
-            DEBUG_CHECK(cur_arg < max_args);
-            goto fail_to_expand;
-          }
-
-          // Check that the argument has the expected type.
-          const Arg& arg = args[cur_arg++];
-          if (arg.type != Arg::INT && arg.type != Arg::UINT) {
-            DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
-            goto fail_to_expand;
-          }
-
-          // Apply padding, if needed.
-          buffer.Pad(' ', padding, 1);
-
-          // Convert the argument to an ASCII character and output it.
-          char as_char = static_cast<char>(arg.integer.i);
-          if (!as_char) {
-            goto end_of_output_buffer;
-          }
-          buffer.Out(as_char);
-          break;
-        }
-        case 'd':  // Output a possibly signed decimal value.
-        case 'o':  // Output an unsigned octal value.
-        case 'x':  // Output an unsigned hexadecimal value.
-        case 'X':
-        case 'p': {  // Output a pointer value.
-          // Check that there are arguments left to be inserted.
-          if (cur_arg >= max_args) {
-            DEBUG_CHECK(cur_arg < max_args);
-            goto fail_to_expand;
-          }
-
-          const Arg& arg = args[cur_arg++];
-          int64_t i;
-          const char* prefix = nullptr;
-          if (ch != 'p') {
-            // Check that the argument has the expected type.
-            if (arg.type != Arg::INT && arg.type != Arg::UINT) {
-              DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
-              goto fail_to_expand;
-            }
-            i = arg.integer.i;
-
-            if (ch != 'd') {
-              // The Arg() constructor automatically performed sign expansion on
-              // signed parameters. This is great when outputting a %d decimal
-              // number, but can result in unexpected leading 0xFF bytes when
-              // outputting a %x hexadecimal number. Mask bits, if necessary.
-              // We have to do this here, instead of in the Arg() constructor,
-              // as the Arg() constructor cannot tell whether we will output a
-              // %d or a %x. Only the latter should experience masking.
-              if (arg.integer.width < sizeof(int64_t)) {
-                i &= (1LL << (8 * arg.integer.width)) - 1;
-              }
-            }
-          } else {
-            // Pointer values require an actual pointer or a string.
-            if (arg.type == Arg::POINTER) {
-              i = static_cast<int64_t>(reinterpret_cast<uintptr_t>(arg.ptr));
-            } else if (arg.type == Arg::STRING) {
-              i = static_cast<int64_t>(reinterpret_cast<uintptr_t>(arg.str));
-            } else if (arg.type == Arg::INT &&
-                       arg.integer.width == sizeof(NULL) &&
-                       arg.integer.i == 0) {  // Allow C++'s version of NULL
-              i = 0;
-            } else {
-              DEBUG_CHECK(arg.type == Arg::POINTER || arg.type == Arg::STRING);
-              goto fail_to_expand;
-            }
-
-            // Pointers always include the "0x" prefix.
-            prefix = "0x";
-          }
-
-          // Use IToASCII() to convert to ASCII representation. For decimal
-          // numbers, optionally print a sign. For hexadecimal numbers,
-          // distinguish between upper and lower case. %p addresses are always
-          // printed as upcase. Supports base 8, 10, and 16. Prints padding
-          // and/or prefixes, if so requested.
-          buffer.IToASCII(ch == 'd' && arg.type == Arg::INT, ch != 'x', i,
-                          ch == 'o'   ? 8
-                          : ch == 'd' ? 10
-                                      : 16,
-                          pad, padding, prefix);
-          break;
-        }
-        case 's': {
-          // Check that there are arguments left to be inserted.
-          if (cur_arg >= max_args) {
-            DEBUG_CHECK(cur_arg < max_args);
-            goto fail_to_expand;
-          }
-
-          // Check that the argument has the expected type.
-          const Arg& arg = args[cur_arg++];
-          const char* s;
-          if (arg.type == Arg::STRING) {
-            s = arg.str ? arg.str : "<NULL>";
-          } else if (arg.type == Arg::INT &&
-                     arg.integer.width == sizeof(NULL) &&
-                     arg.integer.i == 0) {  // Allow C++'s version of NULL
-            s = "<NULL>";
-          } else {
-            DEBUG_CHECK(arg.type == Arg::STRING);
-            goto fail_to_expand;
-          }
-
-          // Apply padding, if needed. This requires us to first check the
-          // length of the string that we are outputting.
-          if (padding) {
-            size_t len = 0;
-            for (const char* src = s; *src++;) {
-              ++len;
-            }
-            buffer.Pad(' ', padding, len);
-          }
-
-          // Printing a string involves nothing more than copying it into the
-          // output buffer and making sure we don't output more bytes than
-          // available space; Out() takes care of doing that.
-          for (const char* src = s; *src;) {
-            buffer.Out(*src++);
-          }
-          break;
-        }
-        case '%':
-          // Quoted percent '%' character.
-          goto copy_verbatim;
-        fail_to_expand:
-          // C++ gives us tools to do type checking -- something that snprintf()
-          // could never really do. So, whenever we see arguments that don't
-          // match up with the format string, we refuse to output them. But
-          // since we have to be extremely conservative about being async-
-          // signal-safe, we are limited in the type of error handling that we
-          // can do in production builds (in debug builds we can use
-          // DEBUG_CHECK() and hope for the best). So, all we do is pass the
-          // format string unchanged. That should eventually get the user's
-          // attention; and in the meantime, it hopefully doesn't lose too much
-          // data.
-        default:
-          // Unknown or unsupported format character. Just copy verbatim to
-          // output.
-          buffer.Out('%');
-          DEBUG_CHECK(ch);
-          if (!ch) {
-            goto end_of_format_string;
-          }
-          buffer.Out(ch);
-          break;
-      }
-    } else {
-    copy_verbatim:
-      buffer.Out(fmt[-1]);
-    }
-  }
-end_of_format_string:
-end_of_output_buffer:
-  return buffer.GetCount();
-}
-
-}  // namespace internal
-
-ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt) {
-  // Make sure that at least one NUL byte can be written, and that the buffer
-  // never overflows kSSizeMax. Not only does that use up most or all of the
-  // address space, it also would result in a return code that cannot be
-  // represented.
-  if (static_cast<ssize_t>(sz) < 1) {
-    return -1;
-  }
-  sz = std::min(sz, kSSizeMax);
-
-  Buffer buffer(buf, sz);
-
-  // In the slow-path, we deal with errors by copying the contents of
-  // "fmt" unexpanded. This means, if there are no arguments passed, the
-  // SafeSPrintf() function always degenerates to a version of strncpy() that
-  // de-duplicates '%' characters.
-  const char* src = fmt;
-  for (; *src; ++src) {
-    buffer.Out(*src);
-    DEBUG_CHECK(src[0] != '%' || src[1] == '%');
-    if (src[0] == '%' && src[1] == '%') {
-      ++src;
-    }
-  }
-  return buffer.GetCount();
-}
-
-}  // namespace partition_alloc::internal::base::strings
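A small sketch of the degraded release-build behavior that the fail_to_expand comments above describe: a mismatched argument leaves its format character unexpanded instead of crashing. The buffer contents shown follow the documented contract rather than captured output:

  #include "base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h"

  char buf[20];
  // 1 is not a valid argument for %p, so "%p" is copied verbatim and the
  // argument is skipped; 2 still expands %d. Debug builds would DEBUG_CHECK().
  partition_alloc::internal::base::strings::SafeSPrintf(buf, "%p %d", 1, 2);
  // buf now holds "%p 2".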
diff --git a/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h b/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h
deleted file mode 100644
index 3d9c95e..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_STRINGS_SAFE_SPRINTF_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_STRINGS_SAFE_SPRINTF_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-// For ssize_t
-#include <unistd.h>
-#endif
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base::strings {
-
-#if defined(COMPILER_MSVC)
-// Define ssize_t inside of our namespace.
-#if defined(_WIN64)
-typedef __int64 ssize_t;
-#else
-typedef long ssize_t;
-#endif
-#endif
-
-// SafeSPrintf() is a type-safe and completely self-contained version of
-// snprintf().
-//
-// SafeSNPrintf() is an alternative function signature that can be used when
-// not dealing with fixed-sized buffers. When possible, SafeSPrintf() should
-// always be used instead of SafeSNPrintf().
-//
-// These functions allow for formatting complicated messages from contexts that
-// require strict async-signal-safety. In fact, it is safe to call them from
-// any low-level execution context, as they are guaranteed to make no library
-// or system calls. They deliberately never touch "errno", either.
-//
-// The only exception to this rule is that in debug builds the code calls
-// RAW_CHECK() to help diagnose problems when the format string does not
-// match the rest of the arguments. In release builds, no CHECK()s are used,
-// and SafeSPrintf() instead returns an output string that expands only
-// those arguments that match their format characters. Mismatched arguments
-// are ignored.
-//
-// The code currently only supports a subset of format characters:
-//   %c, %o, %d, %x, %X, %p, and %s.
-//
-// SafeSPrintf() aims to be as liberal as reasonably possible. Integer-like
-// values of arbitrary width can be passed to all of the format characters
-// that expect integers. Thus, it is explicitly legal to pass an "int" to
-// "%c", and output will automatically look at the LSB only. It is also
-// explicitly legal to pass either signed or unsigned values, and the format
-// characters will automatically interpret the arguments accordingly.
-//
-// It is still not legal to mix-and-match integer-like values with pointer
-// values. For instance, you cannot pass a pointer to %x, nor can you pass an
-// integer to %p.
-//
-// The one exception is "0" zero being accepted by "%p". This works-around
-// the problem of C++ defining NULL as an integer-like value.
-//
-// All format characters take an optional width parameter. This must be a
-// positive integer. For %d, %o, %x, %X and %p, if the width starts with
-// a leading '0', padding is done with '0' instead of ' ' characters.
-//
-// There are a few features of snprintf()-style format strings that
-// SafeSPrintf() does not support at this time.
-//
-// If an actual user showed up, there would be no particularly strong reason
-// they couldn't be added. But that assumes that the trade-offs between
-// complexity and utility are favorable.
-//
-// For example, adding support for negative padding widths and for %n would
-// likely be viewed positively. Both are clearly useful, low-risk, easy to
-// test, don't jeopardize the async-signal-safety of the code, and overall
-// have little impact on other parts of the SafeSPrintf() function.
-//
-// On the other hand, adding support for alternate forms, positional
-// arguments, grouping, wide characters, localization or floating point
-// numbers is unlikely to ever be added.
-//
-// SafeSPrintf() and SafeSNPrintf() mimic the behavior of snprintf() and they
-// return the number of bytes needed to store the untruncated output. This
-// does *not* include the terminating NUL byte.
-//
-// They return -1, iff a fatal error happened. This typically can only happen
-// if the buffer size is a) negative, or b) zero (i.e. not even the NUL byte
-// can be written). The return value can never be larger than SSIZE_MAX-1.
-// This ensures that the caller can always add one to the signed return code
-// in order to determine the amount of storage that needs to be allocated.
-//
-// While the code supports type checking and while it is generally very careful
-// to avoid printing incorrect values, it tends to be conservative in printing
-// as much as possible, even when given incorrect parameters. Typically, in
-// case of an error, the format string will not be expanded. (i.e. something
-// like SafeSPrintf(buf, "%p %d", 1, 2) results in "%p 2"). See above for
-// the use of RAW_CHECK() in debug builds, though.
-//
-// Basic example:
-//   char buf[20];
-//   base::strings::SafeSPrintf(buf, "The answer: %2d", 42);
-//
-// Example with dynamically sized buffer (async-signal-safe). This code won't
-// work on Visual Studio, as it requires dynamically allocating arrays on the
-// stack. Consider picking a smaller value for |kMaxSize| if stack size is
-// limited and known. On the other hand, if the parameters to SafeSNPrintf()
-// are trusted and not controllable by the user, you can consider eliminating
-// the check for |kMaxSize| altogether. The current value of SSIZE_MAX is
-// essentially a no-op that just illustrates how to implement an upper bound:
-//   const size_t kInitialSize = 128;
-//   const size_t kMaxSize = std::numeric_limits<ssize_t>::max();
-//   size_t size = kInitialSize;
-//   for (;;) {
-//     char buf[size];
-//     size = SafeSNPrintf(buf, size, "Error message \"%s\"\n", err) + 1;
-//     if (sizeof(buf) < kMaxSize && size > kMaxSize) {
-//       size = kMaxSize;
-//       continue;
-//     } else if (size > sizeof(buf))
-//       continue;
-//     write(2, buf, size-1);
-//     break;
-//   }
-
-namespace internal {
-// Helpers that use C++ overloading, templates, and specializations to deduce
-// and record type information from function arguments. This allows us to
-// later write a type-safe version of snprintf().
-
-struct Arg {
-  enum Type { INT, UINT, STRING, POINTER };
-
-  // Any integer-like value.
-  Arg(signed char c) : type(INT) {
-    integer.i = c;
-    integer.width = sizeof(char);
-  }
-  Arg(unsigned char c) : type(UINT) {
-    integer.i = c;
-    integer.width = sizeof(char);
-  }
-  Arg(signed short j) : type(INT) {
-    integer.i = j;
-    integer.width = sizeof(short);
-  }
-  Arg(unsigned short j) : type(UINT) {
-    integer.i = j;
-    integer.width = sizeof(short);
-  }
-  Arg(signed int j) : type(INT) {
-    integer.i = j;
-    integer.width = sizeof(int);
-  }
-  Arg(unsigned int j) : type(UINT) {
-    integer.i = j;
-    integer.width = sizeof(int);
-  }
-  Arg(signed long j) : type(INT) {
-    integer.i = j;
-    integer.width = sizeof(long);
-  }
-  Arg(unsigned long j) : type(UINT) {
-    integer.i = static_cast<int64_t>(j);
-    integer.width = sizeof(long);
-  }
-  Arg(signed long long j) : type(INT) {
-    integer.i = j;
-    integer.width = sizeof(long long);
-  }
-  Arg(unsigned long long j) : type(UINT) {
-    integer.i = static_cast<int64_t>(j);
-    integer.width = sizeof(long long);
-  }
-
-  // A C-style text string.
-  Arg(const char* s) : str(s), type(STRING) {}
-  Arg(char* s) : str(s), type(STRING) {}
-
-  // Any pointer value that can be cast to a "void*".
-  template <class T>
-  Arg(T* p) : ptr((void*)p), type(POINTER) {}
-
-  union {
-    // An integer-like value.
-    struct {
-      int64_t i;
-      unsigned char width;
-    } integer;
-
-    // A C-style text string.
-    const char* str;
-
-    // A pointer to an arbitrary object.
-    const void* ptr;
-  };
-  const enum Type type;
-};
-
-// This is the internal function that performs the actual formatting of
-// an snprintf()-style format string.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-ssize_t SafeSNPrintf(char* buf,
-                     size_t sz,
-                     const char* fmt,
-                     const Arg* args,
-                     size_t max_args);
-
-#if !defined(NDEBUG)
-// In debug builds, allow unit tests to artificially lower the kSSizeMax
-// constant that is used as a hard upper-bound for all buffers. In normal
-// use, this constant should always be std::numeric_limits<ssize_t>::max().
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetSafeSPrintfSSizeMaxForTest(size_t max);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetSafeSPrintfSSizeMaxForTest();
-#endif
-
-}  // namespace internal
-
-template <typename... Args>
-ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args... args) {
-  // Use Arg() object to record type information and then copy arguments to an
-  // array to make it easier to iterate over them.
-  const internal::Arg arg_array[] = {args...};
-  return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
-}
-
-template <size_t N, typename... Args>
-ssize_t SafeSPrintf(char (&buf)[N], const char* fmt, Args... args) {
-  // Use Arg() object to record type information and then copy arguments to an
-  // array to make it easier to iterate over them.
-  const internal::Arg arg_array[] = {args...};
-  return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
-}
-
-// Fast-path when we don't actually need to substitute any arguments.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt);
-template <size_t N>
-inline ssize_t SafeSPrintf(char (&buf)[N], const char* fmt) {
-  return SafeSNPrintf(buf, N, fmt);
-}
-
-}  // namespace partition_alloc::internal::base::strings
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_STRINGS_SAFE_SPRINTF_H_
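A brief sketch of the optional width parameter documented above; the buffer contents shown follow the documented padding rules (a leading '0' selects zero padding, otherwise spaces) rather than captured output:

  #include "base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h"

  char buf[16];
  // Width 8 with a leading '0' pads the hex value with zeros on the left.
  partition_alloc::internal::base::strings::SafeSPrintf(buf, "%08x", 0x1234);
  // buf == "00001234"
  // Width 8 without the leading '0' pads with spaces instead.
  partition_alloc::internal::base::strings::SafeSPrintf(buf, "%8d", 42);
  // buf == "      42"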
diff --git a/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc
deleted file mode 100644
index d242b61..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc
+++ /dev/null
@@ -1,770 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h"
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <limits>
-#include <memory>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// Death tests on Android are currently very flaky. No need to add more flaky
-// tests, as they just make it hard to spot real problems.
-// TODO(markus): See if the restrictions on Android can eventually be lifted.
-#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
-#define ALLOW_DEATH_TEST
-#endif
-
-namespace partition_alloc::internal::base::strings {
-
-TEST(SafeSPrintfTestPA, Empty) {
-  char buf[2] = {'X', 'X'};
-
-  // Negative buffer size should always result in an error.
-  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
-  EXPECT_EQ('X', buf[0]);
-  EXPECT_EQ('X', buf[1]);
-
-  // Zero buffer size should always result in an error.
-  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
-  EXPECT_EQ('X', buf[0]);
-  EXPECT_EQ('X', buf[1]);
-
-  // A one-byte buffer should always print a single NUL byte.
-  EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
-  EXPECT_EQ(0, buf[0]);
-  EXPECT_EQ('X', buf[1]);
-  buf[0] = 'X';
-
-  // A larger buffer should leave the trailing bytes unchanged.
-  EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
-  EXPECT_EQ(0, buf[0]);
-  EXPECT_EQ('X', buf[1]);
-  buf[0] = 'X';
-
-  // The same test using SafeSPrintf() instead of SafeSNPrintf().
-  EXPECT_EQ(0, SafeSPrintf(buf, ""));
-  EXPECT_EQ(0, buf[0]);
-  EXPECT_EQ('X', buf[1]);
-  buf[0] = 'X';
-}
-
-TEST(SafeSPrintfTestPA, NoArguments) {
-  // Output a text message that doesn't require any substitutions. This
-  // is roughly equivalent to calling strncpy() (but unlike strncpy(), it does
-  // always add a trailing NUL; it always deduplicates '%' characters).
-  static const char text[] = "hello world";
-  char ref[20], buf[20];
-  memset(ref, 'X', sizeof(ref));
-  memcpy(buf, ref, sizeof(buf));
-
-  // A negative buffer size should always result in an error.
-  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
-  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
-
-  // Zero buffer size should always result in an error.
-  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
-  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
-
-  // A one-byte buffer should always print a single NUL byte.
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSNPrintf(buf, 1, text));
-  EXPECT_EQ(0, buf[0]);
-  EXPECT_TRUE(!memcmp(buf + 1, ref + 1, sizeof(buf) - 1));
-  memcpy(buf, ref, sizeof(buf));
-
-  // A larger (but limited) buffer should always leave the trailing bytes
-  // unchanged.
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSNPrintf(buf, 2, text));
-  EXPECT_EQ(text[0], buf[0]);
-  EXPECT_EQ(0, buf[1]);
-  EXPECT_TRUE(!memcmp(buf + 2, ref + 2, sizeof(buf) - 2));
-  memcpy(buf, ref, sizeof(buf));
-
-  // An unrestricted buffer length should always leave the trailing bytes
-  // unchanged.
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
-            SafeSNPrintf(buf, sizeof(buf), text));
-  EXPECT_EQ(std::string(text), std::string(buf));
-  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
-                      sizeof(buf) - sizeof(text)));
-  memcpy(buf, ref, sizeof(buf));
-
-  // The same test using SafeSPrintf() instead of SafeSNPrintf().
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSPrintf(buf, text));
-  EXPECT_EQ(std::string(text), std::string(buf));
-  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
-                      sizeof(buf) - sizeof(text)));
-  memcpy(buf, ref, sizeof(buf));
-
-  // Check for deduplication of '%' percent characters.
-  EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
-#if defined(NDEBUG)
-  EXPECT_EQ(1, SafeSPrintf(buf, "%"));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
-  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
-  EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
-  EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
-#endif
-}
-
-TEST(SafeSPrintfTestPA, OneArgument) {
-  // Test basic single-argument single-character substitution.
-  const char text[] = "hello world";
-  const char fmt[] = "hello%cworld";
-  char ref[20], buf[20];
-  memset(ref, 'X', sizeof(buf));
-  memcpy(buf, ref, sizeof(buf));
-
-  // A negative buffer size should always result in an error.
-  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
-  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
-
-  // Zero buffer size should always result in an error.
-  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
-  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
-
-  // A one-byte buffer should always print a single NUL byte.
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
-            SafeSNPrintf(buf, 1, fmt, ' '));
-  EXPECT_EQ(0, buf[0]);
-  EXPECT_TRUE(!memcmp(buf + 1, ref + 1, sizeof(buf) - 1));
-  memcpy(buf, ref, sizeof(buf));
-
-  // A larger (but limited) buffer should always leave the trailing bytes
-  // unchanged.
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
-            SafeSNPrintf(buf, 2, fmt, ' '));
-  EXPECT_EQ(text[0], buf[0]);
-  EXPECT_EQ(0, buf[1]);
-  EXPECT_TRUE(!memcmp(buf + 2, ref + 2, sizeof(buf) - 2));
-  memcpy(buf, ref, sizeof(buf));
-
-  // An unrestricted buffer length should always leave the trailing bytes
-  // unchanged.
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
-            SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
-  EXPECT_EQ(std::string(text), std::string(buf));
-  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
-                      sizeof(buf) - sizeof(text)));
-  memcpy(buf, ref, sizeof(buf));
-
-  // The same test using SafeSPrintf() instead of SafeSNPrintf().
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSPrintf(buf, fmt, ' '));
-  EXPECT_EQ(std::string(text), std::string(buf));
-  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
-                      sizeof(buf) - sizeof(text)));
-  memcpy(buf, ref, sizeof(buf));
-
-  // Check for deduplication of '%' percent characters.
-  EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
-#if defined(NDEBUG)
-  EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
-  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
-#endif
-}
-
-TEST(SafeSPrintfTestPA, MissingArg) {
-#if defined(NDEBUG)
-  char buf[20];
-  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
-  EXPECT_EQ("A%c", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  char buf[20];
-  EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
-#endif
-}
-
-TEST(SafeSPrintfTestPA, ASANFriendlyBufferTest) {
-  // Print into a buffer that is sized exactly to size. ASAN can verify that
-  // nobody attempts to write past the end of the buffer.
-  // There is a more complicated test in PrintLongString() that covers a lot
-  // more edge cases, but it is also harder to debug in case of a failure.
-  const char kTestString[] = "This is a test";
-  std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
-            SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
-  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
-  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
-            SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
-  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
-}
-
-TEST(SafeSPrintfTestPA, NArgs) {
-  // Pre-C++11 compilers have a different code path that can only print
-  // up to ten distinct arguments.
-  // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
-  // have typos in the copy-n-pasted code that is needed to deal with various
-  // numbers of arguments.
-  char buf[12];
-  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
-  EXPECT_EQ("\1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
-  EXPECT_EQ("\1\2", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
-  EXPECT_EQ("\1\2\3", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
-  EXPECT_EQ("\1\2\3\4", std::string(buf));
-  EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
-  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
-  EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
-  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
-  EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
-  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
-  EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
-  EXPECT_EQ(9,
-            SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8, 9));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
-  EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8,
-                            9, 10));
-
-  // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
-  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
-  EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
-  EXPECT_EQ("\1", std::string(buf));
-  EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
-  EXPECT_EQ("\1\2", std::string(buf));
-  EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
-  EXPECT_EQ("\1\2\3", std::string(buf));
-  EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
-  EXPECT_EQ("\1\2\3\4", std::string(buf));
-  EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
-  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
-  EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
-  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
-  EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
-  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
-  EXPECT_EQ(8,
-            SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
-  EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7,
-                            8, 9));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
-  EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6,
-                             7, 8, 9, 10));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
-
-  EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7,
-                            8, 9, 10, 11));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
-  EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5,
-                             6, 7, 8, 9, 10, 11));
-  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
-}
-
-TEST(SafeSPrintfTestPA, DataTypes) {
-  char buf[40];
-
-  // Bytes
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
-  EXPECT_EQ("255", std::string(buf));
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
-  EXPECT_EQ("-1", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
-  EXPECT_EQ("-128", std::string(buf));
-
-  // Half-words
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
-  EXPECT_EQ("65535", std::string(buf));
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
-  EXPECT_EQ("-1", std::string(buf));
-  EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
-  EXPECT_EQ("-32768", std::string(buf));
-
-  // Words
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
-  EXPECT_EQ("4294967295", std::string(buf));
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
-  EXPECT_EQ("-1", std::string(buf));
-  // Work-around for a limitation of C90
-  EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647 - 1));
-  EXPECT_EQ("-2147483648", std::string(buf));
-
-  // Quads
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
-  EXPECT_EQ("18446744073709551615", std::string(buf));
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
-  EXPECT_EQ("-1", std::string(buf));
-  // Work-around for a limitation of C90
-  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL - 1));
-  EXPECT_EQ("-9223372036854775808", std::string(buf));
-
-  // Strings (both const and mutable).
-  EXPECT_EQ(4, SafeSPrintf(buf, "test"));
-  EXPECT_EQ("test", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, buf));
-  EXPECT_EQ("test", std::string(buf));
-
-  // Pointer
-  char addr[20];
-  snprintf(addr, sizeof(addr), "0x%llX", (unsigned long long)(uintptr_t)buf);
-  SafeSPrintf(buf, "%p", buf);
-  EXPECT_EQ(std::string(addr), std::string(buf));
-  SafeSPrintf(buf, "%p", (const char*)buf);
-  EXPECT_EQ(std::string(addr), std::string(buf));
-  snprintf(addr, sizeof(addr), "0x%llX",
-           (unsigned long long)(uintptr_t)snprintf);
-  SafeSPrintf(buf, "%p", snprintf);
-  EXPECT_EQ(std::string(addr), std::string(buf));
-
-  // Padding for pointers is a little more complicated because of the "0x"
-  // prefix. Padding with '0' is relatively straightforward, but padding
-  // with ' ' spaces requires more effort.
-  snprintf(addr, sizeof(addr), "0x%017llX", (unsigned long long)(uintptr_t)buf);
-  SafeSPrintf(buf, "%019p", buf);
-  EXPECT_EQ(std::string(addr), std::string(buf));
-  snprintf(addr, sizeof(addr), "0x%llX", (unsigned long long)(uintptr_t)buf);
-  memset(addr, ' ',
-         (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1, addr,
-                        strlen(addr) + 1) -
-             addr);
-  SafeSPrintf(buf, "%19p", buf);
-  EXPECT_EQ(std::string(addr), std::string(buf));
-}
-
-namespace {
-void PrintLongString(char* buf, size_t sz) {
-  // Output a reasonably complex expression into a limited-size buffer.
-  // At least one byte must be available for writing the NUL character.
-  PA_BASE_CHECK(sz > static_cast<size_t>(0));
-
-  // Allocate slightly more space, so that we can verify that SafeSPrintf()
-  // never writes past the end of the buffer.
-  std::unique_ptr<char[]> tmp(new char[sz + 2]);
-  memset(tmp.get(), 'X', sz + 2);
-
-  // Use SafeSPrintf() to output a complex list of arguments:
-  // - test padding and truncating %c single characters.
-  // - test truncating %s simple strings.
-  // - test mismatching arguments and truncating (for %d != %s).
-  // - test zero-padding and truncating %x hexadecimal numbers.
-  // - test outputting and truncating %d MININT.
-  // - test outputting and truncating %p arbitrary pointer values.
-  // - test outputting, padding and truncating NULL-pointer %s strings.
-  char* out = tmp.get();
-  size_t out_sz = sz;
-  size_t len;
-  for (std::unique_ptr<char[]> perfect_buf;;) {
-    size_t needed =
-        SafeSNPrintf(out, out_sz,
-#if defined(NDEBUG)
-                     "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
-#else
-                     "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
-#endif
-                     0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
-                     PrintLongString, static_cast<char*>(nullptr)) +
-        1;
-
-    // Various sanity checks:
-    // The number of characters needed to print the full string should always
-    // be greater than or equal to the number of bytes actually output.
-    len = strlen(tmp.get());
-    PA_BASE_CHECK(needed >= len + 1);
-
-    // The number of characters output should always fit into the buffer that
-    // was passed into SafeSPrintf().
-    PA_BASE_CHECK(len < out_sz);
-
-    // The output is always terminated with a NUL byte (actually, this test is
-    // always going to pass, as strlen() already verified this)
-    EXPECT_FALSE(tmp[len]);
-
-    // ASAN can check that we are not overwriting buffers, iff we make sure the
-    // buffer is exactly the size we expect to write. After running
-    // SafeSNPrintf() the first time, it is possible to compute the correct
-    // buffer size for this test. So, allocate a second buffer and run the
-    // exact same SafeSNPrintf() call again.
-    if (!perfect_buf.get()) {
-      out_sz = std::min(needed, sz);
-      out = new char[out_sz];
-      perfect_buf.reset(out);
-    } else {
-      break;
-    }
-  }
-
-  // All trailing bytes are unchanged.
-  for (size_t i = len + 1; i < sz + 2; ++i) {
-    EXPECT_EQ('X', tmp[i]);
-  }
-
-  // The text that was generated by SafeSPrintf() should always match the
-  // equivalent text generated by snprintf(). Please note that the format
-  // string for snprintf() is not complicated, as it does not have the
-  // benefit of getting type information from the C++ compiler.
-  //
-  // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
-  //       Visual Studio doesn't support this function, and the work-arounds
-  //       are all really awkward.
-  char ref[256];
-  PA_BASE_CHECK(sz <= sizeof(ref));
-  snprintf(ref, sizeof(ref), "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
-           static_cast<long long>(std::numeric_limits<intptr_t>::min()),
-           static_cast<unsigned long long>(
-               reinterpret_cast<uintptr_t>(PrintLongString)));
-  ref[sz - 1] = '\000';
-
-#if defined(NDEBUG)
-  const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
-#else
-  const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
-#endif
-
-  // Compare the output from SafeSPrintf() to the one from snprintf().
-  EXPECT_EQ(std::string(ref).substr(0, kSSizeMax - 1), std::string(tmp.get()));
-
-  // We allocated a slightly larger buffer, so that we could perform some
-  // extra sanity checks. Now that the tests have all passed, we copy the
-  // data to the output buffer that the caller provided.
-  memcpy(buf, tmp.get(), len + 1);
-}
-
-#if !defined(NDEBUG)
-class ScopedSafeSPrintfSSizeMaxSetter {
- public:
-  ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
-    old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
-    internal::SetSafeSPrintfSSizeMaxForTest(sz);
-  }
-
-  ScopedSafeSPrintfSSizeMaxSetter(const ScopedSafeSPrintfSSizeMaxSetter&) =
-      delete;
-  ScopedSafeSPrintfSSizeMaxSetter& operator=(
-      const ScopedSafeSPrintfSSizeMaxSetter&) = delete;
-
-  ~ScopedSafeSPrintfSSizeMaxSetter() {
-    internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
-  }
-
- private:
-  size_t old_ssize_max_;
-};
-#endif
-
-}  // anonymous namespace
-
-TEST(SafeSPrintfTestPA, Truncation) {
-  // We use PrintLongString() to print a complex long string and then
-  // truncate to all possible lengths. This ends up exercising a lot of
-  // different code paths in SafeSPrintf() and IToASCII(), as truncation can
-  // happen in a lot of different states.
-  char ref[256];
-  PrintLongString(ref, sizeof(ref));
-  for (size_t i = strlen(ref) + 1; i; --i) {
-    char buf[sizeof(ref)];
-    PrintLongString(buf, i);
-    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
-  }
-
-  // When compiling in debug mode, we have the ability to fake a small
-  // upper limit for the maximum value that can be stored in an ssize_t.
-  // SafeSPrintf() uses this upper limit to determine how many bytes it will
-  // write to the buffer, even if the caller claimed a bigger buffer size.
-  // Repeat the truncation test and verify that this other code path in
-  // SafeSPrintf() works correctly, too.
-#if !defined(NDEBUG)
-  for (size_t i = strlen(ref) + 1; i > 1; --i) {
-    ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
-    char buf[sizeof(ref)];
-    PrintLongString(buf, sizeof(buf));
-    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
-  }
-
-  // kSSizeMax is also used to constrain the maximum amount of padding, before
-  // SafeSPrintf() detects an error in the format string.
-  ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
-  char buf[256];
-  EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
-  EXPECT_EQ(std::string(99, ' '), std::string(buf));
-  *buf = '\000';
-#if defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
-#endif
-  EXPECT_EQ(0, *buf);
-#endif
-}
-
-TEST(SafeSPrintfTestPA, Padding) {
-  char buf[40], fmt[40];
-
-  // Chars %c
-  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
-  EXPECT_EQ("A", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
-  EXPECT_EQ(" A", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
-  EXPECT_EQ(" A", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
-  EXPECT_EQ("%-2c", std::string(buf));
-  SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSPrintf(buf, fmt, 'A'));
-  SafeSPrintf(fmt, "%%%dc",
-              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
-  EXPECT_EQ("%c", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
-#endif
-
-  // Octal %o
-  EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
-  EXPECT_EQ(" 1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
-  EXPECT_EQ("01", std::string(buf));
-  EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
-  EXPECT_EQ(" 37777777777", std::string(buf));
-  EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
-  EXPECT_EQ("037777777777", std::string(buf));
-  EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
-  EXPECT_EQ(" 1777777777777777777777", std::string(buf));
-  EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
-  EXPECT_EQ("01777777777777777777777", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
-  EXPECT_EQ("111", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
-  EXPECT_EQ("%-2o", std::string(buf));
-  SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, 1));
-  EXPECT_EQ("   ", std::string(buf));
-  SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, 1));
-  EXPECT_EQ("000", std::string(buf));
-  SafeSPrintf(fmt, "%%%do",
-              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
-  EXPECT_EQ("%o", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
-#endif
-
-  // Decimals %d
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
-  EXPECT_EQ(" 1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
-  EXPECT_EQ("01", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
-  EXPECT_EQ(" -1", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
-  EXPECT_EQ("-01", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
-  EXPECT_EQ("111", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
-  EXPECT_EQ("-111", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
-  EXPECT_EQ("%-2d", std::string(buf));
-  SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, 1));
-  EXPECT_EQ("   ", std::string(buf));
-  SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, 1));
-  EXPECT_EQ("000", std::string(buf));
-  SafeSPrintf(fmt, "%%%dd",
-              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
-  EXPECT_EQ("%d", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
-#endif
-
-  // Hex %X
-  EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
-  EXPECT_EQ("1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
-  EXPECT_EQ(" 1", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
-  EXPECT_EQ("01", std::string(buf));
-  EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
-  EXPECT_EQ(" FFFFFFFF", std::string(buf));
-  EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
-  EXPECT_EQ("0FFFFFFFF", std::string(buf));
-  EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
-  EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
-  EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
-  EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
-  EXPECT_EQ("111", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
-  EXPECT_EQ("%-2X", std::string(buf));
-  SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, 1));
-  EXPECT_EQ("   ", std::string(buf));
-  SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, 1));
-  EXPECT_EQ("000", std::string(buf));
-  SafeSPrintf(fmt, "%%%dX",
-              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
-  EXPECT_EQ("%X", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
-#endif
-
-  // Pointer %p
-  EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
-  EXPECT_EQ("0x1", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
-  EXPECT_EQ(" 0x1", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
-  EXPECT_EQ("0x01", std::string(buf));
-  EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
-  EXPECT_EQ("0x111", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
-  EXPECT_EQ("%-2p", std::string(buf));
-  SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, (void*)1));
-  EXPECT_EQ("   ", std::string(buf));
-  SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, (void*)1));
-  EXPECT_EQ("0x0", std::string(buf));
-  SafeSPrintf(fmt, "%%%dp",
-              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
-  EXPECT_EQ("%p", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
-#endif
-
-  // String
-  EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
-  EXPECT_EQ("A", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
-  EXPECT_EQ(" A", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
-  EXPECT_EQ(" A", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
-  EXPECT_EQ("AAA", std::string(buf));
-  EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
-  EXPECT_EQ("%-2s", std::string(buf));
-  SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, "A"));
-  EXPECT_EQ("   ", std::string(buf));
-  SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max() - 1);
-  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
-            SafeSNPrintf(buf, 4, fmt, "A"));
-  EXPECT_EQ("   ", std::string(buf));
-  SafeSPrintf(fmt, "%%%ds",
-              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
-  EXPECT_EQ("%s", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
-#endif
-}
-
-TEST(SafeSPrintfTestPA, EmbeddedNul) {
-  char buf[] = {'X', 'X', 'X', 'X'};
-  EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
-  EXPECT_EQ(' ', buf[0]);
-  EXPECT_EQ(' ', buf[1]);
-  EXPECT_EQ(0, buf[2]);
-  EXPECT_EQ('X', buf[3]);
-
-  // Check handling of a NUL format character. N.B. this takes two different
-  // code paths depending on whether we are actually passing arguments. If
-  // we don't have any arguments, we are running in the fast-path code, which
-  // looks (almost) like a strncpy().
-#if defined(NDEBUG)
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
-  EXPECT_EQ("%%", std::string(buf));
-  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
-  EXPECT_EQ("%%", std::string(buf));
-#elif defined(ALLOW_DEATH_TEST)
-  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
-  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
-#endif
-}
-
-TEST(SafeSPrintfTestPA, EmitNULL) {
-  char buf[40];
-#if defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion-null"
-#endif
-  EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
-  EXPECT_EQ("0", std::string(buf));
-  EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
-  EXPECT_EQ("0x0", std::string(buf));
-  EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
-  EXPECT_EQ("<NULL>", std::string(buf));
-#if defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-}
-
-TEST(SafeSPrintfTestPA, PointerSize) {
-  // The internal data representation is a 64-bit value, independent of the
-  // native word size. We want to perform sign-extension for signed integers,
-  // but we want to avoid doing so for pointer types. This could be a
-  // problem on systems where pointers are only 32-bit. This test verifies
-  // that there is no such problem.
-  char* str = reinterpret_cast<char*>(0x80000000u);
-  void* ptr = str;
-  char buf[40];
-  EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
-  EXPECT_EQ("0x80000000", std::string(buf));
-  EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
-  EXPECT_EQ("0x80000000", std::string(buf));
-}
-
-}  // namespace partition_alloc::internal::base::strings
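
The deleted tests above pin down the observable contract of SafeSPrintf() and SafeSNPrintf(): output is always NUL-terminated, and the return value is the number of characters the fully formatted string needs (excluding the NUL), mirroring snprintf(). A minimal usage sketch under those assumptions; the include path is an assumption matching the test's own header:

// Illustrative sketch only.
#include "base/allocator/partition_allocator/partition_alloc_base/strings/safe_sprintf.h"

namespace strings = partition_alloc::internal::base::strings;

void FormatPid(int pid) {
  char buf[64];
  // The array overload deduces the buffer size and always NUL-terminates.
  strings::SafeSPrintf(buf, "pid=%d (0x%X)", pid, pid);

  // The explicit-size overload works with non-array buffers; the return value
  // is the length the untruncated output would have had.
  char small[8];
  strings::SafeSNPrintf(small, sizeof(small), "%d", pid);
}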
diff --git a/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.cc b/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.cc
deleted file mode 100644
index 48df6d9..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
-
-#include <stdarg.h>
-#include <stdio.h>
-
-namespace partition_alloc::internal::base {
-
-std::string PA_PRINTF_FORMAT(1, 2)
-    TruncatingStringPrintf(const char* format, ...) {
-  base::ScopedClearLastError last_error;
-  char stack_buf[kMaxLengthOfTruncatingStringPrintfResult + 1];
-  va_list arguments;
-  va_start(arguments, format);
-#if BUILDFLAG(IS_WIN)
-  int result = vsnprintf_s(stack_buf, std::size(stack_buf), _TRUNCATE, format,
-                           arguments);
-#else
-  int result = vsnprintf(stack_buf, std::size(stack_buf), format, arguments);
-#endif
-  va_end(arguments);
-#if BUILDFLAG(IS_WIN)
-  // If an output error is encountered or the data is larger than count,
-  // a negative value is returned. To tell whether an output error really
-  // occurred, we need to check errno: if errno == EINVAL or errno == ERANGE,
-  // an output error occurred; otherwise the output was just truncated.
-  if (result < 0 && (errno == EINVAL || errno == ERANGE))
-    return std::string();
-#else
-  // If an output error is encountered, a negative value is returned.
-  // In that case, return an empty string.
-  if (result < 0)
-    return std::string();
-#endif
-  // If result is equal to or larger than std::size(stack_buf), the output was
-  // truncated. (::base::StringPrintf, by contrast, doesn't truncate output.)
-  return std::string(stack_buf);
-}
-
-}  // namespace partition_alloc::internal::base
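
The deleted implementation formats into a fixed-size stack buffer and lets vsnprintf() do the truncation, distinguishing real output errors from truncation via errno on Windows. A dependency-free sketch of the same idea, assuming only the C and C++ standard libraries; TruncatingFormat is a hypothetical name:

// Illustrative sketch only.
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <string>

constexpr std::size_t kMaxLength = 255;

std::string TruncatingFormat(const char* format, ...) {
  char stack_buf[kMaxLength + 1];
  va_list args;
  va_start(args, format);
  // vsnprintf() always NUL-terminates within the given size and returns the
  // length the full output would have had; a negative value signals an error.
  int result = vsnprintf(stack_buf, sizeof(stack_buf), format, args);
  va_end(args);
  if (result < 0)
    return std::string();
  // Anything longer than kMaxLength has already been truncated in the buffer.
  return std::string(stack_buf);
}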
diff --git a/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h b/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h
deleted file mode 100644
index 5551fb6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_STRINGS_STRINGPRINTF_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_STRINGS_STRINGPRINTF_H_
-
-#include <stdarg.h>  // va_list
-
-#include <string>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-// Since only SystemErrorCodeToString and partition_alloc_perftests use
-// StringPrintf, it does not need to support arbitrarily long results.
-// Instead, define a maximum result length and truncate longer results.
-static constexpr size_t kMaxLengthOfTruncatingStringPrintfResult = 255U;
-
-// Return a C++ string given printf-like input.
-[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) std::string
-    TruncatingStringPrintf(const char* format, ...) PA_PRINTF_FORMAT(1, 2);
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_STRINGS_STRINGPRINTF_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf_pa_unittest.cc
deleted file mode 100644
index 20849d6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf_pa_unittest.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h"
-
-#include <errno.h>
-#include <stddef.h>
-
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal::base {
-
-TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfEmpty) {
-  EXPECT_EQ("", TruncatingStringPrintf("%s", ""));
-}
-
-TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfMisc) {
-  EXPECT_EQ("123hello w",
-            TruncatingStringPrintf("%3d%2s %1c", 123, "hello", 'w'));
-}
-
-// Test that TruncatingStringPrintf truncates overly long results.
-// (The original ::base::StringPrintf does not truncate; instead, it allocates
-// memory and returns the entire result.)
-TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfTruncatesResult) {
-  std::vector<char> buffer;
-  buffer.resize(kMaxLengthOfTruncatingStringPrintfResult + 1);
-  std::fill(buffer.begin(), buffer.end(), 'a');
-  buffer.push_back('\0');
-  std::string result = TruncatingStringPrintf("%s", buffer.data());
-  EXPECT_EQ(kMaxLengthOfTruncatingStringPrintfResult, result.length());
-  EXPECT_EQ(std::string::npos, result.find_first_not_of('a'));
-}
-
-// Test that TruncatingStringPrintf does not change errno.
-TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfErrno) {
-  errno = 1;
-  EXPECT_EQ("", TruncatingStringPrintf("%s", ""));
-  EXPECT_EQ(1, errno);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h b/base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h
deleted file mode 100644
index d7d2437..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base {
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SysInfo {
- public:
-  // Retrieves detailed numeric values for the OS version.
-  // DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
-  // for OS version-specific feature checks and workarounds. If you must use
-  // an OS version check instead of a feature check, use the base::mac::IsOS*
-  // family from base/mac/mac_util.h, or base::win::GetVersion from
-  // base/win/windows_version.h.
-  static void OperatingSystemVersionNumbers(int32_t* major_version,
-                                            int32_t* minor_version,
-                                            int32_t* bugfix_version);
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
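
The header above exposes a single numeric OS-version query. A short sketch of how a caller might use it; IsAtLeastMajorVersion is a hypothetical helper name:

// Illustrative sketch only.
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"

bool IsAtLeastMajorVersion(int32_t wanted_major) {
  int32_t major = 0;
  int32_t minor = 0;
  int32_t bugfix = 0;
  partition_alloc::internal::base::SysInfo::OperatingSystemVersionNumbers(
      &major, &minor, &bugfix);
  return major >= wanted_major;
}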
diff --git a/base/allocator/partition_allocator/partition_alloc_base/system/sys_info_ios.mm b/base/allocator/partition_allocator/partition_alloc_base/system/sys_info_ios.mm
deleted file mode 100644
index e6b7ce8..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/system/sys_info_ios.mm
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
-
-#import <Foundation/Foundation.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-
-namespace partition_alloc::internal::base {
-
-// static
-void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
-                                            int32_t* minor_version,
-                                            int32_t* bugfix_version) {
-  NSOperatingSystemVersion version =
-      [[NSProcessInfo processInfo] operatingSystemVersion];
-  *major_version = saturated_cast<int32_t>(version.majorVersion);
-  *minor_version = saturated_cast<int32_t>(version.minorVersion);
-  *bugfix_version = saturated_cast<int32_t>(version.patchVersion);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/system/sys_info_mac.mm b/base/allocator/partition_allocator/partition_alloc_base/system/sys_info_mac.mm
deleted file mode 100644
index b938759..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/system/sys_info_mac.mm
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
-
-#import <Foundation/Foundation.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-
-namespace partition_alloc::internal::base {
-
-// static
-void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
-                                            int32_t* minor_version,
-                                            int32_t* bugfix_version) {
-  NSOperatingSystemVersion version =
-      NSProcessInfo.processInfo.operatingSystemVersion;
-  *major_version = saturated_cast<int32_t>(version.majorVersion);
-  *minor_version = saturated_cast<int32_t>(version.minorVersion);
-  *bugfix_version = saturated_cast<int32_t>(version.patchVersion);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h b/base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h
deleted file mode 100644
index 5cf5ea0..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This header file contains macro definitions for thread safety annotations
-// that allow developers to document the locking policies of multi-threaded
-// code. The annotations can also help program analysis tools to identify
-// potential thread safety issues.
-//
-// Note that no analysis is done inside constructors and destructors,
-// regardless of what attributes are used. See
-// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-checking-inside-constructors-and-destructors
-// for details.
-//
-// Note that the annotations we use are described as deprecated in the Clang
-// documentation, linked below. E.g. we use PA_EXCLUSIVE_LOCKS_REQUIRED where
-// the Clang docs use REQUIRES.
-//
-// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
-//
-// We use the deprecated Clang annotations to match Abseil (relevant header
-// linked below) and its ecosystem of libraries. We will follow Abseil with
-// respect to upgrading to more modern annotations.
-//
-// https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h
-//
-// These annotations are implemented using compiler attributes. Using the macros
-// defined here instead of raw attributes allows for portability and future
-// compatibility.
-//
-// When referring to mutexes in the arguments of the attributes, you should
-// use variable names or more complex expressions (e.g. my_object->mutex_)
-// that evaluate to a concrete mutex object whenever possible. If the mutex
-// you want to refer to is not in scope, you may use a member pointer
-// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "build/build_config.h"
-
-#if defined(__clang__)
-#define PA_THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
-#else
-#define PA_THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
-#endif
-
-// PA_GUARDED_BY()
-//
-// Documents if a shared field or global variable needs to be protected by a
-// mutex. PA_GUARDED_BY() allows the user to specify a particular mutex that
-// should be held when accessing the annotated variable.
-//
-// Example:
-//
-//   Mutex mu;
-//   int p1 PA_GUARDED_BY(mu);
-#define PA_GUARDED_BY(x) PA_THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
-
-// PA_PT_GUARDED_BY()
-//
-// Documents if the memory location pointed to by a pointer should be guarded
-// by a mutex when dereferencing the pointer.
-//
-// Example:
-//   Mutex mu;
-//   int *p1 PA_PT_GUARDED_BY(mu);
-//
-// Note that a pointer variable to a shared memory location could itself be a
-// shared variable.
-//
-// Example:
-//
-//     // `q`, guarded by `mu1`, points to a shared memory location that is
-//     // guarded by `mu2`:
-//     int *q PA_GUARDED_BY(mu1) PA_PT_GUARDED_BY(mu2);
-#define PA_PT_GUARDED_BY(x) PA_THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
-
-// PA_ACQUIRED_AFTER() / PA_ACQUIRED_BEFORE()
-//
-// Documents the acquisition order between locks that can be held
-// simultaneously by a thread. For any two locks that need to be annotated
-// to establish an acquisition order, only one of them needs the annotation.
-// (i.e. You don't have to annotate both locks with both PA_ACQUIRED_AFTER
-// and PA_ACQUIRED_BEFORE.)
-//
-// Example:
-//
-//   Mutex m1;
-//   Mutex m2 PA_ACQUIRED_AFTER(m1);
-#define PA_ACQUIRED_AFTER(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
-
-#define PA_ACQUIRED_BEFORE(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
-
-// PA_EXCLUSIVE_LOCKS_REQUIRED() / PA_SHARED_LOCKS_REQUIRED()
-//
-// Documents a function that expects a mutex to be held prior to entry.
-// The mutex is expected to be held both on entry to, and exit from, the
-// function.
-//
-// Example:
-//
-//   Mutex mu1, mu2;
-//   int a PA_GUARDED_BY(mu1);
-//   int b PA_GUARDED_BY(mu2);
-//
-//   void foo() PA_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... };
-#define PA_EXCLUSIVE_LOCKS_REQUIRED(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
-
-#define PA_SHARED_LOCKS_REQUIRED(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
-
-// PA_LOCKS_EXCLUDED()
-//
-// Documents the locks acquired in the body of the function. These locks
-// cannot be held when calling this function (as Abseil's `Mutex` locks are
-// non-reentrant).
-#define PA_LOCKS_EXCLUDED(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
-
-// PA_LOCK_RETURNED()
-//
-// Documents a function that returns a mutex without acquiring it.  For example,
-// a public getter method that returns a pointer to a private mutex should
-// be annotated with PA_LOCK_RETURNED.
-#define PA_LOCK_RETURNED(x) PA_THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-
-// PA_LOCKABLE
-//
-// Documents if a class/type is a lockable type (such as the `Mutex` class).
-#define PA_LOCKABLE PA_THREAD_ANNOTATION_ATTRIBUTE__(lockable)
-
-// PA_SCOPED_LOCKABLE
-//
-// Documents if a class does RAII locking (such as the `MutexLock` class).
-// The constructor should use `PA_*_LOCK_FUNCTION()` to specify the mutex that
-// is acquired, and the destructor should use `PA_UNLOCK_FUNCTION()` with no
-// arguments; the analysis will assume that the destructor unlocks whatever the
-// constructor locked.
-#define PA_SCOPED_LOCKABLE PA_THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-
-// PA_EXCLUSIVE_LOCK_FUNCTION()
-//
-// Documents functions that acquire a lock in the body of a function, and do
-// not release it.
-#define PA_EXCLUSIVE_LOCK_FUNCTION(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
-
-// PA_SHARED_LOCK_FUNCTION()
-//
-// Documents functions that acquire a shared (reader) lock in the body of a
-// function, and do not release it.
-#define PA_SHARED_LOCK_FUNCTION(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
-
-// PA_UNLOCK_FUNCTION()
-//
-// Documents functions that expect a lock to be held on entry to the function,
-// and release it in the body of the function.
-#define PA_UNLOCK_FUNCTION(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
-
-// PA_EXCLUSIVE_TRYLOCK_FUNCTION() / PA_SHARED_TRYLOCK_FUNCTION()
-//
-// Documents functions that try to acquire a lock, and return success or failure
-// (or a non-boolean value that can be interpreted as a boolean).
-// The first argument should be `true` for functions that return `true` on
-// success, or `false` for functions that return `false` on success. The second
-// argument specifies the mutex that is locked on success. If unspecified, this
-// mutex is assumed to be `this`.
-#define PA_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
-
-#define PA_SHARED_TRYLOCK_FUNCTION(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
-
-// PA_ASSERT_EXCLUSIVE_LOCK() / PA_ASSERT_SHARED_LOCK()
-//
-// Documents functions that dynamically check to see if a lock is held, and fail
-// if it is not held.
-#define PA_ASSERT_EXCLUSIVE_LOCK(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
-
-#define PA_ASSERT_SHARED_LOCK(...) \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
-
-// PA_NO_THREAD_SAFETY_ANALYSIS
-//
-// Turns off thread safety checking within the body of a particular function.
-// This annotation is used to mark functions that are known to be correct, but
-// whose locking behavior is more complicated than the analyzer can handle.
-#define PA_NO_THREAD_SAFETY_ANALYSIS \
-  PA_THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
-
-//------------------------------------------------------------------------------
-// Tool-Supplied Annotations
-//------------------------------------------------------------------------------
-
-// PA_TS_UNCHECKED should be placed around lock expressions that are not valid
-// C++ syntax, but which are present for documentation purposes.  These
-// annotations will be ignored by the analysis.
-#define PA_TS_UNCHECKED(x) ""
-
-// PA_TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
-// It is used by automated tools to mark and disable invalid expressions.
-// The annotation should either be fixed, or changed to PA_TS_UNCHECKED.
-#define PA_TS_FIXME(x) ""
-
-// Like PA_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
-// a particular function.  However, this attribute is used to mark functions
-// that are incorrect and need to be fixed.  It is used by automated tools to
-// avoid breaking the build when the analysis is updated.
-// Code owners are expected to eventually fix the routine.
-#define PA_NO_THREAD_SAFETY_ANALYSIS_FIXME PA_NO_THREAD_SAFETY_ANALYSIS
-
-// Similar to PA_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a
-// PA_GUARDED_BY annotation that needs to be fixed because it is producing a
-// thread safety warning.  It disables the PA_GUARDED_BY.
-#define PA_GUARDED_BY_FIXME(x)
-
-// Disables warnings for a single read operation.  This can be used to avoid
-// warnings when it is known that the read is not actually involved in a race,
-// but the compiler cannot confirm that.
-#define PA_TS_UNCHECKED_READ(x) \
-  partition_alloc::internal::thread_safety_analysis::ts_unchecked_read(x)
-
-namespace partition_alloc::internal::thread_safety_analysis {
-
-// Takes a reference to a guarded data member, and returns an unguarded
-// reference.
-template <typename T>
-inline const T& ts_unchecked_read(const T& v) PA_NO_THREAD_SAFETY_ANALYSIS {
-  return v;
-}
-
-template <typename T>
-inline T& ts_unchecked_read(T& v) PA_NO_THREAD_SAFETY_ANALYSIS {
-  return v;
-}
-
-}  // namespace partition_alloc::internal::thread_safety_analysis
-
-// The above is imported as-is from abseil-cpp. The following Chromium-specific
-// synonyms are added for Chromium concepts (SequenceChecker/ThreadChecker).
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-// Equivalent to PA_GUARDED_BY for SequenceChecker/ThreadChecker. Currently,
-#define PA_GUARDED_BY_CONTEXT(name) PA_GUARDED_BY(name)
-
-// Equivalent to PA_EXCLUSIVE_LOCKS_REQUIRED for SequenceChecker/ThreadChecker.
-#define PA_VALID_CONTEXT_REQUIRED(name) PA_EXCLUSIVE_LOCKS_REQUIRED(name)
-
-#else  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_GUARDED_BY_CONTEXT(name)
-#define PA_VALID_CONTEXT_REQUIRED(name)
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
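
The header documents PA_EXCLUSIVE_TRYLOCK_FUNCTION() but, unlike most of the other macros, gives no example. A sketch of the trylock pattern using the annotations above; SpinLock and Counter are hypothetical types:

// Illustrative sketch only.
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"

class PA_LOCKABLE SpinLock {
 public:
  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {}
  // Returns true on success, so the first annotation argument is `true`.
  bool TryAcquire() PA_EXCLUSIVE_TRYLOCK_FUNCTION(true) { return true; }
  void Release() PA_UNLOCK_FUNCTION() {}
};

class Counter {
 public:
  bool TryIncrement() {
    if (!lock_.TryAcquire())
      return false;
    ++value_;  // OK: the analysis knows lock_ is held on this branch.
    lock_.Release();
    return true;
  }

 private:
  SpinLock lock_;
  int value_ PA_GUARDED_BY(lock_) = 0;
};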
diff --git a/base/allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.cc
deleted file mode 100644
index 2f0ae82..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-class PA_LOCKABLE Lock {
- public:
-  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {}
-  void Release() PA_UNLOCK_FUNCTION() {}
-};
-
-class PA_SCOPED_LOCKABLE AutoLock {
- public:
-  AutoLock(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {
-    lock.Acquire();
-  }
-  ~AutoLock() PA_UNLOCK_FUNCTION() { lock_.Release(); }
-
- private:
-  Lock& lock_;
-};
-
-class ThreadSafe {
- public:
-  void ExplicitIncrement();
-  void ImplicitIncrement();
-
- private:
-  Lock lock_;
-  int counter_ PA_GUARDED_BY(lock_);
-};
-
-void ThreadSafe::ExplicitIncrement() {
-  lock_.Acquire();
-  ++counter_;
-  lock_.Release();
-}
-
-void ThreadSafe::ImplicitIncrement() {
-  AutoLock auto_lock(lock_);
-  counter_++;
-}
-
-TEST(PartitionAllocThreadAnnotationsTest, ExplicitIncrement) {
-  ThreadSafe thread_safe;
-  thread_safe.ExplicitIncrement();
-}
-TEST(PartitionAllocThreadAnnotationsTest, ImplicitIncrement) {
-  ThreadSafe thread_safe;
-  thread_safe.ImplicitIncrement();
-}
-
-}  // anonymous namespace
diff --git a/base/allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.nc b/base/allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.nc
deleted file mode 100644
index cbf4fbb..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/thread_annotations_pa_unittest.nc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// https://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-
-namespace {
-
-class PA_LOCKABLE Lock {
- public:
-  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {}
-  void Release() PA_UNLOCK_FUNCTION() {}
-};
-
-class PA_SCOPED_LOCKABLE AutoLock {
- public:
-  AutoLock(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {
-    lock.Acquire();
-  }
-  ~AutoLock() PA_UNLOCK_FUNCTION() { lock_.Release(); }
-
- private:
-  Lock& lock_;
-};
-class ThreadSafe {
- public:
-  void BuggyIncrement();
- private:
-  Lock lock_;
-  int counter_ PA_GUARDED_BY(lock_);
-};
-
-#if defined(NCTEST_LOCK_WITHOUT_UNLOCK)  // [r"fatal error: mutex 'lock_' is still held at the end of function"]
-
-void ThreadSafe::BuggyIncrement() {
-  lock_.Acquire();
-  ++counter_;
-  // Forgot to release the lock.
-}
-
-#elif defined(NCTEST_ACCESS_WITHOUT_LOCK)  // [r"fatal error: writing variable 'counter_' requires holding mutex 'lock_' exclusively"]
-
-void ThreadSafe::BuggyIncrement() {
-  // Member access without holding the lock guarding it.
-  ++counter_;
-}
-
-#elif defined(NCTEST_ACCESS_WITHOUT_SCOPED_LOCK)  // [r"fatal error: writing variable 'counter_' requires holding mutex 'lock_' exclusively"]
-
-void ThreadSafe::BuggyIncrement() {
-  {
-    AutoLock auto_lock(lock_);
-    // The AutoLock will go out of scope before the guarded member access.
-  }
-  ++counter_;
-}
-
-#elif defined(NCTEST_GUARDED_BY_WRONG_TYPE)  // [r"fatal error: 'guarded_by' attribute requires arguments whose type is annotated"]
-
-int not_lockable;
-int global_counter PA_GUARDED_BY(not_lockable);
-
-// Defined to avoid link error.
-void ThreadSafe::BuggyIncrement() { }
-
-#endif
-
-}  // anonymous namespace
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.cc
deleted file mode 100644
index 2aa8c6a..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-
-namespace partition_alloc::internal::base {
-
-namespace {
-
-// SetThreadNameHook is invoked by EnablePCScan(). EnablePCScan() will be
-// invoked soon after running RunBrowser, RunZygote, and RunContentProcess.
-// So g_set_thread_name_proc can be non-atomic.
-SetThreadNameProc g_set_thread_name_proc = nullptr;
-
-}  // namespace
-
-void PlatformThread::SetThreadNameHook(SetThreadNameProc hook) {
-  g_set_thread_name_proc = hook;
-}
-
-// static
-void PlatformThread::SetName(const std::string& name) {
-  if (!g_set_thread_name_proc)
-    return;
-  g_set_thread_name_proc(name);
-}
-
-}  // namespace partition_alloc::internal::base
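
As the comment above notes, the hook must be installed while the process is effectively single-threaded, because g_set_thread_name_proc is written without synchronization. A sketch of how an embedder might install it; MyThreadNameSetter and InstallThreadNameHook are hypothetical names:

// Illustrative sketch only.
#include <string>

#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"

namespace {

void MyThreadNameSetter(const std::string& name) {
  // Placeholder; a real hook would forward to the embedder's naming facility.
  (void)name;
}

}  // namespace

void InstallThreadNameHook() {
  using partition_alloc::internal::base::PlatformThread;
  PlatformThread::SetThreadNameHook(&MyThreadNameSetter);
  // From now on, SetName() calls are routed through the hook.
  PlatformThread::SetName("PartitionAllocDemo");
}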
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h
deleted file mode 100644
index a99ed13..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// WARNING: You should *NOT* be using this class directly.  PlatformThread is
-// the low-level platform-specific abstraction to the OS's threading interface.
-// You should instead be using a message-loop driven Thread, see thread.h.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_H_
-
-#include <stddef.h>
-
-#include <iosfwd>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
-#elif BUILDFLAG(IS_FUCHSIA)
-#include <zircon/types.h>
-#elif BUILDFLAG(IS_APPLE)
-#include <mach/mach_types.h>
-#elif BUILDFLAG(IS_POSIX)
-#include <pthread.h>
-#include <unistd.h>
-#endif
-
-namespace partition_alloc::internal::base {
-
-// Used for logging. Always an integer value.
-#if BUILDFLAG(IS_WIN)
-typedef DWORD PlatformThreadId;
-#elif BUILDFLAG(IS_FUCHSIA)
-typedef zx_handle_t PlatformThreadId;
-#elif BUILDFLAG(IS_APPLE)
-typedef mach_port_t PlatformThreadId;
-#elif BUILDFLAG(IS_POSIX)
-typedef pid_t PlatformThreadId;
-#endif
-
-// Used to operate on threads.
-class PlatformThreadHandle {
- public:
-#if BUILDFLAG(IS_WIN)
-  typedef void* Handle;
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  typedef pthread_t Handle;
-#endif
-
-  constexpr PlatformThreadHandle() : handle_(0) {}
-
-  explicit constexpr PlatformThreadHandle(Handle handle) : handle_(handle) {}
-
-  bool is_equal(const PlatformThreadHandle& other) const {
-    return handle_ == other.handle_;
-  }
-
-  bool is_null() const { return !handle_; }
-
-  Handle platform_handle() const { return handle_; }
-
- private:
-  Handle handle_;
-};
-
-const PlatformThreadId kInvalidThreadId(0);
-
-typedef void (*SetThreadNameProc)(const std::string&);
-
-// A namespace for low-level thread functions.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PlatformThread {
- public:
-  PlatformThread() = delete;
-  PlatformThread(const PlatformThread&) = delete;
-  PlatformThread& operator=(const PlatformThread&) = delete;
-
-  // Gets the current thread id, which may be useful for logging purposes.
-  static PlatformThreadId CurrentId();
-
-  // Gets the current thread reference, which can be used to check if
-  // we're on the right thread quickly.
-  static PlatformThreadRef CurrentRef();
-
-  // Get the handle representing the current thread. On Windows, this is a
-  // pseudo handle constant which will always represent the thread using it and
-  // hence should not be shared with other threads nor be used to differentiate
-  // the current thread from another.
-  static PlatformThreadHandle CurrentHandle();
-
-  // Sleeps for the specified duration (real-time; ignores time overrides).
-  // Note: The sleep duration may be in base::Time or base::TimeTicks, depending
-  // on platform. If you're looking to use this in unit tests testing delayed
-  // tasks, this will be unreliable - instead, use
-  // base::test::TaskEnvironment with MOCK_TIME mode.
-  static void Sleep(TimeDelta duration);
-
-  // Sets the thread name visible to debuggers/tools. This will try to
-  // initialize the context for current thread unless it's a WorkerThread.
-  static void SetName(const std::string& name);
-
-  static void SetThreadNameHook(SetThreadNameProc hook);
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_android_for_testing.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_android_for_testing.cc
deleted file mode 100644
index 8b613c2..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_android_for_testing.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-
-#include <pthread.h>
-#include <stddef.h>
-
-namespace partition_alloc::internal::base {
-
-void InitThreading() {}
-
-void TerminateOnThread() {
-  // partition alloc tests don't use AttachCurrentThread(), because
-  // the tests don't set / get any thread priority. So no need to do
-  // "base::android::DetachFromVM();" here.
-}
-
-size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
-#if !defined(ADDRESS_SANITIZER)
-  return 0;
-#else
-  // AddressSanitizer bloats the stack approximately 2x. The default 1 MB stack
-  // size is not enough for some tests (see http://crbug.com/263749 for example).
-  return 2 * (1 << 20);  // 2Mb
-#endif
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h
deleted file mode 100644
index 57b85ea..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// WARNING: You should *NOT* be using this class directly.  PlatformThread is
-// the low-level platform-specific abstraction to the OS's threading interface.
-// You should instead be using a message-loop driven Thread, see thread.h.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_FOR_TESTING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_FOR_TESTING_H_
-
-#include <stddef.h>
-
-#include <iosfwd>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-// A namespace for low-level thread functions.
-class PlatformThreadForTesting : public PlatformThread {
- public:
-  // Implement this interface to run code on a background thread.  Your
-  // ThreadMain method will be called on the newly created thread.
-  class Delegate {
-   public:
-    virtual void ThreadMain() = 0;
-
-   protected:
-    virtual ~Delegate() = default;
-  };
-
-  PlatformThreadForTesting() = delete;
-  PlatformThreadForTesting(const PlatformThreadForTesting&) = delete;
-  PlatformThreadForTesting& operator=(const PlatformThreadForTesting&) = delete;
-
-  // Yield the current thread so another thread can be scheduled.
-  //
-  // Note: this is likely not the right call to make in most situations. If this
-  // is part of a spin loop, consider base::Lock, which likely has better tail
-  // latency. Yielding the thread has different effects depending on the
-  // platform, system load, etc., and can result in yielding the CPU for less
-  // than 1us, or many tens of ms.
-  static void YieldCurrentThread();
-
-  // Creates a new thread.  The `stack_size` parameter can be 0 to indicate
-  // that the default stack size should be used.  Upon success,
-  // `*thread_handle` will be assigned a handle to the newly created thread,
-  // and `delegate`'s ThreadMain method will be executed on the newly created
-  // thread.
-  // NOTE: When you are done with the thread handle, you must call Join to
-  // release system resources associated with the thread.  You must ensure that
-  // the Delegate object outlives the thread.
-  static bool Create(size_t stack_size,
-                     Delegate* delegate,
-                     PlatformThreadHandle* thread_handle);
-
-  // Joins with a thread created via the Create function.  This function blocks
-  // the caller until the designated thread exits.  This will invalidate
-  // `thread_handle`.
-  static void Join(PlatformThreadHandle thread_handle);
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  // Returns the default thread stack size set by chrome. If we do not
-  // explicitly set default size then returns 0.
-  static size_t GetDefaultThreadStackSize();
-#endif
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_FOR_TESTING_H_
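
The class above describes the Create()/Join() contract in prose: the Delegate must outlive the thread, and Join() blocks until the thread exits and releases its resources. A usage sketch under those rules; BusyWorkDelegate and RunOnBackgroundThread are hypothetical names:

// Illustrative sketch only.
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"

namespace pa_base = partition_alloc::internal::base;

class BusyWorkDelegate : public pa_base::PlatformThreadForTesting::Delegate {
 public:
  void ThreadMain() override {
    // Work that should run on the newly created thread goes here.
  }
};

void RunOnBackgroundThread() {
  BusyWorkDelegate delegate;  // Must outlive the thread.
  pa_base::PlatformThreadHandle handle;
  if (pa_base::PlatformThreadForTesting::Create(/*stack_size=*/0, &delegate,
                                                &handle)) {
    // Join() blocks until ThreadMain() returns and frees the OS resources.
    pa_base::PlatformThreadForTesting::Join(handle);
  }
}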
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc
deleted file mode 100644
index 7c1c401..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-
-#include <pthread.h>
-#include <stddef.h>
-
-namespace partition_alloc::internal::base {
-
-void InitThreading() {}
-
-void TerminateOnThread() {}
-
-size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
-  return 0;
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h
deleted file mode 100644
index e7d2909..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base::internal {
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-// Current thread id is cached in thread local storage for performance reasons.
-// In some rare cases it's important to invalidate that cache explicitly (e.g.
-// after going through clone() syscall which does not call pthread_atfork()
-// handlers).
-// This can only be called when the process is single-threaded.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void InvalidateTidCache();
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-}  // namespace partition_alloc::internal::base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_linux_for_testing.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_linux_for_testing.cc
deleted file mode 100644
index 6218bad..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_linux_for_testing.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-
-#include <pthread.h>
-#include <stddef.h>
-
-namespace partition_alloc::internal::base {
-
-void InitThreading() {}
-
-void TerminateOnThread() {}
-
-size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
-#if !defined(THREAD_SANITIZER)
-  return 0;
-#else   // defined(THREAD_SANITIZER)
-  // ThreadSanitizer bloats the stack heavily. Evidence has been that the
-  // default stack size isn't enough for some browser tests.
-  return 2 * (1 << 23);  // 2 times 8192K (the default stack size on Linux).
-#endif  // defined(THREAD_SANITIZER)
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_mac_for_testing.mm b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_mac_for_testing.mm
deleted file mode 100644
index 4fc20a5..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_mac_for_testing.mm
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-
-#import <Foundation/Foundation.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mach/thread_policy.h>
-#include <mach/thread_switch.h>
-#include <stddef.h>
-#include <sys/resource.h>
-
-#include <algorithm>
-#include <atomic>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-// If Cocoa is to be used on more than one thread, it must know that the
-// application is multithreaded.  Since it's possible to enter Cocoa code
-// from threads created by pthread_create, Cocoa won't necessarily
-// be aware that the application is multithreaded.  Spawning an NSThread is
-// enough to get Cocoa to set up for multithreaded operation, so this is done
-// if necessary before pthread_create spawns any threads.
-//
-// http://developer.apple.com/documentation/Cocoa/Conceptual/Multithreading/CreatingThreads/chapter_4_section_4.html
-void InitThreading() {
-  static BOOL multithreaded = [NSThread isMultiThreaded];
-  if (!multithreaded) {
-    // +[NSObject class] is idempotent.
-    [NSThread detachNewThreadSelector:@selector(class)
-                             toTarget:[NSObject class]
-                           withObject:nil];
-    multithreaded = YES;
-
-    PA_BASE_DCHECK([NSThread isMultiThreaded]);
-  }
-}
-
-// static
-void PlatformThreadForTesting::YieldCurrentThread() {
-  // Don't use sched_yield(), as it can lead to 10ms delays.
-  //
-  // This only depresses the thread priority for 1ms, which is more in line
-  // with what calling code likely wants. See this bug in webkit for context:
-  // https://bugs.webkit.org/show_bug.cgi?id=204871
-  mach_msg_timeout_t timeout_ms = 1;
-  thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, timeout_ms);
-}
-
-size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
-#if BUILDFLAG(IS_IOS)
-  return 0;
-#else
-  // The Mac OS X default for a pthread stack size is 512kB.
-  // Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
-  // DEFAULT_STACK_SIZE for this purpose.
-  //
-  // 512kB isn't quite generous enough for some deeply recursive threads that
-  // otherwise request the default stack size by specifying 0. Here, adopt
-  // glibc's behavior as on Linux, which is to use the current stack size
-  // limit (ulimit -s) as the default stack size. See
-  // glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
-  // avoid setting the limit below the Mac OS X default or the minimum usable
-  // stack size, these values are also considered. If any of these values
-  // can't be determined, or if stack size is unlimited (ulimit -s unlimited),
-  // stack_size is left at 0 to get the system default.
-  //
-  // Mac OS X normally only applies ulimit -s to the main thread stack. On
-  // contemporary OS X and Linux systems alike, this value is generally 8MB
-  // or in that neighborhood.
-  size_t default_stack_size = 0;
-  struct rlimit stack_rlimit;
-  if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
-      getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
-      stack_rlimit.rlim_cur != RLIM_INFINITY) {
-    default_stack_size = std::max(
-        std::max(default_stack_size, static_cast<size_t>(PTHREAD_STACK_MIN)),
-        static_cast<size_t>(stack_rlimit.rlim_cur));
-  }
-  return default_stack_size;
-#endif
-}
-
-void TerminateOnThread() {}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix.cc
deleted file mode 100644
index 46cbada..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-
-#include <errno.h>
-#include <pthread.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-#include <sys/syscall.h>
-#include <atomic>
-#endif
-
-#if BUILDFLAG(IS_FUCHSIA)
-#include <zircon/process.h>
-#endif
-
-namespace partition_alloc::internal::base {
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-namespace {
-
-// Store the thread ids in local storage since calling the SWI can be
-// expensive and PlatformThread::CurrentId is used liberally.
-thread_local pid_t g_thread_id = -1;
-
-// A boolean value that indicates that the value stored in |g_thread_id| on the
-// main thread is invalid, because it hasn't been updated since the process
-// forked.
-//
-// This used to work by setting |g_thread_id| to -1 in a pthread_atfork handler.
-// However, when a multithreaded process forks, it is only allowed to call
-// async-signal-safe functions until it calls an exec() syscall. However,
-// accessing TLS may allocate (see crbug.com/1275748), which is not
-// async-signal-safe and therefore causes deadlocks, corruption, and crashes.
-//
-// It's Atomic to placate TSAN.
-std::atomic<bool> g_main_thread_tid_cache_valid = false;
-
-// Tracks whether the current thread is the main thread, and therefore whether
-// |g_main_thread_tid_cache_valid| is relevant for the current thread. This is
-// also updated by PlatformThread::CurrentId().
-thread_local bool g_is_main_thread = true;
-
-class InitAtFork {
- public:
-  InitAtFork() {
-    pthread_atfork(nullptr, nullptr, internal::InvalidateTidCache);
-  }
-};
-
-}  // namespace
-
-namespace internal {
-
-void InvalidateTidCache() {
-  g_main_thread_tid_cache_valid.store(false, std::memory_order_relaxed);
-}
-
-}  // namespace internal
-
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-// static
-PlatformThreadId PlatformThread::CurrentId() {
-  // Pthreads doesn't have the concept of a thread ID, so we have to reach down
-  // into the kernel.
-#if BUILDFLAG(IS_APPLE)
-  return pthread_mach_thread_np(pthread_self());
-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-  static InitAtFork init_at_fork;
-  if (g_thread_id == -1 ||
-      (g_is_main_thread &&
-       !g_main_thread_tid_cache_valid.load(std::memory_order_relaxed))) {
-    // Update the cached tid.
-    g_thread_id = syscall(__NR_gettid);
-    // If this is the main thread, we can mark the tid_cache as valid.
-    // Otherwise, stop the current thread from always entering this slow path.
-    if (g_thread_id == getpid()) {
-      g_main_thread_tid_cache_valid.store(true, std::memory_order_relaxed);
-    } else {
-      g_is_main_thread = false;
-    }
-  } else {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    if (g_thread_id != syscall(__NR_gettid)) {
-      PA_RAW_LOG(
-          FATAL,
-          "Thread id stored in TLS is different from thread id returned by "
-          "the system. It is likely that the process was forked without going "
-          "through fork().");
-    }
-#endif
-  }
-  return g_thread_id;
-#elif BUILDFLAG(IS_ANDROID)
-  // Note: do not cache the return value inside a thread_local variable on
-  // Android (as above). The reasons are:
-  // - thread_local is slow on Android (goes through emutls)
-  // - gettid() is fast, since its return value is cached in pthread (in the
-  //   thread control block of pthread). See gettid.c in bionic.
-  return gettid();
-#elif BUILDFLAG(IS_FUCHSIA)
-  return zx_thread_self();
-#elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX)
-  return pthread_self();
-#elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX)
-  return pthread_self();
-#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX)
-  return reinterpret_cast<int64_t>(pthread_self());
-#endif
-}
-
-// static
-PlatformThreadRef PlatformThread::CurrentRef() {
-  return PlatformThreadRef(pthread_self());
-}
-
-// static
-void PlatformThread::Sleep(TimeDelta duration) {
-  struct timespec sleep_time, remaining;
-
-  // Break the duration into seconds and nanoseconds.
-  // NOTE: TimeDelta's microseconds are int64s while timespec's
-  // nanoseconds are longs, so this unpacking must prevent overflow.
-  sleep_time.tv_sec = duration.InSeconds();
-  duration -= Seconds(sleep_time.tv_sec);
-  sleep_time.tv_nsec = duration.InMicroseconds() * 1000;  // nanoseconds
-
-  while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR)
-    sleep_time = remaining;
-}
-
-}  // namespace partition_alloc::internal::base
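The tid-caching scheme described in the comments above (a thread-local cache plus a pthread_atfork hook that marks it stale) can be summarized in a standalone sketch. This is a simplified, hypothetical Linux-only variant that invalidates the cache for any thread after fork, rather than tracking the main thread specially as the code above does.

#include <pthread.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>

namespace {

thread_local pid_t t_cached_tid = -1;
std::atomic<bool> g_tid_cache_valid{false};

// Runs in the child after fork(); restricted to a relaxed atomic store, as in
// the code above, to stay compatible with the async-signal-safety constraint.
void InvalidateTidCacheAfterFork() {
  g_tid_cache_valid.store(false, std::memory_order_relaxed);
}

pid_t CachedGettid() {
  // Register the atfork hook once, on first use.
  static const int atfork_result =
      pthread_atfork(nullptr, nullptr, &InvalidateTidCacheAfterFork);
  (void)atfork_result;
  if (t_cached_tid == -1 ||
      !g_tid_cache_valid.load(std::memory_order_relaxed)) {
    t_cached_tid = static_cast<pid_t>(syscall(__NR_gettid));
    g_tid_cache_valid.store(true, std::memory_order_relaxed);
  }
  return t_cached_tid;
}

}  // namespace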
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix_for_testing.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix_for_testing.cc
deleted file mode 100644
index 138b1d6..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix_for_testing.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-
-#include <errno.h>
-#include <pthread.h>
-#include <sched.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <memory>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_FUCHSIA)
-#include <zircon/process.h>
-#else
-#include <sys/resource.h>
-#endif
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-#endif
-
-namespace partition_alloc::internal::base {
-
-void InitThreading();
-void TerminateOnThread();
-size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
-
-namespace {
-
-struct ThreadParams {
-  PlatformThreadForTesting::Delegate* delegate = nullptr;
-};
-
-void* ThreadFunc(void* params) {
-  PlatformThreadForTesting::Delegate* delegate = nullptr;
-
-  {
-    std::unique_ptr<ThreadParams> thread_params(
-        static_cast<ThreadParams*>(params));
-
-    delegate = thread_params->delegate;
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-    PCScan::NotifyThreadCreated(GetStackPointer());
-#endif
-  }
-
-  delegate->ThreadMain();
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-  PCScan::NotifyThreadDestroyed();
-#endif
-
-  TerminateOnThread();
-  return nullptr;
-}
-
-bool CreateThread(size_t stack_size,
-                  PlatformThreadForTesting::Delegate* delegate,
-                  PlatformThreadHandle* thread_handle) {
-  PA_BASE_DCHECK(thread_handle);
-  base::InitThreading();
-
-  pthread_attr_t attributes;
-  pthread_attr_init(&attributes);
-
-  // Get a better default if available.
-  if (stack_size == 0) {
-    stack_size = base::GetDefaultThreadStackSize(attributes);
-  }
-
-  if (stack_size > 0) {
-    pthread_attr_setstacksize(&attributes, stack_size);
-  }
-
-  std::unique_ptr<ThreadParams> params(new ThreadParams);
-  params->delegate = delegate;
-
-  pthread_t handle;
-  int err = pthread_create(&handle, &attributes, ThreadFunc, params.get());
-  bool success = !err;
-  if (success) {
-    // ThreadParams should be deleted on the created thread after use.
-    std::ignore = params.release();
-  } else {
-    // Value of |handle| is undefined if pthread_create fails.
-    handle = 0;
-    errno = err;
-    PA_PLOG(ERROR) << "pthread_create";
-  }
-  *thread_handle = PlatformThreadHandle(handle);
-
-  pthread_attr_destroy(&attributes);
-
-  return success;
-}
-
-}  // namespace
-
-#if !BUILDFLAG(IS_APPLE)
-// static
-void PlatformThreadForTesting::YieldCurrentThread() {
-  sched_yield();
-}
-#endif  // !BUILDFLAG(IS_APPLE)
-
-// static
-bool PlatformThreadForTesting::Create(size_t stack_size,
-                                      Delegate* delegate,
-                                      PlatformThreadHandle* thread_handle) {
-  return CreateThread(stack_size, delegate, thread_handle);
-}
-
-// static
-void PlatformThreadForTesting::Join(PlatformThreadHandle thread_handle) {
-  // Joining another thread may block the current thread for a long time, since
-  // the thread referred to by |thread_handle| may still be running long-lived /
-  // blocking tasks.
-
-  // Remove ScopedBlockingCallWithBaseSyncPrimitives, because only partition
-  // alloc tests use PlatformThread::Join. So there is no special requirement
-  // to monitor blocking calls
-  // (by using ThreadGroupImpl::WorkerThreadDelegateImpl).
-  //
-  // base::internal::ScopedBlockingCallWithBaseSyncPrimitives
-  //   scoped_blocking_call(base::BlockingType::MAY_BLOCK);
-  PA_BASE_CHECK(0 == pthread_join(thread_handle.platform_handle(), nullptr));
-}
-
-// static
-size_t PlatformThreadForTesting::GetDefaultThreadStackSize() {
-  pthread_attr_t attributes;
-  pthread_attr_init(&attributes);
-  return base::GetDefaultThreadStackSize(attributes);
-}
-
-}  // namespace partition_alloc::internal::base
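The ownership handoff used in CreateThread() above (heap-allocate the start parameters, release them into pthread_create on success, delete them on failure, and reclaim them on the new thread) is a common pattern. A simplified, self-contained sketch under hypothetical names:

#include <pthread.h>

#include <memory>

namespace {

struct StartParams {
  void (*entry)(void*) = nullptr;
  void* arg = nullptr;
};

void* TrampolineFunc(void* raw) {
  // Take ownership back on the new thread; freed when this scope exits.
  std::unique_ptr<StartParams> params(static_cast<StartParams*>(raw));
  params->entry(params->arg);
  return nullptr;
}

bool StartDetachedThread(void (*entry)(void*), void* arg) {
  auto params = std::make_unique<StartParams>();
  params->entry = entry;
  params->arg = arg;
  pthread_t handle;
  if (pthread_create(&handle, nullptr, &TrampolineFunc, params.get()) != 0) {
    return false;  // On failure, |params| is still owned here and freed.
  }
  params.release();  // Success: the new thread now owns |params|.
  pthread_detach(handle);
  return true;
}

}  // namespace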
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h
deleted file mode 100644
index f7457a9..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// WARNING: *DO NOT* use this class directly. base::PlatformThreadRef is a
-// low-level platform-specific abstraction to the OS's threading interface.
-// Instead, consider using a message-loop driven base::Thread, see
-// base/threading/thread.h.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_REF_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_REF_H_
-
-#include <iosfwd>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#include <pthread.h>
-#endif
-
-namespace partition_alloc::internal::base {
-
-// Used for thread checking and debugging.
-// Meant to be as fast as possible.
-// These are produced by PlatformThread::CurrentRef(), and used to later
-// check if we are on the same thread or not by using ==. These are safe
-// to copy between threads, but can't be copied to another process as they
-// have no meaning there. Also, the internal identifier can be re-used
-// after a thread dies, so a PlatformThreadRef cannot be reliably used
-// to distinguish a new thread from an old, dead thread.
-class PlatformThreadRef {
- public:
-#if BUILDFLAG(IS_WIN)
-  using RefType = DWORD;
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  using RefType = pthread_t;
-#endif
-
-  constexpr PlatformThreadRef() = default;
-  explicit constexpr PlatformThreadRef(RefType id) : id_(id) {}
-
-  bool operator==(PlatformThreadRef other) const { return id_ == other.id_; }
-  bool operator!=(PlatformThreadRef other) const { return id_ != other.id_; }
-
-  bool is_null() const { return id_ == 0; }
-
- private:
-  RefType id_ = 0;
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_REF_H_
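As the comment above notes, PlatformThreadRef values come from PlatformThread::CurrentRef() and are compared with ==. A hypothetical thread-checking helper built on that (illustration only, not part of this change):

#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"

// Hypothetical helper: remembers the constructing thread and lets callers
// assert they are still on it. Refs may be copied across threads, but a ref
// can be reused after a thread dies, so this is a debugging aid only.
class ExampleThreadChecker {
 public:
  ExampleThreadChecker()
      : owning_thread_(
            partition_alloc::internal::base::PlatformThread::CurrentRef()) {}

  bool CalledOnValidThread() const {
    return owning_thread_ ==
           partition_alloc::internal::base::PlatformThread::CurrentRef();
  }

 private:
  const partition_alloc::internal::base::PlatformThreadRef owning_thread_;
};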
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win.cc
deleted file mode 100644
index c5ff387..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-
-#include <windows.h>
-
-namespace partition_alloc::internal::base {
-
-// static
-PlatformThreadId PlatformThread::CurrentId() {
-  return ::GetCurrentThreadId();
-}
-
-// static
-PlatformThreadRef PlatformThread::CurrentRef() {
-  return PlatformThreadRef(::GetCurrentThreadId());
-}
-
-// static
-PlatformThreadHandle PlatformThread::CurrentHandle() {
-  return PlatformThreadHandle(::GetCurrentThread());
-}
-
-// static
-void PlatformThread::Sleep(TimeDelta duration) {
-  // When measured with a high resolution clock, Sleep() sometimes returns much
-  // too early. We may need to call it repeatedly to get the desired duration.
-  // PlatformThread::Sleep doesn't support mock-time, so this always uses
-  // real-time.
-  const TimeTicks end = subtle::TimeTicksNowIgnoringOverride() + duration;
-  for (TimeTicks now = subtle::TimeTicksNowIgnoringOverride(); now < end;
-       now = subtle::TimeTicksNowIgnoringOverride()) {
-    ::Sleep(static_cast<DWORD>((end - now).InMillisecondsRoundedUp()));
-  }
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win_for_testing.cc b/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win_for_testing.cc
deleted file mode 100644
index 66ca33d..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_win_for_testing.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "build/build_config.h"
-
-#include <windows.h>
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-#endif
-
-namespace partition_alloc::internal::base {
-
-namespace {
-
-// base/win/scoped_handle.h is too heavyweight to pull in just to run
-// partition_alloc tests.
-class ScopedHandle {
- public:
-  ScopedHandle() : handle_(INVALID_HANDLE_VALUE) {}
-
-  ~ScopedHandle() {
-    if (handle_ != INVALID_HANDLE_VALUE)
-      CloseHandle(handle_);
-    handle_ = INVALID_HANDLE_VALUE;
-  }
-
-  void Set(HANDLE handle) {
-    if (handle != handle_) {
-      if (handle_ != INVALID_HANDLE_VALUE)
-        CloseHandle(handle_);
-      handle_ = handle;
-    }
-  }
-
- private:
-  HANDLE handle_;
-};
-
-struct ThreadParams {
-  PlatformThreadForTesting::Delegate* delegate = nullptr;
-};
-
-DWORD __stdcall ThreadFunc(void* params) {
-  ThreadParams* thread_params = static_cast<ThreadParams*>(params);
-  PlatformThreadForTesting::Delegate* delegate = thread_params->delegate;
-
-  // Retrieve a copy of the thread handle to use as the key in the
-  // thread name mapping.
-  PlatformThreadHandle::Handle platform_handle;
-  BOOL did_dup = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
-                                 GetCurrentProcess(), &platform_handle, 0,
-                                 FALSE, DUPLICATE_SAME_ACCESS);
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-  PCScan::NotifyThreadCreated(GetStackPointer());
-#endif
-
-  ScopedHandle scoped_platform_handle;
-  if (did_dup) {
-    scoped_platform_handle.Set(platform_handle);
-  }
-
-  delete thread_params;
-  delegate->ThreadMain();
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-  PCScan::NotifyThreadDestroyed();
-#endif
-  return 0;
-}
-
-// CreateThreadInternal() matches PlatformThread::CreateWithPriority(), except
-// that |out_thread_handle| may be nullptr, in which case a non-joinable thread
-// is created.
-bool CreateThreadInternal(size_t stack_size,
-                          PlatformThreadForTesting::Delegate* delegate,
-                          PlatformThreadHandle* out_thread_handle) {
-  unsigned int flags = 0;
-  if (stack_size > 0) {
-    flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
-#if defined(ARCH_CPU_32_BITS)
-  } else {
-    // The process stack size is increased to give spaces to |RendererMain| in
-    // |chrome/BUILD.gn|, but keep the default stack size of other threads to
-    // 1MB for the address space pressure.
-    flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
-    static BOOL is_wow64 = -1;
-    if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
-      is_wow64 = FALSE;
-    // When is_wow64 is set that means we are running on 64-bit Windows and we
-    // get 4 GiB of address space. In that situation we can afford to use 1 MiB
-    // of address space for stacks. When running on 32-bit Windows we only get
-    // 2 GiB of address space so we need to conserve. Typically stack usage on
-    // these threads is only about 100 KiB.
-    if (is_wow64)
-      stack_size = 1024 * 1024;
-    else
-      stack_size = 512 * 1024;
-#endif
-  }
-
-  ThreadParams* params = new ThreadParams;
-  params->delegate = delegate;
-
-  // Using CreateThread here vs _beginthreadex makes thread creation a bit
-  // faster and doesn't require the loader lock to be available.  Our code will
-  // have to work running on CreateThread() threads anyway, since we run code on
-  // the Windows thread pool, etc.  For some background on the difference:
-  // http://www.microsoft.com/msj/1099/win32/win321099.aspx
-  void* thread_handle =
-      ::CreateThread(nullptr, stack_size, ThreadFunc, params, flags, nullptr);
-
-  if (!thread_handle) {
-    DWORD last_error = ::GetLastError();
-
-    switch (last_error) {
-      case ERROR_NOT_ENOUGH_MEMORY:
-      case ERROR_OUTOFMEMORY:
-      case ERROR_COMMITMENT_LIMIT:
-        TerminateBecauseOutOfMemory(stack_size);
-        break;
-
-      default:
-        break;
-    }
-
-    delete params;
-    return false;
-  }
-
-  if (out_thread_handle)
-    *out_thread_handle = PlatformThreadHandle(thread_handle);
-  else
-    CloseHandle(thread_handle);
-  return true;
-}
-
-}  // namespace
-
-// static
-void PlatformThreadForTesting::YieldCurrentThread() {
-  ::Sleep(0);
-}
-
-// static
-void PlatformThreadForTesting::Join(PlatformThreadHandle thread_handle) {
-  PA_BASE_DCHECK(thread_handle.platform_handle());
-
-  DWORD thread_id = 0;
-  thread_id = ::GetThreadId(thread_handle.platform_handle());
-  DWORD last_error = 0;
-  if (!thread_id)
-    last_error = ::GetLastError();
-
-  // Record information about the exiting thread in case joining hangs.
-  base::debug::Alias(&thread_id);
-  base::debug::Alias(&last_error);
-
-  // Remove ScopedBlockingCallWithBaseSyncPrimitives, because only partition
-  // alloc tests use PlatformThread::Join. So there is no special requirement
-  // to monitor blocking calls
-  // (by using ThreadGroupImpl::WorkerThreadDelegateImpl).
-  //
-  // base::internal::ScopedBlockingCallWithBaseSyncPrimitives
-  //   scoped_blocking_call(base::BlockingType::MAY_BLOCK);
-
-  // Wait for the thread to exit.  It should already have terminated but make
-  // sure this assumption is valid.
-  PA_BASE_CHECK(WAIT_OBJECT_0 ==
-                WaitForSingleObject(thread_handle.platform_handle(), INFINITE));
-  CloseHandle(thread_handle.platform_handle());
-}
-
-// static
-bool PlatformThreadForTesting::Create(size_t stack_size,
-                                      Delegate* delegate,
-                                      PlatformThreadHandle* thread_handle) {
-  PA_BASE_DCHECK(thread_handle);
-  return CreateThreadInternal(stack_size, delegate, thread_handle);
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time.cc
deleted file mode 100644
index 1683ab3..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-#include <atomic>
-#include <cmath>
-#include <limits>
-#include <ostream>
-#include <tuple>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-
-namespace partition_alloc::internal::base {
-
-namespace internal {
-
-std::atomic<TimeNowFunction> g_time_now_function{
-    &subtle::TimeNowIgnoringOverride};
-
-std::atomic<TimeNowFunction> g_time_now_from_system_time_function{
-    &subtle::TimeNowFromSystemTimeIgnoringOverride};
-
-std::atomic<TimeTicksNowFunction> g_time_ticks_now_function{
-    &subtle::TimeTicksNowIgnoringOverride};
-
-std::atomic<ThreadTicksNowFunction> g_thread_ticks_now_function{
-    &subtle::ThreadTicksNowIgnoringOverride};
-
-}  // namespace internal
-
-// TimeDelta ------------------------------------------------------------------
-
-int TimeDelta::InDays() const {
-  if (!is_inf())
-    return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
-  return (delta_ < 0) ? std::numeric_limits<int>::min()
-                      : std::numeric_limits<int>::max();
-}
-
-int TimeDelta::InDaysFloored() const {
-  if (!is_inf()) {
-    const int result = delta_ / Time::kMicrosecondsPerDay;
-    // Convert |result| from truncating to flooring.
-    return (result * Time::kMicrosecondsPerDay > delta_) ? (result - 1)
-                                                         : result;
-  }
-  return (delta_ < 0) ? std::numeric_limits<int>::min()
-                      : std::numeric_limits<int>::max();
-}
-
-double TimeDelta::InMillisecondsF() const {
-  if (!is_inf())
-    return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
-  return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
-                      : std::numeric_limits<double>::infinity();
-}
-
-int64_t TimeDelta::InMilliseconds() const {
-  if (!is_inf())
-    return delta_ / Time::kMicrosecondsPerMillisecond;
-  return (delta_ < 0) ? std::numeric_limits<int64_t>::min()
-                      : std::numeric_limits<int64_t>::max();
-}
-
-int64_t TimeDelta::InMillisecondsRoundedUp() const {
-  if (!is_inf()) {
-    const int64_t result = delta_ / Time::kMicrosecondsPerMillisecond;
-    // Convert |result| from truncating to ceiling.
-    return (delta_ > result * Time::kMicrosecondsPerMillisecond) ? (result + 1)
-                                                                 : result;
-  }
-  return delta_;
-}
-
-double TimeDelta::InMicrosecondsF() const {
-  if (!is_inf())
-    return static_cast<double>(delta_);
-  return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
-                      : std::numeric_limits<double>::infinity();
-}
-
-TimeDelta TimeDelta::CeilToMultiple(TimeDelta interval) const {
-  if (is_inf() || interval.is_zero())
-    return *this;
-  const TimeDelta remainder = *this % interval;
-  if (delta_ < 0)
-    return *this - remainder;
-  return remainder.is_zero() ? *this
-                             : (*this - remainder + interval.magnitude());
-}
-
-TimeDelta TimeDelta::FloorToMultiple(TimeDelta interval) const {
-  if (is_inf() || interval.is_zero())
-    return *this;
-  const TimeDelta remainder = *this % interval;
-  if (delta_ < 0) {
-    return remainder.is_zero() ? *this
-                               : (*this - remainder - interval.magnitude());
-  }
-  return *this - remainder;
-}
-
-TimeDelta TimeDelta::RoundToMultiple(TimeDelta interval) const {
-  if (is_inf() || interval.is_zero())
-    return *this;
-  if (interval.is_inf())
-    return TimeDelta();
-  const TimeDelta half = interval.magnitude() / 2;
-  return (delta_ < 0) ? (*this - half).CeilToMultiple(interval)
-                      : (*this + half).FloorToMultiple(interval);
-}
-
-// Time -----------------------------------------------------------------------
-
-// static
-Time Time::Now() {
-  return internal::g_time_now_function.load(std::memory_order_relaxed)();
-}
-
-// static
-Time Time::NowFromSystemTime() {
-  // Just use g_time_now_function because it returns the system time.
-  return internal::g_time_now_from_system_time_function.load(
-      std::memory_order_relaxed)();
-}
-
-time_t Time::ToTimeT() const {
-  if (is_null())
-    return 0;  // Preserve 0 so we can tell it doesn't exist.
-  if (!is_inf() && ((std::numeric_limits<int64_t>::max() -
-                     kTimeTToMicrosecondsOffset) > us_))
-    return (*this - UnixEpoch()).InSeconds();
-  return (us_ < 0) ? std::numeric_limits<time_t>::min()
-                   : std::numeric_limits<time_t>::max();
-}
-
-// static
-Time Time::FromDoubleT(double dt) {
-  // Preserve 0 so we can tell it doesn't exist.
-  return (dt == 0 || std::isnan(dt)) ? Time() : (UnixEpoch() + Seconds(dt));
-}
-
-double Time::ToDoubleT() const {
-  if (is_null())
-    return 0;  // Preserve 0 so we can tell it doesn't exist.
-  if (!is_inf())
-    return (*this - UnixEpoch()).InSecondsF();
-  return (us_ < 0) ? -std::numeric_limits<double>::infinity()
-                   : std::numeric_limits<double>::infinity();
-}
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-// static
-Time Time::FromTimeSpec(const timespec& ts) {
-  return FromDoubleT(ts.tv_sec +
-                     static_cast<double>(ts.tv_nsec) / kNanosecondsPerSecond);
-}
-#endif
-
-// static
-Time Time::FromJsTime(double ms_since_epoch) {
-  // The epoch is a valid time, so this constructor doesn't interpret 0 as the
-  // null time.
-  return UnixEpoch() + Milliseconds(ms_since_epoch);
-}
-
-double Time::ToJsTime() const {
-  // Preserve 0 so the invalid result doesn't depend on the platform.
-  return is_null() ? 0 : ToJsTimeIgnoringNull();
-}
-
-double Time::ToJsTimeIgnoringNull() const {
-  // Preserve max and min without offset to prevent over/underflow.
-  if (!is_inf())
-    return (*this - UnixEpoch()).InMillisecondsF();
-  return (us_ < 0) ? -std::numeric_limits<double>::infinity()
-                   : std::numeric_limits<double>::infinity();
-}
-
-Time Time::FromJavaTime(int64_t ms_since_epoch) {
-  return UnixEpoch() + Milliseconds(ms_since_epoch);
-}
-
-int64_t Time::ToJavaTime() const {
-  // Preserve 0 so the invalid result doesn't depend on the platform.
-  if (is_null())
-    return 0;
-  if (!is_inf())
-    return (*this - UnixEpoch()).InMilliseconds();
-  return (us_ < 0) ? std::numeric_limits<int64_t>::min()
-                   : std::numeric_limits<int64_t>::max();
-}
-
-// static
-bool Time::FromMillisecondsSinceUnixEpoch(int64_t unix_milliseconds,
-                                          Time* time) {
-  // Adjust the provided time from milliseconds since the Unix epoch (1970) to
-  // microseconds since the Windows epoch (1601), avoiding overflows.
-  CheckedNumeric<int64_t> checked_microseconds_win_epoch = unix_milliseconds;
-  checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
-  checked_microseconds_win_epoch += kTimeTToMicrosecondsOffset;
-  *time = Time(checked_microseconds_win_epoch.ValueOrDefault(0));
-  return checked_microseconds_win_epoch.IsValid();
-}
-
-int64_t Time::ToRoundedDownMillisecondsSinceUnixEpoch() const {
-  constexpr int64_t kEpochOffsetMillis =
-      kTimeTToMicrosecondsOffset / kMicrosecondsPerMillisecond;
-  static_assert(kTimeTToMicrosecondsOffset % kMicrosecondsPerMillisecond == 0,
-                "assumption: no epoch offset sub-milliseconds");
-
-  // Compute the milliseconds since UNIX epoch without the possibility of
-  // under/overflow. Round the result towards -infinity.
-  //
-  // If |us_| is negative and includes fractions of a millisecond, subtract one
-  // more to effect the round towards -infinity. C-style integer truncation
-  // takes care of all other cases.
-  const int64_t millis = us_ / kMicrosecondsPerMillisecond;
-  const int64_t submillis = us_ % kMicrosecondsPerMillisecond;
-  return millis - kEpochOffsetMillis - (submillis < 0);
-}
-
-// TimeTicks ------------------------------------------------------------------
-
-// static
-TimeTicks TimeTicks::Now() {
-  return internal::g_time_ticks_now_function.load(std::memory_order_relaxed)();
-}
-
-// static
-TimeTicks TimeTicks::UnixEpoch() {
-  static const TimeTicks epoch([]() {
-    return subtle::TimeTicksNowIgnoringOverride() -
-           (subtle::TimeNowIgnoringOverride() - Time::UnixEpoch());
-  }());
-  return epoch;
-}
-
-TimeTicks TimeTicks::SnappedToNextTick(TimeTicks tick_phase,
-                                       TimeDelta tick_interval) const {
-  // |interval_offset| is the offset from |this| to the next multiple of
-  // |tick_interval| after |tick_phase|, possibly negative if in the past.
-  TimeDelta interval_offset = (tick_phase - *this) % tick_interval;
-  // If |this| is exactly on the interval (i.e. offset==0), don't adjust.
-  // Otherwise, if |tick_phase| was in the past, adjust forward to the next
-  // tick after |this|.
-  if (!interval_offset.is_zero() && tick_phase < *this)
-    interval_offset += tick_interval;
-  return *this + interval_offset;
-}
-
-// ThreadTicks ----------------------------------------------------------------
-
-// static
-ThreadTicks ThreadTicks::Now() {
-  return internal::g_thread_ticks_now_function.load(
-      std::memory_order_relaxed)();
-}
-
-}  // namespace partition_alloc::internal::base
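A few hypothetical spot checks of the truncation/flooring and snap-to-multiple semantics implemented above (test-style illustration; it assumes only the factory functions and PA_BASE_CHECK declared in the included headers):

#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"

namespace {

void TimeDeltaRoundingExamples() {
  using partition_alloc::internal::base::Milliseconds;
  using partition_alloc::internal::base::TimeDelta;

  const TimeDelta interval = Milliseconds(5);
  // 7 ms snapped to multiples of 5 ms.
  PA_BASE_CHECK(Milliseconds(7).CeilToMultiple(interval) == Milliseconds(10));
  PA_BASE_CHECK(Milliseconds(7).FloorToMultiple(interval) == Milliseconds(5));
  PA_BASE_CHECK(Milliseconds(7).RoundToMultiple(interval) == Milliseconds(5));
  // Flooring a negative value moves it toward -infinity.
  PA_BASE_CHECK(Milliseconds(-7).FloorToMultiple(interval) ==
                Milliseconds(-10));
  // InDays() truncates toward zero; InDaysFloored() rounds toward -infinity.
  PA_BASE_CHECK(Milliseconds(-1).InDays() == 0);
  PA_BASE_CHECK(Milliseconds(-1).InDaysFloored() == -1);
}

}  // namespace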
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time.h b/base/allocator/partition_allocator/partition_alloc_base/time/time.h
deleted file mode 100644
index 2f4b799..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time.h
+++ /dev/null
@@ -1,1016 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// `Time` represents an absolute point in coordinated universal time (UTC),
-// internally represented as microseconds (s/1,000,000) since the Windows epoch
-// (1601-01-01 00:00:00 UTC). System-dependent clock interface routines are
-// defined in time_PLATFORM.cc. Note that values for `Time` may skew and jump
-// around as the operating system makes adjustments to synchronize (e.g., with
-// NTP servers). Thus, client code that uses the `Time` class must account for
-// this.
-//
-// `TimeDelta` represents a duration of time, internally represented in
-// microseconds.
-//
-// `TimeTicks` and `ThreadTicks` represent an abstract time that is most of the
-// time incrementing, for use in measuring time durations. Internally, they are
-// represented in microseconds. They cannot be converted to a human-readable
-// time, but are guaranteed not to decrease (unlike the `Time` class). Note
-// that `TimeTicks` may "stand still" (e.g., if the computer is suspended), and
-// `ThreadTicks` will "stand still" whenever the thread has been de-scheduled
-// by the operating system.
-//
-// All time classes are copyable, assignable, and occupy 64 bits per instance.
-// Prefer to pass them by value, e.g.:
-//
-//   void MyFunction(TimeDelta arg);
-//
-// All time classes support `operator<<` with logging streams, e.g. `LOG(INFO)`.
-// For human-readable formatting, use //base/i18n/time_formatting.h.
-//
-// Example use cases for different time classes:
-//
-//   Time:        Interpreting the wall-clock time provided by a remote system.
-//                Detecting whether cached resources have expired. Providing the
-//                user with a display of the current date and time. Determining
-//                the amount of time between events across re-boots of the
-//                machine.
-//
-//   TimeTicks:   Tracking the amount of time a task runs. Executing delayed
-//                tasks at the right time. Computing presentation timestamps.
-//                Synchronizing audio and video using TimeTicks as a common
-//                reference clock (lip-sync). Measuring network round-trip
-//                latency.
-//
-//   ThreadTicks: Benchmarking how long the current thread has been doing actual
-//                work.
-//
-// Serialization:
-//
-// Use the helpers in //base/json/values_util.h when serializing `Time`
-// or `TimeDelta` to/from `base::Value`.
-//
-// Otherwise:
-//
-// - Time: use `FromDeltaSinceWindowsEpoch()`/`ToDeltaSinceWindowsEpoch()`.
-// - TimeDelta: use `base::Microseconds()`/`InMicroseconds()`.
-//
-// `TimeTicks` and `ThreadTicks` do not have a stable origin; serialization for
-// the purpose of persistence is not supported.
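To make the roles above concrete, a brief hypothetical usage sketch (illustration only): TimeTicks for durations, Time for wall-clock readings, and TimeDelta passed by value.

#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"

namespace {

using partition_alloc::internal::base::Time;
using partition_alloc::internal::base::TimeDelta;
using partition_alloc::internal::base::TimeTicks;

// TimeDelta is a 64-bit value type; pass it by value as recommended above.
void ReportElapsed(TimeDelta elapsed) {
  (void)elapsed;  // A real caller would record or log the duration here.
}

void MeasureSomething() {
  const TimeTicks start = TimeTicks::Now();  // Monotonic: right for durations.
  // ... do the work being measured ...
  ReportElapsed(TimeTicks::Now() - start);

  // Wall-clock time: may skew or jump as the OS adjusts the clock.
  const Time now = Time::Now();
  (void)now;
}

}  // namespace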
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TIME_TIME_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TIME_TIME_H_
-
-#include <stdint.h>
-#include <time.h>
-
-#include <iosfwd>
-#include <limits>
-
-#include "base/allocator/partition_allocator/chromeos_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE)
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#endif  // BUILDFLAG(IS_APPLE)
-
-#if BUILDFLAG(IS_FUCHSIA)
-#include <zircon/types.h>
-#endif
-
-#if BUILDFLAG(IS_APPLE)
-#include <CoreFoundation/CoreFoundation.h>
-#include <mach/mach_time.h>
-// Avoid Mac system header macro leak.
-#undef TYPE_BOOL
-#endif
-
-#if BUILDFLAG(IS_ANDROID)
-#include <jni.h>
-#endif
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
-
-namespace ABI {
-namespace Windows {
-namespace Foundation {
-struct DateTime;
-}  // namespace Foundation
-}  // namespace Windows
-}  // namespace ABI
-#endif
-
-namespace partition_alloc::internal::base {
-
-class TimeDelta;
-
-template <typename T>
-constexpr TimeDelta Microseconds(T n);
-
-#if BUILDFLAG(IS_WIN)
-class PlatformThreadHandle;
-#endif
-
-// TimeDelta ------------------------------------------------------------------
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeDelta {
- public:
-  constexpr TimeDelta() = default;
-
-#if BUILDFLAG(IS_WIN)
-  static TimeDelta FromQPCValue(LONGLONG qpc_value);
-  // TODO(crbug.com/989694): Avoid base::TimeDelta factory functions
-  // based on absolute time
-  static TimeDelta FromFileTime(FILETIME ft);
-  static TimeDelta FromWinrtDateTime(ABI::Windows::Foundation::DateTime dt);
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  static TimeDelta FromTimeSpec(const timespec& ts);
-#endif
-#if BUILDFLAG(IS_FUCHSIA)
-  static TimeDelta FromZxDuration(zx_duration_t nanos);
-#endif
-#if BUILDFLAG(IS_APPLE)
-#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-  static TimeDelta FromMachTime(uint64_t mach_time);
-#endif  // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-#endif  // BUILDFLAG(IS_APPLE)
-
-  // Converts an integer value representing TimeDelta to a class. This is used
-  // when deserializing a |TimeDelta| structure, using a value known to be
-  // compatible. It is not provided as a constructor because the integer type
-  // may be unclear from the perspective of a caller.
-  //
-  // DEPRECATED - Do not use in new code. http://crbug.com/634507
-  static constexpr TimeDelta FromInternalValue(int64_t delta) {
-    return TimeDelta(delta);
-  }
-
-  // Returns the maximum time delta, which should be greater than any reasonable
-  // time delta we might compare it to. If converted to double with ToDouble()
-  // it becomes an IEEE double infinity. Use FiniteMax() if you want a very
-  // large number that doesn't do this. TimeDelta math saturates at the end
-  // points so adding to TimeDelta::Max() leaves the value unchanged.
-  // Subtracting should leave the value unchanged but currently changes it
-  // TODO(https://crbug.com/869387).
-  static constexpr TimeDelta Max();
-
-  // Returns the minimum time delta, which should be less than any
-  // reasonable time delta we might compare it to. For more details see the
-  // comments for Max().
-  static constexpr TimeDelta Min();
-
-  // Returns the maximum time delta which is not equivalent to infinity. Only
-  // subtracting a finite time delta from this time delta has a defined result.
-  static constexpr TimeDelta FiniteMax();
-
-  // Returns the minimum time delta which is not equivalent to -infinity. Only
-  // adding a finite time delta to this time delta has a defined result.
-  static constexpr TimeDelta FiniteMin();
-
-  // Returns the internal numeric value of the TimeDelta object. Please don't
-  // use this and do arithmetic on it, as it is more error prone than using the
-  // provided operators.
-  // For serializing, use FromInternalValue to reconstitute.
-  //
-  // DEPRECATED - Do not use in new code. http://crbug.com/634507
-  constexpr int64_t ToInternalValue() const { return delta_; }
-
-  // Returns the magnitude (absolute value) of this TimeDelta.
-  constexpr TimeDelta magnitude() const { return TimeDelta(delta_.Abs()); }
-
-  // Returns true if the time delta is zero, positive, or negative,
-  // respectively.
-  constexpr bool is_zero() const { return delta_ == 0; }
-  constexpr bool is_positive() const { return delta_ > 0; }
-  constexpr bool is_negative() const { return delta_ < 0; }
-
-  // Returns true if the time delta is the maximum/minimum time delta.
-  constexpr bool is_max() const { return *this == Max(); }
-  constexpr bool is_min() const { return *this == Min(); }
-  constexpr bool is_inf() const { return is_min() || is_max(); }
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  struct timespec ToTimeSpec() const;
-#endif
-#if BUILDFLAG(IS_FUCHSIA)
-  zx_duration_t ToZxDuration() const;
-#endif
-#if BUILDFLAG(IS_WIN)
-  ABI::Windows::Foundation::DateTime ToWinrtDateTime() const;
-#endif
-
-  // Returns the frequency in Hertz (cycles per second) that has a period of
-  // *this.
-  constexpr double ToHz() const;
-
-  // Returns the time delta in some unit. Minimum argument values return as
-  // -inf for doubles and min type values otherwise. Maximum ones are treated as
-  // +inf for doubles and max type values otherwise. Their results will produce
-  // an is_min() or is_max() TimeDelta. The InXYZF versions return a floating
-  // point value. The InXYZ versions return a truncated value (aka rounded
-  // towards zero, std::trunc() behavior). The InXYZFloored() versions round to
-  // lesser integers (std::floor() behavior). The XYZRoundedUp() versions round
-  // up to greater integers (std::ceil() behavior). WARNING: Floating point
-  // arithmetic is such that XXX(t.InXXXF()) may not precisely equal |t|.
-  // Hence, floating point values should not be used for storage.
-  int InDays() const;
-  int InDaysFloored() const;
-  constexpr int InHours() const;
-  constexpr int InMinutes() const;
-  constexpr double InSecondsF() const;
-  constexpr int64_t InSeconds() const;
-  double InMillisecondsF() const;
-  int64_t InMilliseconds() const;
-  int64_t InMillisecondsRoundedUp() const;
-  constexpr int64_t InMicroseconds() const { return delta_; }
-  double InMicrosecondsF() const;
-  constexpr int64_t InNanoseconds() const;
-
-  // Computations with other deltas.
-  constexpr TimeDelta operator+(TimeDelta other) const;
-  constexpr TimeDelta operator-(TimeDelta other) const;
-
-  constexpr TimeDelta& operator+=(TimeDelta other) {
-    return *this = (*this + other);
-  }
-  constexpr TimeDelta& operator-=(TimeDelta other) {
-    return *this = (*this - other);
-  }
-  constexpr TimeDelta operator-() const {
-    if (!is_inf())
-      return TimeDelta(-delta_);
-    return (delta_ < 0) ? Max() : Min();
-  }
-
-  // Computations with numeric types.
-  template <typename T>
-  constexpr TimeDelta operator*(T a) const {
-    return TimeDelta(int64_t{delta_ * a});
-  }
-  template <typename T>
-  constexpr TimeDelta operator/(T a) const {
-    return TimeDelta(int64_t{delta_ / a});
-  }
-  template <typename T>
-  constexpr TimeDelta& operator*=(T a) {
-    return *this = (*this * a);
-  }
-  template <typename T>
-  constexpr TimeDelta& operator/=(T a) {
-    return *this = (*this / a);
-  }
-
-  // This does floating-point division. For an integer result, either call
-  // IntDiv(), or (possibly clearer) use this operator with
-  // base::Clamp{Ceil,Floor,Round}() or base::saturated_cast() (for truncation).
-  // Note that converting to double here drops precision to 53 bits.
-  constexpr double operator/(TimeDelta a) const {
-    // 0/0 and inf/inf (any combination of positive and negative) are invalid
-    // (they are almost certainly not intentional, and result in NaN, which
-    // turns into 0 if clamped to an integer; this makes introducing subtle bugs
-    // too easy).
-    PA_BASE_CHECK(!is_zero() || !a.is_zero());
-    PA_BASE_CHECK(!is_inf() || !a.is_inf());
-
-    return ToDouble() / a.ToDouble();
-  }
-  constexpr int64_t IntDiv(TimeDelta a) const {
-    if (!is_inf() && !a.is_zero())
-      return int64_t{delta_ / a.delta_};
-
-    // For consistency, use the same edge case CHECKs and behavior as the code
-    // above.
-    PA_BASE_CHECK(!is_zero() || !a.is_zero());
-    PA_BASE_CHECK(!is_inf() || !a.is_inf());
-    return ((delta_ < 0) == (a.delta_ < 0))
-               ? std::numeric_limits<int64_t>::max()
-               : std::numeric_limits<int64_t>::min();
-  }
-
-  constexpr TimeDelta operator%(TimeDelta a) const {
-    return TimeDelta(
-        (is_inf() || a.is_zero() || a.is_inf()) ? delta_ : (delta_ % a.delta_));
-  }
-  constexpr TimeDelta& operator%=(TimeDelta other) {
-    return *this = (*this % other);
-  }
-
-  // Comparison operators.
-  constexpr bool operator==(TimeDelta other) const {
-    return delta_ == other.delta_;
-  }
-  constexpr bool operator!=(TimeDelta other) const {
-    return delta_ != other.delta_;
-  }
-  constexpr bool operator<(TimeDelta other) const {
-    return delta_ < other.delta_;
-  }
-  constexpr bool operator<=(TimeDelta other) const {
-    return delta_ <= other.delta_;
-  }
-  constexpr bool operator>(TimeDelta other) const {
-    return delta_ > other.delta_;
-  }
-  constexpr bool operator>=(TimeDelta other) const {
-    return delta_ >= other.delta_;
-  }
-
-  // Returns this delta, ceiled/floored/rounded-away-from-zero to the nearest
-  // multiple of |interval|.
-  TimeDelta CeilToMultiple(TimeDelta interval) const;
-  TimeDelta FloorToMultiple(TimeDelta interval) const;
-  TimeDelta RoundToMultiple(TimeDelta interval) const;
-
- private:
-  // Constructs a delta given the duration in microseconds. This is private
-  // to avoid confusion by callers with an integer constructor. Use
-  // base::Seconds, base::Milliseconds, etc. instead.
-  constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
-  constexpr explicit TimeDelta(ClampedNumeric<int64_t> delta_us)
-      : delta_(delta_us) {}
-
-  // Returns a double representation of this TimeDelta's tick count.  In
-  // particular, Max()/Min() are converted to +/-infinity.
-  constexpr double ToDouble() const {
-    if (!is_inf())
-      return static_cast<double>(delta_);
-    return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
-                        : std::numeric_limits<double>::infinity();
-  }
-
-  // Delta in microseconds.
-  ClampedNumeric<int64_t> delta_ = 0;
-};
-
-constexpr TimeDelta TimeDelta::operator+(TimeDelta other) const {
-  if (!other.is_inf())
-    return TimeDelta(delta_ + other.delta_);
-
-  // Additions involving two infinities are only valid if signs match.
-  PA_BASE_CHECK(!is_inf() || (delta_ == other.delta_));
-  return other;
-}
-
-constexpr TimeDelta TimeDelta::operator-(TimeDelta other) const {
-  if (!other.is_inf())
-    return TimeDelta(delta_ - other.delta_);
-
-  // Subtractions involving two infinities are only valid if signs differ.
-  PA_BASE_CHECK(int64_t{delta_} != int64_t{other.delta_});
-  return (other.delta_ < 0) ? Max() : Min();
-}
-
-template <typename T>
-constexpr TimeDelta operator*(T a, TimeDelta td) {
-  return td * a;
-}
-
-// TimeBase--------------------------------------------------------------------
-
-// Do not reference the time_internal::TimeBase template class directly.  Please
-// use one of the time subclasses instead, and only reference the public
-// TimeBase members via those classes.
-namespace time_internal {
-
-// Provides value storage and comparison/math operations common to all time
-// classes. Each subclass provides for strong type-checking to ensure
-// semantically meaningful comparison/math of time values from the same clock
-// source or timeline.
-template <class TimeClass>
-class TimeBase {
- public:
-  static constexpr int64_t kHoursPerDay = 24;
-  static constexpr int64_t kSecondsPerMinute = 60;
-  static constexpr int64_t kMinutesPerHour = 60;
-  static constexpr int64_t kSecondsPerHour =
-      kSecondsPerMinute * kMinutesPerHour;
-  static constexpr int64_t kMillisecondsPerSecond = 1000;
-  static constexpr int64_t kMillisecondsPerDay =
-      kMillisecondsPerSecond * kSecondsPerHour * kHoursPerDay;
-  static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
-  static constexpr int64_t kMicrosecondsPerSecond =
-      kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
-  static constexpr int64_t kMicrosecondsPerMinute =
-      kMicrosecondsPerSecond * kSecondsPerMinute;
-  static constexpr int64_t kMicrosecondsPerHour =
-      kMicrosecondsPerMinute * kMinutesPerHour;
-  static constexpr int64_t kMicrosecondsPerDay =
-      kMicrosecondsPerHour * kHoursPerDay;
-  static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
-  static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
-  static constexpr int64_t kNanosecondsPerSecond =
-      kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
-
-  // Returns true if this object has not been initialized.
-  //
-  // Warning: Be careful when writing code that performs math on time values,
-  // since it's possible to produce a valid "zero" result that should not be
-  // interpreted as a "null" value.
-  constexpr bool is_null() const { return us_ == 0; }
-
-  // Returns true if this object represents the maximum/minimum time.
-  constexpr bool is_max() const { return *this == Max(); }
-  constexpr bool is_min() const { return *this == Min(); }
-  constexpr bool is_inf() const { return is_min() || is_max(); }
-
-  // Returns the maximum/minimum times, which should be greater/less than
-  // any reasonable time with which we might compare it.
-  static constexpr TimeClass Max() {
-    return TimeClass(std::numeric_limits<int64_t>::max());
-  }
-
-  static constexpr TimeClass Min() {
-    return TimeClass(std::numeric_limits<int64_t>::min());
-  }
-
-  // For legacy serialization only. When serializing to `base::Value`, prefer
-  // the helpers from //base/json/values_util.h instead. Otherwise, use
-  // `Time::ToDeltaSinceWindowsEpoch()` for `Time` and
-  // `TimeDelta::InMicroseconds()` for `TimeDelta`. See http://crbug.com/634507.
-  constexpr int64_t ToInternalValue() const { return us_; }
-
-  // The amount of time since the origin (or "zero") point. This is a syntactic
-  // convenience to aid in code readability, mainly for debugging/testing use
-  // cases.
-  //
-  // Warning: While the Time subclass has a fixed origin point, the origin for
-  // the other subclasses can vary each time the application is restarted.
-  constexpr TimeDelta since_origin() const;
-
-  // Compute the difference between two times.
-  constexpr TimeDelta operator-(const TimeBase<TimeClass>& other) const;
-
-  // Return a new time modified by some delta.
-  constexpr TimeClass operator+(TimeDelta delta) const;
-  constexpr TimeClass operator-(TimeDelta delta) const;
-
-  // Modify by some time delta.
-  constexpr TimeClass& operator+=(TimeDelta delta) {
-    return static_cast<TimeClass&>(*this = (*this + delta));
-  }
-  constexpr TimeClass& operator-=(TimeDelta delta) {
-    return static_cast<TimeClass&>(*this = (*this - delta));
-  }
-
-  // Comparison operators
-  constexpr bool operator==(const TimeBase<TimeClass>& other) const {
-    return us_ == other.us_;
-  }
-  constexpr bool operator!=(const TimeBase<TimeClass>& other) const {
-    return us_ != other.us_;
-  }
-  constexpr bool operator<(const TimeBase<TimeClass>& other) const {
-    return us_ < other.us_;
-  }
-  constexpr bool operator<=(const TimeBase<TimeClass>& other) const {
-    return us_ <= other.us_;
-  }
-  constexpr bool operator>(const TimeBase<TimeClass>& other) const {
-    return us_ > other.us_;
-  }
-  constexpr bool operator>=(const TimeBase<TimeClass>& other) const {
-    return us_ >= other.us_;
-  }
-
- protected:
-  constexpr explicit TimeBase(int64_t us) : us_(us) {}
-
-  // Time value in a microsecond timebase.
-  int64_t us_;
-};
-
-#if BUILDFLAG(IS_WIN)
-#if defined(ARCH_CPU_ARM64)
-// TSCTicksPerSecond is not supported on Windows on Arm systems because the
-// cycle-counting methods use the actual CPU cycle count, and not a consistently
-// incrementing counter.
-#else
-// Returns true if the CPU supports a constant-rate TSC.
-[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool HasConstantRateTSC();
-
-// Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
-// been measured yet. Needs to be guarded with a call to HasConstantRateTSC().
-[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) double TSCTicksPerSecond();
-#endif
-#endif  // BUILDFLAG(IS_WIN)
-
-}  // namespace time_internal
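The two Windows-only helpers above are meant to be used together: TSCTicksPerSecond() is only meaningful once HasConstantRateTSC() has returned true, and neither exists on Windows-on-Arm builds. A minimal sketch of that guard pattern, assuming a Windows x86/x64 build where this partition_alloc copy of the header is available (the wrapper function and its name are illustrative, not part of the source):

#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"

namespace pa_time = partition_alloc::internal::base::time_internal;

// Returns the measured TSC frequency in ticks per second, or 0.0 when the CPU
// lacks a constant-rate TSC (or when the frequency has not been measured yet).
double TscFrequencyOrZero() {
  if (!pa_time::HasConstantRateTSC())
    return 0.0;
  return pa_time::TSCTicksPerSecond();
}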
-
-template <class TimeClass>
-inline constexpr TimeClass operator+(TimeDelta delta, TimeClass t) {
-  return t + delta;
-}
-
-// Time -----------------------------------------------------------------------
-
-// Represents a wall clock time in UTC. Values are not guaranteed to be
-// monotonically non-decreasing and are subject to large amounts of skew.
-// Time is stored internally as microseconds since the Windows epoch (1601).
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) Time
-    : public time_internal::TimeBase<Time> {
- public:
-  // Offset of UNIX epoch (1970-01-01 00:00:00 UTC) from Windows FILETIME epoch
-  // (1601-01-01 00:00:00 UTC), in microseconds. This value is derived from the
-  // following: ((1970-1601)*365+89)*24*60*60*1000*1000, where 89 is the number
-  // of leap year days between 1601 and 1970: (1970-1601)/4 excluding 1700,
-  // 1800, and 1900.
-  static constexpr int64_t kTimeTToMicrosecondsOffset =
-      INT64_C(11644473600000000);
-
-#if BUILDFLAG(IS_WIN)
-  // To avoid overflow in QPC to Microseconds calculations, since we multiply
-  // by kMicrosecondsPerSecond, then the QPC value should not exceed
-  // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide then multiply.
-  static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
-#endif
-
-  // Contains the NULL time. Use Time::Now() to get the current time.
-  constexpr Time() : TimeBase(0) {}
-
-  // Returns the time for the epoch in Unix-like systems (Jan 1, 1970).
-  static constexpr Time UnixEpoch() { return Time(kTimeTToMicrosecondsOffset); }
-
-  // Returns the current time. Watch out, the system might adjust its clock
-  // in which case time will actually go backwards. We don't guarantee that
-  // times are increasing, or that two calls to Now() won't be the same.
-  static Time Now();
-
-  // Returns the current time. Same as Now() except that this function always
-  // uses system time so that there are no discrepancies between the returned
-  // time and system time even on virtual environments including our test bot.
-  // For timing sensitive unittests, this function should be used.
-  static Time NowFromSystemTime();
-
-  // Converts to/from TimeDeltas relative to the Windows epoch (1601-01-01
-  // 00:00:00 UTC).
-  //
-  // For serialization, when handling `base::Value`, prefer the helpers in
-  // //base/json/values_util.h instead. Otherwise, use these methods for
-  // opaque serialization and deserialization, e.g.
-  //
-  //   // Serialization:
-  //   base::Time last_updated = ...;
-  //   SaveToDatabase(last_updated.ToDeltaSinceWindowsEpoch().InMicroseconds());
-  //
-  //   // Deserialization:
-  //   base::Time last_updated = base::Time::FromDeltaSinceWindowsEpoch(
-  //       base::Microseconds(LoadFromDatabase()));
-  //
-  // Do not use `FromInternalValue()` or `ToInternalValue()` for this purpose.
-  static constexpr Time FromDeltaSinceWindowsEpoch(TimeDelta delta) {
-    return Time(delta.InMicroseconds());
-  }
-
-  constexpr TimeDelta ToDeltaSinceWindowsEpoch() const {
-    return Microseconds(us_);
-  }
-
-  // Converts to/from time_t in UTC and a Time class.
-  static constexpr Time FromTimeT(time_t tt);
-  time_t ToTimeT() const;
-
-  // Converts time to/from a double which is the number of seconds since the
-  // epoch (Jan 1, 1970). WebKit uses this format to represent time. Because
-  // WebKit initializes the double time value to 0 to indicate "not
-  // initialized", we map it to an empty Time object that also means "not
-  // initialized".
-  static Time FromDoubleT(double dt);
-  double ToDoubleT() const;
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  // Converts the timespec structure to time. MacOS X 10.8.3 (and tentatively,
-  // earlier versions) will have the |ts|'s tv_nsec component zeroed out,
-  // having a 1 second resolution, which agrees with
-  // https://developer.apple.com/legacy/library/#technotes/tn/tn1150.html#HFSPlusDates.
-  static Time FromTimeSpec(const timespec& ts);
-#endif
-
-  // Converts to/from the Javascript convention for times, a number of
-  // milliseconds since the epoch:
-  // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date/getTime.
-  //
-  // Don't use ToJsTime() in new code, since it contains a subtle hack (only
-  // exactly 1601-01-01 00:00 UTC is represented as 1970-01-01 00:00 UTC), and
-  // that is not appropriate for general use. Try to use ToJsTimeIgnoringNull()
-  // unless you have a very good reason to use ToJsTime().
-  static Time FromJsTime(double ms_since_epoch);
-  double ToJsTime() const;
-  double ToJsTimeIgnoringNull() const;
-
-  // Converts to/from Java convention for times, a number of milliseconds since
-  // the epoch. Because the Java format has less resolution, converting to Java
-  // time is a lossy operation.
-  static Time FromJavaTime(int64_t ms_since_epoch);
-  int64_t ToJavaTime() const;
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-  static Time FromTimeVal(struct timeval t);
-  struct timeval ToTimeVal() const;
-#endif
-
-#if BUILDFLAG(IS_FUCHSIA)
-  static Time FromZxTime(zx_time_t time);
-  zx_time_t ToZxTime() const;
-#endif
-
-#if BUILDFLAG(IS_APPLE)
-  static Time FromCFAbsoluteTime(CFAbsoluteTime t);
-  CFAbsoluteTime ToCFAbsoluteTime() const;
-#if defined(__OBJC__)
-  static Time FromNSDate(NSDate* date);
-  NSDate* ToNSDate() const;
-#endif
-#endif
-
-#if BUILDFLAG(IS_WIN)
-  static Time FromFileTime(FILETIME ft);
-  FILETIME ToFileTime() const;
-#endif  // BUILDFLAG(IS_WIN)
-
-  // For legacy deserialization only. Converts an integer value representing
-  // Time to a class. This may be used when deserializing a |Time| structure,
-  // using a value known to be compatible. It is not provided as a constructor
-  // because the integer type may be unclear from the perspective of a caller.
-  //
-  // DEPRECATED - Do not use in new code. When deserializing from `base::Value`,
-  // prefer the helpers from //base/json/values_util.h instead.
-  // Otherwise, use `Time::FromDeltaSinceWindowsEpoch()` for `Time` and
-  // `Microseconds()` for `TimeDelta`. http://crbug.com/634507
-  static constexpr Time FromInternalValue(int64_t us) { return Time(us); }
-
- private:
-  friend class time_internal::TimeBase<Time>;
-
-  constexpr explicit Time(int64_t microseconds_since_win_epoch)
-      : TimeBase(microseconds_since_win_epoch) {}
-
-  // Converts the provided time in milliseconds since the Unix epoch (1970) to a
-  // Time object, avoiding overflows.
-  [[nodiscard]] static bool FromMillisecondsSinceUnixEpoch(
-      int64_t unix_milliseconds,
-      Time* time);
-
-  // Returns the milliseconds since the Unix epoch (1970), rounding the
-  // microseconds towards -infinity.
-  int64_t ToRoundedDownMillisecondsSinceUnixEpoch() const;
-};
-
-// Factory methods that return a TimeDelta of the given unit.
-// WARNING: Floating point arithmetic is such that XXX(t.InXXXF()) may not
-// precisely equal |t|. Hence, floating point values should not be used for
-// storage.
-
-template <typename T>
-constexpr TimeDelta Days(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
-                                      Time::kMicrosecondsPerDay);
-}
-template <typename T>
-constexpr TimeDelta Hours(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
-                                      Time::kMicrosecondsPerHour);
-}
-template <typename T>
-constexpr TimeDelta Minutes(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
-                                      Time::kMicrosecondsPerMinute);
-}
-template <typename T>
-constexpr TimeDelta Seconds(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
-                                      Time::kMicrosecondsPerSecond);
-}
-template <typename T>
-constexpr TimeDelta Milliseconds(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
-                                      Time::kMicrosecondsPerMillisecond);
-}
-template <typename T>
-constexpr TimeDelta Microseconds(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n));
-}
-template <typename T>
-constexpr TimeDelta Nanoseconds(T n) {
-  return TimeDelta::FromInternalValue(MakeClampedNum(n) /
-                                      Time::kNanosecondsPerMicrosecond);
-}
-template <typename T>
-constexpr TimeDelta Hertz(T n) {
-  return n ? TimeDelta::FromInternalValue(Time::kMicrosecondsPerSecond /
-                                          MakeClampedNum(n))
-           : TimeDelta::Max();
-}
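
A hedged usage sketch of the factory helpers above, illustrating the floating-point warning; the function name is invented for the example and the code assumes the surrounding header is available:

#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"

namespace pa_base = partition_alloc::internal::base;

void TimeDeltaFactoryExamples() {
  // Integer-based construction is exact.
  pa_base::TimeDelta timeout = pa_base::Seconds(30);
  pa_base::TimeDelta frame = pa_base::Hertz(60);  // One frame at 60 Hz, ~16.7 ms.

  // Per the warning above, Seconds(t.InSecondsF()) may not equal t exactly,
  // so durations should be stored as TimeDelta (or integer microseconds),
  // not as a double number of seconds.
  pa_base::TimeDelta round_tripped = pa_base::Seconds(timeout.InSecondsF());
  (void)frame;
  (void)round_tripped;
}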
-
-// TimeDelta functions that must appear below the declarations of Time/TimeDelta
-
-constexpr double TimeDelta::ToHz() const {
-  return Seconds(1) / *this;
-}
-
-constexpr int TimeDelta::InHours() const {
-  // saturated_cast<> is necessary since very large (but still less than
-  // min/max) deltas would result in overflow.
-  return saturated_cast<int>(delta_ / Time::kMicrosecondsPerHour);
-}
-
-constexpr int TimeDelta::InMinutes() const {
-  // saturated_cast<> is necessary since very large (but still less than
-  // min/max) deltas would result in overflow.
-  return saturated_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
-}
-
-constexpr double TimeDelta::InSecondsF() const {
-  if (!is_inf())
-    return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
-  return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
-                      : std::numeric_limits<double>::infinity();
-}
-
-constexpr int64_t TimeDelta::InSeconds() const {
-  return is_inf() ? delta_ : (delta_ / Time::kMicrosecondsPerSecond);
-}
-
-constexpr int64_t TimeDelta::InNanoseconds() const {
-  return base::ClampMul(delta_, Time::kNanosecondsPerMicrosecond);
-}
-
-// static
-constexpr TimeDelta TimeDelta::Max() {
-  return TimeDelta(std::numeric_limits<int64_t>::max());
-}
-
-// static
-constexpr TimeDelta TimeDelta::Min() {
-  return TimeDelta(std::numeric_limits<int64_t>::min());
-}
-
-// static
-constexpr TimeDelta TimeDelta::FiniteMax() {
-  return TimeDelta(std::numeric_limits<int64_t>::max() - 1);
-}
-
-// static
-constexpr TimeDelta TimeDelta::FiniteMin() {
-  return TimeDelta(std::numeric_limits<int64_t>::min() + 1);
-}
-
-// TimeBase functions that must appear below the declarations of Time/TimeDelta
-namespace time_internal {
-
-template <class TimeClass>
-constexpr TimeDelta TimeBase<TimeClass>::since_origin() const {
-  return Microseconds(us_);
-}
-
-template <class TimeClass>
-constexpr TimeDelta TimeBase<TimeClass>::operator-(
-    const TimeBase<TimeClass>& other) const {
-  return Microseconds(us_ - other.us_);
-}
-
-template <class TimeClass>
-constexpr TimeClass TimeBase<TimeClass>::operator+(TimeDelta delta) const {
-  return TimeClass((Microseconds(us_) + delta).InMicroseconds());
-}
-
-template <class TimeClass>
-constexpr TimeClass TimeBase<TimeClass>::operator-(TimeDelta delta) const {
-  return TimeClass((Microseconds(us_) - delta).InMicroseconds());
-}
-
-}  // namespace time_internal
-
-// Time functions that must appear below the declarations of Time/TimeDelta
-
-// static
-constexpr Time Time::FromTimeT(time_t tt) {
-  if (tt == 0)
-    return Time();  // Preserve 0 so we can tell it doesn't exist.
-  return (tt == std::numeric_limits<time_t>::max())
-             ? Max()
-             : (UnixEpoch() + Seconds(tt));
-}
-
-// TimeTicks ------------------------------------------------------------------
-
-// Represents monotonically non-decreasing clock time.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeTicks
-    : public time_internal::TimeBase<TimeTicks> {
- public:
-  // The underlying clock used to generate new TimeTicks.
-  enum class Clock {
-    FUCHSIA_ZX_CLOCK_MONOTONIC,
-    LINUX_CLOCK_MONOTONIC,
-    IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
-    MAC_MACH_ABSOLUTE_TIME,
-    WIN_QPC,
-    WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
-  };
-
-  constexpr TimeTicks() : TimeBase(0) {}
-
-  // Platform-dependent tick count representing "right now." When
-  // IsHighResolution() returns false, the resolution of the clock could be
-  // as coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
-  // microsecond.
-  static TimeTicks Now();
-
-  // Returns true if the high resolution clock is working on this system and
-  // Now() will return high resolution values. Note that, on systems where the
-  // high resolution clock works but is deemed inefficient, the low resolution
-  // clock will be used instead.
-  [[nodiscard]] static bool IsHighResolution();
-
-  // Returns true if TimeTicks is consistent across processes, meaning that
-  // timestamps taken on different processes can be safely compared with one
-  // another. (Note that, even on platforms where this returns true, time values
-  // from different threads that are within one tick of each other must be
-  // considered to have an ambiguous ordering.)
-  [[nodiscard]] static bool IsConsistentAcrossProcesses();
-
-#if BUILDFLAG(IS_FUCHSIA)
-  // Converts between TimeTicks and a ZX_CLOCK_MONOTONIC zx_time_t value.
-  static TimeTicks FromZxTime(zx_time_t nanos_since_boot);
-  zx_time_t ToZxTime() const;
-#endif
-
-#if BUILDFLAG(IS_WIN)
-  // Translates an absolute QPC timestamp into a TimeTicks value. The returned
-  // value has the same origin as Now(). Do NOT attempt to use this if
-  // IsHighResolution() returns false.
-  static TimeTicks FromQPCValue(LONGLONG qpc_value);
-#endif
-
-#if BUILDFLAG(IS_APPLE)
-#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-  static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
-
-  // Sets the current Mach timebase to `timebase`. Returns the old timebase.
-  static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
-      mach_timebase_info_data_t timebase);
-
-#endif  // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-#endif  // BUILDFLAG(IS_APPLE)
-
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
-  // Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
-  // Note: this conversion may be non-monotonic in relation to previously
-  // obtained TimeTicks::Now() values because of the truncation (to
-  // milliseconds) performed by uptimeMillis().
-  static TimeTicks FromUptimeMillis(int64_t uptime_millis_value);
-
-#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
-
-#if BUILDFLAG(IS_ANDROID)
-  // Converts to TimeTicks the value obtained from System.nanoTime(). This
-  // conversion will be monotonic in relation to previously obtained
-  // TimeTicks::Now() values as the clocks are based on the same posix monotonic
-  // clock, with nanoTime() potentially providing higher resolution.
-  static TimeTicks FromJavaNanoTime(int64_t nano_time_value);
-
-  // Truncates the TimeTicks value to the precision of SystemClock#uptimeMillis.
-  // Note that the clocks already share the same monotonic clock source.
-  jlong ToUptimeMillis() const;
-
-  // Returns the TimeTicks value as microseconds in the timebase of
-  // SystemClock#uptimeMillis.
-  // Note that the clocks already share the same monotonic clock source.
-  //
-  // System.nanoTime() may be used to get sub-millisecond precision in Java code
-  // and may be compared against this value as the two share the same clock
-  // source (though be sure to convert nanos to micros).
-  jlong ToUptimeMicros() const;
-
-#endif  // BUILDFLAG(IS_ANDROID)
-
-  // Get an estimate of the TimeTicks value at the time of the UnixEpoch. Because
-  // Time and TimeTicks respond differently to user-set time and NTP
-  // adjustments, this number is only an estimate. Nevertheless, this can be
-  // useful when you need to relate the value of TimeTicks to a real time and
-  // date. Note: Upon first invocation, this function takes a snapshot of the
-  // realtime clock to establish a reference point.  This function will return
-  // the same value for the duration of the application, but will be different
-  // in future application runs.
-  static TimeTicks UnixEpoch();
-
-  // Returns |this| snapped to the next tick, given a |tick_phase| and
-  // repeating |tick_interval| in both directions. |this| may be before,
-  // after, or equal to the |tick_phase|.
-  TimeTicks SnappedToNextTick(TimeTicks tick_phase,
-                              TimeDelta tick_interval) const;
-
-  // Returns an enum indicating the underlying clock being used to generate
-  // TimeTicks timestamps. This function should only be used for debugging and
-  // logging purposes.
-  static Clock GetClock();
-
-  // Converts an integer value representing TimeTicks to a class. This may be
-  // used when deserializing a |TimeTicks| structure, using a value known to be
-  // compatible. It is not provided as a constructor because the integer type
-  // may be unclear from the perspective of a caller.
-  //
-  // DEPRECATED - Do not use in new code. For deserializing TimeTicks values,
-  // prefer TimeTicks + TimeDelta(); however, be aware that the origin is not
-  // fixed and may vary. Serializing for persistence is strongly discouraged.
-  // http://crbug.com/634507
-  static constexpr TimeTicks FromInternalValue(int64_t us) {
-    return TimeTicks(us);
-  }
-
- protected:
-#if BUILDFLAG(IS_WIN)
-  typedef DWORD (*TickFunctionType)(void);
-  static TickFunctionType SetMockTickFunction(TickFunctionType ticker);
-#endif
-
- private:
-  friend class time_internal::TimeBase<TimeTicks>;
-
-  // Please use Now() to create a new object. This is for internal use
-  // and testing.
-  constexpr explicit TimeTicks(int64_t us) : TimeBase(us) {}
-};
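
A small usage sketch for the UnixEpoch() and SnappedToNextTick() members documented in the class above; the function name and the 16 ms interval are illustrative only, and the code assumes the surrounding header is available:

#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"

namespace pa_base = partition_alloc::internal::base;

void TickAlignmentSketch() {
  pa_base::TimeTicks now = pa_base::TimeTicks::Now();

  // Relate the monotonic clock to wall-clock time via the UnixEpoch()
  // estimate; per the comment above, this is only an approximation.
  pa_base::TimeDelta approx_since_1970 = now - pa_base::TimeTicks::UnixEpoch();

  // Snap |now| onto a repeating 16 ms grid whose phase is UnixEpoch().
  pa_base::TimeTicks next_tick = now.SnappedToNextTick(
      pa_base::TimeTicks::UnixEpoch(), pa_base::Milliseconds(16));
  (void)approx_since_1970;
  (void)next_tick;
}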
-
-// ThreadTicks ----------------------------------------------------------------
-
-// Represents a clock, specific to a particular thread, that runs only while the
-// thread is running.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadTicks
-    : public time_internal::TimeBase<ThreadTicks> {
- public:
-  constexpr ThreadTicks() : TimeBase(0) {}
-
-  // Returns true if ThreadTicks::Now() is supported on this system.
-  [[nodiscard]] static bool IsSupported() {
-#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
-    BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
-    return true;
-#elif BUILDFLAG(IS_WIN)
-    return IsSupportedWin();
-#else
-    return false;
-#endif
-  }
-
-  // Waits until the initialization is completed. Needs to be guarded with a
-  // call to IsSupported().
-  static void WaitUntilInitialized() {
-#if BUILDFLAG(IS_WIN)
-    WaitUntilInitializedWin();
-#endif
-  }
-
-  // Returns thread-specific CPU-time on systems that support this feature.
-  // Needs to be guarded with a call to IsSupported(). Use this timer
-  // to (approximately) measure how much time the calling thread spent doing
-  // actual work vs. being de-scheduled. May return bogus results if the thread
-  // migrates to another CPU between two calls. Returns an empty ThreadTicks
-  // object until the initialization is completed. If a clock reading is
-  // absolutely needed, call WaitUntilInitialized() before this method.
-  static ThreadTicks Now();
-
-#if BUILDFLAG(IS_WIN)
-  // Similar to Now() above except this returns thread-specific CPU time for an
-  // arbitrary thread. All comments for the Now() method above apply to this
-  // method as well.
-  static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
-#endif
-
-  // Converts an integer value representing ThreadTicks to a class. This may be
-  // used when deserializing a |ThreadTicks| structure, using a value known to
-  // be compatible. It is not provided as a constructor because the integer type
-  // may be unclear from the perspective of a caller.
-  //
-  // DEPRECATED - Do not use in new code. For deserializing ThreadTicks values,
-  // prefer ThreadTicks + TimeDelta(); however, be aware that the origin is not
-  // fixed and may vary. Serializing for persistence is strongly
-  // discouraged. http://crbug.com/634507
-  static constexpr ThreadTicks FromInternalValue(int64_t us) {
-    return ThreadTicks(us);
-  }
-
- private:
-  friend class time_internal::TimeBase<ThreadTicks>;
-
-  // Please use Now() or GetForThread() to create a new object. This is for
-  // internal use and testing.
-  constexpr explicit ThreadTicks(int64_t us) : TimeBase(us) {}
-
-#if BUILDFLAG(IS_WIN)
-  [[nodiscard]] static bool IsSupportedWin();
-  static void WaitUntilInitializedWin();
-#endif
-};
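
Per the comments in the class above, ThreadTicks::Now() must be guarded by IsSupported() and, when a reading is required immediately, preceded by WaitUntilInitialized(). A minimal sketch of that pattern (the measuring helper is hypothetical, not part of the source):

#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"

namespace pa_base = partition_alloc::internal::base;

// Returns the CPU time the calling thread spent in |work|, or a zero
// TimeDelta when thread CPU time is unavailable on this platform.
pa_base::TimeDelta MeasureThreadCpuTime(void (*work)()) {
  if (!pa_base::ThreadTicks::IsSupported())
    return pa_base::TimeDelta();
  pa_base::ThreadTicks::WaitUntilInitialized();
  const pa_base::ThreadTicks start = pa_base::ThreadTicks::Now();
  work();
  return pa_base::ThreadTicks::Now() - start;
}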
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TIME_TIME_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_android.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time_android.cc
deleted file mode 100644
index fa814d2..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_android.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-namespace partition_alloc::internal::base {
-
-// static
-TimeTicks TimeTicks::FromUptimeMillis(int64_t uptime_millis_value) {
-  // The implementation of the SystemClock.uptimeMillis() in AOSP uses the same
-  // clock as base::TimeTicks::Now(): clock_gettime(CLOCK_MONOTONIC), see in
-  // platform/system/code:
-  // 1. libutils/SystemClock.cpp
-  // 2. libutils/Timers.cpp
-  //
-  // We are not aware of any motivations for Android OEMs to modify the AOSP
-  // implementation of either uptimeMillis() or clock_gettime(CLOCK_MONOTONIC),
-  // so we assume that there are no such customizations.
-  //
-  // Under these assumptions the conversion is as safe as copying the value of
-  // base::TimeTicks::Now() with a loss of sub-millisecond precision.
-  return TimeTicks(uptime_millis_value * Time::kMicrosecondsPerMillisecond);
-}
-
-// This file is included on chromeos_ash because it needs to interpret
-// UptimeMillis values from the Android container.
-#if BUILDFLAG(IS_ANDROID)
-
-// static
-TimeTicks TimeTicks::FromJavaNanoTime(int64_t nano_time_value) {
-  // The implementation of the System.nanoTime() in AOSP uses the same
-  // clock as UptimeMillis() and base::TimeTicks::Now():
-  // clock_gettime(CLOCK_MONOTONIC), see ojluni/src/main/native/System.c in
-  // AOSP.
-  //
-  // From Android documentation on android.os.SystemClock:
-  //   [uptimeMillis()] is the basis for most interval timing such as
-  //   Thread.sleep(millis), Object.wait(millis), and System.nanoTime().
-  //
-  // We are not aware of any motivations for Android OEMs to modify the AOSP
-  // implementation of either uptimeMillis(), nanoTime, or
-  // clock_gettime(CLOCK_MONOTONIC), so we assume that there are no such
-  // customizations.
-  //
-  // Under these assumptions the conversion is as safe as copying the value of
-  // base::TimeTicks::Now() without the (theoretical) sub-microsecond
-  // resolution.
-  return TimeTicks(nano_time_value / Time::kNanosecondsPerMicrosecond);
-}
-
-jlong TimeTicks::ToUptimeMillis() const {
-  // See FromUptimeMillis. UptimeMillis and TimeTicks use the same clock source,
-  // and only differ in resolution.
-  return us_ / Time::kMicrosecondsPerMillisecond;
-}
-
-jlong TimeTicks::ToUptimeMicros() const {
-  // Same as ToUptimeMillis but maintains sub-millisecond precision.
-  return us_;
-}
-
-#endif  // BUILDFLAG(IS_ANDROID)
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_conversion_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time_conversion_posix.cc
deleted file mode 100644
index e713eb3..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_conversion_posix.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-#include <stdint.h>
-#include <sys/time.h>
-#include <time.h>
-
-#include <limits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-namespace partition_alloc::internal::base {
-
-// static
-TimeDelta TimeDelta::FromTimeSpec(const timespec& ts) {
-  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
-                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-}
-
-struct timespec TimeDelta::ToTimeSpec() const {
-  int64_t microseconds = InMicroseconds();
-  time_t seconds = 0;
-  if (microseconds >= Time::kMicrosecondsPerSecond) {
-    seconds = InSeconds();
-    microseconds -= seconds * Time::kMicrosecondsPerSecond;
-  }
-  struct timespec result = {
-      seconds,
-      static_cast<long>(microseconds * Time::kNanosecondsPerMicrosecond)};
-  return result;
-}
-
-// static
-Time Time::FromTimeVal(struct timeval t) {
-  PA_BASE_DCHECK(t.tv_usec < static_cast<int>(Time::kMicrosecondsPerSecond));
-  PA_BASE_DCHECK(t.tv_usec >= 0);
-  if (t.tv_usec == 0 && t.tv_sec == 0)
-    return Time();
-  if (t.tv_usec == static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1 &&
-      t.tv_sec == std::numeric_limits<time_t>::max())
-    return Max();
-  return Time((static_cast<int64_t>(t.tv_sec) * Time::kMicrosecondsPerSecond) +
-              t.tv_usec + kTimeTToMicrosecondsOffset);
-}
-
-struct timeval Time::ToTimeVal() const {
-  struct timeval result;
-  if (is_null()) {
-    result.tv_sec = 0;
-    result.tv_usec = 0;
-    return result;
-  }
-  if (is_max()) {
-    result.tv_sec = std::numeric_limits<time_t>::max();
-    result.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
-    return result;
-  }
-  int64_t us = us_ - kTimeTToMicrosecondsOffset;
-  result.tv_sec = us / Time::kMicrosecondsPerSecond;
-  result.tv_usec = us % Time::kMicrosecondsPerSecond;
-  return result;
-}
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_fuchsia.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time_fuchsia.cc
deleted file mode 100644
index 8b14d13..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_fuchsia.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-#include <threads.h>
-#include <zircon/syscalls.h>
-#include <zircon/threads.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-
-namespace partition_alloc::internal::base {
-
-// Time -----------------------------------------------------------------------
-
-namespace subtle {
-Time TimeNowIgnoringOverride() {
-  timespec ts;
-  int status = timespec_get(&ts, TIME_UTC);
-  PA_BASE_CHECK(status != 0);
-  return Time::FromTimeSpec(ts);
-}
-
-Time TimeNowFromSystemTimeIgnoringOverride() {
-  // Just use TimeNowIgnoringOverride() because it returns the system time.
-  return TimeNowIgnoringOverride();
-}
-}  // namespace subtle
-
-// TimeTicks ------------------------------------------------------------------
-
-namespace subtle {
-TimeTicks TimeTicksNowIgnoringOverride() {
-  const zx_time_t nanos_since_boot = zx_clock_get_monotonic();
-  PA_BASE_CHECK(0 != nanos_since_boot);
-  return TimeTicks::FromZxTime(nanos_since_boot);
-}
-}  // namespace subtle
-
-// static
-TimeDelta TimeDelta::FromZxDuration(zx_duration_t nanos) {
-  return Nanoseconds(nanos);
-}
-
-zx_duration_t TimeDelta::ToZxDuration() const {
-  return InNanoseconds();
-}
-
-// static
-Time Time::FromZxTime(zx_time_t nanos_since_unix_epoch) {
-  return UnixEpoch() + Nanoseconds(nanos_since_unix_epoch);
-}
-
-zx_time_t Time::ToZxTime() const {
-  return (*this - UnixEpoch()).InNanoseconds();
-}
-
-// static
-TimeTicks::Clock TimeTicks::GetClock() {
-  return Clock::FUCHSIA_ZX_CLOCK_MONOTONIC;
-}
-
-// static
-bool TimeTicks::IsHighResolution() {
-  return true;
-}
-
-// static
-bool TimeTicks::IsConsistentAcrossProcesses() {
-  return true;
-}
-
-// static
-TimeTicks TimeTicks::FromZxTime(zx_time_t nanos_since_boot) {
-  return TimeTicks() + Nanoseconds(nanos_since_boot);
-}
-
-zx_time_t TimeTicks::ToZxTime() const {
-  return (*this - TimeTicks()).InNanoseconds();
-}
-
-// ThreadTicks ----------------------------------------------------------------
-
-namespace subtle {
-ThreadTicks ThreadTicksNowIgnoringOverride() {
-  zx_info_thread_stats_t info;
-  zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
-                                          ZX_INFO_THREAD_STATS, &info,
-                                          sizeof(info), nullptr, nullptr);
-  PA_BASE_CHECK(status == ZX_OK);
-  return ThreadTicks() + Nanoseconds(info.total_runtime);
-}
-}  // namespace subtle
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_mac.mm b/base/allocator/partition_allocator/partition_alloc_base/time/time_mac.mm
deleted file mode 100644
index 86aa44e..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_mac.mm
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-#import <Foundation/Foundation.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/sysctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-
-#if BUILDFLAG(IS_IOS)
-#include <errno.h>
-#endif
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-namespace {
-
-#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-// Returns a pointer to the initialized Mach timebase info struct.
-mach_timebase_info_data_t* MachTimebaseInfo() {
-  static mach_timebase_info_data_t timebase_info = []() {
-    mach_timebase_info_data_t info;
-    kern_return_t kr = mach_timebase_info(&info);
-    PA_BASE_DCHECK(kr == KERN_SUCCESS) << "mach_timebase_info";
-    PA_BASE_DCHECK(info.numer);
-    PA_BASE_DCHECK(info.denom);
-    return info;
-  }();
-  return &timebase_info;
-}
-
-int64_t MachTimeToMicroseconds(uint64_t mach_time) {
-  // timebase_info gives us the conversion factor between absolute time tick
-  // units and nanoseconds.
-  mach_timebase_info_data_t* timebase_info = MachTimebaseInfo();
-
-  // Take the fast path when the conversion is 1:1. The result is guaranteed to
-  // fit into an int64_t because we're going from nanoseconds to microseconds.
-  if (timebase_info->numer == timebase_info->denom) {
-    return static_cast<int64_t>(mach_time / Time::kNanosecondsPerMicrosecond);
-  }
-
-  uint64_t microseconds = 0;
-  const uint64_t divisor =
-      timebase_info->denom * Time::kNanosecondsPerMicrosecond;
-
-  // Microseconds is mach_time * timebase.numer /
-  // (timebase.denom * kNanosecondsPerMicrosecond). Divide first to reduce
-  // the chance of overflow. Also stash the remainder right now, a likely
-  // byproduct of the division.
-  microseconds = mach_time / divisor;
-  const uint64_t mach_time_remainder = mach_time % divisor;
-
-  // Now multiply, keeping an eye out for overflow.
-  PA_BASE_CHECK(!__builtin_umulll_overflow(microseconds, timebase_info->numer,
-                                           &microseconds));
-
-  // By dividing first we lose precision. Regain it by adding back the
-  // microseconds from the remainder, with an eye out for overflow.
-  uint64_t least_significant_microseconds =
-      (mach_time_remainder * timebase_info->numer) / divisor;
-  PA_BASE_CHECK(!__builtin_uaddll_overflow(
-      microseconds, least_significant_microseconds, &microseconds));
-
-  // Don't bother with the rollover handling that the Windows version does.
-  // The returned time in microseconds is enough for 292,277 years (starting
-  // from 2^63 because the returned int64_t is signed,
-  // 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
-  return checked_cast<int64_t>(microseconds);
-}
-#endif  // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
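
As a worked illustration of the divide-first strategy used in MachTimeToMicroseconds() above (the timebase numbers are illustrative, not taken from any particular device): with numer = 125 and denom = 3, the divisor is 3 * 1000 = 3000, so mach_time = 24,000,000 ticks gives 24,000,000 / 3000 = 8,000 with remainder 0; multiplying by numer yields 8,000 * 125 = 1,000,000 microseconds, exactly one second, and no intermediate value comes anywhere near overflowing 64 bits.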
-
-// Returns monotonically growing number of ticks in microseconds since some
-// unspecified starting point.
-int64_t ComputeCurrentTicks() {
-#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-  struct timespec tp;
-  // clock_gettime() returns 0 on success and -1 on failure. Failure can only
-  // happen because of bad arguments (unsupported clock type or timespec
-  // pointer out of accessible address space). Here it is known that neither
-  // can happen since the timespec parameter is stack allocated right above and
-  // `CLOCK_MONOTONIC` is supported on all versions of iOS that Chrome is
-  // supported on.
-  int res = clock_gettime(CLOCK_MONOTONIC, &tp);
-  PA_BASE_DCHECK(0 == res) << "Failed clock_gettime, errno: " << errno;
-
-  return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
-#else
-  // mach_absolute_time is it when it comes to ticks on the Mac.  Other calls
-  // with less precision (such as TickCount) just call through to
-  // mach_absolute_time.
-  return MachTimeToMicroseconds(mach_absolute_time());
-#endif  // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-}
-
-int64_t ComputeThreadTicks() {
-  // The pthreads library keeps a cached reference to the thread port, which
-  // does not have to be released like mach_thread_self() does.
-  mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
-  if (thread_port == MACH_PORT_NULL) {
-    PA_DLOG(ERROR) << "Failed to get pthread_mach_thread_np()";
-    return 0;
-  }
-
-  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
-  thread_basic_info_data_t thread_info_data;
-
-  kern_return_t kr = thread_info(
-      thread_port, THREAD_BASIC_INFO,
-      reinterpret_cast<thread_info_t>(&thread_info_data), &thread_info_count);
-  PA_BASE_DCHECK(kr == KERN_SUCCESS) << "thread_info";
-
-  CheckedNumeric<int64_t> absolute_micros(thread_info_data.user_time.seconds +
-                                          thread_info_data.system_time.seconds);
-  absolute_micros *= Time::kMicrosecondsPerSecond;
-  absolute_micros += (thread_info_data.user_time.microseconds +
-                      thread_info_data.system_time.microseconds);
-  return absolute_micros.ValueOrDie();
-}
-
-}  // namespace
-
-// The Time routines in this file use Mach and CoreFoundation APIs, since the
-// POSIX definition of time_t in Mac OS X wraps around after 2038--and
-// there are already cookie expiration dates, etc., past that time out in
-// the field.  Using CFDate prevents that problem, and using mach_absolute_time
-// for TimeTicks gives us nice high-resolution interval timing.
-
-// Time -----------------------------------------------------------------------
-
-namespace subtle {
-Time TimeNowIgnoringOverride() {
-  return Time::FromCFAbsoluteTime(CFAbsoluteTimeGetCurrent());
-}
-
-Time TimeNowFromSystemTimeIgnoringOverride() {
-  // Just use TimeNowIgnoringOverride() because it returns the system time.
-  return TimeNowIgnoringOverride();
-}
-}  // namespace subtle
-
-// static
-Time Time::FromCFAbsoluteTime(CFAbsoluteTime t) {
-  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
-                "CFAbsoluteTime must have an infinity value");
-  if (t == 0)
-    return Time();  // Consider 0 as a null Time.
-  return (t == std::numeric_limits<CFAbsoluteTime>::infinity())
-             ? Max()
-             : (UnixEpoch() +
-                Seconds(double{t + kCFAbsoluteTimeIntervalSince1970}));
-}
-
-CFAbsoluteTime Time::ToCFAbsoluteTime() const {
-  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
-                "CFAbsoluteTime must have an infinity value");
-  if (is_null())
-    return 0;  // Consider 0 as a null Time.
-  return is_max() ? std::numeric_limits<CFAbsoluteTime>::infinity()
-                  : (CFAbsoluteTime{(*this - UnixEpoch()).InSecondsF()} -
-                     kCFAbsoluteTimeIntervalSince1970);
-}
-
-// static
-Time Time::FromNSDate(NSDate* date) {
-  PA_BASE_DCHECK(date);
-  return FromCFAbsoluteTime(date.timeIntervalSinceReferenceDate);
-}
-
-NSDate* Time::ToNSDate() const {
-  return [NSDate dateWithTimeIntervalSinceReferenceDate:ToCFAbsoluteTime()];
-}
-
-// TimeDelta ------------------------------------------------------------------
-
-#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-// static
-TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
-  return Microseconds(MachTimeToMicroseconds(mach_time));
-}
-#endif  // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-
-// TimeTicks ------------------------------------------------------------------
-
-namespace subtle {
-TimeTicks TimeTicksNowIgnoringOverride() {
-  return TimeTicks() + Microseconds(ComputeCurrentTicks());
-}
-}  // namespace subtle
-
-// static
-bool TimeTicks::IsHighResolution() {
-  return true;
-}
-
-// static
-bool TimeTicks::IsConsistentAcrossProcesses() {
-  return true;
-}
-
-#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-// static
-TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
-  return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
-}
-
-// static
-mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
-    mach_timebase_info_data_t timebase) {
-  mach_timebase_info_data_t orig_timebase = *MachTimebaseInfo();
-
-  *MachTimebaseInfo() = timebase;
-
-  return orig_timebase;
-}
-
-#endif  // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-
-// static
-TimeTicks::Clock TimeTicks::GetClock() {
-#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-  return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
-#else
-  return Clock::MAC_MACH_ABSOLUTE_TIME;
-#endif  // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-}
-
-// ThreadTicks ----------------------------------------------------------------
-
-namespace subtle {
-ThreadTicks ThreadTicksNowIgnoringOverride() {
-  return ThreadTicks() + Microseconds(ComputeThreadTicks());
-}
-}  // namespace subtle
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_now_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time_now_posix.cc
deleted file mode 100644
index 12cd6bf..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_now_posix.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-#include <stdint.h>
-#include <sys/time.h>
-#include <time.h>
-
-#include "build/build_config.h"
-#if BUILDFLAG(IS_ANDROID) && !defined(__LP64__)
-#include <time64.h>
-#endif
-#include <unistd.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-
-// Ensure the Fuchsia and Mac builds do not include this module. Instead,
-// a non-POSIX implementation is used for sampling the system clocks.
-#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE)
-#error "This implementation is for POSIX platforms other than Fuchsia or Mac."
-#endif
-
-namespace partition_alloc::internal::base {
-
-namespace {
-
-int64_t ConvertTimespecToMicros(const struct timespec& ts) {
-  // On 32-bit systems, the calculation cannot overflow int64_t.
-  // 2**32 * 1000000 + 2**64 / 1000 < 2**63
-  if (sizeof(ts.tv_sec) <= 4 && sizeof(ts.tv_nsec) <= 8) {
-    int64_t result = ts.tv_sec;
-    result *= Time::kMicrosecondsPerSecond;
-    result += (ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-    return result;
-  }
-  CheckedNumeric<int64_t> result(ts.tv_sec);
-  result *= Time::kMicrosecondsPerSecond;
-  result += (ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-  return result.ValueOrDie();
-}
-
-// Helper function to get results from clock_gettime() and convert to a
-// microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
-// on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
-// _POSIX_MONOTONIC_CLOCK to -1.
-#if (BUILDFLAG(IS_POSIX) && defined(_POSIX_MONOTONIC_CLOCK) && \
-     _POSIX_MONOTONIC_CLOCK >= 0) ||                           \
-    BUILDFLAG(IS_BSD) || BUILDFLAG(IS_ANDROID)
-int64_t ClockNow(clockid_t clk_id) {
-  struct timespec ts;
-  PA_BASE_CHECK(clock_gettime(clk_id, &ts) == 0);
-  return ConvertTimespecToMicros(ts);
-}
-#else  // _POSIX_MONOTONIC_CLOCK
-#error No usable tick clock function on this platform.
-#endif  // _POSIX_MONOTONIC_CLOCK
-
-}  // namespace
-
-// Time -----------------------------------------------------------------------
-
-namespace subtle {
-Time TimeNowIgnoringOverride() {
-  struct timeval tv;
-  struct timezone tz = {0, 0};  // UTC
-  PA_BASE_CHECK(gettimeofday(&tv, &tz) == 0);
-  // Combine seconds and microseconds in a 64-bit field containing microseconds
-  // since the epoch.  That's enough for nearly 600 centuries.  Adjust from
-  // Unix (1970) to Windows (1601) epoch.
-  return Time() +
-         Microseconds((tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec) +
-                      Time::kTimeTToMicrosecondsOffset);
-}
-
-Time TimeNowFromSystemTimeIgnoringOverride() {
-  // Just use TimeNowIgnoringOverride() because it returns the system time.
-  return TimeNowIgnoringOverride();
-}
-}  // namespace subtle
-
-// TimeTicks ------------------------------------------------------------------
-
-namespace subtle {
-TimeTicks TimeTicksNowIgnoringOverride() {
-  return TimeTicks() + Microseconds(ClockNow(CLOCK_MONOTONIC));
-}
-}  // namespace subtle
-
-// static
-TimeTicks::Clock TimeTicks::GetClock() {
-  return Clock::LINUX_CLOCK_MONOTONIC;
-}
-
-// static
-bool TimeTicks::IsHighResolution() {
-  return true;
-}
-
-// static
-bool TimeTicks::IsConsistentAcrossProcesses() {
-  return true;
-}
-
-// ThreadTicks ----------------------------------------------------------------
-
-namespace subtle {
-ThreadTicks ThreadTicksNowIgnoringOverride() {
-#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
-    BUILDFLAG(IS_ANDROID)
-  return ThreadTicks() + Microseconds(ClockNow(CLOCK_THREAD_CPUTIME_ID));
-#else
-  PA_NOTREACHED();
-#endif
-}
-}  // namespace subtle
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_override.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time_override.cc
deleted file mode 100644
index dda5134..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_override.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-
-namespace partition_alloc::internal::base::subtle {
-
-// static
-bool ScopedTimeClockOverrides::overrides_active_ = false;
-
-ScopedTimeClockOverrides::ScopedTimeClockOverrides(
-    TimeNowFunction time_override,
-    TimeTicksNowFunction time_ticks_override,
-    ThreadTicksNowFunction thread_ticks_override) {
-  PA_BASE_DCHECK(!overrides_active_);
-  overrides_active_ = true;
-  if (time_override) {
-    internal::g_time_now_function.store(time_override,
-                                        std::memory_order_relaxed);
-    internal::g_time_now_from_system_time_function.store(
-        time_override, std::memory_order_relaxed);
-  }
-  if (time_ticks_override) {
-    internal::g_time_ticks_now_function.store(time_ticks_override,
-                                              std::memory_order_relaxed);
-  }
-  if (thread_ticks_override) {
-    internal::g_thread_ticks_now_function.store(thread_ticks_override,
-                                                std::memory_order_relaxed);
-  }
-}
-
-ScopedTimeClockOverrides::~ScopedTimeClockOverrides() {
-  internal::g_time_now_function.store(&TimeNowIgnoringOverride);
-  internal::g_time_now_from_system_time_function.store(
-      &TimeNowFromSystemTimeIgnoringOverride);
-  internal::g_time_ticks_now_function.store(&TimeTicksNowIgnoringOverride);
-  internal::g_thread_ticks_now_function.store(&ThreadTicksNowIgnoringOverride);
-  overrides_active_ = false;
-}
-
-}  // namespace partition_alloc::internal::base::subtle
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_override.h b/base/allocator/partition_allocator/partition_alloc_base/time/time_override.h
deleted file mode 100644
index 69f4772..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_override.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TIME_TIME_OVERRIDE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TIME_TIME_OVERRIDE_H_
-
-#include <atomic>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-using TimeNowFunction = decltype(&Time::Now);
-using TimeTicksNowFunction = decltype(&TimeTicks::Now);
-using ThreadTicksNowFunction = decltype(&ThreadTicks::Now);
-
-// Time overrides should be used with extreme caution. Discuss with //base/time
-// OWNERS before adding a new one.
-namespace subtle {
-
-// Override the return value of Time::Now and Time::NowFromSystemTime /
-// TimeTicks::Now / ThreadTicks::Now to emulate time, e.g. for tests or to
-// modify progression of time. It is recommended that the override be set while
-// single-threaded and before the first call to Now() to avoid threading issues
-// and inconsistencies in returned values. Overriding time while other threads
-// are running is very subtle and should be reserved for developer only use
-// cases (e.g. virtual time in devtools) where any flakiness caused by a racy
-// time update isn't surprising. Instantiating a ScopedTimeClockOverrides while
-// other threads are running might break their expectation that TimeTicks and
-// ThreadTicks increase monotonically. Nested overrides are not allowed.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedTimeClockOverrides {
- public:
-  // Pass |nullptr| for any override if it shouldn't be overridden.
-  ScopedTimeClockOverrides(TimeNowFunction time_override,
-                           TimeTicksNowFunction time_ticks_override,
-                           ThreadTicksNowFunction thread_ticks_override);
-
-  ScopedTimeClockOverrides(const ScopedTimeClockOverrides&) = delete;
-  ScopedTimeClockOverrides& operator=(const ScopedTimeClockOverrides&) = delete;
-
-  // Restores the platform default Now() functions.
-  ~ScopedTimeClockOverrides();
-
-  static bool overrides_active() { return overrides_active_; }
-
- private:
-  static bool overrides_active_;
-};
-
-// These methods return the platform default Time::Now / TimeTicks::Now /
-// ThreadTicks::Now values even while an override is in place. These methods
-// should only be used in places where emulated time should be disregarded. For
-// example, they can be used to implement test timeouts for tests that may
-// override time.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) Time TimeNowIgnoringOverride();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-Time TimeNowFromSystemTimeIgnoringOverride();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeTicks TimeTicksNowIgnoringOverride();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-ThreadTicks ThreadTicksNowIgnoringOverride();
-
-}  // namespace subtle
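
A hedged usage sketch of ScopedTimeClockOverrides as documented above; the fake clock functions and the test helper are invented for the example, and the code assumes this header is available:

#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"

namespace pa_base = partition_alloc::internal::base;

// Illustrative fixed-clock functions matching TimeNowFunction and
// TimeTicksNowFunction.
pa_base::Time FakeTimeNow() {
  return pa_base::Time::UnixEpoch() + pa_base::Days(1);
}
pa_base::TimeTicks FakeTimeTicksNow() {
  return pa_base::TimeTicks() + pa_base::Seconds(42);
}

void RunTestWithFrozenClocks() {
  // Install overrides while single-threaded, as recommended above. Passing
  // nullptr leaves ThreadTicks::Now() at its platform default.
  pa_base::subtle::ScopedTimeClockOverrides overrides(
      &FakeTimeNow, &FakeTimeTicksNow, /*thread_ticks_override=*/nullptr);
  // ... code under test observes the frozen Time/TimeTicks here ...
}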
-
-namespace internal {
-
-// These function pointers are used by platform-independent implementations of
-// the Now() methods and ScopedTimeClockOverrides. They are set to point to the
-// respective NowIgnoringOverride functions by default, but can also be set by
-// platform-specific code to select a default implementation at runtime, thereby
-// avoiding the indirection via the NowIgnoringOverride functions. Note that the
-// pointers can be overridden and later reset to the NowIgnoringOverride
-// functions by ScopedTimeClockOverrides.
-extern std::atomic<TimeNowFunction> g_time_now_function;
-extern std::atomic<TimeNowFunction> g_time_now_from_system_time_function;
-extern std::atomic<TimeTicksNowFunction> g_time_ticks_now_function;
-extern std::atomic<ThreadTicksNowFunction> g_thread_ticks_now_function;
-
-}  // namespace internal
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TIME_TIME_OVERRIDE_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/time/time_win.cc b/base/allocator/partition_allocator/partition_alloc_base/time/time_win.cc
deleted file mode 100644
index 143c59c..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/time/time_win.cc
+++ /dev/null
@@ -1,553 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Windows Timer Primer
-//
-// A good article:  http://www.ddj.com/windows/184416651
-// A good mozilla bug:  http://bugzilla.mozilla.org/show_bug.cgi?id=363258
-//
-// The default windows timer, GetSystemTimeAsFileTime is not very precise.
-// It is only good to ~15.5ms.
-//
-// QueryPerformanceCounter is the logical choice for a high-precision timer.
-// However, it is known to be buggy on some hardware.  Specifically, it can
-// sometimes "jump".  On laptops, QPC can also be very expensive to call.
-// It's 3-4x slower than timeGetTime() on desktops, but can be 10x slower
-// on laptops.  A unittest exists which will show the relative cost of various
-// timers on any system.
-//
-// The next logical choice is timeGetTime().  timeGetTime has a precision of
-// 1ms, but only if you call APIs (timeBeginPeriod()) which affect all other
-// applications on the system.  By default, precision is only 15.5ms.
-// Unfortunately, we don't want to call timeBeginPeriod because we don't
-// want to affect other applications.  Further, on mobile platforms, use of
-// faster multimedia timers can hurt battery life.  See the intel
-// article about this here:
-// http://softwarecommunity.intel.com/articles/eng/1086.htm
-//
-// To work around all this, we're going to generally use timeGetTime().  We
-// will only increase the system-wide timer if we're not running on battery
-// power.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-
-#include <windows.foundation.h>
-#include <windows.h>
-
-#include <mmsystem.h>
-
-#include <stdint.h>
-
-#include <atomic>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bit_cast.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal::base {
-
-namespace {
-
-// From MSDN, FILETIME "Contains a 64-bit value representing the number of
-// 100-nanosecond intervals since January 1, 1601 (UTC)."
-int64_t FileTimeToMicroseconds(const FILETIME& ft) {
-  // Need to bit_cast to fix alignment, then divide by 10 to convert
-  // 100-nanoseconds to microseconds. This only works on little-endian
-  // machines.
-  return bit_cast<int64_t, FILETIME>(ft) / 10;
-}
-
-bool CanConvertToFileTime(int64_t us) {
-  return us >= 0 && us <= (std::numeric_limits<int64_t>::max() / 10);
-}
-
-FILETIME MicrosecondsToFileTime(int64_t us) {
-  PA_BASE_DCHECK(CanConvertToFileTime(us))
-      << "Out-of-range: Cannot convert " << us
-      << " microseconds to FILETIME units.";
-
-  // Multiply by 10 to convert microseconds to 100-nanoseconds. Bit_cast will
-  // handle alignment problems. This only works on little-endian machines.
-  return bit_cast<FILETIME, int64_t>(us * 10);
-}
-
-int64_t CurrentWallclockMicroseconds() {
-  FILETIME ft;
-  ::GetSystemTimeAsFileTime(&ft);
-  return FileTimeToMicroseconds(ft);
-}
-
-// Time between resampling the un-granular clock for this API.
-constexpr TimeDelta kMaxTimeToAvoidDrift = Seconds(60);
-
-int64_t g_initial_time = 0;
-TimeTicks g_initial_ticks;
-
-void InitializeClock() {
-  g_initial_ticks = subtle::TimeTicksNowIgnoringOverride();
-  g_initial_time = CurrentWallclockMicroseconds();
-}
-
-// Returns the current value of the performance counter.
-uint64_t QPCNowRaw() {
-  LARGE_INTEGER perf_counter_now = {};
-  // According to the MSDN documentation for QueryPerformanceCounter(), this
-  // will never fail on systems that run XP or later.
-  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
-  ::QueryPerformanceCounter(&perf_counter_now);
-  return perf_counter_now.QuadPart;
-}
-
-}  // namespace
-
-// Time -----------------------------------------------------------------------
-
-namespace subtle {
-Time TimeNowIgnoringOverride() {
-  if (g_initial_time == 0)
-    InitializeClock();
-
-  // We implement time using the high-resolution timers so that we can get
-  // timeouts which are smaller than 10-15ms.  If we just used
-  // CurrentWallclockMicroseconds(), we'd have the less-granular timer.
-  //
-  // To make this work, we initialize the clock (g_initial_time) and the
-  // tick counter (g_initial_ticks).  To compute the current time, we measure
-  // how many ticks have elapsed since initialization and add that delta.
-  //
-  // To avoid any drift, we periodically resync the counters to the system
-  // clock.
-  while (true) {
-    TimeTicks ticks = TimeTicksNowIgnoringOverride();
-
-    // Calculate the time elapsed since we started our timer
-    TimeDelta elapsed = ticks - g_initial_ticks;
-
-    // Check if enough time has elapsed that we need to resync the clock.
-    if (elapsed > kMaxTimeToAvoidDrift) {
-      InitializeClock();
-      continue;
-    }
-
-    return Time() + elapsed + Microseconds(g_initial_time);
-  }
-}
-
-Time TimeNowFromSystemTimeIgnoringOverride() {
-  // Force resync.
-  InitializeClock();
-  return Time() + Microseconds(g_initial_time);
-}
-}  // namespace subtle
-
-// static
-Time Time::FromFileTime(FILETIME ft) {
-  if (bit_cast<int64_t, FILETIME>(ft) == 0)
-    return Time();
-  if (ft.dwHighDateTime == std::numeric_limits<DWORD>::max() &&
-      ft.dwLowDateTime == std::numeric_limits<DWORD>::max())
-    return Max();
-  return Time(FileTimeToMicroseconds(ft));
-}
-
-FILETIME Time::ToFileTime() const {
-  if (is_null())
-    return bit_cast<FILETIME, int64_t>(0);
-  if (is_max()) {
-    FILETIME result;
-    result.dwHighDateTime = std::numeric_limits<DWORD>::max();
-    result.dwLowDateTime = std::numeric_limits<DWORD>::max();
-    return result;
-  }
-  return MicrosecondsToFileTime(us_);
-}
-
-// TimeTicks ------------------------------------------------------------------
-
-namespace {
-
-// We define a wrapper to adapt between the __stdcall and __cdecl calling
-// conventions of the mock function, and to avoid a static constructor.
-// Assigning an import to a function pointer directly would require setup code
-// to fetch from the IAT.
-DWORD timeGetTimeWrapper() {
-  return timeGetTime();
-}
-
-DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
-
-// A structure holding the most significant bits of "last seen" and a
-// "rollover" counter.
-union LastTimeAndRolloversState {
-  // The state as a single 32-bit opaque value.
-  std::atomic<int32_t> as_opaque_32{0};
-
-  // The state as usable values.
-  struct {
-    // The top 8-bits of the "last" time. This is enough to check for rollovers
-    // and the small bit-size means fewer CompareAndSwap operations to store
-    // changes in state, which in turn makes for fewer retries.
-    uint8_t last_8;
-    // A count of the number of detected rollovers. Using this as bits 47-32
-    // of the upper half of a 64-bit value results in a 48-bit tick counter.
-    // This extends the total rollover period from about 49 days to about 8800
-    // years while still allowing it to be stored with last_8 in a single
-    // 32-bit value.
-    uint16_t rollovers;
-  } as_values;
-};
-std::atomic<int32_t> g_last_time_and_rollovers = 0;
-static_assert(sizeof(LastTimeAndRolloversState) <=
-                  sizeof(g_last_time_and_rollovers),
-              "LastTimeAndRolloversState does not fit in a single atomic word");
-
-// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
-// because it returns the number of milliseconds since Windows has started,
-// which will roll over the 32-bit value every ~49 days.  We try to track
-// rollover ourselves, which works if TimeTicks::Now() is called at least every
-// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
-TimeTicks RolloverProtectedNow() {
-  LastTimeAndRolloversState state;
-  DWORD now;  // DWORD is always unsigned 32 bits.
-
-  while (true) {
-    // Fetch the "now" and "last" tick values, updating "last" with "now" and
-    // incrementing the "rollovers" counter if the tick-value has wrapped back
-    // around. Atomic operations ensure that both "last" and "rollovers" are
-    // always updated together.
-    int32_t original =
-        g_last_time_and_rollovers.load(std::memory_order_acquire);
-    state.as_opaque_32 = original;
-    now = g_tick_function();
-    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
-    if (now_8 < state.as_values.last_8)
-      ++state.as_values.rollovers;
-    state.as_values.last_8 = now_8;
-
-    // If the state hasn't changed, exit the loop.
-    if (state.as_opaque_32 == original)
-      break;
-
-    // Save the changed state. If the existing value is unchanged from the
-    // original, exit the loop.
-    int32_t check = g_last_time_and_rollovers.compare_exchange_strong(
-        original, state.as_opaque_32, std::memory_order_release);
-    if (check == original)
-      break;
-
-    // Another thread has done something in between so retry from the top.
-  }
-
-  return TimeTicks() +
-         Milliseconds(now +
-                      (static_cast<uint64_t>(state.as_values.rollovers) << 32));
-}
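As a minimal standalone sketch (names are hypothetical, not part of this file), the rollover arithmetic above reduces to the following: track only the top byte of the 32-bit millisecond tick, bump a 16-bit rollover count whenever that byte moves backwards, and splice the count in as bits 32..47.

#include <cstdint>

uint64_t ExtendTicks(uint32_t now_ms, uint8_t& last_high_byte,
                     uint16_t& rollovers) {
  // If the top byte decreased, the 32-bit counter must have wrapped.
  const uint8_t now_high = static_cast<uint8_t>(now_ms >> 24);
  if (now_high < last_high_byte)
    ++rollovers;
  last_high_byte = now_high;
  // The rollover count becomes bits 32..47, giving a 48-bit tick value.
  return static_cast<uint64_t>(now_ms) |
         (static_cast<uint64_t>(rollovers) << 32);
}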
-
-// Discussion of tick counter options on Windows:
-//
-// (1) CPU cycle counter. (Retrieved via RDTSC)
-// The CPU counter provides the highest resolution time stamp and is the least
-// expensive to retrieve. However, on older CPUs, two issues can affect its
-// reliability: First it is maintained per processor and not synchronized
-// between processors. Also, the counters will change frequency due to thermal
-// and power changes, and stop in some states.
-//
-// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
-// resolution (<1 microsecond) time stamp. On most hardware running today, it
-// auto-detects and uses the constant-rate RDTSC counter to provide extremely
-// efficient and reliable time stamps.
-//
-// On older CPUs where RDTSC is unreliable, it falls back to using more
-// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
-// PM timer, and can involve system calls; and all this is up to the HAL (with
-// some help from ACPI). According to
-// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
-// worst case, it gets the counter from the rollover interrupt on the
-// programmable interrupt timer. In best cases, the HAL may conclude that the
-// RDTSC counter runs at a constant frequency, then it uses that instead. On
-// multiprocessor machines, it will try to verify the values returned from
-// RDTSC on each processor are consistent with each other, and apply a handful
-// of workarounds for known buggy hardware. In other words, QPC is supposed to
-// give consistent results on a multiprocessor computer, but for older CPUs it
-// can be unreliable due to bugs in the BIOS or HAL.
-//
-// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
-// milliseconds) time stamp but is comparatively less expensive to retrieve and
-// more reliable. Time::EnableHighResolutionTimer() and
-// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
-// this timer; and also other Windows applications can alter it, affecting this
-// one.
-
-TimeTicks InitialNowFunction();
-
-// See "threading notes" in InitializeNowFunctionPointer() for details on how
-// concurrent reads/writes to these globals have been made safe.
-std::atomic<TimeTicksNowFunction> g_time_ticks_now_ignoring_override_function{
-    &InitialNowFunction};
-int64_t g_qpc_ticks_per_second = 0;
-
-TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
-  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
-  // InitializeNowFunctionPointer(), has happened by this point.
-  std::atomic_thread_fence(std::memory_order_acquire);
-
-  PA_BASE_DCHECK(g_qpc_ticks_per_second > 0);
-
-  // If the QPC Value is below the overflow threshold, we proceed with
-  // simple multiply and divide.
-  if (qpc_value < Time::kQPCOverflowThreshold) {
-    return Microseconds(qpc_value * Time::kMicrosecondsPerSecond /
-                        g_qpc_ticks_per_second);
-  }
-  // Otherwise, calculate microseconds in a roundabout manner to avoid
-  // overflow and precision issues.
-  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
-  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
-  return Microseconds((whole_seconds * Time::kMicrosecondsPerSecond) +
-                      ((leftover_ticks * Time::kMicrosecondsPerSecond) /
-                       g_qpc_ticks_per_second));
-}
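A small sketch of why the split above avoids overflow, assuming a hypothetical 10 MHz counter: multiplying the raw counter by the microseconds-per-second constant first would exceed int64_t once the counter passes roughly 2^63 / 10^6, about 9.2e12 ticks (around 10.7 days at 10 MHz), so the conversion handles whole seconds and leftover ticks separately.

#include <cstdint>

int64_t QpcToMicrosecondsSafe(int64_t qpc_value, int64_t ticks_per_second) {
  const int64_t whole_seconds = qpc_value / ticks_per_second;
  const int64_t leftover_ticks = qpc_value % ticks_per_second;
  // leftover_ticks is bounded by ticks_per_second, so multiplying it by
  // 1,000,000 stays comfortably within int64_t for realistic frequencies.
  return whole_seconds * 1000000 +
         (leftover_ticks * 1000000) / ticks_per_second;
}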
-
-TimeTicks QPCNow() {
-  return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw());
-}
-
-void InitializeNowFunctionPointer() {
-  LARGE_INTEGER ticks_per_sec = {};
-  if (!QueryPerformanceFrequency(&ticks_per_sec))
-    ticks_per_sec.QuadPart = 0;
-
-  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
-  // the low-resolution clock.
-  //
-  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
-  // will still use the low-resolution clock. A CPU lacking a non-stop time
-  // counter will cause Windows to provide an alternate QPC implementation that
-  // works, but is expensive to use.
-  //
-  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
-  // ~72% of users fall within this category.
-  CPU cpu;
-  const TimeTicksNowFunction now_function =
-      (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter())
-          ? &RolloverProtectedNow
-          : &QPCNow;
-
-  // Threading note 1: In an unlikely race condition, it's possible for two or
-  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
-  // not a problem since all threads end up writing out the same values
-  // to the global variables, and those variables, being atomic, are safe to
-  // read from other threads.
-  //
-  // Threading note 2: A release fence is placed here to ensure, from the
-  // perspective of other threads using the function pointers, that the
-  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
-  // are changed.
-  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
-  std::atomic_thread_fence(std::memory_order_release);
-  // Also set g_time_ticks_now_function to avoid the additional indirection via
-  // TimeTicksNowIgnoringOverride() for future calls to TimeTicks::Now(), only
-  // if it wasn't already overridden to a different value. memory_order_relaxed
-  // is sufficient since an explicit fence was inserted above.
-  base::TimeTicksNowFunction initial_time_ticks_now_function =
-      &subtle::TimeTicksNowIgnoringOverride;
-  internal::g_time_ticks_now_function.compare_exchange_strong(
-      initial_time_ticks_now_function, now_function, std::memory_order_relaxed);
-  g_time_ticks_now_ignoring_override_function.store(now_function,
-                                                    std::memory_order_relaxed);
-}
-
-TimeTicks InitialNowFunction() {
-  InitializeNowFunctionPointer();
-  return g_time_ticks_now_ignoring_override_function.load(
-      std::memory_order_relaxed)();
-}
-
-}  // namespace
-
-// static
-TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
-    TickFunctionType ticker) {
-  TickFunctionType old = g_tick_function;
-  g_tick_function = ticker;
-  g_last_time_and_rollovers.store(0, std::memory_order_relaxed);
-  return old;
-}
-
-namespace subtle {
-TimeTicks TimeTicksNowIgnoringOverride() {
-  return g_time_ticks_now_ignoring_override_function.load(
-      std::memory_order_relaxed)();
-}
-}  // namespace subtle
-
-// static
-TimeTicks::Clock TimeTicks::GetClock() {
-  return Clock::WIN_ROLLOVER_PROTECTED_TIME_GET_TIME;
-}
-
-// ThreadTicks ----------------------------------------------------------------
-
-namespace subtle {
-ThreadTicks ThreadTicksNowIgnoringOverride() {
-  return ThreadTicks::GetForThread(PlatformThread::CurrentHandle());
-}
-}  // namespace subtle
-
-// static
-ThreadTicks ThreadTicks::GetForThread(
-    const PlatformThreadHandle& thread_handle) {
-  PA_BASE_DCHECK(IsSupported());
-
-#if defined(ARCH_CPU_ARM64)
-  // QueryThreadCycleTime versus TSCTicksPerSecond doesn't have much relation to
-  // actual elapsed time on Windows on Arm, because QueryThreadCycleTime is
-  // backed by the actual number of CPU cycles executed, rather than a
-  // constant-rate timer as on Intel. To work around this, use GetThreadTimes
-  // (which isn't as accurate but is meaningful as a measure of elapsed
-  // per-thread time).
-  FILETIME creation_time, exit_time, kernel_time, user_time;
-  ::GetThreadTimes(thread_handle.platform_handle(), &creation_time, &exit_time,
-                   &kernel_time, &user_time);
-
-  const int64_t us = FileTimeToMicroseconds(user_time);
-#else
-  // Get the number of TSC ticks used by the current thread.
-  ULONG64 thread_cycle_time = 0;
-  ::QueryThreadCycleTime(thread_handle.platform_handle(), &thread_cycle_time);
-
-  // Get the frequency of the TSC.
-  const double tsc_ticks_per_second = time_internal::TSCTicksPerSecond();
-  if (tsc_ticks_per_second == 0)
-    return ThreadTicks();
-
-  // Return the CPU time of the current thread.
-  const double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
-  const int64_t us =
-      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond);
-#endif
-
-  return ThreadTicks(us);
-}
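A hedged usage sketch of the per-thread API above; DoSomeWork() is a hypothetical workload, and subtracting two ThreadTicks values is assumed to yield a TimeDelta as elsewhere in this time library.

void MeasureThreadCpuTime() {
  if (!ThreadTicks::IsSupported())
    return;
  // On x86/x64 this blocks until the TSC frequency has been measured.
  ThreadTicks::WaitUntilInitializedWin();
  const ThreadTicks start =
      ThreadTicks::GetForThread(PlatformThread::CurrentHandle());
  DoSomeWork();  // Hypothetical workload.
  const ThreadTicks end =
      ThreadTicks::GetForThread(PlatformThread::CurrentHandle());
  const TimeDelta cpu_time = end - start;  // CPU time charged to this thread.
}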
-
-// static
-bool ThreadTicks::IsSupportedWin() {
-#if defined(ARCH_CPU_ARM64)
-  // The Arm implementation does not use QueryThreadCycleTime and therefore does
-  // not care about the time stamp counter.
-  return true;
-#else
-  return time_internal::HasConstantRateTSC();
-#endif
-}
-
-// static
-void ThreadTicks::WaitUntilInitializedWin() {
-#if !defined(ARCH_CPU_ARM64)
-  while (time_internal::TSCTicksPerSecond() == 0)
-    ::Sleep(10);
-#endif
-}
-
-// static
-TimeTicks TimeTicks::FromQPCValue(LONGLONG qpc_value) {
-  return TimeTicks() + QPCValueToTimeDelta(qpc_value);
-}
-
-// TimeDelta ------------------------------------------------------------------
-
-// static
-TimeDelta TimeDelta::FromQPCValue(LONGLONG qpc_value) {
-  return QPCValueToTimeDelta(qpc_value);
-}
-
-// static
-TimeDelta TimeDelta::FromFileTime(FILETIME ft) {
-  return Microseconds(FileTimeToMicroseconds(ft));
-}
-
-// static
-TimeDelta TimeDelta::FromWinrtDateTime(ABI::Windows::Foundation::DateTime dt) {
-  // UniversalTime is 100 ns intervals since January 1, 1601 (UTC)
-  return Microseconds(dt.UniversalTime / 10);
-}
-
-ABI::Windows::Foundation::DateTime TimeDelta::ToWinrtDateTime() const {
-  ABI::Windows::Foundation::DateTime date_time;
-  date_time.UniversalTime = InMicroseconds() * 10;
-  return date_time;
-}
-
-#if !defined(ARCH_CPU_ARM64)
-namespace time_internal {
-
-bool HasConstantRateTSC() {
-  static bool is_supported = CPU().has_non_stop_time_stamp_counter();
-  return is_supported;
-}
-
-double TSCTicksPerSecond() {
-  PA_BASE_DCHECK(HasConstantRateTSC());
-  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
-  // frequency, because there is no guarantee that the TSC frequency is equal to
-  // the performance counter frequency.
-  // The TSC frequency is cached in a static variable because it takes some time
-  // to compute it.
-  static double tsc_ticks_per_second = 0;
-  if (tsc_ticks_per_second != 0)
-    return tsc_ticks_per_second;
-
-  // Increase the thread priority to reduce the chances of having a context
-  // switch during a reading of the TSC and the performance counter.
-  const int previous_priority = ::GetThreadPriority(::GetCurrentThread());
-  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
-
-  // The first time that this function is called, make an initial reading of the
-  // TSC and the performance counter.
-
-  static const uint64_t tsc_initial = __rdtsc();
-  static const uint64_t perf_counter_initial = QPCNowRaw();
-
-  // Make another reading of the TSC and the performance counter every time
-  // that this function is called.
-  const uint64_t tsc_now = __rdtsc();
-  const uint64_t perf_counter_now = QPCNowRaw();
-
-  // Reset the thread priority.
-  ::SetThreadPriority(::GetCurrentThread(), previous_priority);
-
-  // Make sure that at least 50 ms elapsed between the 2 readings. The first
-  // time that this function is called, we don't expect this to be the case.
-  // Note: The longer the elapsed time between the 2 readings is, the more
-  //   accurate the computed TSC frequency will be. The 50 ms value was
-  //   chosen because local benchmarks show that it allows us to get a
-  //   stddev of less than 1 tick/us between multiple runs.
-  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
-  //   this will never fail on systems that run XP or later.
-  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
-  LARGE_INTEGER perf_counter_frequency = {};
-  ::QueryPerformanceFrequency(&perf_counter_frequency);
-  PA_BASE_DCHECK(perf_counter_now >= perf_counter_initial);
-  const uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
-  const double elapsed_time_seconds =
-      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
-
-  constexpr double kMinimumEvaluationPeriodSeconds = 0.05;
-  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
-    return 0;
-
-  // Compute the frequency of the TSC.
-  PA_BASE_DCHECK(tsc_now >= tsc_initial);
-  const uint64_t tsc_ticks = tsc_now - tsc_initial;
-  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
-
-  return tsc_ticks_per_second;
-}
-
-}  // namespace time_internal
-#endif  // defined(ARCH_CPU_ARM64)
-
-}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/types/strong_alias.h b/base/allocator/partition_allocator/partition_alloc_base/types/strong_alias.h
deleted file mode 100644
index 0e096bf..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/types/strong_alias.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
-
-#include <functional>
-#include <type_traits>
-#include <utility>
-
-namespace partition_alloc::internal::base {
-
-// A type-safe alternative for a typedef or a 'using' directive.
-//
-// C++ currently does not support type-safe typedefs, despite multiple proposals
-// (ex. http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3515.pdf). The
-// next best thing is to try and emulate them in library code.
-//
-// The motivation is to disallow several classes of errors:
-//
-// using Orange = int;
-// using Apple = int;
-// Apple apple(2);
-// Orange orange = apple;  // Orange should not be able to become an Apple.
-// Orange x = orange + apple;  // Shouldn't add Oranges and Apples.
-// if (orange > apple);  // Shouldn't compare Apples to Oranges.
-// void foo(Orange);
-// void foo(Apple);  // Redefinition.
-// etc.
-//
-// StrongAlias may instead be used as follows:
-//
-// using Orange = StrongAlias<class OrangeTag, int>;
-// using Apple = StrongAlias<class AppleTag, int>;
-// using Banana = StrongAlias<class BananaTag, std::string>;
-// Apple apple(2);
-// Banana banana("Hello");
-// Orange orange = apple;  // Does not compile.
-// Orange other_orange = orange;  // Compiles, types match.
-// Orange x = orange + apple;  // Does not compile.
-// Orange y = Orange(orange.value() + apple.value());  // Compiles.
-// Orange z = Orange(banana->size() + *other_orange);  // Compiles.
-// if (orange > apple);  // Does not compile.
-// if (orange > other_orange);  // Compiles.
-// void foo(Orange);
-// void foo(Apple);  // Compiles into separate overload.
-//
-// StrongAlias is a zero-cost abstraction, it's compiled away.
-//
-// TagType is an empty tag class (also called "phantom type") that only serves
-// the type system to differentiate between different instantiations of the
-// template.
-// UnderlyingType may be almost any value type. Note that some methods of the
-// StrongAlias may be unavailable (ie. produce elaborate compilation errors when
-// used) if UnderlyingType doesn't support them.
-//
-// StrongAlias only directly exposes comparison operators (for convenient use in
-// ordered containers) and a Hasher struct (for unordered_map/set). It's
-// impossible, without reflection, to expose all methods of the UnderlyingType
-// in StrongAlias's interface. It's also potentially unwanted (ex. you don't
-// want to be able to add two StrongAliases that represent socket handles).
-// A getter and dereference operators are provided in case you need to access
-// the UnderlyingType.
-//
-// See also
-// - //styleguide/c++/blink-c++.md which provides recommendation and examples of
-//   using StrongAlias<Tag, bool> instead of a bare bool.
-// - IdType<...> which provides helpers for specializing StrongAlias to be
-//   used as an id.
-// - TokenType<...> which provides helpers for specializing StrongAlias to be
-//   used as a wrapper of base::UnguessableToken.
-template <typename TagType, typename UnderlyingType>
-class StrongAlias {
- public:
-  constexpr StrongAlias() = default;
-  constexpr explicit StrongAlias(const UnderlyingType& v) : value_(v) {}
-  constexpr explicit StrongAlias(UnderlyingType&& v) noexcept
-      : value_(std::move(v)) {}
-
-  constexpr UnderlyingType* operator->() { return &value_; }
-  constexpr const UnderlyingType* operator->() const { return &value_; }
-
-  constexpr UnderlyingType& operator*() & { return value_; }
-  constexpr const UnderlyingType& operator*() const& { return value_; }
-  constexpr UnderlyingType&& operator*() && { return std::move(value_); }
-  constexpr const UnderlyingType&& operator*() const&& {
-    return std::move(value_);
-  }
-
-  constexpr UnderlyingType& value() & { return value_; }
-  constexpr const UnderlyingType& value() const& { return value_; }
-  constexpr UnderlyingType&& value() && { return std::move(value_); }
-  constexpr const UnderlyingType&& value() const&& { return std::move(value_); }
-
-  constexpr explicit operator const UnderlyingType&() const& { return value_; }
-
-  constexpr bool operator==(const StrongAlias& other) const {
-    return value_ == other.value_;
-  }
-  constexpr bool operator!=(const StrongAlias& other) const {
-    return value_ != other.value_;
-  }
-  constexpr bool operator<(const StrongAlias& other) const {
-    return value_ < other.value_;
-  }
-  constexpr bool operator<=(const StrongAlias& other) const {
-    return value_ <= other.value_;
-  }
-  constexpr bool operator>(const StrongAlias& other) const {
-    return value_ > other.value_;
-  }
-  constexpr bool operator>=(const StrongAlias& other) const {
-    return value_ >= other.value_;
-  }
-
-  // Hasher to use in std::unordered_map, std::unordered_set, etc.
-  //
-  // Example usage:
-  //     using MyType = base::StrongAlias<...>;
-  //     using MySet = std::unordered_set<MyType, typename MyType::Hasher>;
-  //
-  // https://google.github.io/styleguide/cppguide.html#std_hash asks to avoid
-  // defining specializations of `std::hash` - this is why the hasher needs to
-  // be explicitly specified and why the following code will *not* work:
-  //     using MyType = base::StrongAlias<...>;
-  //     using MySet = std::unordered_set<MyType>;  // This won't work.
-  struct Hasher {
-    using argument_type = StrongAlias;
-    using result_type = std::size_t;
-    result_type operator()(const argument_type& id) const {
-      return std::hash<UnderlyingType>()(id.value());
-    }
-  };
-
- protected:
-  UnderlyingType value_;
-};
-
-}  // namespace partition_alloc::internal::base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
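A brief usage sketch of the Hasher above with a concrete alias; BucketId, bucket_names, and Example() are hypothetical names for illustration.

#include <cstdint>
#include <string>
#include <unordered_map>

using BucketId =
    partition_alloc::internal::base::StrongAlias<class BucketIdTag, uint32_t>;

std::unordered_map<BucketId, std::string, BucketId::Hasher> bucket_names;

void Example() {
  bucket_names[BucketId(42)] = "small";
  // BucketId id = 42;             // Does not compile: constructor is explicit.
  // uint32_t raw = BucketId(42);  // Does not compile: conversion is explicit.
}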
diff --git a/base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types.h b/base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types.h
deleted file mode 100644
index 9e41b4c..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
-
-// Forward declare Windows compatible handles.
-
-#define PA_WINDOWS_HANDLE_TYPE(name) \
-  struct name##__;                   \
-  typedef struct name##__* name;
-#include "base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types_list.inc"
-#undef PA_WINDOWS_HANDLE_TYPE
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h b/base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h
deleted file mode 100644
index 768b7ca..0000000
--- a/base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains defines and typedefs that allow popular Windows types to
-// be used without the overhead of including windows.h.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WINDOWS_TYPES_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WINDOWS_TYPES_H_
-
-// Needed for function prototypes.
-#include <specstrings.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// typedef and define the most commonly used Windows integer types.
-
-typedef unsigned long DWORD;
-typedef long LONG;
-typedef __int64 LONGLONG;
-typedef unsigned __int64 ULONGLONG;
-
-#define VOID void
-typedef char CHAR;
-typedef short SHORT;
-typedef long LONG;
-typedef int INT;
-typedef unsigned int UINT;
-typedef unsigned int* PUINT;
-typedef unsigned __int64 UINT64;
-typedef void* LPVOID;
-typedef void* PVOID;
-typedef void* HANDLE;
-typedef int BOOL;
-typedef unsigned char BYTE;
-typedef BYTE BOOLEAN;
-typedef DWORD ULONG;
-typedef unsigned short WORD;
-typedef WORD UWORD;
-typedef WORD ATOM;
-
-// Forward declare some Windows struct/typedef sets.
-
-typedef struct _RTL_SRWLOCK RTL_SRWLOCK;
-typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
-
-typedef struct _FILETIME FILETIME;
-
-struct PA_CHROME_SRWLOCK {
-  PVOID Ptr;
-};
-
-// The trailing whitespace after this macro is required, for compatibility
-// with the definition in winnt.h.
-#define RTL_SRWLOCK_INIT {0}                            // NOLINT
-#define SRWLOCK_INIT RTL_SRWLOCK_INIT
-
-// clang-format on
-
-// Define some macros needed when prototyping Windows functions.
-
-#define DECLSPEC_IMPORT __declspec(dllimport)
-#define WINBASEAPI DECLSPEC_IMPORT
-#define WINAPI __stdcall
-
-// Needed for LockImpl.
-WINBASEAPI _Releases_exclusive_lock_(*SRWLock) VOID WINAPI
-    ReleaseSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
-WINBASEAPI BOOLEAN WINAPI TryAcquireSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
-
-// Needed for thread_local_storage.h
-WINBASEAPI LPVOID WINAPI TlsGetValue(_In_ DWORD dwTlsIndex);
-
-WINBASEAPI BOOL WINAPI TlsSetValue(_In_ DWORD dwTlsIndex,
-                                   _In_opt_ LPVOID lpTlsValue);
-
-WINBASEAPI _Check_return_ _Post_equals_last_error_ DWORD WINAPI
-    GetLastError(VOID);
-
-WINBASEAPI VOID WINAPI SetLastError(_In_ DWORD dwErrCode);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WINDOWS_TYPES_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_check.h b/base/allocator/partition_allocator/partition_alloc_check.h
deleted file mode 100644
index 2e13665..0000000
--- a/base/allocator/partition_allocator/partition_alloc_check.h
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "build/build_config.h"
-
-// When PartitionAlloc is used as the default allocator, we cannot use the
-// regular (D)CHECK() macros, as they allocate internally. When an assertion is
-// triggered, they format strings, leading to reentrancy in the code, which none
-// of PartitionAlloc is designed to support (and especially not for error
-// paths).
-//
-// As a consequence:
-// - When PartitionAlloc is not malloc(), use the regular macros
-// - Otherwise, crash immediately. This provides worse error messages though.
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-// For official build discard log strings to reduce binary bloat.
-#if !PA_BASE_CHECK_WILL_STREAM()
-// See base/check.h for implementation details.
-#define PA_CHECK(condition)                        \
-  PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
-                            : PA_EAT_CHECK_STREAM_PARAMS()
-#else
-// PartitionAlloc uses async-signal-safe RawCheckFailure() for error reporting.
-// Async-signal-safe functions are guaranteed to not allocate as otherwise they
-// could operate with inconsistent allocator state.
-#define PA_CHECK(condition)                                                \
-  PA_UNLIKELY(!(condition))                                                \
-  ? ::partition_alloc::internal::logging::RawCheckFailure(                 \
-        __FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \
-  : PA_EAT_CHECK_STREAM_PARAMS()
-#endif  // !PA_BASE_CHECK_WILL_STREAM()
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#define PA_DCHECK(condition) PA_CHECK(condition)
-#else
-#define PA_DCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_PCHECK(condition)                                 \
-  if (!(condition)) {                                        \
-    int error = errno;                                       \
-    ::partition_alloc::internal::base::debug::Alias(&error); \
-    PA_IMMEDIATE_CRASH();                                    \
-  }
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#define PA_DPCHECK(condition) PA_PCHECK(condition)
-#else
-#define PA_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#else
-#define PA_CHECK(condition) PA_BASE_CHECK(condition)
-#define PA_DCHECK(condition) PA_BASE_DCHECK(condition)
-#define PA_PCHECK(condition) PA_BASE_PCHECK(condition)
-#define PA_DPCHECK(condition) PA_BASE_DPCHECK(condition)
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-// Expensive dchecks that run within *Scan. These checks are only enabled in
-// debug builds with dchecks enabled.
-#if !defined(NDEBUG)
-#define PA_SCAN_DCHECK_IS_ON() BUILDFLAG(PA_DCHECK_IS_ON)
-#else
-#define PA_SCAN_DCHECK_IS_ON() 0
-#endif
-
-#if PA_SCAN_DCHECK_IS_ON()
-#define PA_SCAN_DCHECK(expr) PA_DCHECK(expr)
-#else
-#define PA_SCAN_DCHECK(expr) PA_EAT_CHECK_STREAM_PARAMS(!(expr))
-#endif
-
-#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
-
-// Use this macro to assert on things that are conditionally constexpr as
-// determined by PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR or
-// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR. Where fixed at compile time, this
-// is a static_assert. Where determined at run time, this is a PA_CHECK.
-// Therefore, this macro must only be used where both a static_assert and a
-// PA_CHECK would be viable, that is, within a function, and ideally a function
-// that executes only once, early in the program, such as during initialization.
-#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
-  static_assert(condition, message)
-
-#else
-
-#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
-  do {                                                \
-    PA_CHECK(condition) << (message);                 \
-  } while (false)
-
-#endif
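A hedged usage sketch of the macro above; PartitionPageSize() and SystemPageSize() are page-allocator helpers assumed from the surrounding headers, and InitOnce() is a hypothetical one-shot initialization function.

void InitOnce() {
  // Becomes a static_assert where the constants are constexpr, and a runtime
  // PA_CHECK otherwise.
  STATIC_ASSERT_OR_PA_CHECK(PartitionPageSize() >= SystemPageSize(),
                            "partition pages must span whole system pages");
}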
-
-// alignas(16) DebugKv causes breakpad_unittests and sandbox_linux_unittests
-// failures on android-marshmallow-x86-rel because of SIGSEGV.
-#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_X86_FAMILY) && \
-    defined(ARCH_CPU_32_BITS)
-#define PA_DEBUGKV_ALIGN alignas(8)
-#else
-#define PA_DEBUGKV_ALIGN alignas(16)
-#endif
-
-namespace partition_alloc::internal {
-
-static constexpr size_t kDebugKeyMaxLength = 8ull;
-
-// Used for PA_DEBUG_DATA_ON_STACK, below.
-struct PA_DEBUGKV_ALIGN DebugKv {
-  // 16 bytes object aligned on 16 bytes, to make it easier to see in crash
-  // reports.
-  char k[kDebugKeyMaxLength] = {};  // Not necessarily 0-terminated.
-  uint64_t v = 0;
-
-  DebugKv(const char* key, uint64_t value) : v(value) {
-    // Fill with ' ', so that the stack dump is nicer to read.  Not using
-    // memset() on purpose, this header is included from *many* places.
-    for (size_t index = 0; index < sizeof k; index++) {
-      k[index] = ' ';
-    }
-
-    for (size_t index = 0; index < sizeof k; index++) {
-      k[index] = key[index];
-      if (key[index] == '\0') {
-        break;
-      }
-    }
-  }
-};
-
-}  // namespace partition_alloc::internal
-
-#define PA_CONCAT(x, y) x##y
-#define PA_CONCAT2(x, y) PA_CONCAT(x, y)
-#define PA_DEBUG_UNIQUE_NAME PA_CONCAT2(kv, __LINE__)
-
-// Puts a key-value pair on the stack for debugging. `base::debug::Alias()`
-// makes sure a local variable is saved on the stack, but the variables can be
-// hard to find in crash reports, particularly if the frame pointer is not
-// present / invalid.
-//
-// This puts a key right before the value on the stack. The key has to be a C
-// string, which gets truncated if it's longer than 8 characters.
-// Example use:
-// PA_DEBUG_DATA_ON_STACK("size", 0x42)
-//
-// Sample output in lldb:
-// (lldb) x 0x00007fffffffd0d0 0x00007fffffffd0f0
-// 0x7fffffffd0d0: 73 69 7a 65 00 00 00 00 42 00 00 00 00 00 00 00
-// size............
-//
-// With gdb, one can use:
-// x/8g <STACK_POINTER>
-// to see the data. With lldb, "x <STACK_POINTER> <FRAME_POINTER>" can be used.
-#define PA_DEBUG_DATA_ON_STACK(name, value)                               \
-  static_assert(sizeof name <=                                            \
-                ::partition_alloc::internal::kDebugKeyMaxLength + 1);     \
-  ::partition_alloc::internal::DebugKv PA_DEBUG_UNIQUE_NAME{name, value}; \
-  ::partition_alloc::internal::base::debug::Alias(&PA_DEBUG_UNIQUE_NAME);
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_config.h b/base/allocator/partition_allocator/partition_alloc_config.h
deleted file mode 100644
index 7d96ec7..0000000
--- a/base/allocator/partition_allocator/partition_alloc_config.h
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "build/build_config.h"
-
-// PA_CONFIG() uses a similar trick to BUILDFLAG() to let the compiler catch
-// typos or a missing #include.
-//
-// -----------------------------------------------------------------------------
-// Housekeeping Rules
-// -----------------------------------------------------------------------------
-// 1. Prefix all config macros in this file with PA_CONFIG_ and define them in
-//    a function-like manner, e.g. PA_CONFIG_MY_SETTING().
-// 2. Both positive and negative cases must be defined.
-// 3. Don't use PA_CONFIG_MY_SETTING() directly outside of this file, use
-//    PA_CONFIG(flag-without-PA_CONFIG_) instead, e.g. PA_CONFIG(MY_SETTING).
-// 4. Do not use PA_CONFIG() when defining config macros, or it will lead to
-//    recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly.
-// 5. Try to use constexpr instead of macros wherever possible.
-// TODO(bartekn): Convert macros to constexpr or BUILDFLAG as much as possible.
-#define PA_CONFIG(flag) (PA_CONFIG_##flag())
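To illustrate the rules above, a sketch using the placeholder name from rule 1 (not a real flag): both branches define the function-like macro, and call sites query it only through PA_CONFIG(), so a typo expands to an undefined function-like macro and breaks the build instead of silently evaluating to 0.

#if BUILDFLAG(IS_WIN)
#define PA_CONFIG_MY_SETTING() 1
#else
#define PA_CONFIG_MY_SETTING() 0
#endif

#if PA_CONFIG(MY_SETTING)
// Windows-only code path.
#endif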
-
-// Assert that the heuristic in partition_alloc.gni is accurate on supported
-// configurations.
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-static_assert(sizeof(void*) == 8, "");
-#else
-static_assert(sizeof(void*) != 8, "");
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS) && \
-    (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
-#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1
-#else
-#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0
-#endif
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
-// Allow PA to select an alternate pool size at run-time before initialization,
-// rather than using a single constexpr value.
-//
-// This is needed on iOS because iOS test processes can't handle large pools
-// (see crbug.com/1250788).
-//
-// This setting is specific to 64-bit, as 32-bit has a different implementation.
-#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1
-#else
-#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS) && \
-    (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
-#include <linux/version.h>
-// TODO(bikineev): Enable for ChromeOS.
-#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() \
-  (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
-#else
-#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS) &&
-        // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
-
-#if BUILDFLAG(USE_STARSCAN)
-// Use card table to avoid races for PCScan configuration without safepoints.
-// The card table provides the guarantee that for a marked card the underlying
-// super-page is fully initialized.
-#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 1
-#else
-// The card table is permanently disabled for 32-bit.
-#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-// Use batched freeing when sweeping pages. This builds up a freelist in the
-// scanner thread and appends to the slot-span's freelist only once.
-#define PA_CONFIG_STARSCAN_BATCHED_FREE() 1
-
-// TODO(bikineev): Temporarily disable inlining in *Scan to get clearer
-// stacktraces.
-#define PA_CONFIG_STARSCAN_NOINLINE_SCAN_FUNCTIONS() 1
-
-// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
-// cause significant jank.
-#define PA_CONFIG_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM() 0
-
-// Double free detection comes with expensive cmpxchg (with the loop around it).
-// We currently disable it to improve the runtime.
-#define PA_CONFIG_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED() 0
-
-// POSIX covers more than Linux (e.g. macOS and other OSes), but we rely on
-// Linux-specific features such as futex(2), so check for Linux-based OSes.
-#define PA_CONFIG_HAS_LINUX_KERNEL() \
-  (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID))
-
-// On some platforms, we implement locking by spinning in userspace, then going
-// into the kernel only if there is contention. This requires platform support,
-// namely:
-// - On Linux, futex(2)
-// - On Windows, a fast userspace "try" operation which is available with
-//   SRWLock
-// - On macOS, pthread_mutex_trylock() is fast by default starting with macOS
-//   10.14. Chromium targets an earlier version, so it cannot be known at
-//   compile-time. So we use something different.
-//   TODO(https://crbug.com/1459032): macOS 10.15 is now required; switch to
-//   better locking.
-// - Otherwise, on POSIX we assume that a fast userspace pthread_mutex_trylock()
-//   is available.
-//
-// Otherwise, a userspace spinlock implementation is used.
-#if PA_CONFIG(HAS_LINUX_KERNEL) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || \
-    BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-#define PA_CONFIG_HAS_FAST_MUTEX() 1
-#else
-#define PA_CONFIG_HAS_FAST_MUTEX() 0
-#endif
-
-// If defined, enables zeroing memory on Free() with roughly 1% probability.
-// This applies only to normal buckets, as direct-map allocations are always
-// decommitted.
-// TODO(bartekn): Re-enable once PartitionAlloc-Everywhere evaluation is done.
-#define PA_CONFIG_ZERO_RANDOMLY_ON_FREE() 0
-
-// Need TLS support.
-#define PA_CONFIG_THREAD_CACHE_SUPPORTED() \
-  (BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_FUCHSIA))
-
-// Too expensive for official builds, as it adds cache misses to all
-// allocations. On the other hand, we want wide metrics coverage to get
-// realistic profiles.
-#define PA_CONFIG_THREAD_CACHE_ALLOC_STATS() \
-  (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !defined(OFFICIAL_BUILD))
-
-// Optional statistics collection. Lightweight, contrary to the ones above,
-// hence enabled by default.
-#define PA_CONFIG_THREAD_CACHE_ENABLE_STATISTICS() 1
-
-// Enable free list shadow entry to strengthen hardening as much as possible.
-// The shadow entry is an inversion (bitwise-NOT) of the encoded `next` pointer.
-//
-// Disabled when ref-count is placed in the previous slot, as it will overlap
-// with the shadow for the smallest slots.
-//
-// Disabled on Big Endian CPUs, because encoding is also a bitwise-NOT there,
-// making the shadow entry equal to the original, valid pointer to the next
-// slot. In case Use-after-Free happens, we'd rather not hand out a valid,
-// ready-to-use pointer.
-#define PA_CONFIG_HAS_FREELIST_SHADOW_ENTRY()    \
-  (!BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && \
-   defined(ARCH_CPU_LITTLE_ENDIAN))
-
-#define PA_CONFIG_HAS_MEMORY_TAGGING()              \
-  (defined(ARCH_CPU_ARM64) && defined(__clang__) && \
-   (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)))
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-static_assert(sizeof(void*) == 8);
-#endif
-
-// If memory tagging is enabled with BRP previous slot, the MTE tag and BRP ref
-// count will cause a race (crbug.com/1445816). To prevent this, the
-// ref_count_size is increased to the MTE granule size and the ref count is not
-// tagged.
-#if PA_CONFIG(HAS_MEMORY_TAGGING) &&            \
-    BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
-    BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-#define PA_CONFIG_INCREASE_REF_COUNT_SIZE_FOR_MTE() 1
-#else
-#define PA_CONFIG_INCREASE_REF_COUNT_SIZE_FOR_MTE() 0
-#endif
-
-// Specifies whether allocation extras need to be added.
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-#define PA_CONFIG_EXTRAS_REQUIRED() 1
-#else
-#define PA_CONFIG_EXTRAS_REQUIRED() 0
-#endif
-
-// Count and total wall clock time spent in memory-related system calls. This
-// doesn't cover all system calls, in particular the ones related to locking.
-//
-// Not enabled by default, as it has a runtime cost, and causes issues with some
-// builds (e.g. Windows).
-// However the total count is collected on all platforms.
-#define PA_CONFIG_COUNT_SYSCALL_TIME() 0
-
-// On Windows, |thread_local| variables cannot be marked "dllexport", see
-// compiler error C2492 at
-// https://docs.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2492?view=msvc-160.
-// Don't use it there.
-//
-// On macOS and iOS:
-// - With PartitionAlloc-Everywhere, thread_local allocates, reentering the
-//   allocator.
-// - Component builds triggered a clang bug: crbug.com/1243375
-//
-// Regardless, the "normal" TLS access is fast on x86_64 (see partition_tls.h),
-// so don't bother with thread_local anywhere.
-#define PA_CONFIG_THREAD_LOCAL_TLS() \
-  (!(BUILDFLAG(IS_WIN) && defined(COMPONENT_BUILD)) && !BUILDFLAG(IS_APPLE))
-
-// When PartitionAlloc is malloc(), detect malloc() becoming re-entrant by
-// calling malloc() again.
-//
-// Limitations:
-// - BUILDFLAG(PA_DCHECK_IS_ON) due to runtime cost
-// - thread_local TLS to simplify the implementation
-// - Not on Android due to bot failures
-#if BUILDFLAG(PA_DCHECK_IS_ON) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
-    PA_CONFIG(THREAD_LOCAL_TLS) && !BUILDFLAG(IS_ANDROID)
-#define PA_CONFIG_HAS_ALLOCATION_GUARD() 1
-#else
-#define PA_CONFIG_HAS_ALLOCATION_GUARD() 0
-#endif
-
-// On Android, we have to go through emutls, since this is always a shared
-// library, so don't bother.
-#if PA_CONFIG(THREAD_LOCAL_TLS) && !BUILDFLAG(IS_ANDROID)
-#define PA_CONFIG_THREAD_CACHE_FAST_TLS() 1
-#else
-#define PA_CONFIG_THREAD_CACHE_FAST_TLS() 0
-#endif
-
-// Lazy commit should only be enabled on Windows, because commit charge is
-// only meaningful and limited on Windows. It affects performance on other
-// platforms and is simply not needed there, since those OSes support overcommit.
-#if BUILDFLAG(IS_WIN)
-constexpr bool kUseLazyCommit = true;
-#else
-constexpr bool kUseLazyCommit = false;
-#endif
-
-// On these platforms, lock all the partitions before fork(), and unlock after.
-// This may be required on more platforms in the future.
-#define PA_CONFIG_HAS_ATFORK_HANDLER() \
-  (BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS))
-
-// PartitionAlloc uses PartitionRootEnumerator to acquire all
-// PartitionRoots at BeforeFork and to release at AfterFork.
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_CONFIG(HAS_ATFORK_HANDLER)
-#define PA_CONFIG_USE_PARTITION_ROOT_ENUMERATOR() 1
-#else
-#define PA_CONFIG_USE_PARTITION_ROOT_ENUMERATOR() 0
-#endif
-
-// Due to potential conflict with the free list pointer in the "previous slot"
-// mode in the smallest bucket, we can't check both the cookie and the dangling
-// raw_ptr at the same time.
-#define PA_CONFIG_REF_COUNT_CHECK_COOKIE()         \
-  (!(BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) &&  \
-     BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)) && \
-   (BUILDFLAG(PA_DCHECK_IS_ON) ||                  \
-    BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)))
-
-// Use available space in the reference count to store the initially requested
-// size from the application. This is used for debugging.
-#if !PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
-    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-// Set to 1 when needed.
-#define PA_CONFIG_REF_COUNT_STORE_REQUESTED_SIZE() 0
-#else
-// You probably want it at 0, outside of local testing, or else
-// PartitionRefCount will grow past 8B.
-#define PA_CONFIG_REF_COUNT_STORE_REQUESTED_SIZE() 0
-#endif
-
-#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) && \
-    PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-#error "Cannot use a cookie *and* store the allocation size"
-#endif
-
-// Prefer smaller slot spans.
-//
-// Smaller slot spans may improve dirty memory fragmentation, but may also
-// increase address space usage.
-//
-// This is intended to roll out more broadly, but only enabled on Linux for now
-// to get performance bot and real-world data pre-A/B experiment.
-//
-// Also enabled on ARM64 macOS, as the 16kiB pages on this platform lead to
-// larger slot spans.
-#define PA_CONFIG_PREFER_SMALLER_SLOT_SPANS() \
-  (BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64)))
-
-// Enable shadow metadata.
-//
-// With this flag, shadow pools will be mapped, on which writable shadow
-// metadata is placed, and the real metadata is made read-only instead.
-// This feature is only enabled in 64-bit environments because pools work
-// differently with 32-bit pointers (see glossary).
-#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
-    BUILDFLAG(HAS_64_BIT_POINTERS)
-#define PA_CONFIG_ENABLE_SHADOW_METADATA() 1
-#else
-#define PA_CONFIG_ENABLE_SHADOW_METADATA() 0
-#endif
-
-// According to crbug.com/1349955#c24, macOS 11 has a bug where it asserts that
-// malloc_size() of an allocation is equal to the requested size. This is
-// generally not true. The assert passed only because it happened to be true for
-// the sizes they requested. BRP changes that, hence can't be deployed without a
-// workaround.
-//
-// The bug has been fixed in macOS 12. Here we can only check the platform, and
-// the version is checked dynamically later.
-#define PA_CONFIG_ENABLE_MAC11_MALLOC_SIZE_HACK() \
-  (BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && BUILDFLAG(IS_MAC))
-
-#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
-#error "Dynamically selected pool size is currently not supported"
-#endif
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-// TODO(1376980): Address MTE once it's enabled.
-#error "Compressed pointers don't support tag in the upper bits"
-#endif
-
-#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-
-// PA_CONFIG(IS_NONCLANG_MSVC): mimics the compound condition used by
-// Chromium's `//base/compiler_specific.h` to detect true (non-Clang)
-// MSVC.
-#if defined(COMPILER_MSVC) && !defined(__clang__)
-#define PA_CONFIG_IS_NONCLANG_MSVC() 1
-#else
-#define PA_CONFIG_IS_NONCLANG_MSVC() 0
-#endif
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_constants.h b/base/allocator/partition_allocator/partition_alloc_constants.h
deleted file mode 100644
index 24e2dcb..0000000
--- a/base/allocator/partition_allocator/partition_alloc_constants.h
+++ /dev/null
@@ -1,496 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
-
-#include <algorithm>
-#include <climits>
-#include <cstddef>
-#include <limits>
-
-#include "base/allocator/partition_allocator/address_pool_manager_types.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
-#include <mach/vm_page_size.h>
-#endif
-
-namespace partition_alloc {
-
-// Bit flag constants used as `flag` argument of PartitionRoot::AllocWithFlags,
-// AlignedAllocWithFlags, etc.
-struct AllocFlags {
-  static constexpr unsigned int kReturnNull = 1 << 0;
-  static constexpr unsigned int kZeroFill = 1 << 1;
-  // Don't allow allocation override hooks. Override hooks are expected to
-  // check for the presence of this flag and return false if it is active.
-  static constexpr unsigned int kNoOverrideHooks = 1 << 2;
-  // Never let a memory tool like ASan (if active) perform the allocation.
-  static constexpr unsigned int kNoMemoryToolOverride = 1 << 3;
-  // Don't allow any hooks (override or observers).
-  static constexpr unsigned int kNoHooks = 1 << 4;  // Internal.
-  // If the allocation requires a "slow path" (such as allocating/committing a
-  // new slot span), return nullptr instead. Note this makes all large
-  // allocations return nullptr, such as direct-mapped ones, and even for
-  // smaller ones, a nullptr value is common.
-  static constexpr unsigned int kFastPathOrReturnNull = 1 << 5;  // Internal.
-  // An allocation override hook should tag the allocated memory for MTE.
-  static constexpr unsigned int kMemoryShouldBeTaggedForMte =
-      1 << 6;  // Internal.
-
-  static constexpr unsigned int kLastFlag = kMemoryShouldBeTaggedForMte;
-};
-
-// Bit flag constants used as `flag` argument of PartitionRoot::Free<flags>.
-struct FreeFlags {
-  // See AllocFlags::kNoMemoryToolOverride.
-  static constexpr unsigned int kNoMemoryToolOverride = 1 << 0;
-
-  static constexpr unsigned int kLastFlag = kNoMemoryToolOverride;
-};
-
-namespace internal {
-
-// Size of a cache line. Not all CPUs in the world have a 64 bytes cache line
-// size, but as of 2021, most do. This is in particular the case for almost all
-// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
-// static alignment, we cannot query the CPU at runtime to determine the actual
-// alignment, so use 64 bytes everywhere. Since this is only used to avoid false
-// sharing, getting this wrong only results in lower performance, not incorrect
-// code.
-constexpr size_t kPartitionCachelineSize = 64;
-
-// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
-// It is typical for a `PartitionPage` to be based on multiple system pages.
-// Most references to "page" refer to `PartitionPage`s.
-//
-// *Super pages* are the underlying system allocations we make. Super pages
-// contain multiple partition pages and include space for a small amount of
-// metadata per partition page.
-//
-// Inside super pages, we store *slot spans*. A slot span is a contiguous range
-// of one or more `PartitionPage`s that stores allocations of the same size.
-// Slot span sizes are adjusted depending on the allocation size, to make sure
-// the packing does not lead to unused (wasted) space at the end of the last
-// system page of the span. For our current maximum slot span size of 64 KiB and
-// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
-// up against the end of a system page.
-
-#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64)
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageShift() {
-  return 16;  // 64 KiB
-}
-#elif defined(ARCH_CPU_PPC64)
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageShift() {
-  return 18;  // 256 KiB
-}
-#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
-    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageShift() {
-  return PageAllocationGranularityShift() + 2;
-}
-#else
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageShift() {
-  return 14;  // 16 KiB
-}
-#endif
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageSize() {
-  return 1 << PartitionPageShift();
-}
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageOffsetMask() {
-  return PartitionPageSize() - 1;
-}
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-PartitionPageBaseMask() {
-  return ~PartitionPageOffsetMask();
-}
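A small worked example of the helpers above, assuming the default 16 KiB partition page (shift of 14); the address is arbitrary.

//   PartitionPageSize()        == 0x4000
//   PartitionPageOffsetMask()  == 0x3fff
//   PartitionPageBaseMask()    == ~0x3fff
//   For an address 0x7f0012345678:
//     address & PartitionPageBaseMask()   == 0x7f0012344000  (page base)
//     address & PartitionPageOffsetMask() == 0x1678          (offset in page)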
-
-// Number of system pages per regular slot span. Above this limit, we call it
-// a single-slot span, as the span literally hosts only one slot, and has
-// somewhat different implementation. At run-time, single-slot spans can be
-// differentiated with a call to CanStoreRawSize().
-// TODO: Should this be 1 on platforms with page size larger than 4kB, e.g.
-// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
-constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
-
-// To avoid fragmentation via never-used freelist entries, we hand out partition
-// freelist sections gradually, in units of the dominant system page size. What
-// we're actually doing is avoiding filling the full `PartitionPage` (16 KiB)
-// with freelist pointers right away. Writing freelist pointers will fault and
-// dirty a private page, which is very wasteful if we never actually store
-// objects there.
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-NumSystemPagesPerPartitionPage() {
-  return PartitionPageSize() >> SystemPageShift();
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-MaxSystemPagesPerRegularSlotSpan() {
-  return NumSystemPagesPerPartitionPage() *
-         kMaxPartitionPagesPerRegularSlotSpan;
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-MaxRegularSlotSpanSize() {
-  return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
-}
-
-// The maximum size that is used in an alternate bucket distribution. After this
-// threshold, we only have 1 slot per slot-span, so external fragmentation
-// doesn't matter. So, using the alternate bucket distribution after this
-// threshold has no benefit, and only increases internal fragmentation.
-//
-// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
-// it is not constexpr everywhere, so on the remaining platforms we hardcode
-// the value, even though it may be too low, e.g. on systems with a page size
-// larger than 4 KiB.
-constexpr size_t kHighThresholdForAlternateDistribution =
-#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
-    MaxRegularSlotSpanSize();
-#else
-    1 << 16;
-#endif
-
-// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
-// These chunks are called *super pages*. We do this so that we can store
-// metadata in the first few pages of each 2 MiB-aligned section. This makes
-// freeing memory very fast. 2 MiB size & alignment were chosen, because this
-// virtual address block represents a full but single page table allocation on
-// ARM, ia32 and x64, which may be slightly more performance- and
-// memory-efficient.
-// (Note, these super pages are backed by 4 KiB system pages and have nothing to
-// do with OS concept of "huge pages"/"large pages", even though the size
-// coincides.)
-//
-// The layout of the super page is as follows. The sizes below are the same for
-// 32- and 64-bit platforms.
-//
-//     +-----------------------+
-//     | Guard page (4 KiB)    |
-//     | Metadata page (4 KiB) |
-//     | Guard pages (8 KiB)   |
-//     | Free Slot Bitmap      |
-//     | *Scan State Bitmap    |
-//     | Slot span             |
-//     | Slot span             |
-//     | ...                   |
-//     | Slot span             |
-//     | Guard pages (16 KiB)  |
-//     +-----------------------+
-//
-// Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
-// Bitmap is inserted for partitions that may have quarantine enabled.
-//
-// If refcount_at_end_allocation is enabled, RefcountBitmap(4KiB) is inserted
-// after the Metadata page for BackupRefPtr. The guard pages after the bitmap
-// will be 4KiB.
-//
-//...
-//     | Metadata page (4 KiB) |
-//     | RefcountBitmap (4 KiB)|
-//     | Guard pages (4 KiB)   |
-//...
-//
-// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
-// that slot spans of different sizes may co-exist within one super page, and
-// even slot spans of the same total size may host different slot sizes.
-// However, all slots within a single span have to be of the same size.
-//
-// The metadata page has the following format. Note that a `PartitionPage`
-// that is not at the head of a slot span is "unused" (for the most part, it
-// only stores the offset from the head page). In other words, the metadata
-// for a slot span is stored only in its first `PartitionPage`; metadata
-// accesses to the other `PartitionPage`s are redirected to that first one.
-//
-//     +---------------------------------------------+
-//     | SuperPageExtentEntry (32 B)                 |
-//     | PartitionPage of slot span 1 (32 B, used)   |
-//     | PartitionPage of slot span 1 (32 B, unused) |
-//     | PartitionPage of slot span 1 (32 B, unused) |
-//     | PartitionPage of slot span 2 (32 B, used)   |
-//     | PartitionPage of slot span 3 (32 B, used)   |
-//     | ...                                         |
-//     | PartitionPage of slot span N (32 B, used)   |
-//     | PartitionPage of slot span N (32 B, unused) |
-//     | PartitionPage of slot span N (32 B, unused) |
-//     +---------------------------------------------+
-//
-// A direct-mapped page has an identical layout at the beginning, to make it
-// look like a super page:
-//
-//     +---------------------------------+
-//     | Guard page (4 KiB)              |
-//     | Metadata page (4 KiB)           |
-//     | Guard pages (8 KiB)             |
-//     | Direct mapped object            |
-//     | Guard page (4 KiB, 32-bit only) |
-//     +---------------------------------+
-//
-// A direct-mapped page's metadata page has the following layout on 64-bit
-// architectures (on 32-bit ones, the structure is identical, but some sizes
-// are smaller due to smaller pointers):
-//
-//     +----------------------------------+
-//     | SuperPageExtentEntry (32 B)      |
-//     | PartitionPage (32 B)             |
-//     | PartitionBucket (40 B)           |
-//     | PartitionDirectMapExtent (32 B)  |
-//     +----------------------------------+
-//
-// See |PartitionDirectMapMetadata| for details.
-
-constexpr size_t kGiB = 1024 * 1024 * 1024ull;
-constexpr size_t kSuperPageShift = 21;  // 2 MiB
-constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
-constexpr size_t kSuperPageAlignment = kSuperPageSize;
-constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
-constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
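As a minimal sketch (not part of the original file; the helper names below are
hypothetical), the masks above recover a super page's base and offset from any
address inside it:

constexpr uintptr_t SuperPageBaseOf(uintptr_t address) {
  return address & kSuperPageBaseMask;
}
constexpr uintptr_t SuperPageOffsetOf(uintptr_t address) {
  return address & kSuperPageOffsetMask;
}
// With 2 MiB super pages, SuperPageBaseOf(0x40001234) == 0x40000000 and
// SuperPageOffsetOf(0x40001234) == 0x1234.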
-
-// PartitionAlloc's address space is split into pools. See `glossary.md`.
-
-enum pool_handle : unsigned {
-  kNullPoolHandle = 0u,
-
-  kRegularPoolHandle,
-  kBRPPoolHandle,
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  kConfigurablePoolHandle,
-#endif
-
-// New pool_handles will be added here.
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // The thread isolated pool must come last since we write-protect its entry in
-  // the metadata tables, e.g. AddressPoolManager::aligned_pools_
-  kThreadIsolatedPoolHandle,
-#endif
-  kMaxPoolHandle
-};
-
-// kNullPoolHandle doesn't have metadata, hence - 1
-constexpr size_t kNumPools = kMaxPoolHandle - 1;
-
-// Maximum pool size. With the exception of the Configurable Pool, it is also
-// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
-// allows choosing a different size at initialization time for certain
-// configurations.
-//
-// Special-case Android and iOS, which incur test failures with larger
-// pools. Regardless, allocating >8GiB with malloc() on these platforms is
-// unrealistic as of 2022.
-//
-// When pointer compression is enabled, we cannot use large pools (at most
-// 8 GiB for each of the glued pools).
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || \
-    BUILDFLAG(ENABLE_POINTER_COMPRESSION)
-constexpr size_t kPoolMaxSize = 8 * kGiB;
-#else
-constexpr size_t kPoolMaxSize = 16 * kGiB;
-#endif
-#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
-constexpr size_t kPoolMaxSize = 4 * kGiB;
-#endif
-constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-static_assert(kThreadIsolatedPoolHandle == kNumPools,
-              "The thread isolated pool must come last since we write-protect "
-              "its metadata.");
-#endif
-
-// Slots larger than this size will not receive MTE protection. Pages intended
-// for allocations larger than this constant should not be backed with PROT_MTE
-// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
-// of large areas which are less likely to benefit from MTE protection.
-constexpr size_t kMaxMemoryTaggingSize = 1024;
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-// Returns whether the tag of |object| overflowed, meaning the containing slot
-// needs to be moved to quarantine.
-PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
-  // The tag with which the slot is put to quarantine.
-  constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
-  static_assert((kOverflowTag & kPtrTagMask) != 0,
-                "Overflow tag must be in tag bits");
-  return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
-}
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-NumPartitionPagesPerSuperPage() {
-  return kSuperPageSize >> PartitionPageShift();
-}
-
-PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
-  return kMaxSuperPagesInPool;
-}
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-// In 64-bit mode, the direct map allocation granularity is super page size,
-// because this is the reservation granularity of the pools.
-PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
-  return kSuperPageSize;
-}
-
-PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
-  return kSuperPageShift;
-}
-#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
-// In 32-bit mode, address space is a scarce resource. Use the system
-// allocation granularity, which is the lowest possible address space allocation
-// unit. However, don't go below partition page size, so that pool bitmaps
-// don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-DirectMapAllocationGranularity() {
-  return std::max(PageAllocationGranularity(), PartitionPageSize());
-}
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-DirectMapAllocationGranularityShift() {
-  return std::max(PageAllocationGranularityShift(), PartitionPageShift());
-}
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-DirectMapAllocationGranularityOffsetMask() {
-  return DirectMapAllocationGranularity() - 1;
-}
-
-// The "order" of an allocation is closely related to the power-of-1 size of the
-// allocation. More precisely, the order is the bit index of the
-// most-significant-bit in the allocation size, where the bit numbers starts at
-// index 1 for the least-significant-bit.
-//
-// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
-// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
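A sketch of the computation the comment above implies (OrderOf is a
hypothetical name, shown only to make the definition concrete; uses C++20
<bit>):

#include <bit>
#include <cstddef>

constexpr size_t OrderOf(size_t size) {
  // 1-based index of the most significant set bit; 0 for size == 0.
  return std::bit_width(size);
}
static_assert(OrderOf(0) == 0 && OrderOf(1) == 1);
static_assert(OrderOf(2) == 2 && OrderOf(3) == 2);
static_assert(OrderOf(8) == 4 && OrderOf(15) == 4);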
-
-// PartitionAlloc should return memory properly aligned for any type, to behave
-// properly as a generic allocator. This is not strictly required as long as
-// types are explicitly allocated with PartitionAlloc, but is required to use
-// it as a malloc() implementation, and generally to match malloc()'s behavior.
-//
-// In practice, this means 8 bytes alignment on 32 bit architectures, and 16
-// bytes on 64 bit ones.
-//
-// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
-constexpr size_t kMinBucketedOrder =
-    kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
-// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
-constexpr size_t kMaxBucketedOrder = 20;
-constexpr size_t kNumBucketedOrders =
-    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
-// 8 buckets per order (for the higher orders).
-// Note: this is not what is used by default, but the maximum amount of buckets
-// per order. By default, only 4 are used.
-constexpr size_t kNumBucketsPerOrderBits = 3;
-constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
-constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
-constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
-constexpr size_t kMaxBucketSpacing =
-    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
-constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
-                                ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
-// Limit when downsizing a direct mapping using `realloc`:
-constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
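Worked numbers for these constants under the 64-bit configuration
(kAlignment == 16), shown here purely as an illustration:

  kMinBucketedOrder   = 5, so kSmallestBucket = 1 << 4 = 16 bytes
  kNumBucketedOrders  = 20 - 5 + 1 = 16
  kNumBucketsPerOrder = 1 << 3 = 8, so kNumBuckets = 16 * 8 = 128
  kMaxBucketSpacing   = 1 << (19 - 3) = 64 KiB
  kMaxBucketed        = (1 << 19) + 7 * 64 KiB = 983,040 bytes (960 KiB)
  kMinDirectMappedDownsize = kMaxBucketed + 1 = 983,041 bytes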
-// Intentionally set to less than 2GiB to make sure that a 2GiB allocation
-// fails. This is a security choice in Chrome, to help make size_t vs. int bugs
-// harder to exploit.
-
-// The definition of MaxDirectMapped depends only on constants that are
-// unconditionally constexpr. Therefore it is not necessary to use
-// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
-PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
-  // Subtract kSuperPageSize to accommodate for granularity inside
-  // PartitionRoot::GetDirectMapReservationSize.
-  return (1UL << 31) - kSuperPageSize;
-}
-
-// Max alignment supported by AlignedAllocWithFlags().
-// kSuperPageSize alignment can't be easily supported, because each super page
-// starts with guard pages & metadata.
-constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;
-
-constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
-
-// When a SlotSpan becomes empty, the allocator tries to avoid re-using it
-// immediately, to help with fragmentation. At this point, it becomes dirty
-// committed memory, which we want to minimize. This could be decommitted
-// immediately, but that would imply doing a lot of system calls. In particular,
-// for single-slot SlotSpans, a malloc() / free() loop would cause a *lot* of
-// system calls.
-//
-// As an intermediate step, empty SlotSpans are placed into a per-partition
-// global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
-// before getting decommitted. A new entry (i.e. a newly empty SlotSpan) taking
-// the place used by a previous one will lead the previous SlotSpan to be
-// decommitted immediately, provided that it is still empty.
-//
-// Setting this value higher means giving more time for reuse to happen, at the
-// cost of possibly increasing peak committed memory usage (and increasing the
-// size of PartitionRoot a bit, since the ring buffer is there). Note that the
-// ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
-// *not* removed from it when re-used. So the ring buffer really is a buffer of
-// *possibly* empty SlotSpans.
-//
-// In all cases, PartitionRoot::PurgeMemory() with the
-// PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
-// in the ring buffer, so with periodic purge enabled, this typically happens
-// every few seconds.
-constexpr size_t kEmptyCacheIndexBits = 7;
-// kMaxFreeableSpans is the buffer size, but is never used as an index value,
-// hence <= is appropriate.
-constexpr size_t kMaxFreeableSpans = 1 << kEmptyCacheIndexBits;
-constexpr size_t kDefaultEmptySlotSpanRingSize = 16;
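With kEmptyCacheIndexBits == 7 the buffer can hold up to 1 << 7 == 128 entries,
of which 16 are used by default. A minimal sketch of the insertion behavior
described above (not the actual implementation; DecommitIfStillEmpty is a
hypothetical helper):

struct EmptySpanRingSketch {
  void* slots[kMaxFreeableSpans] = {};          // Up to 128 possible entries.
  size_t index = 0;
  size_t size = kDefaultEmptySlotSpanRingSize;  // 16 by default.

  void Insert(void* newly_empty_span) {
    void* evicted = slots[index];
    slots[index] = newly_empty_span;
    index = (index + 1) % size;
    if (evicted) {
      // DecommitIfStillEmpty(evicted);  // Decommit the displaced span if it
      //                                 // is still empty at this point.
    }
  }
};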
-
-// If the total size in bytes of allocated but not committed pages exceeds this
-// value (probably an "out of virtual address space" crash), a special
-// crash stack trace is generated at
-// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
-// of virtual address space" from "out of physical memory" in crash reports.
-constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB
-
-// These byte values match tcmalloc.
-constexpr unsigned char kUninitializedByte = 0xAB;
-constexpr unsigned char kFreedByte = 0xCD;
-
-constexpr unsigned char kQuarantinedByte = 0xEF;
-
-// 1 is smaller than anything we can use, as it is not properly aligned. Not
-// using a large size, since PartitionBucket::slot_size is a uint32_t, and
-// static_cast<uint32_t>(-1) is too close to a "real" size.
-constexpr size_t kInvalidBucketSize = 1;
-
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-// Requested size that requires the hack.
-constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-}  // namespace internal
-
-// These constants are used outside PartitionAlloc itself, so we provide
-// non-internal aliases here.
-using ::partition_alloc::internal::kInvalidBucketSize;
-using ::partition_alloc::internal::kMaxSuperPagesInPool;
-using ::partition_alloc::internal::kMaxSupportedAlignment;
-using ::partition_alloc::internal::kNumBuckets;
-using ::partition_alloc::internal::kSuperPageSize;
-using ::partition_alloc::internal::MaxDirectMapped;
-using ::partition_alloc::internal::PartitionPageSize;
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_for_testing.h b/base/allocator/partition_allocator/partition_alloc_for_testing.h
deleted file mode 100644
index 07f13b8..0000000
--- a/base/allocator/partition_allocator/partition_alloc_for_testing.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
-
-#include "base/allocator/partition_allocator/partition_alloc.h"
-
-namespace partition_alloc {
-namespace internal {
-
-constexpr bool AllowLeaks = true;
-constexpr bool DisallowLeaks = false;
-
-// A subclass of PartitionAllocator for testing. It will free all resources,
-// i.e. allocated memory, memory inside freelists, and so on, when it is
-// destroyed or when reset() is invoked manually.
-// To check that no memory is left allocated but not freed, use
-// allow_leaks=false: a CHECK failure is raised inside reset() if any leak is
-// detected. Otherwise (e.g. for intentional leaks), use allow_leaks=true.
-template <bool allow_leaks>
-struct PartitionAllocatorForTesting : public PartitionAllocator {
-  PartitionAllocatorForTesting() : PartitionAllocator() {}
-
-  explicit PartitionAllocatorForTesting(PartitionOptions opts)
-      : PartitionAllocator(opts) {}
-
-  ~PartitionAllocatorForTesting() { reset(); }
-
-  PA_ALWAYS_INLINE void reset() {
-    PartitionAllocator::root()->ResetForTesting(allow_leaks);
-  }
-};
-
-}  // namespace internal
-
-using PartitionAllocatorForTesting =
-    internal::PartitionAllocatorForTesting<internal::DisallowLeaks>;
-
-using PartitionAllocatorAllowLeaksForTesting =
-    internal::PartitionAllocatorForTesting<internal::AllowLeaks>;
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
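A hedged usage sketch (not part of the original file), mirroring the
AllocWithFlags/Free calls used elsewhere in the PartitionAlloc tests; the
leak-checking variant CHECK-fails in reset() if any allocation is still
outstanding when the allocator is destroyed:

#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"

void ExerciseAllocatorForTesting() {
  partition_alloc::PartitionAllocatorForTesting allocator(
      partition_alloc::PartitionOptions{});
  void* object = allocator.root()->AllocWithFlags(0, 16, nullptr);
  allocator.root()->Free(object);
}  // reset() runs in the destructor; a leaked |object| would CHECK here.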
diff --git a/base/allocator/partition_allocator/partition_alloc_forward.h b/base/allocator/partition_allocator/partition_alloc_forward.h
deleted file mode 100644
index aa808f2..0000000
--- a/base/allocator/partition_allocator/partition_alloc_forward.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
-
-#include <algorithm>
-#include <cstddef>
-#include <cstdint>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-
-namespace partition_alloc {
-
-namespace internal {
-
-// Alignment has two constraints:
-// - Alignment requirement for scalar types: alignof(std::max_align_t)
-// - Alignment requirement for operator new().
-//
-// The two are separate on Windows 64 bits, where the first one is 8 bytes, and
-// the second one 16. We could technically return something different for
-// malloc() and operator new(), but this would complicate things, and most of
-// our allocations are presumably coming from operator new() anyway.
-constexpr size_t kAlignment =
-    std::max(alignof(max_align_t),
-             static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
-static_assert(kAlignment <= 16,
-              "PartitionAlloc doesn't support a fundamental alignment larger "
-              "than 16 bytes.");
-
-struct SlotSpanMetadata;
-class PA_LOCKABLE Lock;
-
-// This type trait verifies a type can be used as a pointer offset.
-//
-// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values.
-// Smaller types are also allowed.
-template <typename Z>
-static constexpr bool is_offset_type =
-    std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
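A few illustrative checks (added as a sketch, not present in the original
file), showing which offset types the trait accepts:

static_assert(is_offset_type<int>, "signed offsets are supported");
static_assert(is_offset_type<size_t>, "unsigned offsets are supported");
static_assert(!is_offset_type<float>, "non-integral types are rejected");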
-
-}  // namespace internal
-
-class PartitionStatsDumper;
-
-struct PartitionRoot;
-
-namespace internal {
-// Declare PartitionRootLock() for thread analysis. Its implementation
-// is defined in partition_root.h.
-Lock& PartitionRootLock(PartitionRoot*);
-}  // namespace internal
-
-}  // namespace partition_alloc
-
-// From https://clang.llvm.org/docs/AttributeReference.html#malloc:
-//
-// The malloc attribute indicates that the function acts like a system memory
-// allocation function, returning a pointer to allocated storage disjoint from
-// the storage for any other object accessible to the caller.
-//
-// Note that it doesn't apply to realloc()-type functions, as they can return
-// the same pointer as the one passed as a parameter, as noted in e.g. stdlib.h
-// on Linux systems.
-#if PA_HAS_ATTRIBUTE(malloc)
-#define PA_MALLOC_FN __attribute__((malloc))
-#endif
-
-// Allows the compiler to assume that the return value is aligned on a
-// kAlignment boundary. This is useful for e.g. using aligned vector
-// instructions in the constructor for zeroing.
-#if PA_HAS_ATTRIBUTE(assume_aligned)
-#define PA_MALLOC_ALIGNED \
-  __attribute__((assume_aligned(::partition_alloc::internal::kAlignment)))
-#endif
-
-#if !defined(PA_MALLOC_FN)
-#define PA_MALLOC_FN
-#endif
-
-#if !defined(PA_MALLOC_ALIGNED)
-#define PA_MALLOC_ALIGNED
-#endif
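A hedged usage sketch (MyPartitionAlloc is a hypothetical function, not part of
the original file): annotating an allocation entry point lets the compiler
assume the returned pointer does not alias existing objects and is aligned on a
kAlignment boundary.

PA_MALLOC_FN PA_MALLOC_ALIGNED void* MyPartitionAlloc(size_t size);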
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_hooks.cc b/base/allocator/partition_allocator/partition_alloc_hooks.cc
deleted file mode 100644
index c6dba14..0000000
--- a/base/allocator/partition_allocator/partition_alloc_hooks.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
-
-#include <ostream>
-
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-namespace partition_alloc {
-
-namespace {
-
-internal::Lock g_hook_lock;
-
-internal::Lock& GetHooksLock() {
-  return g_hook_lock;
-}
-
-}  // namespace
-
-std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
-std::atomic<PartitionAllocHooks::AllocationObserverHook*>
-    PartitionAllocHooks::allocation_observer_hook_(nullptr);
-std::atomic<PartitionAllocHooks::FreeObserverHook*>
-    PartitionAllocHooks::free_observer_hook_(nullptr);
-std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
-    PartitionAllocHooks::allocation_override_hook_(nullptr);
-std::atomic<PartitionAllocHooks::FreeOverrideHook*>
-    PartitionAllocHooks::free_override_hook_(nullptr);
-std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
-    PartitionAllocHooks::realloc_override_hook_(nullptr);
-std::atomic<PartitionAllocHooks::QuarantineOverrideHook*>
-    PartitionAllocHooks::quarantine_override_hook_(nullptr);
-
-void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
-                                           FreeObserverHook* free_hook) {
-  internal::ScopedGuard guard(GetHooksLock());
-
-  // Chained hooks are not supported. Registering a non-null hook when a
-  // non-null hook is already registered indicates somebody is trying to
-  // overwrite a hook.
-  PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
-           (!alloc_hook && !free_hook))
-      << "Overwriting already set observer hooks";
-  allocation_observer_hook_ = alloc_hook;
-  free_observer_hook_ = free_hook;
-
-  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
-}
-
-void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
-                                           FreeOverrideHook* free_hook,
-                                           ReallocOverrideHook realloc_hook) {
-  internal::ScopedGuard guard(GetHooksLock());
-
-  PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
-            !realloc_override_hook_) ||
-           (!alloc_hook && !free_hook && !realloc_hook))
-      << "Overwriting already set override hooks";
-  allocation_override_hook_ = alloc_hook;
-  free_override_hook_ = free_hook;
-  realloc_override_hook_ = realloc_hook;
-
-  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
-}
-
-void PartitionAllocHooks::AllocationObserverHookIfEnabled(
-    const partition_alloc::AllocationNotificationData& notification_data) {
-  if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed)) {
-    hook(notification_data);
-  }
-}
-
-bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
-    void** out,
-    unsigned int flags,
-    size_t size,
-    const char* type_name) {
-  if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) {
-    return hook(out, flags, size, type_name);
-  }
-  return false;
-}
-
-void PartitionAllocHooks::FreeObserverHookIfEnabled(
-    const FreeNotificationData& notification_data) {
-  if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed)) {
-    hook(notification_data);
-  }
-}
-
-bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
-  if (auto* hook = free_override_hook_.load(std::memory_order_relaxed)) {
-    return hook(address);
-  }
-  return false;
-}
-
-void PartitionAllocHooks::ReallocObserverHookIfEnabled(
-    const FreeNotificationData& free_notification_data,
-    const AllocationNotificationData& allocation_notification_data) {
-  // Report a reallocation as a free followed by an allocation.
-  AllocationObserverHook* allocation_hook =
-      allocation_observer_hook_.load(std::memory_order_relaxed);
-  FreeObserverHook* free_hook =
-      free_observer_hook_.load(std::memory_order_relaxed);
-  if (allocation_hook && free_hook) {
-    free_hook(free_notification_data);
-    allocation_hook(allocation_notification_data);
-  }
-}
-
-bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
-                                                       void* address) {
-  if (ReallocOverrideHook* hook =
-          realloc_override_hook_.load(std::memory_order_relaxed)) {
-    return hook(out, address);
-  }
-  return false;
-}
-
-// Do not unset the hook while there are remaining quarantined slots, so as
-// not to break checks on unquarantining.
-void PartitionAllocHooks::SetQuarantineOverrideHook(
-    QuarantineOverrideHook* hook) {
-  quarantine_override_hook_.store(hook, std::memory_order_release);
-}
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/partition_alloc_hooks.h b/base/allocator/partition_allocator/partition_alloc_hooks.h
deleted file mode 100644
index ace1f4b..0000000
--- a/base/allocator/partition_allocator/partition_alloc_hooks.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
-
-#include <atomic>
-#include <cstddef>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc {
-
-class AllocationNotificationData;
-class FreeNotificationData;
-
-// PartitionAlloc supports setting hooks to observe allocations/frees as they
-// occur as well as 'override' hooks that allow overriding those operations.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
- public:
-  // Log allocation and free events.
-  typedef void AllocationObserverHook(
-      const AllocationNotificationData& notification_data);
-  typedef void FreeObserverHook(const FreeNotificationData& notification_data);
-
-  // If it returns true, the allocation has been overridden with the pointer in
-  // *out.
-  typedef bool AllocationOverrideHook(void** out,
-                                      unsigned int flags,
-                                      size_t size,
-                                      const char* type_name);
-  // If it returns true, then the allocation was overridden and has been freed.
-  typedef bool FreeOverrideHook(void* address);
-  // If it returns true, the underlying allocation is overridden and *out holds
-  // the size of the underlying allocation.
-  typedef bool ReallocOverrideHook(size_t* out, void* address);
-
-  // Special hook type, independent of the rest. Triggered when `free()` detects
-  // outstanding references to the allocation.
-  // IMPORTANT: Make sure the hook always overwrites `[address, address + size)`
-  // with a bit pattern that cannot be interpreted as a valid memory address.
-  typedef void QuarantineOverrideHook(void* address, size_t size);
-
-  // To unhook, call Set*Hooks with nullptrs.
-  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
-                               FreeObserverHook* free_hook);
-  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
-                               FreeOverrideHook* free_hook,
-                               ReallocOverrideHook realloc_hook);
-
-  // Helper method to check whether hooks are enabled. This is an optimization
-  // so that if a function needs to call observer and override hooks in two
-  // different places this value can be cached and only loaded once.
-  static bool AreHooksEnabled() {
-    return hooks_enabled_.load(std::memory_order_relaxed);
-  }
-
-  static void AllocationObserverHookIfEnabled(
-      const partition_alloc::AllocationNotificationData& notification_data);
-  static bool AllocationOverrideHookIfEnabled(void** out,
-                                              unsigned int flags,
-                                              size_t size,
-                                              const char* type_name);
-
-  static void FreeObserverHookIfEnabled(
-      const FreeNotificationData& notification_data);
-  static bool FreeOverrideHookIfEnabled(void* address);
-
-  static void ReallocObserverHookIfEnabled(
-      const FreeNotificationData& free_notification_data,
-      const AllocationNotificationData& allocation_notification_data);
-  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
-
-  PA_ALWAYS_INLINE static QuarantineOverrideHook* GetQuarantineOverrideHook() {
-    return quarantine_override_hook_.load(std::memory_order_acquire);
-  }
-
-  static void SetQuarantineOverrideHook(QuarantineOverrideHook* hook);
-
- private:
-  // Single bool indicating whether observer or override hooks are set. This
-  // reduces the number of loads required to check whether hooking is
-  // enabled.
-  static std::atomic<bool> hooks_enabled_;
-
-  // The hook pointers below are written under GetHooksLock() to synchronize
-  // Set*Hooks calls.
-  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
-  static std::atomic<FreeObserverHook*> free_observer_hook_;
-
-  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
-  static std::atomic<FreeOverrideHook*> free_override_hook_;
-  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
-
-  static std::atomic<QuarantineOverrideHook*> quarantine_override_hook_;
-};
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
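A hedged usage sketch (OnAlloc, OnFree and InstallLoggingHooks are
hypothetical, not part of the original file): registering observer hooks to log
every allocation and free, and unhooking by passing nullptrs as described
above.

#include "base/allocator/partition_allocator/partition_alloc_hooks.h"

void OnAlloc(const partition_alloc::AllocationNotificationData&) {
  // Record the allocation event, e.g. in a lock-free buffer.
}
void OnFree(const partition_alloc::FreeNotificationData&) {
  // Record the free event.
}

void InstallLoggingHooks() {
  partition_alloc::PartitionAllocHooks::SetObserverHooks(&OnAlloc, &OnFree);
}
void RemoveLoggingHooks() {
  partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
}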
diff --git a/base/allocator/partition_allocator/partition_alloc_perftest.cc b/base/allocator/partition_allocator/partition_alloc_perftest.cc
deleted file mode 100644
index 8815708..0000000
--- a/base/allocator/partition_allocator/partition_alloc_perftest.cc
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <algorithm>
-#include <atomic>
-#include <limits>
-#include <memory>
-#include <vector>
-
-#include "base/allocator/partition_allocator/extended_api.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/strings/stringprintf.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
-#include "base/debug/debugging_buildflags.h"
-#include "base/timer/lap_timer.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_result_reporter.h"
-
-#if BUILDFLAG(IS_ANDROID) || defined(ARCH_CPU_32_BITS) || BUILDFLAG(IS_FUCHSIA)
-// Some tests allocate many GB of memory, which can cause issues on Android and
-// address-space exhaustion for any 32-bit process.
-#define MEMORY_CONSTRAINED
-#endif
-
-#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-#include "base/allocator/dispatcher/dispatcher.h"
-#include "base/debug/allocation_trace.h"
-#endif
-
-namespace partition_alloc::internal {
-
-namespace {
-
-// Change kTimeLimit to something higher if you need more time to capture a
-// trace.
-constexpr ::base::TimeDelta kTimeLimit = ::base::Seconds(2);
-constexpr int kWarmupRuns = 10000;
-constexpr int kTimeCheckInterval = 100000;
-constexpr size_t kAllocSize = 40;
-
-// Size constants are mostly arbitrary, but try to simulate something like CSS
-// parsing which consists of lots of relatively small objects.
-constexpr int kMultiBucketMinimumSize = 24;
-constexpr int kMultiBucketIncrement = 13;
-// Final size is 24 + (13 * 22) = 310 bytes.
-constexpr int kMultiBucketRounds = 22;
-
-constexpr char kMetricPrefixMemoryAllocation[] = "MemoryAllocation.";
-constexpr char kMetricThroughput[] = "throughput";
-constexpr char kMetricTimePerAllocation[] = "time_per_allocation";
-
-perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
-  perf_test::PerfResultReporter reporter(kMetricPrefixMemoryAllocation,
-                                         story_name);
-  reporter.RegisterImportantMetric(kMetricThroughput, "runs/s");
-  reporter.RegisterImportantMetric(kMetricTimePerAllocation, "ns");
-  return reporter;
-}
-
-enum class AllocatorType {
-  kSystem,
-  kPartitionAlloc,
-  kPartitionAllocWithThreadCache,
-#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-  kPartitionAllocWithAllocationStackTraceRecorder,
-#endif
-};
-
-class Allocator {
- public:
-  Allocator() = default;
-  virtual ~Allocator() = default;
-  virtual void* Alloc(size_t size) = 0;
-  virtual void Free(void* data) = 0;
-};
-
-class SystemAllocator : public Allocator {
- public:
-  SystemAllocator() = default;
-  ~SystemAllocator() override = default;
-  void* Alloc(size_t size) override { return malloc(size); }
-  void Free(void* data) override { free(data); }
-};
-
-class PartitionAllocator : public Allocator {
- public:
-  PartitionAllocator() = default;
-  ~PartitionAllocator() override { alloc_.DestructForTesting(); }
-
-  void* Alloc(size_t size) override {
-    return alloc_.AllocWithFlagsNoHooks(0, size, PartitionPageSize());
-  }
-  void Free(void* data) override {
-    // Even though it's easy to invoke the fast path with alloc_.FreeNoHooks(),
-    // we chose to use the slower path, because it's more common with PA-E.
-    PartitionRoot::FreeNoHooksInUnknownRoot(data);
-  }
-
- private:
-  PartitionRoot alloc_{PartitionOptions{}};
-};
-
-class PartitionAllocatorWithThreadCache : public Allocator {
- public:
-  explicit PartitionAllocatorWithThreadCache(bool use_alternate_bucket_dist)
-      : scope_(allocator_.root()) {
-    ThreadCacheRegistry::Instance().PurgeAll();
-    if (!use_alternate_bucket_dist) {
-      allocator_.root()->SwitchToDenserBucketDistribution();
-    } else {
-      allocator_.root()->ResetBucketDistributionForTesting();
-    }
-  }
-  ~PartitionAllocatorWithThreadCache() override = default;
-
-  void* Alloc(size_t size) override {
-    return allocator_.root()->AllocWithFlagsNoHooks(0, size,
-                                                    PartitionPageSize());
-  }
-  void Free(void* data) override {
-    // Even though it's easy to invoke the fast path with alloc_.Free(),
-    // we chose to use the slower path, because it's more common with PA-E.
-    PartitionRoot::FreeInUnknownRoot(data);
-  }
-
- private:
-  static constexpr partition_alloc::PartitionOptions kOpts = {
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-    .thread_cache = PartitionOptions::ThreadCache::kEnabled,
-#endif
-  };
-  PartitionAllocatorForTesting<internal::DisallowLeaks> allocator_{kOpts};
-  internal::ThreadCacheProcessScopeForTesting scope_;
-};
-
-#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-class PartitionAllocatorWithAllocationStackTraceRecorder : public Allocator {
- public:
-  explicit PartitionAllocatorWithAllocationStackTraceRecorder(
-      bool register_hooks)
-      : register_hooks_(register_hooks) {
-    if (register_hooks_) {
-      dispatcher_.InitializeForTesting(&recorder_);
-    }
-  }
-
-  ~PartitionAllocatorWithAllocationStackTraceRecorder() override {
-    if (register_hooks_) {
-      dispatcher_.ResetForTesting();
-    }
-  }
-
-  void* Alloc(size_t size) override {
-    return alloc_.AllocWithFlags(0, size, nullptr);
-  }
-
-  void Free(void* data) override {
-    // Even though it's easy to invoke the fast path with alloc_.Free(),
-    // we chose to use the slower path, because it's more common with PA-E.
-    PartitionRoot::FreeInUnknownRoot(data);
-  }
-
- private:
-  bool const register_hooks_;
-  PartitionRoot alloc_{PartitionOptions{}};
-  ::base::allocator::dispatcher::Dispatcher& dispatcher_ =
-      ::base::allocator::dispatcher::Dispatcher::GetInstance();
-  ::base::debug::tracer::AllocationTraceRecorder recorder_;
-};
-#endif  // BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-
-class TestLoopThread : public base::PlatformThreadForTesting::Delegate {
- public:
-  TestLoopThread(float (*test_fn)(Allocator*), Allocator* allocator)
-      : test_fn_(test_fn), allocator_(allocator) {
-    PA_CHECK(base::PlatformThreadForTesting::Create(0, this, &thread_handle_));
-  }
-
-  float Run() {
-    base::PlatformThreadForTesting::Join(thread_handle_);
-    return laps_per_second_;
-  }
-
-  void ThreadMain() override { laps_per_second_ = test_fn_(allocator_); }
-
-  float (*test_fn_)(Allocator*) = nullptr;
-  Allocator* allocator_ = nullptr;
-  base::PlatformThreadHandle thread_handle_;
-  std::atomic<float> laps_per_second_;
-};
-
-void DisplayResults(const std::string& story_name,
-                    float iterations_per_second) {
-  auto reporter = SetUpReporter(story_name);
-  reporter.AddResult(kMetricThroughput, iterations_per_second);
-  reporter.AddResult(kMetricTimePerAllocation,
-                     static_cast<size_t>(1e9 / iterations_per_second));
-}
-
-class MemoryAllocationPerfNode {
- public:
-  MemoryAllocationPerfNode* GetNext() const { return next_; }
-  void SetNext(MemoryAllocationPerfNode* p) { next_ = p; }
-  static void FreeAll(MemoryAllocationPerfNode* first, Allocator* alloc) {
-    MemoryAllocationPerfNode* cur = first;
-    while (cur != nullptr) {
-      MemoryAllocationPerfNode* next = cur->GetNext();
-      alloc->Free(cur);
-      cur = next;
-    }
-  }
-
- private:
-  MemoryAllocationPerfNode* next_ = nullptr;
-};
-
-#if !defined(MEMORY_CONSTRAINED)
-float SingleBucket(Allocator* allocator) {
-  auto* first =
-      reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(kAllocSize));
-  size_t allocated_memory = kAllocSize;
-
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  MemoryAllocationPerfNode* cur = first;
-  do {
-    auto* next = reinterpret_cast<MemoryAllocationPerfNode*>(
-        allocator->Alloc(kAllocSize));
-    PA_CHECK(next != nullptr);
-    cur->SetNext(next);
-    cur = next;
-    timer.NextLap();
-    allocated_memory += kAllocSize;
-    // With multiple threads, can get OOM otherwise.
-    if (allocated_memory > 200e6) {
-      cur->SetNext(nullptr);
-      MemoryAllocationPerfNode::FreeAll(first->GetNext(), allocator);
-      cur = first;
-      allocated_memory = kAllocSize;
-    }
-  } while (!timer.HasTimeLimitExpired());
-
-  // The next_ = nullptr default initializer only takes effect when the
-  // constructor runs; it doesn't run here, since the nodes are created by
-  // reinterpreting raw allocations of arbitrary length.
-  cur->SetNext(nullptr);
-  MemoryAllocationPerfNode::FreeAll(first, allocator);
-
-  return timer.LapsPerSecond();
-}
-#endif  // !defined(MEMORY_CONSTRAINED)
-
-float SingleBucketWithFree(Allocator* allocator) {
-  // Allocate an initial element to make sure the bucket stays set up.
-  void* elem = allocator->Alloc(kAllocSize);
-
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  do {
-    void* cur = allocator->Alloc(kAllocSize);
-    PA_CHECK(cur != nullptr);
-    allocator->Free(cur);
-    timer.NextLap();
-  } while (!timer.HasTimeLimitExpired());
-
-  allocator->Free(elem);
-  return timer.LapsPerSecond();
-}
-
-#if !defined(MEMORY_CONSTRAINED)
-float MultiBucket(Allocator* allocator) {
-  auto* first =
-      reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(kAllocSize));
-  MemoryAllocationPerfNode* cur = first;
-  size_t allocated_memory = kAllocSize;
-
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  do {
-    for (int i = 0; i < kMultiBucketRounds; i++) {
-      size_t size = kMultiBucketMinimumSize + (i * kMultiBucketIncrement);
-      auto* next =
-          reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(size));
-      PA_CHECK(next != nullptr);
-      cur->SetNext(next);
-      cur = next;
-      allocated_memory += size;
-    }
-
-    // Can OOM with multiple threads.
-    if (allocated_memory > 100e6) {
-      cur->SetNext(nullptr);
-      MemoryAllocationPerfNode::FreeAll(first->GetNext(), allocator);
-      cur = first;
-      allocated_memory = kAllocSize;
-    }
-
-    timer.NextLap();
-  } while (!timer.HasTimeLimitExpired());
-
-  cur->SetNext(nullptr);
-  MemoryAllocationPerfNode::FreeAll(first, allocator);
-
-  return timer.LapsPerSecond() * kMultiBucketRounds;
-}
-#endif  // !defined(MEMORY_CONSTRAINED)
-
-float MultiBucketWithFree(Allocator* allocator) {
-  std::vector<void*> elems;
-  elems.reserve(kMultiBucketRounds);
-  // Do an initial round of allocation to make sure that the buckets stay in
-  // use (and aren't accidentally released back to the OS).
-  for (int i = 0; i < kMultiBucketRounds; i++) {
-    void* cur =
-        allocator->Alloc(kMultiBucketMinimumSize + (i * kMultiBucketIncrement));
-    PA_CHECK(cur != nullptr);
-    elems.push_back(cur);
-  }
-
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  do {
-    for (int i = 0; i < kMultiBucketRounds; i++) {
-      void* cur = allocator->Alloc(kMultiBucketMinimumSize +
-                                   (i * kMultiBucketIncrement));
-      PA_CHECK(cur != nullptr);
-      allocator->Free(cur);
-    }
-    timer.NextLap();
-  } while (!timer.HasTimeLimitExpired());
-
-  for (void* ptr : elems) {
-    allocator->Free(ptr);
-  }
-
-  return timer.LapsPerSecond() * kMultiBucketRounds;
-}
-
-float DirectMapped(Allocator* allocator) {
-  constexpr size_t kSize = 2 * 1000 * 1000;
-
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  do {
-    void* cur = allocator->Alloc(kSize);
-    PA_CHECK(cur != nullptr);
-    allocator->Free(cur);
-    timer.NextLap();
-  } while (!timer.HasTimeLimitExpired());
-
-  return timer.LapsPerSecond();
-}
-
-std::unique_ptr<Allocator> CreateAllocator(AllocatorType type,
-                                           bool use_alternate_bucket_dist) {
-  switch (type) {
-    case AllocatorType::kSystem:
-      return std::make_unique<SystemAllocator>();
-    case AllocatorType::kPartitionAlloc:
-      return std::make_unique<PartitionAllocator>();
-    case AllocatorType::kPartitionAllocWithThreadCache:
-      return std::make_unique<PartitionAllocatorWithThreadCache>(
-          use_alternate_bucket_dist);
-#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-    case AllocatorType::kPartitionAllocWithAllocationStackTraceRecorder:
-      return std::make_unique<
-          PartitionAllocatorWithAllocationStackTraceRecorder>(true);
-#endif
-  }
-}
-
-void LogResults(int thread_count,
-                AllocatorType alloc_type,
-                uint64_t total_laps_per_second,
-                uint64_t min_laps_per_second) {
-  PA_LOG(INFO) << "RESULTSCSV: " << thread_count << ","
-               << static_cast<int>(alloc_type) << "," << total_laps_per_second
-               << "," << min_laps_per_second;
-}
-
-void RunTest(int thread_count,
-             bool use_alternate_bucket_dist,
-             AllocatorType alloc_type,
-             float (*test_fn)(Allocator*),
-             float (*noisy_neighbor_fn)(Allocator*),
-             const char* story_base_name) {
-  auto alloc = CreateAllocator(alloc_type, use_alternate_bucket_dist);
-
-  std::unique_ptr<TestLoopThread> noisy_neighbor_thread = nullptr;
-  if (noisy_neighbor_fn) {
-    noisy_neighbor_thread =
-        std::make_unique<TestLoopThread>(noisy_neighbor_fn, alloc.get());
-  }
-
-  std::vector<std::unique_ptr<TestLoopThread>> threads;
-  for (int i = 0; i < thread_count; ++i) {
-    threads.push_back(std::make_unique<TestLoopThread>(test_fn, alloc.get()));
-  }
-
-  uint64_t total_laps_per_second = 0;
-  uint64_t min_laps_per_second = std::numeric_limits<uint64_t>::max();
-  for (int i = 0; i < thread_count; ++i) {
-    uint64_t laps_per_second = threads[i]->Run();
-    min_laps_per_second = std::min(laps_per_second, min_laps_per_second);
-    total_laps_per_second += laps_per_second;
-  }
-
-  if (noisy_neighbor_thread) {
-    noisy_neighbor_thread->Run();
-  }
-
-  char const* alloc_type_str;
-  switch (alloc_type) {
-    case AllocatorType::kSystem:
-      alloc_type_str = "System";
-      break;
-    case AllocatorType::kPartitionAlloc:
-      alloc_type_str = "PartitionAlloc";
-      break;
-    case AllocatorType::kPartitionAllocWithThreadCache:
-      alloc_type_str = "PartitionAllocWithThreadCache";
-      break;
-#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-    case AllocatorType::kPartitionAllocWithAllocationStackTraceRecorder:
-      alloc_type_str = "PartitionAllocWithAllocationStackTraceRecorder";
-      break;
-#endif
-  }
-
-  std::string name = base::TruncatingStringPrintf(
-      "%s%s_%s_%d", kMetricPrefixMemoryAllocation, story_base_name,
-      alloc_type_str, thread_count);
-
-  DisplayResults(name + "_total", total_laps_per_second);
-  DisplayResults(name + "_worst", min_laps_per_second);
-  LogResults(thread_count, alloc_type, total_laps_per_second,
-             min_laps_per_second);
-}
-
-class PartitionAllocMemoryAllocationPerfTest
-    : public testing::TestWithParam<std::tuple<int, bool, AllocatorType>> {};
-
-// Only one partition with a thread cache: cannot use the thread cache when
-// PartitionAlloc is malloc().
-INSTANTIATE_TEST_SUITE_P(
-    ,
-    PartitionAllocMemoryAllocationPerfTest,
-    ::testing::Combine(
-        ::testing::Values(1, 2, 3, 4),
-        ::testing::Values(false, true),
-        ::testing::Values(
-            AllocatorType::kSystem,
-            AllocatorType::kPartitionAlloc,
-            AllocatorType::kPartitionAllocWithThreadCache
-#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
-            ,
-            AllocatorType::kPartitionAllocWithAllocationStackTraceRecorder
-#endif
-            )));
-
-// This test (and the other one below) allocates a large amount of memory, which
-// can cause issues on Android.
-#if !defined(MEMORY_CONSTRAINED)
-TEST_P(PartitionAllocMemoryAllocationPerfTest, SingleBucket) {
-  auto params = GetParam();
-  RunTest(std::get<int>(params), std::get<bool>(params),
-          std::get<AllocatorType>(params), SingleBucket, nullptr,
-          "SingleBucket");
-}
-#endif  // !defined(MEMORY_CONSTRAINED)
-
-TEST_P(PartitionAllocMemoryAllocationPerfTest, SingleBucketWithFree) {
-  auto params = GetParam();
-  RunTest(std::get<int>(params), std::get<bool>(params),
-          std::get<AllocatorType>(params), SingleBucketWithFree, nullptr,
-          "SingleBucketWithFree");
-}
-
-#if !defined(MEMORY_CONSTRAINED)
-TEST_P(PartitionAllocMemoryAllocationPerfTest, MultiBucket) {
-  auto params = GetParam();
-  RunTest(std::get<int>(params), std::get<bool>(params),
-          std::get<AllocatorType>(params), MultiBucket, nullptr, "MultiBucket");
-}
-#endif  // !defined(MEMORY_CONSTRAINED)
-
-TEST_P(PartitionAllocMemoryAllocationPerfTest, MultiBucketWithFree) {
-  auto params = GetParam();
-  RunTest(std::get<int>(params), std::get<bool>(params),
-          std::get<AllocatorType>(params), MultiBucketWithFree, nullptr,
-          "MultiBucketWithFree");
-}
-
-TEST_P(PartitionAllocMemoryAllocationPerfTest, DirectMapped) {
-  auto params = GetParam();
-  RunTest(std::get<int>(params), std::get<bool>(params),
-          std::get<AllocatorType>(params), DirectMapped, nullptr,
-          "DirectMapped");
-}
-
-#if !defined(MEMORY_CONSTRAINED)
-TEST_P(PartitionAllocMemoryAllocationPerfTest,
-       DISABLED_MultiBucketWithNoisyNeighbor) {
-  auto params = GetParam();
-  RunTest(std::get<int>(params), std::get<bool>(params),
-          std::get<AllocatorType>(params), MultiBucket, DirectMapped,
-          "MultiBucketWithNoisyNeighbor");
-}
-#endif  // !defined(MEMORY_CONSTRAINED)
-
-}  // namespace
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc
deleted file mode 100644
index 44706e7..0000000
--- a/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ /dev/null
@@ -1,5498 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
-
-#include <algorithm>
-#include <cstddef>
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-#include <iostream>
-#include <limits>
-#include <memory>
-#include <random>
-#include <set>
-#include <tuple>
-#include <vector>
-
-#include "base/allocator/partition_allocator/address_space_randomization.h"
-#include "base/allocator/partition_allocator/chromecast_buildflags.h"
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_cookie.h"
-#include "base/allocator/partition_allocator/partition_freelist_entry.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "base/system/sys_info.h"
-#include "base/test/gtest_util.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(__ARM_FEATURE_MEMORY_TAGGING)
-#include <arm_acle.h>
-#endif
-
-#if BUILDFLAG(IS_POSIX)
-#if BUILDFLAG(IS_LINUX)
-// We need PKEY_DISABLE_WRITE in this file; glibc defines it in sys/mman.h but
-// it's actually Linux-specific and other Linux libcs define it in linux/mman.h.
-// We have to include both to be sure we get the definition.
-#include <linux/mman.h>
-#endif  // BUILDFLAG(IS_LINUX)
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#endif  // BUILDFLAG(IS_POSIX)
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
-#include <OpenCL/opencl.h>
-#endif
-
-#if BUILDFLAG(IS_MAC)
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-#endif
-
-#if BUILDFLAG(ENABLE_PKEYS)
-#include <sys/syscall.h>
-#endif
-
-// In the MTE world, the upper bits of a pointer can be decorated with a tag,
-// thus allowing many versions of the same pointer to exist. These macros take
-// that into account when comparing.
-#define PA_EXPECT_PTR_EQ(ptr1, ptr2) \
-  { EXPECT_EQ(UntagPtr(ptr1), UntagPtr(ptr2)); }
-#define PA_EXPECT_PTR_NE(ptr1, ptr2) \
-  { EXPECT_NE(UntagPtr(ptr1), UntagPtr(ptr2)); }
-
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-namespace {
-
-bool IsLargeMemoryDevice() {
-  // Treat any device with 4GiB or more of physical memory as a "large memory
-  // device". We check for slightly less than GiB so that devices with a small
-  // amount of memory not accessible to the OS still count as "large".
-  //
-  // Set to 4GiB, since we have 2GiB Android devices where tests flakily fail
-  // (e.g. Nexus 5X, crbug.com/1191195).
-  return base::SysInfo::AmountOfPhysicalMemory() >= 4000ULL * 1024 * 1024;
-}
-
-bool SetAddressSpaceLimit() {
-#if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
-  // 32 bits => address space is limited already.
-  return true;
-#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)
-  // macOS will accept, but not enforce, |RLIMIT_AS| changes. See
-  // https://crbug.com/435269 and rdar://17576114.
-  //
-  // Note: This number must not be less than 6 GB, because with
-  // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
-  // https://crbug.com/674665.
-  const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
-  struct rlimit limit;
-  if (getrlimit(RLIMIT_DATA, &limit) != 0) {
-    return false;
-  }
-  if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
-    limit.rlim_cur = kAddressSpaceLimit;
-    if (setrlimit(RLIMIT_DATA, &limit) != 0) {
-      return false;
-    }
-  }
-  return true;
-#else
-  return false;
-#endif
-}
-
-bool ClearAddressSpaceLimit() {
-#if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
-  return true;
-#elif BUILDFLAG(IS_POSIX)
-  struct rlimit limit;
-  if (getrlimit(RLIMIT_DATA, &limit) != 0) {
-    return false;
-  }
-  limit.rlim_cur = limit.rlim_max;
-  if (setrlimit(RLIMIT_DATA, &limit) != 0) {
-    return false;
-  }
-  return true;
-#else
-  return false;
-#endif
-}
-
-const size_t kTestSizes[] = {
-    1,
-    17,
-    100,
-    partition_alloc::internal::SystemPageSize(),
-    partition_alloc::internal::SystemPageSize() + 1,
-    partition_alloc::PartitionRoot::GetDirectMapSlotSize(100),
-    1 << 20,
-    1 << 21,
-};
-constexpr size_t kTestSizesCount = std::size(kTestSizes);
-
-void AllocateRandomly(partition_alloc::PartitionRoot* root,
-                      size_t count,
-                      unsigned int flags) {
-  std::vector<void*> allocations(count, nullptr);
-  for (size_t i = 0; i < count; ++i) {
-    const size_t size =
-        kTestSizes[partition_alloc::internal::base::RandGenerator(
-            kTestSizesCount)];
-    allocations[i] = root->AllocWithFlags(flags, size, nullptr);
-    EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
-  }
-
-  for (size_t i = 0; i < count; ++i) {
-    if (allocations[i]) {
-      root->Free(allocations[i]);
-    }
-  }
-}
-
-void HandleOOM(size_t unused_size) {
-  PA_LOG(FATAL) << "Out of memory";
-}
-
-int g_dangling_raw_ptr_detected_count = 0;
-int g_dangling_raw_ptr_released_count = 0;
-
-class CountDanglingRawPtr {
- public:
-  CountDanglingRawPtr() {
-    g_dangling_raw_ptr_detected_count = 0;
-    g_dangling_raw_ptr_released_count = 0;
-    old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
-    old_released_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
-
-    partition_alloc::SetDanglingRawPtrDetectedFn(
-        CountDanglingRawPtr::DanglingRawPtrDetected);
-    partition_alloc::SetDanglingRawPtrReleasedFn(
-        CountDanglingRawPtr::DanglingRawPtrReleased);
-  }
-  ~CountDanglingRawPtr() {
-    partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
-    partition_alloc::SetDanglingRawPtrReleasedFn(old_released_fn_);
-  }
-
- private:
-  static void DanglingRawPtrDetected(uintptr_t) {
-    g_dangling_raw_ptr_detected_count++;
-  }
-  static void DanglingRawPtrReleased(uintptr_t) {
-    g_dangling_raw_ptr_released_count++;
-  }
-
-  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
-  partition_alloc::DanglingRawPtrReleasedFn* old_released_fn_;
-};
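-
-// Sketch of how CountDanglingRawPtr is meant to be used from a test body (the
-// assertions below are illustrative, not part of this file):
-//
-//   CountDanglingRawPtr counter;  // Installs the counting hooks.
-//   ... trigger a dangling raw_ptr detection/release ...
-//   EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-//   EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
-//   // The previous hooks are restored when |counter| goes out of scope.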
-
-}  // namespace
-
-// Note: This test exercises interfaces inside the `partition_alloc`
-// namespace, but inspects objects inside `partition_alloc::internal`.
-// For ease of reading, the tests are placed into the latter namespace.
-namespace partition_alloc::internal {
-
-using BucketDistribution = PartitionRoot::BucketDistribution;
-using SlotSpan = SlotSpanMetadata;
-
-const size_t kTestAllocSize = 16;
-
-#if !BUILDFLAG(PA_DCHECK_IS_ON)
-const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
-const size_t kExtraAllocSizeWithoutRefCount = 0ull;
-#else
-const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
-const size_t kExtraAllocSizeWithoutRefCount = kCookieSize;
-#endif
-
-const char* type_name = nullptr;
-
-void SetDistributionForPartitionRoot(PartitionRoot* root,
-                                     BucketDistribution distribution) {
-  switch (distribution) {
-    case BucketDistribution::kNeutral:
-      root->ResetBucketDistributionForTesting();
-      break;
-    case BucketDistribution::kDenser:
-      root->SwitchToDenserBucketDistribution();
-      break;
-  }
-}
-
-struct PartitionAllocTestParam {
-  BucketDistribution bucket_distribution;
-  bool use_pkey_pool;
-  size_t ref_count_size;
-};
-
-const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {
-  std::vector<size_t> ref_count_sizes = {0, 8, 16};
-  // sizeof(PartitionRefCount) == 8 under some configurations, so we can't force
-  // the size down to 4.
-#if !PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) && \
-    !PA_CONFIG(REF_COUNT_CHECK_COOKIE) &&         \
-    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-  ref_count_sizes.push_back(4);
-#endif
-  // Using MTE or the Mac13 workaround increases the extras size without
-  // increasing sizeof(PartitionRefCount), so we don't have to exclude it here,
-  // as long as ExtraAllocSize() accounts for it.
-
-  std::vector<PartitionAllocTestParam> params;
-  for (size_t ref_count_size : ref_count_sizes) {
-    params.emplace_back(PartitionAllocTestParam{BucketDistribution::kNeutral,
-                                                false, ref_count_size});
-    params.emplace_back(PartitionAllocTestParam{BucketDistribution::kDenser,
-                                                false, ref_count_size});
-#if BUILDFLAG(ENABLE_PKEYS)
-    if (CPUHasPkeySupport()) {
-      params.emplace_back(PartitionAllocTestParam{BucketDistribution::kNeutral,
-                                                  true, ref_count_size});
-      params.emplace_back(PartitionAllocTestParam{BucketDistribution::kDenser,
-                                                  true, ref_count_size});
-    }
-#endif
-  }
-  return params;
-}
-
-class PartitionAllocTest
-    : public testing::TestWithParam<PartitionAllocTestParam> {
- protected:
-  class ScopedPageAllocation {
-   public:
-    ScopedPageAllocation(PartitionAllocator& allocator,
-                         base::CheckedNumeric<size_t> npages)
-        : allocator_(allocator),
-          npages_(npages),
-          ptr_(static_cast<char*>(allocator_.root()->Alloc(
-              (npages * SystemPageSize() - ExtraAllocSize(allocator_))
-                  .ValueOrDie(),
-              type_name))) {}
-
-    ~ScopedPageAllocation() { allocator_.root()->Free(ptr_); }
-
-    void TouchAllPages() {
-      memset(ptr_, 'A',
-             ((npages_ * SystemPageSize()) - ExtraAllocSize(allocator_))
-                 .ValueOrDie());
-    }
-
-    void* PageAtIndex(size_t index) {
-      return ptr_ - kPointerOffset + (SystemPageSize() * index);
-    }
-
-   private:
-    PartitionAllocator& allocator_;
-    const base::CheckedNumeric<size_t> npages_;
-    char* ptr_;
-  };
-
-  PartitionAllocTest() = default;
-
-  ~PartitionAllocTest() override = default;
-
-  struct PartitionTestOptions {
-    bool use_memory_reclaimer = false;
-    bool uncap_empty_slot_span_memory = false;
-    bool set_bucket_distribution = false;
-  };
-
-  void InitializeTestRoot(PartitionRoot* root,
-                          PartitionOptions opts,
-                          PartitionTestOptions test_opts) {
-    root->Init(opts);
-    if (test_opts.use_memory_reclaimer) {
-      MemoryReclaimer::Instance()->RegisterPartition(root);
-    }
-    if (test_opts.uncap_empty_slot_span_memory) {
-      root->UncapEmptySlotSpanMemoryForTesting();
-    }
-    if (test_opts.set_bucket_distribution) {
-      SetDistributionForPartitionRoot(root, GetBucketDistribution());
-    }
-  }
-
-  std::unique_ptr<PartitionRoot> CreateCustomTestRoot(
-      PartitionOptions opts,
-      PartitionTestOptions test_opts) {
-    auto root = std::make_unique<PartitionRoot>();
-    InitializeTestRoot(root.get(), opts, test_opts);
-    return root;
-  }
-
-  void InitializeMainTestAllocators() {
-#if BUILDFLAG(ENABLE_PKEYS)
-    int pkey = PkeyAlloc(UseThreadIsolatedPool() ? 0 : PKEY_DISABLE_WRITE);
-    if (pkey != -1) {
-      pkey_ = pkey;
-    }
-    // We always want to have a pkey allocator initialized to make sure that
-    // the other pools still work. As part of the initialization, we tag some
-    // memory with the new pkey, effectively making it read-only. So there's
-    // some potential for breakage that this should catch.
-    InitializeTestRoot(
-        pkey_allocator.root(),
-        PartitionOptions{
-            .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-            .ref_count_size = GetParam().ref_count_size,
-            .thread_isolation = ThreadIsolationOption(pkey_),
-        },
-        PartitionTestOptions{.use_memory_reclaimer = true});
-
-    if (UseThreadIsolatedPool() && pkey_ != kInvalidPkey) {
-      InitializeTestRoot(
-          allocator.root(),
-          PartitionOptions{
-              .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-              .ref_count_size = GetParam().ref_count_size,
-              .thread_isolation = ThreadIsolationOption(pkey_),
-          },
-          PartitionTestOptions{.use_memory_reclaimer = true,
-                               .uncap_empty_slot_span_memory = true,
-                               .set_bucket_distribution = true});
-    } else
-#endif  // BUILDFLAG(ENABLE_PKEYS)
-    {
-      InitializeTestRoot(
-          allocator.root(),
-          PartitionOptions {
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-            // AlignedAllocWithFlags() can't be called when BRP is in the
-            // "before allocation" mode, because this mode adds extras before
-            // the allocation. Extras after the allocation are ok.
-            .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-#endif
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-            .backup_ref_ptr = PartitionOptions::BackupRefPtr::kEnabled,
-#endif
-            .ref_count_size = GetParam().ref_count_size,
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-            .memory_tagging =
-            {.enabled =
-                 partition_alloc::internal::base::CPU::GetInstanceNoAllocation()
-                         .has_mte()
-                     ? PartitionOptions::MemoryTagging::kEnabled
-                     : PartitionOptions::MemoryTagging::kDisabled,
-            }
-#endif
-          },
-          PartitionTestOptions{.use_memory_reclaimer = true,
-                               .uncap_empty_slot_span_memory = true,
-                               .set_bucket_distribution = true});
-    }
-
-    InitializeTestRoot(
-        aligned_allocator.root(),
-        PartitionOptions{
-            .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-            .ref_count_size = GetParam().ref_count_size,
-        },
-        PartitionTestOptions{.use_memory_reclaimer = true,
-                             .uncap_empty_slot_span_memory = true,
-                             .set_bucket_distribution = true});
-  }
-
-  size_t RealAllocSize() const {
-    return partition_alloc::internal::base::bits::AlignUp(
-        kTestAllocSize + ExtraAllocSize(allocator), kAlignment);
-  }
-
-  void SetUp() override {
-    PartitionRoot::EnableSortActiveSlotSpans();
-    PartitionAllocGlobalInit(HandleOOM);
-    InitializeMainTestAllocators();
-
-    test_bucket_index_ = SizeToIndex(RealAllocSize());
-  }
-
-  size_t SizeToIndex(size_t size) {
-    const auto distribution_to_use = GetBucketDistribution();
-    return PartitionRoot::SizeToBucketIndex(size, distribution_to_use);
-  }
-
-  size_t SizeToBucketSize(size_t size) {
-    const auto index = SizeToIndex(size);
-    return allocator.root()->buckets[index].slot_size;
-  }
-
-  void TearDown() override {
-    allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-                                  PurgeFlags::kDiscardUnusedSystemPages);
-    PartitionAllocGlobalUninitForTesting();
-#if BUILDFLAG(ENABLE_PKEYS)
-    if (pkey_ != kInvalidPkey) {
-      PkeyFree(pkey_);
-    }
-#endif
-  }
-
-  static size_t ExtraAllocSize(const PartitionAllocator& allocator) {
-    size_t ref_count_size = 0;
-    // Duplicate the logic from PartitionRoot::Init().
-    if (allocator.root()->brp_enabled()) {
-      ref_count_size = GetParam().ref_count_size;
-      if (!ref_count_size) {
-        ref_count_size = kPartitionRefCountSizeAdjustment;
-      }
-      ref_count_size = AlignUpRefCountSizeForMac(ref_count_size);
-#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-      if (allocator.root()->IsMemoryTaggingEnabled()) {
-        ref_count_size = partition_alloc::internal::base::bits::AlignUp(
-            ref_count_size, kMemTagGranuleSize);
-      }
-#endif  // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-    }
-    return kExtraAllocSizeWithoutRefCount + ref_count_size;
-  }
-
-  size_t GetNumPagesPerSlotSpan(size_t size) {
-    size_t real_size = size + ExtraAllocSize(allocator);
-    size_t bucket_index = SizeToIndex(real_size);
-    PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
-    // TODO(tasak): make get_pages_per_slot_span() available to
-    // partition_alloc_unittest.cc. Is it acceptable to move the code from
-    // partition_bucket.cc to partition_bucket.h?
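-    // The expression below is a ceiling division: it rounds the slot span's
-    // system-page count up to a whole number of partition pages.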
-    return (bucket->num_system_pages_per_slot_span +
-            (NumSystemPagesPerPartitionPage() - 1)) /
-           NumSystemPagesPerPartitionPage();
-  }
-
-  SlotSpan* GetFullSlotSpan(size_t size) {
-    size_t real_size = size + ExtraAllocSize(allocator);
-    size_t bucket_index = SizeToIndex(real_size);
-    PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
-    size_t num_slots =
-        (bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-        bucket->slot_size;
-    uintptr_t first = 0;
-    uintptr_t last = 0;
-    size_t i;
-    for (i = 0; i < num_slots; ++i) {
-      void* ptr = allocator.root()->Alloc(size, type_name);
-      EXPECT_TRUE(ptr);
-      if (!i) {
-        first = allocator.root()->ObjectToSlotStart(ptr);
-      } else if (i == num_slots - 1) {
-        last = allocator.root()->ObjectToSlotStart(ptr);
-      }
-    }
-    EXPECT_EQ(SlotSpan::FromSlotStart(first), SlotSpan::FromSlotStart(last));
-    if (bucket->num_system_pages_per_slot_span ==
-        NumSystemPagesPerPartitionPage()) {
-      EXPECT_EQ(first & PartitionPageBaseMask(),
-                last & PartitionPageBaseMask());
-    }
-    EXPECT_EQ(num_slots, bucket->active_slot_spans_head->num_allocated_slots);
-    EXPECT_EQ(nullptr, bucket->active_slot_spans_head->get_freelist_head());
-    EXPECT_TRUE(bucket->is_valid());
-    EXPECT_TRUE(bucket->active_slot_spans_head !=
-                SlotSpan::get_sentinel_slot_span());
-    EXPECT_TRUE(bucket->active_slot_spans_head->is_full());
-    return bucket->active_slot_spans_head;
-  }
-
-  void CycleFreeCache(size_t size) {
-    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
-      void* ptr = allocator.root()->Alloc(size, type_name);
-      auto* slot_span =
-          SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-      auto* bucket = slot_span->bucket;
-      EXPECT_EQ(1u, bucket->active_slot_spans_head->num_allocated_slots);
-      allocator.root()->Free(ptr);
-      EXPECT_EQ(0u, bucket->active_slot_spans_head->num_allocated_slots);
-      EXPECT_TRUE(bucket->active_slot_spans_head->in_empty_cache() ||
-                  bucket->active_slot_spans_head ==
-                      SlotSpanMetadata::get_sentinel_slot_span());
-    }
-  }
-
-  enum ReturnNullTestMode {
-    kPartitionAllocWithFlags,
-    kPartitionReallocWithFlags,
-    kPartitionRootTryRealloc,
-  };
-
-  void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
-    // TODO(crbug.com/678782): Where necessary and possible, disable the
-    // platform's OOM-killing behavior. OOM-killing makes this test flaky on
-    // low-memory devices.
-    if (!IsLargeMemoryDevice()) {
-      PA_LOG(WARNING)
-          << "Skipping test on this device because of crbug.com/678782";
-      PA_LOG(FATAL) << "Passed DoReturnNullTest";
-    }
-
-    ASSERT_TRUE(SetAddressSpaceLimit());
-
-    // Work out the number of allocations for 6 GB of memory.
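-    // The computation is done in KiB (6 GiB == 6 * 1024 * 1024 KiB, and
-    // alloc_size / 1024 is the allocation size in KiB), which keeps the
-    // intermediate values within int range; 6 GiB in bytes would overflow a
-    // 32-bit int.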
-    const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
-
-    void** ptrs = static_cast<void**>(
-        allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
-    int i;
-
-    for (i = 0; i < num_allocations; ++i) {
-      switch (mode) {
-        case kPartitionAllocWithFlags: {
-          ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
-                                                     alloc_size, type_name);
-          break;
-        }
-        case kPartitionReallocWithFlags: {
-          ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull, 1,
-                                                     type_name);
-          ptrs[i] = allocator.root()->ReallocWithFlags(
-              AllocFlags::kReturnNull, ptrs[i], alloc_size, type_name);
-          break;
-        }
-        case kPartitionRootTryRealloc: {
-          ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull, 1,
-                                                     type_name);
-          ptrs[i] =
-              allocator.root()->TryRealloc(ptrs[i], alloc_size, type_name);
-        }
-      }
-
-      if (!i) {
-        EXPECT_TRUE(ptrs[0]);
-      }
-      if (!ptrs[i]) {
-        ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
-                                                   alloc_size, type_name);
-        EXPECT_FALSE(ptrs[i]);
-        break;
-      }
-    }
-
-    // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
-    // we're not actually testing anything here.
-    EXPECT_LT(i, num_allocations);
-
-    // Free, reallocate and free again each block we allocated. We do this to
-    // check that freeing memory also works correctly after a failed allocation.
-    for (--i; i >= 0; --i) {
-      allocator.root()->Free(ptrs[i]);
-      ptrs[i] = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
-                                                 alloc_size, type_name);
-      EXPECT_TRUE(ptrs[i]);
-      allocator.root()->Free(ptrs[i]);
-    }
-
-    allocator.root()->Free(ptrs);
-
-    EXPECT_TRUE(ClearAddressSpaceLimit());
-    PA_LOG(FATAL) << "Passed DoReturnNullTest";
-  }
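-
-  // DoReturnNullTest() deliberately ends in PA_LOG(FATAL), so it is meant to
-  // be driven from a death test. Sketch of a caller (the size and the exact
-  // test body are illustrative, not part of this file):
-  //
-  //   EXPECT_DEATH(
-  //       DoReturnNullTest(512 * 1024 * 1024, kPartitionAllocWithFlags),
-  //       "Passed DoReturnNullTest");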
-
-  void RunRefCountReallocSubtest(size_t orig_size, size_t new_size);
-
-  PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t size) {
-    return allocator.root()->Alloc(size, "");
-  }
-
-  PA_NOINLINE void Free(void* ptr) { allocator.root()->Free(ptr); }
-
-  BucketDistribution GetBucketDistribution() const {
-    return GetParam().bucket_distribution;
-  }
-
-  bool UseThreadIsolatedPool() const { return GetParam().use_pkey_pool; }
-  bool UseBRPPool() const { return allocator.root()->brp_enabled(); }
-
-  partition_alloc::PartitionAllocatorForTesting allocator;
-  partition_alloc::PartitionAllocatorForTesting aligned_allocator;
-#if BUILDFLAG(ENABLE_PKEYS)
-  partition_alloc::PartitionAllocatorForTesting pkey_allocator;
-#endif
-  size_t test_bucket_index_;
-
-#if BUILDFLAG(ENABLE_PKEYS)
-  int pkey_ = kInvalidPkey;
-#endif
-};
-
-// Death tests misbehave on Android, http://crbug.com/643760.
-#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
-#define PA_HAS_DEATH_TESTS
-
-class PartitionAllocDeathTest : public PartitionAllocTest {};
-
-INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
-                         PartitionAllocDeathTest,
-                         testing::ValuesIn(GetPartitionAllocTestParams()));
-
-#endif
-
-namespace {
-
-void FreeFullSlotSpan(PartitionRoot* root, SlotSpan* slot_span) {
-  EXPECT_TRUE(slot_span->is_full());
-  size_t size = slot_span->bucket->slot_size;
-  size_t num_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      size;
-  EXPECT_EQ(num_slots, slot_span->num_allocated_slots);
-  uintptr_t address = SlotSpan::ToSlotSpanStart(slot_span);
-  size_t i;
-  for (i = 0; i < num_slots; ++i) {
-    root->Free(root->SlotStartToObject(address));
-    address += size;
-  }
-  EXPECT_TRUE(slot_span->is_empty());
-}
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-bool CheckPageInCore(void* ptr, bool in_core) {
-  unsigned char ret = 0;
-  EXPECT_EQ(0, mincore(ptr, SystemPageSize(), &ret));
-  return in_core == (ret & 1);
-}
-
-#define CHECK_PAGE_IN_CORE(ptr, in_core) \
-  EXPECT_TRUE(CheckPageInCore(ptr, in_core))
-#else
-#define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-class MockPartitionStatsDumper : public PartitionStatsDumper {
- public:
-  MockPartitionStatsDumper() = default;
-
-  void PartitionDumpTotals(const char* partition_name,
-                           const PartitionMemoryStats* stats) override {
-    EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
-    EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
-    EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
-    EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
-    EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
-  }
-
-  void PartitionsDumpBucketStats(
-      [[maybe_unused]] const char* partition_name,
-      const PartitionBucketMemoryStats* stats) override {
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
-    bucket_stats.push_back(*stats);
-    total_resident_bytes += stats->resident_bytes;
-    total_active_bytes += stats->active_bytes;
-    total_decommittable_bytes += stats->decommittable_bytes;
-    total_discardable_bytes += stats->discardable_bytes;
-  }
-
-  bool IsMemoryAllocationRecorded() {
-    return total_resident_bytes != 0 && total_active_bytes != 0;
-  }
-
-  const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
-    for (auto& stat : bucket_stats) {
-      if (stat.bucket_slot_size == bucket_size) {
-        return &stat;
-      }
-    }
-    return nullptr;
-  }
-
- private:
-  size_t total_resident_bytes = 0;
-  size_t total_active_bytes = 0;
-  size_t total_decommittable_bytes = 0;
-  size_t total_discardable_bytes = 0;
-
-  std::vector<PartitionBucketMemoryStats> bucket_stats;
-};
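-
-// Sketch of how MockPartitionStatsDumper is typically exercised, assuming a
-// PartitionRoot::DumpStats(partition_name, is_light_dump, dumper) entry point
-// (an illustration of the intended flow, not part of this file):
-//
-//   MockPartitionStatsDumper dumper;
-//   allocator.root()->DumpStats("mock_partition", false /* is_light_dump */,
-//                               &dumper);
-//   EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());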
-
-}  // namespace
-
-INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
-                         PartitionAllocTest,
-                         testing::ValuesIn(GetPartitionAllocTestParams()));
-
-// Check that the most basic of allocate / free pairs work.
-TEST_P(PartitionAllocTest, Basic) {
-  PartitionRoot::Bucket* bucket =
-      &allocator.root()->buckets[test_bucket_index_];
-  auto* seed_slot_span = SlotSpan::get_sentinel_slot_span();
-
-  EXPECT_FALSE(bucket->empty_slot_spans_head);
-  EXPECT_FALSE(bucket->decommitted_slot_spans_head);
-  EXPECT_EQ(seed_slot_span, bucket->active_slot_spans_head);
-  EXPECT_EQ(nullptr, bucket->active_slot_spans_head->next_slot_span);
-
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  EXPECT_EQ(kPointerOffset, UntagPtr(ptr) & PartitionPageOffsetMask());
-  // Check that the offset appears to include a guard page.
-  EXPECT_EQ(PartitionPageSize() +
-                partition_alloc::internal::ReservedFreeSlotBitmapSize() +
-                kPointerOffset,
-            UntagPtr(ptr) & kSuperPageOffsetMask);
-
-  allocator.root()->Free(ptr);
-  // Expect that the last active slot span gets noticed as empty but doesn't get
-  // decommitted.
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_FALSE(bucket->decommitted_slot_spans_head);
-}
-
-// Test multiple allocations, and freelist handling.
-TEST_P(PartitionAllocTest, MultiAlloc) {
-  void* ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr1);
-  EXPECT_TRUE(ptr2);
-  ptrdiff_t diff = UntagPtr(ptr2) - UntagPtr(ptr1);
-  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
-
-  // Check that we re-use the just-freed slot.
-  allocator.root()->Free(ptr2);
-  ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr2);
-  diff = UntagPtr(ptr2) - UntagPtr(ptr1);
-  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
-  allocator.root()->Free(ptr1);
-  ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr1);
-  diff = UntagPtr(ptr2) - UntagPtr(ptr1);
-  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
-
-  void* ptr3 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr3);
-  diff = UntagPtr(ptr3) - UntagPtr(ptr1);
-  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize() * 2), diff);
-
-  allocator.root()->Free(ptr1);
-  allocator.root()->Free(ptr2);
-  allocator.root()->Free(ptr3);
-}
-
-// Test a bucket with multiple slot spans.
-TEST_P(PartitionAllocTest, MultiSlotSpans) {
-  PartitionRoot::Bucket* bucket =
-      &allocator.root()->buckets[test_bucket_index_];
-
-  auto* slot_span = GetFullSlotSpan(kTestAllocSize);
-  FreeFullSlotSpan(allocator.root(), slot_span);
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
-  EXPECT_EQ(nullptr, slot_span->next_slot_span);
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-
-  slot_span = GetFullSlotSpan(kTestAllocSize);
-  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
-
-  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
-  EXPECT_EQ(nullptr, slot_span2->next_slot_span);
-  EXPECT_EQ(SlotSpan::ToSlotSpanStart(slot_span) & kSuperPageBaseMask,
-            SlotSpan::ToSlotSpanStart(slot_span2) & kSuperPageBaseMask);
-
-  // Fully free the non-current slot span. This will leave us with no current
-  // active slot span because one is empty and the other is full.
-  FreeFullSlotSpan(allocator.root(), slot_span);
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
-            bucket->active_slot_spans_head);
-
-  // Allocate a new slot span; it should pull from the freelist.
-  slot_span = GetFullSlotSpan(kTestAllocSize);
-  EXPECT_FALSE(bucket->empty_slot_spans_head);
-  EXPECT_EQ(slot_span, bucket->active_slot_spans_head);
-
-  FreeFullSlotSpan(allocator.root(), slot_span);
-  FreeFullSlotSpan(allocator.root(), slot_span2);
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
-  EXPECT_EQ(0u, slot_span2->num_unprovisioned_slots);
-  EXPECT_TRUE(slot_span2->in_empty_cache());
-}
-
-// Test some finer aspects of internal slot span transitions.
-TEST_P(PartitionAllocTest, SlotSpanTransitions) {
-  PartitionRoot::Bucket* bucket =
-      &allocator.root()->buckets[test_bucket_index_];
-
-  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
-  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
-  EXPECT_EQ(nullptr, slot_span1->next_slot_span);
-  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
-  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
-  EXPECT_EQ(nullptr, slot_span2->next_slot_span);
-
-  // Bounce slot_span1 back into the non-full list then fill it up again.
-  void* ptr = allocator.root()->SlotStartToObject(
-      SlotSpan::ToSlotSpanStart(slot_span1));
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
-  std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
-  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head->next_slot_span);
-
-  // Allocating another slot span at this point should cause us to scan over
-  // slot_span1 (which is both full and NOT our current slot span), and evict it
-  // from the freelist. Older code had an O(n^2) condition due to failure to do
-  // this.
-  auto* slot_span3 = GetFullSlotSpan(kTestAllocSize);
-  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
-  EXPECT_EQ(nullptr, slot_span3->next_slot_span);
-
-  // Work out a pointer into slot_span2 and free it.
-  ptr = allocator.root()->SlotStartToObject(
-      SlotSpan::ToSlotSpanStart(slot_span2));
-  allocator.root()->Free(ptr);
-  // Trying to allocate at this time should cause us to cycle around to
-  // slot_span2 and find the recently freed slot.
-  void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  PA_EXPECT_PTR_EQ(ptr, ptr2);
-  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
-  EXPECT_EQ(slot_span3, slot_span2->next_slot_span);
-
-  // Work out a pointer into slot_span1 and free it. This should pull the slot
-  // span back into the list of available slot spans.
-  ptr = allocator.root()->SlotStartToObject(
-      SlotSpan::ToSlotSpanStart(slot_span1));
-  allocator.root()->Free(ptr);
-  // This allocation should be satisfied by slot_span1.
-  ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  PA_EXPECT_PTR_EQ(ptr, ptr2);
-  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
-  EXPECT_EQ(slot_span2, slot_span1->next_slot_span);
-
-  FreeFullSlotSpan(allocator.root(), slot_span3);
-  FreeFullSlotSpan(allocator.root(), slot_span2);
-  FreeFullSlotSpan(allocator.root(), slot_span1);
-
-  // Allocating whilst in this state exposed a bug, so keep the test.
-  ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  allocator.root()->Free(ptr);
-}
-
-// Test that ExtraAllocSize() is exactly what PA takes away from the slot for
-// extras.
-TEST_P(PartitionAllocTest, ExtraAllocSize) {
-  // There is a bucket with a slot size exactly that (asserted below).
-  size_t slot_size = 64;
-  size_t bucket_index =
-      allocator.root()->SizeToBucketIndex(slot_size, GetBucketDistribution());
-  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
-  ASSERT_EQ(bucket->slot_size, slot_size);
-
-  // The first allocation is expected to span exactly the capacity of the slot.
-  // The second one should overflow into a higher-size slot, and not fill its
-  // capacity.
-  size_t requested_size1 = slot_size - ExtraAllocSize(allocator);
-  size_t requested_size2 = requested_size1 + 1;
-  void* ptr1 = allocator.root()->Alloc(requested_size1, "");
-  void* ptr2 = allocator.root()->Alloc(requested_size2, "");
-  size_t capacity1 = allocator.root()->AllocationCapacityFromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr1));
-  size_t capacity2 = allocator.root()->AllocationCapacityFromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr2));
-  EXPECT_EQ(capacity1, requested_size1);
-  EXPECT_LT(capacity1, capacity2);
-  EXPECT_LT(requested_size2, capacity2);
-  allocator.root()->Free(ptr1);
-  allocator.root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocTest, PreferSlotSpansWithProvisionedEntries) {
-  size_t size = SystemPageSize() - ExtraAllocSize(allocator);
-  size_t real_size = size + ExtraAllocSize(allocator);
-  size_t bucket_index =
-      allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
-  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
-  ASSERT_EQ(bucket->slot_size, real_size);
-  size_t slots_per_span = bucket->num_system_pages_per_slot_span;
-
-  // Make 10 full slot spans.
-  constexpr int kSpans = 10;
-  std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
-  for (int span_index = 0; span_index < kSpans; span_index++) {
-    for (size_t i = 0; i < slots_per_span; i++) {
-      allocated_memory_spans[span_index].push_back(
-          allocator.root()->Alloc(size, ""));
-    }
-  }
-
-  // Reverse ordering, since a newly non-full span is placed at the head of the
-  // active list.
-  for (int span_index = kSpans - 1; span_index >= 0; span_index--) {
-    allocator.root()->Free(allocated_memory_spans[span_index].back());
-    allocated_memory_spans[span_index].pop_back();
-  }
-
-  // Since slot spans are large enough and we freed memory from the end, the
-  // slot spans become partially provisioned after PurgeMemory().
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-                                PurgeFlags::kDiscardUnusedSystemPages);
-  std::vector<SlotSpanMetadata*> active_slot_spans;
-  for (auto* span = bucket->active_slot_spans_head; span;
-       span = span->next_slot_span) {
-    active_slot_spans.push_back(span);
-    ASSERT_EQ(span->num_unprovisioned_slots, 1u);
-    // But no freelist entries.
-    ASSERT_FALSE(span->get_freelist_head());
-  }
-
-  // Free one entry in the middle span, creating a freelist entry.
-  constexpr size_t kSpanIndex = 5;
-  allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
-  allocated_memory_spans[kSpanIndex].pop_back();
-
-  ASSERT_TRUE(active_slot_spans[kSpanIndex]->get_freelist_head());
-  ASSERT_FALSE(bucket->active_slot_spans_head->get_freelist_head());
-
-  // It must come from the middle slot span even though the first one has
-  // unprovisioned space.
-  void* new_ptr = allocator.root()->Alloc(size, "");
-
-  // Comes from the middle slot span, since it has a freelist entry.
-  auto* new_active_slot_span = active_slot_spans[kSpanIndex];
-  ASSERT_FALSE(new_active_slot_span->get_freelist_head());
-
-  // The middle slot span was moved to the front.
-  active_slot_spans.erase(active_slot_spans.begin() + kSpanIndex);
-  active_slot_spans.insert(active_slot_spans.begin(), new_active_slot_span);
-
-  // Check slot span ordering.
-  int index = 0;
-  for (auto* span = bucket->active_slot_spans_head; span;
-       span = span->next_slot_span) {
-    EXPECT_EQ(span, active_slot_spans[index]);
-    index++;
-  }
-  EXPECT_EQ(index, kSpans);
-
-  allocator.root()->Free(new_ptr);
-  for (int span_index = 0; span_index < kSpans; span_index++) {
-    for (void* ptr : allocated_memory_spans[span_index]) {
-      allocator.root()->Free(ptr);
-    }
-  }
-}
-
-// Test some corner cases relating to slot span transitions in the internal
-// free slot span list metadata bucket.
-TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
-  PartitionRoot::Bucket* bucket =
-      &allocator.root()->buckets[test_bucket_index_];
-
-  size_t num_to_fill_free_list_slot_span =
-      PartitionPageSize() / (sizeof(SlotSpan) + ExtraAllocSize(allocator));
-  // The +1 is because we need to account for the fact that the current slot
-  // span never gets thrown on the freelist.
-  ++num_to_fill_free_list_slot_span;
-  auto slot_spans =
-      std::make_unique<SlotSpan*[]>(num_to_fill_free_list_slot_span);
-
-  size_t i;
-  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
-    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
-  }
-  EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
-            bucket->active_slot_spans_head);
-  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
-    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
-  }
-  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-
-  // Allocate / free in a different bucket size so we get control of a
-  // different free slot span list. We need two slot spans because one will be
-  // the last active slot span and not get freed.
-  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize * 2);
-  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize * 2);
-  FreeFullSlotSpan(allocator.root(), slot_span1);
-  FreeFullSlotSpan(allocator.root(), slot_span2);
-
-  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
-    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
-  }
-  EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
-            bucket->active_slot_spans_head);
-
-  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
-    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
-  }
-  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-}
-
-// Test a large series of allocations that cross more than one underlying
-// super page.
-TEST_P(PartitionAllocTest, MultiPageAllocs) {
-  size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
-  // 1 super page has 2 guard partition pages and a tag bitmap.
-  size_t num_slot_spans_needed =
-      (NumPartitionPagesPerSuperPage() - 2 -
-       partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
-      num_pages_per_slot_span;
-
-  // We need one more slot span in order to cross super page boundary.
-  ++num_slot_spans_needed;
-
-  EXPECT_GT(num_slot_spans_needed, 1u);
-  auto slot_spans = std::make_unique<SlotSpan*[]>(num_slot_spans_needed);
-  uintptr_t first_super_page_base = 0;
-  size_t i;
-  for (i = 0; i < num_slot_spans_needed; ++i) {
-    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
-    uintptr_t slot_span_start = SlotSpan::ToSlotSpanStart(slot_spans[i]);
-    if (!i) {
-      first_super_page_base = slot_span_start & kSuperPageBaseMask;
-    }
-    if (i == num_slot_spans_needed - 1) {
-      uintptr_t second_super_page_base = slot_span_start & kSuperPageBaseMask;
-      uintptr_t second_super_page_offset =
-          slot_span_start & kSuperPageOffsetMask;
-      EXPECT_FALSE(second_super_page_base == first_super_page_base);
-      // Check that we allocated a guard page and the reserved tag bitmap for
-      // the second super page.
-      EXPECT_EQ(PartitionPageSize() +
-                    partition_alloc::internal::ReservedFreeSlotBitmapSize(),
-                second_super_page_offset);
-    }
-  }
-  for (i = 0; i < num_slot_spans_needed; ++i) {
-    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
-  }
-}
-
-// Test the generic allocation functions that can handle arbitrary sizes and
-// reallocing etc.
-TEST_P(PartitionAllocTest, Alloc) {
-  void* ptr = allocator.root()->Alloc(1, type_name);
-  EXPECT_TRUE(ptr);
-  allocator.root()->Free(ptr);
-  ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
-  EXPECT_TRUE(ptr);
-  allocator.root()->Free(ptr);
-
-  // To make both alloc(x + 1) and alloc(x + kSmallestBucket) allocate from
-  // the same bucket, we need
-  //   partition_alloc::internal::base::bits::AlignUp(
-  //       1 + x + ExtraAllocSize(allocator), kAlignment)
-  //   == partition_alloc::internal::base::bits::AlignUp(
-  //       kSmallestBucket + x + ExtraAllocSize(allocator), kAlignment),
-  // because slot_size is a multiple of kAlignment. So
-  // (x + ExtraAllocSize(allocator)) must be a multiple of kAlignment, i.e.
-  //   x = partition_alloc::internal::base::bits::AlignUp(
-  //           ExtraAllocSize(allocator), kAlignment) -
-  //       ExtraAllocSize(allocator);
-  size_t base_size = partition_alloc::internal::base::bits::AlignUp(
-                         ExtraAllocSize(allocator), kAlignment) -
-                     ExtraAllocSize(allocator);
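-  // Worked example with hypothetical numbers (not asserted by the test): if
-  // ExtraAllocSize(allocator) == 20 and kSmallestBucket == kAlignment == 16,
-  // then base_size == AlignUp(20, 16) - 20 == 12, so base_size +
-  // ExtraAllocSize == 32 is a multiple of kAlignment, and both
-  // Alloc(base_size + 1) and Alloc(base_size + kSmallestBucket) round up to a
-  // 48-byte slot, i.e. land in the same bucket.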
-  ptr = allocator.root()->Alloc(base_size + 1, type_name);
-  EXPECT_TRUE(ptr);
-  void* orig_ptr = ptr;
-  char* char_ptr = static_cast<char*>(ptr);
-  *char_ptr = 'A';
-
-  // Change the size of the realloc, remaining inside the same bucket.
-  void* new_ptr = allocator.root()->Realloc(ptr, base_size + 2, type_name);
-  PA_EXPECT_PTR_EQ(ptr, new_ptr);
-  new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
-  PA_EXPECT_PTR_EQ(ptr, new_ptr);
-  new_ptr =
-      allocator.root()->Realloc(ptr, base_size + kSmallestBucket, type_name);
-  PA_EXPECT_PTR_EQ(ptr, new_ptr);
-
-  // Change the size of the realloc, switching buckets.
-  new_ptr = allocator.root()->Realloc(ptr, base_size + kSmallestBucket + 1,
-                                      type_name);
-  PA_EXPECT_PTR_NE(new_ptr, ptr);
-  // Check that the realloc copied correctly.
-  char* new_char_ptr = static_cast<char*>(new_ptr);
-  EXPECT_EQ(*new_char_ptr, 'A');
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  // Subtle: this checks for an old bug where we copied too much from the
-  // source of the realloc. Such over-copying would trash the uninitialized
-  // bytes in the space of the upsized allocation.
-  EXPECT_EQ(kUninitializedByte,
-            static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
-#endif
-  *new_char_ptr = 'B';
-  // The realloc moved. To check that the old allocation was freed, we can
-  // do an alloc of the old allocation size and check that the old allocation
-  // address is at the head of the freelist and reused.
-  void* reused_ptr = allocator.root()->Alloc(base_size + 1, type_name);
-  PA_EXPECT_PTR_EQ(reused_ptr, orig_ptr);
-  allocator.root()->Free(reused_ptr);
-
-  // Downsize the realloc.
-  ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
-  PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
-  new_char_ptr = static_cast<char*>(new_ptr);
-  EXPECT_EQ(*new_char_ptr, 'B');
-  *new_char_ptr = 'C';
-
-  // Upsize the realloc to outside the partition.
-  ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
-  PA_EXPECT_PTR_NE(new_ptr, ptr);
-  new_char_ptr = static_cast<char*>(new_ptr);
-  EXPECT_EQ(*new_char_ptr, 'C');
-  *new_char_ptr = 'D';
-
-  // Upsize and downsize the realloc, remaining outside the partition.
-  ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
-  new_char_ptr = static_cast<char*>(new_ptr);
-  EXPECT_EQ(*new_char_ptr, 'D');
-  *new_char_ptr = 'E';
-  ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
-  new_char_ptr = static_cast<char*>(new_ptr);
-  EXPECT_EQ(*new_char_ptr, 'E');
-  *new_char_ptr = 'F';
-
-  // Downsize the realloc to inside the partition.
-  ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
-  PA_EXPECT_PTR_NE(new_ptr, ptr);
-  PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
-  new_char_ptr = static_cast<char*>(new_ptr);
-  EXPECT_EQ(*new_char_ptr, 'F');
-
-  allocator.root()->Free(new_ptr);
-}
-
-// Test the generic allocation functions can handle some specific sizes of
-// interest.
-TEST_P(PartitionAllocTest, AllocSizes) {
-  {
-    void* ptr = allocator.root()->Alloc(0, type_name);
-    EXPECT_TRUE(ptr);
-    allocator.root()->Free(ptr);
-  }
-
-  {
-    // PartitionPageSize() is interesting because it results in just one
-    // allocation per page, which tripped up some corner cases.
-    const size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
-    void* ptr = allocator.root()->Alloc(size, type_name);
-    EXPECT_TRUE(ptr);
-    void* ptr2 = allocator.root()->Alloc(size, type_name);
-    EXPECT_TRUE(ptr2);
-    allocator.root()->Free(ptr);
-    // Should be freeable at this point.
-    auto* slot_span =
-        SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-    EXPECT_TRUE(slot_span->in_empty_cache());
-    allocator.root()->Free(ptr2);
-  }
-
-  {
-    // Single-slot slot span size.
-    const size_t size =
-        PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan + 1;
-
-    void* ptr = allocator.root()->Alloc(size, type_name);
-    EXPECT_TRUE(ptr);
-    memset(ptr, 'A', size);
-    void* ptr2 = allocator.root()->Alloc(size, type_name);
-    EXPECT_TRUE(ptr2);
-    void* ptr3 = allocator.root()->Alloc(size, type_name);
-    EXPECT_TRUE(ptr3);
-    void* ptr4 = allocator.root()->Alloc(size, type_name);
-    EXPECT_TRUE(ptr4);
-
-    auto* slot_span = SlotSpanMetadata::FromSlotStart(
-        allocator.root()->ObjectToSlotStart(ptr));
-    auto* slot_span2 =
-        SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr3));
-    EXPECT_NE(slot_span, slot_span2);
-
-    allocator.root()->Free(ptr);
-    allocator.root()->Free(ptr3);
-    allocator.root()->Free(ptr2);
-    // Should be freeable at this point.
-    EXPECT_TRUE(slot_span->in_empty_cache());
-    EXPECT_EQ(0u, slot_span->num_allocated_slots);
-    EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
-    void* new_ptr_1 = allocator.root()->Alloc(size, type_name);
-    PA_EXPECT_PTR_EQ(ptr2, new_ptr_1);
-    void* new_ptr_2 = allocator.root()->Alloc(size, type_name);
-    PA_EXPECT_PTR_EQ(ptr3, new_ptr_2);
-
-    allocator.root()->Free(new_ptr_1);
-    allocator.root()->Free(new_ptr_2);
-    allocator.root()->Free(ptr4);
-
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-    // |SlotSpanMetadata::Free| must poison the slot's contents with
-    // |kFreedByte|.
-    EXPECT_EQ(kFreedByte,
-              *(static_cast<unsigned char*>(new_ptr_1) + (size - 1)));
-#endif
-  }
-
-  // Can we allocate a massive (128MB) size?
-  // Add +1, to test for cookie writing alignment issues.
-  // Test this only if the device has enough memory or it might fail due
-  // to OOM.
-  if (IsLargeMemoryDevice()) {
-    void* ptr = allocator.root()->Alloc(128 * 1024 * 1024 + 1, type_name);
-    allocator.root()->Free(ptr);
-  }
-
-  {
-    // Check a more reasonable, but still direct mapped, size.
-    // Chop a system page and a byte off to test for rounding errors.
-    size_t size = 20 * 1024 * 1024;
-    ASSERT_GT(size, kMaxBucketed);
-    size -= SystemPageSize();
-    size -= 1;
-    void* ptr = allocator.root()->Alloc(size, type_name);
-    char* char_ptr = static_cast<char*>(ptr);
-    *(char_ptr + (size - 1)) = 'A';
-    allocator.root()->Free(ptr);
-
-    // Can we free null?
-    allocator.root()->Free(nullptr);
-
-    // Do we correctly get a null for a failed allocation?
-    EXPECT_EQ(nullptr,
-              allocator.root()->AllocWithFlags(
-                  AllocFlags::kReturnNull, 3u * 1024 * 1024 * 1024, type_name));
-  }
-}
-
-// Test that we can fetch the real allocated size after an allocation.
-TEST_P(PartitionAllocTest, AllocGetSizeAndStart) {
-  void* ptr;
-  size_t requested_size, actual_capacity, predicted_capacity;
-
-  // Allocate something small.
-  requested_size = 511 - ExtraAllocSize(allocator);
-  predicted_capacity =
-      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
-  ptr = allocator.root()->Alloc(requested_size, type_name);
-  EXPECT_TRUE(ptr);
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  actual_capacity =
-      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-  EXPECT_EQ(predicted_capacity, actual_capacity);
-  EXPECT_LT(requested_size, actual_capacity);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (UseBRPPool()) {
-    uintptr_t address = UntagPtr(ptr);
-    for (size_t offset = 0; offset < requested_size; ++offset) {
-      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
-                slot_start);
-    }
-  }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  allocator.root()->Free(ptr);
-
-  // Allocate a size that should be a perfect match for a bucket, because it
-  // is an exact power of 2.
-  requested_size = (256 * 1024) - ExtraAllocSize(allocator);
-  predicted_capacity =
-      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
-  ptr = allocator.root()->Alloc(requested_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  actual_capacity =
-      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-  EXPECT_EQ(predicted_capacity, actual_capacity);
-  EXPECT_EQ(requested_size, actual_capacity);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (UseBRPPool()) {
-    uintptr_t address = UntagPtr(ptr);
-    for (size_t offset = 0; offset < requested_size; offset += 877) {
-      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
-                slot_start);
-    }
-  }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  allocator.root()->Free(ptr);
-
-  // Allocate a size that is a system page smaller than a bucket.
-  // AllocationCapacityFromSlotStart() should now return a size larger than
-  // what we asked for.
-  size_t num = 64;
-  while (num * SystemPageSize() >= 1024 * 1024) {
-    num /= 2;
-  }
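-  // The loop above halves |num|, starting from 64, until num * SystemPageSize()
-  // drops below 1 MiB (e.g. num stays 64 with 4 KiB pages and becomes 32 with
-  // 16 KiB pages).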
-  requested_size =
-      num * SystemPageSize() - SystemPageSize() - ExtraAllocSize(allocator);
-  predicted_capacity =
-      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
-  ptr = allocator.root()->Alloc(requested_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  actual_capacity =
-      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-  EXPECT_EQ(predicted_capacity, actual_capacity);
-  EXPECT_EQ(requested_size + SystemPageSize(), actual_capacity);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (UseBRPPool()) {
-    uintptr_t address = UntagPtr(ptr);
-    for (size_t offset = 0; offset < requested_size; offset += 4999) {
-      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
-                slot_start);
-    }
-  }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  allocator.root()->Free(ptr);
-
-  // Allocate the maximum allowed bucketed size.
-  requested_size = kMaxBucketed - ExtraAllocSize(allocator);
-  predicted_capacity =
-      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
-  ptr = allocator.root()->Alloc(requested_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  actual_capacity =
-      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-  EXPECT_EQ(predicted_capacity, actual_capacity);
-  EXPECT_EQ(requested_size, actual_capacity);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (UseBRPPool()) {
-    uintptr_t address = UntagPtr(ptr);
-    for (size_t offset = 0; offset < requested_size; offset += 4999) {
-      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
-                slot_start);
-    }
-  }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-  // Check that we can write at the end of the reported size too.
-  char* char_ptr = static_cast<char*>(ptr);
-  *(char_ptr + (actual_capacity - 1)) = 'A';
-  allocator.root()->Free(ptr);
-
-  // Allocate something very large, and uneven.
-  if (IsLargeMemoryDevice()) {
-    requested_size = 128 * 1024 * 1024 - 33;
-    predicted_capacity =
-        allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
-    ptr = allocator.root()->Alloc(requested_size, type_name);
-    EXPECT_TRUE(ptr);
-    slot_start = allocator.root()->ObjectToSlotStart(ptr);
-    actual_capacity =
-        allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-    EXPECT_EQ(predicted_capacity, actual_capacity);
-
-    EXPECT_LT(requested_size, actual_capacity);
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    if (UseBRPPool()) {
-      uintptr_t address = UntagPtr(ptr);
-      for (size_t offset = 0; offset < requested_size; offset += 16111) {
-        EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
-                  slot_start);
-      }
-    }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    allocator.root()->Free(ptr);
-  }
-
-  // Too large allocation.
-  requested_size = MaxDirectMapped() + 1;
-  predicted_capacity =
-      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
-  EXPECT_EQ(requested_size, predicted_capacity);
-}
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-TEST_P(PartitionAllocTest, MTEProtectsFreedPtr) {
-  // This test checks that Arm's memory tagging extension (MTE) is correctly
-  // protecting freed pointers.
-  base::CPU cpu;
-  if (!cpu.has_mte()) {
-    // This test won't pass without MTE support.
-    GTEST_SKIP();
-  }
-
-  // Create an arbitrarily-sized small allocation.
-  size_t alloc_size = 64 - ExtraAllocSize(allocator);
-  uint64_t* ptr1 =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  EXPECT_TRUE(ptr1);
-
-  // Invalidate the pointer by freeing it.
-  allocator.root()->Free(ptr1);
-
-  // When we immediately reallocate a pointer, we should see the same allocation
-  // slot but with a different tag (PA_EXPECT_PTR_EQ ignores the MTE tag).
-  uint64_t* ptr2 =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  PA_EXPECT_PTR_EQ(ptr1, ptr2);
-  // The different tag bits mean that ptr1 is not the same as ptr2.
-  EXPECT_NE(ptr1, ptr2);
-
-  // When we free again, we expect a new tag for that area that's different from
-  // ptr1 and ptr2.
-  allocator.root()->Free(ptr2);
-  uint64_t* ptr3 =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  PA_EXPECT_PTR_EQ(ptr2, ptr3);
-  EXPECT_NE(ptr1, ptr3);
-  EXPECT_NE(ptr2, ptr3);
-
-  // We don't need ptr3 any further, but we do clean it up to avoid DCHECKs.
-  allocator.root()->Free(ptr3);
-}
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-TEST_P(PartitionAllocTest, IsPtrWithinSameAlloc) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  const size_t kMinReasonableTestSize =
-      partition_alloc::internal::base::bits::AlignUp(
-          ExtraAllocSize(allocator) + 1, kAlignment);
-  ASSERT_GT(kMinReasonableTestSize, ExtraAllocSize(allocator));
-  const size_t kSizes[] = {kMinReasonableTestSize,
-                           256,
-                           SystemPageSize(),
-                           PartitionPageSize(),
-                           MaxRegularSlotSpanSize(),
-                           MaxRegularSlotSpanSize() + 1,
-                           MaxRegularSlotSpanSize() + SystemPageSize(),
-                           MaxRegularSlotSpanSize() + PartitionPageSize(),
-                           kMaxBucketed,
-                           kMaxBucketed + 1,
-                           kMaxBucketed + SystemPageSize(),
-                           kMaxBucketed + PartitionPageSize(),
-                           kSuperPageSize};
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  constexpr size_t kFarFarAwayDelta = 512 * kGiB;
-#else
-  constexpr size_t kFarFarAwayDelta = kGiB;
-#endif
-  for (size_t size : kSizes) {
-    size_t requested_size = size - ExtraAllocSize(allocator);
-    // For regular slot-span allocations, confirm the size fills the entire
-    // slot. Otherwise the test would be ineffective, as Partition Alloc has no
-    // ability to check against the actual allocated size.
-    // Single-slot slot-spans and direct map don't have that problem.
-    if (size <= MaxRegularSlotSpanSize()) {
-      ASSERT_EQ(requested_size,
-                allocator.root()->AllocationCapacityFromRequestedSize(
-                    requested_size));
-    }
-
-    constexpr size_t kNumRepeats = 3;
-    void* ptrs[kNumRepeats];
-    for (void*& ptr : ptrs) {
-      ptr = allocator.root()->Alloc(requested_size, type_name);
-      // Double check.
-      if (size <= MaxRegularSlotSpanSize()) {
-        uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-        EXPECT_EQ(
-            requested_size,
-            allocator.root()->AllocationCapacityFromSlotStart(slot_start));
-      }
-
-      uintptr_t address = UntagPtr(ptr);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kFarFarAwayDelta, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kSuperPageSize, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address - 1, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address, 0u),
-                PtrPosWithinAlloc::kInBounds);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size / 2, 0u),
-                PtrPosWithinAlloc::kInBounds);
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 1, 1u),
-                PtrPosWithinAlloc::kInBounds);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 1u),
-                PtrPosWithinAlloc::kAllocEnd);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 4, 4u),
-                PtrPosWithinAlloc::kInBounds);
-      for (size_t subtrahend = 0; subtrahend < 4; subtrahend++) {
-        EXPECT_EQ(IsPtrWithinSameAlloc(
-                      address, address + requested_size - subtrahend, 4u),
-                  PtrPosWithinAlloc::kAllocEnd);
-      }
-#else  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 0u),
-                PtrPosWithinAlloc::kInBounds);
-#endif
-      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size + 1, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(
-                    address, address + requested_size + kSuperPageSize, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(
-                    address, address + requested_size + kFarFarAwayDelta, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(
-          IsPtrWithinSameAlloc(address + requested_size,
-                               address + requested_size + kFarFarAwayDelta, 0u),
-          PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(
-          IsPtrWithinSameAlloc(address + requested_size,
-                               address + requested_size + kSuperPageSize, 0u),
-          PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
-                                     address + requested_size + 1, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
-                                     address + requested_size - 1, 1u),
-                PtrPosWithinAlloc::kInBounds);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
-                                     address + requested_size, 1u),
-                PtrPosWithinAlloc::kAllocEnd);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
-                                     address + requested_size, 1u),
-                PtrPosWithinAlloc::kAllocEnd);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 4,
-                                     address + requested_size - 4, 4u),
-                PtrPosWithinAlloc::kInBounds);
-      for (size_t addend = 1; addend < 4; addend++) {
-        EXPECT_EQ(
-            IsPtrWithinSameAlloc(address + requested_size - 4,
-                                 address + requested_size - 4 + addend, 4u),
-            PtrPosWithinAlloc::kAllocEnd);
-      }
-#else  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
-                                     address + requested_size, 0u),
-                PtrPosWithinAlloc::kInBounds);
-#endif
-      EXPECT_EQ(IsPtrWithinSameAlloc(
-                    address + requested_size,
-                    address + requested_size - (requested_size / 2), 0u),
-                PtrPosWithinAlloc::kInBounds);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address, 0u),
-                PtrPosWithinAlloc::kInBounds);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address - 1, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
-                                     address - kSuperPageSize, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
-                                     address - kFarFarAwayDelta, 0u),
-                PtrPosWithinAlloc::kFarOOB);
-    }
-
-    for (void* ptr : ptrs) {
-      allocator.root()->Free(ptr);
-    }
-  }
-}
-
-TEST_P(PartitionAllocTest, GetSlotStartMultiplePages) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  auto* root = allocator.root();
-  // Find the smallest bucket with multiple PartitionPages. When searching for
-  // a bucket here, we need to check two conditions:
-  // (1) The bucket is used in our current bucket distribution.
-  // (2) The bucket is large enough that our requested size (see below) will be
-  // non-zero.
-  size_t real_size = 0;
-  for (const auto& bucket : root->buckets) {
-    if ((root->buckets + SizeToIndex(bucket.slot_size))->slot_size !=
-        bucket.slot_size) {
-      continue;
-    }
-    if (bucket.slot_size <= ExtraAllocSize(allocator)) {
-      continue;
-    }
-    if (bucket.num_system_pages_per_slot_span >
-        NumSystemPagesPerPartitionPage()) {
-      real_size = bucket.slot_size;
-      break;
-    }
-  }
-
-  // Make sure that we've managed to find an appropriate bucket.
-  ASSERT_GT(real_size, 0u);
-
-  const size_t requested_size = real_size - ExtraAllocSize(allocator);
-  // Double check we don't end up with 0 or negative size.
-  EXPECT_GT(requested_size, 0u);
-  EXPECT_LE(requested_size, real_size);
-  const auto* bucket = allocator.root()->buckets + SizeToIndex(real_size);
-  EXPECT_EQ(bucket->slot_size, real_size);
-  // Make sure the test is testing the multiple partition pages case.
-  EXPECT_GT(bucket->num_system_pages_per_slot_span,
-            PartitionPageSize() / SystemPageSize());
-  size_t num_slots =
-      (bucket->num_system_pages_per_slot_span * SystemPageSize()) / real_size;
-  std::vector<void*> ptrs;
-  for (size_t i = 0; i < num_slots; ++i) {
-    ptrs.push_back(allocator.root()->Alloc(requested_size, type_name));
-  }
-  for (void* ptr : ptrs) {
-    uintptr_t address = UntagPtr(ptr);
-    uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-    EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start),
-              requested_size);
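-    // Probe addresses throughout the object (stride 13, presumably chosen so
-    // the offsets don't line up with any power-of-two boundary); every probe
-    // must map back to the same slot start.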
-    for (size_t offset = 0; offset < requested_size; offset += 13) {
-      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
-                slot_start);
-    }
-    allocator.root()->Free(ptr);
-  }
-}
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-// Test the realloc() contract.
-TEST_P(PartitionAllocTest, Realloc) {
-  // realloc(0, size) should be equivalent to malloc().
-  void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
-  memset(ptr, 'A', kTestAllocSize);
-  auto* slot_span =
-      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  // realloc(ptr, 0) should be equivalent to free().
-  void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
-  EXPECT_EQ(nullptr, ptr2);
-  EXPECT_EQ(allocator.root()->ObjectToSlotStart(ptr),
-            UntagPtr(slot_span->get_freelist_head()));
-
-  // Test that growing an allocation with realloc() copies everything from the
-  // old allocation.
-  size_t size = SystemPageSize() - ExtraAllocSize(allocator);
-  // Confirm size fills the entire slot.
-  ASSERT_EQ(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
-  ptr = allocator.root()->Alloc(size, type_name);
-  memset(ptr, 'A', size);
-  ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
-  PA_EXPECT_PTR_NE(ptr, ptr2);
-  char* char_ptr2 = static_cast<char*>(ptr2);
-  EXPECT_EQ('A', char_ptr2[0]);
-  EXPECT_EQ('A', char_ptr2[size - 1]);
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
-#endif
-
-  // Test that shrinking an allocation with realloc() also copies everything
-  // from the old allocation. Use |size - 1| to test what happens to the extra
-  // space before the cookie.
-  ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
-  PA_EXPECT_PTR_NE(ptr2, ptr);
-  char* char_ptr = static_cast<char*>(ptr);
-  EXPECT_EQ('A', char_ptr[0]);
-  EXPECT_EQ('A', char_ptr[size - 2]);
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
-#endif
-
-  allocator.root()->Free(ptr);
-
-  // Single-slot slot spans...
-  // Test that growing an allocation with realloc() copies everything from the
-  // old allocation.
-  size = MaxRegularSlotSpanSize() + 1;
-  ASSERT_LE(2 * size, kMaxBucketed);  // should be in single-slot span range
-  // Confirm size doesn't fill the entire slot.
-  ASSERT_LT(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
-  ptr = allocator.root()->Alloc(size, type_name);
-  memset(ptr, 'A', size);
-  ptr2 = allocator.root()->Realloc(ptr, size * 2, type_name);
-  PA_EXPECT_PTR_NE(ptr, ptr2);
-  char_ptr2 = static_cast<char*>(ptr2);
-  EXPECT_EQ('A', char_ptr2[0]);
-  EXPECT_EQ('A', char_ptr2[size - 1]);
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
-#endif
-  allocator.root()->Free(ptr2);
-
-  // Test that shrinking an allocation with realloc() also copies everything
-  // from the old allocation.
-  size = 2 * (MaxRegularSlotSpanSize() + 1);
-  ASSERT_GT(size / 2, MaxRegularSlotSpanSize());  // in single-slot span range
-  ptr = allocator.root()->Alloc(size, type_name);
-  memset(ptr, 'A', size);
-  ptr2 = allocator.root()->Realloc(ptr, size / 2, type_name);
-  PA_EXPECT_PTR_NE(ptr, ptr2);
-  char_ptr2 = static_cast<char*>(ptr2);
-  EXPECT_EQ('A', char_ptr2[0]);
-  EXPECT_EQ('A', char_ptr2[size / 2 - 1]);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // For single-slot slot spans, the cookie is always placed immediately after
-  // the allocation.
-  EXPECT_EQ(kCookieValue[0], static_cast<unsigned char>(char_ptr2[size / 2]));
-#endif
-  allocator.root()->Free(ptr2);
-
-  // Test that shrinking a direct mapped allocation happens in-place.
-  // Pick a large size so that Realloc doesn't think it's worthwhile to
-  // downsize even if one less super page is used (due to high granularity on
-  // 64-bit systems).
-  size = 10 * kSuperPageSize + SystemPageSize() - 42;
-  ASSERT_GT(size - 32 * SystemPageSize(), kMaxBucketed);
-  ptr = allocator.root()->Alloc(size, type_name);
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  size_t actual_capacity =
-      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-  ptr2 = allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
-  uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
-  EXPECT_EQ(slot_start, slot_start2);
-  EXPECT_EQ(actual_capacity - SystemPageSize(),
-            allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
-  void* ptr3 =
-      allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(), type_name);
-  uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
-  EXPECT_EQ(slot_start2, slot_start3);
-  EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
-            allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
-
-  // Test that a previously in-place shrunk direct mapped allocation can be
-  // expanded again, up to its original size.
-  ptr = allocator.root()->Realloc(ptr3, size, type_name);
-  slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  EXPECT_EQ(slot_start3, slot_start);
-  EXPECT_EQ(actual_capacity,
-            allocator.root()->AllocationCapacityFromSlotStart(slot_start));
-
-  // Test that the allocation can be expanded in place up to its capacity.
-  ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
-  slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
-  EXPECT_EQ(slot_start, slot_start2);
-  EXPECT_EQ(actual_capacity,
-            allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
-
-  // Test that a direct mapped allocation is not reallocated in-place when the
-  // new size is small enough.
-  ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
-  slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
-  EXPECT_NE(slot_start, slot_start3);
-
-  allocator.root()->Free(ptr3);
-}
-
-TEST_P(PartitionAllocTest, ReallocDirectMapAligned) {
-  size_t alignments[] = {
-      PartitionPageSize(),
-      2 * PartitionPageSize(),
-      kMaxSupportedAlignment / 2,
-      kMaxSupportedAlignment,
-  };
-
-  for (size_t alignment : alignments) {
-    // Test that shrinking a direct mapped allocation happens in-place.
-    // Pick a large size so that Realloc doesn't think it's worthwhile to
-    // downsize even if one less super page is used (due to high granularity on
-    // 64-bit systems), even if the alignment padding is taken out.
-    size_t size = 10 * kSuperPageSize + SystemPageSize() - 42;
-    ASSERT_GT(size, kMaxBucketed);
-    void* ptr =
-        allocator.root()->AllocWithFlagsInternal(0, size, alignment, type_name);
-    uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-    size_t actual_capacity =
-        allocator.root()->AllocationCapacityFromSlotStart(slot_start);
-    void* ptr2 =
-        allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
-    uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
-    EXPECT_EQ(slot_start, slot_start2);
-    EXPECT_EQ(actual_capacity - SystemPageSize(),
-              allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
-    void* ptr3 = allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(),
-                                           type_name);
-    uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
-    EXPECT_EQ(slot_start2, slot_start3);
-    EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
-              allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
-
-    // Test that a previously in-place shrunk direct mapped allocation can be
-    // expanded again, up to its original size.
-    ptr = allocator.root()->Realloc(ptr3, size, type_name);
-    slot_start = allocator.root()->ObjectToSlotStart(ptr);
-    EXPECT_EQ(slot_start3, slot_start);
-    EXPECT_EQ(actual_capacity,
-              allocator.root()->AllocationCapacityFromSlotStart(slot_start));
-
-    // Test that the allocation can be expanded in place up to its capacity.
-    ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
-    slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
-    EXPECT_EQ(slot_start, slot_start2);
-    EXPECT_EQ(actual_capacity,
-              allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
-
-    // Test that a direct mapped allocation is not reallocated in-place when
-    // the new size is small enough.
-    ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
-    slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
-    EXPECT_NE(slot_start2, slot_start3);
-
-    allocator.root()->Free(ptr3);
-  }
-}
-
-TEST_P(PartitionAllocTest, ReallocDirectMapAlignedRelocate) {
-  // Pick a size such that the alignment will put it across the super page
-  // boundary.
-  size_t size = 2 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
-  ASSERT_GT(size, kMaxBucketed);
-  void* ptr = allocator.root()->AllocWithFlagsInternal(
-      0, size, kMaxSupportedAlignment, type_name);
-  // Reallocating with the same size will actually relocate, because without a
-  // need for alignment we can downsize the reservation significantly.
-  void* ptr2 = allocator.root()->Realloc(ptr, size, type_name);
-  PA_EXPECT_PTR_NE(ptr, ptr2);
-  allocator.root()->Free(ptr2);
-
-  // Again pick a size such that the alignment will put it across the super
-  // page boundary, but this time make it so large that Realloc doesn't find
-  // it worth shrinking.
-  size = 10 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
-  ASSERT_GT(size, kMaxBucketed);
-  ptr = allocator.root()->AllocWithFlagsInternal(
-      0, size, kMaxSupportedAlignment, type_name);
-  ptr2 = allocator.root()->Realloc(ptr, size, type_name);
-  EXPECT_EQ(ptr, ptr2);
-  allocator.root()->Free(ptr2);
-}
-
-// Tests the handing out of freelists for partial slot spans.
-TEST_P(PartitionAllocTest, PartialPageFreelists) {
-  size_t big_size = SystemPageSize() - ExtraAllocSize(allocator);
-  size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
-  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-
-  void* ptr = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_TRUE(ptr);
-
-  auto* slot_span =
-      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  size_t total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      (big_size + ExtraAllocSize(allocator));
-  EXPECT_EQ(4u, total_slots);
-  // The freelist should be empty: only one slot fits in the single system
-  // page provisioned so far, and that slot is the one just handed out.
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
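-  // Since each slot here is exactly one system page, slots are provisioned
-  // one at a time, so each subsequent allocation below takes
-  // num_unprovisioned_slots down by exactly one.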
-
-  void* ptr2 = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_TRUE(ptr2);
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_EQ(2u, slot_span->num_allocated_slots);
-  EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
-
-  void* ptr3 = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_TRUE(ptr3);
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_EQ(3u, slot_span->num_allocated_slots);
-  EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
-
-  void* ptr4 = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_TRUE(ptr4);
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_EQ(4u, slot_span->num_allocated_slots);
-  EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
-
-  void* ptr5 = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_TRUE(ptr5);
-
-  auto* slot_span2 =
-      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr5));
-  EXPECT_EQ(1u, slot_span2->num_allocated_slots);
-
-  // Churn things a little whilst there's a partial slot span freelist.
-  allocator.root()->Free(ptr);
-  ptr = allocator.root()->Alloc(big_size, type_name);
-  void* ptr6 = allocator.root()->Alloc(big_size, type_name);
-
-  allocator.root()->Free(ptr);
-  allocator.root()->Free(ptr2);
-  allocator.root()->Free(ptr3);
-  allocator.root()->Free(ptr4);
-  allocator.root()->Free(ptr5);
-  allocator.root()->Free(ptr6);
-  EXPECT_TRUE(slot_span->in_empty_cache());
-  EXPECT_TRUE(slot_span2->in_empty_cache());
-  EXPECT_TRUE(slot_span2->get_freelist_head());
-  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
-
-  // Size that's just above half a page.
-  size_t non_dividing_size =
-      SystemPageSize() / 2 + 1 - ExtraAllocSize(allocator);
-  bucket_index = SizeToIndex(non_dividing_size + ExtraAllocSize(allocator));
-  bucket = &allocator.root()->buckets[bucket_index];
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-
-  ptr = allocator.root()->Alloc(non_dividing_size, type_name);
-  EXPECT_TRUE(ptr);
-
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      bucket->slot_size;
-
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
-
-  ptr2 = allocator.root()->Alloc(non_dividing_size, type_name);
-  EXPECT_TRUE(ptr2);
-  EXPECT_TRUE(slot_span->get_freelist_head());
-  EXPECT_EQ(2u, slot_span->num_allocated_slots);
-  // 2 slots got provisioned: the first one fills the rest of the first
-  // (already provisioned) page and exceeds it by just a tad, thus leading to
-  // provisioning a new page, and the second one fully fits within that new
-  // page.
-  EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
-
-  ptr3 = allocator.root()->Alloc(non_dividing_size, type_name);
-  EXPECT_TRUE(ptr3);
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_EQ(3u, slot_span->num_allocated_slots);
-  EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
-
-  allocator.root()->Free(ptr);
-  allocator.root()->Free(ptr2);
-  allocator.root()->Free(ptr3);
-  EXPECT_TRUE(slot_span->in_empty_cache());
-  EXPECT_TRUE(slot_span2->get_freelist_head());
-  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
-
-  // And test a couple of sizes that do not cross SystemPageSize() with a
-  // single allocation.
-  size_t medium_size = (SystemPageSize() / 2) - ExtraAllocSize(allocator);
-  bucket_index = SizeToIndex(medium_size + ExtraAllocSize(allocator));
-  bucket = &allocator.root()->buckets[bucket_index];
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-
-  ptr = allocator.root()->Alloc(medium_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      (medium_size + ExtraAllocSize(allocator));
-  size_t first_slot_span_slots =
-      SystemPageSize() / (medium_size + ExtraAllocSize(allocator));
-  EXPECT_EQ(2u, first_slot_span_slots);
-  EXPECT_EQ(total_slots - first_slot_span_slots,
-            slot_span->num_unprovisioned_slots);
-
-  allocator.root()->Free(ptr);
-
-  size_t small_size = (SystemPageSize() / 4) - ExtraAllocSize(allocator);
-  bucket_index = SizeToIndex(small_size + ExtraAllocSize(allocator));
-  bucket = &allocator.root()->buckets[bucket_index];
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-
-  ptr = allocator.root()->Alloc(small_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      (small_size + ExtraAllocSize(allocator));
-  first_slot_span_slots =
-      SystemPageSize() / (small_size + ExtraAllocSize(allocator));
-  EXPECT_EQ(total_slots - first_slot_span_slots,
-            slot_span->num_unprovisioned_slots);
-
-  allocator.root()->Free(ptr);
-  EXPECT_TRUE(slot_span->get_freelist_head());
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-
-  ASSERT_LT(ExtraAllocSize(allocator), 64u);
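-  // Pick a request that, together with the allocator extras, totals 32 bytes
-  // when the extras fit, and 64 bytes otherwise.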
-  size_t very_small_size = (ExtraAllocSize(allocator) <= 32)
-                               ? (32 - ExtraAllocSize(allocator))
-                               : (64 - ExtraAllocSize(allocator));
-  size_t very_small_adjusted_size =
-      allocator.root()->AdjustSize0IfNeeded(very_small_size);
-  bucket_index =
-      SizeToIndex(very_small_adjusted_size + ExtraAllocSize(allocator));
-  bucket = &allocator.root()->buckets[bucket_index];
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-
-  ptr = allocator.root()->Alloc(very_small_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  size_t very_small_actual_size = allocator.root()->GetUsableSize(ptr);
-  total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      (very_small_actual_size + ExtraAllocSize(allocator));
-  first_slot_span_slots =
-      SystemPageSize() / (very_small_actual_size + ExtraAllocSize(allocator));
-  EXPECT_EQ(total_slots - first_slot_span_slots,
-            slot_span->num_unprovisioned_slots);
-
-  allocator.root()->Free(ptr);
-  EXPECT_TRUE(slot_span->get_freelist_head());
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-
-  // And try an allocation size (against the generic allocator) that is
-  // larger than a system page.
-  size_t page_and_a_half_size =
-      (SystemPageSize() + (SystemPageSize() / 2)) - ExtraAllocSize(allocator);
-  ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  // Only the first slot was provisioned, and that's the one that was just
-  // allocated so the free list is empty.
-  EXPECT_TRUE(!slot_span->get_freelist_head());
-  total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      (page_and_a_half_size + ExtraAllocSize(allocator));
-  EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
-  ptr2 = allocator.root()->Alloc(page_and_a_half_size, type_name);
-  EXPECT_TRUE(ptr2);
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(2u, slot_span->num_allocated_slots);
-  // As above, only one slot was provisioned.
-  EXPECT_TRUE(!slot_span->get_freelist_head());
-  EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
-  allocator.root()->Free(ptr);
-  allocator.root()->Free(ptr2);
-
-  // And then make sure that exactly the page size only faults one page.
-  size_t page_size = SystemPageSize() - ExtraAllocSize(allocator);
-  ptr = allocator.root()->Alloc(page_size, type_name);
-  EXPECT_TRUE(ptr);
-  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  EXPECT_TRUE(slot_span->get_freelist_head());
-  total_slots =
-      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
-      (page_size + ExtraAllocSize(allocator));
-  EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
-  allocator.root()->Free(ptr);
-}
-
-// Test some of the fragmentation-resistant properties of the allocator.
-TEST_P(PartitionAllocTest, SlotSpanRefilling) {
-  PartitionRoot::Bucket* bucket =
-      &allocator.root()->buckets[test_bucket_index_];
-
-  // Grab two full slot spans and a non-full slot span.
-  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
-  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  EXPECT_NE(slot_span1, bucket->active_slot_spans_head);
-  EXPECT_NE(slot_span2, bucket->active_slot_spans_head);
-  auto* slot_span =
-      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-
-  // Work out a pointer into slot_span1 and free it; then do the same for
-  // slot_span2.
-  void* ptr2 = allocator.root()->SlotStartToObject(
-      SlotSpan::ToSlotSpanStart(slot_span1));
-  allocator.root()->Free(ptr2);
-  ptr2 = allocator.root()->SlotStartToObject(
-      SlotSpan::ToSlotSpanStart(slot_span2));
-  allocator.root()->Free(ptr2);
-
-  // If we perform two allocations from the same bucket now, we expect to
-  // refill both the nearly full slot spans.
-  std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
-  std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-
-  FreeFullSlotSpan(allocator.root(), slot_span2);
-  FreeFullSlotSpan(allocator.root(), slot_span1);
-  allocator.root()->Free(ptr);
-}
-
-// Basic tests to ensure that allocations work for partial page buckets.
-TEST_P(PartitionAllocTest, PartialPages) {
-  // Find a size that is backed by a partial partition page.
-  size_t size = sizeof(void*);
-  size_t bucket_index;
-
-  PartitionRoot::Bucket* bucket = nullptr;
-  constexpr size_t kMaxSize = 4000u;
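-  // We're after a bucket whose slot span covers a number of system pages that
-  // isn't a whole multiple of a partition page, i.e. one that only partially
-  // fills its last partition page.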
-  while (size < kMaxSize) {
-    bucket_index = SizeToIndex(size + ExtraAllocSize(allocator));
-    bucket = &allocator.root()->buckets[bucket_index];
-    if (bucket->num_system_pages_per_slot_span %
-        NumSystemPagesPerPartitionPage()) {
-      break;
-    }
-    size += sizeof(void*);
-  }
-  EXPECT_LT(size, kMaxSize);
-
-  auto* slot_span1 = GetFullSlotSpan(size);
-  auto* slot_span2 = GetFullSlotSpan(size);
-  FreeFullSlotSpan(allocator.root(), slot_span2);
-  FreeFullSlotSpan(allocator.root(), slot_span1);
-}
-
-// Test correct handling if our mapping collides with another.
-TEST_P(PartitionAllocTest, MappingCollision) {
-  size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
-  // The -2 is because the first and last partition pages in a super page are
-  // guard pages. We also discount the partition pages used for the free slot
-  // bitmap.
-  size_t num_slot_span_needed =
-      (NumPartitionPagesPerSuperPage() - 2 -
-       partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
-      num_pages_per_slot_span;
-  size_t num_partition_pages_needed =
-      num_slot_span_needed * num_pages_per_slot_span;
-
-  auto first_super_page_pages =
-      std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
-  auto second_super_page_pages =
-      std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
-
-  size_t i;
-  for (i = 0; i < num_partition_pages_needed; ++i) {
-    first_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
-  }
-
-  uintptr_t slot_span_start =
-      SlotSpan::ToSlotSpanStart(first_super_page_pages[0]);
-  EXPECT_EQ(PartitionPageSize() +
-                partition_alloc::internal::ReservedFreeSlotBitmapSize(),
-            slot_span_start & kSuperPageOffsetMask);
-  uintptr_t super_page =
-      slot_span_start - PartitionPageSize() -
-      partition_alloc::internal::ReservedFreeSlotBitmapSize();
-  // Map a single system page either side of the mapping for our allocations,
-  // with the goal of tripping up alignment of the next mapping.
-  uintptr_t map1 =
-      AllocPages(super_page - PageAllocationGranularity(),
-                 PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kPartitionAlloc);
-  EXPECT_TRUE(map1);
-  uintptr_t map2 =
-      AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
-                 PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kInaccessible),
-                 PageTag::kPartitionAlloc);
-  EXPECT_TRUE(map2);
-
-  for (i = 0; i < num_partition_pages_needed; ++i) {
-    second_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
-  }
-
-  FreePages(map1, PageAllocationGranularity());
-  FreePages(map2, PageAllocationGranularity());
-
-  super_page = SlotSpan::ToSlotSpanStart(second_super_page_pages[0]);
-  EXPECT_EQ(PartitionPageSize() +
-                partition_alloc::internal::ReservedFreeSlotBitmapSize(),
-            super_page & kSuperPageOffsetMask);
-  super_page -= PartitionPageSize() +
-                partition_alloc::internal::ReservedFreeSlotBitmapSize();
-  // Map a single system page either side of the mapping for our allocations,
-  // with the goal of tripping up alignment of the next mapping.
-  map1 = AllocPages(super_page - PageAllocationGranularity(),
-                    PageAllocationGranularity(), PageAllocationGranularity(),
-                    PageAccessibilityConfiguration(
-                        PageAccessibilityConfiguration::kReadWriteTagged),
-                    PageTag::kPartitionAlloc);
-  EXPECT_TRUE(map1);
-  map2 = AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
-                    PageAllocationGranularity(),
-                    PageAccessibilityConfiguration(
-                        PageAccessibilityConfiguration::kReadWriteTagged),
-                    PageTag::kPartitionAlloc);
-  EXPECT_TRUE(map2);
-  EXPECT_TRUE(TrySetSystemPagesAccess(
-      map1, PageAllocationGranularity(),
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kInaccessible)));
-  EXPECT_TRUE(TrySetSystemPagesAccess(
-      map2, PageAllocationGranularity(),
-      PageAccessibilityConfiguration(
-          PageAccessibilityConfiguration::kInaccessible)));
-
-  auto* slot_span_in_third_super_page = GetFullSlotSpan(kTestAllocSize);
-  FreePages(map1, PageAllocationGranularity());
-  FreePages(map2, PageAllocationGranularity());
-
-  EXPECT_EQ(0u, SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
-                    PartitionPageOffsetMask());
-
-  // And make sure we really did get a page in a new superpage.
-  EXPECT_NE(
-      SlotSpan::ToSlotSpanStart(first_super_page_pages[0]) & kSuperPageBaseMask,
-      SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
-          kSuperPageBaseMask);
-  EXPECT_NE(SlotSpan::ToSlotSpanStart(second_super_page_pages[0]) &
-                kSuperPageBaseMask,
-            SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
-                kSuperPageBaseMask);
-
-  FreeFullSlotSpan(allocator.root(), slot_span_in_third_super_page);
-  for (i = 0; i < num_partition_pages_needed; ++i) {
-    FreeFullSlotSpan(allocator.root(), first_super_page_pages[i]);
-    FreeFullSlotSpan(allocator.root(), second_super_page_pages[i]);
-  }
-}
-
-// Tests that slot spans in the free slot span cache do get freed as
-// appropriate.
-TEST_P(PartitionAllocTest, FreeCache) {
-  EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
-
-  size_t big_size = 1000 - ExtraAllocSize(allocator);
-  size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
-  PartitionBucket* bucket = &allocator.root()->buckets[bucket_index];
-
-  void* ptr = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_TRUE(ptr);
-  auto* slot_span =
-      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  // Lazy commit commits only needed pages.
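-  // (With 4 KiB system pages and 16 KiB partition pages -- typical values,
-  // not guaranteed on every platform -- that's 4 KiB committed lazily vs.
-  // 16 KiB committed eagerly.)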
-  size_t expected_committed_size =
-      kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
-  EXPECT_EQ(expected_committed_size,
-            allocator.root()->get_total_size_of_committed_pages());
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-  EXPECT_TRUE(slot_span->in_empty_cache());
-  EXPECT_TRUE(slot_span->get_freelist_head());
-
-  CycleFreeCache(kTestAllocSize);
-
-  // Flushing the cache should have really freed the unused slot spans.
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_FALSE(slot_span->in_empty_cache());
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-  size_t num_system_pages_per_slot_span = allocator.root()
-                                              ->buckets[test_bucket_index_]
-                                              .num_system_pages_per_slot_span;
-  size_t expected_size =
-      kUseLazyCommit ? SystemPageSize()
-                     : num_system_pages_per_slot_span * SystemPageSize();
-  EXPECT_EQ(expected_size,
-            allocator.root()->get_total_size_of_committed_pages());
-
-  // Check that an allocation works okay whilst in this state (a freed slot
-  // span as the active slot spans head).
-  ptr = allocator.root()->Alloc(big_size, type_name);
-  EXPECT_FALSE(bucket->empty_slot_spans_head);
-  allocator.root()->Free(ptr);
-
-  // Also check that a slot span that is bouncing immediately between empty and
-  // used does not get freed.
-  for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
-    ptr = allocator.root()->Alloc(big_size, type_name);
-    EXPECT_TRUE(slot_span->get_freelist_head());
-    allocator.root()->Free(ptr);
-    EXPECT_TRUE(slot_span->get_freelist_head());
-  }
-  EXPECT_EQ(expected_committed_size,
-            allocator.root()->get_total_size_of_committed_pages());
-}
-
-// Tests for a bug we had with losing references to free slot spans.
-TEST_P(PartitionAllocTest, LostFreeSlotSpansBug) {
-  size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
-
-  void* ptr = allocator.root()->Alloc(size, type_name);
-  EXPECT_TRUE(ptr);
-  void* ptr2 = allocator.root()->Alloc(size, type_name);
-  EXPECT_TRUE(ptr2);
-
-  SlotSpanMetadata* slot_span =
-      SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-  SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr2));
-  PartitionBucket* bucket = slot_span->bucket;
-
-  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
-  EXPECT_EQ(1u, slot_span->num_allocated_slots);
-  EXPECT_EQ(1u, slot_span2->num_allocated_slots);
-  EXPECT_TRUE(slot_span->is_full());
-  EXPECT_TRUE(slot_span2->is_full());
-  // The first span was kicked out from the active list, but the second one
-  // wasn't.
-  EXPECT_TRUE(slot_span->marked_full);
-  EXPECT_FALSE(slot_span2->marked_full);
-
-  allocator.root()->Free(ptr);
-  allocator.root()->Free(ptr2);
-
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
-  EXPECT_EQ(0u, slot_span->num_allocated_slots);
-  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
-  EXPECT_FALSE(slot_span->is_full());
-  EXPECT_FALSE(slot_span2->is_full());
-  EXPECT_FALSE(slot_span->marked_full);
-  EXPECT_FALSE(slot_span2->marked_full);
-  EXPECT_TRUE(slot_span->get_freelist_head());
-  EXPECT_TRUE(slot_span2->get_freelist_head());
-
-  CycleFreeCache(kTestAllocSize);
-
-  EXPECT_FALSE(slot_span->get_freelist_head());
-  EXPECT_FALSE(slot_span2->get_freelist_head());
-
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
-  EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
-            bucket->active_slot_spans_head);
-
-  // At this moment, we have two decommitted slot spans, on the empty list.
-  ptr = allocator.root()->Alloc(size, type_name);
-  EXPECT_TRUE(ptr);
-  allocator.root()->Free(ptr);
-
-  EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
-            bucket->active_slot_spans_head);
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_TRUE(bucket->decommitted_slot_spans_head);
-
-  CycleFreeCache(kTestAllocSize);
-
-  // We're now set up to trigger a historical bug by scanning over the active
-  // slot spans list. The current code gets into a different state, but we'll
-  // keep the test as being an interesting corner case.
-  ptr = allocator.root()->Alloc(size, type_name);
-  EXPECT_TRUE(ptr);
-  allocator.root()->Free(ptr);
-
-  EXPECT_TRUE(bucket->is_valid());
-  EXPECT_TRUE(bucket->empty_slot_spans_head);
-  EXPECT_TRUE(bucket->decommitted_slot_spans_head);
-}
-
-#if defined(PA_HAS_DEATH_TESTS)
-
-// Unit tests that check that when an allocation fails in "return null" mode,
-// repeating it doesn't crash and still returns null. The tests need to stress
-// memory subsystem limits to do so, hence they try to allocate 6 GB of
-// memory, each test with a different per-allocation block size.
-//
-// On 64-bit systems we need to restrict the address space to force allocation
-// failure, so these tests run only on POSIX systems that provide setrlimit(),
-// and use it to limit address space to 6GB.
-//
-// Disable these tests on Android because, due to the allocation-heavy behavior,
-// they tend to get OOM-killed rather than pass.
-//
-// Disable these tests on Windows, since they run slower and thus tend to time
-// out and cause flakiness.
-#if !BUILDFLAG(IS_WIN) &&                                      \
-        (!defined(ARCH_CPU_64_BITS) ||                         \
-         (BUILDFLAG(IS_POSIX) &&                               \
-          !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)))) || \
-    BUILDFLAG(IS_FUCHSIA)
-#define MAYBE_RepeatedAllocReturnNullDirect RepeatedAllocReturnNullDirect
-#define MAYBE_RepeatedReallocReturnNullDirect RepeatedReallocReturnNullDirect
-#define MAYBE_RepeatedTryReallocReturnNullDirect \
-  RepeatedTryReallocReturnNullDirect
-#else
-#define MAYBE_RepeatedAllocReturnNullDirect \
-  DISABLED_RepeatedAllocReturnNullDirect
-#define MAYBE_RepeatedReallocReturnNullDirect \
-  DISABLED_RepeatedReallocReturnNullDirect
-#define MAYBE_RepeatedTryReallocReturnNullDirect \
-  DISABLED_RepeatedTryReallocReturnNullDirect
-#endif
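-// (gtest skips tests whose names start with DISABLED_ unless
-// --gtest_also_run_disabled_tests is passed, so the MAYBE_ aliases above
-// select either the enabled or the disabled name at compile time.)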
-
-// The following tests wrap a called function in an expect-death statement
-// to perform their test, because they are non-hermetic. Specifically they are
-// going to attempt to exhaust the allocatable memory, which leaves the
-// allocator in a bad global state.
-// Performing them as death tests causes them to be forked into their own
-// process, so they won't pollute other tests.
-//
-// These tests are *very* slow when BUILDFLAG(PA_DCHECK_IS_ON), because they
-// memset() many GiB of data (see crbug.com/1168168).
-// TODO(lizeb): make these tests faster.
-TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {
-  // A direct-mapped allocation size.
-  size_t direct_map_size = 32 * 1024 * 1024;
-  ASSERT_GT(direct_map_size, kMaxBucketed);
-  EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionAllocWithFlags),
-               "Passed DoReturnNullTest");
-}
-
-// Repeating above test with Realloc
-TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedReallocReturnNullDirect) {
-  size_t direct_map_size = 32 * 1024 * 1024;
-  ASSERT_GT(direct_map_size, kMaxBucketed);
-  EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionReallocWithFlags),
-               "Passed DoReturnNullTest");
-}
-
-// Repeating above test with TryRealloc
-TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedTryReallocReturnNullDirect) {
-  size_t direct_map_size = 32 * 1024 * 1024;
-  ASSERT_GT(direct_map_size, kMaxBucketed);
-  EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionRootTryRealloc),
-               "Passed DoReturnNullTest");
-}
-
-// TODO(crbug.com/1348221) re-enable the tests below, once the allocator
-// actually returns nullptr for non direct-mapped allocations.
-// When doing so, they will need to be made MAYBE_ like those above.
-//
-// Tests "return null" with a 512 kB block size.
-TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedAllocReturnNull) {
-  // A single-slot but non-direct-mapped allocation size.
-  size_t single_slot_size = 512 * 1024;
-  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
-  ASSERT_LE(single_slot_size, kMaxBucketed);
-  EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionAllocWithFlags),
-               "Passed DoReturnNullTest");
-}
-
-// Repeating above test with Realloc.
-TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedReallocReturnNull) {
-  size_t single_slot_size = 512 * 1024;
-  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
-  ASSERT_LE(single_slot_size, kMaxBucketed);
-  EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionReallocWithFlags),
-               "Passed DoReturnNullTest");
-}
-
-// Repeating above test with TryRealloc.
-TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedTryReallocReturnNull) {
-  size_t single_slot_size = 512 * 1024;
-  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
-  ASSERT_LE(single_slot_size, kMaxBucketed);
-  EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionRootTryRealloc),
-               "Passed DoReturnNullTest");
-}
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-// Check that Arm's memory tagging extension (MTE) is correctly protecting
-// freed pointers. Writes to a freed pointer should result in a crash.
-TEST_P(PartitionAllocDeathTest, MTEProtectsFreedPtr) {
-  base::CPU cpu;
-  if (!cpu.has_mte()) {
-    // This test won't pass on systems without MTE.
-    GTEST_SKIP();
-  }
-
-  constexpr uint64_t kCookie = 0x1234567890ABCDEF;
-  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
-
-  // Make an arbitrary-sized small allocation.
-  size_t alloc_size = 64 - ExtraAllocSize(allocator);
-  uint64_t* ptr =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  EXPECT_TRUE(ptr);
-
-  // Check that the allocation's writable.
-  *ptr = kCookie;
-
-  // Invalidate ptr by freeing it.
-  allocator.root()->Free(ptr);
-
-  // Writing to ptr after free() should crash
-  EXPECT_EXIT(
-      {
-        // Should be in synchronous MTE mode for running this test.
-        *ptr = kQuarantined;
-      },
-      testing::KilledBySignal(SIGSEGV), "");
-}
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-// Make sure that malloc(-1) dies.
-// In the past, we had an integer overflow that would alias malloc(-1) to
-// malloc(0), which is not good.
-TEST_P(PartitionAllocDeathTest, LargeAllocs) {
-  // Largest alloc.
-  EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
-  // And the smallest allocation we expect to die.
-  // TODO(bartekn): Separate into its own test, as it wouldn't run (same below).
-  EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
-}
-
-// These tests don't work deterministically when BRP is enabled on certain
-// architectures. On Free(), BRP's ref-count gets overwritten by an encoded
-// freelist pointer. On little-endian 64-bit architectures, this always
-// happens to be an even number, which triggers BRP's own CHECK (sic!). On
-// other architectures, it's likely to be an odd number >1, which fools BRP
-// into thinking the memory isn't freed and is still referenced, thus making
-// it quarantine the slot and return early, before
-// PA_CHECK(slot_start != freelist_head) is reached.
-// TODO(bartekn): Enable in the BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) case.
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
-
-// Check that our immediate double-free detection works.
-TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree) {
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  allocator.root()->Free(ptr);
-  EXPECT_DEATH(allocator.root()->Free(ptr), "");
-}
-
-// As above, but when this isn't the only slot in the span.
-TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree2ndSlot) {
-  void* ptr0 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr0);
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  allocator.root()->Free(ptr);
-  EXPECT_DEATH(allocator.root()->Free(ptr), "");
-  allocator.root()->Free(ptr0);
-}
-
-// Check that our double-free detection based on |num_allocated_slots| not going
-// below 0 works.
-//
-// Unlike in ImmediateDoubleFree test, we can't have a 2ndSlot version, as this
-// protection wouldn't work when there is another slot present in the span. It
-// will prevent |num_allocated_slots| from going below 0.
-TEST_P(PartitionAllocDeathTest, NumAllocatedSlotsDoubleFree) {
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr2);
-  allocator.root()->Free(ptr);
-  allocator.root()->Free(ptr2);
-  // This is not an immediate double-free so our immediate detection won't
-  // fire. However, it does take |num_allocated_slots| to -1, which is illegal
-  // and should be trapped.
-  EXPECT_DEATH(allocator.root()->Free(ptr), "");
-}
-
-#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-        // (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
-
-// Check that guard pages are present where expected.
-TEST_P(PartitionAllocDeathTest, DirectMapGuardPages) {
-  const size_t kSizes[] = {
-      kMaxBucketed + ExtraAllocSize(allocator) + 1,
-      kMaxBucketed + SystemPageSize(), kMaxBucketed + PartitionPageSize(),
-      partition_alloc::internal::base::bits::AlignUp(
-          kMaxBucketed + kSuperPageSize, kSuperPageSize) -
-          PartitionRoot::GetDirectMapMetadataAndGuardPagesSize()};
-  for (size_t size : kSizes) {
-    ASSERT_GT(size, kMaxBucketed);
-    size -= ExtraAllocSize(allocator);
-    EXPECT_GT(size, kMaxBucketed)
-        << "allocation not large enough for direct allocation";
-    void* ptr = allocator.root()->Alloc(size, type_name);
-
-    EXPECT_TRUE(ptr);
-    char* char_ptr = static_cast<char*>(ptr) - kPointerOffset;
-
-    EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
-    EXPECT_DEATH(*(char_ptr + partition_alloc::internal::base::bits::AlignUp(
-                                  size, SystemPageSize())) = 'A',
-                 "");
-
-    allocator.root()->Free(ptr);
-  }
-}
-
-// These tests rely on precise layout. They handle cookie, not ref-count.
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
-    PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-
-TEST_P(PartitionAllocDeathTest, UseAfterFreeDetection) {
-  base::CPU cpu;
-  void* data = allocator.root()->Alloc(100, "");
-  allocator.root()->Free(data);
-
-  // Use after free: this doesn't crash here, but the next allocation should,
-  // since we corrupted the freelist.
-  memset(data, 0x42, 100);
-  EXPECT_DEATH(allocator.root()->Alloc(100, ""), "");
-}
-
-TEST_P(PartitionAllocDeathTest, FreelistCorruption) {
-  base::CPU cpu;
-  const size_t alloc_size = 2 * sizeof(void*);
-  void** fake_freelist_entry =
-      static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
-  fake_freelist_entry[0] = nullptr;
-  fake_freelist_entry[1] = nullptr;
-
-  void** uaf_data =
-      static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
-  allocator.root()->Free(uaf_data);
-  // Try to confuse the allocator. This is still easy to circumvent
-  // deliberately; one would "just" need to set uaf_data[1] to ~uaf_data[0].
-  void* previous_uaf_data = uaf_data[0];
-  uaf_data[0] = fake_freelist_entry;
-  EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
-
-  // Restore the freelist entry value, otherwise freelist corruption is detected
-  // in TearDown(), crashing this process.
-  uaf_data[0] = previous_uaf_data;
-
-  allocator.root()->Free(fake_freelist_entry);
-}
-
-// With BUILDFLAG(PA_DCHECK_IS_ON), cookie already handles off-by-one detection.
-#if !BUILDFLAG(PA_DCHECK_IS_ON)
-TEST_P(PartitionAllocDeathTest, OffByOneDetection) {
-  base::CPU cpu;
-  const size_t alloc_size = 2 * sizeof(void*);
-  char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size, ""));
-  if (cpu.has_mte()) {
-    EXPECT_DEATH(array[alloc_size] = 'A', "");
-  } else {
-    char previous_value = array[alloc_size];
-    // volatile is required to prevent the compiler from getting too clever and
-    // eliding the out-of-bounds write. The root cause is that the PA_MALLOC_FN
-    // annotation tells the compiler (among other things) that the returned
-    // value cannot alias anything.
-    *const_cast<volatile char*>(&array[alloc_size]) = 'A';
-    // Crash at the next allocation. This assumes that we are touching a new,
-    // non-randomized slot span, where the next slot to be handed over to the
-    // application directly follows the current one.
-    EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
-
-    // Restore integrity, otherwise the process will crash in TearDown().
-    array[alloc_size] = previous_value;
-  }
-}
-
-TEST_P(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
-  base::CPU cpu;
-  const size_t alloc_size = 2 * sizeof(void*);
-  void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
-  char valid;
-  if (cpu.has_mte()) {
-    EXPECT_DEATH(array[2] = &valid, "");
-  } else {
-    void* previous_value = array[2];
-    // As above, needs volatile to convince the compiler to perform the write.
-    *const_cast<void* volatile*>(&array[2]) = &valid;
-    // Crash at the next allocation. This assumes that we are touching a new,
-    // non-randomized slot span, where the next slot to be handed over to the
-    // application directly follows the current one.
-    EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
-    array[2] = previous_value;
-  }
-}
-#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)
-
-#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
-        // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
-
-#endif  // defined(PA_HAS_DEATH_TESTS)
-
-// Tests that dumping stats via |DumpStats| runs without crashing and returns
-// non-zero values when memory is allocated.
-TEST_P(PartitionAllocTest, DumpMemoryStats) {
-  {
-    void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-    MockPartitionStatsDumper mock_stats_dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &mock_stats_dumper);
-    EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded());
-    allocator.root()->Free(ptr);
-  }
-
-  // This series of tests checks the active -> empty -> decommitted states.
-  {
-    {
-      void* ptr =
-          allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name);
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_EQ(2048u, stats->bucket_slot_size);
-      EXPECT_EQ(2048u, stats->active_bytes);
-      EXPECT_EQ(1u, stats->active_count);
-      EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->discardable_bytes);
-      EXPECT_EQ(0u, stats->num_full_slot_spans);
-      EXPECT_EQ(1u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-      allocator.root()->Free(ptr);
-    }
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
-
-      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_EQ(2048u, stats->bucket_slot_size);
-      EXPECT_EQ(0u, stats->active_bytes);
-      EXPECT_EQ(0u, stats->active_count);
-      EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
-      EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->discardable_bytes);
-      EXPECT_EQ(0u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(1u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-    }
-
-    // TODO(crbug.com/722911): Commenting this out causes this test to fail when
-    // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
-    // when run with the others (--gtest_filter=PartitionAllocTest.*).
-    CycleFreeCache(kTestAllocSize);
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
-
-      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_EQ(2048u, stats->bucket_slot_size);
-      EXPECT_EQ(0u, stats->active_bytes);
-      EXPECT_EQ(0u, stats->active_count);
-      EXPECT_EQ(0u, stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->discardable_bytes);
-      EXPECT_EQ(0u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
-    }
-  }
-
-  // This test checks for correct empty slot span list accounting.
-  {
-    size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
-    void* ptr1 = allocator.root()->Alloc(size, type_name);
-    void* ptr2 = allocator.root()->Alloc(size, type_name);
-    allocator.root()->Free(ptr1);
-    allocator.root()->Free(ptr2);
-
-    CycleFreeCache(kTestAllocSize);
-
-    ptr1 = allocator.root()->Alloc(size, type_name);
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-      const PartitionBucketMemoryStats* stats =
-          dumper.GetBucketStats(PartitionPageSize());
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_EQ(PartitionPageSize(), stats->bucket_slot_size);
-      EXPECT_EQ(PartitionPageSize(), stats->active_bytes);
-      EXPECT_EQ(1u, stats->active_count);
-      EXPECT_EQ(PartitionPageSize(), stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->discardable_bytes);
-      EXPECT_EQ(1u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
-    }
-    allocator.root()->Free(ptr1);
-  }
-
-  // This test checks for correct direct mapped accounting.
-  {
-    size_t size_smaller = kMaxBucketed + 1;
-    size_t size_bigger = (kMaxBucketed * 2) + 1;
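-    // The stats below are expected to report the request rounded up to a
-    // system page boundary as the direct map's bucket slot size.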
-    size_t real_size_smaller =
-        (size_smaller + SystemPageOffsetMask()) & SystemPageBaseMask();
-    size_t real_size_bigger =
-        (size_bigger + SystemPageOffsetMask()) & SystemPageBaseMask();
-    void* ptr = allocator.root()->Alloc(size_smaller, type_name);
-    void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-      const PartitionBucketMemoryStats* stats =
-          dumper.GetBucketStats(real_size_smaller);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_TRUE(stats->is_direct_map);
-      EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
-      EXPECT_EQ(real_size_smaller, stats->active_bytes);
-      EXPECT_EQ(1u, stats->active_count);
-      EXPECT_EQ(real_size_smaller, stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->discardable_bytes);
-      EXPECT_EQ(1u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-
-      stats = dumper.GetBucketStats(real_size_bigger);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_TRUE(stats->is_direct_map);
-      EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
-      EXPECT_EQ(real_size_bigger, stats->active_bytes);
-      EXPECT_EQ(1u, stats->active_count);
-      EXPECT_EQ(real_size_bigger, stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->discardable_bytes);
-      EXPECT_EQ(1u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-    }
-
-    allocator.root()->Free(ptr2);
-    allocator.root()->Free(ptr);
-
-    // Whilst we're here, allocate again and free with different ordering to
-    // give a workout to our linked list code.
-    ptr = allocator.root()->Alloc(size_smaller, type_name);
-    ptr2 = allocator.root()->Alloc(size_bigger, type_name);
-    allocator.root()->Free(ptr);
-    allocator.root()->Free(ptr2);
-  }
-
-  // This test checks large-but-not-quite-direct allocations.
-  {
-    const size_t requested_size = 16 * SystemPageSize();
-    void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-      size_t slot_size = SizeToBucketSize(requested_size + 1);
-      const PartitionBucketMemoryStats* stats =
-          dumper.GetBucketStats(slot_size);
-      ASSERT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_FALSE(stats->is_direct_map);
-      EXPECT_EQ(slot_size, stats->bucket_slot_size);
-      EXPECT_EQ(requested_size + 1 + ExtraAllocSize(allocator),
-                stats->active_bytes);
-      EXPECT_EQ(1u, stats->active_count);
-      EXPECT_EQ(slot_size, stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ((slot_size - (requested_size + 1)) / SystemPageSize() *
-                    SystemPageSize(),
-                stats->discardable_bytes);
-      EXPECT_EQ(1u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-    }
-
-    allocator.root()->Free(ptr);
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
-
-      size_t slot_size = SizeToBucketSize(requested_size + 1);
-      const PartitionBucketMemoryStats* stats =
-          dumper.GetBucketStats(slot_size);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_FALSE(stats->is_direct_map);
-      EXPECT_EQ(slot_size, stats->bucket_slot_size);
-      EXPECT_EQ(0u, stats->active_bytes);
-      EXPECT_EQ(0u, stats->active_count);
-      EXPECT_EQ(slot_size, stats->resident_bytes);
-      EXPECT_EQ(slot_size, stats->decommittable_bytes);
-      EXPECT_EQ(0u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(1u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-    }
-
-    void* ptr2 = allocator.root()->Alloc(requested_size + SystemPageSize() + 1,
-                                         type_name);
-    EXPECT_EQ(ptr, ptr2);
-
-    {
-      MockPartitionStatsDumper dumper;
-      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                  &dumper);
-      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-      size_t slot_size =
-          SizeToBucketSize(requested_size + SystemPageSize() + 1);
-      const PartitionBucketMemoryStats* stats =
-          dumper.GetBucketStats(slot_size);
-      EXPECT_TRUE(stats);
-      EXPECT_TRUE(stats->is_valid);
-      EXPECT_FALSE(stats->is_direct_map);
-      EXPECT_EQ(slot_size, stats->bucket_slot_size);
-      EXPECT_EQ(
-          requested_size + SystemPageSize() + 1 + ExtraAllocSize(allocator),
-          stats->active_bytes);
-      EXPECT_EQ(1u, stats->active_count);
-      EXPECT_EQ(slot_size, stats->resident_bytes);
-      EXPECT_EQ(0u, stats->decommittable_bytes);
-      EXPECT_EQ((slot_size - (requested_size + SystemPageSize() + 1)) /
-                    SystemPageSize() * SystemPageSize(),
-                stats->discardable_bytes);
-      EXPECT_EQ(1u, stats->num_full_slot_spans);
-      EXPECT_EQ(0u, stats->num_active_slot_spans);
-      EXPECT_EQ(0u, stats->num_empty_slot_spans);
-      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
-    }
-
-    allocator.root()->Free(ptr2);
-  }
-}
-
-// Tests the API to purge freeable memory.
-TEST_P(PartitionAllocTest, Purge) {
-  char* ptr = static_cast<char*>(
-      allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name));
-  allocator.root()->Free(ptr);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
-    EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
-  }
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-    EXPECT_EQ(0u, stats->resident_bytes);
-  }
-  // Calling purge again here is a good way of testing we didn't mess up the
-  // state of the free cache ring.
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-
-  // A single-slot but non-direct-mapped allocation size.
-  size_t single_slot_size = 512 * 1024;
-  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
-  ASSERT_LE(single_slot_size, kMaxBucketed);
-  char* big_ptr =
-      static_cast<char*>(allocator.root()->Alloc(single_slot_size, type_name));
-  allocator.root()->Free(big_ptr);
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-
-  CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
-  CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
-}
-
-// Tests that we prefer to allocate into a non-empty partition page over an
-// empty one. This is an important aspect of minimizing memory usage for some
-// allocation sizes, particularly larger ones.
-TEST_P(PartitionAllocTest, PreferActiveOverEmpty) {
-  size_t size = (SystemPageSize() * 2) - ExtraAllocSize(allocator);
-  // Allocate 3 full slot spans worth of 8192-byte allocations.
-  // Each slot span for this size is 16384 bytes, or 1 partition page and 2
-  // slots.
-  void* ptr1 = allocator.root()->Alloc(size, type_name);
-  void* ptr2 = allocator.root()->Alloc(size, type_name);
-  void* ptr3 = allocator.root()->Alloc(size, type_name);
-  void* ptr4 = allocator.root()->Alloc(size, type_name);
-  void* ptr5 = allocator.root()->Alloc(size, type_name);
-  void* ptr6 = allocator.root()->Alloc(size, type_name);
-
-  SlotSpanMetadata* slot_span1 = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr1));
-  SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr3));
-  SlotSpanMetadata* slot_span3 = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr6));
-  EXPECT_NE(slot_span1, slot_span2);
-  EXPECT_NE(slot_span2, slot_span3);
-  PartitionBucket* bucket = slot_span1->bucket;
-  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
-
-  // Free up the 2nd slot in each slot span.
-  // This leaves the active list containing 3 slot spans, each with 1 used and 1
-  // free slot. The active slot span will be the one containing ptr1.
-  allocator.root()->Free(ptr6);
-  allocator.root()->Free(ptr4);
-  allocator.root()->Free(ptr2);
-  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
-
-  // Empty the middle slot span in the active list.
-  allocator.root()->Free(ptr3);
-  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
-
-  // Empty the first slot span in the active list -- also the current slot span.
-  allocator.root()->Free(ptr1);
-
-  // A good choice here is to re-fill the third slot span since the first two
-  // are empty. We used to fail that.
-  void* ptr7 = allocator.root()->Alloc(size, type_name);
-  PA_EXPECT_PTR_EQ(ptr6, ptr7);
-  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
-
-  allocator.root()->Free(ptr5);
-  allocator.root()->Free(ptr7);
-}
-
-// Tests the API to purge discardable memory.
-TEST_P(PartitionAllocTest, PurgeDiscardableSecondPage) {
-  // Free the second of two 4096 byte allocations and then purge.
-  void* ptr1 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name));
-  allocator.root()->Free(ptr2);
-  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr1));
-  EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(SystemPageSize());
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
-    EXPECT_EQ(SystemPageSize(), stats->active_bytes);
-    EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
-  EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
-
-  allocator.root()->Free(ptr1);
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableFirstPage) {
-  // Free the first of two 4096 byte allocations and then purge.
-  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name));
-  void* ptr2 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  allocator.root()->Free(ptr1);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(SystemPageSize());
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-#if BUILDFLAG(IS_WIN)
-    EXPECT_EQ(0u, stats->discardable_bytes);
-#else
-    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
-#endif
-    EXPECT_EQ(SystemPageSize(), stats->active_bytes);
-    EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
-
-  allocator.root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {
-  const size_t requested_size = 2.5 * SystemPageSize();
-  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name));
-  void* ptr2 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  void* ptr3 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  void* ptr4 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
-  memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
-  allocator.root()->Free(ptr1);
-  allocator.root()->Free(ptr2);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(requested_size);
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-#if BUILDFLAG(IS_WIN)
-    EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
-#else
-    EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
-#endif
-    EXPECT_EQ(requested_size * 2, stats->active_bytes);
-    EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  // Except for Windows, the first page is discardable because the freelist
-  // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
-  // checks for Linux and ChromeOS, not for Windows.
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
-
-  allocator.root()->Free(ptr3);
-  allocator.root()->Free(ptr4);
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAllocOnSlotBoundary) {
-  const size_t requested_size = 2.5 * SystemPageSize();
-  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name));
-  void* ptr2 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  void* ptr3 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  void* ptr4 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
-  memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
-  allocator.root()->Free(ptr2);
-  allocator.root()->Free(ptr1);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(requested_size);
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-#if BUILDFLAG(IS_WIN)
-    EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
-#else
-    EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
-#endif
-    EXPECT_EQ(requested_size * 2, stats->active_bytes);
-    EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
-  // Except for Windows, the third page is discardable because the freelist
-  // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
-  // checks for Linux and ChromeOS, not for Windows.
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
-
-  allocator.root()->Free(ptr3);
-  allocator.root()->Free(ptr4);
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableManyPages) {
-  // On systems with large pages, use fewer pages because:
-  // 1) There must be a bucket for kFirstAllocPages * SystemPageSize(), and
-  // 2) On low-end systems, using too many large pages can OOM during the test.
-  const bool kHasLargePages = SystemPageSize() > 4096;
-  const size_t kFirstAllocPages = kHasLargePages ? 32 : 64;
-  const size_t kSecondAllocPages = kHasLargePages ? 31 : 61;
-
-  // Detect case (1) from above.
-  PA_DCHECK(kFirstAllocPages * SystemPageSize() < (1UL << kMaxBucketedOrder));
-
-  const size_t kDeltaPages = kFirstAllocPages - kSecondAllocPages;
-
-  {
-    ScopedPageAllocation p(allocator, kFirstAllocPages);
-    p.TouchAllPages();
-  }
-
-  ScopedPageAllocation p(allocator, kSecondAllocPages);
-
-  MockPartitionStatsDumper dumper;
-  allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                              &dumper);
-  EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-  const PartitionBucketMemoryStats* stats =
-      dumper.GetBucketStats(kFirstAllocPages * SystemPageSize());
-  EXPECT_TRUE(stats);
-  EXPECT_TRUE(stats->is_valid);
-  EXPECT_EQ(0u, stats->decommittable_bytes);
-  EXPECT_EQ(kDeltaPages * SystemPageSize(), stats->discardable_bytes);
-  EXPECT_EQ(kSecondAllocPages * SystemPageSize(), stats->active_bytes);
-  EXPECT_EQ(kFirstAllocPages * SystemPageSize(), stats->resident_bytes);
-
-  for (size_t i = 0; i < kFirstAllocPages; i++) {
-    CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
-  }
-
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-
-  for (size_t i = 0; i < kSecondAllocPages; i++) {
-    CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
-  }
-  for (size_t i = kSecondAllocPages; i < kFirstAllocPages; i++) {
-    CHECK_PAGE_IN_CORE(p.PageAtIndex(i), false);
-  }
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableWithFreeListRewrite) {
-  // This sub-test tests truncation of the provisioned slots in a trickier
-  // case where the freelist is rewritten.
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name));
-  void* ptr2 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  void* ptr3 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  void* ptr4 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  ptr1[0] = 'A';
-  ptr1[SystemPageSize()] = 'A';
-  ptr1[SystemPageSize() * 2] = 'A';
-  ptr1[SystemPageSize() * 3] = 'A';
-  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr1));
-  allocator.root()->Free(ptr2);
-  allocator.root()->Free(ptr4);
-  allocator.root()->Free(ptr1);
-  EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
-
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(SystemPageSize());
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-#if BUILDFLAG(IS_WIN)
-    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
-#else
-    EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
-#endif
-    EXPECT_EQ(SystemPageSize(), stats->active_bytes);
-    EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
-
-  // Let's check we didn't brick the freelist.
-  void* ptr1b = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  PA_EXPECT_PTR_EQ(ptr1, ptr1b);
-  void* ptr2b = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  PA_EXPECT_PTR_EQ(ptr2, ptr2b);
-  EXPECT_FALSE(slot_span->get_freelist_head());
-
-  allocator.root()->Free(ptr1);
-  allocator.root()->Free(ptr2);
-  allocator.root()->Free(ptr3);
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {
-  // This sub-test is similar, but tests a double-truncation.
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name));
-  void* ptr2 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  void* ptr3 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  void* ptr4 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  ptr1[0] = 'A';
-  ptr1[SystemPageSize()] = 'A';
-  ptr1[SystemPageSize() * 2] = 'A';
-  ptr1[SystemPageSize() * 3] = 'A';
-  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr1));
-  allocator.root()->Free(ptr4);
-  allocator.root()->Free(ptr3);
-  EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
-
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(SystemPageSize());
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-    EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
-    EXPECT_EQ(2 * SystemPageSize(), stats->active_bytes);
-    EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
-
-  EXPECT_FALSE(slot_span->get_freelist_head());
-
-  allocator.root()->Free(ptr1);
-  allocator.root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocTest, PurgeDiscardableSmallSlotsWithTruncate) {
-  size_t requested_size = 0.5 * SystemPageSize();
-  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name));
-  void* ptr2 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  void* ptr3 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  void* ptr4 = allocator.root()->Alloc(
-      requested_size - ExtraAllocSize(allocator), type_name);
-  allocator.root()->Free(ptr3);
-  allocator.root()->Free(ptr4);
-  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
-      allocator.root()->ObjectToSlotStart(ptr1));
-  EXPECT_EQ(4u, slot_span->num_unprovisioned_slots);
-  {
-    MockPartitionStatsDumper dumper;
-    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
-                                &dumper);
-    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
-
-    const PartitionBucketMemoryStats* stats =
-        dumper.GetBucketStats(requested_size);
-    EXPECT_TRUE(stats);
-    EXPECT_TRUE(stats->is_valid);
-    EXPECT_EQ(0u, stats->decommittable_bytes);
-    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
-    EXPECT_EQ(requested_size * 2, stats->active_bytes);
-    EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
-  }
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
-  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
-  EXPECT_EQ(6u, slot_span->num_unprovisioned_slots);
-
-  allocator.root()->Free(ptr1);
-  allocator.root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocTest, ActiveListMaintenance) {
-  size_t size = SystemPageSize() - ExtraAllocSize(allocator);
-  size_t real_size = size + ExtraAllocSize(allocator);
-  size_t bucket_index =
-      allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
-  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
-  ASSERT_EQ(bucket->slot_size, real_size);
-  size_t slots_per_span = bucket->num_system_pages_per_slot_span;
-
-  // Make 10 full slot spans.
-  constexpr int kSpans = 10;
-  std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
-  for (int span_index = 0; span_index < kSpans; span_index++) {
-    for (size_t i = 0; i < slots_per_span; i++) {
-      allocated_memory_spans[span_index].push_back(
-          allocator.root()->Alloc(size, ""));
-    }
-  }
-
-  // Free one entry in the middle span, creating a partial slot span.
-  constexpr size_t kSpanIndex = 5;
-  allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
-  allocated_memory_spans[kSpanIndex].pop_back();
-
-  // Empty the last slot span.
-  for (void* ptr : allocated_memory_spans[kSpans - 1]) {
-    allocator.root()->Free(ptr);
-  }
-  allocated_memory_spans.pop_back();
-
-  // The active list now is:
-  // Partial -> Empty -> Full -> Full -> ... -> Full
-  bucket->MaintainActiveList();
-
-  // Only one entry in the active list.
-  ASSERT_NE(bucket->active_slot_spans_head,
-            SlotSpanMetadata::get_sentinel_slot_span());
-  EXPECT_FALSE(bucket->active_slot_spans_head->next_slot_span);
-
-  // The empty list has 1 entry.
-  ASSERT_NE(bucket->empty_slot_spans_head,
-            SlotSpanMetadata::get_sentinel_slot_span());
-  EXPECT_FALSE(bucket->empty_slot_spans_head->next_slot_span);
-
-  // The rest are full slot spans.
-  EXPECT_EQ(8u, bucket->num_full_slot_spans);
-
-  // Free all memory.
-  for (const auto& span : allocated_memory_spans) {
-    for (void* ptr : span) {
-      allocator.root()->Free(ptr);
-    }
-  }
-}
-
-TEST_P(PartitionAllocTest, ReallocMovesCookie) {
-  // Resize so as to be sure to hit a "resize in place" case, and ensure that
-  // use of the entire result is compatible with the debug mode's cookie, even
-  // when the bucket size is large enough to span more than one partition page
-  // and we can track the "raw" size. See https://crbug.com/709271
-  static const size_t kSize = MaxRegularSlotSpanSize();
-  void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
-  EXPECT_TRUE(ptr);
-
-  memset(ptr, 0xbd, kSize + 1);
-  ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
-  EXPECT_TRUE(ptr);
-
-  memset(ptr, 0xbd, kSize + 2);
-  allocator.root()->Free(ptr);
-}
-
-TEST_P(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
-  // For crbug.com/781473
-  static constexpr size_t kSize = 264;
-  void* ptr = allocator.root()->Alloc(kSize, type_name);
-  EXPECT_TRUE(ptr);
-
-  ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
-  EXPECT_TRUE(ptr);
-
-  allocator.root()->Free(ptr);
-}
-
-TEST_P(PartitionAllocTest, ZeroFill) {
-  static constexpr size_t kAllZerosSentinel =
-      std::numeric_limits<size_t>::max();
-  for (size_t size : kTestSizes) {
-    char* p = static_cast<char*>(
-        allocator.root()->AllocWithFlags(AllocFlags::kZeroFill, size, nullptr));
-    size_t non_zero_position = kAllZerosSentinel;
-    for (size_t i = 0; i < size; ++i) {
-      if (0 != p[i]) {
-        non_zero_position = i;
-        break;
-      }
-    }
-    EXPECT_EQ(kAllZerosSentinel, non_zero_position)
-        << "test allocation size: " << size;
-    allocator.root()->Free(p);
-  }
-
-  for (int i = 0; i < 10; ++i) {
-    SCOPED_TRACE(i);
-    AllocateRandomly(allocator.root(), 250, AllocFlags::kZeroFill);
-  }
-}
-
-TEST_P(PartitionAllocTest, Bug_897585) {
-  // Need sizes big enough to be direct mapped and a delta small enough to
-  // allow re-use of the slot span when cookied. These numbers fall out of the
-  // test case in the indicated bug.
-  size_t kInitialSize = 983050;
-  size_t kDesiredSize = 983100;
-  ASSERT_GT(kInitialSize, kMaxBucketed);
-  ASSERT_GT(kDesiredSize, kMaxBucketed);
-  void* ptr = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
-                                               kInitialSize, nullptr);
-  ASSERT_NE(nullptr, ptr);
-  ptr = allocator.root()->ReallocWithFlags(AllocFlags::kReturnNull, ptr,
-                                           kDesiredSize, nullptr);
-  ASSERT_NE(nullptr, ptr);
-  memset(ptr, 0xbd, kDesiredSize);
-  allocator.root()->Free(ptr);
-}
-
-TEST_P(PartitionAllocTest, OverrideHooks) {
-  constexpr size_t kOverriddenSize = 1234;
-  constexpr const char* kOverriddenType = "Overridden type";
-  constexpr unsigned char kOverriddenChar = 'A';
-
-  // Marked static so that we can use them in non-capturing lambdas below.
-  // (Non-capturing lambdas convert directly to function pointers.)
-  static volatile bool free_called = false;
-  static void* overridden_allocation = nullptr;
-  overridden_allocation = malloc(kOverriddenSize);
-  memset(overridden_allocation, kOverriddenChar, kOverriddenSize);
-
-  PartitionAllocHooks::SetOverrideHooks(
-      [](void** out, unsigned int flags, size_t size,
-         const char* type_name) -> bool {
-        if (size == kOverriddenSize && type_name == kOverriddenType) {
-          *out = overridden_allocation;
-          return true;
-        }
-        return false;
-      },
-      [](void* address) -> bool {
-        if (address == overridden_allocation) {
-          free_called = true;
-          return true;
-        }
-        return false;
-      },
-      [](size_t* out, void* address) -> bool {
-        if (address == overridden_allocation) {
-          *out = kOverriddenSize;
-          return true;
-        }
-        return false;
-      });
-
-  void* ptr = allocator.root()->AllocWithFlags(
-      AllocFlags::kReturnNull, kOverriddenSize, kOverriddenType);
-  ASSERT_EQ(ptr, overridden_allocation);
-
-  allocator.root()->Free(ptr);
-  EXPECT_TRUE(free_called);
-
-  // overridden_allocation has not actually been freed, so we can now
-  // immediately realloc it.
-  free_called = false;
-  ptr = allocator.root()->ReallocWithFlags(AllocFlags::kReturnNull, ptr, 1,
-                                           nullptr);
-  ASSERT_NE(ptr, nullptr);
-  EXPECT_NE(ptr, overridden_allocation);
-  EXPECT_TRUE(free_called);
-  EXPECT_EQ(*(char*)ptr, kOverriddenChar);
-  allocator.root()->Free(ptr);
-
-  PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
-  free(overridden_allocation);
-}
-
-TEST_P(PartitionAllocTest, Alignment) {
-  std::vector<void*> allocated_ptrs;
-
-  for (size_t size = 1; size <= PartitionPageSize(); size <<= 1) {
-    if (size <= ExtraAllocSize(allocator)) {
-      continue;
-    }
-    size_t requested_size = size - ExtraAllocSize(allocator);
-
-    // All allocations which are not direct-mapped occupy contiguous slots of a
-    // span, starting on a page boundary. This means that allocations are first
-    // rounded up to the nearest bucket size, then have an address of the form:
-    //   (partition-page-aligned address) + i * bucket_size.
-    //
-    // All powers of two are bucket sizes, meaning that all power of two
-    // allocations smaller than a page will be aligned on the allocation size.
-    size_t expected_alignment = size;
-    for (int index = 0; index < 3; index++) {
-      void* ptr = allocator.root()->Alloc(requested_size, "");
-      allocated_ptrs.push_back(ptr);
-      EXPECT_EQ(0u,
-                allocator.root()->ObjectToSlotStart(ptr) % expected_alignment)
-          << (index + 1) << "-th allocation of size=" << size;
-    }
-  }
-
-  for (void* ptr : allocated_ptrs) {
-    allocator.root()->Free(ptr);
-  }
-}
-
-TEST_P(PartitionAllocTest, FundamentalAlignment) {
-  // See the test above for details. Essentially, checking the bucket size is
-  // sufficient to ensure that alignment will always be respected, as long as
-  // the fundamental alignment is <= 16 bytes.
-  size_t fundamental_alignment = kAlignment;
-  for (size_t size = 0; size < SystemPageSize(); size++) {
-    // Allocate several pointers, as the first one in use in a size class will
-    // be aligned on a page boundary.
-    void* ptr = allocator.root()->Alloc(size, "");
-    void* ptr2 = allocator.root()->Alloc(size, "");
-    void* ptr3 = allocator.root()->Alloc(size, "");
-
-    EXPECT_EQ(UntagPtr(ptr) % fundamental_alignment, 0u);
-    EXPECT_EQ(UntagPtr(ptr2) % fundamental_alignment, 0u);
-    EXPECT_EQ(UntagPtr(ptr3) % fundamental_alignment, 0u);
-
-    uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-    // The capacity(C) is slot size - ExtraAllocSize(allocator).
-    // Since slot size is multiples of kAlignment,
-    // C % kAlignment == (slot_size - ExtraAllocSize(allocator)) % kAlignment.
-    // C % kAlignment == (-ExtraAllocSize(allocator)) % kAlignment.
-    // Since kCookieSize is a multiple of kAlignment,
-    // C % kAlignment == (-kInSlotRefCountBufferSize) % kAlignment
-    // == (kAlignment - kInSlotRefCountBufferSize) % kAlignment.
-    EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
-                  fundamental_alignment,
-              UseBRPPool()
-                  ? (-ExtraAllocSize(allocator) % fundamental_alignment)
-                  : 0);
-#else
-    EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
-                  fundamental_alignment,
-              -ExtraAllocSize(allocator) % fundamental_alignment);
-#endif
-
-    allocator.root()->Free(ptr);
-    allocator.root()->Free(ptr2);
-    allocator.root()->Free(ptr3);
-  }
-}
-
-void VerifyAlignment(PartitionRoot* root, size_t size, size_t alignment) {
-  std::vector<void*> allocated_ptrs;
-
-  for (int index = 0; index < 3; index++) {
-    void* ptr = root->AlignedAllocWithFlags(0, alignment, size);
-    ASSERT_TRUE(ptr);
-    allocated_ptrs.push_back(ptr);
-    EXPECT_EQ(0ull, UntagPtr(ptr) % alignment)
-        << (index + 1) << "-th allocation of size=" << size
-        << ", alignment=" << alignment;
-  }
-
-  for (void* ptr : allocated_ptrs) {
-    root->Free(ptr);
-  }
-}
-
-TEST_P(PartitionAllocTest, AlignedAllocations) {
-  size_t alloc_sizes[] = {1,
-                          10,
-                          100,
-                          1000,
-                          10000,
-                          60000,
-                          70000,
-                          130000,
-                          500000,
-                          900000,
-                          kMaxBucketed + 1,
-                          2 * kMaxBucketed,
-                          kSuperPageSize - 2 * PartitionPageSize(),
-                          4 * kMaxBucketed};
-  for (size_t alloc_size : alloc_sizes) {
-    for (size_t alignment = 1; alignment <= kMaxSupportedAlignment;
-         alignment <<= 1) {
-      VerifyAlignment(aligned_allocator.root(), alloc_size, alignment);
-
-      // Verify alignment on the regular allocator only when BRP is off, or when
-      // it's on in the "previous slot" mode. See the comment in SetUp().
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-      VerifyAlignment(allocator.root(), alloc_size, alignment);
-#endif
-    }
-  }
-}
-
-// Test that the optimized `GetSlotNumber` implementation produces valid
-// results.
-TEST_P(PartitionAllocTest, OptimizedGetSlotNumber) {
-  for (size_t i = 0; i < kNumBuckets; ++i) {
-    auto& bucket = allocator.root()->buckets[i];
-    if (SizeToIndex(bucket.slot_size) != i) {
-      continue;
-    }
-    for (size_t slot = 0, offset = 0; slot < bucket.get_slots_per_span();
-         ++slot, offset += bucket.slot_size) {
-      EXPECT_EQ(slot, bucket.GetSlotNumber(offset));
-      EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size / 2));
-      EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size - 1));
-    }
-  }
-}
-
-TEST_P(PartitionAllocTest, GetUsableSizeNull) {
-  EXPECT_EQ(0ULL, PartitionRoot::GetUsableSize(nullptr));
-}
-
-TEST_P(PartitionAllocTest, GetUsableSize) {
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-  allocator.root()->EnableMac11MallocSizeHackForTesting(
-      GetParam().ref_count_size);
-#endif
-  size_t delta = 31;
-  for (size_t size = 1; size <= kMinDirectMappedDownsize; size += delta) {
-    void* ptr = allocator.root()->Alloc(size, "");
-    EXPECT_TRUE(ptr);
-    size_t usable_size = PartitionRoot::GetUsableSize(ptr);
-    size_t usable_size_with_hack =
-        PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-    if (size != internal::kMac11MallocSizeHackRequestedSize)
-#endif
-      EXPECT_EQ(usable_size_with_hack, usable_size);
-    EXPECT_LE(size, usable_size);
-    memset(ptr, 0xDE, usable_size);
-    // Should not crash when freeing the ptr.
-    allocator.root()->Free(ptr);
-  }
-}
-
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-TEST_P(PartitionAllocTest, GetUsableSizeWithMac11MallocSizeHack) {
-  if (!internal::base::mac::IsOS11()) {
-    GTEST_SKIP() << "Skipping because the test is for Mac11.";
-  }
-
-  allocator.root()->EnableMac11MallocSizeHackForTesting(
-      GetParam().ref_count_size);
-  size_t size = internal::kMac11MallocSizeHackRequestedSize;
-  void* ptr = allocator.root()->Alloc(size, "");
-  size_t usable_size = PartitionRoot::GetUsableSize(ptr);
-  size_t usable_size_with_hack =
-      PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
-  EXPECT_EQ(usable_size,
-            allocator.root()->settings.mac11_malloc_size_hack_usable_size_);
-  EXPECT_EQ(usable_size_with_hack, size);
-
-  allocator.root()->Free(ptr);
-}
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-
-TEST_P(PartitionAllocTest, Bookkeeping) {
-  auto& root = *allocator.root();
-
-  EXPECT_EQ(0U, root.total_size_of_committed_pages);
-  EXPECT_EQ(0U, root.max_size_of_committed_pages);
-  EXPECT_EQ(0U, root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(0U, root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(0U, root.total_size_of_super_pages);
-  size_t small_size = 1000;
-
-  // A full slot span of size 1 partition page is committed.
-  void* ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
-  // Lazy commit commits only needed pages.
-  size_t expected_committed_size =
-      kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
-  size_t expected_super_pages_size = kSuperPageSize;
-  size_t expected_max_committed_size = expected_committed_size;
-  size_t bucket_index = SizeToIndex(small_size - ExtraAllocSize(allocator));
-  PartitionBucket* bucket = &root.buckets[bucket_index];
-  size_t expected_total_allocated_size = bucket->slot_size;
-  size_t expected_max_allocated_size = expected_total_allocated_size;
-
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Freeing memory doesn't result in decommitting pages right away.
-  root.Free(ptr);
-  expected_total_allocated_size = 0U;
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Allocating the same size lands it in the same slot span.
-  ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Freeing memory doesn't result in decommitting pages right away.
-  root.Free(ptr);
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Allocating another size commits another slot span.
-  ptr = root.Alloc(2 * small_size - ExtraAllocSize(allocator), type_name);
-  expected_committed_size +=
-      kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
-  expected_max_committed_size =
-      std::max(expected_max_committed_size, expected_committed_size);
-  expected_max_allocated_size =
-      std::max(expected_max_allocated_size, static_cast<size_t>(2048));
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Freeing memory doesn't result in decommitting pages right away.
-  root.Free(ptr);
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Single-slot slot spans...
-  //
-  // When the system page size is larger than 4KiB, we don't necessarily have
-  // enough space in the superpage to store two of the largest bucketed
-  // allocations, particularly when we reserve extra space for e.g. bitmaps.
-  // To avoid this, we use something just below kMaxBucketed.
-  size_t big_size = kMaxBucketed * 4 / 5 - SystemPageSize();
-
-  ASSERT_GT(big_size, MaxRegularSlotSpanSize());
-  ASSERT_LE(big_size, kMaxBucketed);
-  bucket_index = SizeToIndex(big_size - ExtraAllocSize(allocator));
-  bucket = &root.buckets[bucket_index];
-  // Assert the allocation doesn't fill the entire span or the entire
-  // partition page, to make the test more interesting.
-  ASSERT_LT(big_size, bucket->get_bytes_per_span());
-  ASSERT_NE(big_size % PartitionPageSize(), 0U);
-  ptr = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
-  expected_committed_size += bucket->get_bytes_per_span();
-  expected_max_committed_size =
-      std::max(expected_max_committed_size, expected_committed_size);
-  expected_total_allocated_size += bucket->get_bytes_per_span();
-  expected_max_allocated_size =
-      std::max(expected_max_allocated_size, expected_total_allocated_size);
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Allocating 2nd time doesn't overflow the super page...
-  void* ptr2 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
-  expected_committed_size += bucket->get_bytes_per_span();
-  expected_max_committed_size =
-      std::max(expected_max_committed_size, expected_committed_size);
-  expected_total_allocated_size += bucket->get_bytes_per_span();
-  expected_max_allocated_size =
-      std::max(expected_max_allocated_size, expected_total_allocated_size);
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // ... but 3rd time does.
-  void* ptr3 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
-  expected_committed_size += bucket->get_bytes_per_span();
-  expected_max_committed_size =
-      std::max(expected_max_committed_size, expected_committed_size);
-  expected_total_allocated_size += bucket->get_bytes_per_span();
-  expected_max_allocated_size =
-      std::max(expected_max_allocated_size, expected_total_allocated_size);
-  expected_super_pages_size += kSuperPageSize;
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Freeing memory doesn't result in decommitting pages right away.
-  root.Free(ptr);
-  root.Free(ptr2);
-  root.Free(ptr3);
-  expected_total_allocated_size -= 3 * bucket->get_bytes_per_span();
-  expected_max_allocated_size =
-      std::max(expected_max_allocated_size, expected_total_allocated_size);
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // Now everything should be decommitted. The reserved space for super pages
-  // stays the same and will never go away (by design).
-  root.PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-  expected_committed_size = 0;
-  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-  EXPECT_EQ(expected_total_allocated_size,
-            root.get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_max_allocated_size,
-            root.get_max_size_of_allocated_bytes());
-  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-
-  // None of the above should affect the direct map space.
-  EXPECT_EQ(0U, root.total_size_of_direct_mapped_pages);
-
-  size_t huge_sizes[] = {
-      kMaxBucketed + SystemPageSize(),
-      kMaxBucketed + SystemPageSize() + 123,
-      kSuperPageSize - PageAllocationGranularity(),
-      kSuperPageSize - SystemPageSize() - PartitionPageSize(),
-      kSuperPageSize - PartitionPageSize(),
-      kSuperPageSize - SystemPageSize(),
-      kSuperPageSize,
-      kSuperPageSize + SystemPageSize(),
-      kSuperPageSize + PartitionPageSize(),
-      kSuperPageSize + SystemPageSize() + PartitionPageSize(),
-      kSuperPageSize + PageAllocationGranularity(),
-      kSuperPageSize + DirectMapAllocationGranularity(),
-  };
-  size_t alignments[] = {
-      PartitionPageSize(),
-      2 * PartitionPageSize(),
-      kMaxSupportedAlignment / 2,
-      kMaxSupportedAlignment,
-  };
-  for (size_t huge_size : huge_sizes) {
-    ASSERT_GT(huge_size, kMaxBucketed);
-    for (size_t alignment : alignments) {
-      // For direct map, we commit only as many pages as needed.
-      size_t aligned_size = partition_alloc::internal::base::bits::AlignUp(
-          huge_size, SystemPageSize());
-      ptr = root.AllocWithFlagsInternal(
-          0, huge_size - ExtraAllocSize(allocator), alignment, type_name);
-      expected_committed_size += aligned_size;
-      expected_max_committed_size =
-          std::max(expected_max_committed_size, expected_committed_size);
-      expected_total_allocated_size += aligned_size;
-      expected_max_allocated_size =
-          std::max(expected_max_allocated_size, expected_total_allocated_size);
-      // The total reserved map includes metadata and guard pages at the ends.
-      // It also includes alignment. However, these would double count the first
-      // partition page, so it needs to be subtracted.
-      size_t surrounding_pages_size =
-          PartitionRoot::GetDirectMapMetadataAndGuardPagesSize() + alignment -
-          PartitionPageSize();
-      size_t expected_direct_map_size =
-          partition_alloc::internal::base::bits::AlignUp(
-              aligned_size + surrounding_pages_size,
-              DirectMapAllocationGranularity());
-      EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-      EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-      EXPECT_EQ(expected_total_allocated_size,
-                root.get_total_size_of_allocated_bytes());
-      EXPECT_EQ(expected_max_allocated_size,
-                root.get_max_size_of_allocated_bytes());
-      EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-      EXPECT_EQ(expected_direct_map_size,
-                root.total_size_of_direct_mapped_pages);
-
-      // Freeing memory in the direct map decommits pages right away. The
-      // address space is released for re-use too.
-      root.Free(ptr);
-      expected_committed_size -= aligned_size;
-      expected_direct_map_size = 0;
-      expected_max_committed_size =
-          std::max(expected_max_committed_size, expected_committed_size);
-      expected_total_allocated_size -= aligned_size;
-      expected_max_allocated_size =
-          std::max(expected_max_allocated_size, expected_total_allocated_size);
-      EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
-      EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
-      EXPECT_EQ(expected_total_allocated_size,
-                root.get_total_size_of_allocated_bytes());
-      EXPECT_EQ(expected_max_allocated_size,
-                root.get_max_size_of_allocated_bytes());
-      EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
-      EXPECT_EQ(expected_direct_map_size,
-                root.total_size_of_direct_mapped_pages);
-    }
-  }
-}
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-TEST_P(PartitionAllocTest, RefCountBasic) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  constexpr uint64_t kCookie = 0x1234567890ABCDEF;
-  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
-
-  size_t alloc_size = 64 - ExtraAllocSize(allocator);
-  uint64_t* ptr1 =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  EXPECT_TRUE(ptr1);
-
-  *ptr1 = kCookie;
-
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
-  EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
-
-  ref_count->Acquire();
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
-  EXPECT_EQ(*ptr1, kCookie);
-
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_FALSE(ref_count->IsAliveWithNoKnownRefs());
-
-  allocator.root()->Free(ptr1);
-  // The allocation shouldn't be reclaimed, and its contents should be zapped.
-  // Retag ptr1 to get its correct MTE tag.
-  ptr1 = TagPtr(ptr1);
-  EXPECT_NE(*ptr1, kCookie);
-  EXPECT_EQ(*ptr1, kQuarantined);
-
-  // The allocator should not reuse the original slot since its reference count
-  // doesn't equal zero.
-  uint64_t* ptr2 =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  EXPECT_NE(ptr1, ptr2);
-  allocator.root()->Free(ptr2);
-
-  // When the last reference is released, the slot should become reusable.
-  // Retag ref_count because PartitionAlloc retags ptr to enforce quarantine.
-  ref_count = TagPtr(ref_count);
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
-  uint64_t* ptr3 =
-      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
-  EXPECT_EQ(ptr1, ptr3);
-  allocator.root()->Free(ptr3);
-}
-
-void PartitionAllocTest::RunRefCountReallocSubtest(size_t orig_size,
-                                                   size_t new_size) {
-  void* ptr1 = allocator.root()->Alloc(orig_size, type_name);
-  EXPECT_TRUE(ptr1);
-
-  auto* ref_count1 =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
-  EXPECT_TRUE(ref_count1->IsAliveWithNoKnownRefs());
-
-  ref_count1->AcquireFromUnprotectedPtr();
-  EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
-
-  void* ptr2 = allocator.root()->Realloc(ptr1, new_size, type_name);
-  EXPECT_TRUE(ptr2);
-
-  // PartitionAlloc may retag memory areas on realloc (even if they
-  // do not move), so recover the true tag here.
-  ref_count1 = TagPtr(ref_count1);
-
-  // Re-query ref-count. It may have moved if Realloc changed the slot.
-  auto* ref_count2 =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr2));
-
-  if (UntagPtr(ptr1) == UntagPtr(ptr2)) {
-    // If the slot didn't change, ref-count should stay the same.
-    EXPECT_EQ(ref_count1, ref_count2);
-    EXPECT_FALSE(ref_count2->IsAliveWithNoKnownRefs());
-
-    EXPECT_FALSE(ref_count2->ReleaseFromUnprotectedPtr());
-  } else {
-    // If the allocation was moved to another slot, the old ref-count stayed
-    // in the same location in memory, is no longer alive, but still has a
-    // reference. The new ref-count is alive, but has no references.
-    EXPECT_NE(ref_count1, ref_count2);
-    EXPECT_FALSE(ref_count1->IsAlive());
-    EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
-    EXPECT_TRUE(ref_count2->IsAliveWithNoKnownRefs());
-
-    EXPECT_TRUE(ref_count1->ReleaseFromUnprotectedPtr());
-    PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
-  }
-
-  allocator.root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocTest, RefCountRealloc) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  size_t alloc_sizes[] = {500, 5000, 50000, 400000};
-
-  for (size_t alloc_size : alloc_sizes) {
-    alloc_size -= ExtraAllocSize(allocator);
-    RunRefCountReallocSubtest(alloc_size, alloc_size - 9);
-    RunRefCountReallocSubtest(alloc_size, alloc_size + 9);
-    RunRefCountReallocSubtest(alloc_size, alloc_size * 2);
-    RunRefCountReallocSubtest(alloc_size, alloc_size / 2);
-  }
-}
-
-int g_unretained_dangling_raw_ptr_detected_count = 0;
-
-class UnretainedDanglingRawPtrTest : public PartitionAllocTest {
- public:
-  void SetUp() override {
-    PartitionAllocTest::SetUp();
-    g_unretained_dangling_raw_ptr_detected_count = 0;
-    old_detected_fn_ = partition_alloc::GetUnretainedDanglingRawPtrDetectedFn();
-
-    partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
-        &UnretainedDanglingRawPtrTest::DanglingRawPtrDetected);
-    old_unretained_dangling_ptr_enabled_ =
-        partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(true);
-  }
-  void TearDown() override {
-    partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(old_detected_fn_);
-    partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(
-        old_unretained_dangling_ptr_enabled_);
-    PartitionAllocTest::TearDown();
-  }
-
- private:
-  static void DanglingRawPtrDetected(uintptr_t) {
-    g_unretained_dangling_raw_ptr_detected_count++;
-  }
-
-  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
-  bool old_unretained_dangling_ptr_enabled_;
-};
-
-INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
-                         UnretainedDanglingRawPtrTest,
-                         testing::ValuesIn(GetPartitionAllocTestParams()));
-
-TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrNoReport) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  ref_count->Acquire();
-  EXPECT_TRUE(ref_count->IsAlive());
-  // Allocation is still live, so calling ReportIfDangling() should not result
-  // in any detections.
-  ref_count->ReportIfDangling();
-  EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 0);
-  EXPECT_FALSE(ref_count->Release());
-  allocator.root()->Free(ptr);
-}
-
-TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrShouldReport) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_TRUE(ref_count->IsAlive());
-  allocator.root()->Free(ptr);
-  // At this point, memory shouldn't be alive...
-  EXPECT_FALSE(ref_count->IsAlive());
-  // ...and we should report the ptr as dangling.
-  ref_count->ReportIfDangling();
-  EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 1);
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-TEST_P(PartitionAllocTest, BackupRefPtrGuardRegion) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  size_t alignment = internal::PageAllocationGranularity();
-
-  uintptr_t requested_address;
-  memset(&requested_address, internal::kQuarantinedByte,
-         sizeof(requested_address));
-  requested_address = RoundDownToPageAllocationGranularity(requested_address);
-
-  uintptr_t allocated_address =
-      AllocPages(requested_address, alignment, alignment,
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWrite),
-                 PageTag::kPartitionAlloc);
-  EXPECT_NE(allocated_address, requested_address);
-
-  if (allocated_address) {
-    FreePages(allocated_address, alignment);
-  }
-}
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-
-// Allocate memory, and reference it from three raw_ptr<>. Two of them will be
-// left dangling.
-TEST_P(PartitionAllocTest, DanglingPtr) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  // Allocate memory, and reference it from 3 raw_ptr.
-  uint64_t* ptr = static_cast<uint64_t*>(
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  ref_count->Acquire();
-  ref_count->Acquire();
-  ref_count->Acquire();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The first raw_ptr stops referencing it, before the memory has been
-  // released.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-  // Free it. This creates two dangling pointers.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The first dangling raw_ptr stops referencing it.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The second dangling raw_ptr stops referencing it.
-  EXPECT_TRUE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-#else
-  // Free it. This creates two dangling pointers.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The first dangling raw_ptr stops referencing it.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
-
-  // The second dangling raw_ptr stops referencing it.
-  EXPECT_TRUE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
-#endif
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-// Allocate memory, and reference it from three
-// raw_ptr<T, DisableDanglingPtrDetection>. Two of them will be left dangling.
-// This doesn't trigger any dangling raw_ptr checks.
-TEST_P(PartitionAllocTest, DanglingDanglingPtr) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  // Allocate memory, and reference it from 3 raw_ptr.
-  uint64_t* ptr = static_cast<uint64_t*>(
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  ref_count->AcquireFromUnprotectedPtr();
-  ref_count->AcquireFromUnprotectedPtr();
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The first raw_ptr<T, DisableDanglingPtrDetection> stops referencing it,
-  // before the memory has been released.
-  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // Free it. This creates two dangling raw_ptr<T, DisableDanglingPtrDetection>.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The first dangling raw_ptr<T, DisableDanglingPtrDetection> stops
-  // referencing it.
-  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The second dangling raw_ptr<T, DisableDanglingPtrDetection> stops
-  // referencing it.
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-// When 'free' is called, one raw_ptr<> and one
-// raw_ptr<T, DisableDanglingPtrDetection> remain. The raw_ptr<> is released
-// first.
-TEST_P(PartitionAllocTest, DanglingMixedReleaseRawPtrFirst) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  uint64_t* ptr = static_cast<uint64_t*>(
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
-  ref_count->AcquireFromUnprotectedPtr();
-  ref_count->Acquire();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-  // Free it.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<> stops referencing it.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-#else
-  // Free it.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<> stops referencing it.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
-#endif
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-// When 'free' is called, one raw_ptr<> and one
-// raw_ptr<T, DisableDanglingPtrDetection> remain.
-// The raw_ptr<T, DisableDanglingPtrDetection> is released first. This
-// triggers the dangling raw_ptr<> checks.
-TEST_P(PartitionAllocTest, DanglingMixedReleaseDanglingPtrFirst) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  void* ptr =
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
-  ref_count->AcquireFromUnprotectedPtr();
-  ref_count->Acquire();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-  // Free it.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<> stops referencing it.
-  EXPECT_TRUE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-#else
-  // Free it.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<> stops referencing it.
-  EXPECT_TRUE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
-#endif
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-// When 'free' is called, one raw_ptr<T, DisableDanglingPtrDetection> remains;
-// it is then used to acquire one dangling raw_ptr<>. The raw_ptr<> is released
-// first.
-TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtr) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  void* ptr =
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // Free it once.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // A raw_ptr<> starts referencing it.
-  ref_count->Acquire();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<> stops referencing it.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-// Same as 'DanglingPtrUsedToAcquireNewRawPtr', but release the
-// raw_ptr<T, DisableDanglingPtrDetection> before the raw_ptr<>.
-TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtrVariant) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  void* ptr =
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // Free it.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // A raw_ptr<> starts referencing it.
-  ref_count->Acquire();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<> stops referencing it.
-  EXPECT_TRUE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-// Acquire a raw_ptr<T>, and release it before freeing memory. In the
-// background, there is one raw_ptr<T, DisableDanglingPtrDetection>. This
-// doesn't trigger any dangling raw_ptr<T> checks.
-TEST_P(PartitionAllocTest, RawPtrReleasedBeforeFree) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  CountDanglingRawPtr dangling_checks;
-
-  void* ptr =
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
-  ref_count->Acquire();
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // Release the raw_ptr<>.
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // Free it.
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
-  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
-  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
-
-  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
-}
-
-#if defined(PA_HAS_DEATH_TESTS)
-// DCHECK messages are stripped in official builds, which causes death tests
-// with matchers to fail.
-#if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
-
-// Acquire() once, Release() twice => CRASH
-TEST_P(PartitionAllocDeathTest, ReleaseUnderflowRawPtr) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  void* ptr =
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  ref_count->Acquire();
-  EXPECT_FALSE(ref_count->Release());
-  EXPECT_DCHECK_DEATH(ref_count->Release());
-  allocator.root()->Free(ptr);
-}
-
-// AcquireFromUnprotectedPtr() once, ReleaseFromUnprotectedPtr() twice => CRASH
-TEST_P(PartitionAllocDeathTest, ReleaseUnderflowDanglingPtr) {
-  if (!UseBRPPool()) {
-    return;
-  }
-
-  void* ptr =
-      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
-  auto* ref_count =
-      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
-  ref_count->AcquireFromUnprotectedPtr();
-  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
-  EXPECT_DCHECK_DEATH(ref_count->ReleaseFromUnprotectedPtr());
-  allocator.root()->Free(ptr);
-}
-
-#endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
-#endif  // defined(PA_HAS_DEATH_TESTS)
-#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-
-TEST_P(PartitionAllocTest, ReservationOffset) {
-  // For normal buckets, offset should be kOffsetTagNormalBuckets.
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  uintptr_t address = UntagPtr(ptr);
-  EXPECT_EQ(kOffsetTagNormalBuckets, *ReservationOffsetPointer(address));
-  allocator.root()->Free(ptr);
-
-  // For direct-map allocations, the offset increases by 1 per super page.
-  size_t large_size = kSuperPageSize * 5 + PartitionPageSize() * .5f;
-  ASSERT_GT(large_size, kMaxBucketed);
-  ptr = allocator.root()->Alloc(large_size, type_name);
-  EXPECT_TRUE(ptr);
-  address = UntagPtr(ptr);
-  EXPECT_EQ(0U, *ReservationOffsetPointer(address));
-  EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
-  EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
-  EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
-  EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
-  EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
-
-  // In-place realloc doesn't affect the offsets.
-  void* new_ptr = allocator.root()->Realloc(ptr, large_size * .8, type_name);
-  EXPECT_EQ(new_ptr, ptr);
-  EXPECT_EQ(0U, *ReservationOffsetPointer(address));
-  EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
-  EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
-  EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
-  EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
-  EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
-
-  allocator.root()->Free(ptr);
-  // After free, the offsets must be kOffsetTagNotAllocated.
-  EXPECT_EQ(kOffsetTagNotAllocated, *ReservationOffsetPointer(address));
-  EXPECT_EQ(kOffsetTagNotAllocated,
-            *ReservationOffsetPointer(address + kSuperPageSize));
-  EXPECT_EQ(kOffsetTagNotAllocated,
-            *ReservationOffsetPointer(address + kSuperPageSize * 2));
-  EXPECT_EQ(kOffsetTagNotAllocated,
-            *ReservationOffsetPointer(address + kSuperPageSize * 3));
-  EXPECT_EQ(kOffsetTagNotAllocated,
-            *ReservationOffsetPointer(address + kSuperPageSize * 4));
-  EXPECT_EQ(kOffsetTagNotAllocated,
-            *ReservationOffsetPointer(address + kSuperPageSize * 5));
-}
-
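-// Rough sketch of how the offsets above are assumed to be consumed (the test
-// below exercises GetDirectMapReservationStart(), which this mirrors; the
-// exact formula is an assumption, not asserted here): since direct-map super
-// pages store offsets 0, 1, 2, ..., the reservation start can be recovered
-// from any inner address along the lines of
-//
-//   uintptr_t super_page = address & kSuperPageBaseMask;
-//   uintptr_t reservation_start =
-//       super_page - *ReservationOffsetPointer(address) * kSuperPageSize;
-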
-TEST_P(PartitionAllocTest, GetReservationStart) {
-  size_t large_size = kSuperPageSize * 3 + PartitionPageSize() * .5f;
-  ASSERT_GT(large_size, kMaxBucketed);
-  void* ptr = allocator.root()->Alloc(large_size, type_name);
-  EXPECT_TRUE(ptr);
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  uintptr_t reservation_start = slot_start - PartitionPageSize();
-  EXPECT_EQ(0U, reservation_start & DirectMapAllocationGranularityOffsetMask());
-
-  uintptr_t address = UntagPtr(ptr);
-  for (uintptr_t a = address; a < address + large_size; ++a) {
-    uintptr_t address2 = GetDirectMapReservationStart(a) + PartitionPageSize();
-    EXPECT_EQ(slot_start, address2);
-  }
-
-  EXPECT_EQ(reservation_start, GetDirectMapReservationStart(slot_start));
-
-  allocator.root()->Free(ptr);
-}
-
-TEST_P(PartitionAllocTest, CheckReservationType) {
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  EXPECT_TRUE(ptr);
-  uintptr_t address = UntagPtr(ptr);
-  uintptr_t address_to_check = address;
-  EXPECT_FALSE(IsReservationStart(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  address_to_check = address + kTestAllocSize - 1;
-  EXPECT_FALSE(IsReservationStart(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  address_to_check =
-      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
-  EXPECT_TRUE(IsReservationStart(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  allocator.root()->Free(ptr);
-  // Freeing keeps a normal-bucket super page in memory.
-  address_to_check =
-      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
-  EXPECT_TRUE(IsReservationStart(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-
-  size_t large_size = 2 * kSuperPageSize;
-  ASSERT_GT(large_size, kMaxBucketed);
-  ptr = allocator.root()->Alloc(large_size, type_name);
-  EXPECT_TRUE(ptr);
-  address = UntagPtr(ptr);
-  address_to_check = address;
-  EXPECT_FALSE(IsReservationStart(address_to_check));
-  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  address_to_check =
-      partition_alloc::internal::base::bits::AlignUp(address, kSuperPageSize);
-  EXPECT_FALSE(IsReservationStart(address_to_check));
-  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  address_to_check = address + large_size - 1;
-  EXPECT_FALSE(IsReservationStart(address_to_check));
-  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  address_to_check =
-      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
-  EXPECT_TRUE(IsReservationStart(address_to_check));
-  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
-  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-  allocator.root()->Free(ptr);
-  // Freeing releases direct-map super pages.
-  address_to_check =
-      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Expect to DCHECK on unallocated region.
-  EXPECT_DEATH_IF_SUPPORTED(IsReservationStart(address_to_check), "");
-#endif
-  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
-  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
-  EXPECT_FALSE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
-}
-
-// Test for crash http://crbug.com/1169003.
-TEST_P(PartitionAllocTest, CrossPartitionRootRealloc) {
-  // The size is large enough to be satisfied from a single-slot slot span.
-  size_t test_size = MaxRegularSlotSpanSize() - ExtraAllocSize(allocator);
-  void* ptr = allocator.root()->AllocWithFlags(AllocFlags::kReturnNull,
-                                               test_size, nullptr);
-  EXPECT_TRUE(ptr);
-
-  // Create new root and call PurgeMemory to simulate ConfigurePartitions().
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-                                PurgeFlags::kDiscardUnusedSystemPages);
-  std::unique_ptr<PartitionRoot> new_root = CreateCustomTestRoot(
-      PartitionOptions{
-          .ref_count_size = GetParam().ref_count_size,
-      },
-      PartitionTestOptions{.set_bucket_distribution = true});
-
-  // Realloc from |allocator.root()| into |new_root|.
-  void* ptr2 = new_root->ReallocWithFlags(AllocFlags::kReturnNull, ptr,
-                                          test_size + 1024, nullptr);
-  EXPECT_TRUE(ptr2);
-  PA_EXPECT_PTR_NE(ptr, ptr2);
-}
-
-TEST_P(PartitionAllocTest, FastPathOrReturnNull) {
-  size_t allocation_size = 64;
-  // The very first allocation is never a fast path one, since it needs a new
-  // super page and a new partition page.
-  EXPECT_FALSE(allocator.root()->AllocWithFlags(
-      AllocFlags::kFastPathOrReturnNull, allocation_size, ""));
-  void* ptr = allocator.root()->AllocWithFlags(0, allocation_size, "");
-  ASSERT_TRUE(ptr);
-
-  // The next one is, since the partition page has been activated.
-  void* ptr2 = allocator.root()->AllocWithFlags(
-      AllocFlags::kFastPathOrReturnNull, allocation_size, "");
-  EXPECT_TRUE(ptr2);
-
-  // The first allocation from a different bucket is slow.
-  EXPECT_FALSE(allocator.root()->AllocWithFlags(
-      AllocFlags::kFastPathOrReturnNull, 2 * allocation_size, ""));
-
-  size_t allocated_size = 2 * allocation_size;
-  std::vector<void*> ptrs;
-  while (void* new_ptr = allocator.root()->AllocWithFlags(
-             AllocFlags::kFastPathOrReturnNull, allocation_size, "")) {
-    ptrs.push_back(new_ptr);
-    allocated_size += allocation_size;
-  }
-  EXPECT_LE(allocated_size,
-            PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan);
-
-  for (void* ptr_to_free : ptrs) {
-    allocator.root()->FreeNoHooks(ptr_to_free);
-  }
-
-  allocator.root()->FreeNoHooks(ptr);
-  allocator.root()->FreeNoHooks(ptr2);
-}
-
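-// Illustrative usage sketch for AllocFlags::kFastPathOrReturnNull (the
-// fallback policy is an assumption, not something the test above asserts):
-// callers that cannot afford the slow path inline try the fast path first and
-// fall back to a regular allocation otherwise.
-//
-//   void* p = allocator.root()->AllocWithFlags(
-//       AllocFlags::kFastPathOrReturnNull, size, "");
-//   if (!p) {
-//     // Slow path: may take the lock, commit pages, etc.
-//     p = allocator.root()->AllocWithFlags(0, size, "");
-//   }
-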
-#if defined(PA_HAS_DEATH_TESTS)
-// DCHECK messages are stripped in official builds, which causes death tests
-// with matchers to fail.
-#if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
-
-TEST_P(PartitionAllocDeathTest, CheckTriggered) {
-  EXPECT_DCHECK_DEATH_WITH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
-  EXPECT_DEATH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
-}
-
-#endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
-#endif  // defined(PA_HAS_DEATH_TESTS)
-
-// Not on Chromecast, since gtest considers extra output from itself to be a
-// test failure:
-// https://ci.chromium.org/ui/p/chromium/builders/ci/Cast%20Audio%20Linux/98492/overview
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_HAS_DEATH_TESTS) && \
-    !BUILDFLAG(PA_IS_CASTOS)
-
-namespace {
-
-PA_NOINLINE void FreeForTest(void* data) {
-  free(data);
-}
-
-class ThreadDelegateForPreforkHandler
-    : public base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForPreforkHandler(std::atomic<bool>& please_stop,
-                                  std::atomic<int>& started_threads,
-                                  const int alloc_size)
-      : please_stop_(please_stop),
-        started_threads_(started_threads),
-        alloc_size_(alloc_size) {}
-
-  void ThreadMain() override {
-    started_threads_++;
-    while (!please_stop_.load(std::memory_order_relaxed)) {
-      void* ptr = malloc(alloc_size_);
-
-      // A simple malloc() / free() pair can be discarded by the compiler (and
-      // is), making the test fail. Making |FreeForTest()| a PA_NOINLINE
-      // function is both necessary and sufficient to keep the call from being
-      // eliminated.
-      FreeForTest(ptr);
-    }
-  }
-
- private:
-  std::atomic<bool>& please_stop_;
-  std::atomic<int>& started_threads_;
-  const int alloc_size_;
-};
-
-}  // namespace
-
-// Disabled because executing it causes Gtest to show a warning in the output,
-// which confuses the runner on some platforms, making the test report an
-// "UNKNOWN" status even though it succeeded.
-TEST_P(PartitionAllocTest, DISABLED_PreforkHandler) {
-  std::atomic<bool> please_stop;
-  std::atomic<int> started_threads{0};
-
-  // Continuously allocates / frees memory, bypassing the thread cache. This
-  // makes it likely that one of these threads owns the allocator lock at fork
-  // time, in which case the EXPECT_EXIT() part would deadlock without the
-  // prefork handler.
-  constexpr size_t kAllocSize = ThreadCache::kLargeSizeThreshold + 1;
-  ThreadDelegateForPreforkHandler delegate(please_stop, started_threads,
-                                           kAllocSize);
-
-  constexpr int kThreads = 4;
-  base::PlatformThreadHandle thread_handles[kThreads];
-  for (auto& thread_handle : thread_handles) {
-    base::PlatformThreadForTesting::Create(0, &delegate, &thread_handle);
-  }
-  // Make sure all threads are actually already running.
-  while (started_threads != kThreads) {
-  }
-
-  EXPECT_EXIT(
-      {
-        void* ptr = malloc(kAllocSize);
-        FreeForTest(ptr);
-        exit(1);
-      },
-      ::testing::ExitedWithCode(1), "");
-
-  please_stop.store(true);
-  for (auto& thread_handle : thread_handles) {
-    base::PlatformThreadForTesting::Join(thread_handle);
-  }
-}
-
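-// For context (an assumption about the mechanism under test, not asserted by
-// the test itself): allocators typically guard against this fork hazard by
-// registering handlers along the lines of
-//
-//   pthread_atfork(&BeforeFork, &AfterForkInParent, &AfterForkInChild);
-//
-// where BeforeFork() acquires the allocator locks, so the child created by
-// EXPECT_EXIT() never inherits a lock held by a thread that does not exist in
-// the child process.
-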
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
-        // defined(PA_HAS_DEATH_TESTS) && !BUILDFLAG(PA_IS_CASTOS)
-
-// Checks the bucket index logic.
-TEST_P(PartitionAllocTest, GetIndex) {
-  BucketIndexLookup lookup{};
-
-  for (size_t size = 0; size < kMaxBucketed; size++) {
-    size_t index = BucketIndexLookup::GetIndex(size);
-    ASSERT_GE(lookup.bucket_sizes()[index], size);
-  }
-
-  // Make sure that power-of-two sizes have exactly matching buckets.
-  for (size_t size = (1 << (kMinBucketedOrder - 1)); size < kMaxBucketed;
-       size <<= 1) {
-    size_t index = BucketIndexLookup::GetIndex(size);
-    ASSERT_EQ(lookup.bucket_sizes()[index], size);
-  }
-}
-
-// Used to check alignment. If the compiler understands the annotations, the
-// zeroing in the constructor uses aligned SIMD instructions.
-TEST_P(PartitionAllocTest, MallocFunctionAnnotations) {
-  struct TestStruct {
-    uint64_t a = 0;
-    uint64_t b = 0;
-  };
-
-  void* buffer = Alloc(sizeof(TestStruct));
-  // Should use "mov*a*ps" on x86_64.
-  auto* x = new (buffer) TestStruct();
-
-  EXPECT_EQ(x->a, 0u);
-  Free(buffer);
-}
-
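-// (Illustrative assumption, not asserted by the test above: the "annotations"
-// referred to are compiler attributes along the lines of
-// __attribute__((malloc)) and __attribute__((assume_aligned(N))) on the
-// allocation entry points, which let the compiler assume the returned pointer
-// is suitably aligned and emit aligned stores such as movaps.)
-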
-// Test that the ConfigurablePool works properly.
-TEST_P(PartitionAllocTest, ConfigurablePool) {
-  EXPECT_FALSE(IsConfigurablePoolAvailable());
-
-  // The rest is only applicable to 64-bit mode
-#if defined(ARCH_CPU_64_BITS)
-  // Repeat the test for every possible Pool size
-  const size_t max_pool_size = PartitionAddressSpace::ConfigurablePoolMaxSize();
-  const size_t min_pool_size = PartitionAddressSpace::ConfigurablePoolMinSize();
-  for (size_t pool_size = max_pool_size; pool_size >= min_pool_size;
-       pool_size /= 2) {
-    PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(pool_size));
-    EXPECT_FALSE(IsConfigurablePoolAvailable());
-    uintptr_t pool_base =
-        AllocPages(pool_size, pool_size,
-                   PageAccessibilityConfiguration(
-                       PageAccessibilityConfiguration::kInaccessible),
-                   PageTag::kPartitionAlloc);
-    EXPECT_NE(0u, pool_base);
-    PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
-
-    EXPECT_TRUE(IsConfigurablePoolAvailable());
-
-    std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
-        PartitionOptions{
-            .use_configurable_pool =
-                PartitionOptions::UseConfigurablePool::kIfAvailable,
-            .ref_count_size = GetParam().ref_count_size,
-        },
-        PartitionTestOptions{.uncap_empty_slot_span_memory = true,
-                             .set_bucket_distribution = true});
-
-    const size_t count = 250;
-    std::vector<void*> allocations(count, nullptr);
-    for (size_t i = 0; i < count; ++i) {
-      const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
-      allocations[i] = root->Alloc(size, nullptr);
-      EXPECT_NE(nullptr, allocations[i]);
-      // We don't Untag allocations here because MTE is disabled for
-      // configurable pools used by V8.
-      // https://bugs.chromium.org/p/v8/issues/detail?id=13117
-      uintptr_t allocation_base = reinterpret_cast<uintptr_t>(allocations[i]);
-      EXPECT_EQ(allocation_base, UntagPtr(allocations[i]));
-      EXPECT_TRUE(allocation_base >= pool_base &&
-                  allocation_base < pool_base + pool_size);
-    }
-
-    PartitionAddressSpace::UninitConfigurablePoolForTesting();
-    FreePages(pool_base, pool_size);
-  }
-
-#endif  // defined(ARCH_CPU_64_BITS)
-}
-
-TEST_P(PartitionAllocTest, EmptySlotSpanSizeIsCapped) {
-  // Use another root, since the ones from the test harness disable the empty
-  // slot span size cap.
-  std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
-      PartitionOptions{
-          .ref_count_size = GetParam().ref_count_size,
-      },
-      PartitionTestOptions{.set_bucket_distribution = true});
-
-  // Allocate some memory and don't free it, to keep memory committed.
-  std::vector<void*> allocated_memory;
-  const size_t size = SystemPageSize();
-  const size_t count = 400;
-  for (size_t i = 0; i < count; i++) {
-    void* ptr = root->Alloc(size, "");
-    allocated_memory.push_back(ptr);
-  }
-  ASSERT_GE(root->total_size_of_committed_pages.load(std::memory_order_relaxed),
-            size * count);
-
-  // To create empty slot spans, allocate from single-slot slot spans, 128 KiB
-  // at a time.
-  std::vector<void*> single_slot_allocated_memory;
-  constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize - 1;
-  const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
-  // Make sure that even with allocation size rounding up, a single allocation
-  // is still below the threshold.
-  ASSERT_LT(MaxRegularSlotSpanSize() * 2,
-            ((count * size) >> root->max_empty_slot_spans_dirty_bytes_shift));
-  for (size_t i = 0; i < single_slot_count; i++) {
-    void* ptr = root->Alloc(single_slot_size, "");
-    single_slot_allocated_memory.push_back(ptr);
-  }
-
-  // Free everything at once, creating as many empty slot spans as there are
-  // allocations (since they are from single-slot slot spans).
-  for (void* ptr : single_slot_allocated_memory) {
-    root->Free(ptr);
-  }
-
-  // Still have some committed empty slot spans.
-  // PA_TS_UNCHECKED_READ() is not an issue here, since everything is
-  // single-threaded.
-  EXPECT_GT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
-  // But not all, as the cap triggered.
-  EXPECT_LT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
-            single_slot_count * single_slot_size);
-
-  // Nothing left after explicit purge.
-  root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
-
-  for (void* ptr : allocated_memory) {
-    root->Free(ptr);
-  }
-}
-
-TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
-  std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
-      PartitionOptions{
-          .use_configurable_pool =
-              PartitionOptions::UseConfigurablePool::kIfAvailable,
-          .ref_count_size = GetParam().ref_count_size,
-      },
-      PartitionTestOptions{.uncap_empty_slot_span_memory = true,
-                           .set_bucket_distribution = true});
-
-  std::vector<void*> single_slot_allocated_memory;
-  constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize + 10;
-  const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
-  const size_t bucket_size =
-      root->buckets[SizeToIndex(single_slot_size)].slot_size;
-
-  for (size_t i = 0; i < single_slot_count; i++) {
-    void* ptr = root->Alloc(single_slot_size, "");
-    single_slot_allocated_memory.push_back(ptr);
-  }
-
-  // Free everything at once, creating as many empty slot spans as there are
-  // allocations (since they are from single-slot slot spans).
-  for (void* ptr : single_slot_allocated_memory) {
-    root->Free(ptr);
-  }
-  single_slot_allocated_memory.clear();
-
-  // Some of the free()-s above overflowed the slot span ring.
-  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
-            kDefaultEmptySlotSpanRingSize * bucket_size);
-
-  // Now we can cache more slot spans.
-  root->EnableLargeEmptySlotSpanRing();
-
-  constexpr size_t single_slot_large_count = kDefaultEmptySlotSpanRingSize + 10;
-  for (size_t i = 0; i < single_slot_large_count; i++) {
-    void* ptr = root->Alloc(single_slot_size, "");
-    single_slot_allocated_memory.push_back(ptr);
-  }
-
-  for (void* ptr : single_slot_allocated_memory) {
-    root->Free(ptr);
-  }
-  single_slot_allocated_memory.clear();
-
-  // No overflow this time.
-  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
-            single_slot_large_count * bucket_size);
-
-  constexpr size_t single_slot_too_many_count = kMaxFreeableSpans + 10;
-  for (size_t i = 0; i < single_slot_too_many_count; i++) {
-    void* ptr = root->Alloc(single_slot_size, "");
-    single_slot_allocated_memory.push_back(ptr);
-  }
-
-  for (void* ptr : single_slot_allocated_memory) {
-    root->Free(ptr);
-  }
-  single_slot_allocated_memory.clear();
-
-  // Overflow still works.
-  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
-            kMaxFreeableSpans * bucket_size);
-}
-
-#if BUILDFLAG(PA_IS_CAST_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-extern "C" {
-void* __real_malloc(size_t);
-}  // extern "C"
-
-TEST_P(PartitionAllocTest, HandleMixedAllocations) {
-  void* ptr = __real_malloc(12);
-  // Should not crash, no test assertion.
-  free(ptr);
-}
-#endif
-
-TEST_P(PartitionAllocTest, SortFreelist) {
-  const size_t count = 100;
-  const size_t allocation_size = 1;
-  void* first_ptr = allocator.root()->Alloc(allocation_size, "");
-
-  std::vector<void*> allocations;
-  for (size_t i = 0; i < count; ++i) {
-    allocations.push_back(allocator.root()->Alloc(allocation_size, ""));
-  }
-
-  // Shuffle and free memory out of order.
-  std::random_device rd;
-  std::mt19937 generator(rd());
-  std::shuffle(allocations.begin(), allocations.end(), generator);
-
-  // Keep one allocation alive (first_ptr), so that the SlotSpan is not fully
-  // empty.
-  for (void* ptr : allocations) {
-    allocator.root()->Free(ptr);
-  }
-  allocations.clear();
-
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-
-  size_t bucket_index =
-      SizeToIndex(allocation_size + ExtraAllocSize(allocator));
-  auto& bucket = allocator.root()->buckets[bucket_index];
-  EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
-
-  // Can sort again.
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
-
-  for (size_t i = 0; i < count; ++i) {
-    allocations.push_back(allocator.root()->Alloc(allocation_size, ""));
-    // Allocating keeps the freelist sorted.
-    EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
-  }
-
-  // Check that it is sorted.
-  for (size_t i = 1; i < allocations.size(); i++) {
-    EXPECT_LT(UntagPtr(allocations[i - 1]), UntagPtr(allocations[i]));
-  }
-
-  for (void* ptr : allocations) {
-    allocator.root()->Free(ptr);
-    // Free()-ing memory destroys order. Not looking at the head of the active
-    // list, as it is not necessarily the one |ptr| came from.
-    auto* slot_span =
-        SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
-    EXPECT_FALSE(slot_span->freelist_is_sorted());
-  }
-
-  allocator.root()->Free(first_ptr);
-}
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_LINUX) && \
-    defined(ARCH_CPU_64_BITS)
-TEST_P(PartitionAllocTest, CrashOnUnknownPointer) {
-  int not_a_heap_object = 42;
-  EXPECT_DEATH(allocator.root()->Free(&not_a_heap_object), "");
-}
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
-        // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_64_BITS)
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
-
-// Adapted from crashpad tests.
-class ScopedOpenCLNoOpKernel {
- public:
-  ScopedOpenCLNoOpKernel()
-      : context_(nullptr),
-        program_(nullptr),
-        kernel_(nullptr),
-        success_(false) {}
-
-  ScopedOpenCLNoOpKernel(const ScopedOpenCLNoOpKernel&) = delete;
-  ScopedOpenCLNoOpKernel& operator=(const ScopedOpenCLNoOpKernel&) = delete;
-
-  ~ScopedOpenCLNoOpKernel() {
-    if (kernel_) {
-      cl_int rv = clReleaseKernel(kernel_);
-      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseKernel";
-    }
-
-    if (program_) {
-      cl_int rv = clReleaseProgram(program_);
-      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseProgram";
-    }
-
-    if (context_) {
-      cl_int rv = clReleaseContext(context_);
-      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseContext";
-    }
-  }
-
-  void SetUp() {
-    cl_platform_id platform_id;
-    cl_int rv = clGetPlatformIDs(1, &platform_id, nullptr);
-    ASSERT_EQ(rv, CL_SUCCESS) << "clGetPlatformIDs";
-    cl_device_id device_id;
-    rv =
-        clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, nullptr);
-#if defined(ARCH_CPU_ARM64)
-    // CL_DEVICE_TYPE_CPU doesn’t seem to work at all on arm64, meaning that
-    // these weird OpenCL modules probably don’t show up there at all. Keep this
-    // test even on arm64 in case this ever does start working.
-    if (rv == CL_INVALID_VALUE) {
-      return;
-    }
-#endif  // ARCH_CPU_ARM64
-    ASSERT_EQ(rv, CL_SUCCESS) << "clGetDeviceIDs";
-
-    context_ = clCreateContext(nullptr, 1, &device_id, nullptr, nullptr, &rv);
-    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateContext";
-
-    const char* sources[] = {
-        "__kernel void NoOp(void) {barrier(CLK_LOCAL_MEM_FENCE);}",
-    };
-    const size_t source_lengths[] = {
-        strlen(sources[0]),
-    };
-    static_assert(std::size(sources) == std::size(source_lengths),
-                  "arrays must be parallel");
-
-    program_ = clCreateProgramWithSource(context_, std::size(sources), sources,
-                                         source_lengths, &rv);
-    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateProgramWithSource";
-
-    rv = clBuildProgram(program_, 1, &device_id, "-cl-opt-disable", nullptr,
-                        nullptr);
-    ASSERT_EQ(rv, CL_SUCCESS) << "clBuildProgram";
-
-    kernel_ = clCreateKernel(program_, "NoOp", &rv);
-    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateKernel";
-
-    success_ = true;
-  }
-
-  bool success() const { return success_; }
-
- private:
-  cl_context context_;
-  cl_program program_;
-  cl_kernel kernel_;
-  bool success_;
-};
-
-// On macOS 10.11, allocations are made with PartitionAlloc, but the pointer
-// is incorrectly passed by CoreFoundation to the previous default zone,
-// causing crashes. This test is intended to detect regressions of these issues
-// in future versions of macOS.
-TEST_P(PartitionAllocTest, OpenCL) {
-  ScopedOpenCLNoOpKernel kernel;
-  kernel.SetUp();
-#if !defined(ARCH_CPU_ARM64)
-  ASSERT_TRUE(kernel.success());
-#endif
-}
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
-        // BUILDFLAG(IS_MAC)
-
-TEST_P(PartitionAllocTest, SmallSlotSpanWaste) {
-  for (PartitionRoot::Bucket& bucket : allocator.root()->buckets) {
-    const size_t slot_size = bucket.slot_size;
-    if (slot_size == kInvalidBucketSize) {
-      continue;
-    }
-
-    size_t small_system_page_count =
-        partition_alloc::internal::ComputeSystemPagesPerSlotSpan(
-            bucket.slot_size, true);
-    size_t small_waste =
-        (small_system_page_count * SystemPageSize()) % slot_size;
-
-    EXPECT_LT(small_waste, .05 * SystemPageSize());
-    if (slot_size <= MaxRegularSlotSpanSize()) {
-      EXPECT_LE(small_system_page_count, MaxSystemPagesPerRegularSlotSpan());
-    }
-  }
-}
-
-TEST_P(PartitionAllocTest, SortActiveSlotSpans) {
-  auto run_test = [](size_t count) {
-    PartitionBucket bucket;
-    bucket.Init(16);
-    bucket.active_slot_spans_head = nullptr;
-
-    std::vector<SlotSpanMetadata> slot_spans;
-    slot_spans.reserve(count);
-
-    // Add slot spans with random freelist length.
-    for (size_t i = 0; i < count; i++) {
-      slot_spans.emplace_back(&bucket);
-      auto& slot_span = slot_spans.back();
-      slot_span.num_unprovisioned_slots =
-          partition_alloc::internal::base::RandGenerator(
-              bucket.get_slots_per_span() / 2);
-      slot_span.num_allocated_slots =
-          partition_alloc::internal::base::RandGenerator(
-              bucket.get_slots_per_span() - slot_span.num_unprovisioned_slots);
-      slot_span.next_slot_span = bucket.active_slot_spans_head;
-      bucket.active_slot_spans_head = &slot_span;
-    }
-
-    bucket.SortActiveSlotSpans();
-
-    std::set<SlotSpanMetadata*> seen_slot_spans;
-    std::vector<SlotSpanMetadata*> sorted_slot_spans;
-    for (auto* slot_span = bucket.active_slot_spans_head; slot_span;
-         slot_span = slot_span->next_slot_span) {
-      sorted_slot_spans.push_back(slot_span);
-      seen_slot_spans.insert(slot_span);
-    }
-
-    // None repeated, none missing.
-    EXPECT_EQ(seen_slot_spans.size(), sorted_slot_spans.size());
-    EXPECT_EQ(seen_slot_spans.size(), slot_spans.size());
-
-    // The first slot spans are sorted.
-    size_t sorted_spans_count =
-        std::min(PartitionBucket::kMaxSlotSpansToSort, count);
-    EXPECT_TRUE(std::is_sorted(sorted_slot_spans.begin(),
-                               sorted_slot_spans.begin() + sorted_spans_count,
-                               partition_alloc::internal::CompareSlotSpans));
-
-    // Slot spans with no freelist entries are at the end of the sorted run.
-    auto has_empty_freelist = [](SlotSpanMetadata* a) {
-      return a->GetFreelistLength() == 0;
-    };
-    auto it = std::find_if(sorted_slot_spans.begin(),
-                           sorted_slot_spans.begin() + sorted_spans_count,
-                           has_empty_freelist);
-    if (it != sorted_slot_spans.end()) {
-      EXPECT_TRUE(std::all_of(it,
-                              sorted_slot_spans.begin() + sorted_spans_count,
-                              has_empty_freelist));
-    }
-  };
-
-  // Everything is sorted.
-  run_test(PartitionBucket::kMaxSlotSpansToSort / 2);
-  // Only the first slot spans are sorted.
-  run_test(PartitionBucket::kMaxSlotSpansToSort * 2);
-
-  // Corner cases.
-  run_test(0);
-  run_test(1);
-}
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsUsedAfterAlloc) {
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
-
-  allocator.root()->Free(ptr);
-}
-
-TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsFreeAfterFree) {
-  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
-
-  allocator.root()->Free(ptr);
-  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
-}
-
-TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterDecommit) {
-  void* ptr1 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr1);
-  allocator.root()->Free(ptr1);
-
-  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
-  // Decommit the slot span. Bitmap will be rewritten in Decommit().
-  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
-}
-
-TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterPurge) {
-  void* ptr1 = allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name);
-  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
-      SystemPageSize() - ExtraAllocSize(allocator), type_name));
-  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr2);
-  allocator.root()->Free(ptr2);
-
-  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
-  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
-  // Bitmap will be rewritten in PartitionPurgeSlotSpan().
-  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
-  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
-  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
-
-  allocator.root()->Free(ptr1);
-}
-
-#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)
-
-}  // namespace partition_alloc::internal
-
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc
deleted file mode 100644
index 29efa29..0000000
--- a/base/allocator/partition_allocator/partition_bucket.cc
+++ /dev/null
@@ -1,1471 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_bucket.h"
-
-#include <algorithm>
-#include <cstdint>
-#include <tuple>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-
-namespace partition_alloc::internal {
-
-namespace {
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-PA_ALWAYS_INLINE uintptr_t ShadowMetadataStart(uintptr_t super_page,
-                                               pool_handle pool) {
-  uintptr_t shadow_metadata_start =
-      super_page + SystemPageSize() + ShadowPoolOffset(pool);
-  PA_DCHECK(!PartitionAddressSpace::IsInRegularPool(shadow_metadata_start));
-  PA_DCHECK(!PartitionAddressSpace::IsInBRPPool(shadow_metadata_start));
-  return shadow_metadata_start;
-}
-#endif
-
-[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryMappingFailure(
-    PartitionRoot* root,
-    size_t size) PA_LOCKS_EXCLUDED(PartitionRootLock(root)) {
-  PA_NO_CODE_FOLDING();
-  root->OutOfMemory(size);
-  PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
-}
-
-[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryCommitFailure(
-    PartitionRoot* root,
-    size_t size) PA_LOCKS_EXCLUDED(PartitionRootLock(root)) {
-  PA_NO_CODE_FOLDING();
-  root->OutOfMemory(size);
-  PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
-}
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-// |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
-// that a partial super page is allowed at the end. Since the block list uses
-// kSuperPageSize granularity, a partial super page is considered blocked if
-// there is a raw_ptr<T> pointing anywhere in that super page, even if it
-// doesn't point to that partially allocated region.
-bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
-  PA_DCHECK(!(start % kSuperPageSize));
-  for (uintptr_t super_page = start; super_page < end;
-       super_page += kSuperPageSize) {
-    // If any blocked super page is found inside the given memory region,
-    // the memory region is blocked.
-    if (!AddressPoolManagerBitmap::IsAllowedSuperPageForBRPPool(super_page)) {
-      AddressPoolManagerBitmap::IncrementBlocklistHitCount();
-      return false;
-    }
-  }
-  return true;
-}
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
-        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-// Reserves |requested_size| worth of super pages from the specified pool.
-// If the BRP pool is requested, this function will honor the BRP block list.
-//
-// The returned address will be aligned to kSuperPageSize, and so
-// |requested_address| should be. |requested_size| doesn't have to be, however.
-//
-// |requested_address| is merely a hint; it will be attempted, but easily
-// given up on if it doesn't work the first time.
-//
-// The function doesn't need to hold root->lock_ or any other locks, because:
-// - It (1) reserves memory, (2) then consults AreAllowedSuperPagesForBRPPool
-//   for that memory, and (3) returns the memory if
-//   allowed, or unreserves and decommits if not allowed. So no other
-//   overlapping region can be allocated while executing
-//   AreAllowedSuperPagesForBRPPool.
-// - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
-//   designed to not need locking.
-uintptr_t ReserveMemoryFromPool(pool_handle pool,
-                                uintptr_t requested_address,
-                                size_t requested_size) {
-  PA_DCHECK(!(requested_address % kSuperPageSize));
-
-  uintptr_t reserved_address = AddressPoolManager::GetInstance().Reserve(
-      pool, requested_address, requested_size);
-
-  // In 32-bit mode, when allocating from the BRP pool, verify that the
-  // requested allocation honors the block list. Find a better address otherwise.
-#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (pool == kBRPPoolHandle) {
-    constexpr int kMaxRandomAddressTries = 10;
-    for (int i = 0; i < kMaxRandomAddressTries; ++i) {
-      if (!reserved_address ||
-          AreAllowedSuperPagesForBRPPool(reserved_address,
-                                         reserved_address + requested_size)) {
-        break;
-      }
-      AddressPoolManager::GetInstance().UnreserveAndDecommit(
-          pool, reserved_address, requested_size);
-      // No longer try to honor |requested_address|, because it didn't work for
-      // us last time.
-      reserved_address =
-          AddressPoolManager::GetInstance().Reserve(pool, 0, requested_size);
-    }
-
-    // If the allocation attempt succeeds, we will break out of the following
-    // loop immediately.
-    //
-    // Last resort: sequentially scan the whole 32-bit address space. The number
-    // of blocked super-pages should be very small, so we expect to practically
-    // never need to run the following code. Note that it may fail to find an
-    // available super page, e.g., when it becomes available after the scan
-    // passes through it, but we accept the risk.
-    for (uintptr_t address_to_try = kSuperPageSize; address_to_try != 0;
-         address_to_try += kSuperPageSize) {
-      if (!reserved_address ||
-          AreAllowedSuperPagesForBRPPool(reserved_address,
-                                         reserved_address + requested_size)) {
-        break;
-      }
-      AddressPoolManager::GetInstance().UnreserveAndDecommit(
-          pool, reserved_address, requested_size);
-      // Reserve() can return a different address than the one attempted.
-      reserved_address = AddressPoolManager::GetInstance().Reserve(
-          pool, address_to_try, requested_size);
-    }
-
-    // If the loop ends naturally, the last allocated region hasn't been
-    // verified. Do it now.
-    if (reserved_address &&
-        !AreAllowedSuperPagesForBRPPool(reserved_address,
-                                        reserved_address + requested_size)) {
-      AddressPoolManager::GetInstance().UnreserveAndDecommit(
-          pool, reserved_address, requested_size);
-      reserved_address = 0;
-    }
-  }
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
-        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  // Only mark the region as belonging to the pool after it has passed the
-  // blocklist check in order to avoid a potential race with destructing a
-  // raw_ptr<T> object that points to non-PA memory in another thread.
-  // If `MarkUsed` was called earlier, the other thread could incorrectly
-  // determine that the allocation had come from PartitionAlloc.
-  if (reserved_address) {
-    AddressPoolManager::GetInstance().MarkUsed(pool, reserved_address,
-                                               requested_size);
-  }
-#endif
-
-  PA_DCHECK(!(reserved_address % kSuperPageSize));
-  return reserved_address;
-}
-
-SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
-                                     unsigned int flags,
-                                     size_t raw_size,
-                                     size_t slot_span_alignment) {
-  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
-            base::bits::IsPowerOfTwo(slot_span_alignment));
-
-  // No static EXCLUSIVE_LOCKS_REQUIRED(), as the checker doesn't understand
-  // scoped unlocking.
-  PartitionRootLock(root).AssertAcquired();
-
-  const bool return_null = flags & AllocFlags::kReturnNull;
-  if (PA_UNLIKELY(raw_size > MaxDirectMapped())) {
-    if (return_null) {
-      return nullptr;
-    }
-
-    // The lock is here to protect PA from:
-    // 1. Concurrent calls
-    // 2. Reentrant calls
-    //
-    // Releasing it is fine here however, as:
-    // 1. Concurrency: |PartitionRoot::OutOfMemory()| never returns, so the lock
-    //    will not be re-acquired, which would lead to acting on inconsistent
-    //    data that could have been modified in-between releasing and acquiring
-    //    it.
-    // 2. Reentrancy: This is why we release the lock. On some platforms,
-    //    terminating the process may free() memory, or even possibly try to
-    //    allocate some. Calling free() is fine, but will deadlock since
-    //    |PartitionRoot::lock_| is not recursive.
-    //
-    // Supporting reentrant calls properly is hard, and not a requirement for
-    // PA. However up to that point, we've only *read* data, not *written* to
-    // any state. Reentrant calls are then fine, especially as we don't continue
-    // on this path. The only downside is possibly endless recursion if the OOM
-    // handler allocates and fails to use UncheckedMalloc() or equivalent, but
-    // that's violating the contract of base::TerminateBecauseOutOfMemory().
-    ScopedUnlockGuard unlock{PartitionRootLock(root)};
-    PartitionExcessiveAllocationSize(raw_size);
-  }
-
-  PartitionDirectMapExtent* map_extent = nullptr;
-  PartitionPage* page = nullptr;
-
-  {
-    // Getting memory for direct-mapped allocations doesn't interact with the
-    // rest of the allocator, but takes a long time, as it involves several
-    // system calls. Although no mmap() (or equivalent) calls are made on
-    // 64 bit systems, page permissions are changed with mprotect(), which is
-    // a syscall.
-    //
-    // These calls are almost always slow (at least a couple of microseconds
-    // per syscall on a desktop Linux machine), and they also have a very long
-    // latency tail, possibly from getting descheduled. As a consequence, we
-    // should not hold
-    // the lock when performing a syscall. This is not the only problematic
-    // location, but since this one doesn't interact with the rest of the
-    // allocator, we can safely drop and then re-acquire the lock.
-    //
-    // Note that this only affects allocations that are not served out of the
-    // thread cache, but as a simple example the buffer partition in blink is
-    // frequently used for large allocations (e.g. ArrayBuffer), and frequent,
-    // small ones (e.g. WTF::String), and does not have a thread cache.
-    ScopedUnlockGuard scoped_unlock{PartitionRootLock(root)};
-
-    const size_t slot_size = PartitionRoot::GetDirectMapSlotSize(raw_size);
-    // The super page starts with a partition page worth of metadata and guard
-    // pages, hence alignment requests == PartitionPageSize() will be
-    // automatically satisfied. Padding is needed for higher-order alignment
-    // requests. Note, |slot_span_alignment| is at least 1 partition page.
-    const size_t padding_for_alignment =
-        slot_span_alignment - PartitionPageSize();
-    const size_t reservation_size = PartitionRoot::GetDirectMapReservationSize(
-        raw_size + padding_for_alignment);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    const size_t available_reservation_size =
-        reservation_size - padding_for_alignment -
-        PartitionRoot::GetDirectMapMetadataAndGuardPagesSize();
-    PA_DCHECK(slot_size <= available_reservation_size);
-#endif
-
-    pool_handle pool = root->ChoosePool();
-    uintptr_t reservation_start;
-    {
-      // Reserving memory from the pool is actually not a syscall on 64 bit
-      // platforms.
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-      ScopedSyscallTimer timer{root};
-#endif
-      reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
-    }
-    if (PA_UNLIKELY(!reservation_start)) {
-      if (return_null) {
-        return nullptr;
-      }
-
-      PartitionOutOfMemoryMappingFailure(root, reservation_size);
-    }
-
-    root->total_size_of_direct_mapped_pages.fetch_add(
-        reservation_size, std::memory_order_relaxed);
-
-    // Shift by 1 partition page (metadata + guard pages) and alignment padding.
-    const uintptr_t slot_start =
-        reservation_start + PartitionPageSize() + padding_for_alignment;
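-    // Illustrative example (not part of the original source); assumes the
-    // typical 16 KiB partition page and 2 MiB super page: for
-    // slot_span_alignment = 64 KiB, padding_for_alignment = 48 KiB, so
-    // slot_start = reservation_start + 64 KiB. Since reservation_start is
-    // super-page aligned, slot_start honors the requested 64 KiB alignment.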
-
-    {
-      ScopedSyscallTimer timer{root};
-      RecommitSystemPages(reservation_start + SystemPageSize(),
-                          SystemPageSize(),
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-                          root->PageAccessibilityWithThreadIsolationIfEnabled(
-                              PageAccessibilityConfiguration::kRead),
-#else
-                          root->PageAccessibilityWithThreadIsolationIfEnabled(
-                              PageAccessibilityConfiguration::kReadWrite),
-#endif
-                          PageAccessibilityDisposition::kRequireUpdate);
-    }
-
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-    // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
-    // used, allocate a SystemPage for RefCount "bitmap" (only one of its
-    // elements will be used).
-    if (pool == kBRPPoolHandle) {
-      ScopedSyscallTimer timer{root};
-      RecommitSystemPages(reservation_start + SystemPageSize() * 2,
-                          SystemPageSize(),
-                          root->PageAccessibilityWithThreadIsolationIfEnabled(
-                              PageAccessibilityConfiguration::kReadWrite),
-                          PageAccessibilityDisposition::kRequireUpdate);
-    }
-#endif
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-    {
-      ScopedSyscallTimer timer{root};
-      RecommitSystemPages(ShadowMetadataStart(reservation_start, pool),
-                          SystemPageSize(),
-                          root->PageAccessibilityWithThreadIsolationIfEnabled(
-                              PageAccessibilityConfiguration::kReadWrite),
-                          PageAccessibilityDisposition::kRequireUpdate);
-    }
-#endif
-
-    // No need to hold root->lock_. Now that memory is reserved, no other
-    // overlapping region can be allocated (because of how pools work),
-    // so no other thread can update the same offset table entries at the
-    // same time. Furthermore, nobody will be reading these offsets until this
-    // function returns.
-    uintptr_t address_start = reservation_start;
-    uintptr_t address_end = address_start + reservation_size;
-    auto* offset_ptr = ReservationOffsetPointer(address_start);
-    uint16_t offset = 0;
-    while (address_start < address_end) {
-      PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address_start));
-      PA_DCHECK(offset < kOffsetTagNormalBuckets);
-      *offset_ptr++ = offset++;
-      address_start += kSuperPageSize;
-    }
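-    // Illustrative note (not part of the original source): a reservation
-    // spanning e.g. three super pages records offsets 0, 1, 2 in the
-    // reservation offset table, which presumably lets code map any address
-    // inside the reservation back to reservation_start by subtracting
-    // offset * kSuperPageSize.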
-
-    auto* super_page_extent = PartitionSuperPageToExtent(reservation_start);
-    super_page_extent->root = root;
-    // The new structures are all located inside a fresh system page so they
-    // will all be zeroed out. These DCHECKs are for documentation and to assert
-    // our expectations of the kernel.
-    PA_DCHECK(!super_page_extent->number_of_consecutive_super_pages);
-    PA_DCHECK(!super_page_extent->next);
-
-    PartitionPage* first_page =
-        reinterpret_cast<PartitionPage*>(super_page_extent) + 1;
-    page = PartitionPage::FromAddr(slot_start);
-    // |first_page| and |page| may be equal, if there is no alignment padding.
-    if (page != first_page) {
-      PA_DCHECK(page > first_page);
-      PA_DCHECK(page - first_page <= PartitionPage::kMaxSlotSpanMetadataOffset);
-      PA_CHECK(!first_page->is_valid);
-      first_page->has_valid_span_after_this = true;
-      first_page->slot_span_metadata_offset = page - first_page;
-    }
-    auto* metadata = reinterpret_cast<PartitionDirectMapMetadata*>(page);
-    // Since direct map metadata is larger than PartitionPage, make sure the
-    // first and the last bytes are on the same system page, i.e. within the
-    // super page metadata region.
-    PA_DCHECK(base::bits::AlignDown(reinterpret_cast<uintptr_t>(metadata),
-                                    SystemPageSize()) ==
-              base::bits::AlignDown(reinterpret_cast<uintptr_t>(metadata) +
-                                        sizeof(PartitionDirectMapMetadata) - 1,
-                                    SystemPageSize()));
-    PA_DCHECK(page == &metadata->page);
-    page->is_valid = true;
-    PA_DCHECK(!page->has_valid_span_after_this);
-    PA_DCHECK(!page->slot_span_metadata_offset);
-    PA_DCHECK(!page->slot_span_metadata.next_slot_span);
-    PA_DCHECK(!page->slot_span_metadata.marked_full);
-    PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
-    PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
-    PA_DCHECK(!page->slot_span_metadata.in_empty_cache());
-
-    PA_DCHECK(!metadata->subsequent_page.subsequent_page_metadata.raw_size);
-    // Raw size is set later, by the caller.
-    metadata->subsequent_page.slot_span_metadata_offset = 1;
-
-    PA_DCHECK(!metadata->bucket.active_slot_spans_head);
-    PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
-    PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
-    PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
-    PA_DCHECK(!metadata->bucket.num_full_slot_spans);
-    metadata->bucket.slot_size = slot_size;
-
-    new (&page->slot_span_metadata) SlotSpanMetadata(&metadata->bucket);
-
-    // It is typically possible to map a large range of inaccessible pages, and
-    // this is leveraged in multiple places, including the pools. However,
-    // this doesn't mean that we can commit all this memory.  For the vast
-    // majority of allocations, this just means that we crash in a slightly
-    // different place, but for callers ready to handle failures, we have to
-    // return nullptr. See crbug.com/1187404.
-    //
-    // Note that we didn't check above, because if we cannot even commit a
-    // single page, then this is likely hopeless anyway, and we will crash very
-    // soon.
-    //
-    // Direct map never uses tagging, as size is always >kMaxMemoryTaggingSize.
-    PA_DCHECK(raw_size > kMaxMemoryTaggingSize);
-    const bool ok = root->TryRecommitSystemPagesForData(
-        slot_start, slot_size, PageAccessibilityDisposition::kRequireUpdate,
-        false);
-    if (!ok) {
-      if (!return_null) {
-        PartitionOutOfMemoryCommitFailure(root, slot_size);
-      }
-
-      {
-        ScopedSyscallTimer timer{root};
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-        AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
-                                                     reservation_size);
-#endif
-        AddressPoolManager::GetInstance().UnreserveAndDecommit(
-            pool, reservation_start, reservation_size);
-      }
-
-      root->total_size_of_direct_mapped_pages.fetch_sub(
-          reservation_size, std::memory_order_relaxed);
-
-      return nullptr;
-    }
-
-    auto* next_entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
-    page->slot_span_metadata.SetFreelistHead(next_entry);
-
-    map_extent = &metadata->direct_map_extent;
-    map_extent->reservation_size = reservation_size;
-    map_extent->padding_for_alignment = padding_for_alignment;
-    map_extent->bucket = &metadata->bucket;
-  }
-
-  PartitionRootLock(root).AssertAcquired();
-
-  // Maintain the doubly-linked list of all direct mappings.
-  map_extent->next_extent = root->direct_map_list;
-  if (map_extent->next_extent) {
-    map_extent->next_extent->prev_extent = map_extent;
-  }
-  map_extent->prev_extent = nullptr;
-  root->direct_map_list = map_extent;
-
-  return &page->slot_span_metadata;
-}
-
-uint8_t ComputeSystemPagesPerSlotSpanPreferSmall(size_t slot_size) {
-  if (slot_size > MaxRegularSlotSpanSize()) {
-    // This is technically not needed, as for now all the larger slot sizes are
-    // multiples of the system page size.
-    return base::bits::AlignUp(slot_size, SystemPageSize()) / SystemPageSize();
-  }
-
-  // Smaller slot spans waste less address space, and can also lower
-  // fragmentation:
-  // - Address space: This comes from fuller SuperPages (since the tail end of
-  //   a SuperPage is more likely to be used when the slot span is smaller).
-  //   Also, if a slot span is partially used, a smaller slot span will use
-  //   less address space.
-  // - In-slot fragmentation: Slot span management code will prioritize
-  //   almost-full slot spans, and try to keep empty slot spans empty. The
-  //   more granular this logic can be, the better.
-  //
-  // Since metadata space overhead is constant per-PartitionPage, keeping
-  // smaller slot spans makes sense.
-  //
-  // Underlying memory allocation is done per-PartitionPage, but memory commit
-  // is done per system page. This means that we prefer to fill the entirety of
-  // a PartitionPage with a slot span, but we can tolerate some system pages
-  // being empty at the end, as these will not cost committed or dirty memory.
-  //
-  // The choice below is, for multi-slot slot spans:
-  // - If a full PartitionPage slot span is possible with less than 2% of a
-  //   *single* system page wasted, use it. The smallest possible size wins.
-  // - Otherwise, select the size with the smallest virtual address space
-  //   loss. Allow a SlotSpan to leave some slack in its PartitionPage, up to
-  //   1/4 of the total.
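-  // Illustrative example (not part of the original source); assumes 4 KiB
-  // system pages and 16 KiB partition pages: for slot_size = 96, a single
-  // PartitionPage wastes 16384 % 96 = 64 bytes, which is below the 2% cutoff
-  // (~82 bytes), so the first loop below picks 1 partition page, i.e. 4
-  // system pages.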
-  for (size_t partition_page_count = 1;
-       partition_page_count <= kMaxPartitionPagesPerRegularSlotSpan;
-       partition_page_count++) {
-    size_t candidate_size = partition_page_count * PartitionPageSize();
-    size_t waste = candidate_size % slot_size;
-    if (waste <= .02 * SystemPageSize()) {
-      return partition_page_count * NumSystemPagesPerPartitionPage();
-    }
-  }
-
-  size_t best_count = 0;
-  size_t best_waste = std::numeric_limits<size_t>::max();
-  for (size_t partition_page_count = 1;
-       partition_page_count <= kMaxPartitionPagesPerRegularSlotSpan;
-       partition_page_count++) {
-    // Prefer no slack.
-    for (size_t slack = 0; slack < partition_page_count; slack++) {
-      size_t system_page_count =
-          partition_page_count * NumSystemPagesPerPartitionPage() - slack;
-      size_t candidate_size = system_page_count * SystemPageSize();
-      size_t waste = candidate_size % slot_size;
-      if (waste < best_waste) {
-        best_waste = waste;
-        best_count = system_page_count;
-      }
-    }
-  }
-  return best_count;
-}
-
-uint8_t ComputeSystemPagesPerSlotSpanInternal(size_t slot_size) {
-  // This works out reasonably for the current bucket sizes of the generic
-  // allocator, and the current values of partition page size and constants.
-  // Specifically, we have enough room to always pack the slots perfectly into
-  // some number of system pages. The only waste is the waste associated with
-  // unfaulted pages (i.e. wasted address space).
-  // TODO: we end up using a lot of system pages for very small sizes. For
-  // example, we'll use 12 system pages for slot size 24. The slot size is so
-  // small that the waste would be tiny with just 4, or 1, system pages.  Later,
-  // we can investigate whether there are anti-fragmentation benefits to using
-  // fewer system pages.
-  double best_waste_ratio = 1.0f;
-  uint16_t best_pages = 0;
-  if (slot_size > MaxRegularSlotSpanSize()) {
-    // TODO(ajwong): Why is there a DCHECK here for this?
-    // http://crbug.com/776537
-    PA_DCHECK(!(slot_size % SystemPageSize()));
-    best_pages = static_cast<uint16_t>(slot_size >> SystemPageShift());
-    PA_CHECK(best_pages <= std::numeric_limits<uint8_t>::max());
-    return static_cast<uint8_t>(best_pages);
-  }
-  PA_DCHECK(slot_size <= MaxRegularSlotSpanSize());
-  for (uint16_t i = NumSystemPagesPerPartitionPage() - 1;
-       i <= MaxSystemPagesPerRegularSlotSpan(); ++i) {
-    size_t page_size = i << SystemPageShift();
-    size_t num_slots = page_size / slot_size;
-    size_t waste = page_size - (num_slots * slot_size);
-    // Leaving a page unfaulted is not free; the page will occupy an empty page
-    // table entry.  Make a simple attempt to account for that.
-    //
-    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
-    // regardless of whether or not they are wasted. Should it just
-    // be waste += i * sizeof(void*)?
-    // http://crbug.com/776537
-    size_t num_remainder_pages = i & (NumSystemPagesPerPartitionPage() - 1);
-    size_t num_unfaulted_pages =
-        num_remainder_pages
-            ? (NumSystemPagesPerPartitionPage() - num_remainder_pages)
-            : 0;
-    waste += sizeof(void*) * num_unfaulted_pages;
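-    // Illustrative example (not part of the original source); assumes 4 KiB
-    // system pages, 4 system pages per partition page and 8-byte pointers:
-    // for slot_size = 320 and i = 5, page_size = 20480, num_slots = 64,
-    // waste = 0, plus 3 unfaulted pages -> waste = 24, a waste_ratio of
-    // roughly 0.12%.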
-    double waste_ratio =
-        static_cast<double>(waste) / static_cast<double>(page_size);
-    if (waste_ratio < best_waste_ratio) {
-      best_waste_ratio = waste_ratio;
-      best_pages = i;
-    }
-  }
-  PA_DCHECK(best_pages > 0);
-  PA_CHECK(best_pages <= MaxSystemPagesPerRegularSlotSpan());
-  return static_cast<uint8_t>(best_pages);
-}
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-// Returns size that should be tagged. Avoiding the previous slot ref count if
-// it exists to avoid a race (crbug.com/1445816).
-PA_ALWAYS_INLINE size_t TagSizeForSlot(PartitionRoot* root, size_t slot_size) {
-#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-  return slot_size - root->settings.ref_count_size;
-#else
-  return slot_size;
-#endif
-}
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-}  // namespace
-
-uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
-                                      bool prefer_smaller_slot_spans) {
-  if (prefer_smaller_slot_spans) {
-    size_t system_page_count =
-        ComputeSystemPagesPerSlotSpanPreferSmall(slot_size);
-    size_t waste = (system_page_count * SystemPageSize()) % slot_size;
-    // In case the waste is too large (more than 5% of a page), don't try to use
-    // the "small" slot span formula. This happens when we have a lot of
-    // buckets, in some cases the formula doesn't find a nice, small size.
-    if (waste <= .05 * SystemPageSize()) {
-      return system_page_count;
-    }
-  }
-
-  return ComputeSystemPagesPerSlotSpanInternal(slot_size);
-}
-
-void PartitionBucket::Init(uint32_t new_slot_size) {
-  slot_size = new_slot_size;
-  slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
-  active_slot_spans_head = SlotSpanMetadata::get_sentinel_slot_span_non_const();
-  empty_slot_spans_head = nullptr;
-  decommitted_slot_spans_head = nullptr;
-  num_full_slot_spans = 0;
-  bool prefer_smaller_slot_spans =
-#if PA_CONFIG(PREFER_SMALLER_SLOT_SPANS)
-      true
-#else
-      false
-#endif
-      ;
-  num_system_pages_per_slot_span =
-      ComputeSystemPagesPerSlotSpan(slot_size, prefer_smaller_slot_spans);
-}
-
-PA_ALWAYS_INLINE SlotSpanMetadata* PartitionBucket::AllocNewSlotSpan(
-    PartitionRoot* root,
-    unsigned int flags,
-    size_t slot_span_alignment) {
-  PA_DCHECK(!(root->next_partition_page % PartitionPageSize()));
-  PA_DCHECK(!(root->next_partition_page_end % PartitionPageSize()));
-
-  size_t num_partition_pages = get_pages_per_slot_span();
-  size_t slot_span_reservation_size = num_partition_pages
-                                      << PartitionPageShift();
-  size_t slot_span_committed_size = get_bytes_per_span();
-  PA_DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
-  PA_DCHECK(slot_span_committed_size % SystemPageSize() == 0);
-  PA_DCHECK(slot_span_committed_size <= slot_span_reservation_size);
-
-  uintptr_t adjusted_next_partition_page =
-      base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
-  if (PA_UNLIKELY(adjusted_next_partition_page + slot_span_reservation_size >
-                  root->next_partition_page_end)) {
-    // AllocNewSuperPage() may crash (e.g. address space exhaustion), so put
-    // data on the stack.
-    PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
-    PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
-
-    // In this case, we can no longer hand out pages from the current super page
-    // allocation. Get a new super page.
-    if (!AllocNewSuperPage(root, flags)) {
-      return nullptr;
-    }
-    // AllocNewSuperPage() updates root->next_partition_page, re-query.
-    adjusted_next_partition_page =
-        base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
-    PA_CHECK(adjusted_next_partition_page + slot_span_reservation_size <=
-             root->next_partition_page_end);
-  }
-
-  auto* gap_start_page = PartitionPage::FromAddr(root->next_partition_page);
-  auto* gap_end_page = PartitionPage::FromAddr(adjusted_next_partition_page);
-  for (auto* page = gap_start_page; page < gap_end_page; ++page) {
-    PA_DCHECK(!page->is_valid);
-    page->has_valid_span_after_this = 1;
-  }
-  root->next_partition_page =
-      adjusted_next_partition_page + slot_span_reservation_size;
-
-  uintptr_t slot_span_start = adjusted_next_partition_page;
-  auto* slot_span = &gap_end_page->slot_span_metadata;
-  InitializeSlotSpan(slot_span);
-  // Now that slot span is initialized, it's safe to call FromSlotStart.
-  PA_DCHECK(slot_span == SlotSpanMetadata::FromSlotStart(slot_span_start));
-
-  // System pages in the super page come in a decommitted state. Commit them
-  // before vending them back.
-  // If lazy commit is enabled, pages will be committed when provisioning slots,
-  // in ProvisionMoreSlotsAndAllocOne(), not here.
-  if (!kUseLazyCommit) {
-    PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
-    PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
-    PA_DEBUG_DATA_ON_STACK("spancmt", slot_span_committed_size);
-
-    root->RecommitSystemPagesForData(
-        slot_span_start, slot_span_committed_size,
-        PageAccessibilityDisposition::kRequireUpdate,
-        slot_size <= kMaxMemoryTaggingSize);
-  }
-
-  PA_CHECK(get_slots_per_span() <= kMaxSlotsPerSlotSpan);
-
-  // Double check that we had enough space in the super page for the new slot
-  // span.
-  PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
-
-  return slot_span;
-}
-
-uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
-                                                 size_t super_page_count,
-                                                 unsigned int flags) {
-  PA_CHECK(super_page_count > 0);
-  PA_CHECK(super_page_count <=
-           std::numeric_limits<size_t>::max() / kSuperPageSize);
-  // Need a new super page. We want to allocate super pages in a contiguous
-  // address region as much as possible. This is important for not causing
-  // page table bloat and not fragmenting address spaces in 32 bit
-  // architectures.
-  uintptr_t requested_address = root->next_super_page;
-  pool_handle pool = root->ChoosePool();
-  uintptr_t super_page_span_start = ReserveMemoryFromPool(
-      pool, requested_address, super_page_count * kSuperPageSize);
-  if (PA_UNLIKELY(!super_page_span_start)) {
-    if (flags & AllocFlags::kReturnNull) {
-      return 0;
-    }
-
-    // Didn't manage to get a new uncommitted super page -> address space issue.
-    ::partition_alloc::internal::ScopedUnlockGuard unlock{
-        PartitionRootLock(root)};
-    PartitionOutOfMemoryMappingFailure(root, kSuperPageSize);
-  }
-
-  uintptr_t super_page_span_end =
-      super_page_span_start + super_page_count * kSuperPageSize;
-  for (uintptr_t super_page = super_page_span_start;
-       super_page < super_page_span_end; super_page += kSuperPageSize) {
-    InitializeSuperPage(root, super_page, 0);
-  }
-  return super_page_span_start;
-}
-
-PA_ALWAYS_INLINE uintptr_t
-PartitionBucket::AllocNewSuperPage(PartitionRoot* root, unsigned int flags) {
-  auto super_page = AllocNewSuperPageSpan(root, 1, flags);
-  if (PA_UNLIKELY(!super_page)) {
-    // If the `kReturnNull` flag isn't set and the allocation attempt fails,
-    // `AllocNewSuperPageSpan` should've failed with an OOM crash.
-    PA_DCHECK(flags & AllocFlags::kReturnNull);
-    return 0;
-  }
-  return SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed());
-}
-
-PA_ALWAYS_INLINE uintptr_t
-PartitionBucket::InitializeSuperPage(PartitionRoot* root,
-                                     uintptr_t super_page,
-                                     uintptr_t requested_address) {
-  *ReservationOffsetPointer(super_page) = kOffsetTagNormalBuckets;
-
-  root->total_size_of_super_pages.fetch_add(kSuperPageSize,
-                                            std::memory_order_relaxed);
-
-  root->next_super_page = super_page + kSuperPageSize;
-  uintptr_t state_bitmap =
-      super_page + PartitionPageSize() +
-      (is_direct_mapped() ? 0 : ReservedFreeSlotBitmapSize());
-#if BUILDFLAG(USE_STARSCAN)
-  PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
-  const size_t state_bitmap_reservation_size =
-      root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
-  const size_t state_bitmap_size_to_commit =
-      root->IsQuarantineAllowed() ? CommittedStateBitmapSize() : 0;
-  PA_DCHECK(state_bitmap_reservation_size % PartitionPageSize() == 0);
-  PA_DCHECK(state_bitmap_size_to_commit % SystemPageSize() == 0);
-  PA_DCHECK(state_bitmap_size_to_commit <= state_bitmap_reservation_size);
-  uintptr_t payload = state_bitmap + state_bitmap_reservation_size;
-#else
-  uintptr_t payload = state_bitmap;
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-  root->next_partition_page = payload;
-  root->next_partition_page_end = root->next_super_page - PartitionPageSize();
-  PA_DCHECK(payload ==
-            SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed()));
-  PA_DCHECK(root->next_partition_page_end == SuperPagePayloadEnd(super_page));
-
-  // Keep the first partition page in the super page inaccessible to serve as a
-  // guard page, except for an "island" in the middle where we put page
-  // metadata and also a tiny amount of extent metadata.
-  {
-    ScopedSyscallTimer timer{root};
-    RecommitSystemPages(super_page + SystemPageSize(), SystemPageSize(),
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-                        root->PageAccessibilityWithThreadIsolationIfEnabled(
-                            PageAccessibilityConfiguration::kRead),
-#else
-                        root->PageAccessibilityWithThreadIsolationIfEnabled(
-                            PageAccessibilityConfiguration::kReadWrite),
-#endif
-                        PageAccessibilityDisposition::kRequireUpdate);
-  }
-
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-  // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
-  // used, allocate a SystemPage for RefCount bitmap.
-  if (root->ChoosePool() == kBRPPoolHandle) {
-    ScopedSyscallTimer timer{root};
-    RecommitSystemPages(super_page + SystemPageSize() * 2, SystemPageSize(),
-                        root->PageAccessibilityWithThreadIsolationIfEnabled(
-                            PageAccessibilityConfiguration::kReadWrite),
-                        PageAccessibilityDisposition::kRequireUpdate);
-  }
-#endif
-
-#if PA_CONFIG(ENABLE_SHADOW_METADATA)
-  {
-    ScopedSyscallTimer timer{root};
-    RecommitSystemPages(ShadowMetadataStart(super_page, root->ChoosePool()),
-                        SystemPageSize(),
-                        root->PageAccessibilityWithThreadIsolationIfEnabled(
-                            PageAccessibilityConfiguration::kReadWrite),
-                        PageAccessibilityDisposition::kRequireUpdate);
-  }
-#endif
-
-  // If we were after a specific address, but didn't get it, assume that
-  // the system chose a lousy address. Here most OSes have a default
-  // algorithm that isn't randomized. For example, most Linux
-  // distributions will allocate the mapping directly before the last
-  // successful mapping, which is far from random. So we just get fresh
-  // randomness for the next mapping attempt.
-  if (requested_address && requested_address != super_page) {
-    root->next_super_page = 0;
-  }
-
-  // We allocated a new super page so update super page metadata.
-  // First check if this is a new extent or not.
-  auto* latest_extent = PartitionSuperPageToExtent(super_page);
-  // By storing the root in every extent metadata object, we have a fast way
-  // to go from a pointer within the partition to the root object.
-  latest_extent->root = root;
-  // Most new extents will be part of a larger extent, and these two fields
-  // are unused, but we initialize them to 0 so that we get a clear signal
-  // in case they are accidentally used.
-  latest_extent->number_of_consecutive_super_pages = 0;
-  latest_extent->next = nullptr;
-  latest_extent->number_of_nonempty_slot_spans = 0;
-
-  PartitionSuperPageExtentEntry* current_extent = root->current_extent;
-  const bool is_new_extent = super_page != requested_address;
-  if (PA_UNLIKELY(is_new_extent)) {
-    if (PA_UNLIKELY(!current_extent)) {
-      PA_DCHECK(!root->first_extent);
-      root->first_extent = latest_extent;
-    } else {
-      PA_DCHECK(current_extent->number_of_consecutive_super_pages);
-      current_extent->next = latest_extent;
-    }
-    root->current_extent = latest_extent;
-    latest_extent->number_of_consecutive_super_pages = 1;
-  } else {
-    // We allocated next to an existing extent so just nudge the size up a
-    // little.
-    PA_DCHECK(current_extent->number_of_consecutive_super_pages);
-    ++current_extent->number_of_consecutive_super_pages;
-    PA_DCHECK(payload > SuperPagesBeginFromExtent(current_extent) &&
-              payload < SuperPagesEndFromExtent(current_extent));
-  }
-
-  // If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
-  // and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
-  // sure to register the super-page after it has been fully initialized.
-  // Otherwise, the concurrent scanner may try to access |extent->root|, which
-  // might not be initialized yet.
-#if BUILDFLAG(USE_STARSCAN)
-  if (root->IsQuarantineEnabled()) {
-    {
-      ScopedSyscallTimer timer{root};
-      RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
-                          root->PageAccessibilityWithThreadIsolationIfEnabled(
-                              PageAccessibilityConfiguration::kReadWrite),
-                          PageAccessibilityDisposition::kRequireUpdate);
-    }
-    PCScan::RegisterNewSuperPage(root, super_page);
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  // Commit the pages for freeslot bitmap.
-  if (!is_direct_mapped()) {
-    uintptr_t freeslot_bitmap_addr = super_page + PartitionPageSize();
-    PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr);
-    ScopedSyscallTimer timer{root};
-    RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(),
-                        root->PageAccessibilityWithThreadIsolationIfEnabled(
-                            PageAccessibilityConfiguration::kReadWrite),
-                        PageAccessibilityDisposition::kRequireUpdate);
-  }
-#endif
-
-  return payload;
-}
-
-PA_ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(
-    SlotSpanMetadata* slot_span) {
-  new (slot_span) SlotSpanMetadata(this);
-
-  slot_span->Reset();
-
-  uint16_t num_partition_pages = get_pages_per_slot_span();
-  auto* page = reinterpret_cast<PartitionPage*>(slot_span);
-  for (uint16_t i = 0; i < num_partition_pages; ++i, ++page) {
-    PA_DCHECK(i <= PartitionPage::kMaxSlotSpanMetadataOffset);
-    page->slot_span_metadata_offset = i;
-    page->is_valid = true;
-  }
-}
-
-PA_ALWAYS_INLINE uintptr_t
-PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
-                                               SlotSpanMetadata* slot_span) {
-  PA_DCHECK(slot_span != SlotSpanMetadata::get_sentinel_slot_span());
-  size_t num_slots = slot_span->num_unprovisioned_slots;
-  PA_DCHECK(num_slots);
-  PA_DCHECK(num_slots <= get_slots_per_span());
-  // We should only get here when _every_ slot is either used or unprovisioned.
-  // (The third possible state is "on the freelist". If we have a non-empty
-  // freelist, we should not get here.)
-  PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
-  // Similarly, explicitly make sure that the freelist is empty.
-  PA_DCHECK(!slot_span->get_freelist_head());
-  PA_DCHECK(!slot_span->is_full());
-
-  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
-  // If we got here, the first unallocated slot is either partially or fully on
-  // an uncommitted page. If the latter, it must be at the start of that page.
-  uintptr_t return_slot =
-      slot_span_start + (slot_size * slot_span->num_allocated_slots);
-  uintptr_t next_slot = return_slot + slot_size;
-  uintptr_t commit_start = base::bits::AlignUp(return_slot, SystemPageSize());
-  PA_DCHECK(next_slot > commit_start);
-  uintptr_t commit_end = base::bits::AlignUp(next_slot, SystemPageSize());
-  // If the slot was partially committed, |return_slot| and |next_slot| fall
-  // in different pages. If the slot was fully uncommitted, |return_slot| points
-  // to the page start and |next_slot| doesn't, thus only the latter gets
-  // rounded up.
-  PA_DCHECK(commit_end > commit_start);
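-  // Illustrative example (not part of the original source); assumes 4 KiB
-  // system pages: with slot_size = 9216 and one slot already allocated,
-  // return_slot = slot_span_start + 9216 and next_slot = slot_span_start +
-  // 18432, so commit_start = slot_span_start + 12288 and commit_end =
-  // slot_span_start + 20480.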
-
-  // The slot being returned is considered allocated.
-  slot_span->num_allocated_slots++;
-  // Round down, because a slot that doesn't fully fit in the new page(s) isn't
-  // provisioned.
-  size_t slots_to_provision = (commit_end - return_slot) / slot_size;
-  slot_span->num_unprovisioned_slots -= slots_to_provision;
-  PA_DCHECK(slot_span->num_allocated_slots +
-                slot_span->num_unprovisioned_slots <=
-            get_slots_per_span());
-
-  // If lazy commit is enabled, meaning system pages in the slot span come
-  // in an initially decommitted state, commit them here.
-  // Note, we can't use PageAccessibilityDisposition::kAllowKeepForPerf, because
-  // we have no knowledge of which pages have been committed before (it doesn't
-  // matter on Windows anyway).
-  if (kUseLazyCommit) {
-    // TODO(lizeb): Handle commit failure.
-    root->RecommitSystemPagesForData(
-        commit_start, commit_end - commit_start,
-        PageAccessibilityDisposition::kRequireUpdate,
-        slot_size <= kMaxMemoryTaggingSize);
-  }
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  const bool use_tagging =
-      root->IsMemoryTaggingEnabled() && slot_size <= kMaxMemoryTaggingSize;
-  if (PA_LIKELY(use_tagging)) {
-    // Ensure the MTE-tag of the memory pointed to by |return_slot| is
-    // unguessable.
-    TagMemoryRangeRandomly(return_slot, TagSizeForSlot(root, slot_size));
-  }
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-  // Add all slots that fit within so far committed pages to the free list.
-  PartitionFreelistEntry* prev_entry = nullptr;
-  uintptr_t next_slot_end = next_slot + slot_size;
-  size_t free_list_entries_added = 0;
-  while (next_slot_end <= commit_end) {
-    void* next_slot_ptr;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    if (PA_LIKELY(use_tagging)) {
-      // Ensure the MTE-tags of the memory pointed to by the other provisioned
-      // slots are unguessable. They will be returned to the app as is, and the
-      // MTE-tag will only change upon calling Free().
-      next_slot_ptr =
-          TagMemoryRangeRandomly(next_slot, TagSizeForSlot(root, slot_size));
-    } else {
-      // No MTE-tagging for larger slots, just cast.
-      next_slot_ptr = reinterpret_cast<void*>(next_slot);
-    }
-#else  // PA_CONFIG(HAS_MEMORY_TAGGING)
-    next_slot_ptr = reinterpret_cast<void*>(next_slot);
-#endif
-    auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
-    if (!slot_span->get_freelist_head()) {
-      PA_DCHECK(!prev_entry);
-      PA_DCHECK(!free_list_entries_added);
-      slot_span->SetFreelistHead(entry);
-    } else {
-      PA_DCHECK(free_list_entries_added);
-      prev_entry->SetNext(entry);
-    }
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-    FreeSlotBitmapMarkSlotAsFree(next_slot);
-#endif
-    next_slot = next_slot_end;
-    next_slot_end = next_slot + slot_size;
-    prev_entry = entry;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    free_list_entries_added++;
-#endif
-  }
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  FreeSlotBitmapMarkSlotAsFree(return_slot);
-#endif
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // The only provisioned slot not added to the free list is the one being
-  // returned.
-  PA_DCHECK(slots_to_provision == free_list_entries_added + 1);
-  // We didn't necessarily provision more than one slot (e.g. if |slot_size|
-  // is large), meaning that |slot_span->freelist_head| can be nullptr.
-  if (slot_span->get_freelist_head()) {
-    PA_DCHECK(free_list_entries_added);
-    slot_span->get_freelist_head()->CheckFreeList(slot_size);
-  }
-#endif
-
-  // We had no free slots, and created some (potentially 0) in sorted order.
-  slot_span->set_freelist_sorted();
-
-  return return_slot;
-}
-
-bool PartitionBucket::SetNewActiveSlotSpan() {
-  SlotSpanMetadata* slot_span = active_slot_spans_head;
-  if (slot_span == SlotSpanMetadata::get_sentinel_slot_span()) {
-    return false;
-  }
-
-  SlotSpanMetadata* next_slot_span;
-
-  // The goal here is to find a suitable slot span in the active list. Suitable
-  // slot spans are |is_active()|, i.e. they either have (a) freelist entries,
-  // or (b) unprovisioned free space. The first case is preferable, since it
-  // doesn't cost a system call, and doesn't cause new memory to become dirty.
-  //
-  // While looking for a new slot span, active list maintenance is performed,
-  // that is:
-  // - Empty and decommitted slot spans are moved to their respective lists.
-  // - Full slot spans are removed from the active list but are not moved
-  //   anywhere. They could be tracked in a separate list, but this would
-  //   increase cost non-trivially. Indeed, a full slot span is likely to become
-  //   non-full at some point (due to a free() hitting it). Since we only have
-  //   space in the metadata for a single linked list pointer, removing the
-  //   newly-non-full slot span from the "full" list would require walking it
-  //   (to know what's before it in the full list).
-  //
-  // Since we prefer slot spans with provisioned freelist entries, maintenance
-  // happens in two stages:
-  // 1. Walk the list to find candidates. Each of the skipped slot spans is
-  //    moved to either:
-  //    - one of the long-lived lists: empty, decommitted
-  //    - the temporary "active slot spans with no freelist entry" list
-  //    - nowhere, for full slot spans.
-  // 2. Once we have a candidate:
-  //   - Set it as the new active list head
-  //   - Reattach the temporary list
-  //
-  // Note that in most cases, the whole list will not be walked and maintained
-  // at this stage.
-
-  SlotSpanMetadata* to_provision_head = nullptr;
-  SlotSpanMetadata* to_provision_tail = nullptr;
-
-  for (; slot_span; slot_span = next_slot_span) {
-    next_slot_span = slot_span->next_slot_span;
-    PA_DCHECK(slot_span->bucket == this);
-    PA_DCHECK(slot_span != empty_slot_spans_head);
-    PA_DCHECK(slot_span != decommitted_slot_spans_head);
-
-    if (slot_span->is_active()) {
-      // Has provisioned slots.
-      if (slot_span->get_freelist_head()) {
-        // Will use this slot span, no need to go further.
-        break;
-      } else {
-        // Keeping head and tail because we don't want to reverse the list.
-        if (!to_provision_head) {
-          to_provision_head = slot_span;
-        }
-        if (to_provision_tail) {
-          to_provision_tail->next_slot_span = slot_span;
-        }
-        to_provision_tail = slot_span;
-        slot_span->next_slot_span = nullptr;
-      }
-    } else if (slot_span->is_empty()) {
-      slot_span->next_slot_span = empty_slot_spans_head;
-      empty_slot_spans_head = slot_span;
-    } else if (PA_LIKELY(slot_span->is_decommitted())) {
-      slot_span->next_slot_span = decommitted_slot_spans_head;
-      decommitted_slot_spans_head = slot_span;
-    } else {
-      PA_DCHECK(slot_span->is_full());
-      // Move this slot span... nowhere, and also mark it as full. We need it
-      // marked so that freeing can tell, and move the span back into the
-      // active list.
-      slot_span->marked_full = 1;
-      ++num_full_slot_spans;
-      // Overflow. Most likely a correctness issue in the code.  It is in theory
-      // possible that the number of full slot spans really reaches (1 << 24),
-      // but this is very unlikely (and not possible with most pool settings).
-      PA_CHECK(num_full_slot_spans);
-      // Not necessary but might help stop accidents.
-      slot_span->next_slot_span = nullptr;
-    }
-  }
-
-  bool usable_active_list_head = false;
-  // Found an active slot span with provisioned entries on the freelist.
-  if (slot_span) {
-    usable_active_list_head = true;
-    // We have active slot spans with unprovisioned entries. Re-attach them into
-    // the active list, past the span with freelist entries.
-    if (to_provision_head) {
-      auto* next = slot_span->next_slot_span;
-      slot_span->next_slot_span = to_provision_head;
-      to_provision_tail->next_slot_span = next;
-    }
-    active_slot_spans_head = slot_span;
-  } else if (to_provision_head) {
-    usable_active_list_head = true;
-    // Need to provision new slots.
-    active_slot_spans_head = to_provision_head;
-  } else {
-    // Active list is now empty.
-    active_slot_spans_head =
-        SlotSpanMetadata::get_sentinel_slot_span_non_const();
-  }
-
-  return usable_active_list_head;
-}
-
-void PartitionBucket::MaintainActiveList() {
-  SlotSpanMetadata* slot_span = active_slot_spans_head;
-  if (slot_span == SlotSpanMetadata::get_sentinel_slot_span()) {
-    return;
-  }
-
-  SlotSpanMetadata* new_active_slot_spans_head = nullptr;
-  SlotSpanMetadata* new_active_slot_spans_tail = nullptr;
-
-  SlotSpanMetadata* next_slot_span;
-  for (; slot_span; slot_span = next_slot_span) {
-    next_slot_span = slot_span->next_slot_span;
-
-    if (slot_span->is_active()) {
-      // Ordering in the active slot span list matters, don't reverse it.
-      if (!new_active_slot_spans_head) {
-        new_active_slot_spans_head = slot_span;
-      }
-      if (new_active_slot_spans_tail) {
-        new_active_slot_spans_tail->next_slot_span = slot_span;
-      }
-      new_active_slot_spans_tail = slot_span;
-      slot_span->next_slot_span = nullptr;
-    } else if (slot_span->is_empty()) {
-      // For the empty and decommitted lists, LIFO ordering makes sense (since
-      // it would lead to reusing memory which has been touched relatively
-      // recently, which only matters for committed spans though).
-      slot_span->next_slot_span = empty_slot_spans_head;
-      empty_slot_spans_head = slot_span;
-    } else if (slot_span->is_decommitted()) {
-      slot_span->next_slot_span = decommitted_slot_spans_head;
-      decommitted_slot_spans_head = slot_span;
-    } else {
-      // Full slot spans are not tracked, just accounted for.
-      PA_DCHECK(slot_span->is_full());
-      slot_span->marked_full = 1;
-      ++num_full_slot_spans;
-      PA_CHECK(num_full_slot_spans);  // Overflow.
-      slot_span->next_slot_span = nullptr;
-    }
-  }
-
-  if (!new_active_slot_spans_head) {
-    new_active_slot_spans_head =
-        SlotSpanMetadata::get_sentinel_slot_span_non_const();
-  }
-  active_slot_spans_head = new_active_slot_spans_head;
-}
-
-void PartitionBucket::SortSlotSpanFreelists() {
-  for (auto* slot_span = active_slot_spans_head; slot_span;
-       slot_span = slot_span->next_slot_span) {
-    // No need to sort the freelist if it's already sorted. Note that if the
-    // freelist is sorted, this means that it didn't change at all since the
-    // last call. This may be a good signal to shrink it if possible (if an
-    // entire OS page is free, we can decommit it).
-    //
-    // Besides saving CPU, this also avoids touching memory of fully idle slot
-    // spans, which may require paging.
-    if (slot_span->num_allocated_slots > 0 &&
-        !slot_span->freelist_is_sorted()) {
-      slot_span->SortFreelist();
-    }
-  }
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool CompareSlotSpans(SlotSpanMetadata* a, SlotSpanMetadata* b) {
-  auto criteria_tuple = [](SlotSpanMetadata const* a) {
-    size_t freelist_length = a->GetFreelistLength();
-    // The criteria are, in order (hence the lexicographic comparison below):
-    // 1. Prefer slot spans with freelist entries. The ones without freelist
-    //    entries would be skipped in SetNewActiveSlotSpan() anyway.
-    // 2. Then the ones with the fewest freelist entries. They are either close
-    //    to being full (for the provisioned memory), or close to being pushed
-    //    at the end of the list (since they would not have freelist entries
-    //    anymore, and would either fall into the first case, or be skipped by
-    //    SetNewActiveSlotSpan()).
-    // 3. The ones with the fewest unprovisioned slots, meaning that they are
-    //    close to being completely full.
-    //
-    // Note that this sorting order is not necessarily the best one when slot
-    // spans are partially provisioned. From local testing, in steady-state,
-    // most slot spans are entirely provisioned (or decommitted), which may be a
-    // consequence of the lack of partial slot span decommit, or of fairly
-    // effective fragmentation avoidance heuristics. Make sure to evaluate
-    // whether an alternative sorting order (sorting according to freelist size
-    // + unprovisioned slots) makes more sense.
-    return std::tuple<bool, size_t, size_t>{
-        freelist_length == 0, freelist_length, a->num_unprovisioned_slots};
-  };
-
-  return criteria_tuple(a) < criteria_tuple(b);
-}
-
-void PartitionBucket::SortActiveSlotSpans() {
-  // Sorting up to |kMaxSlotSpansToSort| slot spans. This is capped for two
-  // reasons:
-  // - Limiting execution time
-  // - Current code cannot allocate.
-  //
-  // In practice though, it's rare to have that many active slot spans.
-  SlotSpanMetadata* active_spans_array[kMaxSlotSpansToSort];
-  size_t index = 0;
-  SlotSpanMetadata* overflow_spans_start = nullptr;
-
-  for (auto* slot_span = active_slot_spans_head; slot_span;
-       slot_span = slot_span->next_slot_span) {
-    if (index < kMaxSlotSpansToSort) {
-      active_spans_array[index++] = slot_span;
-    } else {
-      // Starting from this one, not sorting the slot spans.
-      overflow_spans_start = slot_span;
-      break;
-    }
-  }
-
-  // We sort the active slot spans so that allocations are preferably serviced
-  // from the fullest ones. This way we hope to reduce fragmentation by keeping
-  // as few slot spans as full as possible.
-  //
-  // With perfect information on allocation lifespan, we would be able to pack
-  // allocations and get almost no fragmentation. This is obviously not the
-  // case, so we have partially full SlotSpans. Nevertheless, as a heuristic we
-  // want to:
-  // - Keep almost-empty slot spans as empty as possible
-  // - Keep mostly-full slot spans as full as possible
-  //
-  // The first part is done in the hope that future free()s will make these
-  // slot spans completely empty, allowing us to reclaim them. To that end, sort
-  // SlotSpans periodically so that the fullest ones are preferred.
-  //
-  // std::sort() is not completely guaranteed to never allocate memory.
-  // However, it is not allowed to throw std::bad_alloc, which constrains the
-  // implementation. In addition, this is protected by the reentrancy guard, so
-  // we would detect such an allocation.
-  std::sort(active_spans_array, active_spans_array + index, CompareSlotSpans);
-
-  active_slot_spans_head = overflow_spans_start;
-
-  // Reverse order, since we insert at the head of the list.
-  for (int i = index - 1; i >= 0; i--) {
-    if (active_spans_array[i] == SlotSpanMetadata::get_sentinel_slot_span()) {
-      // The sentinel is const, don't try to write to it.
-      PA_DCHECK(active_slot_spans_head == nullptr);
-    } else {
-      active_spans_array[i]->next_slot_span = active_slot_spans_head;
-    }
-    active_slot_spans_head = active_spans_array[i];
-  }
-}
-
-uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
-                                         unsigned int flags,
-                                         size_t raw_size,
-                                         size_t slot_span_alignment,
-                                         bool* is_already_zeroed) {
-  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
-            base::bits::IsPowerOfTwo(slot_span_alignment));
-
-  // The slow path is called when the freelist is empty. The only exception is
-  // when a higher-order alignment is requested, in which case the freelist
-  // logic is bypassed and we go directly for slot span allocation.
-  bool allocate_aligned_slot_span = slot_span_alignment > PartitionPageSize();
-  PA_DCHECK(!active_slot_spans_head->get_freelist_head() ||
-            allocate_aligned_slot_span);
-
-  SlotSpanMetadata* new_slot_span = nullptr;
-  // |new_slot_span->bucket| will always be |this|, except when |this| is the
-  // sentinel bucket, which is used to signal a direct mapped allocation.  In
-  // this case |new_bucket| will be set properly later. This avoids a read for
-  // most allocations.
-  PartitionBucket* new_bucket = this;
-  *is_already_zeroed = false;
-
-  // For the PartitionRoot::Alloc() API, we have a bunch of buckets
-  // marked as special cases. We bounce them through to the slow path so that
-  // we can still have a blazing fast hot path due to lack of corner-case
-  // branches.
-  //
-  // Note: The ordering of the conditionals matters! In particular,
-  // SetNewActiveSlotSpan() has a side effect even when returning false: it
-  // sweeps the active list and may move things into the empty or decommitted
-  // lists, which affects the subsequent conditional.
-  if (PA_UNLIKELY(is_direct_mapped())) {
-    PA_DCHECK(raw_size > kMaxBucketed);
-    PA_DCHECK(this == &root->sentinel_bucket);
-    PA_DCHECK(active_slot_spans_head ==
-              SlotSpanMetadata::get_sentinel_slot_span());
-
-    // No fast path for direct-mapped allocations.
-    if (flags & AllocFlags::kFastPathOrReturnNull) {
-      return 0;
-    }
-
-    new_slot_span =
-        PartitionDirectMap(root, flags, raw_size, slot_span_alignment);
-    if (new_slot_span) {
-      new_bucket = new_slot_span->bucket;
-    }
-    // Memory from PageAllocator is always zeroed.
-    *is_already_zeroed = true;
-  } else if (PA_LIKELY(!allocate_aligned_slot_span && SetNewActiveSlotSpan())) {
-    // First, did we find an active slot span in the active list?
-    new_slot_span = active_slot_spans_head;
-    PA_DCHECK(new_slot_span->is_active());
-  } else if (PA_LIKELY(!allocate_aligned_slot_span &&
-                       (empty_slot_spans_head != nullptr ||
-                        decommitted_slot_spans_head != nullptr))) {
-    // Second, look in our lists of empty and decommitted slot spans.
-    // Check empty slot spans first, which are preferred, but beware that an
-    // empty slot span might have been decommitted.
-    while (PA_LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
-      PA_DCHECK(new_slot_span->bucket == this);
-      PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
-      empty_slot_spans_head = new_slot_span->next_slot_span;
-      // Accept the empty slot span unless it got decommitted.
-      if (new_slot_span->get_freelist_head()) {
-        new_slot_span->next_slot_span = nullptr;
-        new_slot_span->ToSuperPageExtent()
-            ->IncrementNumberOfNonemptySlotSpans();
-
-        // Re-activating an empty slot span, update accounting.
-        size_t dirty_size = base::bits::AlignUp(
-            new_slot_span->GetProvisionedSize(), SystemPageSize());
-        PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
-        root->empty_slot_spans_dirty_bytes -= dirty_size;
-
-        break;
-      }
-      PA_DCHECK(new_slot_span->is_decommitted());
-      new_slot_span->next_slot_span = decommitted_slot_spans_head;
-      decommitted_slot_spans_head = new_slot_span;
-    }
-    if (PA_UNLIKELY(!new_slot_span) &&
-        PA_LIKELY(decommitted_slot_spans_head != nullptr)) {
-      // Commit can be expensive, don't do it.
-      if (flags & AllocFlags::kFastPathOrReturnNull) {
-        return 0;
-      }
-
-      new_slot_span = decommitted_slot_spans_head;
-      PA_DCHECK(new_slot_span->bucket == this);
-      PA_DCHECK(new_slot_span->is_decommitted());
-      decommitted_slot_spans_head = new_slot_span->next_slot_span;
-
-      // If lazy commit is enabled, pages will be recommitted when provisioning
-      // slots, in ProvisionMoreSlotsAndAllocOne(), not here.
-      if (!kUseLazyCommit) {
-        uintptr_t slot_span_start =
-            SlotSpanMetadata::ToSlotSpanStart(new_slot_span);
-        // Since lazy commit isn't used, we have a guarantee that all slot span
-        // pages have been previously committed, and then decommitted using
-        // PageAccessibilityDisposition::kAllowKeepForPerf, so use the
-        // same option as an optimization.
-        // TODO(lizeb): Handle commit failure.
-        root->RecommitSystemPagesForData(
-            slot_span_start, new_slot_span->bucket->get_bytes_per_span(),
-            PageAccessibilityDisposition::kAllowKeepForPerf,
-            slot_size <= kMaxMemoryTaggingSize);
-      }
-
-      new_slot_span->Reset();
-      *is_already_zeroed = DecommittedMemoryIsAlwaysZeroed();
-    }
-    PA_DCHECK(new_slot_span);
-  } else {
-    // Getting a new slot span is expensive, don't do it.
-    if (flags & AllocFlags::kFastPathOrReturnNull) {
-      return 0;
-    }
-
-    // Third. If we get here, we need a brand new slot span.
-    // TODO(bartekn): For single-slot slot spans, we can use rounded raw_size
-    // as slot_span_committed_size.
-    new_slot_span = AllocNewSlotSpan(root, flags, slot_span_alignment);
-    // New memory from PageAllocator is always zeroed.
-    *is_already_zeroed = true;
-  }
-
-  // Bail if we had a memory allocation failure.
-  if (PA_UNLIKELY(!new_slot_span)) {
-    PA_DCHECK(active_slot_spans_head ==
-              SlotSpanMetadata::get_sentinel_slot_span());
-    if (flags & AllocFlags::kReturnNull) {
-      return 0;
-    }
-    // See comment in PartitionDirectMap() for unlocking.
-    ScopedUnlockGuard unlock{PartitionRootLock(root)};
-    root->OutOfMemory(raw_size);
-    PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
-  }
-
-  PA_DCHECK(new_bucket != &root->sentinel_bucket);
-  new_bucket->active_slot_spans_head = new_slot_span;
-  if (new_slot_span->CanStoreRawSize()) {
-    new_slot_span->SetRawSize(raw_size);
-  }
-
-  // If we found an active slot span with free slots, or an empty slot span, we
-  // have a usable freelist head.
-  if (PA_LIKELY(new_slot_span->get_freelist_head() != nullptr)) {
-    PartitionFreelistEntry* entry =
-        new_slot_span->PopForAlloc(new_bucket->slot_size);
-
-    // We may have set *is_already_zeroed to true above; make sure that the
-    // freelist entry doesn't contain data. Either way, it wouldn't be a good
-    // idea to let users see our internal data.
-    uintptr_t slot_start = entry->ClearForAllocation();
-    return slot_start;
-  }
-
-  // Otherwise, we need to provision more slots by committing more pages. Build
-  // the free list for the newly provisioned slots.
-  PA_DCHECK(new_slot_span->num_unprovisioned_slots);
-  return ProvisionMoreSlotsAndAllocOne(root, new_slot_span);
-}
-
-uintptr_t PartitionBucket::AllocNewSuperPageSpanForGwpAsan(
-    PartitionRoot* root,
-    size_t super_page_count,
-    unsigned int flags) {
-  return AllocNewSuperPageSpan(root, super_page_count, flags);
-}
-
-void PartitionBucket::InitializeSlotSpanForGwpAsan(
-    SlotSpanMetadata* slot_span) {
-  InitializeSlotSpan(slot_span);
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_bucket.h b/base/allocator/partition_allocator/partition_bucket.h
deleted file mode 100644
index 7dcccba..0000000
--- a/base/allocator/partition_allocator/partition_bucket.h
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_page_constants.h"
-
-namespace partition_alloc::internal {
-
-constexpr inline int kPartitionNumSystemPagesPerSlotSpanBits = 8;
-
-// Visible for testing.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
-                                      bool prefer_smaller_slot_spans);
-
-// Visible for testing.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool CompareSlotSpans(SlotSpanMetadata* a, SlotSpanMetadata* b);
-
-struct PartitionBucket {
-  // Accessed most in hot path => goes first. Only nullptr for invalid buckets,
-  // may be pointing to the sentinel.
-  SlotSpanMetadata* active_slot_spans_head;
-
-  SlotSpanMetadata* empty_slot_spans_head;
-  SlotSpanMetadata* decommitted_slot_spans_head;
-  uint32_t slot_size;
-  uint32_t num_system_pages_per_slot_span
-      : kPartitionNumSystemPagesPerSlotSpanBits;
-  uint32_t num_full_slot_spans : 24;
-
-  // `slot_size_reciprocal` is used to improve the performance of
-  // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
-  // chosen to provide the desired accuracy. As a result, we can replace a slow
-  // integer division (or modulo) operation with a pair of multiplication and a
-  // bit shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
-  uint64_t slot_size_reciprocal;
-
-  // This is `M` from the formula above. For accurate results, both `value` and
-  // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
-  // than `2 ** (M / 2)`. On the other hand, the result of the expression
-  // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
-  static constexpr uint64_t kReciprocalShift = 42;
-  static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
-  static_assert(
-      kMaxBucketed < (1 << (kReciprocalShift / 2)),
-      "GetSlotOffset may produce an incorrect result when kMaxBucketed is too "
-      "large.");
-
-  static constexpr size_t kMaxSlotSpansToSort = 200;
-
-  // Public API.
-  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void Init(uint32_t new_slot_size);
-
-  // Sets |is_already_zeroed| to true if the allocation was satisfied by
-  // requesting (a) new page(s) from the operating system, or false otherwise.
-  // This enables an optimization for when callers use
-  // |AllocFlags::kZeroFill|: there is no need to call memset on fresh
-  // pages; the OS has already zeroed them. (See
-  // |PartitionRoot::AllocFromBucket|.)
-  //
-  // Note the matching Free() functions are in SlotSpanMetadata.
-  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
-      SlowPathAlloc(PartitionRoot* root,
-                    unsigned int flags,
-                    size_t raw_size,
-                    size_t slot_span_alignment,
-                    bool* is_already_zeroed)
-          PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-
-  PA_ALWAYS_INLINE bool CanStoreRawSize() const {
-    // For direct-map as well as single-slot slot spans (recognized by checking
-    // against |MaxRegularSlotSpanSize()|), we have some spare metadata space in
-    // subsequent PartitionPage to store the raw size. It isn't only about
-    // metadata space, though: slot spans that have more than one slot can't
-    // store the raw size, because we wouldn't know which slot it applies to.
-    if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
-      return false;
-    }
-
-    PA_DCHECK((slot_size % SystemPageSize()) == 0);
-    PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
-
-    return true;
-  }
-
-  // Some buckets are pseudo-buckets, which are disabled because they would
-  // otherwise not fulfill alignment constraints.
-  PA_ALWAYS_INLINE bool is_valid() const {
-    return active_slot_spans_head != nullptr;
-  }
-  PA_ALWAYS_INLINE bool is_direct_mapped() const {
-    return !num_system_pages_per_slot_span;
-  }
-  PA_ALWAYS_INLINE size_t get_bytes_per_span() const {
-    // Cannot overflow, num_system_pages_per_slot_span is a bitfield, and 255
-    // pages fit in a size_t.
-    static_assert(kPartitionNumSystemPagesPerSlotSpanBits <= 8, "");
-    return static_cast<size_t>(num_system_pages_per_slot_span)
-           << SystemPageShift();
-  }
-  PA_ALWAYS_INLINE size_t get_slots_per_span() const {
-    size_t ret = GetSlotNumber(get_bytes_per_span());
-    PA_DCHECK(ret <= kMaxSlotsPerSlotSpan);
-    return ret;
-  }
-  // Returns a natural number of partition pages (calculated by
-  // ComputeSystemPagesPerSlotSpan()) to allocate from the current super page
-  // when the bucket runs out of slots.
-  PA_ALWAYS_INLINE size_t get_pages_per_slot_span() const {
-    // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
-    return (num_system_pages_per_slot_span +
-            (NumSystemPagesPerPartitionPage() - 1)) /
-           NumSystemPagesPerPartitionPage();
-  }
-
-  // This helper function scans a bucket's active slot span list for a suitable
-  // new active slot span.  When it finds a suitable new active slot span (one
-  // that has free slots and is not empty), it is set as the new active slot
-  // span. If there is no suitable new active slot span, the current active slot
-  // span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
-  // slot spans are scanned, they are tidied up according to their state. Empty
-  // slot spans are swept on to the empty list, decommitted slot spans on to the
-  // decommitted list and full slot spans are unlinked from any list.
-  //
-  // This is where the guts of the bucket maintenance are done!
-  bool SetNewActiveSlotSpan();
-
-  // Walks the entire active slot span list and performs regular maintenance,
-  // where empty, decommitted and full slot spans are moved to their
-  // steady-state place.
-  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void MaintainActiveList();
-
-  // Returns a slot number starting from the beginning of the slot span.
-  PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
-    // See the static assertion for `kReciprocalShift` above.
-    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
-    PA_DCHECK(slot_size <= kMaxBucketed);
-
-    const size_t offset_in_slot =
-        ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
-    PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);
-
-    return offset_in_slot;
-  }
-
-  // Sort the freelists of all slot spans.
-  void SortSlotSpanFreelists();
-  // Sort the active slot span list in ascending freelist length.
-  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SortActiveSlotSpans();
-
-  // We need `AllocNewSuperPageSpan` and `InitializeSlotSpan` to stay
-  // PA_ALWAYS_INLINE for speed, but we also need to use them from a separate
-  // compilation unit.
-  uintptr_t AllocNewSuperPageSpanForGwpAsan(PartitionRoot* root,
-                                            size_t super_page_count,
-                                            unsigned int flags)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-  void InitializeSlotSpanForGwpAsan(SlotSpanMetadata* slot_span);
-
- private:
-  // Allocates several consecutive super pages. Returns the address of the first
-  // super page.
-  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPageSpan(PartitionRoot* root,
-                                                   size_t super_page_count,
-                                                   unsigned int flags)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-  // Allocates a new slot span with size |num_partition_pages| from the
-  // current extent. Metadata within this slot span will be initialized.
-  // Returns nullptr on error.
-  PA_ALWAYS_INLINE SlotSpanMetadata* AllocNewSlotSpan(
-      PartitionRoot* root,
-      unsigned int flags,
-      size_t slot_span_alignment)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-
-  // Allocates a new super page from the current extent, if possible. All
-  // slot-spans will be in the decommitted state. Returns the address of the
-  // super page's payload, or 0 on error.
-  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot* root,
-                                               unsigned int flags)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-
-  // Each bucket allocates a slot span when it runs out of slots.
-  // A slot span's size is equal to get_pages_per_slot_span() number of
-  // partition pages. This function initializes all PartitionPage within the
-  // span to point to the first PartitionPage which holds all the metadata
-  // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
-  // as the owner of the span. It does NOT put the slots into the bucket's
-  // freelist.
-  PA_ALWAYS_INLINE void InitializeSlotSpan(SlotSpanMetadata* slot_span);
-
-  // Initializes a super page. Returns the address of the super page's payload.
-  PA_ALWAYS_INLINE uintptr_t InitializeSuperPage(PartitionRoot* root,
-                                                 uintptr_t super_page,
-                                                 uintptr_t requested_address)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-  // Commit 1 or more pages in |slot_span|, enough to get the next slot, which
-  // is returned by this function. If more slots fit into the committed pages,
-  // they'll be added to the free list of the slot span (note that next pointers
-  // are stored inside the slots).
-  // The free list must be empty when calling this function.
-  //
-  // If |slot_span| was freshly allocated, it must have been passed through
-  // InitializeSlotSpan() first.
-  PA_ALWAYS_INLINE uintptr_t
-  ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
-                                SlotSpanMetadata* slot_span)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
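
The deleted header's comments on slot_size_reciprocal and GetSlotNumber() describe replacing an integer division with a multiply-and-shift. Below is a minimal standalone sketch of that trick, assuming kReciprocalShift == 42 as in the header; it is illustrative only, not the PartitionAlloc implementation.

// Sketch: slot-number computation via reciprocal multiply-and-shift.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uint64_t kReciprocalShift = 42;

// (1 / slot_size) * 2^kReciprocalShift, rounded up so that the truncated
// multiply-shift never under-estimates the quotient.
constexpr uint64_t ComputeReciprocal(uint64_t slot_size) {
  return ((uint64_t{1} << kReciprocalShift) + slot_size - 1) / slot_size;
}

constexpr size_t SlotNumber(uint64_t offset, uint64_t reciprocal) {
  return static_cast<size_t>((offset * reciprocal) >> kReciprocalShift);
}

int main() {
  constexpr uint64_t kSlotSize = 48;
  constexpr uint64_t kReciprocal = ComputeReciprocal(kSlotSize);
  // Both operands stay below 2^(kReciprocalShift / 2), as the header's
  // static_assert requires, so the fast path must agree with plain division.
  for (uint64_t offset = 0; offset < (uint64_t{1} << 20); ++offset) {
    assert(SlotNumber(offset, kReciprocal) == offset / kSlotSize);
  }
  return 0;
}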
diff --git a/base/allocator/partition_allocator/partition_bucket_lookup.h b/base/allocator/partition_allocator/partition_bucket_lookup.h
deleted file mode 100644
index ccc9122..0000000
--- a/base/allocator/partition_allocator/partition_bucket_lookup.h
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-
-namespace partition_alloc::internal {
-
-// Don't use an anonymous namespace for the constants because it can inhibit
-// collapsing them together, even when they are tagged as inline.
-
-// Precalculate some shift and mask constants used in the hot path.
-// Example: malloc(41) == 101001 binary.
-// Order is 6: (1 << (6-1)) == 32 is the highest bit set.
-// order_index is the next three MSB == 010 == 2.
-// sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
-// for the sub_order_index).
-constexpr uint8_t OrderIndexShift(uint8_t order) {
-  if (order < kNumBucketsPerOrderBits + 1) {
-    return 0;
-  }
-
-  return order - (kNumBucketsPerOrderBits + 1);
-}
-
-constexpr size_t OrderSubIndexMask(uint8_t order) {
-  if (order == kBitsPerSizeT) {
-    return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
-  }
-
-  return ((static_cast<size_t>(1) << order) - 1) >>
-         (kNumBucketsPerOrderBits + 1);
-}
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-#define PA_BITS_PER_SIZE_T 64
-static_assert(kBitsPerSizeT == 64, "");
-#else
-#define PA_BITS_PER_SIZE_T 32
-static_assert(kBitsPerSizeT == 32, "");
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
-    OrderIndexShift(0),  OrderIndexShift(1),  OrderIndexShift(2),
-    OrderIndexShift(3),  OrderIndexShift(4),  OrderIndexShift(5),
-    OrderIndexShift(6),  OrderIndexShift(7),  OrderIndexShift(8),
-    OrderIndexShift(9),  OrderIndexShift(10), OrderIndexShift(11),
-    OrderIndexShift(12), OrderIndexShift(13), OrderIndexShift(14),
-    OrderIndexShift(15), OrderIndexShift(16), OrderIndexShift(17),
-    OrderIndexShift(18), OrderIndexShift(19), OrderIndexShift(20),
-    OrderIndexShift(21), OrderIndexShift(22), OrderIndexShift(23),
-    OrderIndexShift(24), OrderIndexShift(25), OrderIndexShift(26),
-    OrderIndexShift(27), OrderIndexShift(28), OrderIndexShift(29),
-    OrderIndexShift(30), OrderIndexShift(31), OrderIndexShift(32),
-#if PA_BITS_PER_SIZE_T == 64
-    OrderIndexShift(33), OrderIndexShift(34), OrderIndexShift(35),
-    OrderIndexShift(36), OrderIndexShift(37), OrderIndexShift(38),
-    OrderIndexShift(39), OrderIndexShift(40), OrderIndexShift(41),
-    OrderIndexShift(42), OrderIndexShift(43), OrderIndexShift(44),
-    OrderIndexShift(45), OrderIndexShift(46), OrderIndexShift(47),
-    OrderIndexShift(48), OrderIndexShift(49), OrderIndexShift(50),
-    OrderIndexShift(51), OrderIndexShift(52), OrderIndexShift(53),
-    OrderIndexShift(54), OrderIndexShift(55), OrderIndexShift(56),
-    OrderIndexShift(57), OrderIndexShift(58), OrderIndexShift(59),
-    OrderIndexShift(60), OrderIndexShift(61), OrderIndexShift(62),
-    OrderIndexShift(63), OrderIndexShift(64)
-#endif
-};
-
-inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
-    OrderSubIndexMask(0),  OrderSubIndexMask(1),  OrderSubIndexMask(2),
-    OrderSubIndexMask(3),  OrderSubIndexMask(4),  OrderSubIndexMask(5),
-    OrderSubIndexMask(6),  OrderSubIndexMask(7),  OrderSubIndexMask(8),
-    OrderSubIndexMask(9),  OrderSubIndexMask(10), OrderSubIndexMask(11),
-    OrderSubIndexMask(12), OrderSubIndexMask(13), OrderSubIndexMask(14),
-    OrderSubIndexMask(15), OrderSubIndexMask(16), OrderSubIndexMask(17),
-    OrderSubIndexMask(18), OrderSubIndexMask(19), OrderSubIndexMask(20),
-    OrderSubIndexMask(21), OrderSubIndexMask(22), OrderSubIndexMask(23),
-    OrderSubIndexMask(24), OrderSubIndexMask(25), OrderSubIndexMask(26),
-    OrderSubIndexMask(27), OrderSubIndexMask(28), OrderSubIndexMask(29),
-    OrderSubIndexMask(30), OrderSubIndexMask(31), OrderSubIndexMask(32),
-#if PA_BITS_PER_SIZE_T == 64
-    OrderSubIndexMask(33), OrderSubIndexMask(34), OrderSubIndexMask(35),
-    OrderSubIndexMask(36), OrderSubIndexMask(37), OrderSubIndexMask(38),
-    OrderSubIndexMask(39), OrderSubIndexMask(40), OrderSubIndexMask(41),
-    OrderSubIndexMask(42), OrderSubIndexMask(43), OrderSubIndexMask(44),
-    OrderSubIndexMask(45), OrderSubIndexMask(46), OrderSubIndexMask(47),
-    OrderSubIndexMask(48), OrderSubIndexMask(49), OrderSubIndexMask(50),
-    OrderSubIndexMask(51), OrderSubIndexMask(52), OrderSubIndexMask(53),
-    OrderSubIndexMask(54), OrderSubIndexMask(55), OrderSubIndexMask(56),
-    OrderSubIndexMask(57), OrderSubIndexMask(58), OrderSubIndexMask(59),
-    OrderSubIndexMask(60), OrderSubIndexMask(61), OrderSubIndexMask(62),
-    OrderSubIndexMask(63), OrderSubIndexMask(64)
-#endif
-};
-
-// The class used to generate the bucket lookup table at compile-time.
-class BucketIndexLookup final {
- public:
-  PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForNeutralBuckets(
-      size_t size);
-  PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForDenserBuckets(
-      size_t size);
-  PA_ALWAYS_INLINE static constexpr uint16_t GetIndex(size_t size);
-
-  constexpr BucketIndexLookup() {
-    constexpr uint16_t sentinel_bucket_index = kNumBuckets;
-
-    InitBucketSizes();
-
-    uint16_t* bucket_index_ptr = &bucket_index_lookup_[0];
-    uint16_t bucket_index = 0;
-
-    // Very small allocations, smaller than the first bucketed order ->
-    // everything goes to the first bucket.
-    for (uint8_t order = 0; order < kMinBucketedOrder; ++order) {
-      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
-        *bucket_index_ptr++ = 0;
-      }
-    }
-
-    // Normal buckets.
-    for (uint8_t order = kMinBucketedOrder; order <= kMaxBucketedOrder;
-         ++order) {
-      size_t size = static_cast<size_t>(1) << (order - 1);
-      size_t current_increment = size >> kNumBucketsPerOrderBits;
-      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
-        *bucket_index_ptr++ = bucket_index;
-
-        // For small sizes, buckets are close together (current_increment is
-        // small). For instance, for:
-        // - kAlignment == 16 (which is the case on most 64 bit systems)
-        // - kNumBucketsPerOrder == 4
-        //
-        // The 3 next buckets after 16 are {20, 24, 28}. None of these are a
-        // multiple of kAlignment, so they use the next bucket, that is 32 here.
-        if (size % kAlignment != 0) {
-          PA_DCHECK(bucket_sizes_[bucket_index] > size);
-          // Do not increment bucket_index, since in the example above
-          // current_size may be 20, and bucket_sizes_[bucket_index] == 32.
-        } else {
-          PA_DCHECK(bucket_sizes_[bucket_index] == size);
-          bucket_index++;
-        }
-
-        size += current_increment;
-      }
-    }
-
-    // Direct-mapped, and overflow.
-    for (uint8_t order = kMaxBucketedOrder + 1; order <= kBitsPerSizeT;
-         ++order) {
-      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
-        *bucket_index_ptr++ = sentinel_bucket_index;
-      }
-    }
-
-    // Smaller because some buckets are not valid due to alignment constraints.
-    PA_DCHECK(bucket_index < kNumBuckets);
-    PA_DCHECK(bucket_index_ptr == bucket_index_lookup_ + ((kBitsPerSizeT + 1) *
-                                                          kNumBucketsPerOrder));
-    // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
-    // which tries to overflow to a non-existent order.
-    *bucket_index_ptr = sentinel_bucket_index;
-  }
-  constexpr const size_t* bucket_sizes() const { return &bucket_sizes_[0]; }
-
- private:
-  constexpr void InitBucketSizes() {
-    size_t current_size = kSmallestBucket;
-    size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
-    size_t* bucket_size = &bucket_sizes_[0];
-    for (size_t i = 0; i < kNumBucketedOrders; ++i) {
-      for (size_t j = 0; j < kNumBucketsPerOrder; ++j) {
-        // All bucket sizes have to be multiples of kAlignment, skip otherwise.
-        if (current_size % kAlignment == 0) {
-          *bucket_size = current_size;
-          ++bucket_size;
-        }
-        current_size += current_increment;
-      }
-      current_increment <<= 1;
-    }
-
-    // The remaining buckets are invalid.
-    while (bucket_size < bucket_sizes_ + kNumBuckets) {
-      *(bucket_size++) = kInvalidBucketSize;
-    }
-  }
-
-  size_t bucket_sizes_[kNumBuckets]{};
-  // The bucket lookup table lets us map a size_t to a bucket quickly.
-  // The trailing +1 caters for the overflow case for very large allocation
-  // sizes.  It is one flat array instead of a 2D array because in the 2D
-  // world, we'd need to index array[blah][max+1] which risks undefined
-  // behavior.
-  uint16_t
-      bucket_index_lookup_[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1]{};
-};
-
-PA_ALWAYS_INLINE constexpr size_t RoundUpToPowerOfTwo(size_t size) {
-  const size_t n = 1 << base::bits::Log2Ceiling(static_cast<uint32_t>(size));
-  PA_DCHECK(size <= n);
-  return n;
-}
-
-PA_ALWAYS_INLINE constexpr size_t RoundUpSize(size_t size) {
-  const size_t next_power = RoundUpToPowerOfTwo(size);
-  const size_t prev_power = next_power >> 1;
-  PA_DCHECK(size <= next_power);
-  PA_DCHECK(prev_power < size);
-  if (size <= prev_power * 5 / 4) {
-    return prev_power * 5 / 4;
-  } else {
-    return next_power;
-  }
-}
-
-PA_ALWAYS_INLINE constexpr uint16_t RoundUpToOdd(uint16_t size) {
-  return (size % 2 == 0) + size;
-}
-
-// static
-PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
-    size_t size) {
-  // This forces the bucket table to be constant-initialized and immediately
-  // materialized in the binary.
-  constexpr BucketIndexLookup lookup{};
-  const size_t order =
-      kBitsPerSizeT -
-      static_cast<size_t>(base::bits::CountLeadingZeroBits(size));
-  // The order index is simply the next few bits after the most significant
-  // bit.
-  const size_t order_index =
-      (size >> kOrderIndexShift[order]) & (kNumBucketsPerOrder - 1);
-  // And if the remaining bits are non-zero we must bump the bucket up.
-  const size_t sub_order_index = size & kOrderSubIndexMask[order];
-  const uint16_t index =
-      lookup.bucket_index_lookup_[(order << kNumBucketsPerOrderBits) +
-                                  order_index + !!sub_order_index];
-  PA_DCHECK(index <= kNumBuckets);  // Last one is the sentinel bucket.
-  return index;
-}
-
-// static
-PA_ALWAYS_INLINE constexpr uint16_t
-BucketIndexLookup::GetIndexForNeutralBuckets(size_t size) {
-  const auto index = GetIndexForDenserBuckets(size);
-  // Below the minimum size, 4 and 8 bucket distributions are the same, since we
-  // can't fit any more buckets per order; this is due to alignment
-  // requirements: each bucket must be a multiple of the alignment, which
-  // implies the difference between buckets must also be a multiple of the
-  // alignment. In smaller orders, this limits the number of buckets we can
-  // have per order. So, for these small orders, we do not want to skip every
-  // second bucket.
-  //
-  // We also do not want to go above the index for the max bucketed size.
-  if (size > kAlignment * kNumBucketsPerOrder &&
-      index < GetIndexForDenserBuckets(kMaxBucketed)) {
-    return RoundUpToOdd(index);
-  } else {
-    return index;
-  }
-}
-
-// static
-PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
-  // For any order 2^N, under the denser bucket distribution ("Distribution A"),
-  // we have 4 evenly distributed buckets: 2^N, 1.25*2^N, 1.5*2^N, and 1.75*2^N.
-  // These numbers represent the maximum size of an allocation that can go into
-  // a given bucket.
-  //
-  // Under the less dense bucket distribution ("Distribution B"), we only have
-  // 2 buckets for the same order 2^N: 2^N and 1.25*2^N.
-  //
-  // Everything that would be mapped to the last two buckets of an order under
-  // Distribution A is instead mapped to the first bucket of the next order
-  // under Distribution B. The following diagram shows roughly what this looks
-  // like for the order starting from 2^10, as an example.
-  //
-  // A: ... | 2^10 | 1.25*2^10 | 1.5*2^10 | 1.75*2^10 | 2^11 | ...
-  // B: ... | 2^10 | 1.25*2^10 | -------- | --------- | 2^11 | ...
-  //
-  // So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
-  // Distribution A, but to the 2^11 bucket under Distribution B.
-  if (1 << 8 < size && size < kHighThresholdForAlternateDistribution) {
-    return BucketIndexLookup::GetIndexForNeutralBuckets(RoundUpSize(size));
-  }
-  return BucketIndexLookup::GetIndexForNeutralBuckets(size);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
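
The malloc(41) walkthrough at the top of the deleted header can be reproduced with a few lines of standalone code. The sketch below assumes kNumBucketsPerOrderBits == 3 (matching the "next three MSB" in that example) and is not the real lookup table; small orders, which the header handles separately, are ignored here.

// Sketch: order / order_index / sub_order_index split for size == 41.
#include <bit>      // std::bit_width (C++20)
#include <cstdint>
#include <cstdio>

constexpr unsigned kNumBucketsPerOrderBits = 3;

int main() {
  const uint64_t size = 41;  // 0b101001, as in the example above.
  // Order: position of the highest set bit, counted from 1. For 41 this is 6,
  // because (1 << (6-1)) == 32 is the highest bit set.
  const unsigned order = static_cast<unsigned>(std::bit_width(size));
  // Valid only for order >= kNumBucketsPerOrderBits + 1 (larger sizes).
  const unsigned shift = order - (kNumBucketsPerOrderBits + 1);
  const uint64_t order_index =
      (size >> shift) & ((uint64_t{1} << kNumBucketsPerOrderBits) - 1);  // 0b010 == 2
  const uint64_t sub_order_mask =
      ((uint64_t{1} << order) - 1) >> (kNumBucketsPerOrderBits + 1);     // 0b11
  const uint64_t sub_order_index = size & sub_order_mask;               // 0b01 == 1
  std::printf("order=%u order_index=%llu sub_order_index=%llu\n", order,
              static_cast<unsigned long long>(order_index),
              static_cast<unsigned long long>(sub_order_index));
  return 0;
}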
diff --git a/base/allocator/partition_allocator/partition_cookie.h b/base/allocator/partition_allocator/partition_cookie.h
deleted file mode 100644
index 7c6b4a2..0000000
--- a/base/allocator/partition_allocator/partition_cookie.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc::internal {
-
-static constexpr size_t kCookieSize = 16;
-
-// Cookie is enabled for debug builds.
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-inline constexpr unsigned char kCookieValue[kCookieSize] = {
-    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
-    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
-
-constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;
-
-PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) {
-  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
-    PA_DCHECK(*cookie_ptr == kCookieValue[i]);
-  }
-}
-
-PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {
-  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
-    *cookie_ptr = kCookieValue[i];
-  }
-}
-
-#else
-
-constexpr size_t kPartitionCookieSizeAdjustment = 0;
-
-PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* address) {}
-
-PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {}
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
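
The deleted cookie header writes a fixed 16-byte pattern next to the allocation and re-checks it later, so a linear overflow is caught in debug builds. A standalone sketch of that idea, with a hypothetical slot layout rather than the PartitionAlloc code paths:

// Sketch: redzone cookie that detects a one-byte overflow.
#include <cassert>
#include <cstddef>
#include <cstring>

constexpr std::size_t kCookieSize = 16;
constexpr unsigned char kCookieValue[kCookieSize] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};

void WriteCookie(unsigned char* p) { std::memcpy(p, kCookieValue, kCookieSize); }
bool CookieIntact(const unsigned char* p) {
  return std::memcmp(p, kCookieValue, kCookieSize) == 0;
}

int main() {
  unsigned char slot[64 + kCookieSize];
  WriteCookie(slot + 64);           // Cookie lives just past the usable region.
  std::memset(slot, 0, 64);         // In-bounds write: cookie survives.
  assert(CookieIntact(slot + 64));
  std::memset(slot, 0, 65);         // One-byte overflow clobbers the cookie.
  assert(!CookieIntact(slot + 64));
  return 0;
}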
diff --git a/base/allocator/partition_allocator/partition_dcheck_helper.cc b/base/allocator/partition_allocator/partition_dcheck_helper.cc
deleted file mode 100644
index dbd3492..0000000
--- a/base/allocator/partition_allocator/partition_dcheck_helper.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_dcheck_helper.h"
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span) {
-  PartitionRoot* root = PartitionRoot::FromSlotSpan(slot_span);
-  PA_DCHECK(root->inverted_self == ~reinterpret_cast<uintptr_t>(root));
-}
-
-void DCheckIsValidShiftFromSlotStart(internal::SlotSpanMetadata* slot_span,
-                                     uintptr_t shift_from_slot_start) {
-  PartitionRoot* root = PartitionRoot::FromSlotSpan(slot_span);
-  PA_DCHECK(shift_from_slot_start >= root->settings.extras_offset);
-  // Use <= to allow an address immediately past the object.
-  PA_DCHECK(shift_from_slot_start <=
-            root->settings.extras_offset + root->GetSlotUsableSize(slot_span));
-}
-
-void DCheckIsWithInSuperPagePayload(uintptr_t address) {
-  uintptr_t super_page = address & kSuperPageBaseMask;
-  auto* extent = PartitionSuperPageToExtent(super_page);
-  PA_DCHECK(IsWithinSuperPagePayload(address,
-                                     IsManagedByNormalBuckets(address) &&
-                                         extent->root->IsQuarantineAllowed()));
-}
-
-void DCheckIsValidObjectAddress(internal::SlotSpanMetadata* slot_span,
-                                uintptr_t object_addr) {
-  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
-  auto* root = PartitionRoot::FromSlotSpan(slot_span);
-  PA_DCHECK((object_addr - slot_span_start) % slot_span->bucket->slot_size ==
-            root->settings.extras_offset);
-}
-
-void DCheckNumberOfPartitionPagesInSuperPagePayload(
-    const PartitionSuperPageExtentEntry* entry,
-    const PartitionRoot* root,
-    size_t number_of_nonempty_slot_spans) {
-  uintptr_t super_page = base::bits::AlignDown(
-      reinterpret_cast<uintptr_t>(entry), kSuperPageAlignment);
-  size_t number_of_partition_pages_in_superpage_payload =
-      SuperPagePayloadSize(super_page, root->IsQuarantineAllowed()) /
-      PartitionPageSize();
-  PA_DCHECK(number_of_partition_pages_in_superpage_payload >
-            number_of_nonempty_slot_spans);
-}
-
-void DCheckRootLockIsAcquired(PartitionRoot* root) {
-  PartitionRootLock(root).AssertAcquired();
-}
-
-void DCheckRootLockOfSlotSpanIsAcquired(internal::SlotSpanMetadata* slot_span) {
-  DCheckRootLockIsAcquired(PartitionRoot::FromSlotSpan(slot_span));
-}
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_dcheck_helper.h b/base/allocator/partition_allocator/partition_dcheck_helper.h
deleted file mode 100644
index 846dbe7..0000000
--- a/base/allocator/partition_allocator/partition_dcheck_helper.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DCHECK_HELPER_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DCHECK_HELPER_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-namespace partition_alloc::internal {
-
-struct PartitionSuperPageExtentEntry;
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-// These macros allow the asserts below to have empty bodies in no-DCHECK()
-// builds, while avoiding issues with circular includes.
-#define PA_EMPTY_BODY_IF_DCHECK_IS_OFF()
-// Export symbol if dcheck-is-on. Because the body is not empty.
-#define PA_EXPORT_IF_DCHECK_IS_ON() PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-
-#else  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-// The static_assert() eats follow-on semicolons.
-#define PA_EMPTY_BODY_IF_DCHECK_IS_OFF() \
-  {}                                     \
-  static_assert(true)
-// inline if dcheck-is-off so it's no overhead.
-#define PA_EXPORT_IF_DCHECK_IS_ON() PA_ALWAYS_INLINE
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-PA_EXPORT_IF_DCHECK_IS_ON()
-void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span)
-    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
-
-PA_EXPORT_IF_DCHECK_IS_ON()
-void DCheckIsWithInSuperPagePayload(uintptr_t address)
-    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
-
-PA_EXPORT_IF_DCHECK_IS_ON()
-void DCheckNumberOfPartitionPagesInSuperPagePayload(
-    const PartitionSuperPageExtentEntry* entry,
-    const PartitionRoot* root,
-    size_t number_of_nonempty_slot_spans) PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
-
-PA_EXPORT_IF_DCHECK_IS_ON()
-void DCheckIsValidShiftFromSlotStart(internal::SlotSpanMetadata* slot_span,
-                                     size_t shift_from_slot_start)
-    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
-
-// Checks that the object is exactly |extras_offset| away from a multiple of
-// slot size (i.e. from a slot start).
-PA_EXPORT_IF_DCHECK_IS_ON()
-void DCheckIsValidObjectAddress(internal::SlotSpanMetadata* slot_span,
-                                uintptr_t object_addr)
-    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
-
-PA_EXPORT_IF_DCHECK_IS_ON()
-void DCheckRootLockIsAcquired(PartitionRoot* root)
-    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DCHECK_HELPER_H_
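
The PA_EMPTY_BODY_IF_DCHECK_IS_OFF() macro relies on a small trick: in no-DCHECK builds it expands to an empty function body followed by static_assert(true), so the caller's trailing semicolon still parses. A stripped-down sketch of that expansion, with hypothetical names:

// Sketch: the "empty body + static_assert" macro trick (no-DCHECK expansion).
#define EMPTY_BODY() \
  {}                 \
  static_assert(true)

// Expands to: void DCheckSomething(int) {} static_assert(true);
// The macro's static_assert "eats" the trailing ';', keeping the declaration
// syntax identical to the DCHECK-on build, where the macro expands to nothing.
void DCheckSomething(int) EMPTY_BODY();

int main() {
  DCheckSomething(0);  // Compiles to a no-op call.
  return 0;
}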
diff --git a/base/allocator/partition_allocator/partition_direct_map_extent.h b/base/allocator/partition_allocator/partition_direct_map_extent.h
deleted file mode 100644
index 302dac8..0000000
--- a/base/allocator/partition_allocator/partition_direct_map_extent.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-
-namespace partition_alloc::internal {
-
-struct PartitionDirectMapExtent {
-  PartitionDirectMapExtent* next_extent;
-  PartitionDirectMapExtent* prev_extent;
-  PartitionBucket* bucket;
-  // Size of the entire reservation, including guard pages, meta-data,
-  // padding for alignment before allocation, and padding for granularity at the
-  // end of the allocation.
-  size_t reservation_size;
-  // Padding between the first partition page (guard pages + meta-data) and
-  // the allocation.
-  size_t padding_for_alignment;
-
-  PA_ALWAYS_INLINE static PartitionDirectMapExtent* FromSlotSpan(
-      SlotSpanMetadata* slot_span);
-};
-
-// Metadata page for direct-mapped allocations.
-struct PartitionDirectMapMetadata {
-  // |page| and |subsequent_page| are needed to match the layout of normal
-  // buckets (specifically, of single-slot slot spans), with the caveat that
-  // only the first subsequent page is needed (for SubsequentPageMetadata) and
-  // others aren't used for direct map.
-  PartitionPage page;
-  PartitionPage subsequent_page;
-  // The following fields are metadata specific to direct map allocations. All
-  // these fields will easily fit into the precalculated metadata region,
-  // because a direct map allocation starts no further than half way through the
-  // super page.
-  PartitionBucket bucket;
-  PartitionDirectMapExtent direct_map_extent;
-
-  PA_ALWAYS_INLINE static PartitionDirectMapMetadata* FromSlotSpan(
-      SlotSpanMetadata* slot_span);
-};
-
-PA_ALWAYS_INLINE PartitionDirectMapMetadata*
-PartitionDirectMapMetadata::FromSlotSpan(SlotSpanMetadata* slot_span) {
-  PA_DCHECK(slot_span->bucket->is_direct_mapped());
-  // |*slot_span| is the first field of |PartitionDirectMapMetadata|, just cast.
-  auto* metadata = reinterpret_cast<PartitionDirectMapMetadata*>(slot_span);
-  PA_DCHECK(&metadata->page.slot_span_metadata == slot_span);
-  return metadata;
-}
-
-PA_ALWAYS_INLINE PartitionDirectMapExtent*
-PartitionDirectMapExtent::FromSlotSpan(SlotSpanMetadata* slot_span) {
-  PA_DCHECK(slot_span->bucket->is_direct_mapped());
-  return &PartitionDirectMapMetadata::FromSlotSpan(slot_span)
-              ->direct_map_extent;
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
diff --git a/base/allocator/partition_allocator/partition_freelist_entry.cc b/base/allocator/partition_allocator/partition_freelist_entry.cc
deleted file mode 100644
index 8c48687..0000000
--- a/base/allocator/partition_allocator/partition_freelist_entry.cc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_freelist_entry.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc::internal {
-
-void FreelistCorruptionDetected(size_t slot_size) {
-  // Make it visible in minidumps.
-  PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
-  PA_IMMEDIATE_CRASH();
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_freelist_entry.h b/base/allocator/partition_allocator/partition_freelist_entry.h
deleted file mode 100644
index 82a0f65..0000000
--- a/base/allocator/partition_allocator/partition_freelist_entry.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Indirection header that allows callers to use
-// `PartitionFreelistEntry` without regard for the implementation.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
-
-#include <cstddef>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-namespace partition_alloc::internal {
-
-[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT(
-    PARTITION_ALLOC) void FreelistCorruptionDetected(size_t slot_size);
-
-}  // namespace partition_alloc::internal
-
-#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
-// New header goes here
-#else
-#include "base/allocator/partition_allocator/encoded_freelist.h"  // IWYU pragma: export
-#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
diff --git a/base/allocator/partition_allocator/partition_lock.h b/base/allocator/partition_allocator/partition_lock.h
deleted file mode 100644
index f549c73..0000000
--- a/base/allocator/partition_allocator/partition_lock.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
-
-#include <atomic>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/spinning_mutex.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal {
-
-class PA_LOCKABLE Lock {
- public:
-  inline constexpr Lock();
-  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    LiftThreadIsolationScope lift_thread_isolation_restrictions;
-#endif
-
-    // When PartitionAlloc is malloc(), it can easily become reentrant. For
-    // instance, a DCHECK() triggers in external code (such as
-    // base::Lock). DCHECK() error message formatting allocates, which triggers
-    // PartitionAlloc, and then we get reentrancy, and in this case infinite
-    // recursion.
-    //
-    // To avoid that, crash quickly when the code becomes reentrant.
-    base::PlatformThreadRef current_thread = base::PlatformThread::CurrentRef();
-    if (!lock_.Try()) {
-      // The lock wasn't free when we tried to acquire it. This can be because
-      // another thread or *this* thread was holding it.
-      //
-      // If it's this thread holding it, then it cannot have become free in the
-      // meantime, and the current value of |owning_thread_ref_| is valid, as it
-      // was set by this thread. Assuming that writes to |owning_thread_ref_|
-      // are atomic, then if it's us, we are trying to recursively acquire a
-      // non-recursive lock.
-      //
-      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
-      // itself allocate. Meaning that without this code, a reentrancy issue
-      // hangs on Linux.
-      if (PA_UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
-                      current_thread)) {
-        // Trying to acquire lock while it's held by this thread: reentrancy
-        // issue.
-        PA_IMMEDIATE_CRASH();
-      }
-      lock_.Acquire();
-    }
-    owning_thread_ref_.store(current_thread, std::memory_order_release);
-#else
-    lock_.Acquire();
-#endif
-  }
-
-  void Release() PA_UNLOCK_FUNCTION() {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    LiftThreadIsolationScope lift_thread_isolation_restrictions;
-#endif
-    owning_thread_ref_.store(base::PlatformThreadRef(),
-                             std::memory_order_release);
-#endif
-    lock_.Release();
-  }
-  void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
-    lock_.AssertAcquired();
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    LiftThreadIsolationScope lift_thread_isolation_restrictions;
-#endif
-    PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
-              base::PlatformThread::CurrentRef());
-#endif
-  }
-
-  void Reinit() PA_UNLOCK_FUNCTION() {
-    lock_.AssertAcquired();
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    owning_thread_ref_.store(base::PlatformThreadRef(),
-                             std::memory_order_release);
-#endif
-    lock_.Reinit();
-  }
-
- private:
-  SpinningMutex lock_;
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Should in theory be protected by |lock_|, but we need to read it to detect
-  // recursive lock acquisition (and thus, the allocator becoming reentrant).
-  std::atomic<base::PlatformThreadRef> owning_thread_ref_ =
-      base::PlatformThreadRef();
-#endif
-};
-
-class PA_SCOPED_LOCKABLE ScopedGuard {
- public:
-  explicit ScopedGuard(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock)
-      : lock_(lock) {
-    lock_.Acquire();
-  }
-  ~ScopedGuard() PA_UNLOCK_FUNCTION() { lock_.Release(); }
-
- private:
-  Lock& lock_;
-};
-
-class PA_SCOPED_LOCKABLE ScopedUnlockGuard {
- public:
-  explicit ScopedUnlockGuard(Lock& lock) PA_UNLOCK_FUNCTION(lock)
-      : lock_(lock) {
-    lock_.Release();
-  }
-  ~ScopedUnlockGuard() PA_EXCLUSIVE_LOCK_FUNCTION() { lock_.Acquire(); }
-
- private:
-  Lock& lock_;
-};
-
-constexpr Lock::Lock() = default;
-
-// We want PartitionRoot to not have a global destructor, so this should not
-// have one.
-static_assert(std::is_trivially_destructible<Lock>::value, "");
-
-}  // namespace partition_alloc::internal
-
-namespace base {
-namespace internal {
-
-using PartitionLock = ::partition_alloc::internal::Lock;
-using PartitionAutoLock = ::partition_alloc::internal::ScopedGuard;
-
-}  // namespace internal
-}  // namespace base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
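
Lock::Acquire() above detects reentrancy by recording the owning thread and crashing fast when the same thread tries to re-acquire a held, non-recursive lock. A standalone sketch of that pattern, using std::mutex and std::thread::id as stand-ins for the PartitionAlloc types:

// Sketch: non-recursive lock that aborts on same-thread re-acquisition.
#include <atomic>
#include <cstdlib>
#include <mutex>
#include <thread>

class ReentrancyCheckedLock {
 public:
  void Acquire() {
    const std::thread::id me = std::this_thread::get_id();
    if (!mu_.try_lock()) {
      // Lock is contended. If *we* already hold it, this is recursive
      // acquisition: fail immediately instead of deadlocking (or, in the
      // allocator case, recursing forever while formatting an error).
      if (owner_.load(std::memory_order_acquire) == me) std::abort();
      mu_.lock();
    }
    owner_.store(me, std::memory_order_release);
  }
  void Release() {
    owner_.store(std::thread::id(), std::memory_order_release);
    mu_.unlock();
  }

 private:
  std::mutex mu_;
  std::atomic<std::thread::id> owner_{std::thread::id()};
};

int main() {
  ReentrancyCheckedLock lock;
  lock.Acquire();
  lock.Release();  // A second Acquire() on this thread while held would abort.
  return 0;
}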
diff --git a/base/allocator/partition_allocator/partition_lock_perftest.cc b/base/allocator/partition_allocator/partition_lock_perftest.cc
deleted file mode 100644
index ec57f4f..0000000
--- a/base/allocator/partition_allocator/partition_lock_perftest.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-#include <vector>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/timer/lap_timer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_result_reporter.h"
-
-namespace partition_alloc::internal {
-
-namespace {
-
-constexpr int kWarmupRuns = 1;
-constexpr ::base::TimeDelta kTimeLimit = ::base::Seconds(1);
-constexpr int kTimeCheckInterval = 100000;
-
-constexpr char kMetricPrefixLock[] = "PartitionLock.";
-constexpr char kMetricLockUnlockThroughput[] = "lock_unlock_throughput";
-constexpr char kMetricLockUnlockLatency[] = "lock_unlock_latency_ns";
-constexpr char kStoryBaseline[] = "baseline_story";
-constexpr char kStoryWithCompetingThread[] = "with_competing_thread";
-
-perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
-  perf_test::PerfResultReporter reporter(kMetricPrefixLock, story_name);
-  reporter.RegisterImportantMetric(kMetricLockUnlockThroughput, "runs/s");
-  reporter.RegisterImportantMetric(kMetricLockUnlockLatency, "ns");
-  return reporter;
-}
-
-class Spin : public base::PlatformThreadForTesting::Delegate {
- public:
-  Spin(Lock* lock, uint32_t* data)
-      : lock_(lock), data_(data), should_stop_(false) {}
-  ~Spin() override = default;
-
-  void ThreadMain() override {
-    started_count_++;
-    // Local variable to avoid "cache line ping-pong" from influencing the
-    // results.
-    uint32_t count = 0;
-    while (!should_stop_.load(std::memory_order_relaxed)) {
-      lock_->Acquire();
-      count++;
-      lock_->Release();
-    }
-
-    lock_->Acquire();
-    (*data_) += count;
-    lock_->Release();
-  }
-
-  // Called from another thread to stop the loop.
-  void Stop() { should_stop_ = true; }
-  int started_count() const { return started_count_; }
-
- private:
-  Lock* lock_;
-  uint32_t* data_ GUARDED_BY(lock_);
-  std::atomic<bool> should_stop_;
-  std::atomic<int> started_count_{0};
-};
-
-}  // namespace
-
-TEST(PartitionLockPerfTest, Simple) {
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  [[maybe_unused]] uint32_t data = 0;
-
-  Lock lock;
-
-  do {
-    lock.Acquire();
-    data += 1;
-    lock.Release();
-    timer.NextLap();
-  } while (!timer.HasTimeLimitExpired());
-
-  auto reporter = SetUpReporter(kStoryBaseline);
-  reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
-  reporter.AddResult(kMetricLockUnlockLatency, 1e9 / timer.LapsPerSecond());
-}
-
-TEST(PartitionLockPerfTest, WithCompetingThreads) {
-  uint32_t data = 0;
-
-  Lock lock;
-
-  // Starts a competing thread executing the same loop as this thread.
-  Spin thread_main(&lock, &data);
-  std::vector<base::PlatformThreadHandle> thread_handles;
-  constexpr int kThreads = 4;
-
-  for (int i = 0; i < kThreads; i++) {
-    base::PlatformThreadHandle thread_handle;
-    ASSERT_TRUE(base::PlatformThreadForTesting::Create(0, &thread_main,
-                                                       &thread_handle));
-    thread_handles.push_back(thread_handle);
-  }
-  // Wait for all the threads to start.
-  while (thread_main.started_count() != kThreads) {
-  }
-
-  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
-  do {
-    lock.Acquire();
-    data += 1;
-    lock.Release();
-    timer.NextLap();
-  } while (!timer.HasTimeLimitExpired());
-
-  thread_main.Stop();
-  for (int i = 0; i < kThreads; i++) {
-    base::PlatformThreadForTesting::Join(thread_handles[i]);
-  }
-
-  auto reporter = SetUpReporter(kStoryWithCompetingThread);
-  reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
-  reporter.AddResult(kMetricLockUnlockLatency, 1e9 / timer.LapsPerSecond());
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_lock_unittest.cc b/base/allocator/partition_allocator/partition_lock_unittest.cc
deleted file mode 100644
index 626bc4b..0000000
--- a/base/allocator/partition_allocator/partition_lock_unittest.cc
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal {
-
-TEST(PartitionAllocLockTest, Simple) {
-  Lock lock;
-  lock.Acquire();
-  lock.Release();
-}
-
-namespace {
-
-Lock g_lock;
-
-}  // namespace
-
-TEST(PartitionAllocLockTest, StaticLockStartsUnlocked) {
-  g_lock.Acquire();
-  g_lock.Release();
-}
-
-namespace {
-
-class ThreadDelegateForContended
-    : public base::PlatformThreadForTesting::Delegate {
- public:
-  explicit ThreadDelegateForContended(Lock& start_lock,
-                                      Lock& lock,
-                                      int iterations,
-                                      int& counter)
-      : start_lock_(start_lock),
-        lock_(lock),
-        iterations_(iterations),
-        counter_(counter) {}
-
-  void ThreadMain() override {
-    start_lock_.Acquire();
-    start_lock_.Release();
-
-    for (int i = 0; i < iterations_; i++) {
-      lock_.Acquire();
-      ++counter_;
-      lock_.Release();
-    }
-  }
-
- private:
-  Lock& start_lock_;
-  Lock& lock_;
-  const int iterations_;
-  int& counter_;
-};
-
-}  // namespace
-
-TEST(PartitionAllocLockTest, Contended) {
-  int counter = 0;  // *Not* atomic.
-  std::vector<internal::base::PlatformThreadHandle> thread_handles;
-  constexpr int iterations_per_thread = 1000000;
-  constexpr int num_threads = 4;
-
-  Lock lock;
-  Lock start_lock;
-
-  ThreadDelegateForContended delegate(start_lock, lock, iterations_per_thread,
-                                      counter);
-
-  start_lock.Acquire();  // Make sure that the threads compete, by waiting until
-                         // all of them have at least been created.
-  for (int i = 0; i < num_threads; ++i) {
-    base::PlatformThreadHandle handle;
-    base::PlatformThreadForTesting::Create(0, &delegate, &handle);
-    thread_handles.push_back(handle);
-  }
-
-  start_lock.Release();
-
-  for (int i = 0; i < num_threads; ++i) {
-    base::PlatformThreadForTesting::Join(thread_handles[i]);
-  }
-  EXPECT_EQ(iterations_per_thread * num_threads, counter);
-}
-
-namespace {
-
-class ThreadDelegateForSlowThreads
-    : public base::PlatformThreadForTesting::Delegate {
- public:
-  explicit ThreadDelegateForSlowThreads(Lock& start_lock,
-                                        Lock& lock,
-                                        int iterations,
-                                        int& counter)
-      : start_lock_(start_lock),
-        lock_(lock),
-        iterations_(iterations),
-        counter_(counter) {}
-
-  void ThreadMain() override {
-    start_lock_.Acquire();
-    start_lock_.Release();
-
-    for (int i = 0; i < iterations_; i++) {
-      lock_.Acquire();
-      ++counter_;
-      // Hold the lock for a while, to force futex()-based locks to sleep.
-      base::PlatformThread::Sleep(base::Milliseconds(1));
-      lock_.Release();
-    }
-  }
-
- private:
-  Lock& start_lock_;
-  Lock& lock_;
-  const int iterations_;
-  int& counter_;
-};
-
-}  // namespace
-
-TEST(PartitionAllocLockTest, SlowThreads) {
-  int counter = 0;  // *Not* atomic.
-  std::vector<base::PlatformThreadHandle> thread_handles;
-  constexpr int iterations_per_thread = 100;
-  constexpr int num_threads = 4;
-
-  Lock lock;
-  Lock start_lock;
-
-  ThreadDelegateForSlowThreads delegate(start_lock, lock, iterations_per_thread,
-                                        counter);
-
-  start_lock.Acquire();  // Make sure that the threads compete, by waiting until
-                         // all of them have at least been created.
-  for (int i = 0; i < num_threads; i++) {
-    base::PlatformThreadHandle handle;
-    base::PlatformThreadForTesting::Create(0, &delegate, &handle);
-    thread_handles.push_back(handle);
-  }
-
-  start_lock.Release();
-
-  for (int i = 0; i < num_threads; i++) {
-    base::PlatformThreadForTesting::Join(thread_handles[i]);
-  }
-  EXPECT_EQ(iterations_per_thread * num_threads, counter);
-}
-
-TEST(PartitionAllocLockTest, AssertAcquired) {
-  Lock lock;
-  lock.Acquire();
-  lock.AssertAcquired();
-  lock.Release();
-}
-
-// AssertAcquired() is only enforced with DCHECK()s.
-#if defined(GTEST_HAS_DEATH_TEST) && BUILDFLAG(PA_DCHECK_IS_ON)
-
-TEST(PartitionAllocLockTest, AssertAcquiredDeathTest) {
-  Lock lock;
-  EXPECT_DEATH(lock.AssertAcquired(), "");
-}
-
-namespace {
-
-class ThreadDelegateForAssertAcquiredAnotherThreadHoldsTheLock
-    : public base::PlatformThreadForTesting::Delegate {
- public:
-  explicit ThreadDelegateForAssertAcquiredAnotherThreadHoldsTheLock(Lock& lock)
-      : lock_(lock) {}
-
-  void ThreadMain() PA_NO_THREAD_SAFETY_ANALYSIS override { lock_.Acquire(); }
-
- private:
-  Lock& lock_;
-};
-
-}  // namespace
-
-TEST(PartitionAllocLockTest, AssertAcquiredAnotherThreadHoldsTheLock) {
-  Lock lock;
-  // PA_NO_THREAD_SAFETY_ANALYSIS: The checker rightfully points out that the
-  // lock is still held at the end of the function, which is what we want here.
-  ThreadDelegateForAssertAcquiredAnotherThreadHoldsTheLock delegate(lock);
-  base::PlatformThreadHandle handle;
-  base::PlatformThreadForTesting::Create(0, &delegate, &handle);
-  // Join before the test, otherwise some platforms' gtest have trouble with
-  // EXPECT_DEATH() and multiple live threads.
-  base::PlatformThreadForTesting::Join(handle);
-
-  EXPECT_DEATH(lock.AssertAcquired(), "");
-}
-
-#if BUILDFLAG(IS_APPLE)
-
-namespace {
-
-class ThreadDelegateForReinitInOtherThread
-    : public base::PlatformThreadForTesting::Delegate {
- public:
-  explicit ThreadDelegateForReinitInOtherThread(Lock& lock) : lock_(lock) {}
-
-  void ThreadMain() PA_NO_THREAD_SAFETY_ANALYSIS override {
-    lock_.Reinit();
-    lock_.Acquire();
-    lock_.Release();
-  }
-
- private:
-  Lock& lock_;
-};
-
-}  // namespace
-
-// On Apple OSes, it is not allowed to unlock a lock from another thread, so
-// we need to re-initialize it.
-TEST(PartitionAllocLockTest, ReinitInOtherThread) PA_NO_THREAD_SAFETY_ANALYSIS {
-  Lock lock;
-  lock.Acquire();
-
-  ThreadDelegateForReinitInOtherThread delegate(lock);
-  base::PlatformThreadHandle handle;
-  base::PlatformThreadForTesting::Create(0, &delegate, &handle);
-  base::PlatformThreadForTesting::Join(handle);
-}
-#endif  // BUILDFLAG(IS_APPLE)
-
-#endif  // defined(GTEST_HAS_DEATH_TEST) && BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_oom.cc b/base/allocator/partition_allocator/partition_oom.cc
deleted file mode 100644
index cafa119..0000000
--- a/base/allocator/partition_allocator/partition_oom.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_oom.h"
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal {
-
-OomFunction g_oom_handling_function = nullptr;
-
-PA_NOINLINE PA_NOT_TAIL_CALLED void PartitionExcessiveAllocationSize(
-    size_t size) {
-  PA_NO_CODE_FOLDING();
-  OOM_CRASH(size);
-}
-
-#if !defined(ARCH_CPU_64_BITS)
-PA_NOINLINE PA_NOT_TAIL_CALLED void
-PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
-  PA_NO_CODE_FOLDING();
-  OOM_CRASH(size);
-}
-
-[[noreturn]] PA_NOT_TAIL_CALLED PA_NOINLINE void
-PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
-  PA_NO_CODE_FOLDING();
-  OOM_CRASH(virtual_size);
-}
-
-#endif  // !defined(ARCH_CPU_64_BITS)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_oom.h b/base/allocator/partition_allocator/partition_oom.h
deleted file mode 100644
index 085d128..0000000
--- a/base/allocator/partition_allocator/partition_oom.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Holds functions for generating OOM errors from PartitionAlloc. This is
-// distinct from oom.h in that it is meant only for use in PartitionAlloc.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-using OomFunction = void (*)(size_t);
-
-namespace internal {
-
-// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
-extern OomFunction g_oom_handling_function;
-
-[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT(
-    PARTITION_ALLOC) void PartitionExcessiveAllocationSize(size_t size);
-
-#if !defined(ARCH_CPU_64_BITS)
-[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(
-    size_t size);
-[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLargeVirtualSize(
-    size_t virtual_size);
-#endif
-
-}  // namespace internal
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
diff --git a/base/allocator/partition_allocator/partition_page.cc b/base/allocator/partition_allocator/partition_page.cc
deleted file mode 100644
index 0890972..0000000
--- a/base/allocator/partition_allocator/partition_page.cc
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_page.h"
-
-#include <algorithm>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/tagging.h"
-
-namespace partition_alloc::internal {
-
-namespace {
-
-void UnmapNow(uintptr_t reservation_start,
-              size_t reservation_size,
-              pool_handle pool);
-
-PA_ALWAYS_INLINE void PartitionDirectUnmap(SlotSpanMetadata* slot_span) {
-  auto* root = PartitionRoot::FromSlotSpan(slot_span);
-  PartitionRootLock(root).AssertAcquired();
-  auto* extent = PartitionDirectMapExtent::FromSlotSpan(slot_span);
-
-  // Maintain the doubly-linked list of all direct mappings.
-  if (extent->prev_extent) {
-    PA_DCHECK(extent->prev_extent->next_extent == extent);
-    extent->prev_extent->next_extent = extent->next_extent;
-  } else {
-    root->direct_map_list = extent->next_extent;
-  }
-  if (extent->next_extent) {
-    PA_DCHECK(extent->next_extent->prev_extent == extent);
-    extent->next_extent->prev_extent = extent->prev_extent;
-  }
-
-  // The actual decommit is deferred below after releasing the lock.
-  root->DecreaseCommittedPages(slot_span->bucket->slot_size);
-
-  size_t reservation_size = extent->reservation_size;
-  PA_DCHECK(!(reservation_size & DirectMapAllocationGranularityOffsetMask()));
-  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reservation_size);
-  root->total_size_of_direct_mapped_pages -= reservation_size;
-
-  uintptr_t reservation_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
-  // The mapping may start at an unspecified location within a super page, but
-  // we always reserve memory aligned to super page size.
-  reservation_start = base::bits::AlignDown(reservation_start, kSuperPageSize);
-
-  // All the metadata have been updated above, in particular the mapping has
-  // been unlinked. We can safely release the memory outside the lock, which is
-  // important as decommitting memory can be expensive.
-  //
-  // This can create a fake "address space exhaustion" OOM, in the case where
-  // e.g. a large allocation is freed on a thread, and another large one is made
-  // from another *before* UnmapNow() has finished running. In this case the
-  // second one may not find enough space in the pool, and fail. This is
-  // expected to be very rare though, and likely preferable to holding the lock
-  // while releasing the address space.
-  ScopedUnlockGuard unlock{PartitionRootLock(root)};
-  ScopedSyscallTimer timer{root};
-  UnmapNow(reservation_start, reservation_size, root->ChoosePool());
-}
-
-}  // namespace
-
-PA_ALWAYS_INLINE void SlotSpanMetadata::RegisterEmpty() {
-  PA_DCHECK(is_empty());
-  auto* root = PartitionRoot::FromSlotSpan(this);
-  PartitionRootLock(root).AssertAcquired();
-
-  root->empty_slot_spans_dirty_bytes +=
-      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
-
-  ToSuperPageExtent()->DecrementNumberOfNonemptySlotSpans();
-
-  // If the slot span is already registered as empty, give it another life.
-  if (in_empty_cache_) {
-    PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
-    PA_DCHECK(root->global_empty_slot_span_ring[empty_cache_index_] == this);
-    root->global_empty_slot_span_ring[empty_cache_index_] = nullptr;
-  }
-
-  int16_t current_index = root->global_empty_slot_span_ring_index;
-  SlotSpanMetadata* slot_span_to_decommit =
-      root->global_empty_slot_span_ring[current_index];
-  // The slot span might well have been re-activated, filled up, etc. before we
-  // get around to looking at it here.
-  if (slot_span_to_decommit) {
-    slot_span_to_decommit->DecommitIfPossible(root);
-  }
-
-  // We put the empty slot span on our global list of "slot spans that were once
-  // empty", thus providing it a bit of breathing room to get re-used before we
-  // really free it. This reduces the number of system calls. Otherwise any
-  // free() from a single-slot slot span would lead to a syscall, for instance.
-  root->global_empty_slot_span_ring[current_index] = this;
-  empty_cache_index_ = current_index;
-  in_empty_cache_ = 1;
-  ++current_index;
-  if (current_index == root->global_empty_slot_span_ring_size) {
-    current_index = 0;
-  }
-  root->global_empty_slot_span_ring_index = current_index;
-
-  // Avoid wasting too much memory on empty slot spans. Note that we only divide
-  // by powers of two, since division can be very slow, and this path is taken
-  // for every single-slot slot span deallocation.
-  //
-  // Empty slot spans are also all decommitted with MemoryReclaimer, but it may
-  // never run, be delayed arbitrarily, and/or miss large memory spikes.
-  size_t max_empty_dirty_bytes =
-      root->total_size_of_committed_pages.load(std::memory_order_relaxed) >>
-      root->max_empty_slot_spans_dirty_bytes_shift;
-  if (root->empty_slot_spans_dirty_bytes > max_empty_dirty_bytes) {
-    root->ShrinkEmptySlotSpansRing(std::min(
-        root->empty_slot_spans_dirty_bytes / 2, max_empty_dirty_bytes));
-  }
-}
-// static
-const SlotSpanMetadata SlotSpanMetadata::sentinel_slot_span_;
-
-// static
-const SlotSpanMetadata* SlotSpanMetadata::get_sentinel_slot_span() {
-  return &sentinel_slot_span_;
-}
-
-// static
-SlotSpanMetadata* SlotSpanMetadata::get_sentinel_slot_span_non_const() {
-  return const_cast<SlotSpanMetadata*>(&sentinel_slot_span_);
-}
-
-SlotSpanMetadata::SlotSpanMetadata(PartitionBucket* bucket)
-    : bucket(bucket), can_store_raw_size_(bucket->CanStoreRawSize()) {}
-
-void SlotSpanMetadata::FreeSlowPath(size_t number_of_freed) {
-  DCheckRootLockIsAcquired(PartitionRoot::FromSlotSpan(this));
-  PA_DCHECK(this != get_sentinel_slot_span());
-
-  // The caller has already modified |num_allocated_slots|. It is a
-  // responsibility of this function to react to it, and update the state. We
-  // can get here only if the slot span is marked full and/or is now empty. Both
-  // are possible at the same time, which can happen when the caller lowered
-  // |num_allocated_slots| from "all" to 0 (common for single-slot spans). First
-  // execute the "is marked full" path, as it sets up |active_slot_spans_head|
-  // in a way later needed for the "is empty" path.
-  if (marked_full) {
-    // Direct map slot spans aren't added to any lists, hence never marked full.
-    PA_DCHECK(!bucket->is_direct_mapped());
-    // Double check that the slot span was full.
-    PA_DCHECK(num_allocated_slots ==
-              bucket->get_slots_per_span() - number_of_freed);
-    marked_full = 0;
-    // Fully used slot span became partially used. It must be put back on the
-    // non-full list. Also make it the current slot span to increase the
-    // chances of it being filled up again. The old current slot span will be
-    // the next slot span.
-    PA_DCHECK(!next_slot_span);
-    if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) {
-      next_slot_span = bucket->active_slot_spans_head;
-    }
-    bucket->active_slot_spans_head = this;
-    PA_CHECK(bucket->num_full_slot_spans);  // Underflow.
-    --bucket->num_full_slot_spans;
-  }
-
-  if (PA_LIKELY(num_allocated_slots == 0)) {
-    // Slot span became fully unused.
-    if (PA_UNLIKELY(bucket->is_direct_mapped())) {
-      PartitionDirectUnmap(this);
-      return;
-    }
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    freelist_head->CheckFreeList(bucket->slot_size);
-#endif
-    // If it's the current active slot span, change it. We bounce the slot span
-    // to the empty list as a force towards defragmentation.
-    if (PA_LIKELY(this == bucket->active_slot_spans_head)) {
-      bucket->SetNewActiveSlotSpan();
-    }
-    PA_DCHECK(bucket->active_slot_spans_head != this);
-
-    if (CanStoreRawSize()) {
-      SetRawSize(0);
-    }
-
-    RegisterEmpty();
-  }
-}
-
-void SlotSpanMetadata::Decommit(PartitionRoot* root) {
-  PartitionRootLock(root).AssertAcquired();
-  PA_DCHECK(is_empty());
-  PA_DCHECK(!bucket->is_direct_mapped());
-  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(this);
-  // If lazy commit is enabled, only provisioned slots are committed.
-  size_t dirty_size =
-      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
-  size_t size_to_decommit =
-      kUseLazyCommit ? dirty_size : bucket->get_bytes_per_span();
-
-  PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
-  root->empty_slot_spans_dirty_bytes -= dirty_size;
-
-  // A not-yet-decommitted slot span must have had at least 1 allocation.
-  PA_DCHECK(size_to_decommit > 0);
-  root->DecommitSystemPagesForData(
-      slot_span_start, size_to_decommit,
-      PageAccessibilityDisposition::kAllowKeepForPerf);
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  FreeSlotBitmapReset(slot_span_start, slot_span_start + size_to_decommit,
-                      bucket->slot_size);
-#endif
-
-  // We actually leave the decommitted slot span in the active list. We'll sweep
-  // it on to the decommitted list when we next walk the active list.
-  // Pulling this trick enables us to use a singly-linked list for all
-  // cases, which is critical in keeping the slot span metadata structure down
-  // to 32 bytes in size.
-  SetFreelistHead(nullptr);
-  num_unprovisioned_slots = 0;
-  PA_DCHECK(is_decommitted());
-  PA_DCHECK(bucket);
-}
-
-void SlotSpanMetadata::DecommitIfPossible(PartitionRoot* root) {
-  PartitionRootLock(root).AssertAcquired();
-  PA_DCHECK(in_empty_cache_);
-  PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
-  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
-  in_empty_cache_ = 0;
-  if (is_empty()) {
-    Decommit(root);
-  }
-}
-
-void SlotSpanMetadata::SortFreelist() {
-  std::bitset<kMaxSlotsPerSlotSpan> free_slots;
-  uintptr_t slot_span_start = ToSlotSpanStart(this);
-
-  size_t num_provisioned_slots =
-      bucket->get_slots_per_span() - num_unprovisioned_slots;
-  PA_CHECK(num_provisioned_slots <= kMaxSlotsPerSlotSpan);
-
-  size_t num_free_slots = 0;
-  size_t slot_size = bucket->slot_size;
-  for (PartitionFreelistEntry* head = freelist_head; head;
-       head = head->GetNext(slot_size)) {
-    ++num_free_slots;
-    size_t offset_in_slot_span = SlotStartPtr2Addr(head) - slot_span_start;
-    size_t slot_number = bucket->GetSlotNumber(offset_in_slot_span);
-    PA_DCHECK(slot_number < num_provisioned_slots);
-    free_slots[slot_number] = true;
-  }
-  PA_DCHECK(num_free_slots == GetFreelistLength());
-
-  // Empty or single-element list is always sorted.
-  if (num_free_slots > 1) {
-    PartitionFreelistEntry* back = nullptr;
-    PartitionFreelistEntry* head = nullptr;
-
-    for (size_t slot_number = 0; slot_number < num_provisioned_slots;
-         slot_number++) {
-      if (free_slots[slot_number]) {
-        uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
-        auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
-
-        if (!head) {
-          head = entry;
-        } else {
-          back->SetNext(entry);
-        }
-
-        back = entry;
-      }
-    }
-    SetFreelistHead(head);
-  }
-
-  freelist_is_sorted_ = true;
-}
-
-namespace {
-
-void UnmapNow(uintptr_t reservation_start,
-              size_t reservation_size,
-              pool_handle pool) {
-  PA_DCHECK(reservation_start && reservation_size > 0);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  if (pool == kBRPPoolHandle) {
-    // In 32-bit mode, the beginning of a reservation may be excluded from the
-    // BRP pool, so shift the pointer. Other pools don't have this logic.
-    PA_DCHECK(IsManagedByPartitionAllocBRPPool(
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-        reservation_start
-#else
-        reservation_start +
-        AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
-            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-        ));
-  } else
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  {
-    PA_DCHECK(pool == kRegularPoolHandle
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-              || pool == kThreadIsolatedPoolHandle
-#endif
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-              ||
-              (IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle)
-#endif
-    );
-    // Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
-    PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-              IsManagedByPartitionAllocThreadIsolatedPool(reservation_start) ||
-#endif
-              IsManagedByPartitionAllocConfigurablePool(reservation_start));
-  }
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-  PA_DCHECK((reservation_start & kSuperPageOffsetMask) == 0);
-  uintptr_t reservation_end = reservation_start + reservation_size;
-  auto* offset_ptr = ReservationOffsetPointer(reservation_start);
-  // Reset the offset table entries for the given memory before unreserving
-  // it. Since the memory has not yet been unreserved and is not available to
-  // other threads, the table entries for this memory are not modified by
-  // other threads either, so we can update them without a race condition.
-  uint16_t i = 0;
-  for (uintptr_t address = reservation_start; address < reservation_end;
-       address += kSuperPageSize) {
-    PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address));
-    PA_DCHECK(*offset_ptr == i++);
-    *offset_ptr++ = kOffsetTagNotAllocated;
-  }
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
-                                               reservation_size);
-#endif
-
-  // After resetting the table entries, unreserve and decommit the memory.
-  AddressPoolManager::GetInstance().UnreserveAndDecommit(
-      pool, reservation_start, reservation_size);
-}
-
-}  // namespace
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
deleted file mode 100644
index 6a36c60..0000000
--- a/base/allocator/partition_allocator/partition_page.h
+++ /dev/null
@@ -1,809 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/address_pool_manager_types.h"
-#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_dcheck_helper.h"
-#include "base/allocator/partition_allocator/partition_freelist_entry.h"
-#include "base/allocator/partition_allocator/partition_page_constants.h"
-#include "base/allocator/partition_allocator/partition_superpage_extent_entry.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
-#endif
-
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#endif
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(USE_STARSCAN)
-using AllocationStateMap =
-    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
-#endif
-
-// Metadata of the slot span.
-//
-// Some notes on slot span states. It can be in one of four major states:
-// 1) Active.
-// 2) Full.
-// 3) Empty.
-// 4) Decommitted.
-// An active slot span has available free slots, as well as allocated ones.
-// A full slot span has no free slots. An empty slot span has no allocated
-// slots, and a decommitted slot span is an empty one that had its backing
-// memory released back to the system.
-//
-// There are three linked lists tracking slot spans. The "active" list is an
-// approximation of a list of active slot spans. It is an approximation because
-// full, empty and decommitted slot spans may briefly be present in the list
-// until we next do a scan over it. The "empty" list holds mostly empty slot
-// spans, but may briefly hold decommitted ones too. The "decommitted" list
-// holds only decommitted slot spans.
-//
-// The significant slot span transitions are:
-// - Free() will detect when a full slot span has a slot freed and immediately
-//   return the slot span to the head of the active list.
-// - Free() will detect when a slot span is fully emptied. It _may_ add it to
-//   the empty list or it _may_ leave it on the active list until a future
-//   list scan.
-// - Alloc() _may_ scan the active page list in order to fulfil the request.
-//   If it does this, full, empty and decommitted slot spans encountered will be
-//   booted out of the active list. If there are no suitable active slot spans
-//   found, an empty or decommitted slot span (if one exists) will be pulled
-//   from the empty/decommitted list on to the active list.
-#pragma pack(push, 1)
-struct SlotSpanMetadata {
- private:
-  PartitionFreelistEntry* freelist_head = nullptr;
-
- public:
-  // TODO(lizeb): Make as many fields as possible private or const, to
-  // encapsulate things more clearly.
-  SlotSpanMetadata* next_slot_span = nullptr;
-  PartitionBucket* const bucket = nullptr;
-
-  // CHECK()ed in AllocNewSlotSpan().
-  // The maximum number of bits needed to cover all currently supported OSes.
-  static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
-  static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
-
-  // |marked_full| isn't equivalent to being full. Slot span is marked as full
-  // iff it isn't on the active slot span list (or any other list).
-  uint32_t marked_full : 1;
-  // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
-  // be further differentiated by checking existence of the freelist.
-  uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
-  uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
-
- private:
-  const uint32_t can_store_raw_size_ : 1;
-  uint32_t freelist_is_sorted_ : 1;
-  uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
-  // If |in_empty_cache_|==0, |empty_cache_index_| is undefined and mustn't be
-  // used.
-  uint16_t in_empty_cache_ : 1;
-  uint16_t empty_cache_index_ : kEmptyCacheIndexBits;  // < kMaxFreeableSpans.
-  uint16_t unused2_ : (16 - 1 - kEmptyCacheIndexBits);
-  // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
-  // in PartitionPage which has 2B worth of fields and must fit in 32B.
-
- public:
-  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-  explicit SlotSpanMetadata(PartitionBucket* bucket);
-
-  inline SlotSpanMetadata(const SlotSpanMetadata&);
-
-  // Public API
-  // Note the matching Alloc() functions are in PartitionPage.
-  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
-      size_t number_of_freed);
-  PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
-  PA_ALWAYS_INLINE void Free(uintptr_t ptr, PartitionRoot* root);
-  // Appends the passed freelist to the slot-span's freelist. Please note that
-  // the function doesn't increment the tags of the passed freelist entries,
-  // since FreeNoHooks() did it already.
-  PA_ALWAYS_INLINE void AppendFreeList(PartitionFreelistEntry* head,
-                                       PartitionFreelistEntry* tail,
-                                       size_t number_of_freed,
-                                       PartitionRoot* root);
-
-  void Decommit(PartitionRoot* root);
-  void DecommitIfPossible(PartitionRoot* root);
-
-  // Sorts the freelist in ascending addresses order.
-  void SortFreelist();
-  // Inserts the slot span into the empty ring, making space for the new slot
-  // span, and potentially shrinking the ring.
-  void RegisterEmpty();
-
-  // Pointer/address manipulation functions. These must be static as the input
-  // |slot_span| pointer may be the result of an offset calculation and
-  // therefore cannot be trusted. The objective of these functions is to
-  // sanitize this input.
-  PA_ALWAYS_INLINE static uintptr_t ToSlotSpanStart(
-      const SlotSpanMetadata* slot_span);
-  PA_ALWAYS_INLINE static SlotSpanMetadata* FromAddr(uintptr_t address);
-  PA_ALWAYS_INLINE static SlotSpanMetadata* FromSlotStart(uintptr_t slot_start);
-  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObject(void* object);
-  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerAddr(
-      uintptr_t address);
-  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);
-
-  PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* ToSuperPageExtent() const;
-
-  // Checks if it is feasible to store raw_size.
-  PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
-  // The caller is responsible for ensuring that raw_size can be stored before
-  // calling Set/GetRawSize.
-  PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
-  PA_ALWAYS_INLINE size_t GetRawSize() const;
-
-  PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
-    return freelist_head;
-  }
-  PA_ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);
-
-  // Returns the size of the region used within a slot. The used region
-  // comprises actual allocated data, extras, and possibly empty space in the
-  // middle.
-  PA_ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
-    // The returned size can be:
-    // - The slot size for small buckets.
-    // - Exact size needed to satisfy allocation (incl. extras), for large
-    //   buckets and direct-mapped allocations (see also the comment in
-    //   CanStoreRawSize() for more info).
-    if (PA_LIKELY(!CanStoreRawSize())) {
-      return bucket->slot_size;
-    }
-    return GetRawSize();
-  }
-
-  // This includes padding due to rounding done at allocation; we don't know the
-  // requested size at deallocation, so we use this in both places.
-  PA_ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
-    // This could be more precise for allocations where CanStoreRawSize()
-    // returns true (large allocations). However this is called for *every*
-    // allocation, so we don't want an extra branch there.
-    return bucket->slot_size;
-  }
-
-  // Returns the total size of the slots that are currently provisioned.
-  PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
-    size_t num_provisioned_slots =
-        bucket->get_slots_per_span() - num_unprovisioned_slots;
-    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
-    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
-    return provisioned_size;
-  }
-
-  // Return the number of entries in the freelist.
-  size_t GetFreelistLength() const {
-    size_t num_provisioned_slots =
-        bucket->get_slots_per_span() - num_unprovisioned_slots;
-    return num_provisioned_slots - num_allocated_slots;
-  }
-
-  PA_ALWAYS_INLINE void Reset();
-
-  // TODO(ajwong): Can this be made private?  https://crbug.com/787153
-  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-  static const SlotSpanMetadata* get_sentinel_slot_span();
-  // The sentinel is not supposed to be modified and hence we mark it as const
-  // under the hood. However, we often store it together with mutable metadata
-  // objects and need a non-const pointer.
-  // You can use this function for this case, but you need to ensure that the
-  // returned object will not be written to.
-  static SlotSpanMetadata* get_sentinel_slot_span_non_const();
-
-  // Slot span state getters.
-  PA_ALWAYS_INLINE bool is_active() const;
-  PA_ALWAYS_INLINE bool is_full() const;
-  PA_ALWAYS_INLINE bool is_empty() const;
-  PA_ALWAYS_INLINE bool is_decommitted() const;
-  PA_ALWAYS_INLINE bool in_empty_cache() const { return in_empty_cache_; }
-  PA_ALWAYS_INLINE bool freelist_is_sorted() const {
-    return freelist_is_sorted_;
-  }
-  PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }
-
- private:
-  // sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
-  // span in the active list. We could use nullptr, but in that case we need to
-  // add a null-check branch to the hot allocation path. We want to avoid that.
-  //
-  // Note, this declaration is kept in the header as opposed to an anonymous
-  // namespace so the getter can be fully inlined.
-  static const SlotSpanMetadata sentinel_slot_span_;
-  // For the sentinel.
-  inline constexpr SlotSpanMetadata() noexcept;
-};
-#pragma pack(pop)
-static_assert(sizeof(SlotSpanMetadata) <= kPageMetadataSize,
-              "SlotSpanMetadata must fit into a Page Metadata slot.");
-
-inline constexpr SlotSpanMetadata::SlotSpanMetadata() noexcept
-    : marked_full(0),
-      num_allocated_slots(0),
-      num_unprovisioned_slots(0),
-      can_store_raw_size_(false),
-      freelist_is_sorted_(true),
-      unused1_(0),
-      in_empty_cache_(0),
-      empty_cache_index_(0),
-      unused2_(0) {
-  (void)unused1_;
-  (void)unused2_;
-}
-
-inline SlotSpanMetadata::SlotSpanMetadata(const SlotSpanMetadata&) = default;
-
-// Metadata of a non-first partition page in a slot span.
-struct SubsequentPageMetadata {
-  // Raw size is the size needed to satisfy the allocation (requested size +
-  // extras). If available, it can be used to report better statistics or to
-  // bring protective cookie closer to the allocated memory.
-  //
-  // It can be used only if:
-  // - there is no more than one slot in the slot span (otherwise we wouldn't
-  //   know which slot the raw size applies to)
-  // - there is more than one partition page in the slot span (the metadata of
-  //   the first one is used to store slot information, but the second one is
-  //   available for extra information)
-  size_t raw_size;
-};
-
-// Each partition page has metadata associated with it. The metadata of the
-// first page of a slot span describes that slot span. If a slot span spans
-// more than 1 page, the page metadata may contain rudimentary additional
-// information.
-// "Pack" the union so that common page metadata still fits within
-// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
-#pragma pack(push, 1)
-struct PartitionPage {
-  union {
-    SlotSpanMetadata slot_span_metadata;
-
-    SubsequentPageMetadata subsequent_page_metadata;
-
-    // sizeof(PartitionPage) must always be:
-    // - a power of 2 (for fast modulo operations)
-    // - below kPageMetadataSize
-    //
-    // This makes sure that this is respected no matter the architecture.
-    char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
-  };
-
-  // The first PartitionPage of the slot span holds its metadata. This offset
-  // tells how many pages in from that first page we are.
-  // For direct maps, the first page metadata (that isn't super page extent
-  // entry) uses this field to tell how many pages to the right the direct map
-  // metadata starts.
-  //
-  // 6 bits is enough to represent all possible offsets, given that the smallest
-  // partition page is 16kiB and the offset won't exceed 1MiB.
-  static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
-  static constexpr uint16_t kMaxSlotSpanMetadataOffset =
-      (1 << kMaxSlotSpanMetadataBits) - 1;
-  uint8_t slot_span_metadata_offset : kMaxSlotSpanMetadataBits;
-
-  // |is_valid| tells whether the page is part of a slot span. If |false|,
-  // |has_valid_span_after_this| tells whether it's an unused region in between
-  // slot spans within the super page.
-  // Note, |is_valid| has been added for clarity, but if we ever need to save
-  // this bit, it can be inferred from:
-  //   |!slot_span_metadata_offset && slot_span_metadata->bucket|.
-  bool is_valid : 1;
-  bool has_valid_span_after_this : 1;
-  uint8_t unused;
-
-  PA_ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
-};
-#pragma pack(pop)
-static_assert(sizeof(PartitionPage) == kPageMetadataSize,
-              "PartitionPage must be able to fit in a metadata slot");
-
-// Certain functions rely on PartitionPage being either SlotSpanMetadata or
-// SubsequentPageMetadata, and therefore freely casting between each other.
-static_assert(offsetof(PartitionPage, slot_span_metadata) == 0, "");
-static_assert(offsetof(PartitionPage, subsequent_page_metadata) == 0, "");
-
-PA_ALWAYS_INLINE PartitionPage* PartitionSuperPageToMetadataArea(
-    uintptr_t super_page) {
-  // This can't be just any super page, but it has to be the first super page of
-  // the reservation, as we assume here that the metadata is near its beginning.
-  PA_DCHECK(IsReservationStart(super_page));
-  PA_DCHECK(!(super_page & kSuperPageOffsetMask));
-  // The metadata area is exactly one system page (the guard page) into the
-  // super page.
-  return reinterpret_cast<PartitionPage*>(super_page + SystemPageSize());
-}
-
-PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
-    const PartitionPage* page) {
-  return &(page + 1)->subsequent_page_metadata;
-}
-
-PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
-    PartitionPage* page) {
-  return &(page + 1)->subsequent_page_metadata;
-}
-
-PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* PartitionSuperPageToExtent(
-    uintptr_t super_page) {
-  // The very first entry of the metadata is the super page extent entry.
-  return reinterpret_cast<PartitionSuperPageExtentEntry*>(
-      PartitionSuperPageToMetadataArea(super_page));
-}
-
-#if BUILDFLAG(USE_STARSCAN)
-
-// Size that should be reserved for state bitmap (if present) inside a super
-// page. Elements of a super page are partition-page-aligned, hence the returned
-// size is a multiple of partition page size.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-ReservedStateBitmapSize() {
-  return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
-}
-
-// Size that should be committed for state bitmap (if present) inside a super
-// page. It is a multiple of system page size.
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-CommittedStateBitmapSize() {
-  return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
-}
-
-// Returns the address/pointer to the state bitmap in the super page. It's the
-// caller's responsibility to ensure that the bitmaps even exist.
-PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
-  PA_DCHECK(!(super_page % kSuperPageAlignment));
-  return super_page + PartitionPageSize() +
-         (IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
-                                               : 0);
-}
-
-PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
-    uintptr_t super_page) {
-  return reinterpret_cast<AllocationStateMap*>(
-      SuperPageStateBitmapAddr(super_page));
-}
-
-#else  // BUILDFLAG(USE_STARSCAN)
-
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-ReservedStateBitmapSize() {
-  return 0ull;
-}
-
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-PA_ALWAYS_INLINE uintptr_t
-SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
-                            bool with_quarantine) {
-  return PartitionPageSize() +
-         (is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
-         (with_quarantine ? ReservedStateBitmapSize() : 0);
-}
-
-PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
-                                                 bool with_quarantine) {
-  PA_DCHECK(!(super_page % kSuperPageAlignment));
-  return super_page +
-         SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page),
-                                     with_quarantine);
-}
-
-PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEndOffset() {
-  return kSuperPageSize - PartitionPageSize();
-}
-
-PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
-  PA_DCHECK(!(super_page % kSuperPageAlignment));
-  return super_page + SuperPagePayloadEndOffset();
-}
-
-PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
-                                             bool with_quarantine) {
-  return SuperPagePayloadEnd(super_page) -
-         SuperPagePayloadBegin(super_page, with_quarantine);
-}
-
-PA_ALWAYS_INLINE PartitionSuperPageExtentEntry*
-SlotSpanMetadata::ToSuperPageExtent() const {
-  uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
-  return PartitionSuperPageToExtent(super_page);
-}
-
-// Returns whether the pointer lies within the super page's payload area (i.e.
-// area devoted to slot spans). It doesn't check whether it's within a valid
-// slot span. It merely ensures it doesn't fall in a meta-data region that would
-// surely never contain user data.
-PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
-                                               bool with_quarantine) {
-  // Quarantine can only be enabled for normal buckets in the current code.
-  PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
-  uintptr_t super_page = address & kSuperPageBaseMask;
-  uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
-  uintptr_t payload_end = SuperPagePayloadEnd(super_page);
-  return address >= payload_start && address < payload_end;
-}
-
-// Converts from an address inside a super page into a pointer to the
-// PartitionPage object (within the super page's metadata) that describes the
-// partition page where |address| is located. |address| doesn't have to be
-// located within a valid (i.e. allocated) slot span, but must be within the
-// super page's payload area (i.e. area devoted to slot spans).
-//
-// While it is generally valid for |ptr| to be in the middle of an allocation,
-// care has to be taken with direct maps that span multiple super pages. This
-// function's behavior is undefined if |ptr| lies in a subsequent super page.
-PA_ALWAYS_INLINE PartitionPage* PartitionPage::FromAddr(uintptr_t address) {
-  uintptr_t super_page = address & kSuperPageBaseMask;
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  PA_DCHECK(IsReservationStart(super_page));
-  DCheckIsWithInSuperPagePayload(address);
-#endif
-
-  uintptr_t partition_page_index =
-      (address & kSuperPageOffsetMask) >> PartitionPageShift();
-  // Index 0 is invalid because it is the super page extent metadata and the
-  // last index is invalid because the whole PartitionPage is set as guard
-  // pages. This repeats part of the payload PA_DCHECK above, which also checks
-  // for other exclusions.
-  PA_DCHECK(partition_page_index);
-  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
-  return PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
-}
-
-// Converts from a pointer to the SlotSpanMetadata object (within a super
-// page's metadata) into a pointer to the beginning of the slot span. This
-// works on direct maps too.
-PA_ALWAYS_INLINE uintptr_t
-SlotSpanMetadata::ToSlotSpanStart(const SlotSpanMetadata* slot_span) {
-  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
-  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
-
-  // A valid |page| must be past the first guard System page and within
-  // the following metadata region.
-  PA_DCHECK(super_page_offset > SystemPageSize());
-  // Must be less than total metadata region.
-  PA_DCHECK(super_page_offset <
-            SystemPageSize() +
-                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
-  uintptr_t partition_page_index =
-      (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
-  // Index 0 is invalid because it is the super page extent metadata and the
-  // last index is invalid because the whole PartitionPage is set as guard
-  // pages.
-  PA_DCHECK(partition_page_index);
-  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
-  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
-  return super_page_base + (partition_page_index << PartitionPageShift());
-}
-
-// Converts an address inside a slot span into a pointer to the SlotSpanMetadata
-// object (within the super page's metadata) that describes the slot span
-// containing that slot.
-//
-// CAUTION! For direct-mapped allocation, |address| has to be within the first
-// partition page.
-PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromAddr(
-    uintptr_t address) {
-  auto* page = PartitionPage::FromAddr(address);
-  PA_DCHECK(page->is_valid);
-  // Partition pages in the same slot span share the same SlotSpanMetadata
-  // object (located in the first PartitionPage object of that span). Adjust
-  // for that.
-  page -= page->slot_span_metadata_offset;
-  PA_DCHECK(page->is_valid);
-  PA_DCHECK(!page->slot_span_metadata_offset);
-  auto* slot_span = &page->slot_span_metadata;
-  // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
-  DCheckIsValidSlotSpan(slot_span);
-  // For direct map, if |address| doesn't point within the first partition page,
-  // |slot_span_metadata_offset| will be 0, |page| won't get shifted, leaving
-  // |slot_size| at 0.
-  PA_DCHECK(slot_span->bucket->slot_size);
-  return slot_span;
-}
-
-// Like |FromAddr|, but asserts that |slot_start| indeed points to the
-// beginning of a slot. It doesn't check if the slot is actually allocated.
-//
-// This works on direct maps too.
-PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromSlotStart(
-    uintptr_t slot_start) {
-  auto* slot_span = FromAddr(slot_start);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Checks that the pointer is a multiple of slot size.
-  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
-  PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-  return slot_span;
-}
-
-// Like |FromAddr|, but asserts that |object| indeed points to the beginning of
-// an object. It doesn't check if the object is actually allocated.
-//
-// This works on direct maps too.
-PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObject(void* object) {
-  uintptr_t object_addr = ObjectPtr2Addr(object);
-  auto* slot_span = FromAddr(object_addr);
-  DCheckIsValidObjectAddress(slot_span, object_addr);
-  return slot_span;
-}
-
-// Like |FromAddr|, but asserts that |address| indeed points within an object.
-// It doesn't check if the object is actually allocated.
-//
-// CAUTION! For direct-mapped allocation, |address| has to be within the first
-// partition page.
-PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerAddr(
-    uintptr_t address) {
-  auto* slot_span = FromAddr(address);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Checks that the address is within the expected object boundaries.
-  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
-  uintptr_t shift_from_slot_start =
-      (address - slot_span_start) % slot_span->bucket->slot_size;
-  DCheckIsValidShiftFromSlotStart(slot_span, shift_from_slot_start);
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-  return slot_span;
-}
-
-PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerPtr(
-    void* ptr) {
-  return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
-}
-
-PA_ALWAYS_INLINE void SlotSpanMetadata::SetRawSize(size_t raw_size) {
-  PA_DCHECK(CanStoreRawSize());
-  auto* subsequent_page_metadata =
-      GetSubsequentPageMetadata(reinterpret_cast<PartitionPage*>(this));
-  subsequent_page_metadata->raw_size = raw_size;
-}
-
-PA_ALWAYS_INLINE size_t SlotSpanMetadata::GetRawSize() const {
-  PA_DCHECK(CanStoreRawSize());
-  const auto* subsequent_page_metadata =
-      GetSubsequentPageMetadata(reinterpret_cast<const PartitionPage*>(this));
-  return subsequent_page_metadata->raw_size;
-}
-
-PA_ALWAYS_INLINE void SlotSpanMetadata::SetFreelistHead(
-    PartitionFreelistEntry* new_head) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
-  // as well.
-  uintptr_t new_head_untagged = UntagPtr(new_head);
-  PA_DCHECK(!new_head ||
-            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
-                (new_head_untagged & kSuperPageBaseMask));
-#endif
-  freelist_head = new_head;
-  // Inserted something new in the freelist, assume that it is not sorted
-  // anymore.
-  freelist_is_sorted_ = false;
-}
-
-PA_ALWAYS_INLINE PartitionFreelistEntry* SlotSpanMetadata::PopForAlloc(
-    size_t size) {
-  // Not using bucket->slot_size directly as the compiler doesn't know that
-  // |bucket->slot_size| is the same as |size|.
-  PA_DCHECK(size == bucket->slot_size);
-  PartitionFreelistEntry* result = freelist_head;
-  // Not setting freelist_is_sorted_ to false since this doesn't destroy
-  // ordering.
-  freelist_head = freelist_head->GetNext(size);
-  num_allocated_slots++;
-  return result;
-}
-
-PA_ALWAYS_INLINE void SlotSpanMetadata::Free(uintptr_t slot_start,
-                                             PartitionRoot* root)
-    // PartitionRootLock() is not defined inside partition_page.h, but
-    // static analysis doesn't require the implementation.
-    PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
-  DCheckRootLockIsAcquired(root);
-  auto* entry = static_cast<internal::PartitionFreelistEntry*>(
-      SlotStartAddr2Ptr(slot_start));
-  // Catches an immediate double free.
-  PA_CHECK(entry != freelist_head);
-  // Look for double free one level deeper in debug.
-  PA_DCHECK(!freelist_head ||
-            entry != freelist_head->GetNext(bucket->slot_size));
-  entry->SetNext(freelist_head);
-  SetFreelistHead(entry);
-  // A best effort double-free check. Works only on empty slot spans.
-  PA_CHECK(num_allocated_slots);
-  --num_allocated_slots;
-  // If the span is marked full, or became empty, take the slow path to update
-  // internal state.
-  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
-    FreeSlowPath(1);
-  } else {
-    // All single-slot allocations must go through the slow path to
-    // correctly update the raw size.
-    PA_DCHECK(!CanStoreRawSize());
-  }
-}
-
-PA_ALWAYS_INLINE void SlotSpanMetadata::AppendFreeList(
-    PartitionFreelistEntry* head,
-    PartitionFreelistEntry* tail,
-    size_t number_of_freed,
-    PartitionRoot* root) PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  DCheckRootLockIsAcquired(root);
-  PA_DCHECK(!tail->GetNext(bucket->slot_size));
-  PA_DCHECK(number_of_freed);
-  PA_DCHECK(num_allocated_slots);
-  if (CanStoreRawSize()) {
-    PA_DCHECK(number_of_freed == 1);
-  }
-  {
-    size_t number_of_entries = 0;
-    for (auto* entry = head; entry;
-         entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
-      uintptr_t untagged_entry = UntagPtr(entry);
-      // Check that all entries belong to this slot span.
-      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
-      PA_DCHECK(untagged_entry <
-                ToSlotSpanStart(this) + bucket->get_bytes_per_span());
-    }
-    PA_DCHECK(number_of_entries == number_of_freed);
-  }
-#endif
-
-  tail->SetNext(freelist_head);
-  SetFreelistHead(head);
-  PA_DCHECK(num_allocated_slots >= number_of_freed);
-  num_allocated_slots -= number_of_freed;
-  // If the span is marked full, or became empty, take the slow path to update
-  // internal state.
-  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
-    FreeSlowPath(number_of_freed);
-  } else {
-    // All single-slot allocations must go through the slow path to
-    // correctly update the raw size.
-    PA_DCHECK(!CanStoreRawSize());
-  }
-}
-
-PA_ALWAYS_INLINE bool SlotSpanMetadata::is_active() const {
-  PA_DCHECK(this != get_sentinel_slot_span());
-  bool ret =
-      (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots));
-  if (ret) {
-    PA_DCHECK(!marked_full);
-    PA_DCHECK(num_allocated_slots < bucket->get_slots_per_span());
-  }
-  return ret;
-}
-
-PA_ALWAYS_INLINE bool SlotSpanMetadata::is_full() const {
-  PA_DCHECK(this != get_sentinel_slot_span());
-  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
-  if (ret) {
-    PA_DCHECK(!freelist_head);
-    PA_DCHECK(!num_unprovisioned_slots);
-    // May or may not be marked full, so don't check for that.
-  }
-  return ret;
-}
-
-PA_ALWAYS_INLINE bool SlotSpanMetadata::is_empty() const {
-  PA_DCHECK(this != get_sentinel_slot_span());
-  bool ret = (!num_allocated_slots && freelist_head);
-  if (ret) {
-    PA_DCHECK(!marked_full);
-  }
-  return ret;
-}
-
-PA_ALWAYS_INLINE bool SlotSpanMetadata::is_decommitted() const {
-  PA_DCHECK(this != get_sentinel_slot_span());
-  bool ret = (!num_allocated_slots && !freelist_head);
-  if (ret) {
-    PA_DCHECK(!marked_full);
-    PA_DCHECK(!num_unprovisioned_slots);
-    PA_DCHECK(!in_empty_cache_);
-  }
-  return ret;
-}
-
-PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
-  PA_DCHECK(is_decommitted());
-
-  size_t num_slots_per_span = bucket->get_slots_per_span();
-  PA_DCHECK(num_slots_per_span <= kMaxSlotsPerSlotSpan);
-  num_unprovisioned_slots = static_cast<uint32_t>(num_slots_per_span);
-  PA_DCHECK(num_unprovisioned_slots);
-
-  ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
-
-  next_slot_span = nullptr;
-}
-
-#if BUILDFLAG(USE_STARSCAN)
-// Returns the state bitmap from an address within a normal-bucket super page.
-// It's the caller's responsibility to ensure that the bitmap exists.
-PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
-  PA_DCHECK(IsManagedByNormalBuckets(address));
-  uintptr_t super_page = address & kSuperPageBaseMask;
-  return SuperPageStateBitmap(super_page);
-}
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-// Iterates over all slot spans in a super-page. |Callback| must return true if
-// early return is needed.
-template <typename Callback>
-void IterateSlotSpans(uintptr_t super_page,
-                      bool with_quarantine,
-                      Callback callback) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  PA_DCHECK(!(super_page % kSuperPageAlignment));
-  auto* extent_entry = PartitionSuperPageToExtent(super_page);
-  DCheckRootLockIsAcquired(extent_entry->root);
-#endif
-
-  using Page = PartitionPage;
-  using SlotSpan = SlotSpanMetadata;
-  auto* const first_page =
-      Page::FromAddr(SuperPagePayloadBegin(super_page, with_quarantine));
-  auto* const last_page =
-      Page::FromAddr(SuperPagePayloadEnd(super_page) - PartitionPageSize());
-  Page* page;
-  SlotSpan* slot_span;
-  for (page = first_page; page <= last_page;) {
-    PA_DCHECK(!page->slot_span_metadata_offset);  // Ensure slot span beginning.
-    if (!page->is_valid) {
-      if (page->has_valid_span_after_this) {
-        // The page doesn't represent a valid slot span, but there is another
-        // one somewhere after this. Keep iterating to find it.
-        ++page;
-        continue;
-      }
-      // There are currently no valid spans from here on. No need to iterate
-      // the rest of the super page.
-      break;
-    }
-    slot_span = &page->slot_span_metadata;
-    if (callback(slot_span)) {
-      return;
-    }
-    page += slot_span->bucket->get_pages_per_slot_span();
-  }
-  // Each super page must have at least one valid slot span.
-  PA_DCHECK(page > first_page);
-  // Just a quick check that the search ended at a valid slot span and there
-  // was no unnecessary iteration over gaps afterwards.
-  PA_DCHECK(page == reinterpret_cast<Page*>(slot_span) +
-                        slot_span->bucket->get_pages_per_slot_span());
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
diff --git a/base/allocator/partition_allocator/partition_page_constants.h b/base/allocator/partition_allocator/partition_page_constants.h
deleted file mode 100644
index f1e37c8..0000000
--- a/base/allocator/partition_allocator/partition_page_constants.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_CONSTANTS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_CONSTANTS_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
-// System page size is not a constant on Apple OSes, but is either 4 or 16kiB
-// (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
-// PartitionPageSize() is 4 times the OS page size.
-static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket;
-#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-// System page size can be 4, 16, or 64 kiB on Linux on arm64. 64 kiB is
-// currently (kMaxSlotsPerSlotSpanBits == 13) not supported by the code,
-// so we use the 16 kiB maximum (64 kiB will crash).
-static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket;
-#else
-// A slot span can "span" multiple PartitionPages, but then its slot size is
-// larger, so it doesn't have as many slots.
-static constexpr size_t kMaxSlotsPerSlotSpan =
-    PartitionPageSize() / kSmallestBucket;
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/partition_ref_count.h b/base/allocator/partition_allocator/partition_ref_count.h
deleted file mode 100644
index 00ade40..0000000
--- a/base/allocator/partition_allocator/partition_ref_count.h
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
-
-#include <atomic>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_MAC)
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-#endif  // BUILDFLAG(IS_MAC)
-
-namespace partition_alloc::internal {
-
-// Aligns up (on 8B boundary) and returns `ref_count_size` if needed.
-// *  Known to be needed on MacOS 13: https://crbug.com/1378822.
-// *  Thought to be needed on MacOS 14: https://crbug.com/1457756.
-// *  No-op everywhere else.
-//
-// Placed outside `BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
-// intentionally to accommodate usage in contexts also outside
-// this gating.
-PA_ALWAYS_INLINE size_t AlignUpRefCountSizeForMac(size_t ref_count_size) {
-#if BUILDFLAG(IS_MAC)
-  if (internal::base::mac::IsOS13() || internal::base::mac::IsOS14()) {
-    return internal::base::bits::AlignUp(ref_count_size, 8);
-  }
-#endif  // BUILDFLAG(IS_MAC)
-  return ref_count_size;
-}
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-// Special-purpose atomic reference count class used by RawPtrBackupRefImpl.
-// The least significant bit of the count is reserved for tracking the liveness
-// state of an allocation: it's set when the allocation is created and cleared
-// on free(). So the count can be:
-//
-// 1 for an allocation that is just returned from Alloc()
-// 2 * k + 1 for a "live" allocation with k references
-// 2 * k for an allocation with k dangling references after Free()
-//
-// This protects against double-free's, as we check whether the reference count
-// is odd in |ReleaseFromAllocator()|, and if not we have a double-free.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
- public:
-  // This class holds an atomic bit field: `count_`. It holds up to 5 values:
-  //
-  // bits   name                   description
-  // -----  ---------------------  ----------------------------------------
-  // 0      is_allocated           Whether or not the memory is held by the
-  //                               allocator.
-  //                               - 1 at construction time.
-  //                               - Decreased in ReleaseFromAllocator();
-  //
-  // 1-31   ptr_count              Number of raw_ptr<T>.
-  //                               - Increased in Acquire()
-  //                               - Decreased in Release()
-  //
-  // 32     dangling_detected      A dangling raw_ptr<> has been detected.
-  // 33     needs_mac11_malloc_    Whether malloc_size() return value needs to
-  //          size_hack            be adjusted for this allocation.
-  //
-  // 34-63  unprotected_ptr_count  Number of
-  //                               raw_ptr<T, DisableDanglingPtrDetection>
-  //                               - Increased in AcquireFromUnprotectedPtr().
-  //                               - Decreased in ReleaseFromUnprotectedPtr().
-  //
-  // The allocation is reclaimed if all of:
-  // - |is_allocated|
-  // - |ptr_count|
-  // - |unprotected_ptr_count|
-  // are zero.
-  //
-  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
-  // |dangling_detected| is set and the error is reported via
-  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will be
-  // called when the last raw_ptr<> is released.
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-  using CountType = uint64_t;
-  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0000'0000'0001;
-  static constexpr CountType kPtrCountMask = 0x0000'0000'FFFF'FFFE;
-  static constexpr CountType kUnprotectedPtrCountMask = 0xFFFF'FFFC'0000'0000;
-  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0001'0000'0000;
-  static constexpr CountType kNeedsMac11MallocSizeHackBit =
-      0x0000'0002'0000'0000;
-
-  static constexpr CountType kPtrInc = 0x0000'0000'0000'0002;
-  static constexpr CountType kUnprotectedPtrInc = 0x0000'0004'0000'0000;
-#else
-  using CountType = uint32_t;
-  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0001;
-
-  static constexpr CountType kPtrCountMask = 0x7FFF'FFFE;
-  static constexpr CountType kUnprotectedPtrCountMask = 0x0000'0000;
-  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0000;
-  static constexpr CountType kNeedsMac11MallocSizeHackBit = 0x8000'0000;
-
-  static constexpr CountType kPtrInc = 0x0000'0002;
-#endif
-
-  PA_ALWAYS_INLINE explicit PartitionRefCount(
-      bool needs_mac11_malloc_size_hack);
-
-  // Incrementing the counter doesn't imply any visibility about modified
-  // memory, hence relaxed atomics. For decrement, visibility is required before
-  // the memory gets freed, necessitating an acquire/release barrier before
-  // freeing the memory.
-  //
-  // For details, see base::AtomicRefCount, which has the same constraints and
-  // characteristics.
-  //
-  // FYI: the assembly produced by the compiler on each platform, in particular
-  // the uint64_t fetch_add on 32-bit CPUs, is collected here:
-  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
-  PA_ALWAYS_INLINE void Acquire() {
-    CheckCookieIfSupported();
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-    constexpr CountType kInc = kUnprotectedPtrInc;
-    constexpr CountType kMask = kUnprotectedPtrCountMask;
-#else
-    constexpr CountType kInc = kPtrInc;
-    constexpr CountType kMask = kPtrCountMask;
-#endif
-    CountType old_count = count_.fetch_add(kInc, std::memory_order_relaxed);
-    // Check overflow.
-    PA_CHECK((old_count & kMask) != kMask);
-  }
-
-  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
-  // instead of raw_ptr<T>.
-  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-    CheckCookieIfSupported();
-    CountType old_count =
-        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
-    // Check overflow.
-    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
-             kUnprotectedPtrCountMask);
-#else
-    Acquire();
-#endif
-  }
-
-  // Returns true if the allocation should be reclaimed.
-  PA_ALWAYS_INLINE bool Release() {
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-    constexpr CountType kInc = kUnprotectedPtrInc;
-    constexpr CountType kMask = kUnprotectedPtrCountMask;
-#else
-    constexpr CountType kInc = kPtrInc;
-    constexpr CountType kMask = kPtrCountMask;
-#endif
-    CheckCookieIfSupported();
-
-    CountType old_count = count_.fetch_sub(kInc, std::memory_order_release);
-    // Check underflow.
-    PA_DCHECK(old_count & kMask);
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-    // If a dangling raw_ptr<> was detected, report it.
-    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
-                    kDanglingRawPtrDetectedBit)) {
-      partition_alloc::internal::DanglingRawPtrReleased(
-          reinterpret_cast<uintptr_t>(this));
-    }
-#endif
-
-    return ReleaseCommon(old_count - kInc);
-  }
-
-  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
-  // instead of raw_ptr<T>.
-  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-    CheckCookieIfSupported();
-
-    CountType old_count =
-        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
-    // Check underflow.
-    PA_DCHECK(old_count & kUnprotectedPtrCountMask);
-
-    return ReleaseCommon(old_count - kUnprotectedPtrInc);
-#else
-    return Release();
-#endif
-  }
-
-  // Returns true if the allocation should be reclaimed.
-  // This function should be called by the allocator during Free().
-  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
-    CheckCookieIfSupported();
-
-    // TODO(bartekn): Make the double-free check more effective. Once freed, the
-    // ref-count is overwritten by an encoded freelist-next pointer.
-    CountType old_count =
-        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
-
-    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
-      DoubleFreeOrCorruptionDetected(old_count);
-    }
-
-    // Release memory when no raw_ptr<> exists anymore:
-    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
-    if (PA_LIKELY((old_count & mask) == 0)) {
-      std::atomic_thread_fence(std::memory_order_acquire);
-      // The allocation is about to get freed, so clear the cookie.
-      ClearCookieIfSupported();
-      return true;
-    }
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-    // There are still dangling raw_ptr<>. Turn on the error flag if any of
-    // them have not opted out of dangling-pointer checks:
-    if (PA_UNLIKELY(old_count & kPtrCountMask)) {
-      count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);
-      partition_alloc::internal::DanglingRawPtrDetected(
-          reinterpret_cast<uintptr_t>(this));
-    }
-#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-    return false;
-  }
-
-  // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
-  // raw_ptr<T> references. There may be other references from raw pointers or
-  // unique_ptr, but we have no way of tracking them, so we hope for the best.
-  // To summarize, the function returns whether we believe the allocation can be
-  // safely freed.
-  PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
-    CheckCookieIfSupported();
-    static constexpr CountType mask =
-        kMemoryHeldByAllocatorBit | kPtrCountMask | kUnprotectedPtrCountMask;
-    return (count_.load(std::memory_order_acquire) & mask) ==
-           kMemoryHeldByAllocatorBit;
-  }
-
-  PA_ALWAYS_INLINE bool IsAlive() {
-    bool alive =
-        count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
-    if (alive) {
-      CheckCookieIfSupported();
-    }
-    return alive;
-  }
-
-  // Called when a raw_ptr is not banning dangling ptrs, but the user still
-  // wants to ensure the pointer is not currently dangling. This is currently
-  // used in UnretainedWrapper to make sure callbacks are not invoked with
-  // dangling pointers. If such a raw_ptr exists but the allocation is no longer
-  // alive, then we have a dangling pointer to a dead object.
-  PA_ALWAYS_INLINE void ReportIfDangling() {
-    if (!IsAlive()) {
-      partition_alloc::internal::UnretainedDanglingRawPtrDetected(
-          reinterpret_cast<uintptr_t>(this));
-    }
-  }
-
-  // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
-  // make sure the `raw_ptr<T>` release operation will never attempt to call the
-  // PA `free` on such a slot. GWP-ASan takes the extra reference into account
-  // when determining whether the slot can be reused.
-  PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-    brp_cookie_ = CalculateCookie();
-#endif
-    count_.store(kPtrInc | kMemoryHeldByAllocatorBit,
-                 std::memory_order_release);
-  }
-
-  PA_ALWAYS_INLINE bool CanBeReusedByGwpAsan() {
-    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
-    return (count_.load(std::memory_order_acquire) & mask) == kPtrInc;
-  }
-
-  bool NeedsMac11MallocSizeHack() {
-    return count_.load(std::memory_order_relaxed) &
-           kNeedsMac11MallocSizeHackBit;
-  }
-
-#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-  PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
-    requested_size_ = static_cast<uint32_t>(size);
-  }
-  PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
-#endif  // PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-
- private:
-  // The common parts shared by Release() and ReleaseFromUnprotectedPtr().
-  // Called after updating the ref counts, |count| is the new value of |count_|
-  // set by fetch_sub. Returns true if memory can be reclaimed.
-  PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
-    // Do not release memory, if it is still held by any of:
-    // - The allocator
-    // - A raw_ptr<T>
-    // - A raw_ptr<T, DisableDanglingPtrDetection>
-    //
-    // Assuming this raw_ptr is not dangling, the memory must still be held at
-    // least by the allocator, so this is PA_LIKELY true.
-    if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
-                            kUnprotectedPtrCountMask)))) {
-      return false;  // Do not release the memory.
-    }
-
-    // In most thread-safe reference count implementations, an acquire
-    // barrier is required so that all changes made to an object from other
-    // threads are visible to its destructor. In our case, the destructor
-    // finishes before the final `Release` call, so it shouldn't be a problem.
-    // However, we will keep it as a precautionary measure.
-    std::atomic_thread_fence(std::memory_order_acquire);
-
-    // The allocation is about to get freed, so clear the cookie.
-    ClearCookieIfSupported();
-    return true;
-  }
-
-  // The cookie helps us ensure that:
-  // 1) The reference count pointer calculation is correct.
-  // 2) The returned allocation slot is not freed.
-  PA_ALWAYS_INLINE void CheckCookieIfSupported() {
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-    PA_CHECK(brp_cookie_ == CalculateCookie());
-#endif
-  }
-
-  PA_ALWAYS_INLINE void ClearCookieIfSupported() {
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-    brp_cookie_ = 0;
-#endif
-  }
-
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-  PA_ALWAYS_INLINE uint32_t CalculateCookie() {
-    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
-           kCookieSalt;
-  }
-#endif  // PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-
-  [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
-  DoubleFreeOrCorruptionDetected(CountType count) {
-    PA_DEBUG_DATA_ON_STACK("refcount", count);
-    PA_NO_CODE_FOLDING();
-    PA_IMMEDIATE_CRASH();
-  }
-
-  // Note that in free slots, this is overwritten by encoded freelist
-  // pointer(s). The way the pointers are encoded on 64-bit little-endian
-  // architectures, count_ happens to stay even, which works well with the
-  // double-free-detection in ReleaseFromAllocator(). Don't change the layout of
-  // this class, to preserve this functionality.
-  std::atomic<CountType> count_;
-
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
-  volatile uint32_t brp_cookie_;
-#endif
-
-#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-  uint32_t requested_size_;
-#endif
-};
-
-PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(
-    bool needs_mac11_malloc_size_hack)
-    : count_(kMemoryHeldByAllocatorBit |
-             (needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-      ,
-      brp_cookie_(CalculateCookie())
-#endif
-{
-}
-
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-
-static_assert(kAlignment % alignof(PartitionRefCount) == 0,
-              "kAlignment must be multiples of alignof(PartitionRefCount).");
-
-// Allocate extra space for the reference count to satisfy the alignment
-// requirement.
-static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
-constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
-constexpr size_t kPartitionPastAllocationAdjustment = 0;
-
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
-    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-static constexpr size_t kPartitionRefCountSizeShift = 4;
-#else
-static constexpr size_t kPartitionRefCountSizeShift = 3;
-#endif
-
-#else  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
-    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-static constexpr size_t kPartitionRefCountSizeShift = 4;
-#elif PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
-    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-static constexpr size_t kPartitionRefCountSizeShift = 3;
-#else
-static constexpr size_t kPartitionRefCountSizeShift = 2;
-#endif
-
-#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
-static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
-
-// We need one PartitionRefCount for each system page in a super page. They take
-// `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())` space.
-// They need to fit into a system page of metadata as sparsely as possible to
-// minimize cache line sharing, hence we calculate a multiplier as
-// `SystemPageSize() / x`.
-//
-// The multiplier is expressed as a bitshift to optimize the code generation.
-// SystemPageSize() isn't always a constexpr, in which case the compiler
-// wouldn't know it's a power of two. The equivalence of these calculations is
-// checked in PartitionAllocGlobalInit().
-PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-GetPartitionRefCountIndexMultiplierShift() {
-  return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
-}
-
-PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
-    uintptr_t slot_start) {
-  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
-    uintptr_t refcount_address = slot_start - sizeof(PartitionRefCount);
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-    PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
-#endif
-    // No need to tag because the ref count is not protected by MTE.
-    return reinterpret_cast<PartitionRefCount*>(refcount_address);
-  } else {
-    // No need to tag, as the metadata region isn't protected by MTE.
-    PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
-        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
-    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
-                   << GetPartitionRefCountIndexMultiplierShift();
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-    PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
-#endif
-    return bitmap_base + index;
-  }
-}
-
-#else  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-
-// Allocate extra space for the reference count to satisfy the alignment
-// requirement.
-static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
-constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;
-
-// This is for adjustment of pointers right past the allocation, which may point
-// to the next slot. First subtract 1 to bring them to the intended slot, and
-// only then we'll be able to find ref-count in that slot.
-constexpr size_t kPartitionPastAllocationAdjustment = 1;
-
-PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
-    uintptr_t slot_start) {
-  // Have to MTE-tag, because the address is untagged, but lies within a slot
-  // area, which is protected by MTE.
-  return static_cast<PartitionRefCount*>(TagAddr(slot_start));
-}
-
-#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-
-static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
-              "PartitionRefCount should fit into the in-slot buffer.");
-
-#else  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-static constexpr size_t kInSlotRefCountBufferSize = 0;
-constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
-
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
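The comment block in the deleted header describes the liveness encoding: bit 0 means the slot is still held by the allocator and the remaining bits count raw_ptr<T> users, so a live allocation with k references has an odd count of 2k + 1 and a freed-but-referenced one has an even count. A minimal sketch of just that encoding follows; it uses invented names, is not the deleted PartitionRefCount class, and omits the cookie, dangling-pointer reporting, and the unprotected-pointer bits.

// Toy illustration of the odd/even liveness encoding documented above.
#include <atomic>
#include <cassert>
#include <cstdint>

class ToyRefCount {
 public:
  static constexpr uint32_t kAllocatedBit = 1u;  // bit 0: held by the allocator
  static constexpr uint32_t kPtrInc = 2u;        // one raw_ptr<T> reference

  void Acquire() { count_.fetch_add(kPtrInc, std::memory_order_relaxed); }

  // Returns true if the memory can be reclaimed now (no allocator, no refs).
  bool Release() {
    uint32_t old_count = count_.fetch_sub(kPtrInc, std::memory_order_release);
    return (old_count - kPtrInc) == 0;
  }

  // Called by the allocator on free(); an already-cleared bit 0 means a
  // double-free.
  bool ReleaseFromAllocator() {
    uint32_t old_count =
        count_.fetch_and(~kAllocatedBit, std::memory_order_release);
    assert(old_count & kAllocatedBit);         // otherwise: double-free
    return (old_count & ~kAllocatedBit) == 0;  // no raw_ptr<T> left
  }

 private:
  std::atomic<uint32_t> count_{kAllocatedBit};  // count is 1 right after Alloc()
};

int main() {
  ToyRefCount rc;
  rc.Acquire();                                       // one raw_ptr<T> points at the slot
  bool reclaim_on_free = rc.ReleaseFromAllocator();   // freed while still referenced
  assert(!reclaim_on_free);                           // memory must stay: dangling ref
  bool reclaim_on_release = rc.Release();             // last raw_ptr<T> goes away
  assert(reclaim_on_release);                         // now the slot can be reclaimed
  return 0;
}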
diff --git a/base/allocator/partition_allocator/partition_root.cc b/base/allocator/partition_allocator/partition_root.cc
deleted file mode 100644
index dd78098..0000000
--- a/base/allocator/partition_allocator/partition_root.cc
+++ /dev/null
@@ -1,1682 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_root.h"
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_cookie.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_MAC)
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-#endif
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
-#endif
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#include "wow64apiset.h"
-#endif
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-#include <pthread.h>
-#endif
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(RECORD_ALLOC_INFO)
-// Even if this is not hidden behind a BUILDFLAG, it should not use any memory
-// when recording is disabled, since it ends up in the .bss section.
-AllocInfo g_allocs = {};
-
-void RecordAllocOrFree(uintptr_t addr, size_t size) {
-  g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
-                  kAllocInfoSize] = {addr, size};
-}
-#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
-                                       uintptr_t test_address,
-                                       size_t type_size) {
-  // Required for pointers right past an allocation. See
-  // |PartitionAllocGetSlotStartInBRPPool()|.
-  uintptr_t adjusted_address =
-      orig_address - kPartitionPastAllocationAdjustment;
-  PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
-  DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
-
-  uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
-  // Don't use |adjusted_address| beyond this point at all. It was needed to
-  // pick the right slot, but now we're dealing with very concrete addresses.
-  // Zero it just in case, to catch errors.
-  adjusted_address = 0;
-
-  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
-  auto* root = PartitionRoot::FromSlotSpan(slot_span);
-  // Double check that ref-count is indeed present.
-  PA_DCHECK(root->brp_enabled());
-
-  uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
-  uintptr_t object_end = object_addr + root->GetSlotUsableSize(slot_span);
-  if (test_address < object_addr || object_end < test_address) {
-    return PtrPosWithinAlloc::kFarOOB;
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  } else if (object_end - type_size < test_address) {
-    // Not even a single element of the type referenced by the pointer can fit
-    // between the pointer and the end of the object.
-    return PtrPosWithinAlloc::kAllocEnd;
-#endif
-  } else {
-    return PtrPosWithinAlloc::kInBounds;
-  }
-}
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-}  // namespace partition_alloc::internal
-
-namespace partition_alloc {
-
-#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-
-namespace {
-internal::Lock g_root_enumerator_lock;
-}
-
-internal::Lock& PartitionRoot::GetEnumeratorLock() {
-  return g_root_enumerator_lock;
-}
-
-namespace internal {
-
-class PartitionRootEnumerator {
- public:
-  using EnumerateCallback = void (*)(PartitionRoot* root, bool in_child);
-  enum EnumerateOrder {
-    kNormal,
-    kReverse,
-  };
-
-  static PartitionRootEnumerator& Instance() {
-    static PartitionRootEnumerator instance;
-    return instance;
-  }
-
-  void Enumerate(EnumerateCallback callback,
-                 bool in_child,
-                 EnumerateOrder order) PA_NO_THREAD_SAFETY_ANALYSIS {
-    if (order == kNormal) {
-      PartitionRoot* root;
-      for (root = Head(partition_roots_); root != nullptr;
-           root = root->next_root) {
-        callback(root, in_child);
-      }
-    } else {
-      PA_DCHECK(order == kReverse);
-      PartitionRoot* root;
-      for (root = Tail(partition_roots_); root != nullptr;
-           root = root->prev_root) {
-        callback(root, in_child);
-      }
-    }
-  }
-
-  void Register(PartitionRoot* root) {
-    internal::ScopedGuard guard(PartitionRoot::GetEnumeratorLock());
-    root->next_root = partition_roots_;
-    root->prev_root = nullptr;
-    if (partition_roots_) {
-      partition_roots_->prev_root = root;
-    }
-    partition_roots_ = root;
-  }
-
-  void Unregister(PartitionRoot* root) {
-    internal::ScopedGuard guard(PartitionRoot::GetEnumeratorLock());
-    PartitionRoot* prev = root->prev_root;
-    PartitionRoot* next = root->next_root;
-    if (prev) {
-      PA_DCHECK(prev->next_root == root);
-      prev->next_root = next;
-    } else {
-      PA_DCHECK(partition_roots_ == root);
-      partition_roots_ = next;
-    }
-    if (next) {
-      PA_DCHECK(next->prev_root == root);
-      next->prev_root = prev;
-    }
-    root->next_root = nullptr;
-    root->prev_root = nullptr;
-  }
-
- private:
-  constexpr PartitionRootEnumerator() = default;
-
-  PartitionRoot* Head(PartitionRoot* roots) { return roots; }
-
-  PartitionRoot* Tail(PartitionRoot* roots) PA_NO_THREAD_SAFETY_ANALYSIS {
-    if (!roots) {
-      return nullptr;
-    }
-    PartitionRoot* node = roots;
-    for (; node->next_root != nullptr; node = node->next_root)
-      ;
-    return node;
-  }
-
-  PartitionRoot* partition_roots_
-      PA_GUARDED_BY(PartitionRoot::GetEnumeratorLock()) = nullptr;
-};
-
-}  // namespace internal
-
-#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-namespace {
-
-#if PA_CONFIG(HAS_ATFORK_HANDLER)
-
-void LockRoot(PartitionRoot* root, bool) PA_NO_THREAD_SAFETY_ANALYSIS {
-  PA_DCHECK(root);
-  internal::PartitionRootLock(root).Acquire();
-}
-
-// PA_NO_THREAD_SAFETY_ANALYSIS: acquires the lock and doesn't release it, by
-// design.
-void BeforeForkInParent() PA_NO_THREAD_SAFETY_ANALYSIS {
-  // PartitionRoot::GetLock() is private. So use
-  // g_root_enumerator_lock here.
-  g_root_enumerator_lock.Acquire();
-  internal::PartitionRootEnumerator::Instance().Enumerate(
-      LockRoot, false,
-      internal::PartitionRootEnumerator::EnumerateOrder::kNormal);
-
-  ThreadCacheRegistry::GetLock().Acquire();
-}
-
-template <typename T>
-void UnlockOrReinit(T& lock, bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
-  // Only re-init the locks in the child process, in the parent can unlock
-  // normally.
-  if (in_child) {
-    lock.Reinit();
-  } else {
-    lock.Release();
-  }
-}
-
-void UnlockOrReinitRoot(PartitionRoot* root,
-                        bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
-  UnlockOrReinit(internal::PartitionRootLock(root), in_child);
-}
-
-void ReleaseLocks(bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
-  // In reverse order, even though there are no lock ordering dependencies.
-  UnlockOrReinit(ThreadCacheRegistry::GetLock(), in_child);
-  internal::PartitionRootEnumerator::Instance().Enumerate(
-      UnlockOrReinitRoot, in_child,
-      internal::PartitionRootEnumerator::EnumerateOrder::kReverse);
-
-  // PartitionRoot::GetLock() is private. So use
-  // g_root_enumerator_lock here.
-  UnlockOrReinit(g_root_enumerator_lock, in_child);
-}
-
-void AfterForkInParent() {
-  ReleaseLocks(/* in_child = */ false);
-}
-
-void AfterForkInChild() {
-  ReleaseLocks(/* in_child = */ true);
-  // Unsafe, as noted in the name. This is fine here however, since at this
-  // point there is only one thread, this one (unless another post-fork()
-  // handler created a thread, but it would have needed to allocate, which would
-  // have deadlocked the process already).
-  //
-  // If we don't reclaim this memory, it is lost forever. Note that this is only
-  // really an issue if we fork() a multi-threaded process without calling
-  // exec() right away, which is discouraged.
-  ThreadCacheRegistry::Instance().ForcePurgeAllThreadAfterForkUnsafe();
-}
-#endif  // PA_CONFIG(HAS_ATFORK_HANDLER)
-
-std::atomic<bool> g_global_init_called;
-void PartitionAllocMallocInitOnce() {
-  bool expected = false;
-  // No need to block execution for potential concurrent initialization, merely
-  // want to make sure this is only called once.
-  if (!g_global_init_called.compare_exchange_strong(expected, true)) {
-    return;
-  }
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-  // When fork() is called, only the current thread continues to execute in the
-  // child process. If the lock is held, but *not* by this thread when fork() is
-  // called, we have a deadlock.
-  //
-  // The "solution" here is to acquire the lock on the forking thread before
-  // fork(), and keep it held until fork() is done, in the parent and the
-  // child. To clean up memory, we also must empty the thread caches in the
-  // child, which is easier, since no threads except for the current one are
-  // running right after the fork().
-  //
-  // This is not perfect though, since:
-  // - Multiple pre/post-fork() handlers can be registered, they are then run in
-  //   LIFO order for the pre-fork handler, and FIFO order for the post-fork
-  //   one. So unless we are the first to register a handler, if another handler
-  //   allocates, then we deterministically deadlock.
-  // - pthread handlers are *not* called when the application calls clone()
-  //   directly, which is what Chrome does to launch processes.
-  //
-  // However, no perfect solution really exists to make threads + fork()
-  // cooperate, but deadlocks are real (and fork() is used in DEATH_TEST()s),
-  // and other malloc() implementations use the same techniques.
-  int err =
-      pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
-  PA_CHECK(err == 0);
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-}
-
-}  // namespace
-
-#if BUILDFLAG(IS_APPLE)
-void PartitionAllocMallocHookOnBeforeForkInParent() {
-  BeforeForkInParent();
-}
-
-void PartitionAllocMallocHookOnAfterForkInParent() {
-  AfterForkInParent();
-}
-
-void PartitionAllocMallocHookOnAfterForkInChild() {
-  AfterForkInChild();
-}
-#endif  // BUILDFLAG(IS_APPLE)
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-namespace internal {
-
-namespace {
-// 64 was chosen arbitrarily, as it seems like a reasonable trade-off between
-// performance and purging opportunity. Higher value (i.e. smaller slots)
-// wouldn't necessarily increase chances of purging, but would result in
-// more work and larger |slot_usage| array. Lower value would probably decrease
-// chances of purging. Not empirically tested.
-constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64;
-PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-MinPurgeableSlotSize() {
-  return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
-}
-}  // namespace
-
-static size_t PartitionPurgeSlotSpan(internal::SlotSpanMetadata* slot_span,
-                                     bool discard) {
-  auto* root = PartitionRoot::FromSlotSpan(slot_span);
-  const internal::PartitionBucket* bucket = slot_span->bucket;
-  size_t slot_size = bucket->slot_size;
-
-  if (slot_size < MinPurgeableSlotSize() || !slot_span->num_allocated_slots) {
-    return 0;
-  }
-
-  size_t bucket_num_slots = bucket->get_slots_per_span();
-  size_t discardable_bytes = 0;
-
-  if (slot_span->CanStoreRawSize()) {
-    uint32_t utilized_slot_size = static_cast<uint32_t>(
-        RoundUpToSystemPage(slot_span->GetUtilizedSlotSize()));
-    discardable_bytes = bucket->slot_size - utilized_slot_size;
-    if (discardable_bytes && discard) {
-      uintptr_t slot_span_start =
-          internal::SlotSpanMetadata::ToSlotSpanStart(slot_span);
-      uintptr_t committed_data_end = slot_span_start + utilized_slot_size;
-      ScopedSyscallTimer timer{root};
-      DiscardSystemPages(committed_data_end, discardable_bytes);
-    }
-    return discardable_bytes;
-  }
-
-#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
-  constexpr size_t kMaxSlotCount =
-      (PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
-      MinPurgeableSlotSize();
-#elif BUILDFLAG(IS_APPLE) || (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
-  // It's better for slot_usage to be stack-allocated and fixed-size, which
-  // demands that its size be constexpr. On IS_APPLE and Linux on arm64,
-  // PartitionPageSize() is always SystemPageSize() << 2, so regardless of
-  // what the run time page size is, kMaxSlotCount can always be simplified
-  // to this expression.
-  constexpr size_t kMaxSlotCount =
-      4 * kMaxPurgeableSlotsPerSystemPage *
-      internal::kMaxPartitionPagesPerRegularSlotSpan;
-  PA_CHECK(kMaxSlotCount == (PartitionPageSize() *
-                             internal::kMaxPartitionPagesPerRegularSlotSpan) /
-                                MinPurgeableSlotSize());
-#endif
-  PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
-  PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
-  size_t num_slots = bucket_num_slots - slot_span->num_unprovisioned_slots;
-  char slot_usage[kMaxSlotCount];
-#if !BUILDFLAG(IS_WIN)
-  // The last freelist entry should not be discarded when using OS_WIN.
-  // DiscardVirtualMemory makes the contents of discarded memory undefined.
-  size_t last_slot = static_cast<size_t>(-1);
-#endif
-  memset(slot_usage, 1, num_slots);
-  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
-  // First, walk the freelist for this slot span and make a bitmap of which
-  // slots are not in use.
-  for (PartitionFreelistEntry* entry = slot_span->get_freelist_head(); entry;
-       /**/) {
-    size_t slot_number =
-        bucket->GetSlotNumber(SlotStartPtr2Addr(entry) - slot_span_start);
-    PA_DCHECK(slot_number < num_slots);
-    slot_usage[slot_number] = 0;
-#if !BUILDFLAG(IS_WIN)
-    // If we have a slot where the encoded next pointer is 0, we can actually
-    // discard that entry because touching a discarded page is guaranteed to
-    // return the original content or 0. (Note that this optimization won't be
-    // effective on big-endian machines because the masking function is
-    // negation.)
-    if (entry->IsEncodedNextPtrZero()) {
-      last_slot = slot_number;
-    }
-#endif
-    entry = entry->GetNext(slot_size);
-  }
-
-  // If the slot(s) at the end of the slot span are not in use, we can truncate
-  // them entirely and rewrite the freelist.
-  size_t truncated_slots = 0;
-  while (!slot_usage[num_slots - 1]) {
-    truncated_slots++;
-    num_slots--;
-    PA_DCHECK(num_slots);
-  }
-  // First, do the work of calculating the discardable bytes. Don't actually
-  // discard anything unless the discard flag was passed in.
-  if (truncated_slots) {
-    size_t unprovisioned_bytes = 0;
-    uintptr_t begin_addr = slot_span_start + (num_slots * slot_size);
-    uintptr_t end_addr = begin_addr + (slot_size * truncated_slots);
-
-    // The slots that do not contain discarded pages should not be included in
-    // |truncated_slots|. Detect those slots and fix |truncated_slots| and
-    // |num_slots| accordingly.
-    uintptr_t rounded_up_truncatation_begin_addr =
-        RoundUpToSystemPage(begin_addr);
-    while (begin_addr + slot_size <= rounded_up_truncatation_begin_addr) {
-      begin_addr += slot_size;
-      PA_DCHECK(truncated_slots);
-      --truncated_slots;
-      ++num_slots;
-    }
-    begin_addr = rounded_up_truncatation_begin_addr;
-
-    // We round the end address here up and not down because we're at the end of
-    // a slot span, so we "own" all the way up to the page boundary.
-    end_addr = RoundUpToSystemPage(end_addr);
-    PA_DCHECK(end_addr <= slot_span_start + bucket->get_bytes_per_span());
-    if (begin_addr < end_addr) {
-      unprovisioned_bytes = end_addr - begin_addr;
-      discardable_bytes += unprovisioned_bytes;
-    }
-    if (unprovisioned_bytes && discard) {
-      PA_DCHECK(truncated_slots > 0);
-      size_t new_unprovisioned_slots =
-          truncated_slots + slot_span->num_unprovisioned_slots;
-      PA_DCHECK(new_unprovisioned_slots <= bucket->get_slots_per_span());
-      slot_span->num_unprovisioned_slots = new_unprovisioned_slots;
-
-      // Rewrite the freelist.
-      internal::PartitionFreelistEntry* head = nullptr;
-      internal::PartitionFreelistEntry* back = head;
-      size_t num_new_entries = 0;
-      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
-        if (slot_usage[slot_index]) {
-          continue;
-        }
-
-        auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(
-            slot_span_start + (slot_size * slot_index));
-        if (!head) {
-          head = entry;
-          back = entry;
-        } else {
-          back->SetNext(entry);
-          back = entry;
-        }
-        num_new_entries++;
-#if !BUILDFLAG(IS_WIN)
-        last_slot = slot_index;
-#endif
-      }
-
-      slot_span->SetFreelistHead(head);
-
-      PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-      FreeSlotBitmapReset(slot_span_start + (slot_size * num_slots), end_addr,
-                          slot_size);
-#endif
-
-      // Discard the memory.
-      ScopedSyscallTimer timer{root};
-      DiscardSystemPages(begin_addr, unprovisioned_bytes);
-    }
-  }
-
-  if (slot_size < SystemPageSize()) {
-    // Return here because implementing the following steps for smaller slot
-    // sizes would require complicated logic and make the code messy.
-    return discardable_bytes;
-  }
-
-  // Next, walk the slots and for any not in use, consider which system pages
-  // are no longer needed. We can release any system pages back to the system as
-  // long as we don't interfere with a freelist pointer or an adjacent used
-  // slot.
-  for (size_t i = 0; i < num_slots; ++i) {
-    if (slot_usage[i]) {
-      continue;
-    }
-
-    // The first address we can safely discard is just after the freelist
-    // pointer. There's one quirk: if the freelist pointer is actually nullptr,
-    // we can discard that pointer value too.
-    uintptr_t begin_addr = slot_span_start + (i * slot_size);
-    uintptr_t end_addr = begin_addr + slot_size;
-
-    bool can_discard_free_list_pointer = false;
-#if !BUILDFLAG(IS_WIN)
-    if (i != last_slot) {
-      begin_addr += sizeof(internal::PartitionFreelistEntry);
-    } else {
-      can_discard_free_list_pointer = true;
-    }
-#else
-    begin_addr += sizeof(internal::PartitionFreelistEntry);
-#endif
-
-    uintptr_t rounded_up_begin_addr = RoundUpToSystemPage(begin_addr);
-    uintptr_t rounded_down_begin_addr = RoundDownToSystemPage(begin_addr);
-    end_addr = RoundDownToSystemPage(end_addr);
-
-    // |rounded_up_begin_addr| could be greater than |end_addr| only if slot
-    // size was less than system page size, or if free list pointer crossed the
-    // page boundary. Neither is possible here.
-    PA_DCHECK(rounded_up_begin_addr <= end_addr);
-
-    if (rounded_down_begin_addr < rounded_up_begin_addr && i != 0 &&
-        !slot_usage[i - 1] && can_discard_free_list_pointer) {
-      // This slot contains a partial page in the beginning. The rest of that
-      // page is contained in the slot[i-1], which is also discardable.
-      // Therefore we can discard this page.
-      begin_addr = rounded_down_begin_addr;
-    } else {
-      begin_addr = rounded_up_begin_addr;
-    }
-
-    if (begin_addr < end_addr) {
-      size_t partial_slot_bytes = end_addr - begin_addr;
-      discardable_bytes += partial_slot_bytes;
-      if (discard) {
-        ScopedSyscallTimer timer{root};
-        DiscardSystemPages(begin_addr, partial_slot_bytes);
-      }
-    }
-  }
-
-  return discardable_bytes;
-}
-
-static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
-  if (bucket->active_slot_spans_head !=
-      internal::SlotSpanMetadata::get_sentinel_slot_span()) {
-    for (internal::SlotSpanMetadata* slot_span = bucket->active_slot_spans_head;
-         slot_span; slot_span = slot_span->next_slot_span) {
-      PA_DCHECK(slot_span !=
-                internal::SlotSpanMetadata::get_sentinel_slot_span());
-      PartitionPurgeSlotSpan(slot_span, true);
-    }
-  }
-}
-
-static void PartitionDumpSlotSpanStats(PartitionBucketMemoryStats* stats_out,
-                                       internal::SlotSpanMetadata* slot_span) {
-  uint16_t bucket_num_slots = slot_span->bucket->get_slots_per_span();
-
-  if (slot_span->is_decommitted()) {
-    ++stats_out->num_decommitted_slot_spans;
-    return;
-  }
-
-  stats_out->discardable_bytes += PartitionPurgeSlotSpan(slot_span, false);
-
-  if (slot_span->CanStoreRawSize()) {
-    stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
-  } else {
-    stats_out->active_bytes +=
-        (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
-  }
-  stats_out->active_count += slot_span->num_allocated_slots;
-
-  size_t slot_span_bytes_resident = RoundUpToSystemPage(
-      (bucket_num_slots - slot_span->num_unprovisioned_slots) *
-      stats_out->bucket_slot_size);
-  stats_out->resident_bytes += slot_span_bytes_resident;
-  if (slot_span->is_empty()) {
-    stats_out->decommittable_bytes += slot_span_bytes_resident;
-    ++stats_out->num_empty_slot_spans;
-  } else if (slot_span->is_full()) {
-    ++stats_out->num_full_slot_spans;
-  } else {
-    PA_DCHECK(slot_span->is_active());
-    ++stats_out->num_active_slot_spans;
-  }
-}
-
-static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
-                                     const internal::PartitionBucket* bucket) {
-  PA_DCHECK(!bucket->is_direct_mapped());
-  stats_out->is_valid = false;
-  // If the active slot span list is empty (==
-  // internal::SlotSpanMetadata::get_sentinel_slot_span()), the bucket might
-  // still need to be reported if it has a list of empty, decommitted or full
-  // slot spans.
-  if (bucket->active_slot_spans_head ==
-          internal::SlotSpanMetadata::get_sentinel_slot_span() &&
-      !bucket->empty_slot_spans_head && !bucket->decommitted_slot_spans_head &&
-      !bucket->num_full_slot_spans) {
-    return;
-  }
-
-  memset(stats_out, '\0', sizeof(*stats_out));
-  stats_out->is_valid = true;
-  stats_out->is_direct_map = false;
-  stats_out->num_full_slot_spans =
-      static_cast<size_t>(bucket->num_full_slot_spans);
-  stats_out->bucket_slot_size = bucket->slot_size;
-  uint16_t bucket_num_slots = bucket->get_slots_per_span();
-  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
-  stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
-  stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
-  stats_out->active_count = bucket->num_full_slot_spans * bucket_num_slots;
-  stats_out->resident_bytes =
-      bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
-
-  for (internal::SlotSpanMetadata* slot_span = bucket->empty_slot_spans_head;
-       slot_span; slot_span = slot_span->next_slot_span) {
-    PA_DCHECK(slot_span->is_empty() || slot_span->is_decommitted());
-    PartitionDumpSlotSpanStats(stats_out, slot_span);
-  }
-  for (internal::SlotSpanMetadata* slot_span =
-           bucket->decommitted_slot_spans_head;
-       slot_span; slot_span = slot_span->next_slot_span) {
-    PA_DCHECK(slot_span->is_decommitted());
-    PartitionDumpSlotSpanStats(stats_out, slot_span);
-  }
-
-  if (bucket->active_slot_spans_head !=
-      internal::SlotSpanMetadata::get_sentinel_slot_span()) {
-    for (internal::SlotSpanMetadata* slot_span = bucket->active_slot_spans_head;
-         slot_span; slot_span = slot_span->next_slot_span) {
-      PA_DCHECK(slot_span !=
-                internal::SlotSpanMetadata::get_sentinel_slot_span());
-      PartitionDumpSlotSpanStats(stats_out, slot_span);
-    }
-  }
-}
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address) {
-  PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
-}
-#endif
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-void PartitionAllocThreadIsolationInit(ThreadIsolationOption thread_isolation) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  ThreadIsolationSettings::settings.enabled = true;
-#endif
-  PartitionAddressSpace::InitThreadIsolatedPool(thread_isolation);
-  // Call WriteProtectThreadIsolatedGlobals last since we might not have write
-  // permissions to globals afterwards.
-  WriteProtectThreadIsolatedGlobals(thread_isolation);
-}
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-}  // namespace internal
-
-[[noreturn]] PA_NOINLINE void PartitionRoot::OutOfMemory(size_t size) {
-  const size_t virtual_address_space_size =
-      total_size_of_super_pages.load(std::memory_order_relaxed) +
-      total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
-#if !defined(ARCH_CPU_64_BITS)
-  const size_t uncommitted_size =
-      virtual_address_space_size -
-      total_size_of_committed_pages.load(std::memory_order_relaxed);
-
-  // Check whether this OOM is due to a lot of super pages that are allocated
-  // but not committed, probably due to http://crbug.com/421387.
-  if (uncommitted_size > internal::kReasonableSizeOfUnusedPages) {
-    internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
-  }
-
-#if BUILDFLAG(IS_WIN)
-  // If true then we are running on 64-bit Windows.
-  BOOL is_wow_64 = FALSE;
-  // Intentionally ignoring failures.
-  IsWow64Process(GetCurrentProcess(), &is_wow_64);
-  // 32-bit address space on Windows is typically either 2 GiB (on 32-bit
-  // Windows) or 4 GiB (on 64-bit Windows). 2.8 and 1.0 GiB are just rough
-  // guesses as to how much address space PA can consume (note that code,
-  // stacks, and other allocators will also consume address space).
-  const size_t kReasonableVirtualSize = (is_wow_64 ? 2800 : 1024) * 1024 * 1024;
-  // Make it obvious whether we are running on 64-bit Windows.
-  PA_DEBUG_DATA_ON_STACK("iswow64", static_cast<size_t>(is_wow_64));
-#else
-  constexpr size_t kReasonableVirtualSize =
-      // 1.5GiB elsewhere, since address space is typically 3GiB.
-      (1024 + 512) * 1024 * 1024;
-#endif
-  if (virtual_address_space_size > kReasonableVirtualSize) {
-    internal::PartitionOutOfMemoryWithLargeVirtualSize(
-        virtual_address_space_size);
-  }
-#endif  // #if !defined(ARCH_CPU_64_BITS)
-
-  // Out of memory can be due to multiple causes, such as:
-  // - Out of virtual address space in the desired pool
-  // - Out of commit due to either our process, or another one
-  // - Excessive allocations in the current process
-  //
-  // Saving these values make it easier to distinguish between these. See the
-  // documentation in PA_CONFIG(DEBUG_DATA_ON_STACK) on how to get these from
-  // minidumps.
-  PA_DEBUG_DATA_ON_STACK("va_size", virtual_address_space_size);
-  PA_DEBUG_DATA_ON_STACK("alloc", get_total_size_of_allocated_bytes());
-  PA_DEBUG_DATA_ON_STACK("commit", get_total_size_of_committed_pages());
-  PA_DEBUG_DATA_ON_STACK("size", size);
-
-  if (internal::g_oom_handling_function) {
-    (*internal::g_oom_handling_function)(size);
-  }
-  OOM_CRASH(size);
-}
-
-void PartitionRoot::DecommitEmptySlotSpans() {
-  ShrinkEmptySlotSpansRing(0);
-  // We just decommitted everything while holding the lock, so this should be
-  // exactly 0.
-  PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
-}
-
-void PartitionRoot::DestructForTesting() {
-  // We need to destruct the thread cache before we unreserve any of the super
-  // pages below, which we currently are not doing. So, we should only call
-  // this function on PartitionRoots without a thread cache.
-  PA_CHECK(!settings.with_thread_cache);
-  auto pool_handle = ChoosePool();
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  // The pages managed by the thread-isolated pool will be freed at
-  // UninitThreadIsolatedForTesting(). Don't invoke FreePages() for those pages.
-  if (pool_handle == internal::kThreadIsolatedPoolHandle) {
-    return;
-  }
-  PA_DCHECK(pool_handle < internal::kNumPools);
-#else
-  PA_DCHECK(pool_handle <= internal::kNumPools);
-#endif
-
-  auto* curr = first_extent;
-  while (curr != nullptr) {
-    auto* next = curr->next;
-    uintptr_t address = SuperPagesBeginFromExtent(curr);
-    size_t size =
-        internal::kSuperPageSize * curr->number_of_consecutive_super_pages;
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-    internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address,
-                                                           size);
-#endif
-    internal::AddressPoolManager::GetInstance().UnreserveAndDecommit(
-        pool_handle, address, size);
-    curr = next;
-  }
-}
-
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-void PartitionRoot::InitMac11MallocSizeHackUsableSize(size_t ref_count_size) {
-  settings.mac11_malloc_size_hack_enabled_ = true;
-
-  // 0 means reserve just enough extras to fit PartitionRefCount.
-  if (!ref_count_size) {
-    ref_count_size = sizeof(internal::PartitionRefCount);
-  }
-  // Request of 32B will fall into a 48B bucket in the presence of BRP
-  // ref-count, yielding |48 - ref_count_size| of actual usable space.
-  settings.mac11_malloc_size_hack_usable_size_ = 48 - ref_count_size;
-}
-
-void PartitionRoot::EnableMac11MallocSizeHackForTesting(size_t ref_count_size) {
-  settings.mac11_malloc_size_hack_enabled_ = true;
-  InitMac11MallocSizeHackUsableSize(ref_count_size);
-}
-
-void PartitionRoot::EnableMac11MallocSizeHackIfNeeded(size_t ref_count_size) {
-  settings.mac11_malloc_size_hack_enabled_ =
-      settings.brp_enabled_ && internal::base::mac::IsOS11();
-  if (settings.mac11_malloc_size_hack_enabled_) {
-    InitMac11MallocSizeHackUsableSize(ref_count_size);
-  }
-}
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
-namespace {
-std::atomic<bool> g_reserve_brp_guard_region_called;
-// An address constructed by repeating `kQuarantinedByte` should never point
-// to valid memory. Preemptively reserve a memory region around that address and
-// make it inaccessible. Not needed for 64-bit platforms where the address is
-// guaranteed to be non-canonical. Safe to call multiple times.
-void ReserveBackupRefPtrGuardRegionIfNeeded() {
-  bool expected = false;
-  // No need to block execution for potential concurrent initialization, merely
-  // want to make sure this is only called once.
-  if (!g_reserve_brp_guard_region_called.compare_exchange_strong(expected,
-                                                                 true)) {
-    return;
-  }
-
-  size_t alignment = internal::PageAllocationGranularity();
-  uintptr_t requested_address;
-  memset(&requested_address, internal::kQuarantinedByte,
-         sizeof(requested_address));
-  requested_address = RoundDownToPageAllocationGranularity(requested_address);
-
-  // Request several pages so that even unreasonably large C++ objects stay
-  // within the inaccessible region. If some of the pages can't be reserved,
-  // it's still preferable to try and reserve the rest.
-  for (size_t i = 0; i < 4; ++i) {
-    [[maybe_unused]] uintptr_t allocated_address =
-        AllocPages(requested_address, alignment, alignment,
-                   PageAccessibilityConfiguration(
-                       PageAccessibilityConfiguration::kInaccessible),
-                   PageTag::kPartitionAlloc);
-    requested_address += alignment;
-  }
-}
-}  // namespace
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
-        // !BUILDFLAG(HAS_64_BIT_POINTERS)
-
-void PartitionRoot::Init(PartitionOptions opts) {
-  {
-#if BUILDFLAG(IS_APPLE)
-    // Needed to statically bound page size, which is a runtime constant on
-    // Apple OSes.
-    PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
-             (internal::SystemPageSize() == (size_t{1} << 14)));
-#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
-    // Check runtime pagesize. Though the code is currently the same, it is
-    // not merged with the IS_APPLE case above as a 1 << 16 case needs to be
-    // added here in the future, to allow 64 kiB pagesize. That is only
-    // supported on Linux on arm64, not on IS_APPLE, but not yet present here
-    // as the rest of the partition allocator does not currently support it.
-    PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
-             (internal::SystemPageSize() == (size_t{1} << 14)));
-#endif
-
-    ::partition_alloc::internal::ScopedGuard guard{lock_};
-    if (initialized) {
-      return;
-    }
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-    // Reserve address space for partition alloc.
-    internal::PartitionAddressSpace::Init();
-#endif
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
-    ReserveBackupRefPtrGuardRegionIfNeeded();
-#endif
-
-    settings.allow_aligned_alloc =
-        opts.aligned_alloc == PartitionOptions::AlignedAlloc::kAllowed;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    settings.use_cookie = true;
-#else
-    static_assert(!Settings::use_cookie);
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    settings.brp_enabled_ =
-        opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-    EnableMac11MallocSizeHackIfNeeded(opts.ref_count_size);
-#endif
-#else   // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    settings.use_configurable_pool =
-        (opts.use_configurable_pool ==
-         PartitionOptions::UseConfigurablePool::kIfAvailable) &&
-        IsConfigurablePoolAvailable();
-    PA_DCHECK(!settings.use_configurable_pool || IsConfigurablePoolAvailable());
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    settings.memory_tagging_enabled_ =
-        opts.memory_tagging.enabled ==
-        PartitionOptions::MemoryTagging::kEnabled;
-    // Memory tagging is not supported in the configurable pool because MTE
-    // stores tagging information in the high bits of the pointer, which causes
-    // issues with components like V8's ArrayBuffers that use custom pointer
-    // representations. All custom representations encountered so far rely on an
-    // "is in configurable pool?" check, so we use that as a proxy.
-    PA_CHECK(!settings.memory_tagging_enabled_ ||
-             !settings.use_configurable_pool);
-
-    settings.memory_tagging_reporting_mode_ =
-        opts.memory_tagging.reporting_mode;
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-    // brp_enabled() is not supported in the configurable pool because
-    // BRP requires objects to be in a different Pool.
-    PA_CHECK(!(settings.use_configurable_pool && brp_enabled()));
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    // BRP and thread isolated mode use different pools, so they can't be
-    // enabled at the same time.
-    PA_CHECK(!opts.thread_isolation.enabled ||
-             opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
-    settings.thread_isolation = opts.thread_isolation;
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-    // Ref-count messes up the alignment needed for AlignedAlloc, making the
-    // two options incompatible, except in the PUT_REF_COUNT_IN_PREVIOUS_SLOT
-    // case.
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
-    !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-    PA_CHECK(!settings.allow_aligned_alloc || !settings.brp_enabled_);
-#endif
-
-#if PA_CONFIG(EXTRAS_REQUIRED)
-    settings.extras_size = 0;
-    settings.extras_offset = 0;
-
-    if (settings.use_cookie) {
-      settings.extras_size += internal::kPartitionCookieSizeAdjustment;
-    }
-
-    if (brp_enabled()) {
-      // TODO(tasak): In the PUT_REF_COUNT_IN_PREVIOUS_SLOT case, ref-count is
-      // stored out-of-line for single-slot slot spans, so no need to
-      // add/subtract its size in this case.
-      size_t ref_count_size = opts.ref_count_size;
-      if (!ref_count_size) {
-        ref_count_size = internal::kPartitionRefCountSizeAdjustment;
-      }
-      ref_count_size = internal::AlignUpRefCountSizeForMac(ref_count_size);
-#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-      if (IsMemoryTaggingEnabled()) {
-        ref_count_size = internal::base::bits::AlignUp(
-            ref_count_size, internal::kMemTagGranuleSize);
-      }
-      settings.ref_count_size = ref_count_size;
-#endif  // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-      PA_CHECK(internal::kPartitionRefCountSizeAdjustment <= ref_count_size);
-      settings.extras_size += ref_count_size;
-      settings.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
-    }
-#endif  // PA_CONFIG(EXTRAS_REQUIRED)
-
-    // Re-confirm the above PA_CHECKs, by making sure there are no
-    // pre-allocation extras when AlignedAlloc is allowed. Post-allocation
-    // extras are ok.
-    PA_CHECK(!settings.allow_aligned_alloc || !settings.extras_offset);
-
-    settings.quarantine_mode =
-#if BUILDFLAG(USE_STARSCAN)
-        (opts.star_scan_quarantine ==
-                 PartitionOptions::StarScanQuarantine::kDisallowed
-             ? QuarantineMode::kAlwaysDisabled
-             : QuarantineMode::kDisabledByDefault);
-#else
-        QuarantineMode::kAlwaysDisabled;
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-    // We mark the sentinel slot span as free to make sure it is skipped by our
-    // logic to find a new active slot span.
-    memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
-    sentinel_bucket.active_slot_spans_head =
-        SlotSpan::get_sentinel_slot_span_non_const();
-
-    // This is a "magic" value so we can test if a root pointer is valid.
-    inverted_self = ~reinterpret_cast<uintptr_t>(this);
-
-    // Set up the actual usable buckets first.
-    constexpr internal::BucketIndexLookup lookup{};
-    size_t bucket_index = 0;
-    while (lookup.bucket_sizes()[bucket_index] !=
-           internal::kInvalidBucketSize) {
-      buckets[bucket_index].Init(lookup.bucket_sizes()[bucket_index]);
-      bucket_index++;
-    }
-    PA_DCHECK(bucket_index < internal::kNumBuckets);
-
-    // Remaining buckets are not usable, and not real.
-    for (size_t index = bucket_index; index < internal::kNumBuckets; index++) {
-      // Cannot Init() with size 0, since that computes 1 / size; just make
-      // sure the bucket is marked invalid.
-      buckets[index].Init(internal::kInvalidBucketSize);
-      buckets[index].active_slot_spans_head = nullptr;
-      PA_DCHECK(!buckets[index].is_valid());
-    }
-
-#if !PA_CONFIG(THREAD_CACHE_SUPPORTED)
-    // ThreadCache's TLS machinery is not supported on this platform.
-    settings.with_thread_cache = false;
-#else
-    ThreadCache::EnsureThreadSpecificDataInitialized();
-    settings.with_thread_cache =
-        (opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);
-
-    if (settings.with_thread_cache) {
-      ThreadCache::Init(this);
-    }
-#endif  // !PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-    internal::PartitionRootEnumerator::Instance().Register(this);
-#endif
-
-    initialized = true;
-  }
-
-  // Called without the lock, might allocate.
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  PartitionAllocMallocInitOnce();
-#endif
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  if (settings.thread_isolation.enabled) {
-    internal::PartitionAllocThreadIsolationInit(settings.thread_isolation);
-  }
-#endif
-}
-
-PartitionRoot::Settings::Settings() = default;
-
-PartitionRoot::PartitionRoot() : settings() {}
-
-PartitionRoot::PartitionRoot(PartitionOptions opts) : settings() {
-  Init(opts);
-}
-
-PartitionRoot::~PartitionRoot() {
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  PA_CHECK(!settings.with_thread_cache)
-      << "Must not destroy a partition with a thread cache";
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-  if (initialized) {
-    internal::PartitionRootEnumerator::Instance().Unregister(this);
-  }
-#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-}
-
-void PartitionRoot::EnableThreadCacheIfSupported() {
-#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
-  ::partition_alloc::internal::ScopedGuard guard{lock_};
-  PA_CHECK(!settings.with_thread_cache);
-  // By the time we get here, multiple threads may already have been created
-  // in the process. Since `with_thread_cache` is accessed without a lock, it
-  // can become visible to another thread before the effects of
-  // `ThreadCache::Init()` are visible. To prevent that, we fake thread
-  // cache creation being in-progress while this is running.
-  //
-  // This synchronizes with the acquire load in `MaybeInitThreadCache()`
-  // to ensure that we don't create (and thus use) a ThreadCache before
-  // ThreadCache::Init()'s effects are visible.
-  int before =
-      thread_caches_being_constructed_.fetch_add(1, std::memory_order_acquire);
-  PA_CHECK(before == 0);
-  ThreadCache::Init(this);
-  thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
-  settings.with_thread_cache = true;
-#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
-}
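-
-// Illustrative sketch, not part of the original file: the construction-guard
-// pattern used above, reduced to its essentials. The names in the anonymous
-// namespace below (`g_being_constructed_sketch`, `g_initialized_sketch`,
-// `GuardedEnableSketch`) are hypothetical and do not exist in PartitionAlloc;
-// the sketch only assumes <atomic>, which this file already relies on.
-namespace {
-
-std::atomic<int> g_being_constructed_sketch{0};  // plays the role of
-                                                 // thread_caches_being_constructed_
-bool g_initialized_sketch = false;  // read without a lock, like
-                                    // settings.with_thread_cache
-
-void GuardedEnableSketch() {
-  // Raise the "construction in progress" counter before doing the work.
-  int before =
-      g_being_constructed_sketch.fetch_add(1, std::memory_order_acquire);
-  PA_CHECK(before == 0);
-  g_initialized_sketch = true;  // stands in for ThreadCache::Init(this)
-  // The release below pairs with the acquire load of the counter in
-  // MaybeInitThreadCache(): a reader that observes the counter back at 0 also
-  // observes the initialization done above.
-  g_being_constructed_sketch.fetch_sub(1, std::memory_order_release);
-}
-
-}  // namespace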
-
-bool PartitionRoot::TryReallocInPlaceForDirectMap(
-    internal::SlotSpanMetadata* slot_span,
-    size_t requested_size) {
-  PA_DCHECK(slot_span->bucket->is_direct_mapped());
-  // Slot-span metadata isn't MTE-tagged.
-  PA_DCHECK(
-      internal::IsManagedByDirectMap(reinterpret_cast<uintptr_t>(slot_span)));
-
-  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
-  auto* extent = DirectMapExtent::FromSlotSpan(slot_span);
-  size_t current_reservation_size = extent->reservation_size;
-  // Calculate the new reservation size the way PartitionDirectMap() would, but
-  // skip the alignment, because this call isn't requesting it.
-  size_t new_reservation_size = GetDirectMapReservationSize(raw_size);
-
-  // If new reservation would be larger, there is nothing we can do to
-  // reallocate in-place.
-  if (new_reservation_size > current_reservation_size) {
-    return false;
-  }
-
-  // Don't reallocate in-place if new reservation size would be less than 80 %
-  // of the current one, to avoid holding on to too much unused address space.
-  // Make this check before comparing slot sizes, as even with equal or similar
-  // slot sizes we can save a lot if the original allocation was heavily padded
-  // for alignment.
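-  // Worked example with hypothetical numbers: for a current reservation of
-  // 100 system pages, a new reservation of 79 pages fails the check below
-  // (79 * 5 = 395 < 100 * 4 = 400) and falls back to a full reallocation,
-  // while one of 80 pages (80 * 5 = 400) passes and may be shrunk in place.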
-  if ((new_reservation_size >> internal::SystemPageShift()) * 5 <
-      (current_reservation_size >> internal::SystemPageShift()) * 4) {
-    return false;
-  }
-
-  // Note that the new size isn't a bucketed size; this function is called
-  // whenever we're reallocating a direct mapped allocation, so calculate it
-  // the way PartitionDirectMap() would.
-  size_t new_slot_size = GetDirectMapSlotSize(raw_size);
-  if (new_slot_size < internal::kMinDirectMappedDownsize) {
-    return false;
-  }
-
-  // Past this point, we decided we'll attempt to reallocate without relocating,
-  // so we have to honor the padding for alignment in front of the original
-  // allocation, even though this function isn't requesting any alignment.
-
-  // bucket->slot_size is the currently committed size of the allocation.
-  size_t current_slot_size = slot_span->bucket->slot_size;
-  size_t current_usable_size = GetSlotUsableSize(slot_span);
-  uintptr_t slot_start = SlotSpan::ToSlotSpanStart(slot_span);
-  // This is the available part of the reservation up to which the new
-  // allocation can grow.
-  size_t available_reservation_size =
-      current_reservation_size - extent->padding_for_alignment -
-      PartitionRoot::GetDirectMapMetadataAndGuardPagesSize();
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  uintptr_t reservation_start = slot_start & internal::kSuperPageBaseMask;
-  PA_DCHECK(internal::IsReservationStart(reservation_start));
-  PA_DCHECK(slot_start + available_reservation_size ==
-            reservation_start + current_reservation_size -
-                GetDirectMapMetadataAndGuardPagesSize() +
-                internal::PartitionPageSize());
-#endif
-
-  PA_DCHECK(new_slot_size > internal::kMaxMemoryTaggingSize);
-  if (new_slot_size == current_slot_size) {
-    // No need to move any memory around, but update size and cookie below.
-    // That's because raw_size may have changed.
-  } else if (new_slot_size < current_slot_size) {
-    // Shrink by decommitting unneeded pages and making them inaccessible.
-    size_t decommit_size = current_slot_size - new_slot_size;
-    DecommitSystemPagesForData(slot_start + new_slot_size, decommit_size,
-                               PageAccessibilityDisposition::kRequireUpdate);
-    // Since the decommitted system pages are still reserved, we don't need to
-    // change the entries for decommitted pages in the reservation offset table.
-  } else if (new_slot_size <= available_reservation_size) {
-    // Grow within the actually reserved address space. Just need to make the
-    // pages accessible again.
-    size_t recommit_slot_size_growth = new_slot_size - current_slot_size;
-    // Direct map never uses tagging, as size is always >kMaxMemoryTaggingSize.
-    RecommitSystemPagesForData(
-        slot_start + current_slot_size, recommit_slot_size_growth,
-        PageAccessibilityDisposition::kRequireUpdate, false);
-    // The recommitted system pages were already reserved, and all the
-    // entries in the reservation offset table (for the entire reservation_size
-    // region) have already been initialized.
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    memset(reinterpret_cast<void*>(slot_start + current_slot_size),
-           internal::kUninitializedByte, recommit_slot_size_growth);
-#endif
-  } else {
-    // We can't perform the realloc in-place.
-    // TODO: support this too when possible.
-    return false;
-  }
-
-  DecreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
-                                    slot_span->bucket->slot_size);
-  slot_span->SetRawSize(raw_size);
-  slot_span->bucket->slot_size = new_slot_size;
-  IncreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
-                                    slot_span->bucket->slot_size, raw_size);
-
-  // Always record an in-place realloc() as a free()+malloc() pair.
-  //
-  // The early returns above (`return false`) will fall back to free()+malloc(),
-  // so this is consistent.
-  auto* thread_cache = GetOrCreateThreadCache();
-  if (ThreadCache::IsValid(thread_cache)) {
-    thread_cache->RecordDeallocation(current_usable_size);
-    thread_cache->RecordAllocation(GetSlotUsableSize(slot_span));
-  }
-
-  // Write a new trailing cookie.
-  if (settings.use_cookie) {
-    auto* object = static_cast<unsigned char*>(SlotStartToObject(slot_start));
-    internal::PartitionCookieWriteValue(object + GetSlotUsableSize(slot_span));
-  }
-
-  return true;
-}
-
-bool PartitionRoot::TryReallocInPlaceForNormalBuckets(void* object,
-                                                      SlotSpan* slot_span,
-                                                      size_t new_size) {
-  uintptr_t slot_start = ObjectToSlotStart(object);
-  PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
-
-  // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
-  // new size is a significant percentage smaller. We could do the same if we
-  // determine it is a win.
-  if (AllocationCapacityFromRequestedSize(new_size) !=
-      AllocationCapacityFromSlotStart(slot_start)) {
-    return false;
-  }
-  size_t current_usable_size = GetSlotUsableSize(slot_span);
-
-  // Trying to allocate |new_size| would use the same amount of underlying
-  // memory as we're already using, so re-use the allocation after updating
-  // statistics (and cookie, if present).
-  if (slot_span->CanStoreRawSize()) {
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
-    internal::PartitionRefCount* old_ref_count;
-    if (brp_enabled()) {
-      old_ref_count = internal::PartitionRefCountPointer(slot_start);
-    }
-#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) &&
-        // BUILDFLAG(PA_DCHECK_IS_ON)
-    size_t new_raw_size = AdjustSizeForExtrasAdd(new_size);
-    slot_span->SetRawSize(new_raw_size);
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
-    if (brp_enabled()) {
-      internal::PartitionRefCount* new_ref_count =
-          internal::PartitionRefCountPointer(slot_start);
-      PA_DCHECK(new_ref_count == old_ref_count);
-    }
-#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) &&
-        // BUILDFLAG(PA_DCHECK_IS_ON)
-    // Write a new trailing cookie only when it is possible to keep track of
-    // the raw size (otherwise we wouldn't know where to look for it later).
-    if (settings.use_cookie) {
-      internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
-                                          GetSlotUsableSize(slot_span));
-    }
-  }
-
-  // Always record a realloc() as a free() + malloc(), even if it's in
-  // place. When we cannot do it in place (`return false` above), the allocator
-  // falls back to free()+malloc(), so this is consistent.
-  ThreadCache* thread_cache = GetOrCreateThreadCache();
-  if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
-    thread_cache->RecordDeallocation(current_usable_size);
-    thread_cache->RecordAllocation(GetSlotUsableSize(slot_span));
-  }
-
-  return true;
-}
-
-void* PartitionRoot::ReallocWithFlags(unsigned int flags,
-                                      void* ptr,
-                                      size_t new_size,
-                                      const char* type_name) {
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
-  void* result = realloc(ptr, new_size);
-  PA_CHECK(result || flags & AllocFlags::kReturnNull);
-  return result;
-#else
-  bool no_hooks = flags & AllocFlags::kNoHooks;
-  if (PA_UNLIKELY(!ptr)) {
-    return no_hooks
-               ? AllocWithFlagsNoHooks(flags, new_size,
-                                       internal::PartitionPageSize())
-               : AllocWithFlagsInternal(
-                     flags, new_size, internal::PartitionPageSize(), type_name);
-  }
-
-  if (PA_UNLIKELY(!new_size)) {
-    FreeInUnknownRoot(ptr);
-    return nullptr;
-  }
-
-  if (new_size > internal::MaxDirectMapped()) {
-    if (flags & AllocFlags::kReturnNull) {
-      return nullptr;
-    }
-    internal::PartitionExcessiveAllocationSize(new_size);
-  }
-
-  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
-  bool overridden = false;
-  size_t old_usable_size;
-  if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
-    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
-        &old_usable_size, ptr);
-  }
-  if (PA_LIKELY(!overridden)) {
-    // |ptr| may have been allocated in another root.
-    SlotSpan* slot_span = SlotSpan::FromObject(ptr);
-    auto* old_root = PartitionRoot::FromSlotSpan(slot_span);
-    bool success = false;
-    bool tried_in_place_for_direct_map = false;
-    {
-      ::partition_alloc::internal::ScopedGuard guard{
-          internal::PartitionRootLock(old_root)};
-      // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
-      DCheckIsValidSlotSpan(slot_span);
-      old_usable_size = old_root->GetSlotUsableSize(slot_span);
-
-      if (PA_UNLIKELY(slot_span->bucket->is_direct_mapped())) {
-        tried_in_place_for_direct_map = true;
-        // We may be able to perform the realloc in place by changing the
-        // accessibility of memory pages and, if reducing the size, decommitting
-        // them.
-        success = old_root->TryReallocInPlaceForDirectMap(slot_span, new_size);
-      }
-    }
-    if (success) {
-      if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
-        PartitionAllocHooks::ReallocObserverHookIfEnabled(
-            CreateFreeNotificationData(ptr),
-            CreateAllocationNotificationData(ptr, new_size, type_name));
-      }
-      return ptr;
-    }
-
-    if (PA_LIKELY(!tried_in_place_for_direct_map)) {
-      if (old_root->TryReallocInPlaceForNormalBuckets(ptr, slot_span,
-                                                      new_size)) {
-        return ptr;
-      }
-    }
-  }
-
-  // This realloc cannot be resized in-place. Sadness.
-  void* ret =
-      no_hooks ? AllocWithFlagsNoHooks(flags, new_size,
-                                       internal::PartitionPageSize())
-               : AllocWithFlagsInternal(
-                     flags, new_size, internal::PartitionPageSize(), type_name);
-  if (!ret) {
-    if (flags & AllocFlags::kReturnNull) {
-      return nullptr;
-    }
-    internal::PartitionExcessiveAllocationSize(new_size);
-  }
-
-  memcpy(ret, ptr, std::min(old_usable_size, new_size));
-  FreeInUnknownRoot(ptr);  // Implicitly protects the old ptr on MTE systems.
-  return ret;
-#endif
-}
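-
-// Illustrative sketch, not part of the original file: how a caller typically
-// uses the realloc family defined above. The function name
-// `ReallocUsageSketch` and the sizes/type name are hypothetical; `root` is
-// assumed to be a fully initialized PartitionRoot.
-void ReallocUsageSketch(PartitionRoot& root) {
-  void* p = root.Alloc(4000, "example");
-  // TryRealloc() returns nullptr when the reallocation cannot be performed;
-  // in that case |p| stays valid and keeps its contents.
-  if (void* q = root.TryRealloc(p, 8000, "example")) {
-    p = q;
-  }
-  // Realloc() has no null-return path for the caller; a failure is fatal
-  // unless AllocFlags::kReturnNull is passed through ReallocWithFlags().
-  p = root.Realloc(p, 16000, "example");
-  root.Free(p);
-}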
-
-void PartitionRoot::PurgeMemory(int flags) {
-  {
-    ::partition_alloc::internal::ScopedGuard guard{
-        internal::PartitionRootLock(this)};
-#if BUILDFLAG(USE_STARSCAN)
-    // Avoid purging if there is PCScan task currently scheduled. Since pcscan
-    // takes snapshot of all allocated pages, decommitting pages here (even
-    // under the lock) is racy.
-    // TODO(bikineev): Consider rescheduling the purging after PCScan.
-    if (PCScan::IsInProgress()) {
-      return;
-    }
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-    if (flags & PurgeFlags::kDecommitEmptySlotSpans) {
-      DecommitEmptySlotSpans();
-    }
-    if (flags & PurgeFlags::kDiscardUnusedSystemPages) {
-      for (Bucket& bucket : buckets) {
-        if (bucket.slot_size == internal::kInvalidBucketSize) {
-          continue;
-        }
-
-        if (bucket.slot_size >= internal::MinPurgeableSlotSize()) {
-          internal::PartitionPurgeBucket(&bucket);
-        } else {
-          bucket.SortSlotSpanFreelists();
-        }
-
-        // Do it at the end, as the actions above change the status of slot
-        // spans (e.g. empty -> decommitted).
-        bucket.MaintainActiveList();
-
-        if (sort_active_slot_spans_) {
-          bucket.SortActiveSlotSpans();
-        }
-      }
-    }
-  }
-}
-
-void PartitionRoot::ShrinkEmptySlotSpansRing(size_t limit) {
-  int16_t index = global_empty_slot_span_ring_index;
-  int16_t starting_index = index;
-  while (empty_slot_spans_dirty_bytes > limit) {
-    SlotSpan* slot_span = global_empty_slot_span_ring[index];
-    // The ring is not always full, so the entry may be nullptr.
-    if (slot_span) {
-      slot_span->DecommitIfPossible(this);
-      global_empty_slot_span_ring[index] = nullptr;
-    }
-    index += 1;
-    // Walk through the entirety of possible slots, even though the last ones
-    // are unused when global_empty_slot_span_ring_size is smaller than
-    // kMaxFreeableSpans. It's simpler, and does not cost anything, since all
-    // the pointers are going to be nullptr.
-    if (index == internal::kMaxFreeableSpans) {
-      index = 0;
-    }
-
-    // We went around the whole ring; since this is locked,
-    // empty_slot_spans_dirty_bytes should be exactly 0.
-    if (index == starting_index) {
-      PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
-      // Metrics issue, don't crash, return.
-      break;
-    }
-  }
-}
-
-void PartitionRoot::DumpStats(const char* partition_name,
-                              bool is_light_dump,
-                              PartitionStatsDumper* dumper) {
-  static const size_t kMaxReportableDirectMaps = 4096;
-  // Allocate on the heap rather than on the stack to avoid stack overflow
-  // skirmishes (on Windows, in particular). Allocate before locking below,
-  // otherwise when PartitionAlloc is malloc() we get reentrancy issues. This
-  // inflates reported values a bit for detailed dumps though, by 16kiB.
-  std::unique_ptr<uint32_t[]> direct_map_lengths;
-  if (!is_light_dump) {
-    direct_map_lengths =
-        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
-  }
-  PartitionBucketMemoryStats bucket_stats[internal::kNumBuckets];
-  size_t num_direct_mapped_allocations = 0;
-  PartitionMemoryStats stats = {0};
-
-  stats.syscall_count = syscall_count.load(std::memory_order_relaxed);
-  stats.syscall_total_time_ns =
-      syscall_total_time_ns.load(std::memory_order_relaxed);
-
-  // Collect data with the lock held, cannot allocate or call third-party code
-  // below.
-  {
-    ::partition_alloc::internal::ScopedGuard guard{
-        internal::PartitionRootLock(this)};
-    PA_DCHECK(total_size_of_allocated_bytes <= max_size_of_allocated_bytes);
-
-    stats.total_mmapped_bytes =
-        total_size_of_super_pages.load(std::memory_order_relaxed) +
-        total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
-    stats.total_committed_bytes =
-        total_size_of_committed_pages.load(std::memory_order_relaxed);
-    stats.max_committed_bytes =
-        max_size_of_committed_pages.load(std::memory_order_relaxed);
-    stats.total_allocated_bytes = total_size_of_allocated_bytes;
-    stats.max_allocated_bytes = max_size_of_allocated_bytes;
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    stats.total_brp_quarantined_bytes =
-        total_size_of_brp_quarantined_bytes.load(std::memory_order_relaxed);
-    stats.total_brp_quarantined_count =
-        total_count_of_brp_quarantined_slots.load(std::memory_order_relaxed);
-    stats.cumulative_brp_quarantined_bytes =
-        cumulative_size_of_brp_quarantined_bytes.load(
-            std::memory_order_relaxed);
-    stats.cumulative_brp_quarantined_count =
-        cumulative_count_of_brp_quarantined_slots.load(
-            std::memory_order_relaxed);
-#endif
-
-    size_t direct_mapped_allocations_total_size = 0;
-    for (size_t i = 0; i < internal::kNumBuckets; ++i) {
-      const Bucket* bucket = &bucket_at(i);
-      // Don't report the pseudo buckets that the generic allocator sets up in
-      // order to preserve a fast size->bucket map (see
-      // PartitionRoot::Init() for details).
-      if (!bucket->is_valid()) {
-        bucket_stats[i].is_valid = false;
-      } else {
-        internal::PartitionDumpBucketStats(&bucket_stats[i], bucket);
-      }
-      if (bucket_stats[i].is_valid) {
-        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
-        stats.total_active_bytes += bucket_stats[i].active_bytes;
-        stats.total_active_count += bucket_stats[i].active_count;
-        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
-        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
-      }
-    }
-
-    for (DirectMapExtent* extent = direct_map_list;
-         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
-         extent = extent->next_extent, ++num_direct_mapped_allocations) {
-      PA_DCHECK(!extent->next_extent ||
-                extent->next_extent->prev_extent == extent);
-      size_t slot_size = extent->bucket->slot_size;
-      direct_mapped_allocations_total_size += slot_size;
-      if (is_light_dump) {
-        continue;
-      }
-      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
-    }
-
-    stats.total_resident_bytes += direct_mapped_allocations_total_size;
-    stats.total_active_bytes += direct_mapped_allocations_total_size;
-    stats.total_active_count += num_direct_mapped_allocations;
-
-    stats.has_thread_cache = settings.with_thread_cache;
-    if (stats.has_thread_cache) {
-      ThreadCacheRegistry::Instance().DumpStats(
-          true, &stats.current_thread_cache_stats);
-      ThreadCacheRegistry::Instance().DumpStats(false,
-                                                &stats.all_thread_caches_stats);
-    }
-  }
-
-  // Do not hold the lock when calling |dumper|, as it may allocate.
-  if (!is_light_dump) {
-    for (auto& stat : bucket_stats) {
-      if (stat.is_valid) {
-        dumper->PartitionsDumpBucketStats(partition_name, &stat);
-      }
-    }
-
-    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
-      uint32_t size = direct_map_lengths[i];
-
-      PartitionBucketMemoryStats mapped_stats = {};
-      mapped_stats.is_valid = true;
-      mapped_stats.is_direct_map = true;
-      mapped_stats.num_full_slot_spans = 1;
-      mapped_stats.allocated_slot_span_size = size;
-      mapped_stats.bucket_slot_size = size;
-      mapped_stats.active_bytes = size;
-      mapped_stats.active_count = 1;
-      mapped_stats.resident_bytes = size;
-      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
-    }
-  }
-  dumper->PartitionDumpTotals(partition_name, &stats);
-}
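-
-// Illustrative sketch, not part of the original file: a minimal consumer of
-// DumpStats(). It assumes the PartitionStatsDumper interface declared in
-// partition_stats.h matches the two calls made above, i.e. virtual methods
-// taking (const char*, const PartitionMemoryStats*) and
-// (const char*, const PartitionBucketMemoryStats*); the class name and the
-// fields it records are hypothetical.
-class CountingStatsDumperSketch : public PartitionStatsDumper {
- public:
-  void PartitionDumpTotals(const char* partition_name,
-                           const PartitionMemoryStats* memory_stats) override {
-    total_resident_bytes_ = memory_stats->total_resident_bytes;
-  }
-  void PartitionsDumpBucketStats(
-      const char* partition_name,
-      const PartitionBucketMemoryStats* bucket_stats) override {
-    ++reported_buckets_;
-  }
-
-  size_t total_resident_bytes_ = 0;
-  size_t reported_buckets_ = 0;
-};
-// Typical call: root->DumpStats("example_partition", /*is_light_dump=*/false,
-//                               &dumper);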
-
-// static
-void PartitionRoot::DeleteForTesting(PartitionRoot* partition_root) {
-  if (partition_root->settings.with_thread_cache) {
-    ThreadCache::SwapForTesting(nullptr);
-    partition_root->settings.with_thread_cache = false;
-  }
-
-  partition_root->DestructForTesting();  // IN-TEST
-
-  delete partition_root;
-}
-
-void PartitionRoot::ResetForTesting(bool allow_leaks) {
-  if (settings.with_thread_cache) {
-    ThreadCache::SwapForTesting(nullptr);
-    settings.with_thread_cache = false;
-  }
-
-  ::partition_alloc::internal::ScopedGuard guard{
-      internal::PartitionRootLock(this)};
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  if (!allow_leaks) {
-    unsigned num_allocated_slots = 0;
-    for (Bucket& bucket : buckets) {
-      if (bucket.active_slot_spans_head !=
-          internal::SlotSpanMetadata::get_sentinel_slot_span()) {
-        for (internal::SlotSpanMetadata* slot_span =
-                 bucket.active_slot_spans_head;
-             slot_span; slot_span = slot_span->next_slot_span) {
-          num_allocated_slots += slot_span->num_allocated_slots;
-        }
-      }
-      // Full slot spans are not tracked in any list, so use
-      // bucket.num_full_slot_spans to count the slots they contain.
-      if (bucket.num_full_slot_spans) {
-        num_allocated_slots +=
-            bucket.num_full_slot_spans * bucket.get_slots_per_span();
-      }
-    }
-    PA_DCHECK(num_allocated_slots == 0);
-
-    // Check for direct-mapped allocations.
-    PA_DCHECK(!direct_map_list);
-  }
-#endif
-
-  DestructForTesting();  // IN-TEST
-
-#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-  if (initialized) {
-    internal::PartitionRootEnumerator::Instance().Unregister(this);
-  }
-#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-
-  for (Bucket& bucket : buckets) {
-    bucket.active_slot_spans_head =
-        SlotSpan::get_sentinel_slot_span_non_const();
-    bucket.empty_slot_spans_head = nullptr;
-    bucket.decommitted_slot_spans_head = nullptr;
-    bucket.num_full_slot_spans = 0;
-  }
-
-  next_super_page = 0;
-  next_partition_page = 0;
-  next_partition_page_end = 0;
-  current_extent = nullptr;
-  first_extent = nullptr;
-
-  direct_map_list = nullptr;
-  for (auto*& entity : global_empty_slot_span_ring) {
-    entity = nullptr;
-  }
-
-  global_empty_slot_span_ring_index = 0;
-  global_empty_slot_span_ring_size = internal::kDefaultEmptySlotSpanRingSize;
-  initialized = false;
-}
-
-void PartitionRoot::ResetBookkeepingForTesting() {
-  ::partition_alloc::internal::ScopedGuard guard{
-      internal::PartitionRootLock(this)};
-  max_size_of_allocated_bytes = total_size_of_allocated_bytes;
-  max_size_of_committed_pages.store(total_size_of_committed_pages);
-}
-
-ThreadCache* PartitionRoot::MaybeInitThreadCache() {
-  auto* tcache = ThreadCache::Get();
-  // See comment in `EnableThreadCacheIfSupported()` for why this is an acquire
-  // load.
-  if (ThreadCache::IsTombstone(tcache) ||
-      thread_caches_being_constructed_.load(std::memory_order_acquire)) {
-    // Two cases:
-    // 1. Thread is being terminated, don't try to use the thread cache, and
-    //    don't try to resurrect it.
-    // 2. Someone, somewhere is currently allocating a thread cache. This may
-    //    be us, in which case we are re-entering and should not create a thread
-    //    cache. If it is not us, then this merely delays thread cache
-    //    construction a bit, which is not an issue.
-    return nullptr;
-  }
-
-  // There is no per-thread ThreadCache allocated here yet, and this partition
-  // has a thread cache; allocate a new one.
-  //
-  // The thread cache allocation itself will not reenter here, as it sidesteps
-  // the thread cache by using placement new and |RawAlloc()|. However,
-  // internally to libc, allocations may happen to create a new TLS
-  // variable. This would end up here again, which is not what we want (and
-  // likely is not supported by libc).
-  //
-  // To avoid this sort of reentrancy, increase the count of thread caches
-  // that are currently being constructed.
-  //
-  // Note that there is no deadlock or data inconsistency concern, since we do
-  // not hold the lock, and as such haven't touched any internal data.
-  int before =
-      thread_caches_being_constructed_.fetch_add(1, std::memory_order_relaxed);
-  PA_CHECK(before < std::numeric_limits<int>::max());
-  tcache = ThreadCache::Create(this);
-  thread_caches_being_constructed_.fetch_sub(1, std::memory_order_relaxed);
-
-  return tcache;
-}
-
-void PartitionRoot::EnableSortActiveSlotSpans() {
-  sort_active_slot_spans_ = true;
-}
-
-static_assert(offsetof(PartitionRoot, sentinel_bucket) ==
-                  offsetof(PartitionRoot, buckets) +
-                      internal::kNumBuckets * sizeof(PartitionRoot::Bucket),
-              "sentinel_bucket must be just after the regular buckets.");
-
-static_assert(
-    offsetof(PartitionRoot, lock_) >= 64,
-    "The lock should not be on the same cacheline as the read-mostly flags");
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/partition_root.h b/base/allocator/partition_allocator/partition_root.h
deleted file mode 100644
index c664357..0000000
--- a/base/allocator/partition_allocator/partition_root.h
+++ /dev/null
@@ -1,2281 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
-
-// DESCRIPTION
-// PartitionRoot::Alloc() and PartitionRoot::Free() are approximately analogous
-// to malloc() and free().
-//
-// The main difference is that a PartitionRoot object must be supplied to these
-// functions, representing a specific "heap partition" that will be used to
-// satisfy the allocation. Different partitions are guaranteed to exist in
-// separate address spaces, including being separate from the main system
-// heap. If the contained objects are all freed, physical memory is returned to
-// the system but the address space remains reserved.  See PartitionAlloc.md for
-// other security properties PartitionAlloc provides.
-//
-// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
-// PartitionAllocator classes. To minimize the instruction count to the fullest
-// extent possible, the PartitionRoot is really just a header adjacent to other
-// data areas provided by the allocator class.
-//
-// The constraints for PartitionRoot::Alloc() are:
-// - Multi-threaded use against a single partition is ok; locking is handled.
-// - Allocations of any arbitrary size can be handled (subject to a limit of
-//   INT_MAX bytes for security reasons).
-// - Bucketing is by approximate size, for example an allocation of 4000 bytes
-//   might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
-//   keep worst-case waste to ~10%.
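-//
-// EXAMPLE (illustrative sketch, not part of the original header): the two
-// calls described above, with arbitrary sizes and type name. Production code
-// must obtain its PartitionRoot through the PartitionAllocator classes; a
-// directly-constructed root is shown only to keep the sketch self-contained.
-//
-//   partition_alloc::PartitionRoot root{partition_alloc::PartitionOptions{}};
-//   void* buffer = root.Alloc(4000, "example");  // ~4096-byte bucket
-//   root.Free(buffer);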
-
-#include <algorithm>
-#include <atomic>
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/address_pool_manager_types.h"
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/chromecast_buildflags.h"
-#include "base/allocator/partition_allocator/freeslot_bitmap.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_alloc_allocation_data.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_bucket_lookup.h"
-#include "base/allocator/partition_allocator/partition_cookie.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_freelist_entry.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-
-// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
-// size as other alloc code.
-#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags)        \
-  if (size > partition_alloc::internal::MaxDirectMapped()) { \
-    if (flags & AllocFlags::kReturnNull) {                   \
-      return nullptr;                                        \
-    }                                                        \
-    PA_CHECK(false);                                         \
-  }
-
-namespace partition_alloc::internal {
-
-// We want this size to be big enough that we have time to start up other
-// scripts _before_ we wrap around.
-static constexpr size_t kAllocInfoSize = 1 << 24;
-
-struct AllocInfo {
-  std::atomic<size_t> index{0};
-  struct {
-    uintptr_t addr;
-    size_t size;
-  } allocs[kAllocInfoSize] = {};
-};
-
-#if BUILDFLAG(RECORD_ALLOC_INFO)
-extern AllocInfo g_allocs;
-
-void RecordAllocOrFree(uintptr_t addr, size_t size);
-#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
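-
-// Illustrative sketch, not part of the original header, of how the AllocInfo
-// ring buffer above is typically filled; the real logic lives in
-// RecordAllocOrFree() in the .cc file and may differ. The function name is
-// hypothetical.
-inline void RecordAllocSketch(AllocInfo& info, uintptr_t addr, size_t size) {
-  size_t i =
-      info.index.fetch_add(1, std::memory_order_relaxed) % kAllocInfoSize;
-  info.allocs[i].addr = addr;
-  info.allocs[i].size = size;
-}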
-}  // namespace partition_alloc::internal
-
-namespace partition_alloc {
-
-namespace internal {
-// Avoid including partition_address_space.h from this .h file, by moving the
-// call to IsManagedByPartitionAllocBRPPool into the .cc file.
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address);
-#else
-PA_ALWAYS_INLINE void DCheckIfManagedByPartitionAllocBRPPool(
-    uintptr_t address) {}
-#endif
-
-#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-class PartitionRootEnumerator;
-#endif
-
-}  // namespace internal
-
-// Bit flag constants used to purge memory.  See PartitionRoot::PurgeMemory.
-//
-// In order to support bit operations like `flag_a | flag_b`, the old-fashioned
-// enum (+ surrounding named struct) is used instead of enum class.
-struct PurgeFlags {
-  enum : int {
-    // Decommitting the ring list of empty slot spans is reasonably fast.
-    kDecommitEmptySlotSpans = 1 << 0,
-    // Discarding unused system pages is slower, because it involves walking all
-    // freelists in all active slot spans of all buckets >= system page
-    // size. It often frees a similar amount of memory to decommitting the empty
-    // slot spans, though.
-    kDiscardUnusedSystemPages = 1 << 1,
-    // Aggressively reclaim memory. This is meant to be used in low-memory
-    // situations, not for periodic memory reclaiming.
-    kAggressiveReclaim = 1 << 2,
-  };
-};
-
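-// EXAMPLE (illustrative, not part of the original header): the flags combine
-// with bitwise OR and are consumed by PartitionRoot::PurgeMemory(), declared
-// below. `root` is assumed to be a pointer to an initialized PartitionRoot.
-//
-//   root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-//                     PurgeFlags::kDiscardUnusedSystemPages);
-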
-// Options struct used to configure PartitionRoot and PartitionAllocator.
-struct PartitionOptions {
-  enum class AlignedAlloc : uint8_t {
-    // By default all allocations will be aligned to `kAlignment`,
-    // likely to be 8B or 16B depending on platforms and toolchains.
-    // AlignedAlloc() allows enforcing a higher alignment.
-    // This option determines whether it is supported for the partition.
-    // Allowing AlignedAlloc() comes at a cost of disallowing extras in front
-    // of the allocation.
-    kDisallowed,
-    kAllowed,
-  };
-
-  enum class ThreadCache : uint8_t {
-    kDisabled,
-    kEnabled,
-  };
-
-  enum class StarScanQuarantine : uint8_t {
-    kDisallowed,
-    kAllowed,
-  };
-
-  enum class BackupRefPtr : uint8_t {
-    kDisabled,
-    kEnabled,
-  };
-
-  enum class UseConfigurablePool : uint8_t {
-    kNo,
-    kIfAvailable,
-  };
-
-  enum class MemoryTagging : uint8_t {
-    kEnabled,
-    kDisabled,
-  };
-
-  AlignedAlloc aligned_alloc = AlignedAlloc::kDisallowed;
-  ThreadCache thread_cache = ThreadCache::kDisabled;
-  StarScanQuarantine star_scan_quarantine = StarScanQuarantine::kDisallowed;
-  BackupRefPtr backup_ref_ptr = BackupRefPtr::kDisabled;
-  UseConfigurablePool use_configurable_pool = UseConfigurablePool::kNo;
-  size_t ref_count_size = 0;
-  struct {
-    MemoryTagging enabled = MemoryTagging::kDisabled;
-    TagViolationReportingMode reporting_mode =
-        TagViolationReportingMode::kUndefined;
-  } memory_tagging;
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  ThreadIsolationOption thread_isolation;
-#endif
-};
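-
-// EXAMPLE (illustrative, not part of the original header): a partition that
-// wants a thread cache and BRP support would be configured roughly as below.
-// BackupRefPtr::kEnabled is only legal when ENABLE_BACKUP_REF_PTR_SUPPORT is
-// compiled in; PartitionRoot::Init() checks the combination of options.
-//
-//   PartitionOptions opts;
-//   opts.thread_cache = PartitionOptions::ThreadCache::kEnabled;
-//   opts.backup_ref_ptr = PartitionOptions::BackupRefPtr::kEnabled;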
-
-// Never instantiate a PartitionRoot directly, instead use
-// PartitionAllocator.
-struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
-  using SlotSpan = internal::SlotSpanMetadata;
-  using Page = internal::PartitionPage;
-  using Bucket = internal::PartitionBucket;
-  using FreeListEntry = internal::PartitionFreelistEntry;
-  using SuperPageExtentEntry = internal::PartitionSuperPageExtentEntry;
-  using DirectMapExtent = internal::PartitionDirectMapExtent;
-#if BUILDFLAG(USE_STARSCAN)
-  using PCScan = internal::PCScan;
-#endif
-
-  enum class QuarantineMode : uint8_t {
-    kAlwaysDisabled,
-    kDisabledByDefault,
-    kEnabled,
-  };
-
-  enum class ScanMode : uint8_t {
-    kDisabled,
-    kEnabled,
-  };
-
-  enum class BucketDistribution : uint8_t { kNeutral, kDenser };
-
-  // Root settings accessed on fast paths.
-  //
-  // Careful! PartitionAlloc's performance is sensitive to its layout.  Please
-  // put the fast-path objects in the struct below.
-  struct alignas(internal::kPartitionCachelineSize) Settings {
-    // Chromium-style: Complex constructor needs an explicit out-of-line
-    // constructor.
-    Settings();
-
-    // Defines whether objects should be quarantined for this root.
-    QuarantineMode quarantine_mode = QuarantineMode::kAlwaysDisabled;
-
-    // Defines whether the root should be scanned.
-    ScanMode scan_mode = ScanMode::kDisabled;
-
-    // It's important to default to the 'neutral' distribution; otherwise a
-    // switch from 'dense' -> 'neutral' would leave some buckets with dirty
-    // memory forever: no memory would be allocated from them, yet their
-    // freelists would typically not be empty, making them unreclaimable.
-    BucketDistribution bucket_distribution = BucketDistribution::kNeutral;
-
-    bool with_thread_cache = false;
-
-    bool allow_aligned_alloc = false;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    bool use_cookie = false;
-#else
-    static constexpr bool use_cookie = false;
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    bool brp_enabled_ = false;
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-    bool mac11_malloc_size_hack_enabled_ = false;
-    size_t mac11_malloc_size_hack_usable_size_ = 0;
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    bool use_configurable_pool = false;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    bool memory_tagging_enabled_ = false;
-    TagViolationReportingMode memory_tagging_reporting_mode_ =
-        TagViolationReportingMode::kUndefined;
-#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-    size_t ref_count_size = 0;
-#endif  // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    ThreadIsolationOption thread_isolation;
-#endif
-
-#if PA_CONFIG(EXTRAS_REQUIRED)
-    uint32_t extras_size = 0;
-    uint32_t extras_offset = 0;
-#else
-    // Teach the compiler that code can be optimized in builds that use no
-    // extras.
-    static inline constexpr uint32_t extras_size = 0;
-    static inline constexpr uint32_t extras_offset = 0;
-#endif  // PA_CONFIG(EXTRAS_REQUIRED)
-  };
-
-  Settings settings;
-
-  // Not used on the fastest path (thread cache allocations), but on the fast
-  // path of the central allocator.
-  alignas(internal::kPartitionCachelineSize) internal::Lock lock_;
-
-  Bucket buckets[internal::kNumBuckets] = {};
-  Bucket sentinel_bucket{};
-
-  // All fields below this comment are not accessed on the fast path.
-  bool initialized = false;
-
-  // Bookkeeping.
-  // - total_size_of_super_pages - total virtual address space for normal bucket
-  //     super pages
-  // - total_size_of_direct_mapped_pages - total virtual address space for
-  //     direct-map regions
-  // - total_size_of_committed_pages - total committed pages for slots (doesn't
-  //     include metadata, bitmaps (if any), or any data outside of the regions
-  //     described by the first two entries above)
-  // Invariant: total_size_of_allocated_bytes <=
-  //            total_size_of_committed_pages <
-  //                total_size_of_super_pages +
-  //                total_size_of_direct_mapped_pages.
-  // Invariant: total_size_of_committed_pages <= max_size_of_committed_pages.
-  // Invariant: total_size_of_allocated_bytes <= max_size_of_allocated_bytes.
-  // Invariant: max_size_of_allocated_bytes <= max_size_of_committed_pages.
-  // Since all operations on the atomic variables have relaxed semantics, we
-  // don't check these invariants with DCHECKs.
-  std::atomic<size_t> total_size_of_committed_pages{0};
-  std::atomic<size_t> max_size_of_committed_pages{0};
-  std::atomic<size_t> total_size_of_super_pages{0};
-  std::atomic<size_t> total_size_of_direct_mapped_pages{0};
-  size_t total_size_of_allocated_bytes
-      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
-  size_t max_size_of_allocated_bytes
-      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
-  // Atomic, because system calls can be made without the lock held.
-  std::atomic<uint64_t> syscall_count{};
-  std::atomic<uint64_t> syscall_total_time_ns{};
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  std::atomic<size_t> total_size_of_brp_quarantined_bytes{0};
-  std::atomic<size_t> total_count_of_brp_quarantined_slots{0};
-  std::atomic<size_t> cumulative_size_of_brp_quarantined_bytes{0};
-  std::atomic<size_t> cumulative_count_of_brp_quarantined_slots{0};
-#endif
-  // Slot span memory which has been provisioned, and is currently unused as
-  // it's part of an empty SlotSpan. This is not clean memory, since it has
-  // been used for a memory allocation and/or contains freelist entries. It
-  // might also have been moved to swap. Note that all this memory
-  // can be decommitted at any time.
-  size_t empty_slot_spans_dirty_bytes
-      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
-
-  // Only tolerate up to |total_size_of_committed_pages >>
-  // max_empty_slot_spans_dirty_bytes_shift| dirty bytes in empty slot
-  // spans. That is, the default value of 3 tolerates up to 1/8. Since
-  // |empty_slot_spans_dirty_bytes| is never strictly larger than
-  // total_size_of_committed_pages, setting this to 0 removes the cap. This is
-  // useful to make tests deterministic and easier to reason about.
-  int max_empty_slot_spans_dirty_bytes_shift = 3;
-
-  uintptr_t next_super_page = 0;
-  uintptr_t next_partition_page = 0;
-  uintptr_t next_partition_page_end = 0;
-  SuperPageExtentEntry* current_extent = nullptr;
-  SuperPageExtentEntry* first_extent = nullptr;
-  DirectMapExtent* direct_map_list
-      PA_GUARDED_BY(internal::PartitionRootLock(this)) = nullptr;
-  SlotSpan*
-      global_empty_slot_span_ring[internal::kMaxFreeableSpans] PA_GUARDED_BY(
-          internal::PartitionRootLock(this)) = {};
-  int16_t global_empty_slot_span_ring_index
-      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
-  int16_t global_empty_slot_span_ring_size
-      PA_GUARDED_BY(internal::PartitionRootLock(this)) =
-          internal::kDefaultEmptySlotSpanRingSize;
-
-  // Integrity check = ~reinterpret_cast<uintptr_t>(this).
-  uintptr_t inverted_self = 0;
-  std::atomic<int> thread_caches_being_constructed_{0};
-
-  bool quarantine_always_for_testing = false;
-
-  PartitionRoot();
-  explicit PartitionRoot(PartitionOptions opts);
-
-  // TODO(tasak): remove ~PartitionRoot() after confirming all tests
-  // don't need ~PartitionRoot().
-  ~PartitionRoot();
-
-  // This will unreserve any space in the pool that the PartitionRoot is
-  // using. This is needed because many tests create and destroy many
-  // PartitionRoots over the lifetime of a process, which can exhaust the
-  // pool and cause tests to fail.
-  void DestructForTesting();
-
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-  void EnableMac11MallocSizeHackIfNeeded(size_t ref_count_size);
-  void EnableMac11MallocSizeHackForTesting(size_t ref_count_size);
-  void InitMac11MallocSizeHackUsableSize(size_t ref_count_size);
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-
-  // Public API
-  //
-  // Allocates out of the given bucket. Properly, this function should probably
-  // be in PartitionBucket, but because the implementation needs to be inlined
-  // for performance, and because it needs to inspect SlotSpanMetadata,
-  // it becomes impossible to have it in PartitionBucket as this causes a
-  // cyclical dependency on SlotSpanMetadata function implementations.
-  //
-  // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
-  // preserves the layering of the includes.
-  void Init(PartitionOptions);
-
-  void EnableThreadCacheIfSupported();
-
-  PA_ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
-  // These two functions work unconditionally for normal buckets.
-  // For direct map, they only work for the first super page of a reservation,
-  // (see partition_alloc_constants.h for the direct map allocation layout).
-  // In particular, the functions always work for a pointer to the start of a
-  // reservation.
-  PA_ALWAYS_INLINE static PartitionRoot* FromFirstSuperPage(
-      uintptr_t super_page);
-  PA_ALWAYS_INLINE static PartitionRoot* FromAddrInFirstSuperpage(
-      uintptr_t address);
-
-  PA_ALWAYS_INLINE void DecreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
-                                                          size_t len)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  PA_ALWAYS_INLINE void IncreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
-                                                          size_t len,
-                                                          size_t raw_size)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  PA_ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
-  PA_ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
-  PA_ALWAYS_INLINE void DecommitSystemPagesForData(
-      uintptr_t address,
-      size_t length,
-      PageAccessibilityDisposition accessibility_disposition)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  PA_ALWAYS_INLINE void RecommitSystemPagesForData(
-      uintptr_t address,
-      size_t length,
-      PageAccessibilityDisposition accessibility_disposition,
-      bool request_tagging)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  PA_ALWAYS_INLINE bool TryRecommitSystemPagesForData(
-      uintptr_t address,
-      size_t length,
-      PageAccessibilityDisposition accessibility_disposition,
-      bool request_tagging)
-      PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
-
-  [[noreturn]] PA_NOINLINE void OutOfMemory(size_t size);
-
-  // Returns a pointer aligned on |alignment|, or nullptr.
-  //
-  // |alignment| has to be a power of two and a multiple of sizeof(void*) (as in
-  // posix_memalign() for POSIX systems). The returned pointer may include
-  // padding, and can be passed to |Free()| later.
-  //
-  // NOTE: This is incompatible with anything that adds extras before the
-  // returned pointer, such as ref-count.
-  PA_ALWAYS_INLINE void* AlignedAllocWithFlags(unsigned int flags,
-                                               size_t alignment,
-                                               size_t requested_size);
-
-  // PartitionAlloc supports multiple partitions, and hence multiple callers to
-  // these functions. Setting PA_ALWAYS_INLINE bloats code, and can be
-  // detrimental to performance, for instance if multiple callers are hot (by
-  // increasing cache footprint). Set PA_NOINLINE on the "basic" top-level
-  // functions to mitigate that for "vanilla" callers.
-  PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t requested_size,
-                                       const char* type_name)
-      PA_MALLOC_ALIGNED {
-    return AllocWithFlags(0, requested_size, type_name);
-  }
-  PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlags(unsigned int flags,
-                                                     size_t requested_size,
-                                                     const char* type_name)
-      PA_MALLOC_ALIGNED;
-  // Same as |AllocWithFlags()|, but allows specifying |slot_span_alignment|. It
-  // has to be a multiple of partition page size, greater than 0 and no greater
-  // than kMaxSupportedAlignment. If it equals exactly 1 partition page, no
-  // special action is taken as PartitionAlloc naturally guarantees this
-  // alignment; otherwise a sub-optimal allocation strategy is used to
-  // guarantee the higher-order alignment.
-  PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlagsInternal(
-      unsigned int flags,
-      size_t requested_size,
-      size_t slot_span_alignment,
-      const char* type_name) PA_MALLOC_ALIGNED;
-  // Same as |AllocWithFlags()|, but bypasses the allocator hooks.
-  //
-  // This is separate from AllocWithFlags() because other callers of
-  // AllocWithFlags() should not have the extra branch checking whether the
-  // hooks should be ignored or not. This is the same reason why |FreeNoHooks()|
-  // exists. However, |AlignedAlloc()| and |Realloc()| have few callers, so
-  // taking the extra branch in the non-malloc() case doesn't hurt. In addition,
-  // for the malloc() case, the compiler correctly removes the branch, since
-  // this is marked |PA_ALWAYS_INLINE|.
-  PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlagsNoHooks(
-      unsigned int flags,
-      size_t requested_size,
-      size_t slot_span_alignment) PA_MALLOC_ALIGNED;
-
-  PA_NOINLINE void* Realloc(void* ptr,
-                            size_t new_size,
-                            const char* type_name) PA_MALLOC_ALIGNED {
-    return ReallocWithFlags(0, ptr, new_size, type_name);
-  }
-
-  // Overload that may return nullptr if reallocation isn't possible. In this
-  // case, |ptr| remains valid.
-  PA_NOINLINE void* TryRealloc(void* ptr,
-                               size_t new_size,
-                               const char* type_name) PA_MALLOC_ALIGNED {
-    return ReallocWithFlags(AllocFlags::kReturnNull, ptr, new_size, type_name);
-  }
-  PA_NOINLINE void* ReallocWithFlags(unsigned int flags,
-                                     void* ptr,
-                                     size_t new_size,
-                                     const char* type_name) PA_MALLOC_ALIGNED;
-  template <unsigned int flags = 0>
-  PA_NOINLINE void Free(void* object);
-  PA_ALWAYS_INLINE void FreeNoHooks(void* object);
-
-  template <unsigned int flags = 0>
-  PA_NOINLINE static void FreeInUnknownRoot(void* object);
-  PA_ALWAYS_INLINE static void FreeNoHooksInUnknownRoot(void* object);
-  // Immediately frees the pointer bypassing the quarantine. |slot_start| is the
-  // beginning of the slot that contains |object|.
-  PA_ALWAYS_INLINE void FreeNoHooksImmediate(void* object,
-                                             SlotSpan* slot_span,
-                                             uintptr_t slot_start);
-
-  PA_ALWAYS_INLINE size_t GetSlotUsableSize(SlotSpan* slot_span) {
-    return AdjustSizeForExtrasSubtract(slot_span->GetUtilizedSlotSize());
-  }
-
-  PA_ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
-
-  // Same as GetUsableSize() except it adjusts the return value for macOS 11
-  // malloc_size() hack.
-  PA_ALWAYS_INLINE static size_t GetUsableSizeWithMac11MallocSizeHack(
-      void* ptr);
-
-  PA_ALWAYS_INLINE PageAccessibilityConfiguration
-  GetPageAccessibility(bool request_tagging) const;
-  PA_ALWAYS_INLINE PageAccessibilityConfiguration
-      PageAccessibilityWithThreadIsolationIfEnabled(
-          PageAccessibilityConfiguration::Permissions) const;
-
-  PA_ALWAYS_INLINE size_t
-  AllocationCapacityFromSlotStart(uintptr_t slot_start) const;
-  PA_ALWAYS_INLINE size_t
-  AllocationCapacityFromRequestedSize(size_t size) const;
-
-  PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
-  PA_ALWAYS_INLINE TagViolationReportingMode
-  memory_tagging_reporting_mode() const;
-
-  // Frees memory from this partition, if possible, by decommitting pages or
-  // even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
-  void PurgeMemory(int flags);
-
-  // Reduces the size of the empty slot spans ring, until the dirty size is <=
-  // |limit|.
-  void ShrinkEmptySlotSpansRing(size_t limit)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  // The empty slot span ring starts "small" and can be enlarged later. This
-  // improves performance by performing fewer system calls, at the cost of more
-  // memory usage.
-  void EnableLargeEmptySlotSpanRing() {
-    ::partition_alloc::internal::ScopedGuard locker{
-        internal::PartitionRootLock(this)};
-    global_empty_slot_span_ring_size = internal::kMaxFreeableSpans;
-  }
-
-  void DumpStats(const char* partition_name,
-                 bool is_light_dump,
-                 PartitionStatsDumper* partition_stats_dumper);
-
-  static void DeleteForTesting(PartitionRoot* partition_root);
-  void ResetForTesting(bool allow_leaks);
-  void ResetBookkeepingForTesting();
-
-  PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const {
-    return settings.bucket_distribution;
-  }
-
-  static uint16_t SizeToBucketIndex(size_t size,
-                                    BucketDistribution bucket_distribution);
-
-  PA_ALWAYS_INLINE void FreeInSlotSpan(uintptr_t slot_start,
-                                       SlotSpan* slot_span)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-
-  // Frees memory, with |slot_start| as returned by |RawAlloc()|.
-  PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start);
-  PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start, SlotSpan* slot_span)
-      PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
-
-  PA_ALWAYS_INLINE void RawFreeBatch(FreeListEntry* head,
-                                     FreeListEntry* tail,
-                                     size_t size,
-                                     SlotSpan* slot_span)
-      PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
-
-  PA_ALWAYS_INLINE void RawFreeWithThreadCache(uintptr_t slot_start,
-                                               SlotSpan* slot_span);
-
-  // This is safe to do because we are switching to a bucket distribution with
-  // more buckets, meaning any allocations we have done before the switch are
-  // guaranteed to have a bucket under the new distribution when they are
-  // eventually deallocated. We do not need synchronization here.
-  void SwitchToDenserBucketDistribution() {
-    settings.bucket_distribution = BucketDistribution::kDenser;
-  }
-  // Switching back to the less dense bucket distribution is ok during tests.
-  // At worst, we end up with deallocations that are sent to a bucket that we
-  // cannot allocate from, which will not cause problems besides wasting
-  // memory.
-  void ResetBucketDistributionForTesting() {
-    settings.bucket_distribution = BucketDistribution::kNeutral;
-  }
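-
-  // A minimal illustrative sketch of the intended switching order, assuming a
-  // root that starts out with the default BucketDistribution::kNeutral; the
-  // accessor name is hypothetical:
-  //
-  //   PartitionRoot* root = GetSomePartitionRoot();  // hypothetical
-  //   root->SwitchToDenserBucketDistribution();      // no locking needed
-  //   // ... allocations made before the switch still free correctly ...
-  //   root->ResetBucketDistributionForTesting();     // tests only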
-
-  ThreadCache* thread_cache_for_testing() const {
-    return settings.with_thread_cache ? ThreadCache::Get() : nullptr;
-  }
-  size_t get_total_size_of_committed_pages() const {
-    return total_size_of_committed_pages.load(std::memory_order_relaxed);
-  }
-  size_t get_max_size_of_committed_pages() const {
-    return max_size_of_committed_pages.load(std::memory_order_relaxed);
-  }
-
-  size_t get_total_size_of_allocated_bytes() const {
-    // Since this is only used for bookkeeping, we don't care if the value is
-    // stale, so no need to get a lock here.
-    return PA_TS_UNCHECKED_READ(total_size_of_allocated_bytes);
-  }
-
-  size_t get_max_size_of_allocated_bytes() const {
-    // Since this is only used for bookkeeping, we don't care if the value is
-    // stale, so no need to get a lock here.
-    return PA_TS_UNCHECKED_READ(max_size_of_allocated_bytes);
-  }
-
-  internal::pool_handle ChoosePool() const {
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-    if (settings.use_configurable_pool) {
-      PA_DCHECK(IsConfigurablePoolAvailable());
-      return internal::kConfigurablePoolHandle;
-    }
-#endif
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-    if (settings.thread_isolation.enabled) {
-      return internal::kThreadIsolatedPoolHandle;
-    }
-#endif
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    return brp_enabled() ? internal::kBRPPoolHandle
-                         : internal::kRegularPoolHandle;
-#else
-    return internal::kRegularPoolHandle;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  }
-
-  PA_ALWAYS_INLINE bool IsQuarantineAllowed() const {
-    return settings.quarantine_mode != QuarantineMode::kAlwaysDisabled;
-  }
-
-  PA_ALWAYS_INLINE bool IsQuarantineEnabled() const {
-    return settings.quarantine_mode == QuarantineMode::kEnabled;
-  }
-
-  PA_ALWAYS_INLINE bool ShouldQuarantine(void* object) const {
-    if (PA_UNLIKELY(settings.quarantine_mode != QuarantineMode::kEnabled)) {
-      return false;
-    }
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    if (PA_UNLIKELY(quarantine_always_for_testing)) {
-      return true;
-    }
-    // If quarantine is enabled and the tag overflows, move the containing slot
-    // to quarantine, to prevent the attacker from exploiting a pointer that has
-    // an old tag.
-    if (PA_LIKELY(IsMemoryTaggingEnabled())) {
-      return internal::HasOverflowTag(object);
-    }
-    // Default behaviour if MTE is not enabled for this PartitionRoot.
-    return true;
-#else
-    return true;
-#endif
-  }
-
-  PA_ALWAYS_INLINE void SetQuarantineAlwaysForTesting(bool value) {
-    quarantine_always_for_testing = value;
-  }
-
-  PA_ALWAYS_INLINE bool IsScanEnabled() const {
-    // Enabled scan implies enabled quarantine.
-    PA_DCHECK(settings.scan_mode != ScanMode::kEnabled ||
-              IsQuarantineEnabled());
-    return settings.scan_mode == ScanMode::kEnabled;
-  }
-
-  PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-  GetDirectMapMetadataAndGuardPagesSize() {
-    // Because we need to fake a direct-map region to look like a super page, we
-    // need to allocate more pages around the payload:
-    // - The first partition page is a combination of metadata and guard region.
-    // - We also add a trailing guard page. In most cases, a system page would
-    //   suffice. But on 32-bit systems when BRP is on, we need a partition page
-    //   to match granularity of the BRP pool bitmap. For consistency, we'll
-    //   use a partition page everywhere, which is cheap as it's uncommitted
-    //   address space anyway.
-    return 2 * internal::PartitionPageSize();
-  }
-
-  PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
-  GetDirectMapSlotSize(size_t raw_size) {
-    // Caller must check that the size is not above the MaxDirectMapped()
-    // limit before calling. This also guards against integer overflow in the
-    // calculation here.
-    PA_DCHECK(raw_size <= internal::MaxDirectMapped());
-    return partition_alloc::internal::base::bits::AlignUp(
-        raw_size, internal::SystemPageSize());
-  }
-
-  PA_ALWAYS_INLINE static size_t GetDirectMapReservationSize(
-      size_t padded_raw_size) {
-    // Caller must check that the size is not above the MaxDirectMapped()
-    // limit before calling. This also guards against integer overflow in the
-    // calculation here.
-    PA_DCHECK(padded_raw_size <= internal::MaxDirectMapped());
-    return partition_alloc::internal::base::bits::AlignUp(
-        padded_raw_size + GetDirectMapMetadataAndGuardPagesSize(),
-        internal::DirectMapAllocationGranularity());
-  }
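-
-  // A worked example of the direct-map sizing above, using illustrative
-  // constants (the real values are platform- and configuration-dependent).
-  // Assume SystemPageSize() == 4096, PartitionPageSize() == 16384 and
-  // DirectMapAllocationGranularity() == 65536, with no extra alignment
-  // padding:
-  //
-  //   raw_size                                 = 1'000'000
-  //   GetDirectMapSlotSize(raw_size)           = AlignUp(1'000'000, 4096)
-  //                                            = 1'003'520
-  //   GetDirectMapMetadataAndGuardPagesSize()  = 2 * 16'384 = 32'768
-  //   GetDirectMapReservationSize(1'003'520)   = AlignUp(1'003'520 + 32'768,
-  //                                                      65'536)
-  //                                            = 1'048'576
-  //
-  // The slot itself is rounded up to a system page, while the reservation adds
-  // two partition pages (metadata/guard + trailing guard) and rounds up to the
-  // direct-map allocation granularity.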
-
-  PA_ALWAYS_INLINE size_t AdjustSize0IfNeeded(size_t size) const {
-    // There are known cases where allowing size 0 would lead to problems:
-    // 1. If extras are present only before allocation (e.g. BRP ref-count), the
-    //    extras will fill the entire kAlignment-sized slot, leading to
-    //    returning a pointer to the next slot. ReallocWithFlags() calls
-    //    SlotSpanMetadata::FromObject() prior to subtracting extras, thus
-    //    potentially getting a wrong slot span.
-    // 2. If we put BRP ref-count in the previous slot, that slot may be free.
-    //    In this case, the slot needs to fit both, a free-list entry and a
-    //    ref-count. If sizeof(PartitionRefCount) is 8, it fills the entire
-    //    smallest slot on 32-bit systems (kSmallestBucket is 8), thus not
-    //    leaving space for the free-list entry.
-    // 3. On macOS and iOS, PartitionGetSizeEstimate() is used for two purposes:
-    //    as a zone dispatcher and as an underlying implementation of
-    //    malloc_size(3). As a zone dispatcher, zero has a special meaning of
-    //    "doesn't belong to this zone". When extras fill out the entire slot,
-    //    the usable size is 0, thus confusing the zone dispatcher.
-    //
-    // To save ourselves a branch on this hot path, we could eliminate this
-    // check at compile time for cases not listed above. The #if statement would
-    // be rather complex. Then there is also the fear of the unknown. The
-    // existing cases were discovered through obscure, painful-to-debug crashes.
-    // Better to save ourselves the trouble with not-yet-discovered cases.
-    if (PA_UNLIKELY(size == 0)) {
-      return 1;
-    }
-    return size;
-  }
-
-  // Adjusts the size by adding extras. Also includes the 0->1 adjustment if
-  // needed.
-  PA_ALWAYS_INLINE size_t AdjustSizeForExtrasAdd(size_t size) const {
-    size = AdjustSize0IfNeeded(size);
-    PA_DCHECK(size + settings.extras_size >= size);
-    return size + settings.extras_size;
-  }
-
-  // Adjusts the size by subtracting extras. Doesn't include the 0->1 adjustment,
-  // which leads to an asymmetry with AdjustSizeForExtrasAdd, but callers of
-  // AdjustSizeForExtrasSubtract either expect the adjustment to be included, or
-  // are indifferent.
-  PA_ALWAYS_INLINE size_t AdjustSizeForExtrasSubtract(size_t size) const {
-    return size - settings.extras_size;
-  }
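-
-  // A small sketch of the add/subtract asymmetry, assuming extras_size == 16
-  // (the actual value depends on the cookie/ref-count configuration):
-  //
-  //   AdjustSizeForExtrasAdd(0)       == 1 + 16 == 17  // includes the 0->1 bump
-  //   AdjustSizeForExtrasSubtract(17) == 17 - 16 == 1  // the bump isn't undone
-  //
-  // For any non-zero |size|, Subtract(Add(size)) == size.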
-
-  PA_ALWAYS_INLINE uintptr_t SlotStartToObjectAddr(uintptr_t slot_start) const {
-    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-    return slot_start + settings.extras_offset;
-  }
-
-  PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
-    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-    return internal::TagAddr(SlotStartToObjectAddr(slot_start));
-  }
-
-  PA_ALWAYS_INLINE void* TaggedSlotStartToObject(
-      void* tagged_slot_start) const {
-    // TODO(bartekn): Check that |tagged_slot_start| is indeed a slot start.
-    return reinterpret_cast<void*>(
-        SlotStartToObjectAddr(reinterpret_cast<uintptr_t>(tagged_slot_start)));
-  }
-
-  PA_ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
-    return UntagPtr(object) - settings.extras_offset;
-    // TODO(bartekn): Check that the result is indeed a slot start.
-  }
-
-  PA_ALWAYS_INLINE uintptr_t ObjectToTaggedSlotStart(void* object) const {
-    return reinterpret_cast<uintptr_t>(object) - settings.extras_offset;
-    // TODO(bartekn): Check that the result is indeed a slot start.
-  }
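-
-  // A sketch of the slot-start <-> object round trip, assuming
-  // extras_offset == 16 (it is zero in some configurations):
-  //
-  //   uintptr_t slot_start = ...;                    // e.g. from RawAlloc()
-  //   void* object = SlotStartToObject(slot_start);  // slot_start + 16, tagged
-  //   PA_DCHECK(ObjectToSlotStart(object) == slot_start);
-  //
-  // ObjectToSlotStart() untags the pointer, so the round trip recovers the
-  // original (untagged) slot start.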
-
-  bool brp_enabled() const {
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    return settings.brp_enabled_;
-#else
-    return false;
-#endif
-  }
-
-  PA_ALWAYS_INLINE bool uses_configurable_pool() const {
-    return settings.use_configurable_pool;
-  }
-
-  // To make tests deterministic, it is necessary to uncap the amount of memory
-  // waste incurred by empty slot spans. Otherwise, the sizes of various
-  // freelists and the amount of committed memory become harder to reason about
-  // (and brittle) with a single thread, and non-deterministic with several.
-  void UncapEmptySlotSpanMemoryForTesting() {
-    max_empty_slot_spans_dirty_bytes_shift = 0;
-  }
-
-  // Enables the sorting of active slot spans in PurgeMemory().
-  static void EnableSortActiveSlotSpans();
-
- private:
-  static inline bool sort_active_slot_spans_ = false;
-
-  // Common path of Free() and FreeInUnknownRoot(). Returns
-  // true if the caller should return immediately.
-  template <unsigned int flags>
-  PA_ALWAYS_INLINE static bool FreeProlog(void* object,
-                                          const PartitionRoot* root);
-
-  // |buckets| has `kNumBuckets` elements, but we sometimes access it at index
-  // `kNumBuckets`, which is occupied by the sentinel bucket. The correct layout
-  // is enforced by a static_assert() in partition_root.cc, so this is
-  // fine. However, UBSAN is correctly pointing out that there is an
-  // out-of-bounds access, so disable it for these accesses.
-  //
-  // See crbug.com/1150772 for an instance of Clusterfuzz / UBSAN detecting
-  // this.
-  PA_ALWAYS_INLINE const Bucket& PA_NO_SANITIZE("undefined")
-      bucket_at(size_t i) const {
-    PA_DCHECK(i <= internal::kNumBuckets);
-    return buckets[i];
-  }
-
-  // Returns whether a |bucket| from |this| root is direct-mapped. This function
-  // does not touch |bucket|, unlike PartitionBucket::is_direct_mapped().
-  //
-  // This is meant to be used in hot paths, and particularly *before* going into
-  // the thread cache fast path. Indeed, real-world profiles show that accessing
-  // an allocation's bucket is responsible for a sizable fraction of *total*
-  // deallocation time. This can be understood because
-  // - All deallocations have to access the bucket to know whether it is
-  //   direct-mapped. If not (vast majority of allocations), it can go through
-  //   the fast path, i.e. thread cache.
-  // - The bucket is relatively frequently written to, by *all* threads
-  //   (e.g. every time a slot span becomes full or empty), so accessing it will
-  //   result in some amount of cacheline ping-pong.
-  PA_ALWAYS_INLINE bool IsDirectMappedBucket(Bucket* bucket) const {
-    // All regular allocations are associated with a bucket in the |buckets_|
-    // array. A range check is then sufficient to identify direct-mapped
-    // allocations.
-    bool ret = !(bucket >= this->buckets && bucket <= &this->sentinel_bucket);
-    PA_DCHECK(ret == bucket->is_direct_mapped());
-    return ret;
-  }
-
-  // Allocates a memory slot, without initializing extras.
-  //
-  // - |flags| are as in AllocWithFlags().
-  // - |raw_size| accommodates extras on top of AllocWithFlags()'s
-  //   |requested_size|.
-  // - |usable_size| and |is_already_zeroed| are output only. |usable_size| is
-  //   guaranteed to be larger or equal to AllocWithFlags()'s |requested_size|.
-  PA_ALWAYS_INLINE uintptr_t RawAlloc(Bucket* bucket,
-                                      unsigned int flags,
-                                      size_t raw_size,
-                                      size_t slot_span_alignment,
-                                      size_t* usable_size,
-                                      bool* is_already_zeroed);
-  PA_ALWAYS_INLINE uintptr_t AllocFromBucket(Bucket* bucket,
-                                             unsigned int flags,
-                                             size_t raw_size,
-                                             size_t slot_span_alignment,
-                                             size_t* usable_size,
-                                             bool* is_already_zeroed)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-
-  bool TryReallocInPlaceForNormalBuckets(void* object,
-                                         SlotSpan* slot_span,
-                                         size_t new_size);
-  bool TryReallocInPlaceForDirectMap(internal::SlotSpanMetadata* slot_span,
-                                     size_t requested_size)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  void DecommitEmptySlotSpans()
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  PA_ALWAYS_INLINE void RawFreeLocked(uintptr_t slot_start)
-      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
-  ThreadCache* MaybeInitThreadCache();
-
-  // May return an invalid thread cache.
-  PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
-  PA_ALWAYS_INLINE ThreadCache* GetThreadCache();
-
-  PA_ALWAYS_INLINE AllocationNotificationData
-  CreateAllocationNotificationData(void* object,
-                                   size_t size,
-                                   const char* type_name) const;
-  PA_ALWAYS_INLINE static FreeNotificationData
-  CreateDefaultFreeNotificationData(void* address);
-  PA_ALWAYS_INLINE FreeNotificationData
-  CreateFreeNotificationData(void* address) const;
-
-#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-  static internal::Lock& GetEnumeratorLock();
-
-  PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) next_root = nullptr;
-  PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) prev_root = nullptr;
-
-  friend class internal::PartitionRootEnumerator;
-#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
-
-  friend class ThreadCache;
-};
-
-namespace internal {
-
-PA_ALWAYS_INLINE ::partition_alloc::internal::Lock& PartitionRootLock(
-    PartitionRoot* root) {
-  return root->lock_;
-}
-
-class ScopedSyscallTimer {
- public:
-#if PA_CONFIG(COUNT_SYSCALL_TIME)
-  explicit ScopedSyscallTimer(PartitionRoot* root)
-      : root_(root), tick_(base::TimeTicks::Now()) {}
-
-  ~ScopedSyscallTimer() {
-    root_->syscall_count.fetch_add(1, std::memory_order_relaxed);
-
-    int64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
-    if (elapsed_nanos > 0) {
-      root_->syscall_total_time_ns.fetch_add(
-          static_cast<uint64_t>(elapsed_nanos), std::memory_order_relaxed);
-    }
-  }
-
- private:
-  PartitionRoot* root_;
-  const base::TimeTicks tick_;
-#else
-  explicit ScopedSyscallTimer(PartitionRoot* root) {
-    root->syscall_count.fetch_add(1, std::memory_order_relaxed);
-  }
-#endif
-};
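-
-// A minimal usage sketch (the variables are illustrative): the timer
-// attributes the duration of the enclosed scope, typically a single system
-// call, to |root|'s syscall counters.
-//
-//   {
-//     ScopedSyscallTimer timer{root};
-//     DecommitSystemPages(address, length, accessibility_disposition);
-//   }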
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-PA_ALWAYS_INLINE uintptr_t
-PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
-  PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  // Use this variant of GetDirectMapReservationStart as it has better
-  // performance.
-  uintptr_t offset = OffsetInBRPPool(address);
-  uintptr_t reservation_start =
-      GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
-#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
-  uintptr_t reservation_start = GetDirectMapReservationStart(address);
-#endif
-  if (!reservation_start) {
-    return 0;
-  }
-
-  // The direct map allocation may not start exactly from the first page, as
-  // there may be padding for alignment. The first page metadata holds an offset
-  // to where direct map metadata, and thus direct map start, are located.
-  auto* first_page =
-      PartitionPage::FromAddr(reservation_start + PartitionPageSize());
-  auto* page = first_page + first_page->slot_span_metadata_offset;
-  PA_DCHECK(page->is_valid);
-  PA_DCHECK(!page->slot_span_metadata_offset);
-  auto* slot_span = &page->slot_span_metadata;
-  uintptr_t slot_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  auto* metadata = PartitionDirectMapMetadata::FromSlotSpan(slot_span);
-  size_t padding_for_alignment =
-      metadata->direct_map_extent.padding_for_alignment;
-  PA_DCHECK(padding_for_alignment ==
-            static_cast<size_t>(page - first_page) * PartitionPageSize());
-  PA_DCHECK(slot_start ==
-            reservation_start + PartitionPageSize() + padding_for_alignment);
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-  return slot_start;
-}
-
-// Gets the address of the beginning of the allocated slot. The input |address|
-// can point anywhere in the slot, including the slot start as well as
-// immediately past the slot.
-//
-// This isn't a general purpose function, it is used specifically for obtaining
-// BackupRefPtr's ref-count. The caller is responsible for ensuring that the
-// ref-count is in place for this allocation.
-PA_ALWAYS_INLINE uintptr_t
-PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
-  // Adjust to support pointers right past the end of an allocation, which in
-  // some cases appear to point outside the designated allocation slot.
-  //
-  // If ref-count is present before the allocation, then adjusting a valid
-  // pointer down will not cause us to go down to the previous slot, otherwise
-  // no adjustment is needed (and likely wouldn't be correct as there is
-  // a risk of going down to the previous slot). Either way,
-  // kPartitionPastAllocationAdjustment takes care of that detail.
-  address -= kPartitionPastAllocationAdjustment;
-  PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(address));
-  DCheckIfManagedByPartitionAllocBRPPool(address);
-
-  uintptr_t directmap_slot_start =
-      PartitionAllocGetDirectMapSlotStartInBRPPool(address);
-  if (PA_UNLIKELY(directmap_slot_start)) {
-    return directmap_slot_start;
-  }
-  auto* slot_span = SlotSpanMetadata::FromAddr(address);
-  auto* root = PartitionRoot::FromSlotSpan(slot_span);
-  // Double check that ref-count is indeed present.
-  PA_DCHECK(root->brp_enabled());
-
-  // Get the offset from the beginning of the slot span.
-  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
-  size_t offset_in_slot_span = address - slot_span_start;
-
-  auto* bucket = slot_span->bucket;
-  return slot_span_start +
-         bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
-}
-
-// Return values to indicate where a pointer is pointing relative to the bounds
-// of an allocation.
-enum class PtrPosWithinAlloc {
-  // When BACKUP_REF_PTR_POISON_OOB_PTR is disabled, end-of-allocation pointers
-  // are also considered in-bounds.
-  kInBounds,
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  kAllocEnd,
-#endif
-  kFarOOB
-};
-
-// Checks whether `test_address` is in the same allocation slot as
-// `orig_address`.
-//
-// This can be called after adding or subtracting from the `orig_address`
-// to produce a different pointer which must still stay in the same allocation.
-//
-// The `type_size` is the size of the type that the raw_ptr is pointing to,
-// which may be the type the allocation is holding or a compatible pointer type
-// such as a base class or char*. It is used to detect pointers near the end of
-// the allocation but not strictly beyond it.
-//
-// This isn't a general purpose function. The caller is responsible for ensuring
-// that the ref-count is in place for this allocation.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
-                                       uintptr_t test_address,
-                                       size_t type_size);
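-
-// A sketch of how a caller (e.g. a raw_ptr-style wrapper) might use this to
-// validate pointer arithmetic; all names other than the function itself are
-// illustrative:
-//
-//   uintptr_t orig = UntagPtr(ptr);
-//   uintptr_t moved = orig + delta_in_bytes;
-//   if (IsPtrWithinSameAlloc(orig, moved, sizeof(T)) ==
-//       PtrPosWithinAlloc::kFarOOB) {
-//     // |moved| left the allocation that |orig| belongs to; report or crash.
-//   }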
-
-PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
-  PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());
-
-  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
-  auto* root = PartitionRoot::FromSlotSpan(slot_span);
-  // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
-  // supports reference counts.
-  PA_DCHECK(root->brp_enabled());
-
-  // Iterating over the entire slot can be really expensive.
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
-  // If we have a hook, the object segment is not necessarily filled
-  // with |kQuarantinedByte|.
-  if (PA_LIKELY(!hook)) {
-    unsigned char* object =
-        static_cast<unsigned char*>(root->SlotStartToObject(slot_start));
-    for (size_t i = 0; i < root->GetSlotUsableSize(slot_span); ++i) {
-      PA_DCHECK(object[i] == kQuarantinedByte);
-    }
-  }
-  DebugMemset(SlotStartAddr2Ptr(slot_start), kFreedByte,
-              slot_span->GetUtilizedSlotSize()
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-                  - sizeof(PartitionRefCount)
-#endif
-  );
-#endif
-
-  root->total_size_of_brp_quarantined_bytes.fetch_sub(
-      slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
-  root->total_count_of_brp_quarantined_slots.fetch_sub(
-      1, std::memory_order_relaxed);
-
-  root->RawFreeWithThreadCache(slot_start, slot_span);
-}
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-}  // namespace internal
-
-PA_ALWAYS_INLINE uintptr_t
-PartitionRoot::AllocFromBucket(Bucket* bucket,
-                               unsigned int flags,
-                               size_t raw_size,
-                               size_t slot_span_alignment,
-                               size_t* usable_size,
-                               bool* is_already_zeroed) {
-  PA_DCHECK((slot_span_alignment >= internal::PartitionPageSize()) &&
-            internal::base::bits::IsPowerOfTwo(slot_span_alignment));
-  SlotSpan* slot_span = bucket->active_slot_spans_head;
-  // There must always be a slot span on the active list (it could be a sentinel).
-  PA_DCHECK(slot_span);
-  // Check that it isn't marked full, which could only be true if the span was
-  // removed from the active list.
-  PA_DCHECK(!slot_span->marked_full);
-
-  uintptr_t slot_start =
-      internal::SlotStartPtr2Addr(slot_span->get_freelist_head());
-  // Use the fast path when a slot is readily available on the free list of the
-  // first active slot span. However, fall back to the slow path if a
-  // higher-order alignment is requested, because an inner slot of an existing
-  // slot span is unlikely to satisfy it.
-  if (PA_LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
-                slot_start)) {
-    *is_already_zeroed = false;
-    // This is a fast path, avoid calling GetSlotUsableSize() in Release builds
-    // as it is costlier. Copy its small bucket path instead.
-    *usable_size = AdjustSizeForExtrasSubtract(bucket->slot_size);
-    PA_DCHECK(*usable_size == GetSlotUsableSize(slot_span));
-
-    // If these DCHECKs fire, you probably corrupted memory.
-    // TODO(crbug.com/1257655): See if we can afford to make these CHECKs.
-    DCheckIsValidSlotSpan(slot_span);
-
-    // All large allocations must go through the slow path to correctly update
-    // the size metadata.
-    PA_DCHECK(!slot_span->CanStoreRawSize());
-    PA_DCHECK(!slot_span->bucket->is_direct_mapped());
-    void* entry = slot_span->PopForAlloc(bucket->slot_size);
-    PA_DCHECK(internal::SlotStartPtr2Addr(entry) == slot_start);
-
-    PA_DCHECK(slot_span->bucket == bucket);
-  } else {
-    slot_start = bucket->SlowPathAlloc(this, flags, raw_size,
-                                       slot_span_alignment, is_already_zeroed);
-    if (PA_UNLIKELY(!slot_start)) {
-      return 0;
-    }
-
-    slot_span = SlotSpan::FromSlotStart(slot_start);
-    // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
-    DCheckIsValidSlotSpan(slot_span);
-    // For direct mapped allocations, |bucket| is the sentinel.
-    PA_DCHECK((slot_span->bucket == bucket) ||
-              (slot_span->bucket->is_direct_mapped() &&
-               (bucket == &sentinel_bucket)));
-
-    *usable_size = GetSlotUsableSize(slot_span);
-  }
-  PA_DCHECK(slot_span->GetUtilizedSlotSize() <= slot_span->bucket->slot_size);
-  IncreaseTotalSizeOfAllocatedBytes(
-      slot_start, slot_span->GetSlotSizeForBookkeeping(), raw_size);
-
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  if (!slot_span->bucket->is_direct_mapped()) {
-    internal::FreeSlotBitmapMarkSlotAsUsed(slot_start);
-  }
-#endif
-
-  return slot_start;
-}
-
-AllocationNotificationData PartitionRoot::CreateAllocationNotificationData(
-    void* object,
-    size_t size,
-    const char* type_name) const {
-  AllocationNotificationData notification_data(object, size, type_name);
-
-  if (IsMemoryTaggingEnabled()) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    notification_data.SetMteReportingMode(memory_tagging_reporting_mode());
-#endif
-  }
-
-  return notification_data;
-}
-
-FreeNotificationData PartitionRoot::CreateDefaultFreeNotificationData(
-    void* address) {
-  return FreeNotificationData(address);
-}
-
-FreeNotificationData PartitionRoot::CreateFreeNotificationData(
-    void* address) const {
-  FreeNotificationData notification_data =
-      CreateDefaultFreeNotificationData(address);
-
-  if (IsMemoryTaggingEnabled()) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    notification_data.SetMteReportingMode(memory_tagging_reporting_mode());
-#endif
-  }
-
-  return notification_data;
-}
-
-// static
-template <unsigned int flags>
-PA_ALWAYS_INLINE bool PartitionRoot::FreeProlog(void* object,
-                                                const PartitionRoot* root) {
-  PA_DCHECK(flags < FreeFlags::kLastFlag << 1);
-
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  if constexpr (!(flags & FreeFlags::kNoMemoryToolOverride)) {
-    free(object);
-    return true;
-  }
-#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  if (PA_UNLIKELY(!object)) {
-    return true;
-  }
-
-  if (PartitionAllocHooks::AreHooksEnabled()) {
-    // A valid |root| might not be available if this function is called from
-    // |FreeWithFlagsInUnknownRoot|, and is not deducible if the object
-    // originates from an override hook.
-    // TODO(crbug.com/1137393): See if we can make the root available more
-    // reliably or even make this function non-static.
-    auto notification_data = root ? root->CreateFreeNotificationData(object)
-                                  : CreateDefaultFreeNotificationData(object);
-    PartitionAllocHooks::FreeObserverHookIfEnabled(notification_data);
-    if (PartitionAllocHooks::FreeOverrideHookIfEnabled(object)) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-template <unsigned int flags>
-PA_NOINLINE void PartitionRoot::Free(void* object) {
-  bool early_return = FreeProlog<flags>(object, this);
-  if (early_return) {
-    return;
-  }
-
-  FreeNoHooks(object);
-}
-
-// static
-template <unsigned int flags>
-PA_NOINLINE void PartitionRoot::FreeInUnknownRoot(void* object) {
-  // The correct PartitionRoot might not be deducible if the |object| originates
-  // from an override hook.
-  bool early_return = FreeProlog<flags>(object, nullptr);
-  if (early_return) {
-    return;
-  }
-
-  FreeNoHooksInUnknownRoot(object);
-}
-
-PA_ALWAYS_INLINE bool PartitionRoot::IsMemoryTaggingEnabled() const {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  return settings.memory_tagging_enabled_;
-#else
-  return false;
-#endif
-}
-
-PA_ALWAYS_INLINE TagViolationReportingMode
-PartitionRoot::memory_tagging_reporting_mode() const {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  return settings.memory_tagging_reporting_mode_;
-#else
-  return TagViolationReportingMode::kUndefined;
-#endif
-}
-
-// static
-PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksInUnknownRoot(void* object) {
-  if (PA_UNLIKELY(!object)) {
-    return;
-  }
-
-  // Fetch the root from the address, and not SlotSpanMetadata. This is
-  // important, as obtaining it from SlotSpanMetadata is a slow operation
-  // (looking into the metadata area, and following a pointer), which can induce
-  // cache coherency traffic (since they're read on every free(), and written to
-  // on any malloc()/free() that is not a hit in the thread cache). This way we
-  // change the critical path from object -> slot_span -> root into two
-  // *parallel* ones:
-  // 1. object -> root
-  // 2. object -> slot_span (inside FreeNoHooks)
-  uintptr_t object_addr = internal::ObjectPtr2Addr(object);
-  auto* root = FromAddrInFirstSuperpage(object_addr);
-  root->FreeNoHooks(object);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooks(void* object) {
-  if (PA_UNLIKELY(!object)) {
-    return;
-  }
-
-  // Almost all calls to FreeNoHooks() will end up writing to |*object|; the
-  // only case where we don't is delayed free() in PCScan. Either way, |*object|
-  // can be cold in cache.
-  PA_PREFETCH(object);
-
-  // On Android, malloc() interception is more fragile than on other
-  // platforms, as we use wrapped symbols. However, the pools allow us to
-  // quickly tell that a pointer was allocated with PartitionAlloc.
-  //
-  // We crash here to detect imperfect symbol interception. Alternatively, we
-  // could forward allocations we don't own to the system malloc()
-  // implementation in these rare cases, assuming that some remain.
-  //
-  // On Android Chromecast devices, this is already checked in PartitionFree()
-  // in the shim.
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
-    (BUILDFLAG(IS_ANDROID) && !BUILDFLAG(PA_IS_CAST_ANDROID))
-  uintptr_t object_addr = internal::ObjectPtr2Addr(object);
-  PA_CHECK(IsManagedByPartitionAlloc(object_addr));
-#endif
-
-  SlotSpan* slot_span = SlotSpan::FromObject(object);
-  PA_DCHECK(PartitionRoot::FromSlotSpan(slot_span) == this);
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  if (PA_LIKELY(IsMemoryTaggingEnabled())) {
-    const size_t slot_size = slot_span->bucket->slot_size;
-    if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
-      // slot_span is untagged at this point, so we have to recover its tag
-      // again to increment and provide use-after-free mitigations.
-      size_t tag_size = slot_size;
-#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
-      tag_size -= settings.ref_count_size;
-#endif
-      void* retagged_slot_start = internal::TagMemoryRangeIncrement(
-          ObjectToTaggedSlotStart(object), tag_size);
-      // Incrementing the MTE-tag in the memory range invalidates the |object|'s
-      // tag, so it must be retagged.
-      object = TaggedSlotStartToObject(retagged_slot_start);
-    }
-  }
-#else
-  // We are going to read from |*slot_span| in all branches, but haven't done it
-  // yet.
-  //
-  // TODO(crbug.com/1207307): It would be much better to avoid touching
-  // |*slot_span| at all on the fast path, or at least to separate its read-only
-  // parts (i.e. bucket pointer) from the rest. Indeed, every thread cache miss
-  // (or batch fill) will *write* to |slot_span->freelist_head|, leading to
-  // cacheline ping-pong.
-  //
-  // Don't do it when memory tagging is enabled, as |*slot_span| has already
-  // been touched above.
-  PA_PREFETCH(slot_span);
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-  uintptr_t slot_start = ObjectToSlotStart(object);
-  PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
-
-#if BUILDFLAG(USE_STARSCAN)
-  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
-  // default.
-  if (PA_UNLIKELY(ShouldQuarantine(object))) {
-    // PCScan safepoint. Call before potentially scheduling scanning task.
-    PCScan::JoinScanIfNeeded();
-    if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
-      PCScan::MoveToQuarantine(object, GetSlotUsableSize(slot_span), slot_start,
-                               slot_span->bucket->slot_size);
-      return;
-    }
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-  FreeNoHooksImmediate(object, slot_span, slot_start);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
-    void* object,
-    SlotSpan* slot_span,
-    uintptr_t slot_start) {
-  // The thread cache is added "in the middle" of the main allocator, that is:
-  // - After all the cookie/ref-count management
-  // - Before the "raw" allocator.
-  //
-  // On the deallocation side:
-  // 1. Check cookie/ref-count, adjust the pointer
-  // 2. Deallocation
-  //   a. Return to the thread cache if possible. If it succeeds, return.
-  //   b. Otherwise, call the "raw" allocator <-- Locking
-  PA_DCHECK(object);
-  PA_DCHECK(slot_span);
-  DCheckIsValidSlotSpan(slot_span);
-  PA_DCHECK(slot_start);
-
-  // Layout inside the slot:
-  //   |[refcnt]|...object...|[empty]|[cookie]|[unused]|
-  //            <--------(a)--------->
-  //   <--(b)--->         +          <--(b)--->
-  //   <-----------------(c)------------------>
-  //     (a) usable_size
-  //     (b) extras
-  //     (c) utilized_slot_size
-  //
-  // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
-  //   |...object...|[empty]|[cookie]|[unused]|[refcnt]|
-  //   <--------(a)--------->
-  //                        <--(b)--->   +    <--(b)--->
-  //   <-------------(c)------------->   +    <--(c)--->
-  //
-  // Note: ref-count and cookie can be 0-sized.
-  //
-  // For more context, see the other "Layout inside the slot" comment inside
-  // AllocWithFlagsNoHooks().
-
-  if (settings.use_cookie) {
-    // Verify the cookie after the allocated region.
-    // If this assert fires, you probably corrupted memory.
-    internal::PartitionCookieCheckValue(static_cast<unsigned char*>(object) +
-                                        GetSlotUsableSize(slot_span));
-  }
-
-#if BUILDFLAG(USE_STARSCAN)
-  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
-  // default.
-  if (PA_UNLIKELY(IsQuarantineEnabled())) {
-    if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
-      // Mark the state in the state bitmap as freed.
-      internal::StateBitmapFromAddr(slot_start)->Free(slot_start);
-    }
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
-  // be false only for the aligned partition.
-  if (brp_enabled()) {
-    auto* ref_count = internal::PartitionRefCountPointer(slot_start);
-    // If there are no more references to the allocation, it can be freed
-    // immediately. Otherwise, defer the operation and zap the memory to turn
-    // potential use-after-free issues into unexploitable crashes.
-    if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs())) {
-      auto usable_size = GetSlotUsableSize(slot_span);
-      auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
-      if (PA_UNLIKELY(hook)) {
-        hook(object, usable_size);
-      } else {
-        internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
-      }
-    }
-
-    if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
-      total_size_of_brp_quarantined_bytes.fetch_add(
-          slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
-      total_count_of_brp_quarantined_slots.fetch_add(1,
-                                                     std::memory_order_relaxed);
-      cumulative_size_of_brp_quarantined_bytes.fetch_add(
-          slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
-      cumulative_count_of_brp_quarantined_slots.fetch_add(
-          1, std::memory_order_relaxed);
-      return;
-    }
-  }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-  // memset() can be really expensive.
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
-                        internal::kFreedByte,
-                        slot_span->GetUtilizedSlotSize()
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-                            - sizeof(internal::PartitionRefCount)
-#endif
-  );
-#elif PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
-  // `memset` only once in a while: we're trading off safety for time
-  // efficiency.
-  if (PA_UNLIKELY(internal::RandomPeriod()) &&
-      !IsDirectMappedBucket(slot_span->bucket)) {
-    internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
-                           slot_span->GetUtilizedSlotSize()
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-                               - sizeof(internal::PartitionRefCount)
-#endif
-    );
-  }
-#endif  // PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
-
-  RawFreeWithThreadCache(slot_start, slot_span);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::FreeInSlotSpan(uintptr_t slot_start,
-                                                    SlotSpan* slot_span) {
-  DecreaseTotalSizeOfAllocatedBytes(slot_start,
-                                    slot_span->GetSlotSizeForBookkeeping());
-#if BUILDFLAG(USE_FREESLOT_BITMAP)
-  if (!slot_span->bucket->is_direct_mapped()) {
-    internal::FreeSlotBitmapMarkSlotAsFree(slot_start);
-  }
-#endif
-
-  return slot_span->Free(slot_start, this);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::RawFree(uintptr_t slot_start) {
-  SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
-  RawFree(slot_start, slot_span);
-}
-
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-// MSVC only supports inline assembly on x86. This preprocessor directive
-// is intended to be a replacement for it.
-//
-// TODO(crbug.com/1351310): Make sure inlining doesn't degrade this into
-// a no-op or similar. The documentation doesn't say.
-#pragma optimize("", off)
-#endif
-PA_ALWAYS_INLINE void PartitionRoot::RawFree(uintptr_t slot_start,
-                                             SlotSpan* slot_span) {
-  // At this point we are about to acquire the lock, so we try to minimize the
-  // risk of blocking inside the locked section.
-  //
-  // For allocations that are not direct-mapped, there will always be a store at
-  // the beginning of |*slot_start|, to link the freelist. This is why there is
-  // a prefetch of it at the beginning of the free() path.
-  //
-  // However, the memory which is being freed can be very cold (for instance
-  // during browser shutdown, when various caches are finally completely freed),
-  // and so moved to either compressed memory or swap. This means that touching
-  // it here can cause a major page fault. This is in turn will cause
-  // descheduling of the thread *while locked*. Since we don't have priority
-  // inheritance locks on most platforms, avoiding long locked periods relies on
-  // the OS having proper priority boosting. There is evidence
-  // (crbug.com/1228523) that this is not always the case on Windows, and a very
-  // low priority background thread can block the main one for a long time,
-  // leading to hangs.
-  //
-  // To mitigate that, make sure that we fault *before* locking. Note that this
-  // is useless for direct-mapped allocations (which are very rare anyway), and
-  // that this path is *not* taken for thread cache bucket purge (since it calls
-  // RawFreeLocked()). This is intentional, as the thread cache is purged often,
-  // and as a consequence the memory has already been touched
-  // recently (to link the thread cache freelist).
-  *static_cast<volatile uintptr_t*>(internal::SlotStartAddr2Ptr(slot_start)) =
-      0;
-  // Note: even though we write to slot_start + sizeof(void*) as well, due to
-  // alignment constraints, the two locations are always going to be in the same
-  // OS page. No need to write to the second one as well.
-  //
-  // Do not move the store above inside the locked section.
-#if !(PA_CONFIG(IS_NONCLANG_MSVC))
-  __asm__ __volatile__("" : : "r"(slot_start) : "memory");
-#endif
-
-  ::partition_alloc::internal::ScopedGuard guard{
-      internal::PartitionRootLock(this)};
-  FreeInSlotSpan(slot_start, slot_span);
-}
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-#pragma optimize("", on)
-#endif
-
-PA_ALWAYS_INLINE void PartitionRoot::RawFreeBatch(FreeListEntry* head,
-                                                  FreeListEntry* tail,
-                                                  size_t size,
-                                                  SlotSpan* slot_span) {
-  PA_DCHECK(head);
-  PA_DCHECK(tail);
-  PA_DCHECK(size > 0);
-  PA_DCHECK(slot_span);
-  DCheckIsValidSlotSpan(slot_span);
-  // The passed freelist is likely to have just been built up, which means that
-  // the corresponding pages were faulted in (without acquiring the lock). So
-  // there is no need to touch pages manually here before the lock.
-  ::partition_alloc::internal::ScopedGuard guard{
-      internal::PartitionRootLock(this)};
-  // TODO(thiabaud): Fix the accounting here. The size is correct, but the
-  // pointer is not. This only affects local tools that record each allocation,
-  // not our metrics.
-  DecreaseTotalSizeOfAllocatedBytes(
-      0u, slot_span->GetSlotSizeForBookkeeping() * size);
-  slot_span->AppendFreeList(head, tail, size, this);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::RawFreeWithThreadCache(
-    uintptr_t slot_start,
-    SlotSpan* slot_span) {
-  // PA_LIKELY: performance-sensitive partitions have a thread cache,
-  // direct-mapped allocations are uncommon.
-  ThreadCache* thread_cache = GetThreadCache();
-  if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
-                !IsDirectMappedBucket(slot_span->bucket))) {
-    size_t bucket_index =
-        static_cast<size_t>(slot_span->bucket - this->buckets);
-    size_t slot_size;
-    if (PA_LIKELY(thread_cache->MaybePutInCache(slot_start, bucket_index,
-                                                &slot_size))) {
-      // This is a fast path, avoid calling GetSlotUsableSize() in Release
-      // builds as it is costlier. Copy its small bucket path instead.
-      PA_DCHECK(!slot_span->CanStoreRawSize());
-      size_t usable_size = AdjustSizeForExtrasSubtract(slot_size);
-      PA_DCHECK(usable_size == GetSlotUsableSize(slot_span));
-      thread_cache->RecordDeallocation(usable_size);
-      return;
-    }
-  }
-
-  if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
-    // Accounting must be done outside `RawFree()`, as it's also called from the
-    // thread cache. We would double-count otherwise.
-    //
-    // GetSlotUsableSize() will always give the correct result, and we are in
-    // a slow path here (since the thread cache case returned earlier).
-    size_t usable_size = GetSlotUsableSize(slot_span);
-    thread_cache->RecordDeallocation(usable_size);
-  }
-  RawFree(slot_start, slot_span);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::RawFreeLocked(uintptr_t slot_start) {
-  SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
-  // Direct-mapped deallocation releases then re-acquires the lock. The caller
-  // may not expect that, but we never call this function on direct-mapped
-  // allocations.
-  PA_DCHECK(!IsDirectMappedBucket(slot_span->bucket));
-  FreeInSlotSpan(slot_start, slot_span);
-}
-
-PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromSlotSpan(
-    SlotSpan* slot_span) {
-  auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
-      reinterpret_cast<uintptr_t>(slot_span) & internal::SystemPageBaseMask());
-  return extent_entry->root;
-}
-
-PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromFirstSuperPage(
-    uintptr_t super_page) {
-  PA_DCHECK(internal::IsReservationStart(super_page));
-  auto* extent_entry = internal::PartitionSuperPageToExtent(super_page);
-  PartitionRoot* root = extent_entry->root;
-  PA_DCHECK(root->inverted_self == ~reinterpret_cast<uintptr_t>(root));
-  return root;
-}
-
-PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromAddrInFirstSuperpage(
-    uintptr_t address) {
-  uintptr_t super_page = address & internal::kSuperPageBaseMask;
-  PA_DCHECK(internal::IsReservationStart(super_page));
-  return FromFirstSuperPage(super_page);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::IncreaseTotalSizeOfAllocatedBytes(
-    uintptr_t addr,
-    size_t len,
-    size_t raw_size) {
-  total_size_of_allocated_bytes += len;
-  max_size_of_allocated_bytes =
-      std::max(max_size_of_allocated_bytes, total_size_of_allocated_bytes);
-#if BUILDFLAG(RECORD_ALLOC_INFO)
-  partition_alloc::internal::RecordAllocOrFree(addr | 0x01, raw_size);
-#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::DecreaseTotalSizeOfAllocatedBytes(
-    uintptr_t addr,
-    size_t len) {
-  // An underflow here means we've miscounted |total_size_of_allocated_bytes|
-  // somewhere.
-  PA_DCHECK(total_size_of_allocated_bytes >= len);
-  total_size_of_allocated_bytes -= len;
-#if BUILDFLAG(RECORD_ALLOC_INFO)
-  partition_alloc::internal::RecordAllocOrFree(addr | 0x00, len);
-#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::IncreaseCommittedPages(size_t len) {
-  const auto old_total =
-      total_size_of_committed_pages.fetch_add(len, std::memory_order_relaxed);
-
-  const auto new_total = old_total + len;
-
-  // This function is called quite frequently; to avoid performance problems, we
-  // don't want to hold a lock here, so we use compare and exchange instead.
-  size_t expected = max_size_of_committed_pages.load(std::memory_order_relaxed);
-  size_t desired;
-  do {
-    desired = std::max(expected, new_total);
-  } while (!max_size_of_committed_pages.compare_exchange_weak(
-      expected, desired, std::memory_order_relaxed, std::memory_order_relaxed));
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::DecreaseCommittedPages(size_t len) {
-  total_size_of_committed_pages.fetch_sub(len, std::memory_order_relaxed);
-}
-
-PA_ALWAYS_INLINE void PartitionRoot::DecommitSystemPagesForData(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition) {
-  internal::ScopedSyscallTimer timer{this};
-  DecommitSystemPages(address, length, accessibility_disposition);
-  DecreaseCommittedPages(length);
-}
-
-// Not unified with TryRecommitSystemPagesForData() to preserve error codes.
-PA_ALWAYS_INLINE void PartitionRoot::RecommitSystemPagesForData(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition,
-    bool request_tagging) {
-  internal::ScopedSyscallTimer timer{this};
-
-  auto page_accessibility = GetPageAccessibility(request_tagging);
-  bool ok = TryRecommitSystemPages(address, length, page_accessibility,
-                                   accessibility_disposition);
-  if (PA_UNLIKELY(!ok)) {
-    // Decommit some memory and retry. The alternative is crashing.
-    DecommitEmptySlotSpans();
-    RecommitSystemPages(address, length, page_accessibility,
-                        accessibility_disposition);
-  }
-
-  IncreaseCommittedPages(length);
-}
-
-PA_ALWAYS_INLINE bool PartitionRoot::TryRecommitSystemPagesForData(
-    uintptr_t address,
-    size_t length,
-    PageAccessibilityDisposition accessibility_disposition,
-    bool request_tagging) {
-  internal::ScopedSyscallTimer timer{this};
-
-  auto page_accessibility = GetPageAccessibility(request_tagging);
-  bool ok = TryRecommitSystemPages(address, length, page_accessibility,
-                                   accessibility_disposition);
-  if (PA_UNLIKELY(!ok)) {
-    // Decommit some memory and retry. The alternative is crashing.
-    {
-      ::partition_alloc::internal::ScopedGuard guard(
-          internal::PartitionRootLock(this));
-      DecommitEmptySlotSpans();
-    }
-    ok = TryRecommitSystemPages(address, length, page_accessibility,
-                                accessibility_disposition);
-  }
-
-  if (ok) {
-    IncreaseCommittedPages(length);
-  }
-
-  return ok;
-}
-
-// static
-//
-// Returns the size available to the app. It can be equal to or higher than the
-// requested size. If higher, the overage won't exceed what's actually usable
-// by the app without a risk of running out of an allocated region or into
-// PartitionAlloc's internal data. Used as malloc_usable_size and malloc_size.
-//
-// |ptr| should preferably point to the beginning of an object returned from
-// malloc() et al., but it doesn't have to. crbug.com/1292646 shows an example
-// where this isn't the case. Note, an inner object pointer won't work for
-// direct map, unless it is within the first partition page.
-PA_ALWAYS_INLINE size_t PartitionRoot::GetUsableSize(void* ptr) {
-  // malloc_usable_size() is expected to handle NULL gracefully and return 0.
-  if (!ptr) {
-    return 0;
-  }
-  auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
-  auto* root = FromSlotSpan(slot_span);
-  return root->GetSlotUsableSize(slot_span);
-}
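-
-// An illustrative contract check, assuming an allocation made through
-// AllocWithFlags(); the exact usable size depends on bucket rounding:
-//
-//   void* p = root->AllocWithFlags(0, /*requested_size=*/10, "type");
-//   PA_DCHECK(PartitionRoot::GetUsableSize(p) >= 10);
-//   PA_DCHECK(PartitionRoot::GetUsableSize(nullptr) == 0);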
-
-PA_ALWAYS_INLINE size_t
-PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(void* ptr) {
-  // malloc_usable_size() is expected to handle NULL gracefully and return 0.
-  if (!ptr) {
-    return 0;
-  }
-  auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
-  auto* root = FromSlotSpan(slot_span);
-  size_t usable_size = root->GetSlotUsableSize(slot_span);
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-  // Check |mac11_malloc_size_hack_enabled_| flag first as this doesn't
-  // concern OS versions other than macOS 11.
-  if (PA_UNLIKELY(root->settings.mac11_malloc_size_hack_enabled_ &&
-                  usable_size ==
-                      root->settings.mac11_malloc_size_hack_usable_size_)) {
-    uintptr_t slot_start =
-        internal::PartitionAllocGetSlotStartInBRPPool(UntagPtr(ptr));
-    auto* ref_count = internal::PartitionRefCountPointer(slot_start);
-    if (ref_count->NeedsMac11MallocSizeHack()) {
-      return internal::kMac11MallocSizeHackRequestedSize;
-    }
-  }
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-
-  return usable_size;
-}
-
-// Returns the page configuration to use when mapping slot spans for a given
-// partition root. ReadWriteTagged is used on MTE-enabled systems for
-// PartitionRoots supporting it.
-PA_ALWAYS_INLINE PageAccessibilityConfiguration
-PartitionRoot::GetPageAccessibility(bool request_tagging) const {
-  PageAccessibilityConfiguration::Permissions permissions =
-      PageAccessibilityConfiguration::kReadWrite;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  if (IsMemoryTaggingEnabled() && request_tagging) {
-    permissions = PageAccessibilityConfiguration::kReadWriteTagged;
-  }
-#endif
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  return PageAccessibilityConfiguration(permissions, settings.thread_isolation);
-#else
-  return PageAccessibilityConfiguration(permissions);
-#endif
-}
-
-PA_ALWAYS_INLINE PageAccessibilityConfiguration
-PartitionRoot::PageAccessibilityWithThreadIsolationIfEnabled(
-    PageAccessibilityConfiguration::Permissions permissions) const {
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  return PageAccessibilityConfiguration(permissions, settings.thread_isolation);
-#endif
-  return PageAccessibilityConfiguration(permissions);
-}
-
-// Return the capacity of the underlying slot (adjusted for extras). This
-// doesn't mean this capacity is readily available. It merely means that if
-// a new allocation (or realloc) happened with that returned value, it'd use
-// the same amount of underlying memory.
-PA_ALWAYS_INLINE size_t
-PartitionRoot::AllocationCapacityFromSlotStart(uintptr_t slot_start) const {
-  auto* slot_span = SlotSpan::FromSlotStart(slot_start);
-  return AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
-}
-
-// static
-PA_ALWAYS_INLINE uint16_t
-PartitionRoot::SizeToBucketIndex(size_t size,
-                                 BucketDistribution bucket_distribution) {
-  switch (bucket_distribution) {
-    case BucketDistribution::kNeutral:
-      return internal::BucketIndexLookup::GetIndexForNeutralBuckets(size);
-    case BucketDistribution::kDenser:
-      return internal::BucketIndexLookup::GetIndexForDenserBuckets(size);
-  }
-}
-
-PA_ALWAYS_INLINE void* PartitionRoot::AllocWithFlags(unsigned int flags,
-                                                     size_t requested_size,
-                                                     const char* type_name) {
-  return AllocWithFlagsInternal(flags, requested_size,
-                                internal::PartitionPageSize(), type_name);
-}
-
-PA_ALWAYS_INLINE void* PartitionRoot::AllocWithFlagsInternal(
-    unsigned int flags,
-    size_t requested_size,
-    size_t slot_span_alignment,
-    const char* type_name) {
-  PA_DCHECK(
-      (slot_span_alignment >= internal::PartitionPageSize()) &&
-      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
-
-  PA_DCHECK(flags < AllocFlags::kLastFlag << 1);
-  PA_DCHECK((flags & AllocFlags::kNoHooks) == 0);  // Internal only.
-  PA_DCHECK((flags & AllocFlags::kMemoryShouldBeTaggedForMte) ==
-            0);  // Internal only.
-  PA_DCHECK(initialized);
-
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  if (!(flags & AllocFlags::kNoMemoryToolOverride)) {
-    CHECK_MAX_SIZE_OR_RETURN_NULLPTR(requested_size, flags);
-    const bool zero_fill = flags & AllocFlags::kZeroFill;
-    void* result =
-        zero_fill ? calloc(1, requested_size) : malloc(requested_size);
-    PA_CHECK(result || flags & AllocFlags::kReturnNull);
-    return result;
-  }
-#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  void* object = nullptr;
-  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
-  if (PA_UNLIKELY(hooks_enabled)) {
-    unsigned int additional_flags = 0;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-    if (IsMemoryTaggingEnabled()) {
-      additional_flags |= AllocFlags::kMemoryShouldBeTaggedForMte;
-    }
-#endif
-    // The override hooks will return false if they can't handle the request,
-    // e.g. due to unsupported flags. In this case, we forward the allocation
-    // request to the default mechanisms.
-    // TODO(crbug.com/1137393): See if we can make the forwarding more verbose
-    // to ensure that this situation doesn't go unnoticed.
-    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
-            &object, flags | additional_flags, requested_size, type_name)) {
-      PartitionAllocHooks::AllocationObserverHookIfEnabled(
-          CreateAllocationNotificationData(object, requested_size, type_name));
-      return object;
-    }
-  }
-
-  object = AllocWithFlagsNoHooks(flags, requested_size, slot_span_alignment);
-
-  if (PA_UNLIKELY(hooks_enabled)) {
-    PartitionAllocHooks::AllocationObserverHookIfEnabled(
-        CreateAllocationNotificationData(object, requested_size, type_name));
-  }
-
-  return object;
-}
-
-PA_ALWAYS_INLINE void* PartitionRoot::AllocWithFlagsNoHooks(
-    unsigned int flags,
-    size_t requested_size,
-    size_t slot_span_alignment) {
-  PA_DCHECK(
-      (slot_span_alignment >= internal::PartitionPageSize()) &&
-      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
-
-  // The thread cache is added "in the middle" of the main allocator, that is:
-  // - After all the cookie/ref-count management
-  // - Before the "raw" allocator.
-  //
-  // That is, the general allocation flow is:
-  // 1. Adjustment of requested size to make room for extras
-  // 2. Allocation:
-  //   a. Call to the thread cache, if it succeeds, go to step 3.
-  //   b. Otherwise, call the "raw" allocator <-- Locking
-  // 3. Handle cookie/ref-count, zero allocation if required
-
-  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
-  PA_CHECK(raw_size >= requested_size);  // check for overflows
-
-  // We should call |SizeToBucketIndex| at most once per allocation. Otherwise,
-  // we risk having |bucket_distribution| changed underneath us (between
-  // multiple calls to |SizeToBucketIndex| during the same allocation),
-  // which would result in an inconsistent state.
-  uint16_t bucket_index =
-      SizeToBucketIndex(raw_size, this->GetBucketDistribution());
-  size_t usable_size;
-  bool is_already_zeroed = false;
-  uintptr_t slot_start = 0;
-  size_t slot_size;
-
-#if BUILDFLAG(USE_STARSCAN)
-  const bool is_quarantine_enabled = IsQuarantineEnabled();
-  // PCScan safepoint. Call before trying to allocate from cache.
-  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
-  // default.
-  if (PA_UNLIKELY(is_quarantine_enabled)) {
-    PCScan::JoinScanIfNeeded();
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-  auto* thread_cache = GetOrCreateThreadCache();
-
-  // Don't use thread cache if higher order alignment is requested, because the
-  // thread cache will not be able to satisfy it.
-  //
-  // PA_LIKELY: performance-sensitive partitions use the thread cache.
-  if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
-                slot_span_alignment <= internal::PartitionPageSize())) {
-    // Note: getting slot_size from the thread cache rather than by
-    // `buckets[bucket_index].slot_size` to avoid touching `buckets` on the fast
-    // path.
-    slot_start = thread_cache->GetFromCache(bucket_index, &slot_size);
-
-    // PA_LIKELY: median hit rate in the thread cache is 95%, from metrics.
-    if (PA_LIKELY(slot_start)) {
-      // This follows the logic of SlotSpanMetadata::GetUsableSize for small
-      // buckets, which is too expensive to call here.
-      // Keep it in sync!
-      usable_size = AdjustSizeForExtrasSubtract(slot_size);
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-      // Make sure that the allocated pointer comes from the same place it would
-      // for a non-thread cache allocation.
-      SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
-      DCheckIsValidSlotSpan(slot_span);
-      PA_DCHECK(slot_span->bucket == &bucket_at(bucket_index));
-      PA_DCHECK(slot_span->bucket->slot_size == slot_size);
-      PA_DCHECK(usable_size == GetSlotUsableSize(slot_span));
-      // All large allocations must go through the RawAlloc path to correctly
-      // set |usable_size|.
-      PA_DCHECK(!slot_span->CanStoreRawSize());
-      PA_DCHECK(!slot_span->bucket->is_direct_mapped());
-#endif
-    } else {
-      slot_start =
-          RawAlloc(buckets + bucket_index, flags, raw_size, slot_span_alignment,
-                   &usable_size, &is_already_zeroed);
-    }
-  } else {
-    slot_start =
-        RawAlloc(buckets + bucket_index, flags, raw_size, slot_span_alignment,
-                 &usable_size, &is_already_zeroed);
-  }
-
-  if (PA_UNLIKELY(!slot_start)) {
-    return nullptr;
-  }
-
-  if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
-    thread_cache->RecordAllocation(usable_size);
-  }
-
-  // Layout inside the slot:
-  //   |[refcnt]|...object...|[empty]|[cookie]|[unused]|
-  //            <----(a)----->
-  //            <--------(b)--------->
-  //   <--(c)--->         +          <--(c)--->
-  //   <---------(d)--------->   +   <--(d)--->
-  //   <-----------------(e)------------------>
-  //   <----------------------(f)---------------------->
-  //     (a) requested_size
-  //     (b) usable_size
-  //     (c) extras
-  //     (d) raw_size
-  //     (e) utilized_slot_size
-  //     (f) slot_size
-  // Notes:
-  // - Ref-count may or may not exist in the slot, depending on brp_enabled().
-  // - Cookie exists only in the BUILDFLAG(PA_DCHECK_IS_ON) case.
-  // - Think of raw_size as the minimum size required internally to satisfy
-  //   the allocation request (i.e. requested_size + extras)
-  // - Note, at most one "empty" or "unused" space can occur at a time. It
-  //   occurs when slot_size is larger than raw_size. "unused" applies only to
-  //   large allocations (direct-mapped and single-slot slot spans) and "empty"
-  //   only to small allocations.
-  //   Why either-or, one might ask? We make an effort to put the trailing
-  //   cookie as close to data as possible to catch overflows (often
-  //   off-by-one), but that's possible only if we have enough space in metadata
-  //   to save raw_size, i.e. only for large allocations. For small allocations,
-  //   we have no other choice than putting the cookie at the very end of the
-  //   slot, thus creating the "empty" space.
-  //
-  // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
-  //   |...object...|[empty]|[cookie]|[unused]|[refcnt]|
-  //   <----(a)----->
-  //   <--------(b)--------->
-  //                        <--(c)--->   +    <--(c)--->
-  //   <----(d)----->   +   <--(d)--->   +    <--(d)--->
-  //   <-------------(e)------------->   +    <--(e)--->
-  //   <----------------------(f)---------------------->
-  // Notes:
-  // If |slot_start| is not SystemPageSize()-aligned (possible only for small
-  // allocations), ref-count of this slot is stored at the end of the previous
-  // slot. Otherwise it is stored in ref-count table placed after the super page
-  // metadata. For simplicity, the space for ref-count is still reserved at the
-  // end of previous slot, even though redundant.
-
-  void* object = SlotStartToObject(slot_start);
-
-  // Add the cookie after the allocation.
-  if (settings.use_cookie) {
-    internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
-                                        usable_size);
-  }
-
-  // Fill the region with kUninitializedByte (on debug builds, if zero-fill was
-  // not requested) or with 0 (if requested and the memory isn't already zeroed).
-  bool zero_fill = flags & AllocFlags::kZeroFill;
-  // PA_LIKELY: operator new() calls malloc(), not calloc().
-  if (PA_LIKELY(!zero_fill)) {
-    // memset() can be really expensive.
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-    internal::DebugMemset(object, internal::kUninitializedByte, usable_size);
-#endif
-  } else if (!is_already_zeroed) {
-    memset(object, 0, usable_size);
-  }
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
-  // be false only for the aligned partition.
-  if (brp_enabled()) {
-    bool needs_mac11_malloc_size_hack = false;
-#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-    // Only apply hack to size 32 allocations on macOS 11. There is a buggy
-    // assertion that malloc_size() equals sizeof(class_rw_t) which is 32.
-    if (PA_UNLIKELY(settings.mac11_malloc_size_hack_enabled_ &&
-                    requested_size ==
-                        internal::kMac11MallocSizeHackRequestedSize)) {
-      needs_mac11_malloc_size_hack = true;
-    }
-#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
-    auto* ref_count = new (internal::PartitionRefCountPointer(slot_start))
-        internal::PartitionRefCount(needs_mac11_malloc_size_hack);
-#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
-    ref_count->SetRequestedSize(requested_size);
-#else
-    (void)ref_count;
-#endif
-  }
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-#if BUILDFLAG(USE_STARSCAN)
-  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
-  // default.
-  if (PA_UNLIKELY(is_quarantine_enabled)) {
-    if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
-      // Mark the corresponding bits in the state bitmap as allocated.
-      internal::StateBitmapFromAddr(slot_start)->Allocate(slot_start);
-    }
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-
-  return object;
-}
-
-PA_ALWAYS_INLINE uintptr_t PartitionRoot::RawAlloc(Bucket* bucket,
-                                                   unsigned int flags,
-                                                   size_t raw_size,
-                                                   size_t slot_span_alignment,
-                                                   size_t* usable_size,
-                                                   bool* is_already_zeroed) {
-  ::partition_alloc::internal::ScopedGuard guard{
-      internal::PartitionRootLock(this)};
-  return AllocFromBucket(bucket, flags, raw_size, slot_span_alignment,
-                         usable_size, is_already_zeroed);
-}
-
-PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocWithFlags(
-    unsigned int flags,
-    size_t alignment,
-    size_t requested_size) {
-  // Aligned allocation support relies on the natural alignment guarantees of
-  // PartitionAlloc. Specifically, it relies on the fact that slots within a
-  // slot span are aligned to slot size, from the beginning of the span.
-  //
-  // For alignments <=PartitionPageSize(), the code below adjusts the request
-  // size to be a power of two, no less than alignment. Since slot spans are
-  // aligned to PartitionPageSize(), which is also a power of two, this will
-  // automatically guarantee alignment on the adjusted size boundary, thanks to
-  // the natural alignment described above.
-  //
-  // For alignments >PartitionPageSize(), we need to pass the request down the
-  // stack to only give us a slot span aligned to this more restrictive
-  // boundary. In the current implementation, this code path will always
-  // allocate a new slot span and hand us the first slot, so no need to adjust
-  // the request size. As a consequence, allocating many small objects with
-  // such a high alignment can cause non-negligible fragmentation,
-  // particularly if these allocations are back to back.
-  // TODO(bartekn): We should check that this is not causing issues in practice.
-  //
-  // Extras before the allocation are forbidden as they shift the returned
-  // allocation from the beginning of the slot, thus messing up alignment.
-  // Extras after the allocation are acceptable, but they have to be taken into
-  // account in the request size calculation to avoid crbug.com/1185484.
-  PA_DCHECK(settings.allow_aligned_alloc);
-  PA_DCHECK(!settings.extras_offset);
-  // This is mandated by |posix_memalign()|, so should never fire.
-  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
-  // Catch unsupported alignment requests early.
-  PA_CHECK(alignment <= internal::kMaxSupportedAlignment);
-  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
-
-  size_t adjusted_size = requested_size;
-  if (alignment <= internal::PartitionPageSize()) {
-    // Handle cases such as size = 16, alignment = 64.
-    // Wastes memory when a large alignment is requested with a small size, but
-    // this is hard to avoid, and should not be too common.
-    if (PA_UNLIKELY(raw_size < alignment)) {
-      raw_size = alignment;
-    } else {
-      // PartitionAlloc only guarantees alignment for power-of-two sized
-      // allocations. To make sure this applies here, round up the allocation
-      // size.
-      raw_size =
-          static_cast<size_t>(1)
-          << (int{sizeof(size_t) * 8} -
-              partition_alloc::internal::base::bits::CountLeadingZeroBits(
-                  raw_size - 1));
-    }
-    PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(raw_size));
-    // Adjust back, because AllocWithFlagsNoHooks/Alloc will adjust it again.
-    adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
-
-    // Overflow check. adjusted_size must be >= requested_size.
-    if (PA_UNLIKELY(adjusted_size < requested_size)) {
-      if (flags & AllocFlags::kReturnNull) {
-        return nullptr;
-      }
-      // OutOfMemoryDeathTest.AlignedAlloc requires
-      // base::TerminateBecauseOutOfMemory (invoked by
-      // PartitionExcessiveAllocationSize).
-      internal::PartitionExcessiveAllocationSize(requested_size);
-      // internal::PartitionExcessiveAllocationSize(size) causes OOM_CRASH.
-      PA_NOTREACHED();
-    }
-  }
-
-  // Slot spans are naturally aligned on partition page size, but make sure you
-  // don't pass anything less, because it'll mess up callee's calculations.
-  size_t slot_span_alignment =
-      std::max(alignment, internal::PartitionPageSize());
-  bool no_hooks = flags & AllocFlags::kNoHooks;
-  void* object =
-      no_hooks
-          ? AllocWithFlagsNoHooks(0, adjusted_size, slot_span_alignment)
-          : AllocWithFlagsInternal(0, adjusted_size, slot_span_alignment, "");
-
-  // |alignment| is a power of two, but the compiler doesn't necessarily know
-  // that. A regular % operation is very slow, make sure to use the equivalent,
-  // faster form.
-  // No need to MTE-untag, as it doesn't change alignment.
-  PA_CHECK(!(reinterpret_cast<uintptr_t>(object) & (alignment - 1)));
-
-  return object;
-}
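For reference, the shift expression in AlignedAllocWithFlags computes the smallest power of two that is greater than or equal to raw_size (e.g. 48 rounds up to 64: with a 64-bit size_t, CountLeadingZeroBits(47) is 58, so the result is 1 << 6). A minimal standalone sketch of the equivalent operation, assuming a C++20 toolchain with <bit>; RoundUpToPowerOfTwo is a hypothetical helper, not PartitionAlloc code:

#include <bit>
#include <cstddef>

// Smallest power of two >= raw_size; equivalent to the CountLeadingZeroBits
// expression above. raw_size must be >= 1, which the surrounding code ensures.
size_t RoundUpToPowerOfTwo(size_t raw_size) {
  return std::bit_ceil(raw_size);
}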
-
-// Return the capacity of the underlying slot (adjusted for extras) that'd be
-// used to satisfy a request of |size|. This doesn't mean this capacity would be
-// readily available. It merely means that if an allocation happened with that
-// returned value, it'd use the same amount of underlying memory as the
-// allocation with |size|.
-PA_ALWAYS_INLINE size_t
-PartitionRoot::AllocationCapacityFromRequestedSize(size_t size) const {
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  return size;
-#else
-  PA_DCHECK(PartitionRoot::initialized);
-  size = AdjustSizeForExtrasAdd(size);
-  auto& bucket = bucket_at(SizeToBucketIndex(size, GetBucketDistribution()));
-  PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
-  PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
-
-  if (PA_LIKELY(!bucket.is_direct_mapped())) {
-    size = bucket.slot_size;
-  } else if (size > internal::MaxDirectMapped()) {
-    // Too large to allocate => return the size unchanged.
-  } else {
-    size = GetDirectMapSlotSize(size);
-  }
-  size = AdjustSizeForExtrasSubtract(size);
-  return size;
-#endif
-}
-
-ThreadCache* PartitionRoot::GetOrCreateThreadCache() {
-  ThreadCache* thread_cache = nullptr;
-  if (PA_LIKELY(settings.with_thread_cache)) {
-    thread_cache = ThreadCache::Get();
-    if (PA_UNLIKELY(!ThreadCache::IsValid(thread_cache))) {
-      thread_cache = MaybeInitThreadCache();
-    }
-  }
-  return thread_cache;
-}
-
-ThreadCache* PartitionRoot::GetThreadCache() {
-  return PA_LIKELY(settings.with_thread_cache) ? ThreadCache::Get() : nullptr;
-}
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-// Usage in `raw_ptr.cc` is notable enough to merit a non-internal alias.
-using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
diff --git a/base/allocator/partition_allocator/partition_stats.cc b/base/allocator/partition_allocator/partition_stats.cc
deleted file mode 100644
index 90cb336..0000000
--- a/base/allocator/partition_allocator/partition_stats.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_stats.h"
-
-#include <cstring>
-
-namespace partition_alloc {
-
-SimplePartitionStatsDumper::SimplePartitionStatsDumper() {
-  memset(&stats_, 0, sizeof(stats_));
-}
-
-void SimplePartitionStatsDumper::PartitionDumpTotals(
-    const char* partition_name,
-    const PartitionMemoryStats* memory_stats) {
-  stats_ = *memory_stats;
-}
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/partition_stats.h b/base/allocator/partition_allocator/partition_stats.h
deleted file mode 100644
index 39398a4..0000000
--- a/base/allocator/partition_allocator/partition_stats.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_STATS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_STATS_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-
-namespace partition_alloc {
-
-// Most of these are not populated if PA_THREAD_CACHE_ENABLE_STATISTICS is not
-// defined.
-struct ThreadCacheStats {
-  uint64_t alloc_count;   // Total allocation requests.
-  uint64_t alloc_hits;    // Thread cache hits.
-  uint64_t alloc_misses;  // Thread cache misses.
-
-  // Allocation failure details:
-  uint64_t alloc_miss_empty;
-  uint64_t alloc_miss_too_large;
-
-  // Cache fill details:
-  uint64_t cache_fill_count;
-  uint64_t cache_fill_hits;
-  uint64_t cache_fill_misses;  // Object too large.
-
-  uint64_t batch_fill_count;  // Number of central allocator requests.
-
-  // Memory cost:
-  uint32_t bucket_total_memory;
-  uint32_t metadata_overhead;
-
-#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-  uint64_t allocs_per_bucket_[internal::kNumBuckets + 1];
-#endif  // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-};
-
-// Per-thread allocation statistics. Only covers allocations made through the
-// partition linked to the thread cache. As the allocator doesn't record
-// requested sizes in most cases, the data there will be an overestimate of the
-// actually requested sizes. It is also not expected to sum up to anything
-// meaningful across threads, due to the lack of synchronization. Figures there
-// are cumulative, not net. Since the data below is per-thread, note a thread
-// can deallocate more than it allocated.
-struct ThreadAllocStats {
-  uint64_t alloc_count;
-  uint64_t alloc_total_size;
-  uint64_t dealloc_count;
-  uint64_t dealloc_total_size;
-};
-
-// Struct used to retrieve total memory usage of a partition. Used by
-// PartitionStatsDumper implementation.
-struct PartitionMemoryStats {
-  size_t total_mmapped_bytes;    // Total bytes mmap()-ed from the system.
-  size_t total_committed_bytes;  // Total size of committed pages.
-  size_t max_committed_bytes;    // Max size of committed pages.
-  size_t total_allocated_bytes;  // Total size of allocations.
-  size_t max_allocated_bytes;    // Max size of allocations.
-  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
-  size_t total_active_bytes;     // Total active bytes in the partition.
-  size_t total_active_count;  // Total count of active objects in the partition.
-  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
-  size_t total_discardable_bytes;    // Total bytes that could be discarded.
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  size_t
-      total_brp_quarantined_bytes;  // Total bytes that are quarantined by BRP.
-  size_t total_brp_quarantined_count;       // Total number of slots that are
-                                            // quarantined by BRP.
-  size_t cumulative_brp_quarantined_bytes;  // Cumulative bytes that are
-                                            // quarantined by BRP.
-  size_t cumulative_brp_quarantined_count;  // Cumulative number of slots that
-                                            // are quarantined by BRP.
-#endif
-
-  bool has_thread_cache;
-  ThreadCacheStats current_thread_cache_stats;
-  ThreadCacheStats all_thread_caches_stats;
-
-  // Count and total duration of system calls made since process start. May not
-  // be reported on all platforms.
-  uint64_t syscall_count;
-  uint64_t syscall_total_time_ns;
-};
-
-// Struct used to retrieve memory statistics about a partition bucket. Used by
-// PartitionStatsDumper implementation.
-struct PartitionBucketMemoryStats {
-  bool is_valid;       // Used to check if the stats are valid.
-  bool is_direct_map;  // True if this is a direct mapping; size will not be
-                       // unique.
-  uint32_t bucket_slot_size;          // The size of the slot in bytes.
-  uint32_t allocated_slot_span_size;  // Total size the slot span allocated
-                                      // from the system (committed pages).
-  uint32_t active_bytes;              // Total active bytes used in the bucket.
-  uint32_t active_count;    // Total active objects allocated in the bucket.
-  uint32_t resident_bytes;  // Total bytes provisioned in the bucket.
-  uint32_t decommittable_bytes;    // Total bytes that could be decommitted.
-  uint32_t discardable_bytes;      // Total bytes that could be discarded.
-  uint32_t num_full_slot_spans;    // Number of slot spans with all slots
-                                   // allocated.
-  uint32_t num_active_slot_spans;  // Number of slot spans that have at least
-                                   // one provisioned slot.
-  uint32_t num_empty_slot_spans;   // Number of slot spans that are empty
-                                   // but not decommitted.
-  uint32_t num_decommitted_slot_spans;  // Number of slot spans that are empty
-                                        // and decommitted.
-};
-
-// Interface that is passed to PartitionDumpStats for consuming the memory
-// statistics.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionStatsDumper {
- public:
-  virtual ~PartitionStatsDumper() = default;
-
-  // Called to dump total memory used by partition, once per partition.
-  virtual void PartitionDumpTotals(const char* partition_name,
-                                   const PartitionMemoryStats*) = 0;
-
-  // Called to dump stats about buckets, for each bucket.
-  virtual void PartitionsDumpBucketStats(const char* partition_name,
-                                         const PartitionBucketMemoryStats*) = 0;
-};
-
-// Simple version of PartitionStatsDumper, storing the returned stats in stats_.
-// Does not handle per-bucket stats.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SimplePartitionStatsDumper
-    : public PartitionStatsDumper {
- public:
-  SimplePartitionStatsDumper();
-  ~SimplePartitionStatsDumper() override = default;
-
-  void PartitionDumpTotals(const char* partition_name,
-                           const PartitionMemoryStats* memory_stats) override;
-
-  void PartitionsDumpBucketStats(const char* partition_name,
-                                 const PartitionBucketMemoryStats*) override {}
-
-  const PartitionMemoryStats& stats() const { return stats_; }
-
- private:
-  PartitionMemoryStats stats_;
-};
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_STATS_H_
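For context, a hypothetical consumer of the PartitionStatsDumper interface declared above. Only the names visible in this header are assumed to exist; CountingStatsDumper and its counters are illustrative:

#include "base/allocator/partition_allocator/partition_stats.h"

// Records the committed total and counts buckets with live objects; a sketch only.
class CountingStatsDumper : public partition_alloc::PartitionStatsDumper {
 public:
  void PartitionDumpTotals(
      const char* partition_name,
      const partition_alloc::PartitionMemoryStats* memory_stats) override {
    total_committed_bytes_ = memory_stats->total_committed_bytes;
  }

  void PartitionsDumpBucketStats(
      const char* partition_name,
      const partition_alloc::PartitionBucketMemoryStats* stats) override {
    if (stats->is_valid && stats->active_bytes > 0) {
      ++nonempty_buckets_;
    }
  }

  size_t total_committed_bytes() const { return total_committed_bytes_; }
  int nonempty_buckets() const { return nonempty_buckets_; }

 private:
  size_t total_committed_bytes_ = 0;
  int nonempty_buckets_ = 0;
};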
diff --git a/base/allocator/partition_allocator/partition_superpage_extent_entry.h b/base/allocator/partition_allocator/partition_superpage_extent_entry.h
deleted file mode 100644
index ee36a7a..0000000
--- a/base/allocator/partition_allocator/partition_superpage_extent_entry.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_SUPERPAGE_EXTENT_ENTRY_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_SUPERPAGE_EXTENT_ENTRY_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/address_pool_manager_types.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_dcheck_helper.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-
-// Should not include partition_root.h, partition_bucket.h, partition_page.h.
-// For IsQuarantineAllowed(), use partition_dcheck_helper.h instead of
-// partition_root.h.
-
-namespace partition_alloc::internal {
-
-// An "extent" is a span of consecutive superpages. We link the partition's next
-// extent (if there is one) to the very start of a superpage's metadata area.
-struct PartitionSuperPageExtentEntry {
-  PartitionRoot* root;
-  PartitionSuperPageExtentEntry* next;
-  uint16_t number_of_consecutive_super_pages;
-  uint16_t number_of_nonempty_slot_spans;
-
-  PA_ALWAYS_INLINE void IncrementNumberOfNonemptySlotSpans() {
-    DCheckNumberOfPartitionPagesInSuperPagePayload(
-        this, root, number_of_nonempty_slot_spans);
-    ++number_of_nonempty_slot_spans;
-  }
-
-  PA_ALWAYS_INLINE void DecrementNumberOfNonemptySlotSpans() {
-    PA_DCHECK(number_of_nonempty_slot_spans);
-    --number_of_nonempty_slot_spans;
-  }
-};
-
-static_assert(
-    sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
-    "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
-static_assert(kMaxSuperPagesInPool / kSuperPageSize <=
-                  std::numeric_limits<
-                      decltype(PartitionSuperPageExtentEntry ::
-                                   number_of_consecutive_super_pages)>::max(),
-              "number_of_consecutive_super_pages must be big enough");
-
-// Returns the base of the first super page in the range of consecutive super
-// pages.
-//
-// CAUTION! |extent| must point to the extent of the first super page in the
-// range of consecutive super pages.
-PA_ALWAYS_INLINE uintptr_t
-SuperPagesBeginFromExtent(const PartitionSuperPageExtentEntry* extent) {
-  PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
-  uintptr_t extent_as_uintptr = reinterpret_cast<uintptr_t>(extent);
-  PA_DCHECK(IsManagedByNormalBuckets(extent_as_uintptr));
-  return base::bits::AlignDown(extent_as_uintptr, kSuperPageAlignment);
-}
-
-// Returns the end of the last super page in the range of consecutive super
-// pages.
-//
-// CAUTION! |extent| must point to the extent of the first super page in the
-// range of consecutive super pages.
-PA_ALWAYS_INLINE uintptr_t
-SuperPagesEndFromExtent(const PartitionSuperPageExtentEntry* extent) {
-  return SuperPagesBeginFromExtent(extent) +
-         (extent->number_of_consecutive_super_pages * kSuperPageSize);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_SUPERPAGE_EXTENT_ENTRY_H_
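To make the helpers above concrete, a short sketch of walking every super page covered by one extent. ForEachSuperPage is a hypothetical name; the SuperPages*FromExtent helpers and kSuperPageSize come from this header and partition_alloc_constants.h:

#include <cstdint>

#include "base/allocator/partition_allocator/partition_superpage_extent_entry.h"

// Visits the base address of each super page in the extent's consecutive range.
void ForEachSuperPage(
    const partition_alloc::internal::PartitionSuperPageExtentEntry* extent,
    void (*visit)(uintptr_t super_page_base)) {
  uintptr_t begin =
      partition_alloc::internal::SuperPagesBeginFromExtent(extent);
  uintptr_t end = partition_alloc::internal::SuperPagesEndFromExtent(extent);
  for (uintptr_t base = begin; base < end;
       base += partition_alloc::internal::kSuperPageSize) {
    visit(base);
  }
}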
diff --git a/base/allocator/partition_allocator/partition_tls.h b/base/allocator/partition_allocator/partition_tls.h
deleted file mode 100644
index 420559f..0000000
--- a/base/allocator/partition_allocator/partition_tls.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_POSIX)
-#include <pthread.h>
-#endif
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
-#endif
-
-// Barebones TLS implementation for use in PartitionAlloc. This doesn't use the
-// general chromium TLS handling to avoid dependencies, but more importantly
-// because it allocates memory.
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-using PartitionTlsKey = pthread_key_t;
-
-// Only on x86_64; the implementation is not stable on ARM64. For instance, in
-// macOS 11, the TPIDRRO_EL0 register holds the CPU index in the low bits,
-// which is not the case in macOS 12. See libsyscall/os/tsd.h in XNU
-// (_os_tsd_get_direct() is used by pthread_getspecific() internally).
-#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
-namespace {
-
-PA_ALWAYS_INLINE void* FastTlsGet(PartitionTlsKey index) {
-  // On macOS, pthread_getspecific() is in libSystem, so a call to it has to go
-  // through PLT. However, and contrary to some other platforms, *all* TLS keys
-  // are in a static array in the thread structure. So they are *always* at a
-  // fixed offset from the segment register holding the thread structure
-  // address.
-  //
-  // We could use _pthread_getspecific_direct(), but it is not
-  // exported. However, on all macOS versions we support, the TLS array is at
-  // %gs. This is used in V8 to back up InternalGetExistingThreadLocal(), and
-  // can also be seen by looking at pthread_getspecific() disassembly:
-  //
-  // libsystem_pthread.dylib`pthread_getspecific:
-  // libsystem_pthread.dylib[0x7ff800316099] <+0>: movq   %gs:(,%rdi,8), %rax
-  // libsystem_pthread.dylib[0x7ff8003160a2] <+9>: retq
-  //
-  // This function is essentially inlining the content of pthread_getspecific()
-  // here.
-  intptr_t result;
-  static_assert(sizeof index <= sizeof(intptr_t));
-  asm("movq %%gs:(,%1,8), %0;"
-      : "=r"(result)
-      : "r"(static_cast<intptr_t>(index)));
-
-  return reinterpret_cast<void*>(result);
-}
-
-}  // namespace
-#endif  // BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
-
-PA_ALWAYS_INLINE bool PartitionTlsCreate(PartitionTlsKey* key,
-                                         void (*destructor)(void*)) {
-  return !pthread_key_create(key, destructor);
-}
-
-PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
-#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
-  PA_DCHECK(pthread_getspecific(key) == FastTlsGet(key));
-  return FastTlsGet(key);
-#else
-  return pthread_getspecific(key);
-#endif
-}
-
-PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
-  int ret = pthread_setspecific(key, value);
-  PA_DCHECK(!ret);
-}
-
-#elif BUILDFLAG(IS_WIN)
-// Note: supports only a single TLS key on Windows. Not a hard constraint, may
-// be lifted.
-using PartitionTlsKey = unsigned long;
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool PartitionTlsCreate(PartitionTlsKey* key, void (*destructor)(void*));
-
-PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
-  // Accessing TLS resets the last error, which then makes |GetLastError()|
-  // return something misleading. While this means that properly using
-  // |GetLastError()| is difficult, there is currently code in Chromium which
-  // expects malloc() to *not* reset it. This means we either have to fix that
-  // code or pay the cost of saving/restoring it.
-  //
-  // Source:
-  // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-tlsgetvalue
-  // "Functions that return indications of failure call SetLastError() when they
-  // fail. They generally do not call SetLastError() when they succeed. The
-  // TlsGetValue() function is an exception to this general rule. The
-  // TlsGetValue() function calls SetLastError() to clear a thread's last error
-  // when it succeeds."
-  DWORD saved_error = GetLastError();
-  void* ret = TlsGetValue(key);
-  // Only non-zero errors need to be restored.
-  if (PA_UNLIKELY(saved_error)) {
-    SetLastError(saved_error);
-  }
-  return ret;
-}
-
-PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
-  BOOL ret = TlsSetValue(key, value);
-  PA_DCHECK(ret);
-}
-
-// Registers a callback for DLL_PROCESS_DETACH events.
-void PartitionTlsSetOnDllProcessDetach(void (*callback)());
-
-#else
-// Not supported.
-using PartitionTlsKey = int;
-
-PA_ALWAYS_INLINE bool PartitionTlsCreate(PartitionTlsKey* key,
-                                         void (*destructor)(void*)) {
-  // NOTIMPLEMENTED() may allocate, crash instead.
-  PA_IMMEDIATE_CRASH();
-}
-
-PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
-  PA_IMMEDIATE_CRASH();
-}
-
-PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
-  PA_IMMEDIATE_CRASH();
-}
-
-#endif  // BUILDFLAG(IS_WIN)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_
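For context, a minimal sketch of how the TLS shim above is typically driven. Only the PartitionTlsKey type and the PartitionTlsCreate/Get/Set calls come from this header; the surrounding helper names and the int per-thread state are hypothetical:

#include "base/allocator/partition_allocator/partition_tls.h"

namespace {

partition_alloc::internal::PartitionTlsKey g_example_key;

// Invoked on thread exit with the last value stored via PartitionTlsSet().
void DestroyThreadState(void* per_thread_data) {
  delete static_cast<int*>(per_thread_data);
}

}  // namespace

bool InitExampleTls() {
  return partition_alloc::internal::PartitionTlsCreate(&g_example_key,
                                                       &DestroyThreadState);
}

int* GetOrCreateThreadState() {
  void* data = partition_alloc::internal::PartitionTlsGet(g_example_key);
  if (!data) {
    data = new int(0);
    partition_alloc::internal::PartitionTlsSet(g_example_key, data);
  }
  return static_cast<int*>(data);
}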
diff --git a/base/allocator/partition_allocator/partition_tls_win.cc b/base/allocator/partition_allocator/partition_tls_win.cc
deleted file mode 100644
index 5f1b161..0000000
--- a/base/allocator/partition_allocator/partition_tls_win.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_tls.h"
-
-#include <windows.h>
-
-namespace partition_alloc::internal {
-
-namespace {
-
-// Store the key as the thread destruction callback doesn't get it.
-PartitionTlsKey g_key;
-void (*g_destructor)(void*) = nullptr;
-void (*g_on_dll_process_detach)() = nullptr;
-
-// Static callback function to call with each thread termination.
-void NTAPI PartitionTlsOnThreadExit(PVOID module,
-                                    DWORD reason,
-                                    PVOID reserved) {
-  if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH) {
-    return;
-  }
-
-  if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach) {
-    g_on_dll_process_detach();
-  }
-
-  if (g_destructor) {
-    void* per_thread_data = PartitionTlsGet(g_key);
-    if (per_thread_data) {
-      g_destructor(per_thread_data);
-    }
-  }
-}
-
-}  // namespace
-
-bool PartitionTlsCreate(PartitionTlsKey* key, void (*destructor)(void*)) {
-  PA_CHECK(g_destructor == nullptr);  // Only one TLS key supported at a time.
-  PartitionTlsKey value = TlsAlloc();
-  if (value != TLS_OUT_OF_INDEXES) {
-    *key = value;
-
-    g_key = value;
-    g_destructor = destructor;
-    return true;
-  }
-  return false;
-}
-
-void PartitionTlsSetOnDllProcessDetach(void (*callback)()) {
-  g_on_dll_process_detach = callback;
-}
-
-}  // namespace partition_alloc::internal
-
-// See thread_local_storage_win.cc for details and reference.
-//
-// The callback has to be in any section between .CRT$XLA and .CRT$XLZ, as these
-// are sentinels used by the TLS code to find the callback array bounds. As we
-// don't particularly care about where we are called but would prefer to be
-// deinitialized towards the end (in particular after Chromium's TLS), we locate
-// ourselves in .CRT$XLY.
-
-// Force a reference to _tls_used to make the linker create the TLS directory if
-// it's not already there.  (e.g. if __declspec(thread) is not used).  Force a
-// reference to partition_tls_thread_exit_callback to prevent whole program
-// optimization from discarding the variable.
-#ifdef _WIN64
-
-#pragma comment(linker, "/INCLUDE:_tls_used")
-#pragma comment(linker, "/INCLUDE:partition_tls_thread_exit_callback")
-
-#else  // _WIN64
-
-#pragma comment(linker, "/INCLUDE:__tls_used")
-#pragma comment(linker, "/INCLUDE:_partition_tls_thread_exit_callback")
-
-#endif  // _WIN64
-
-// extern "C" suppresses C++ name mangling so we know the symbol name for the
-// linker /INCLUDE:symbol pragma above.
-extern "C" {
-// The linker must not discard partition_tls_thread_exit_callback.  (We force a
-// reference to this variable with a linker /INCLUDE:symbol pragma to ensure
-// that.) If this variable is discarded, PartitionTlsOnThreadExit will never be
-// called.
-#ifdef _WIN64
-
-// .CRT section is merged with .rdata on x64 so it must be constant data.
-#pragma const_seg(".CRT$XLY")
-// When defining a const variable, it must have external linkage to be sure the
-// linker doesn't discard it.
-extern const PIMAGE_TLS_CALLBACK partition_tls_thread_exit_callback;
-const PIMAGE_TLS_CALLBACK partition_tls_thread_exit_callback =
-    partition_alloc::internal::PartitionTlsOnThreadExit;
-
-// Reset the default section.
-#pragma const_seg()
-
-#else  // _WIN64
-
-#pragma data_seg(".CRT$XLY")
-PIMAGE_TLS_CALLBACK partition_tls_thread_exit_callback =
-    partition_alloc::internal::PartitionTlsOnThreadExit;
-
-// Reset the default section.
-#pragma data_seg()
-
-#endif  // _WIN64
-}  // extern "C"
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr.h b/base/allocator/partition_allocator/pointers/raw_ptr.h
deleted file mode 100644
index 32950e5..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr.h
+++ /dev/null
@@ -1,1073 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <cstddef>
-#include <functional>
-#include <type_traits>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h"
-#include "build/build_config.h"
-#include "build/buildflag.h"
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types.h"
-#endif
-
-#if BUILDFLAG(USE_PARTITION_ALLOC)
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-// Live implementation of MiraclePtr being built.
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-#define PA_RAW_PTR_CHECK(condition) PA_BASE_CHECK(condition)
-#else
-// No-op implementation of MiraclePtr being built.
-// Note that `PA_BASE_DCHECK()` evaporates from non-DCHECK builds,
-// minimizing impact of generated code.
-#define PA_RAW_PTR_CHECK(condition) PA_BASE_DCHECK(condition)
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
-        // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-#else   // BUILDFLAG(USE_PARTITION_ALLOC)
-// Without PartitionAlloc, there's no `PA_BASE_D?CHECK()` implementation
-// available.
-#define PA_RAW_PTR_CHECK(condition)
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC)
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-#include "base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h"
-#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
-#include "base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.h"
-#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-#include "base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.h"
-#else
-#include "base/allocator/partition_allocator/pointers/raw_ptr_noop_impl.h"
-#endif
-
-namespace cc {
-class Scheduler;
-}
-namespace base::internal {
-class DelayTimerBase;
-}
-namespace content::responsiveness {
-class Calculator;
-}
-
-namespace base {
-
-// NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a
-// lightweight replacement of a raw pointer, hence performance is critical.
-
-// This is a bitfield representing the different flags that can be applied to a
-// raw_ptr.
-//
-// Internal use only: Developers shouldn't use those values directly.
-//
-// Housekeeping rules: Try not to change trait values, so that numeric trait
-// values stay constant across builds (could be useful e.g. when analyzing stack
-// traces). A reasonable exception to this rule is the set of `*ForTest` traits. As a
-// matter of fact, we propose that new non-test traits are added before the
-// `*ForTest` traits.
-enum class RawPtrTraits : unsigned {
-  kEmpty = 0,
-
-  // Disables dangling pointer detection, but keeps other raw_ptr protections.
-  //
-  // Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged
-  // instead.
-  kMayDangle = (1 << 0),
-
-  // Disables any hooks, when building with BUILDFLAG(USE_HOOKABLE_RAW_PTR).
-  //
-  // Internal use only.
-  kDisableHooks = (1 << 2),
-
-  // Pointer arithmetic is discouraged and disabled by default.
-  //
-  // Don't use directly, use AllowPtrArithmetic instead.
-  kAllowPtrArithmetic = (1 << 3),
-
-  // This pointer is evaluated by a separate, Ash-related experiment.
-  //
-  // Don't use directly, use ExperimentalAsh instead.
-  kExperimentalAsh = (1 << 4),
-
-  // *** ForTest traits below ***
-
-  // Adds accounting, on top of the chosen implementation, for test purposes.
-  // raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track
-  // the number of times the raw_ptr is wrapped, unwrapped, etc.
-  //
-  // Test only. Include raw_ptr_counting_wrapper_impl_for_test.h in your test
-  // files when using this trait.
-  kUseCountingWrapperForTest = (1 << 10),
-
-  // Helper trait that can be used to test raw_ptr's behaviour or conversions.
-  //
-  // Test only.
-  kDummyForTest = (1 << 11),
-};
-
-// Used to combine RawPtrTraits:
-constexpr RawPtrTraits operator|(RawPtrTraits a, RawPtrTraits b) {
-  return static_cast<RawPtrTraits>(static_cast<unsigned>(a) |
-                                   static_cast<unsigned>(b));
-}
-constexpr RawPtrTraits operator&(RawPtrTraits a, RawPtrTraits b) {
-  return static_cast<RawPtrTraits>(static_cast<unsigned>(a) &
-                                   static_cast<unsigned>(b));
-}
-constexpr RawPtrTraits operator~(RawPtrTraits a) {
-  return static_cast<RawPtrTraits>(~static_cast<unsigned>(a));
-}
-
-namespace raw_ptr_traits {
-
-constexpr bool Contains(RawPtrTraits a, RawPtrTraits b) {
-  return (a & b) != RawPtrTraits::kEmpty;
-}
-
-constexpr RawPtrTraits Remove(RawPtrTraits a, RawPtrTraits b) {
-  return a & ~b;
-}
-
-constexpr bool AreValid(RawPtrTraits traits) {
-  return Remove(traits, RawPtrTraits::kMayDangle | RawPtrTraits::kDisableHooks |
-                            RawPtrTraits::kAllowPtrArithmetic |
-                            RawPtrTraits::kExperimentalAsh |
-                            RawPtrTraits::kUseCountingWrapperForTest |
-                            RawPtrTraits::kDummyForTest) ==
-         RawPtrTraits::kEmpty;
-}
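A small illustration of how the trait operators and helpers above compose. These are compile-time checks only, written as a standalone sketch assuming raw_ptr.h is included at namespace scope; every name used comes from the declarations above:

static_assert(base::raw_ptr_traits::AreValid(
    base::RawPtrTraits::kMayDangle | base::RawPtrTraits::kAllowPtrArithmetic));
static_assert(base::raw_ptr_traits::Contains(
    base::RawPtrTraits::kMayDangle | base::RawPtrTraits::kDisableHooks,
    base::RawPtrTraits::kDisableHooks));
static_assert(base::raw_ptr_traits::Remove(
                  base::RawPtrTraits::kMayDangle |
                      base::RawPtrTraits::kDummyForTest,
                  base::RawPtrTraits::kDummyForTest) ==
              base::RawPtrTraits::kMayDangle);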
-
-// IsSupportedType<T>::value answers whether raw_ptr<T> 1) compiles and 2) is
-// always safe at runtime.  Templates that may end up using `raw_ptr<T>` should
-// use IsSupportedType to ensure that raw_ptr is not used with unsupported
-// types.  As an example, see how base::internal::StorageTraits uses
-// IsSupportedType as a condition for using base::internal::UnretainedWrapper
-// (which has a `ptr_` field that will become `raw_ptr<T>` after the Big
-// Rewrite).
-template <typename T, typename SFINAE = void>
-struct IsSupportedType {
-  static constexpr bool value = true;
-};
-
-// raw_ptr<T> is not compatible with function pointer types. Also, they don't
-// even need the raw_ptr protection, because they don't point to the heap.
-template <typename T>
-struct IsSupportedType<T, std::enable_if_t<std::is_function<T>::value>> {
-  static constexpr bool value = false;
-};
-
-// This section excludes some types from raw_ptr<T> to avoid them from being
-// used inside base::Unretained in performance sensitive places. These were
-// identified from sampling profiler data. See crbug.com/1287151 for more info.
-template <>
-struct IsSupportedType<cc::Scheduler> {
-  static constexpr bool value = false;
-};
-template <>
-struct IsSupportedType<base::internal::DelayTimerBase> {
-  static constexpr bool value = false;
-};
-template <>
-struct IsSupportedType<content::responsiveness::Calculator> {
-  static constexpr bool value = false;
-};
-
-#if __OBJC__
-// raw_ptr<T> is not compatible with pointers to Objective-C classes for a
-// multitude of reasons. They may fail to compile in many cases, and wouldn't
-// work well with tagged pointers. Anyway, Objective-C objects have their own
-// way of tracking lifespan, hence don't need the raw_ptr protection as much.
-//
-// Such pointers are detected by checking if they're convertible to |id| type.
-template <typename T>
-struct IsSupportedType<T,
-                       std::enable_if_t<std::is_convertible<T*, id>::value>> {
-  static constexpr bool value = false;
-};
-#endif  // __OBJC__
-
-#if BUILDFLAG(IS_WIN)
-// raw_ptr<HWND__> is unsafe at runtime - if the handle happens to also
-// represent a valid pointer into a PartitionAlloc-managed region then it can
-// lead to manipulating random memory when treating it as BackupRefPtr
-// ref-count.  See also https://crbug.com/1262017.
-//
-// TODO(https://crbug.com/1262017): Cover other handle types like HANDLE,
-// HLOCAL, HINTERNET, or HDEVINFO.  Maybe we should avoid using raw_ptr<T> when
-// T=void (as is the case in these handle types).  OTOH, explicit,
-// non-template-based raw_ptr<void> should be allowed.  Maybe this can be solved
-// by having 2 traits: IsPointeeAlwaysSafe (to be used in templates) and
-// IsPointeeUsuallySafe (to be used in the static_assert in raw_ptr).  The
-// upside of this approach is that it will safely handle base::Bind closing over
-// HANDLE.  The downside of this approach is that base::Bind closing over a
-// void* pointer will not get UaF protection.
-#define PA_WINDOWS_HANDLE_TYPE(name)       \
-  template <>                              \
-  struct IsSupportedType<name##__, void> { \
-    static constexpr bool value = false;   \
-  };
-#include "base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types_list.inc"
-#undef PA_WINDOWS_HANDLE_TYPE
-#endif
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-template <RawPtrTraits Traits>
-using UnderlyingImplForTraits = internal::RawPtrBackupRefImpl<
-    /*AllowDangling=*/Contains(Traits, RawPtrTraits::kMayDangle),
-    /*ExperimentalAsh=*/Contains(Traits, RawPtrTraits::kExperimentalAsh)>;
-
-#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
-template <RawPtrTraits Traits>
-using UnderlyingImplForTraits =
-    internal::RawPtrAsanUnownedImpl<Contains(Traits,
-                                             RawPtrTraits::kAllowPtrArithmetic),
-                                    Contains(Traits, RawPtrTraits::kMayDangle)>;
-
-#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-template <RawPtrTraits Traits>
-using UnderlyingImplForTraits = internal::RawPtrHookableImpl<
-    /*EnableHooks=*/!Contains(Traits, RawPtrTraits::kDisableHooks)>;
-
-#else
-template <RawPtrTraits Traits>
-using UnderlyingImplForTraits = internal::RawPtrNoOpImpl;
-#endif
-
-}  // namespace raw_ptr_traits
-
-namespace test {
-
-template <RawPtrTraits Traits>
-struct RawPtrCountingImplWrapperForTest;
-
-}  // namespace test
-
-namespace raw_ptr_traits {
-
-// ImplForTraits is the struct that implements raw_ptr functions. Think of
-// raw_ptr as a thin wrapper, that directs calls to ImplForTraits. ImplForTraits
-// may be different from UnderlyingImplForTraits, because it may include a
-// wrapper.
-template <RawPtrTraits Traits>
-using ImplForTraits = std::conditional_t<
-    Contains(Traits, RawPtrTraits::kUseCountingWrapperForTest),
-    test::RawPtrCountingImplWrapperForTest<
-        Remove(Traits, RawPtrTraits::kUseCountingWrapperForTest)>,
-    UnderlyingImplForTraits<Traits>>;
-
-}  // namespace raw_ptr_traits
-
-// `raw_ptr<T>` is a non-owning smart pointer that has improved memory-safety
-// over raw pointers.  It behaves just like a raw pointer on platforms where
-// USE_BACKUP_REF_PTR is off, and almost like one when it's on (the main
-// difference is that it's zero-initialized and cleared on destruction and
-// move). Unlike `std::unique_ptr<T>`, `base::scoped_refptr<T>`, etc., it
-// doesn’t manage ownership or lifetime of an allocated object - you are still
-// responsible for freeing the object when no longer used, just as you would
-// with a raw C++ pointer.
-//
-// Compared to a raw C++ pointer, on platforms where USE_BACKUP_REF_PTR is on,
-// `raw_ptr<T>` incurs additional performance overhead for initialization,
-// destruction, and assignment (including `ptr++` and `ptr += ...`).  There is
-// no overhead when dereferencing a pointer.
-//
-// `raw_ptr<T>` is beneficial for security, because it can prevent a significant
-// percentage of Use-after-Free (UaF) bugs from being exploitable.  `raw_ptr<T>`
-// has limited impact on stability - dereferencing a dangling pointer remains
-// Undefined Behavior.  Note that the security protection is not yet enabled by
-// default.
-//
-// raw_ptr<T> is marked as [[gsl::Pointer]] which allows the compiler to catch
-// some bugs where the raw_ptr holds a dangling pointer to a temporary object.
-// However the [[gsl::Pointer]] analysis expects that such types do not have a
-// non-default move constructor/assignment. Thus, it's possible to get an error
-// where the pointer is not actually dangling, and have to work around the
-// compiler. We have not managed to construct such an example in Chromium yet.
-template <typename T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
-class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
- public:
-  using Impl = typename raw_ptr_traits::ImplForTraits<Traits>;
-  // Needed to make gtest Pointee matcher work with raw_ptr.
-  using element_type = T;
-  using DanglingType = raw_ptr<T, Traits | RawPtrTraits::kMayDangle>;
-
-#if !BUILDFLAG(USE_PARTITION_ALLOC)
-  // See comment at top about `PA_RAW_PTR_CHECK()`.
-  static_assert(std::is_same_v<Impl, internal::RawPtrNoOpImpl>);
-#endif  // !BUILDFLAG(USE_PARTITION_ALLOC)
-
-  static_assert(raw_ptr_traits::AreValid(Traits), "Unknown raw_ptr trait(s)");
-  static_assert(raw_ptr_traits::IsSupportedType<T>::value,
-                "raw_ptr<T> doesn't work with this kind of pointee type T");
-
-  // TODO(bartekn): Turn on zeroing as much as possible, to reduce
-  // pointer-related UBs. In the current implementation we do it only when the
-  // underlying implementation needs it for correctness, for performance
-  // reasons. There are two scenarios where it's important:
-  // 1. When rewriting the renderer, we don't want extra overhead to get in the
-  //    way of our perf evaluation.
-  // 2. The same applies to rewriting 3rd party libraries, but also we want
-  //    RawPtrNoOpImpl to be a true no-op, in case the library is linked with
-  //    a product other than Chromium (this can be mitigated using
-  //    `build_with_chromium` GN variable).
-  static constexpr bool kZeroOnInit = Impl::kMustZeroOnInit;
-  static constexpr bool kZeroOnMove = Impl::kMustZeroOnMove;
-  static constexpr bool kZeroOnDestruct = Impl::kMustZeroOnDestruct;
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-  // BackupRefPtr requires a non-trivial default constructor, destructor, etc.
-  PA_ALWAYS_INLINE constexpr raw_ptr() noexcept {
-    if constexpr (kZeroOnInit) {
-      wrapped_ptr_ = nullptr;
-    }
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ptr(const raw_ptr& p) noexcept
-      : wrapped_ptr_(Impl::Duplicate(p.wrapped_ptr_)) {}
-
-  PA_ALWAYS_INLINE constexpr raw_ptr(raw_ptr&& p) noexcept {
-    wrapped_ptr_ = p.wrapped_ptr_;
-    if constexpr (kZeroOnMove) {
-      p.wrapped_ptr_ = nullptr;
-    }
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(const raw_ptr& p) noexcept {
-    // Duplicate before releasing, in case the pointer is assigned to itself.
-    //
-    // Unlike the move version of this operator, don't add |this != &p| branch,
-    // for performance reasons. Even though Duplicate() is not cheap, we
-    // practically never assign a raw_ptr<T> to itself. We suspect that a
-    // cumulative cost of a conditional branch, even if always correctly
-    // predicted, would exceed that.
-    T* new_ptr = Impl::Duplicate(p.wrapped_ptr_);
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    wrapped_ptr_ = new_ptr;
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(raw_ptr&& p) noexcept {
-    // Unlike the copy version of this operator, this branch is necessary
-    // for correctness.
-    if (PA_LIKELY(this != &p)) {
-      Impl::ReleaseWrappedPtr(wrapped_ptr_);
-      wrapped_ptr_ = p.wrapped_ptr_;
-      if constexpr (kZeroOnMove) {
-        p.wrapped_ptr_ = nullptr;
-      }
-    }
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR ~raw_ptr() noexcept {
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    // Work around external issues where raw_ptr is used after destruction.
-    if constexpr (kZeroOnDestruct) {
-      wrapped_ptr_ = nullptr;
-    }
-  }
-
-#else   // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
-        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-  // raw_ptr can be trivially default constructed (leaving |wrapped_ptr_|
-  // uninitialized).
-  PA_ALWAYS_INLINE constexpr raw_ptr() noexcept = default;
-
-  // In addition to nullptr_t ctor above, raw_ptr needs to have these
-  // as |=default| or |constexpr| to avoid hitting -Wglobal-constructors in
-  // cases like this:
-  //     struct SomeStruct { int int_field; raw_ptr<int> ptr_field; };
-  //     SomeStruct g_global_var = { 123, nullptr };
-  PA_ALWAYS_INLINE raw_ptr(const raw_ptr&) noexcept = default;
-  PA_ALWAYS_INLINE raw_ptr(raw_ptr&&) noexcept = default;
-  PA_ALWAYS_INLINE raw_ptr& operator=(const raw_ptr&) noexcept = default;
-  PA_ALWAYS_INLINE raw_ptr& operator=(raw_ptr&&) noexcept = default;
-
-  PA_ALWAYS_INLINE ~raw_ptr() noexcept = default;
-
-  // With default constructor, destructor and move operations, we don't have an
-  // opportunity to zero the underlying pointer, so ensure this isn't expected.
-  static_assert(!kZeroOnInit);
-  static_assert(!kZeroOnMove);
-  static_assert(!kZeroOnDestruct);
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
-        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-  // Cross-kind copy constructor.
-  // Move is not supported as different traits may use different ref-counts, so
-  // let move operations degrade to copy, which handles it well.
-  template <RawPtrTraits PassedTraits,
-            typename Unused = std::enable_if_t<Traits != PassedTraits>>
-  PA_ALWAYS_INLINE constexpr explicit raw_ptr(
-      const raw_ptr<T, PassedTraits>& p) noexcept
-      : wrapped_ptr_(Impl::WrapRawPtrForDuplication(
-            raw_ptr_traits::ImplForTraits<PassedTraits>::
-                UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {
-    // Limit cross-kind conversions only to cases where kMayDangle gets added,
-    // because that's needed for Unretained(Ref)Wrapper. Use a static_assert,
-    // instead of disabling via SFINAE, so that the compiler catches other
-    // conversions. Otherwise implicit raw_ptr<T> -> T* -> raw_ptr<> route will
-    // be taken.
-    static_assert(Traits == (PassedTraits | RawPtrTraits::kMayDangle));
-  }
-
-  // Cross-kind assignment.
-  // Move is not supported as different traits may use different ref-counts, so
-  // let move operations degrade to copy, which handles it well.
-  template <RawPtrTraits PassedTraits,
-            typename Unused = std::enable_if_t<Traits != PassedTraits>>
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(
-      const raw_ptr<T, PassedTraits>& p) noexcept {
-    // Limit cross-kind assignments only to cases where kMayDangle gets added,
-    // because that's needed for Unretained(Ref)Wrapper. Use a static_assert,
-    // instead of disabling via SFINAE, so that the compiler catches other
-    // conversions. Otherwise implicit raw_ptr<T> -> T* -> raw_ptr<> route will
-    // be taken.
-    static_assert(Traits == (PassedTraits | RawPtrTraits::kMayDangle));
-
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    wrapped_ptr_ = Impl::WrapRawPtrForDuplication(
-        raw_ptr_traits::ImplForTraits<
-            PassedTraits>::UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_));
-    return *this;
-  }
-
-  // Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
-  // Ignore kZeroOnInit, because here the caller explicitly wishes to initialize
-  // with nullptr. NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr raw_ptr(std::nullptr_t) noexcept
-      : wrapped_ptr_(nullptr) {}
-
-  // Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr raw_ptr(T* p) noexcept
-      : wrapped_ptr_(Impl::WrapRawPtr(p)) {}
-
-  // Deliberately implicit in order to support implicit upcast.
-  template <typename U,
-            typename Unused = std::enable_if_t<
-                std::is_convertible<U*, T*>::value &&
-                !std::is_void<typename std::remove_cv<T>::type>::value>>
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr raw_ptr(const raw_ptr<U, Traits>& ptr) noexcept
-      : wrapped_ptr_(
-            Impl::Duplicate(Impl::template Upcast<T, U>(ptr.wrapped_ptr_))) {}
-  // Deliberately implicit in order to support implicit upcast.
-  template <typename U,
-            typename Unused = std::enable_if_t<
-                std::is_convertible<U*, T*>::value &&
-                !std::is_void<typename std::remove_cv<T>::type>::value>>
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr raw_ptr(raw_ptr<U, Traits>&& ptr) noexcept
-      : wrapped_ptr_(Impl::template Upcast<T, U>(ptr.wrapped_ptr_)) {
-    if constexpr (kZeroOnMove) {
-      ptr.wrapped_ptr_ = nullptr;
-    }
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(std::nullptr_t) noexcept {
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    wrapped_ptr_ = nullptr;
-    return *this;
-  }
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(T* p) noexcept {
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    wrapped_ptr_ = Impl::WrapRawPtr(p);
-    return *this;
-  }
-
-  // Upcast assignment
-  template <typename U,
-            typename Unused = std::enable_if_t<
-                std::is_convertible<U*, T*>::value &&
-                !std::is_void<typename std::remove_cv<T>::type>::value>>
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(
-      const raw_ptr<U, Traits>& ptr) noexcept {
-    // Make sure that pointer isn't assigned to itself (look at raw_ptr address,
-    // not its contained pointer value). The comparison is only needed when they
-    // are the same type, otherwise they can't be the same raw_ptr object.
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-    if constexpr (std::is_same_v<raw_ptr, std::decay_t<decltype(ptr)>>) {
-      PA_RAW_PTR_CHECK(this != &ptr);
-    }
-#endif
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    wrapped_ptr_ =
-        Impl::Duplicate(Impl::template Upcast<T, U>(ptr.wrapped_ptr_));
-    return *this;
-  }
-  template <typename U,
-            typename Unused = std::enable_if_t<
-                std::is_convertible<U*, T*>::value &&
-                !std::is_void<typename std::remove_cv<T>::type>::value>>
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(
-      raw_ptr<U, Traits>&& ptr) noexcept {
-    // Make sure that pointer isn't assigned to itself (look at raw_ptr address,
-    // not its contained pointer value). The comparison is only needed when they
-    // are the same type, otherwise they can't be the same raw_ptr object.
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-    if constexpr (std::is_same_v<raw_ptr, std::decay_t<decltype(ptr)>>) {
-      PA_RAW_PTR_CHECK(this != &ptr);
-    }
-#endif
-    Impl::ReleaseWrappedPtr(wrapped_ptr_);
-    wrapped_ptr_ = Impl::template Upcast<T, U>(ptr.wrapped_ptr_);
-    if constexpr (kZeroOnMove) {
-      ptr.wrapped_ptr_ = nullptr;
-    }
-    return *this;
-  }
-
-  // Avoid using. The goal of raw_ptr is to be as close to a raw pointer as
-  // possible, so use it only if absolutely necessary (e.g. for const_cast).
-  PA_ALWAYS_INLINE constexpr T* get() const { return GetForExtraction(); }
-
-  // You may use |raw_ptr<T>::AsEphemeralRawAddr()| to obtain |T**| or |T*&|
-  // from |raw_ptr<T>|, as long as you follow these requirements:
-  // - DO NOT carry the T**/T*& obtained via AsEphemeralRawAddr() out of the
-  //   expression.
-  // - DO NOT use raw_ptr or T**/T*& multiple times within an expression.
-  //
-  // https://chromium.googlesource.com/chromium/src/+/main/base/memory/raw_ptr.md#in_out-arguments-need-to-be-refactored
-  class EphemeralRawAddr {
-   public:
-    EphemeralRawAddr(const EphemeralRawAddr&) = delete;
-    EphemeralRawAddr& operator=(const EphemeralRawAddr&) = delete;
-    void* operator new(size_t) = delete;
-    void* operator new(size_t, void*) = delete;
-    PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR ~EphemeralRawAddr() { original = copy; }
-
-    PA_ALWAYS_INLINE constexpr T** operator&() && { return &copy; }
-    // NOLINTNEXTLINE(google-explicit-constructor)
-    PA_ALWAYS_INLINE constexpr operator T*&() && { return copy; }
-
-   private:
-    friend class raw_ptr;
-    PA_ALWAYS_INLINE constexpr explicit EphemeralRawAddr(raw_ptr& ptr)
-        : copy(ptr.get()), original(ptr) {}
-    T* copy;
-    raw_ptr& original;  // Original pointer.
-  };
-  PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR EphemeralRawAddr AsEphemeralRawAddr() & {
-    return EphemeralRawAddr(*this);
-  }
-
-  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
-    return !!wrapped_ptr_;
-  }
-
-  template <typename U = T,
-            typename Unused = std::enable_if_t<
-                !std::is_void<typename std::remove_cv<U>::type>::value>>
-  PA_ALWAYS_INLINE constexpr U& operator*() const {
-    return *GetForDereference();
-  }
-  PA_ALWAYS_INLINE constexpr T* operator->() const {
-    return GetForDereference();
-  }
-
-  // Disables `(my_raw_ptr->*pmf)(...)` as a workaround for
-  // the ICE in GCC parsing the code, reported at
-  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103455
-  template <typename PMF>
-  void operator->*(PMF) const = delete;
-
-  // Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr operator T*() const { return GetForExtraction(); }
-  template <typename U>
-  PA_ALWAYS_INLINE constexpr explicit operator U*() const {
-    // This operator may be invoked from static_cast, meaning the types may not
-    // be implicitly convertible, hence the need for static_cast here.
-    return static_cast<U*>(GetForExtraction());
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator++() {
-    wrapped_ptr_ = Impl::Advance(wrapped_ptr_, 1);
-    return *this;
-  }
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator--() {
-    wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, 1);
-    return *this;
-  }
-  PA_ALWAYS_INLINE constexpr raw_ptr operator++(int /* post_increment */) {
-    raw_ptr result = *this;
-    ++(*this);
-    return result;
-  }
-  PA_ALWAYS_INLINE constexpr raw_ptr operator--(int /* post_decrement */) {
-    raw_ptr result = *this;
-    --(*this);
-    return result;
-  }
-  template <
-      typename Z,
-      typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator+=(Z delta_elems) {
-    wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems);
-    return *this;
-  }
-  template <
-      typename Z,
-      typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
-  PA_ALWAYS_INLINE constexpr raw_ptr& operator-=(Z delta_elems) {
-    wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, delta_elems);
-    return *this;
-  }
-
-  // Do not disable operator+() and operator-().
-  // They provide OOB checks, which prevent an arbitrary value from being
-  // assigned to raw_ptr and thus keep BRP from modifying arbitrary memory
-  // under the mistaken assumption that it's a ref-count. Keep them enabled;
-  // if += or -= is disabled, the operation will still be blocked at that
-  // point. In the absence of operators +/-, the compiler is free to
-  // implicitly convert to the underlying T* representation and perform
-  // ordinary pointer arithmetic, thus defeating the purpose of disabling
-  // them.
-  template <typename Z>
-  PA_ALWAYS_INLINE friend constexpr raw_ptr operator+(const raw_ptr& p,
-                                                      Z delta_elems) {
-    raw_ptr result = p;
-    return result += delta_elems;
-  }
-  template <typename Z>
-  PA_ALWAYS_INLINE friend constexpr raw_ptr operator-(const raw_ptr& p,
-                                                      Z delta_elems) {
-    raw_ptr result = p;
-    return result -= delta_elems;
-  }
-
-  PA_ALWAYS_INLINE friend constexpr ptrdiff_t operator-(const raw_ptr& p1,
-                                                        const raw_ptr& p2) {
-    return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_);
-  }
-  PA_ALWAYS_INLINE friend constexpr ptrdiff_t operator-(T* p1,
-                                                        const raw_ptr& p2) {
-    return Impl::GetDeltaElems(p1, p2.wrapped_ptr_);
-  }
-  PA_ALWAYS_INLINE friend constexpr ptrdiff_t operator-(const raw_ptr& p1,
-                                                        T* p2) {
-    return Impl::GetDeltaElems(p1.wrapped_ptr_, p2);
-  }
-
-  // Stop referencing the underlying pointer and free its memory. Compared to
-  // a raw delete call, this avoids the raw_ptr being temporarily dangling
-  // during the free operation, which would otherwise take the slower path
-  // that involves quarantine.
-  PA_ALWAYS_INLINE constexpr void ClearAndDelete() noexcept {
-    delete GetForExtractionAndReset();
-  }
-  PA_ALWAYS_INLINE constexpr void ClearAndDeleteArray() noexcept {
-    delete[] GetForExtractionAndReset();
-  }
-
-  // Clear the underlying pointer and return another raw_ptr instance
-  // that is allowed to dangle.
-  // This can be useful in cases such as:
-  // ```
-  //  ptr.ExtractAsDangling()->SelfDestroy();
-  // ```
-  // ```
-  //  c_style_api_do_something_and_destroy(ptr.ExtractAsDangling());
-  // ```
-  // NOTE, avoid using this method as it indicates an error-prone memory
-  // ownership pattern. If possible, use smart pointers like std::unique_ptr<>
-  // instead of raw_ptr<>.
-  // If you have to use it, avoid saving the return value in a long-lived
-  // variable (or worse, a field)! It's meant to be used as a temporary, to be
-  // passed into a cleanup & freeing function, and destructed at the end of the
-  // statement.
-  PA_ALWAYS_INLINE constexpr DanglingType ExtractAsDangling() noexcept {
-    DanglingType res(std::move(*this));
-    // Not all implementations clear the source pointer on move. Furthermore,
-    // even for implementations that do, cross-kind conversions (that add
-    // kMayDangle) fall back to a copy, instead of a move. So do it here just
-    // in case. It should be cheap.
-    operator=(nullptr);
-    return res;
-  }
-
-  // Comparison operators between raw_ptr and raw_ptr<U>/U*/std::nullptr_t.
-  // Strictly speaking, it is not necessary to provide these: the compiler can
-  // use the conversion operator implicitly to allow comparisons to fall back to
-  // comparisons between raw pointers. However, `operator T*`/`operator U*` may
-  // perform safety checks with a higher runtime cost, so to avoid this, provide
-  // explicit comparison operators for all combinations of parameters.
-
-  // Comparisons between `raw_ptr`s. This unusual declaration and separate
-  // definition below is because `GetForComparison()` is a private method. The
-  // more conventional approach of defining a comparison operator between
-  // `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work,
-  // because a comparison operator defined inline would not be allowed to call
-  // `raw_ptr<U>`'s private `GetForComparison()` method.
-  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
-  friend bool operator==(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
-  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
-  friend bool operator!=(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
-  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
-  friend bool operator<(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
-  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
-  friend bool operator>(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
-  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
-  friend bool operator<=(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
-  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
-  friend bool operator>=(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
-
-  // Comparisons with U*. These operators also handle the case where the RHS is
-  // T*.
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator==(const raw_ptr& lhs, U* rhs) {
-    return lhs.GetForComparison() == rhs;
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator!=(const raw_ptr& lhs, U* rhs) {
-    return !(lhs == rhs);
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator==(U* lhs, const raw_ptr& rhs) {
-    return rhs == lhs;  // Reverse order to call the operator above.
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator!=(U* lhs, const raw_ptr& rhs) {
-    return rhs != lhs;  // Reverse order to call the operator above.
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator<(const raw_ptr& lhs, U* rhs) {
-    return lhs.GetForComparison() < rhs;
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator<=(const raw_ptr& lhs, U* rhs) {
-    return lhs.GetForComparison() <= rhs;
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator>(const raw_ptr& lhs, U* rhs) {
-    return lhs.GetForComparison() > rhs;
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator>=(const raw_ptr& lhs, U* rhs) {
-    return lhs.GetForComparison() >= rhs;
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator<(U* lhs, const raw_ptr& rhs) {
-    return lhs < rhs.GetForComparison();
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator<=(U* lhs, const raw_ptr& rhs) {
-    return lhs <= rhs.GetForComparison();
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator>(U* lhs, const raw_ptr& rhs) {
-    return lhs > rhs.GetForComparison();
-  }
-  template <typename U>
-  PA_ALWAYS_INLINE friend bool operator>=(U* lhs, const raw_ptr& rhs) {
-    return lhs >= rhs.GetForComparison();
-  }
-
-  // Comparisons with `std::nullptr_t`.
-  PA_ALWAYS_INLINE friend bool operator==(const raw_ptr& lhs, std::nullptr_t) {
-    return !lhs;
-  }
-  PA_ALWAYS_INLINE friend bool operator!=(const raw_ptr& lhs, std::nullptr_t) {
-    return !!lhs;  // Use !! otherwise the costly implicit cast will be used.
-  }
-  PA_ALWAYS_INLINE friend bool operator==(std::nullptr_t, const raw_ptr& rhs) {
-    return !rhs;
-  }
-  PA_ALWAYS_INLINE friend bool operator!=(std::nullptr_t, const raw_ptr& rhs) {
-    return !!rhs;  // Use !! otherwise the costly implicit cast will be used.
-  }
-
-  PA_ALWAYS_INLINE friend constexpr void swap(raw_ptr& lhs,
-                                              raw_ptr& rhs) noexcept {
-    Impl::IncrementSwapCountForTest();
-    std::swap(lhs.wrapped_ptr_, rhs.wrapped_ptr_);
-  }
-
-  PA_ALWAYS_INLINE void ReportIfDangling() const noexcept {
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    Impl::ReportIfDangling(wrapped_ptr_);
-#endif
-  }
-
- private:
-  // This getter is meant for situations where the pointer is meant to be
-  // dereferenced. It is allowed to crash on nullptr (it may or may not),
-  // because it knows that the caller will crash on nullptr.
-  PA_ALWAYS_INLINE constexpr T* GetForDereference() const {
-    return Impl::SafelyUnwrapPtrForDereference(wrapped_ptr_);
-  }
-  // This getter is meant for situations where the raw pointer is meant to be
-  // extracted outside of this class, but not necessarily with an intention to
-  // dereference. It mustn't crash on nullptr.
-  PA_ALWAYS_INLINE constexpr T* GetForExtraction() const {
-    return Impl::SafelyUnwrapPtrForExtraction(wrapped_ptr_);
-  }
-  // This getter is meant *only* for situations where the pointer is meant to be
-  // compared (guaranteeing no dereference or extraction outside of this class).
-  // Any verifications can and should be skipped for performance reasons.
-  PA_ALWAYS_INLINE constexpr T* GetForComparison() const {
-    return Impl::UnsafelyUnwrapPtrForComparison(wrapped_ptr_);
-  }
-
-  PA_ALWAYS_INLINE constexpr T* GetForExtractionAndReset() {
-    T* ptr = GetForExtraction();
-    operator=(nullptr);
-    return ptr;
-  }
-
-  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
-  // #union, #global-scope, #constexpr-ctor-field-initializer
-  RAW_PTR_EXCLUSION T* wrapped_ptr_;
-
-  template <typename U, base::RawPtrTraits R>
-  friend class raw_ptr;
-};
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs,
-                                 const raw_ptr<V, Traits2>& rhs) {
-  return lhs.GetForComparison() == rhs.GetForComparison();
-}
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs,
-                                 const raw_ptr<V, Traits2>& rhs) {
-  return !(lhs == rhs);
-}
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs,
-                                const raw_ptr<V, Traits2>& rhs) {
-  return lhs.GetForComparison() < rhs.GetForComparison();
-}
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs,
-                                const raw_ptr<V, Traits2>& rhs) {
-  return lhs.GetForComparison() > rhs.GetForComparison();
-}
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs,
-                                 const raw_ptr<V, Traits2>& rhs) {
-  return lhs.GetForComparison() <= rhs.GetForComparison();
-}
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
-                                 const raw_ptr<V, Traits2>& rhs) {
-  return lhs.GetForComparison() >= rhs.GetForComparison();
-}
-
-template <typename T>
-struct IsRawPtr : std::false_type {};
-
-template <typename T, RawPtrTraits Traits>
-struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {};
-
-template <typename T>
-inline constexpr bool IsRawPtrV = IsRawPtr<T>::value;
-
-template <typename T>
-inline constexpr bool IsRawPtrMayDangleV = false;
-
-template <typename T, RawPtrTraits Traits>
-inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> =
-    raw_ptr_traits::Contains(Traits, RawPtrTraits::kMayDangle);
-
-// Template helpers for working with T* or raw_ptr<T>.
-template <typename T>
-struct IsPointer : std::false_type {};
-
-template <typename T>
-struct IsPointer<T*> : std::true_type {};
-
-template <typename T, RawPtrTraits Traits>
-struct IsPointer<raw_ptr<T, Traits>> : std::true_type {};
-
-template <typename T>
-inline constexpr bool IsPointerV = IsPointer<T>::value;
-
-template <typename T>
-struct RemovePointer {
-  using type = T;
-};
-
-template <typename T>
-struct RemovePointer<T*> {
-  using type = T;
-};
-
-template <typename T, RawPtrTraits Traits>
-struct RemovePointer<raw_ptr<T, Traits>> {
-  using type = T;
-};
-
-template <typename T>
-using RemovePointerT = typename RemovePointer<T>::type;
-
-struct RawPtrGlobalSettings {
-  static void EnableExperimentalAsh() {
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    internal::BackupRefPtrGlobalSettings::EnableExperimentalAsh();
-#endif
-  }
-
-  static void DisableExperimentalAshForTest() {
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-    internal::BackupRefPtrGlobalSettings::DisableExperimentalAshForTest();
-#endif
-  }
-};
-
-}  // namespace base
-
-using base::raw_ptr;
-
-// DisableDanglingPtrDetection option for raw_ptr annotates
-// "intentional-and-safe" dangling pointers. It is meant to be used at the
-// margin, only if there is no better way to re-architect the code.
-//
-// Usage:
-// raw_ptr<T, DisableDanglingPtrDetection> dangling_ptr;
-//
-// When using it, please provide a justification about what guarantees that it
-// will never be dereferenced after becoming dangling.
-constexpr auto DisableDanglingPtrDetection = base::RawPtrTraits::kMayDangle;
-
-// See `docs/dangling_ptr.md`
-// Annotates known dangling raw_ptr. Those haven't been triaged yet. All the
-// occurrences are meant to be removed. See https://crbug.com/1291138.
-constexpr auto DanglingUntriaged = base::RawPtrTraits::kMayDangle;
-
-// Unlike DanglingUntriaged, this annotates raw_ptrs that are known to
-// dangle only occasionally on the CQ.
-//
-// These were found from CQ runs and analysed in this dashboard:
-// https://docs.google.com/spreadsheets/d/1k12PQOG4y1-UEV9xDfP1F8FSk4cVFywafEYHmzFubJ8/
-//
-// This is not meant to be added manually. You can ignore this flag.
-constexpr auto FlakyDanglingUntriaged = base::RawPtrTraits::kMayDangle;
-
-// Dangling raw_ptr that is more likely to cause UAF: its memory was freed in
-// one task, and the raw_ptr was released in a different one.
-//
-// This is not meant to be added manually. You can ignore this flag.
-constexpr auto AcrossTasksDanglingUntriaged = base::RawPtrTraits::kMayDangle;
-
-// The use of pointer arithmetic with raw_ptr is strongly discouraged and
-// disabled by default. Usually a container like span<> should be used
-// instead of the raw_ptr.
-constexpr auto AllowPtrArithmetic = base::RawPtrTraits::kAllowPtrArithmetic;
-
-// Temporary flag for `raw_ptr` / `raw_ref`. This is used by finch experiments
-// to differentiate pointers added recently for the ChromeOS ash rewrite.
-//
-// See launch plan:
-// https://docs.google.com/document/d/105OVhNl-2lrfWElQSk5BXYv-nLynfxUrbC4l8cZ0CoU/edit
-//
-// This is not meant to be added manually. You can ignore this flag.
-constexpr auto ExperimentalAsh = base::RawPtrTraits::kExperimentalAsh;
-
-// This flag is used to tag a subset of dangling pointers. Similarly to
-// DanglingUntriaged, those pointers are known to be dangling. However, we also
-// detected that those raw_ptr's were never released (either by calling
-// raw_ptr's destructor or by resetting its value), which can ultimately put
-// pressure on the BRP quarantine.
-//
-// This is not meant to be added manually. You can ignore this flag.
-constexpr auto LeakedDanglingUntriaged = base::RawPtrTraits::kMayDangle;
-
-// Public version used for callback arguments when it is known that they might
-// receive dangling pointers. In any other case, please
-// use one of:
-// - raw_ptr<T, DanglingUntriaged>
-// - raw_ptr<T, DisableDanglingPtrDetection>
-template <typename T, base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty>
-using MayBeDangling = base::raw_ptr<T, Traits | base::RawPtrTraits::kMayDangle>;
-
-namespace std {
-
-// Override so set/map lookups do not create extra raw_ptr. This also allows
-// dangling pointers to be used for lookup.
-template <typename T, base::RawPtrTraits Traits>
-struct less<raw_ptr<T, Traits>> {
-  using Impl = typename raw_ptr<T, Traits>::Impl;
-  using is_transparent = void;
-
-  bool operator()(const raw_ptr<T, Traits>& lhs,
-                  const raw_ptr<T, Traits>& rhs) const {
-    Impl::IncrementLessCountForTest();
-    return lhs < rhs;
-  }
-
-  bool operator()(T* lhs, const raw_ptr<T, Traits>& rhs) const {
-    Impl::IncrementLessCountForTest();
-    return lhs < rhs;
-  }
-
-  bool operator()(const raw_ptr<T, Traits>& lhs, T* rhs) const {
-    Impl::IncrementLessCountForTest();
-    return lhs < rhs;
-  }
-};
-
-// Define for cases where raw_ptr<T> holds a pointer to an array of type T.
-// This is consistent with definition of std::iterator_traits<T*>.
-// Algorithms like std::binary_search need that.
-template <typename T, base::RawPtrTraits Traits>
-struct iterator_traits<raw_ptr<T, Traits>> {
-  using difference_type = ptrdiff_t;
-  using value_type = std::remove_cv_t<T>;
-  using pointer = T*;
-  using reference = T&;
-  using iterator_category = std::random_access_iterator_tag;
-};
-
-// Specialize std::pointer_traits. The latter is required to obtain the
-// underlying raw pointer in the std::to_address(pointer) overload.
-// Implementing the pointer_traits is the standard blessed way to customize
-// `std::to_address(pointer)` in C++20 [1].
-//
-// [1] https://wg21.link/pointer.traits.optmem
-
-template <typename T, ::base::RawPtrTraits Traits>
-struct pointer_traits<::raw_ptr<T, Traits>> {
-  using pointer = ::raw_ptr<T, Traits>;
-  using element_type = T;
-  using difference_type = ptrdiff_t;
-
-  template <typename U>
-  using rebind = ::raw_ptr<U, Traits>;
-
-  static constexpr pointer pointer_to(element_type& r) noexcept {
-    return pointer(&r);
-  }
-
-  static constexpr element_type* to_address(pointer p) noexcept {
-    return p.get();
-  }
-};
-
-}  // namespace std
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_
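The header removed above defines the public raw_ptr surface used throughout Chromium. As a quick orientation aid, here is a hedged usage sketch, not part of the diff; it assumes a Chromium checkout where `base/memory/raw_ptr.h` re-exports this header, and `Widget` and `Example()` are illustrative names only.

#include "base/memory/raw_ptr.h"

struct Widget {
  int value = 0;
};

void Example() {
  Widget* owner = new Widget();
  raw_ptr<Widget> ptr = owner;  // Implicit construction from T* (see above).
  ptr->value = 42;              // operator-> goes through GetForDereference().

  // A pointer that is expected to dangle must say so via its traits.
  raw_ptr<Widget, DisableDanglingPtrDetection> maybe_dangling = owner;

  // Free the pointee without letting `ptr` dangle during the free itself.
  ptr.ClearAndDelete();

  // `maybe_dangling` now dangles, which is exactly what its trait permits.
  maybe_dangling = nullptr;
}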
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.cc b/base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.cc
deleted file mode 100644
index fb30593..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.h"
-
-#include <sanitizer/asan_interface.h>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-
-namespace base::internal {
-
-PA_NO_SANITIZE("address")
-bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr) {
-  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-
-  // Normally, we probe the first byte of an object, but in cases of pointer
-  // arithmetic, we may be probing subsequent bytes, including the legal
-  // "end + 1" position.
-  //
-  // Alas, ASAN will claim an unmapped page is unpoisoned, so willfully ignore
-  // the first address of a page, since "end + 1" of an object allocated
-  // exactly up to a page boundary will SEGV on probe. This will cause false
-  // negatives for pointers that happen to be page aligned, which is
-  // undesirable but necessary for now.
-  //
-  // We minimize the consequences by using the pointer arithmetic flag in
-  // higher levels to conditionalize this suppression.
-  //
-  // TODO(tsepez): this may still fail for a non-accessible but non-null
-  // return from, say, malloc(0) which happens to be page-aligned.
-  //
-  // TODO(tsepez): enforce the pointer arithmetic flag. Until then, we
-  // may fail here if a pointer requires the flag but is lacking it.
-  return is_adjustable_ptr &&
-         ((address & 0x0fff) == 0 ||
-          __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) &&
-         !__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1);
-}
-
-bool LikelySmuggledScalar(const volatile void* ptr) {
-  intptr_t address = reinterpret_cast<intptr_t>(ptr);
-  return address < 0x4000;  // Negative or small positive.
-}
-
-}  // namespace base::internal
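The helpers above support the one-byte probe performed by `ProbeForLowSeverityLifetimeIssue()` in the header removed next. A minimal standalone sketch of the idea follows; it is illustrative only, assumes a build with `-fsanitize=address`, and `ProbeByteUnlessEndOfAllocation` is not a Chromium symbol.

#include <sanitizer/asan_interface.h>

#include <cstdint>

// Illustrative only: under ASAN, reading a single byte of a freed (poisoned)
// region is enough to surface a use-after-free report at the probe site.
void ProbeByteUnlessEndOfAllocation(const volatile void* ptr) {
  if (ptr == nullptr) {
    return;
  }
  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  // If the previous byte is alive but `ptr` itself is poisoned, `ptr` is most
  // likely the legal "end + 1" position of a live allocation, so skip it.
  if (!__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1) &&
      __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) {
    return;
  }
  (void)reinterpret_cast<const volatile uint8_t*>(ptr)[0];
}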
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.h b/base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.h
deleted file mode 100644
index 4e73ea1..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_asan_unowned_impl.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_ASAN_UNOWNED_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_ASAN_UNOWNED_IMPL_H_
-
-#include <stddef.h>
-
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-
-#if !BUILDFLAG(USE_ASAN_UNOWNED_PTR)
-#error "Included under wrong build option"
-#endif
-
-namespace base::internal {
-
-bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr);
-bool LikelySmuggledScalar(const volatile void* ptr);
-
-template <bool IsAdjustablePtr, bool MayDangle>
-struct RawPtrAsanUnownedImpl {
-  // The first two are needed for correctness. The last one isn't technically a
-  // must, but better to set it.
-  static constexpr bool kMustZeroOnInit = true;
-  static constexpr bool kMustZeroOnMove = true;
-  static constexpr bool kMustZeroOnDestruct = true;
-
-  // Wraps a pointer.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
-    return ptr;
-  }
-
-  // Notifies the allocator when a wrapped pointer is being removed or replaced.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
-    }
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function is allowed to crash on nullptr.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
-      T* wrapped_ptr) {
-    // ASAN will catch use of dereferenced ptr without additional probing.
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function must handle nullptr gracefully.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
-      T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
-    }
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, without making an assertion on whether memory was
-  // freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
-      T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // Upcasts the wrapped pointer.
-  template <typename To, typename From>
-  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
-    static_assert(std::is_convertible<From*, To*>::value,
-                  "From must be convertible to To.");
-    // Note, this cast may change the address if upcasting to a base that lies
-    // in the middle of the derived object.
-    return wrapped_ptr;
-  }
-
-  // Advance the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
-    return wrapped_ptr + delta_elems;
-  }
-
-  // Retreat the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
-    return wrapped_ptr - delta_elems;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
-                                                            T* wrapped_ptr2) {
-    return wrapped_ptr1 - wrapped_ptr2;
-  }
-
-  // Returns a copy of a wrapped pointer, without making an assertion on whether
-  // memory was freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  template <typename T>
-  static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) {
-    if (!MayDangle && wrapped_ptr) {
-      const volatile void* probe_ptr =
-          reinterpret_cast<const volatile void*>(wrapped_ptr);
-      if (!LikelySmuggledScalar(probe_ptr) &&
-          !EndOfAliveAllocation(probe_ptr, IsAdjustablePtr)) {
-        reinterpret_cast<const volatile uint8_t*>(probe_ptr)[0];
-      }
-    }
-  }
-
-  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
-  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
-    return ptr;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
-      T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // This is for accounting only, used by unit tests.
-  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
-  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
-  PA_ALWAYS_INLINE static constexpr void
-  IncrementPointerToMemberOperatorCountForTest() {}
-};
-
-}  // namespace base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_ASAN_UNOWNED_IMPL_H_
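RawPtrAsanUnownedImpl is one of several interchangeable `Impl` policies that the `raw_ptr` class template dispatches to. As a hedged illustration of that contract (`NoOpImpl` is not a Chromium type; the arithmetic, upcast, and duplication hooks are omitted), a pass-through policy needs little more than the zeroing flags plus the wrap, release, and unwrap hooks:

// Illustrative only: the minimal shape of an Impl policy seen by raw_ptr.
struct NoOpImpl {
  static constexpr bool kMustZeroOnInit = false;
  static constexpr bool kMustZeroOnMove = false;
  static constexpr bool kMustZeroOnDestruct = false;

  template <typename T>
  static constexpr T* WrapRawPtr(T* ptr) { return ptr; }

  template <typename T>
  static constexpr void ReleaseWrappedPtr(T* /*wrapped_ptr*/) {}

  template <typename T>
  static constexpr T* SafelyUnwrapPtrForDereference(T* ptr) { return ptr; }

  template <typename T>
  static constexpr T* SafelyUnwrapPtrForExtraction(T* ptr) { return ptr; }

  template <typename T>
  static constexpr T* UnsafelyUnwrapPtrForComparison(T* ptr) { return ptr; }
};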
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.cc b/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.cc
deleted file mode 100644
index 6deac74..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h"
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_ref_count.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-
-namespace base::internal {
-
-template <bool AllowDangling, bool ExperimentalAsh>
-void RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::AcquireInternal(
-    uintptr_t address) {
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-  PA_BASE_CHECK(UseBrp(address));
-#endif
-  uintptr_t slot_start =
-      partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
-  if constexpr (AllowDangling) {
-    partition_alloc::internal::PartitionRefCountPointer(slot_start)
-        ->AcquireFromUnprotectedPtr();
-  } else {
-    partition_alloc::internal::PartitionRefCountPointer(slot_start)->Acquire();
-  }
-}
-
-template <bool AllowDangling, bool ExperimentalAsh>
-void RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::ReleaseInternal(
-    uintptr_t address) {
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-  PA_BASE_CHECK(UseBrp(address));
-#endif
-  uintptr_t slot_start =
-      partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
-  if constexpr (AllowDangling) {
-    if (partition_alloc::internal::PartitionRefCountPointer(slot_start)
-            ->ReleaseFromUnprotectedPtr()) {
-      partition_alloc::internal::PartitionAllocFreeForRefCounting(slot_start);
-    }
-  } else {
-    if (partition_alloc::internal::PartitionRefCountPointer(slot_start)
-            ->Release()) {
-      partition_alloc::internal::PartitionAllocFreeForRefCounting(slot_start);
-    }
-  }
-}
-
-template <bool AllowDangling, bool ExperimentalAsh>
-void RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::
-    ReportIfDanglingInternal(uintptr_t address) {
-  if (partition_alloc::internal::IsUnretainedDanglingRawPtrCheckEnabled()) {
-    if (IsSupportedAndNotNull(address)) {
-      uintptr_t slot_start =
-          partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
-      partition_alloc::internal::PartitionRefCountPointer(slot_start)
-          ->ReportIfDangling();
-    }
-  }
-}
-
-// static
-template <bool AllowDangling, bool ExperimentalAsh>
-bool RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::
-    CheckPointerWithinSameAlloc(uintptr_t before_addr,
-                                uintptr_t after_addr,
-                                size_t type_size) {
-  partition_alloc::internal::PtrPosWithinAlloc ptr_pos_within_alloc =
-      partition_alloc::internal::IsPtrWithinSameAlloc(before_addr, after_addr,
-                                                      type_size);
-  // No need to check that |new_ptr| is in the same pool, as
-  // IsPtrWithinSameAlloc() checks that it's within the same allocation, so it
-  // must be in the same pool.
-  PA_BASE_CHECK(ptr_pos_within_alloc !=
-                partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
-
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  return ptr_pos_within_alloc ==
-         partition_alloc::internal::PtrPosWithinAlloc::kAllocEnd;
-#else
-  return false;
-#endif
-}
-
-template <bool AllowDangling, bool ExperimentalAsh>
-bool RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::IsPointeeAlive(
-    uintptr_t address) {
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-  PA_BASE_CHECK(UseBrp(address));
-#endif
-  uintptr_t slot_start =
-      partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
-  return partition_alloc::internal::PartitionRefCountPointer(slot_start)
-      ->IsAlive();
-}
-
-// Explicitly instantiates the four BackupRefPtr variants in the .cc. This
-// ensures the definitions not visible from the .h are available in the binary.
-template struct RawPtrBackupRefImpl</*AllowDangling=*/false,
-                                    /*ExperimentalAsh=*/false>;
-template struct RawPtrBackupRefImpl</*AllowDangling=*/false,
-                                    /*ExperimentalAsh=*/true>;
-template struct RawPtrBackupRefImpl</*AllowDangling=*/true,
-                                    /*ExperimentalAsh=*/false>;
-template struct RawPtrBackupRefImpl</*AllowDangling=*/true,
-                                    /*ExperimentalAsh=*/true>;
-
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) {
-  if (partition_alloc::internal::IsManagedByDirectMap(address)) {
-    uintptr_t reservation_start =
-        partition_alloc::internal::GetDirectMapReservationStart(address);
-    PA_BASE_CHECK(address - reservation_start >=
-                  partition_alloc::PartitionPageSize());
-  } else {
-    PA_BASE_CHECK(partition_alloc::internal::IsManagedByNormalBuckets(address));
-    PA_BASE_CHECK(address % partition_alloc::kSuperPageSize >=
-                  partition_alloc::PartitionPageSize());
-  }
-}
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
-        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-
-}  // namespace base::internal
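The acquire/release pair above is what gives BackupRefPtr its use-after-free protection: a freed slot is only handed back to the allocator once the last raw_ptr referencing it is gone. A heavily simplified model of that lifecycle, with illustrative names that are not Chromium types:

#include <cstddef>

// Illustrative only: one reference count per allocation slot.
struct SlotRefCount {
  size_t raw_ptr_refs = 0;
  bool freed_by_application = false;

  void Acquire() { ++raw_ptr_refs; }

  // Mirrors the pattern in ReleaseInternal() above: the return value tells the
  // caller whether the quarantined slot may now be returned to the allocator.
  bool Release() {
    --raw_ptr_refs;
    return freed_by_application && raw_ptr_refs == 0;
  }

  // Called when the application frees the allocation; true means no raw_ptr
  // still references the slot, so it can be reused right away.
  bool ReleaseFromApplication() {
    freed_by_application = true;
    return raw_ptr_refs == 0;
  }
};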
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h b/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h
deleted file mode 100644
index dee1d55..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h
+++ /dev/null
@@ -1,510 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
-
-#include <stddef.h>
-
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/chromeos_buildflags.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
-#endif
-
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-#error "Included under wrong build option"
-#endif
-
-namespace base::internal {
-
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-PA_COMPONENT_EXPORT(RAW_PTR)
-void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address);
-#endif
-
-class BackupRefPtrGlobalSettings {
- public:
-  static void EnableExperimentalAsh() {
-    PA_CHECK(!experimental_ash_raw_ptr_enabled_);
-    experimental_ash_raw_ptr_enabled_ = true;
-  }
-
-  static void DisableExperimentalAshForTest() {
-    PA_CHECK(experimental_ash_raw_ptr_enabled_);
-    experimental_ash_raw_ptr_enabled_ = false;
-  }
-
-  PA_ALWAYS_INLINE static bool IsExperimentalAshEnabled() {
-    return experimental_ash_raw_ptr_enabled_;
-  }
-
- private:
-  // Write-once settings that should be in their own cacheline, as they're
-  // accessed frequently on a hot path.
-  PA_ALIGNAS(partition_alloc::internal::kPartitionCachelineSize)
-  static inline bool experimental_ash_raw_ptr_enabled_ = false;
-  [[maybe_unused]] char
-      padding_[partition_alloc::internal::kPartitionCachelineSize - 1];
-};
-
-// Note that `RawPtrBackupRefImpl` itself is not thread-safe. If multiple
-// threads modify the same raw_ptr object without synchronization, a data race
-// will occur.
-template <bool AllowDangling = false, bool ExperimentalAsh = false>
-struct RawPtrBackupRefImpl {
-  // These are needed for correctness, or else we may end up manipulating
-  // ref-count where we shouldn't, thus affecting the BRP's integrity. Unlike
-  // the first two, kMustZeroOnDestruct wouldn't be needed if raw_ptr was used
-  // correctly, but we already caught cases where a value is written after
-  // destruction.
-  static constexpr bool kMustZeroOnInit = true;
-  static constexpr bool kMustZeroOnMove = true;
-  static constexpr bool kMustZeroOnDestruct = true;
-
- private:
-  PA_ALWAYS_INLINE static bool UseBrp(uintptr_t address) {
-    // Pointers annotated with ExperimentalAsh are subject to a separate,
-    // Ash-related experiment.
-    //
-    // Note that this can be enabled only before the BRP partition is created,
-    // so it's impossible for this function to change its answer for a specific
-    // pointer. (This relies on the original partition to not be BRP-enabled.)
-    if constexpr (ExperimentalAsh) {
-#if BUILDFLAG(PA_IS_CHROMEOS_ASH)
-      if (!BackupRefPtrGlobalSettings::IsExperimentalAshEnabled()) {
-        return false;
-      }
-#endif
-    }
-    return partition_alloc::IsManagedByPartitionAllocBRPPool(address);
-  }
-
-  PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
-    // There are many situations where the compiler can prove that
-    // `ReleaseWrappedPtr` is called on a value that is always nullptr, but the
-    // way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't
-    // prove that nullptr is not managed by PartitionAlloc; and so the compiler
-    // has to emit a useless check and dead code. To avoid that without making
-    // the runtime check slower, tell the compiler to skip
-    // `IsManagedByPartitionAllocBRPPool` when it can statically determine that
-    // address is nullptr.
-#if PA_HAS_BUILTIN(__builtin_constant_p)
-    if (__builtin_constant_p(address == 0) && (address == 0)) {
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-      PA_BASE_CHECK(
-          !partition_alloc::IsManagedByPartitionAllocBRPPool(address));
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
-        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-      return false;
-    }
-#endif  // PA_HAS_BUILTIN(__builtin_constant_p)
-
-    // This covers the nullptr case, as address 0 is never in any
-    // PartitionAlloc pool.
-    bool use_brp = UseBrp(address);
-
-    // There may be pointers immediately after the allocation, e.g.
-    //   {
-    //     // Assume this allocation happens outside of PartitionAlloc.
-    //     raw_ptr<T> ptr = new T[20];
-    //     for (size_t i = 0; i < 20; i ++) { ptr++; }
-    //   }
-    //
-    // Such pointers are *not* at risk of accidentally falling into BRP pool,
-    // because:
-    // 1) On 64-bit systems, BRP pool is preceded by a forbidden region.
-    // 2) On 32-bit systems, the guard pages and metadata of super pages in BRP
-    //    pool aren't considered to be part of that pool.
-    //
-    // This allows us to make a stronger assertion that if
-    // IsManagedByPartitionAllocBRPPool returns true for a valid pointer,
-    // it must be at least partition page away from the beginning of a super
-    // page.
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-    if (use_brp) {
-      CheckThatAddressIsntWithinFirstPartitionPage(address);
-    }
-#endif
-
-    return use_brp;
-  }
-
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  // Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed by
-  // one byte.
-#if defined(ARCH_CPU_X86_64)
-  // Bit 63 is the only pointer bit that will work as the poison bit across both
-  // LAM48 and LAM57. It also works when all unused linear address bits are
-  // checked for canonicality.
-  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 63;
-#else
-  // Avoid ARM's Top-Byte Ignore.
-  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 55;
-#endif
-
-  template <typename T>
-  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
-    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
-                                ~OOB_POISON_BIT);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) {
-    return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
-           OOB_POISON_BIT;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) {
-    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
-                                OOB_POISON_BIT);
-  }
-#else   // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  template <typename T>
-  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
-    return ptr;
-  }
-#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-
- public:
-  // Wraps a pointer.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return ptr;
-    }
-    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
-    if (IsSupportedAndNotNull(address)) {
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-      PA_BASE_CHECK(ptr != nullptr);
-#endif
-      AcquireInternal(address);
-    } else {
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-#if PA_HAS_BUILTIN(__builtin_constant_p)
-      // Similarly to `IsSupportedAndNotNull` above, elide the
-      // `BanSuperPageFromBRPPool` call if the compiler can prove that `address`
-      // is zero since PA won't be able to map anything at that address anyway.
-      bool known_constant_zero =
-          __builtin_constant_p(address == 0) && (address == 0);
-#else   // PA_HAS_BUILTIN(__builtin_constant_p)
-      bool known_constant_zero = false;
-#endif  // PA_HAS_BUILTIN(__builtin_constant_p)
-
-      if (!known_constant_zero) {
-        partition_alloc::internal::AddressPoolManagerBitmap::
-            BanSuperPageFromBRPPool(address);
-      }
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-    }
-
-    return ptr;
-  }
-
-  // Notifies the allocator when a wrapped pointer is being removed or replaced.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return;
-    }
-    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
-    if (IsSupportedAndNotNull(address)) {
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-      PA_BASE_CHECK(wrapped_ptr != nullptr);
-#endif
-      ReleaseInternal(address);
-    }
-    // We are unable to counteract BanSuperPageFromBRPPool(), called from
-    // WrapRawPtr(). We only use one bit per super-page and thus can't tell if
-    // there's more than one associated raw_ptr<T> at a given time. The risk of
-    // exhausting the entire address space is minuscule; therefore, we couldn't
-    // resist the perf gain of a single relaxed store (in the above-mentioned
-    // function) over the much more expensive two CAS operations we'd have to
-    // use if we were to un-ban a super-page.
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function is allowed to crash on nullptr.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
-      T* wrapped_ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr;
-    }
-#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-    PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
-#endif
-    uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
-    if (IsSupportedAndNotNull(address)) {
-      PA_BASE_CHECK(wrapped_ptr != nullptr);
-      PA_BASE_CHECK(IsPointeeAlive(address));
-    }
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
-        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function must handle nullptr gracefully.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
-      T* wrapped_ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr;
-    }
-    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-    // Some code uses invalid pointer values as indicators, so those values must
-    // be passed through unchanged during extraction. The following check will
-    // pass invalid values through if those values do not fall within the BRP
-    // pool after being unpoisoned.
-    if (!IsSupportedAndNotNull(partition_alloc::UntagPtr(unpoisoned_ptr))) {
-      return wrapped_ptr;
-    }
-    // Poison-based OOB checks do not extend to extracted pointers. The
-    // alternative of retaining poison on extracted pointers could introduce new
-    // OOB conditions, e.g., in code that extracts an end-of-allocation pointer
-    // for use in a loop termination condition. The poison bit would make that
-    // pointer appear to reference a very high address.
-#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-    return unpoisoned_ptr;
-  }
-
-  // Unwraps the pointer, without making an assertion on whether memory was
-  // freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
-      T* wrapped_ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr;
-    }
-    // This may be used for unwrapping an end-of-allocation pointer to be used
-    // as an endpoint in an iterative algorithm, so this removes the OOB poison
-    // bit.
-    return UnpoisonPtr(wrapped_ptr);
-  }
-
-  // Upcasts the wrapped pointer.
-  template <typename To, typename From>
-  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
-    static_assert(std::is_convertible<From*, To*>::value,
-                  "From must be convertible to To.");
-    // Note, this cast may change the address if upcasting to a base that lies
-    // in the middle of the derived object.
-    return wrapped_ptr;
-  }
-
-  // Verify the pointer stayed in the same slot, and return the poisoned version
-  // of `new_ptr` if OOB poisoning is enabled.
-  template <typename T>
-  PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat(
-      T* unpoisoned_ptr,
-      T* new_ptr) {
-    // In the "before allocation" mode, on 32-bit, we can run into a problem
-    // that the end-of-allocation address could fall outside of
-    // PartitionAlloc's pools, if this is the last slot of the super page,
-    // thus pointing to the guard page. This means the ref-count won't be
-    // decreased when the pointer is released (leak).
-    //
-    // We could possibly solve it in a few different ways:
-    // - Add the trailing guard page to the pool, but we'd have to think very
-    //   hard if this doesn't create another hole.
-    // - Add an address adjustment to "is in pool?" check, similar as the one in
-    //   PartitionAllocGetSlotStartInBRPPool(), but that seems fragile, not to
-    //   mention adding an extra instruction to an inlined hot path.
-    // - Let the leak happen, since it should be a very rare condition.
-    // - Go back to the previous solution of rewrapping the pointer, but that
-    //   had an issue of losing BRP protection in case the pointer ever gets
-    //   shifted back before the end of allocation.
-    //
-    // We decided to cross that bridge once we get there... if we ever get
-    // there. Currently there are no plans to switch back to the "before
-    // allocation" mode.
-    //
-    // This problem doesn't exist in the "previous slot" mode, or any mode that
-    // involves putting extras after the allocation, because the
-    // end-of-allocation address belongs to the same slot.
-    static_assert(BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT));
-
-    // First check if the new address didn't migrate in/out the BRP pool, and
-    // that it lands within the same allocation. An end-of-allocation address is
-    // ok, too, and that may lead to the pointer being poisoned if the relevant
-    // feature is enabled. These checks add a non-trivial cost, but they're
-    // cheaper and more secure than the previous implementation that rewrapped
-    // the pointer (wrapped the new pointer and unwrapped the old one).
-    //
-    // Note, the value of these checks goes beyond OOB protection. They're
-    // important for integrity of the BRP algorithm. Without these, an attacker
-    // could make the pointer point to another allocation, and cause its
-    // ref-count to go to 0 upon this pointer's destruction, even though there
-    // may be another pointer still pointing to it, thus making it lose the BRP
-    // protection prematurely.
-    const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
-    const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
-    // TODO(bartekn): Consider adding support for non-BRP pools too (without
-    // removing the cross-pool migration check).
-    if (IsSupportedAndNotNull(before_addr)) {
-      constexpr size_t size = sizeof(T);
-      [[maybe_unused]] const bool is_end =
-          CheckPointerWithinSameAlloc(before_addr, after_addr, size);
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-      if (is_end) {
-        new_ptr = PoisonOOBPtr(new_ptr);
-      }
-#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-    } else {
-      // Check that the new address didn't migrate into the BRP pool, as it
-      // would result in more pointers pointing to an allocation than its
-      // ref-count reflects.
-      PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
-    }
-    return new_ptr;
-  }
-
-  // Advance the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr + delta_elems;
-    }
-    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
-    return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
-        unpoisoned_ptr, unpoisoned_ptr + delta_elems);
-  }
-
-  // Retreat the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr - delta_elems;
-    }
-    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
-    return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
-        unpoisoned_ptr, unpoisoned_ptr - delta_elems);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
-                                                            T* wrapped_ptr2) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr1 - wrapped_ptr2;
-    }
-
-    T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
-    T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
-#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return unpoisoned_ptr1 - unpoisoned_ptr2;
-    }
-    uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
-    uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
-    // Ensure that both pointers are within the same slot, and pool!
-    // TODO(bartekn): Consider adding support for non-BRP pool too.
-    if (IsSupportedAndNotNull(address1)) {
-      PA_BASE_CHECK(IsSupportedAndNotNull(address2));
-      PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc(
-                        address2, address1, sizeof(T)) !=
-                    partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
-    } else {
-      PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
-    }
-#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
-    return unpoisoned_ptr1 - unpoisoned_ptr2;
-  }
-
-  // Returns a copy of a wrapped pointer, without making an assertion on whether
-  // memory was freed or not.
-  // This method increments the reference count of the allocation slot.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr;
-    }
-    return WrapRawPtr(wrapped_ptr);
-  }
-
-  // Report the current wrapped pointer if pointee isn't alive anymore.
-  template <typename T>
-  PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) {
-    ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
-  }
-
-  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
-  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return ptr;
-    } else {
-      return WrapRawPtr(ptr);
-    }
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
-      T* wrapped_ptr) {
-    if (partition_alloc::internal::base::is_constant_evaluated()) {
-      return wrapped_ptr;
-    } else {
-      return UnpoisonPtr(wrapped_ptr);
-    }
-  }
-
-  // This is for accounting only, used by unit tests.
-  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
-  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
-  PA_ALWAYS_INLINE static constexpr void
-  IncrementPointerToMemberOperatorCountForTest() {}
-
- private:
-  // We've evaluated several strategies (inline nothing, various parts, or
-  // everything in |Wrap()| and |Release()|) using the Speedometer2 benchmark
-  // to measure performance. The best results were obtained when only the
-  // lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
-  // Therefore, we've extracted the rest into the functions below and marked
-  // them as PA_NOINLINE to prevent unintended LTO effects.
-  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal(
-      uintptr_t address);
-  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal(
-      uintptr_t address);
-  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive(
-      uintptr_t address);
-  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal(
-      uintptr_t address);
-
-  // CHECK if `before_addr` and `after_addr` are in the same allocation, for a
-  // given `type_size`.
-  // If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, return whether `after_addr`
-  // points at the end of the allocation.
-  // If BACKUP_REF_PTR_POISON_OOB_PTR is disabled, return false.
-  PA_NOINLINE static PA_COMPONENT_EXPORT(
-      RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr,
-                                                uintptr_t after_addr,
-                                                size_t type_size);
-};
-
-}  // namespace base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
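
A minimal sketch of how the Advance/Retreat paths above are reached through
raw_ptr's arithmetic operators (assumes a BackupRefPtr-enabled build and the
kAllowPtrArithmetic trait; names are illustrative only):

  int* array = new int[4]();
  {
    raw_ptr<int, base::RawPtrTraits::kAllowPtrArithmetic> p = array;
    p += 2;  // Routed to Advance(): the new address is CHECKed to stay within
             // the same slot, and poisoned if it lands one past the end.
    p -= 1;  // Routed to Retreat(): the same in-slot verification applies.
    // Stepping far outside the allocation (e.g. p += 100) would trip the
    // CheckPointerWithinSameAlloc() CHECK instead of producing an OOB pointer.
  }
  delete[] array;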
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_cast.h b/base/allocator/partition_allocator/pointers/raw_ptr_cast.h
deleted file mode 100644
index ad6235d..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_cast.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_CAST_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_CAST_H_
-
-#include <memory>
-
-#include <type_traits>
-#if defined(__has_builtin)
-#define PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST __has_builtin(__builtin_bit_cast)
-#else
-#define PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST 0
-#endif
-
-#if !PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
-#include <cstring>
-#endif
-
-// This header is explicitly exempted from the clang plugin rule in
-// "tools/clang/plugins/FindBadRawPtrPatterns.cpp". You can bypass those checks
-// by performing casts explicitly with the functions defined here.
-namespace base {
-
-// Wrapper for |static_cast<T>(src)|.
-template <typename Dest, typename Source>
-inline constexpr Dest unsafe_raw_ptr_static_cast(Source source) noexcept {
-  return static_cast<Dest>(source);
-}
-
-// Wrapper for |reinterpret_cast<T>(src)|.
-template <typename Dest, typename Source>
-inline constexpr Dest unsafe_raw_ptr_reinterpret_cast(Source source) noexcept {
-  return reinterpret_cast<Dest>(source);
-}
-
-// Wrapper for |std::bit_cast<T>(src)|.
-// Though similar implementations exist in |absl::bit_cast| and
-// |base::bit_cast|, it is important to perform the cast in this file so that
-// it is correctly excluded from the check.
-template <typename Dest, typename Source>
-#if PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
-inline constexpr std::enable_if_t<sizeof(Dest) == sizeof(Source) &&
-                                      std::is_trivially_copyable_v<Dest> &&
-                                      std::is_trivially_copyable_v<Source>,
-                                  Dest>
-#else
-inline std::enable_if_t<sizeof(Dest) == sizeof(Source) &&
-                            std::is_trivially_copyable_v<Dest> &&
-                            std::is_trivially_copyable_v<Source> &&
-                            std::is_default_constructible_v<Dest>,
-                        Dest>
-#endif  // PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
-unsafe_raw_ptr_bit_cast(const Source& source) noexcept {
-  // TODO(mikt): Replace this with |std::bit_cast<T>| when C++20 arrives.
-#if PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
-  return __builtin_bit_cast(Dest, source);
-#else
-  Dest dest;
-  memcpy(std::addressof(dest), std::addressof(source), sizeof(dest));
-  return dest;
-#endif  // PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
-}
-
-}  // namespace base
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_CAST_H_
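
Illustrative usage of the wrappers above; the variables are hypothetical, and
the bit_cast line assumes sizeof(int*) == sizeof(uintptr_t):

  int value = 0;
  void* as_void = &value;
  // Forwards to static_cast, but is exempt from the clang plugin check.
  int* as_int = base::unsafe_raw_ptr_static_cast<int*>(as_void);
  // Forwards to reinterpret_cast.
  uintptr_t as_bits = base::unsafe_raw_ptr_reinterpret_cast<uintptr_t>(as_int);
  // Bitwise copy; requires equal sizes and trivially copyable types.
  uintptr_t same_bits = base::unsafe_raw_ptr_bit_cast<uintptr_t>(as_int);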
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_counting_impl_wrapper_for_test.h b/base/allocator/partition_allocator/pointers/raw_ptr_counting_impl_wrapper_for_test.h
deleted file mode 100644
index 73ef806..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_counting_impl_wrapper_for_test.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_COUNTING_IMPL_WRAPPER_FOR_TEST_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_COUNTING_IMPL_WRAPPER_FOR_TEST_H_
-
-#include <climits>
-
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"
-
-namespace base::test {
-
-// Wraps a raw_ptr/raw_ref implementation with a class of the same interface
-// that provides accounting for test purposes. A raw_ptr/raw_ref that uses it
-// performs extra bookkeeping, e.g. to track the number of times the raw_ptr
-// is wrapped, unwrapped, etc.
-//
-// Test only.
-template <RawPtrTraits Traits>
-struct RawPtrCountingImplWrapperForTest
-    : public raw_ptr_traits::ImplForTraits<Traits> {
-  static_assert(
-      !raw_ptr_traits::Contains(Traits,
-                                RawPtrTraits::kUseCountingWrapperForTest));
-
-  using SuperImpl = typename raw_ptr_traits::ImplForTraits<Traits>;
-
-  static constexpr bool kMustZeroOnInit = SuperImpl::kMustZeroOnInit;
-  static constexpr bool kMustZeroOnMove = SuperImpl::kMustZeroOnMove;
-  static constexpr bool kMustZeroOnDestruct = SuperImpl::kMustZeroOnDestruct;
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
-    ++wrap_raw_ptr_cnt;
-    return SuperImpl::WrapRawPtr(ptr);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* ptr) {
-    ++release_wrapped_ptr_cnt;
-    SuperImpl::ReleaseWrappedPtr(ptr);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
-      T* wrapped_ptr) {
-    ++get_for_dereference_cnt;
-    return SuperImpl::SafelyUnwrapPtrForDereference(wrapped_ptr);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
-      T* wrapped_ptr) {
-    ++get_for_extraction_cnt;
-    return SuperImpl::SafelyUnwrapPtrForExtraction(wrapped_ptr);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
-      T* wrapped_ptr) {
-    ++get_for_comparison_cnt;
-    return SuperImpl::UnsafelyUnwrapPtrForComparison(wrapped_ptr);
-  }
-
-  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {
-    ++wrapped_ptr_swap_cnt;
-  }
-
-  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {
-    ++wrapped_ptr_less_cnt;
-  }
-
-  PA_ALWAYS_INLINE static constexpr void
-  IncrementPointerToMemberOperatorCountForTest() {
-    ++pointer_to_member_operator_cnt;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
-    ++wrap_raw_ptr_for_dup_cnt;
-    return SuperImpl::WrapRawPtrForDuplication(ptr);
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
-      T* wrapped_ptr) {
-    ++get_for_duplication_cnt;
-    return SuperImpl::UnsafelyUnwrapPtrForDuplication(wrapped_ptr);
-  }
-
-  static constexpr void ClearCounters() {
-    wrap_raw_ptr_cnt = 0;
-    release_wrapped_ptr_cnt = 0;
-    get_for_dereference_cnt = 0;
-    get_for_extraction_cnt = 0;
-    get_for_comparison_cnt = 0;
-    wrapped_ptr_swap_cnt = 0;
-    wrapped_ptr_less_cnt = 0;
-    pointer_to_member_operator_cnt = 0;
-    wrap_raw_ptr_for_dup_cnt = 0;
-    get_for_duplication_cnt = 0;
-  }
-
-  static inline int wrap_raw_ptr_cnt = INT_MIN;
-  static inline int release_wrapped_ptr_cnt = INT_MIN;
-  static inline int get_for_dereference_cnt = INT_MIN;
-  static inline int get_for_extraction_cnt = INT_MIN;
-  static inline int get_for_comparison_cnt = INT_MIN;
-  static inline int wrapped_ptr_swap_cnt = INT_MIN;
-  static inline int wrapped_ptr_less_cnt = INT_MIN;
-  static inline int pointer_to_member_operator_cnt = INT_MIN;
-  static inline int wrap_raw_ptr_for_dup_cnt = INT_MIN;
-  static inline int get_for_duplication_cnt = INT_MIN;
-};
-
-}  // namespace base::test
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_COUNTING_IMPL_WRAPPER_FOR_TEST_H_
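
A sketch of how the counters above are consumed in a test, mirroring the
aliases set up in raw_ptr_unittest.cc later in this change:

  using CountingImpl = base::test::RawPtrCountingImplWrapperForTest<
      base::RawPtrTraits::kAllowPtrArithmetic>;
  CountingImpl::ClearCounters();  // Resets the INT_MIN sentinels to 0.

  int value = 0;
  raw_ptr<int, base::RawPtrTraits::kUseCountingWrapperForTest |
                   base::RawPtrTraits::kAllowPtrArithmetic>
      counted = &value;                    // Bumps wrap_raw_ptr_cnt.
  [[maybe_unused]] int read = *counted;    // Bumps get_for_dereference_cnt.

  EXPECT_EQ(1, CountingImpl::wrap_raw_ptr_cnt);
  EXPECT_EQ(1, CountingImpl::get_for_dereference_cnt);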
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h b/base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h
deleted file mode 100644
index dc94d6b..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_EXCLUSION_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_EXCLUSION_H_
-
-// This header will be leakily included even when
-// `!use_partition_alloc`, which is okay because it's a leaf header.
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"  // nogncheck
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "build/build_config.h"
-
-#if PA_HAS_ATTRIBUTE(annotate)
-#if defined(OFFICIAL_BUILD) && !BUILDFLAG(FORCE_ENABLE_RAW_PTR_EXCLUSION)
-// The annotation changed compiler output and increased binary size, so it is
-// disabled for official builds.
-// TODO(crbug.com/1320670): Remove when issue is resolved.
-#define RAW_PTR_EXCLUSION
-#else
-// Marks a field as excluded from the `raw_ptr<T>` usage enforcement via
-// Chromium Clang plugin.
-//
-// Example:
-//     RAW_PTR_EXCLUSION Foo* foo_;
-//
-// `RAW_PTR_EXCLUSION` should be avoided, as exclusions make it significantly
-// easier for any bug involving the pointer to become a security vulnerability.
-// For additional guidance please see the "When to use raw_ptr<T>" section of
-// `//base/memory/raw_ptr.md`.
-#define RAW_PTR_EXCLUSION __attribute__((annotate("raw_ptr_exclusion")))
-#endif
-#else
-#define RAW_PTR_EXCLUSION
-#endif
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_EXCLUSION_H_
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.cc b/base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.cc
deleted file mode 100644
index a3c59ff..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.h"
-
-#include <atomic>
-#include <cstdint>
-
-namespace base::internal {
-
-namespace {
-
-void DefaultWrapPtrHook(uintptr_t address) {}
-void DefaultReleaseWrappedPtrHook(uintptr_t address) {}
-void DefaultUnwrapForDereferenceHook(uintptr_t address) {}
-void DefaultUnwrapForExtractionHook(uintptr_t address) {}
-void DefaultUnwrapForComparisonHook(uintptr_t address) {}
-void DefaultAdvanceHook(uintptr_t old_address, uintptr_t new_address) {}
-void DefaultDuplicateHook(uintptr_t address) {}
-void DefaultWrapPtrForDuplicationHook(uintptr_t address) {}
-void DefaultUnsafelyUnwrapForDuplicationHook(uintptr_t address) {}
-
-constexpr RawPtrHooks default_hooks = {
-    DefaultWrapPtrHook,
-    DefaultReleaseWrappedPtrHook,
-    DefaultUnwrapForDereferenceHook,
-    DefaultUnwrapForExtractionHook,
-    DefaultUnwrapForComparisonHook,
-    DefaultAdvanceHook,
-    DefaultDuplicateHook,
-    DefaultWrapPtrForDuplicationHook,
-    DefaultUnsafelyUnwrapForDuplicationHook,
-};
-
-}  // namespace
-
-std::atomic<const RawPtrHooks*> g_hooks{&default_hooks};
-
-const RawPtrHooks* GetRawPtrHooks() {
-  return g_hooks.load(std::memory_order_relaxed);
-}
-
-void InstallRawPtrHooks(const RawPtrHooks* hooks) {
-  g_hooks.store(hooks, std::memory_order_relaxed);
-}
-
-void ResetRawPtrHooks() {
-  InstallRawPtrHooks(&default_hooks);
-}
-
-}  // namespace base::internal
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.h b/base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.h
deleted file mode 100644
index 33b57d8..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_hookable_impl.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
-
-#include <stddef.h>
-
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-
-#if !BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-#error "Included under wrong build option"
-#endif
-
-namespace base::internal {
-
-struct RawPtrHooks {
-  using WrapPtr = void(uintptr_t address);
-  using ReleaseWrappedPtr = void(uintptr_t address);
-  using SafelyUnwrapForDereference = void(uintptr_t address);
-  using SafelyUnwrapForExtraction = void(uintptr_t address);
-  using UnsafelyUnwrapForComparison = void(uintptr_t address);
-  using Advance = void(uintptr_t old_address, uintptr_t new_address);
-  using Duplicate = void(uintptr_t address);
-  using WrapPtrForDuplication = void(uintptr_t address);
-  using UnsafelyUnwrapForDuplication = void(uintptr_t address);
-
-  WrapPtr* wrap_ptr;
-  ReleaseWrappedPtr* release_wrapped_ptr;
-  SafelyUnwrapForDereference* safely_unwrap_for_dereference;
-  SafelyUnwrapForExtraction* safely_unwrap_for_extraction;
-  UnsafelyUnwrapForComparison* unsafely_unwrap_for_comparison;
-  Advance* advance;
-  Duplicate* duplicate;
-  WrapPtrForDuplication* wrap_ptr_for_duplication;
-  UnsafelyUnwrapForDuplication* unsafely_unwrap_for_duplication;
-};
-
-PA_COMPONENT_EXPORT(RAW_PTR) const RawPtrHooks* GetRawPtrHooks();
-PA_COMPONENT_EXPORT(RAW_PTR) void InstallRawPtrHooks(const RawPtrHooks*);
-PA_COMPONENT_EXPORT(RAW_PTR) void ResetRawPtrHooks();
-
-template <bool EnableHooks>
-struct RawPtrHookableImpl {
-  // Since this Impl is used for BRP-ASan, match BRP as closely as possible.
-  static constexpr bool kMustZeroOnInit = true;
-  static constexpr bool kMustZeroOnMove = true;
-  static constexpr bool kMustZeroOnDestruct = true;
-
-  // Wraps a pointer.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
-      }
-    }
-    return ptr;
-  }
-
-  // Notifies the allocator when a wrapped pointer is being removed or replaced.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
-      }
-    }
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function is allowed to crash on nullptr.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
-      T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->safely_unwrap_for_dereference(
-            reinterpret_cast<uintptr_t>(wrapped_ptr));
-      }
-    }
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function must handle nullptr gracefully.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
-      T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->safely_unwrap_for_extraction(
-            reinterpret_cast<uintptr_t>(wrapped_ptr));
-      }
-    }
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, without making an assertion on whether memory was
-  // freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
-      T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->unsafely_unwrap_for_comparison(
-            reinterpret_cast<uintptr_t>(wrapped_ptr));
-      }
-    }
-    return wrapped_ptr;
-  }
-
-  // Upcasts the wrapped pointer.
-  template <typename To, typename From>
-  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
-    static_assert(std::is_convertible<From*, To*>::value,
-                  "From must be convertible to To.");
-    // Note, this cast may change the address if upcasting to a base that lies
-    // in the middle of the derived object.
-    return wrapped_ptr;
-  }
-
-  // Advance the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->advance(
-            reinterpret_cast<uintptr_t>(wrapped_ptr),
-            reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
-      }
-    }
-    return wrapped_ptr + delta_elems;
-  }
-
-  // Retreat the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->advance(
-            reinterpret_cast<uintptr_t>(wrapped_ptr),
-            reinterpret_cast<uintptr_t>(wrapped_ptr - delta_elems));
-      }
-    }
-    return wrapped_ptr - delta_elems;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
-                                                            T* wrapped_ptr2) {
-    return wrapped_ptr1 - wrapped_ptr2;
-  }
-
-  // Returns a copy of a wrapped pointer, without making an assertion on whether
-  // memory was freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
-      }
-    }
-    return wrapped_ptr;
-  }
-
-  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
-  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->wrap_ptr_for_duplication(
-            reinterpret_cast<uintptr_t>(ptr));
-      }
-    }
-    return ptr;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
-      T* wrapped_ptr) {
-    if (!partition_alloc::internal::base::is_constant_evaluated()) {
-      if (EnableHooks) {
-        GetRawPtrHooks()->unsafely_unwrap_for_duplication(
-            reinterpret_cast<uintptr_t>(wrapped_ptr));
-      }
-    }
-    return wrapped_ptr;
-  }
-
-  // This is for accounting only, used by unit tests.
-  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
-  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
-  PA_ALWAYS_INLINE static constexpr void
-  IncrementPointerToMemberOperatorCountForTest() {}
-};
-
-}  // namespace base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
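
A minimal sketch of installing a custom hook set through the API above
(assumes a USE_HOOKABLE_RAW_PTR build; the counting hook and its names are
illustrative only):

  namespace {
  std::atomic<size_t> g_wrap_count{0};
  void CountWrap(uintptr_t) {
    g_wrap_count.fetch_add(1, std::memory_order_relaxed);
  }
  void NoOp(uintptr_t) {}
  void NoOpAdvance(uintptr_t, uintptr_t) {}
  constexpr base::internal::RawPtrHooks kTestHooks = {
      CountWrap,    // wrap_ptr
      NoOp,         // release_wrapped_ptr
      NoOp,         // safely_unwrap_for_dereference
      NoOp,         // safely_unwrap_for_extraction
      NoOp,         // unsafely_unwrap_for_comparison
      NoOpAdvance,  // advance
      NoOp,         // duplicate
      NoOp,         // wrap_ptr_for_duplication
      NoOp,         // unsafely_unwrap_for_duplication
  };
  }  // namespace

  base::internal::InstallRawPtrHooks(&kTestHooks);
  // ... exercise raw_ptr<T> instances; every wrap bumps g_wrap_count ...
  base::internal::ResetRawPtrHooks();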
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_noop_impl.h b/base/allocator/partition_allocator/pointers/raw_ptr_noop_impl.h
deleted file mode 100644
index dce5b07..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_noop_impl.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_NOOP_IMPL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_NOOP_IMPL_H_
-
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-
-namespace base::internal {
-
-struct RawPtrNoOpImpl {
-  static constexpr bool kMustZeroOnInit = false;
-  static constexpr bool kMustZeroOnMove = false;
-  static constexpr bool kMustZeroOnDestruct = false;
-
-  // Wraps a pointer.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
-    return ptr;
-  }
-
-  // Notifies the allocator when a wrapped pointer is being removed or
-  // replaced.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T*) {}
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function is allowed to crash on nullptr.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
-      T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, while asserting that memory hasn't been freed. The
-  // function must handle nullptr gracefully.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
-      T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // Unwraps the pointer, without making an assertion on whether memory was
-  // freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
-      T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // Upcasts the wrapped pointer.
-  template <typename To, typename From>
-  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
-    static_assert(std::is_convertible<From*, To*>::value,
-                  "From must be convertible to To.");
-    // Note, this cast may change the address if upcasting to a base that lies
-    // in the middle of the derived object.
-    return wrapped_ptr;
-  }
-
-  // Advance the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
-    return wrapped_ptr + delta_elems;
-  }
-
-  // Retreat the wrapped pointer by `delta_elems`.
-  template <
-      typename T,
-      typename Z,
-      typename =
-          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
-  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
-    return wrapped_ptr - delta_elems;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
-                                                            T* wrapped_ptr2) {
-    return wrapped_ptr1 - wrapped_ptr2;
-  }
-
-  // Returns a copy of a wrapped pointer, without making an assertion on
-  // whether memory was freed or not.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
-  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
-    return ptr;
-  }
-
-  template <typename T>
-  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
-      T* wrapped_ptr) {
-    return wrapped_ptr;
-  }
-
-  // This is for accounting only, used by unit tests.
-  PA_ALWAYS_INLINE constexpr static void IncrementSwapCountForTest() {}
-  PA_ALWAYS_INLINE constexpr static void IncrementLessCountForTest() {}
-  PA_ALWAYS_INLINE constexpr static void
-  IncrementPointerToMemberOperatorCountForTest() {}
-};
-
-}  // namespace base::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_NOOP_IMPL_H_
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_test_support.h b/base/allocator/partition_allocator/pointers/raw_ptr_test_support.h
deleted file mode 100644
index df84b10..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_test_support.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
-
-#include "testing/gmock/include/gmock/gmock.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-
-// Struct intended to be used with designated initializers and passed
-// to the `CountersMatch()` matcher.
-//
-// `CountingImplType` isn't used directly; it tells the `CountersMatch`
-// matcher which impl's static members should be checked.
-template <typename CountingImplType>
-struct CountingRawPtrExpectations {
-  absl::optional<int> wrap_raw_ptr_cnt;
-  absl::optional<int> release_wrapped_ptr_cnt;
-  absl::optional<int> get_for_dereference_cnt;
-  absl::optional<int> get_for_extraction_cnt;
-  absl::optional<int> get_for_comparison_cnt;
-  absl::optional<int> wrapped_ptr_swap_cnt;
-  absl::optional<int> wrapped_ptr_less_cnt;
-  absl::optional<int> pointer_to_member_operator_cnt;
-  absl::optional<int> wrap_raw_ptr_for_dup_cnt;
-  absl::optional<int> get_for_duplication_cnt;
-};
-
-#define REPORT_UNEQUAL_RAW_PTR_COUNTER(member_name, CounterClassImpl) \
-  {                                                                   \
-    if (arg.member_name.has_value() &&                                \
-        arg.member_name.value() != CounterClassImpl::member_name) {   \
-      *result_listener << "Expected `" #member_name "` to be "        \
-                       << arg.member_name.value() << " but got "      \
-                       << CounterClassImpl::member_name << "; ";      \
-      result = false;                                                 \
-    }                                                                 \
-  }
-#define REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CounterClassImpl)              \
-  {                                                                            \
-    result = true;                                                             \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_cnt, CounterClassImpl)         \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(release_wrapped_ptr_cnt, CounterClassImpl)  \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_dereference_cnt, CounterClassImpl)  \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_extraction_cnt, CounterClassImpl)   \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_comparison_cnt, CounterClassImpl)   \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_swap_cnt, CounterClassImpl)     \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_less_cnt, CounterClassImpl)     \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(pointer_to_member_operator_cnt,             \
-                                   CounterClassImpl)                           \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_for_dup_cnt, CounterClassImpl) \
-    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_duplication_cnt, CounterClassImpl)  \
-  }
-
-// Matcher used with `CountingRawPtr`. Provides slightly shorter
-// boilerplate for verifying counts. This inner function is detached
-// from the `MATCHER` to isolate the templating.
-template <typename CountingImplType>
-bool CountersMatchImpl(const CountingRawPtrExpectations<CountingImplType>& arg,
-                       testing::MatchResultListener* result_listener) {
-  bool result = true;
-  REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CountingImplType);
-  return result;
-}
-
-// Implicit `arg` has type `CountingRawPtrExpectations`, specialized for
-// the specific counting impl.
-MATCHER(CountersMatch, "counting impl has specified counters") {
-  return CountersMatchImpl(arg, result_listener);
-}
-
-#undef REPORT_UNEQUAL_RAW_PTR_COUNTERS
-#undef REPORT_UNEQUAL_RAW_PTR_COUNTER
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
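
The matcher above is used with designated initializers, e.g. (pattern taken
from the tests that follow):

  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
                  .get_for_dereference_cnt = 1,
                  .get_for_extraction_cnt = 0,
                  .get_for_comparison_cnt = 0,
              }),
              CountersMatch());

Fields left unset remain absl::nullopt and are simply not checked.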
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc b/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc
deleted file mode 100644
index 1cf122e..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc
+++ /dev/null
@@ -1,2467 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"
-
-#include <climits>
-#include <cstddef>
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <thread>
-#include <type_traits>
-#include <utility>
-
-#include "base/allocator/partition_alloc_features.h"
-#include "base/allocator/partition_alloc_support.h"
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr_counting_impl_wrapper_for_test.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr_test_support.h"
-#include "base/allocator/partition_allocator/pointers/raw_ref.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/cpu.h"
-#include "base/cxx20_to_address.h"
-#include "base/logging.h"
-#include "base/memory/raw_ptr_asan_service.h"
-#include "base/task/thread_pool.h"
-#include "base/test/bind.h"
-#include "base/test/gtest_util.h"
-#include "base/test/memory/dangling_ptr_instrumentation.h"
-#include "base/test/scoped_feature_list.h"
-#include "base/test/task_environment.h"
-#include "build/build_config.h"
-#include "build/buildflag.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-#include "third_party/abseil-cpp/absl/types/variant.h"
-
-#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-#include <sanitizer/asan_interface.h>
-#include "base/debug/asan_service.h"
-#endif
-
-using testing::AllOf;
-using testing::HasSubstr;
-using testing::Test;
-
-static_assert(sizeof(raw_ptr<void>) == sizeof(void*),
-              "raw_ptr shouldn't add memory overhead");
-static_assert(sizeof(raw_ptr<int>) == sizeof(int*),
-              "raw_ptr shouldn't add memory overhead");
-static_assert(sizeof(raw_ptr<std::string>) == sizeof(std::string*),
-              "raw_ptr shouldn't add memory overhead");
-
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
-    !BUILDFLAG(USE_ASAN_UNOWNED_PTR) && !BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-// |is_trivially_copyable| assertion means that arrays/vectors of raw_ptr can
-// be copied by memcpy.
-static_assert(std::is_trivially_copyable<raw_ptr<void>>::value,
-              "raw_ptr should be trivially copyable");
-static_assert(std::is_trivially_copyable<raw_ptr<int>>::value,
-              "raw_ptr should be trivially copyable");
-static_assert(std::is_trivially_copyable<raw_ptr<std::string>>::value,
-              "raw_ptr should be trivially copyable");
-
-// |is_trivially_default_constructible| assertion helps retain implicit default
-// constructors when raw_ptr is used as a union field.  Example of an error
-// if this assertion didn't hold:
-//
-//     ../../base/trace_event/trace_arguments.h:249:16: error: call to
-//     implicitly-deleted default constructor of 'base::trace_event::TraceValue'
-//         TraceValue ret;
-//                    ^
-//     ../../base/trace_event/trace_arguments.h:211:26: note: default
-//     constructor of 'TraceValue' is implicitly deleted because variant field
-//     'as_pointer' has a non-trivial default constructor
-//       raw_ptr<const void> as_pointer;
-static_assert(std::is_trivially_default_constructible<raw_ptr<void>>::value,
-              "raw_ptr should be trivially default constructible");
-static_assert(std::is_trivially_default_constructible<raw_ptr<int>>::value,
-              "raw_ptr should be trivially default constructible");
-static_assert(
-    std::is_trivially_default_constructible<raw_ptr<std::string>>::value,
-    "raw_ptr should be trivially default constructible");
-#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
-        // !BUILDFLAG(USE_ASAN_UNOWNED_PTR) && !BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-// Verify that raw_ptr is a literal type, and its entire interface is constexpr.
-//
-// Constexpr destructors were introduced in C++20. PartitionAlloc's minimum
-// supported C++ version is C++17, so raw_ptr is not a literal type in C++17.
-// Thus we only test for constexpr in C++20.
-#if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
-static_assert([]() constexpr {
-  struct IntBase {};
-  struct Int : public IntBase {
-    int i = 0;
-  };
-
-  Int* i = new Int();
-  {
-    raw_ptr<Int> r(i);              // raw_ptr(T*)
-    raw_ptr<Int> r2(r);             // raw_ptr(const raw_ptr&)
-    raw_ptr<Int> r3(std::move(r));  // raw_ptr(raw_ptr&&)
-    r = r2;                         // operator=(const raw_ptr&)
-    r = std::move(r3);              // operator=(raw_ptr&&)
-    raw_ptr<Int, base::RawPtrTraits::kMayDangle> r4(
-        r);   // raw_ptr(const raw_ptr<DifferentTraits>&)
-    r4 = r2;  // operator=(const raw_ptr<DifferentTraits>&)
-    // (There is no move-version of DifferentTraits.)
-    [[maybe_unused]] raw_ptr<IntBase> r5(
-        r2);  // raw_ptr(const raw_ptr<Convertible>&)
-    [[maybe_unused]] raw_ptr<IntBase> r6(
-        std::move(r2));  // raw_ptr(raw_ptr<Convertible>&&)
-    r2 = r;              // Reset after move...
-    r5 = r2;             // operator=(const raw_ptr<Convertible>&)
-    r5 = std::move(r2);  // operator=(raw_ptr<Convertible>&&)
-    [[maybe_unused]] raw_ptr<Int> r7(nullptr);  // raw_ptr(nullptr)
-    r4 = nullptr;                               // operator=(nullptr)
-    r4 = i;                                     // operator=(T*)
-    r5 = r4;                                    // operator=(const Upcast&)
-    r5 = std::move(r4);                         // operator=(Upcast&&)
-    r.get()->i += 1;                            // get()
-    [[maybe_unused]] bool b = r;                // operator bool
-    (*r).i += 1;                                // operator*()
-    r->i += 1;                                  // operator->()
-    [[maybe_unused]] Int* i2 = r;               // operator T*()
-    [[maybe_unused]] IntBase* i3 = r;           // operator Convertible*()
-
-    [[maybe_unused]] Int** i4 = &r.AsEphemeralRawAddr();
-    [[maybe_unused]] Int*& i5 = r.AsEphemeralRawAddr();
-
-    Int* array = new Int[3]();
-    {
-      raw_ptr<Int, base::RawPtrTraits::kAllowPtrArithmetic> ra(array);
-      ++ra;      // operator++()
-      --ra;      // operator--()
-      ra++;      // operator++(int)
-      ra--;      // operator--(int)
-      ra += 1u;  // operator+=()
-      ra -= 1u;  // operator-=()
-    }
-    delete[] array;
-  }
-  delete i;
-  return true;
-}());
-#endif
-
-// Don't use base::internal for testing raw_ptr API, to test if code outside
-// this namespace calls the correct functions from this namespace.
-namespace {
-
-// `kAllowPtrArithmetic` matches what `CountingRawPtr` does internally.
-// `kUseCountingWrapperForTest` is removed.
-using RawPtrCountingImpl = base::test::RawPtrCountingImplWrapperForTest<
-    base::RawPtrTraits::kAllowPtrArithmetic>;
-
-// `kMayDangle | kAllowPtrArithmetic` matches what `CountingRawPtrMayDangle`
-// does internally. `kUseCountingWrapperForTest` is removed, and `kMayDangle`
-// and `kAllowPtrArithmetic` are kept.
-using RawPtrCountingMayDangleImpl =
-    base::test::RawPtrCountingImplWrapperForTest<
-        base::RawPtrTraits::kMayDangle |
-        base::RawPtrTraits::kAllowPtrArithmetic>;
-
-template <typename T>
-using CountingRawPtr = raw_ptr<T,
-                               base::RawPtrTraits::kUseCountingWrapperForTest |
-                                   base::RawPtrTraits::kAllowPtrArithmetic>;
-static_assert(std::is_same_v<CountingRawPtr<int>::Impl, RawPtrCountingImpl>);
-
-template <typename T>
-using CountingRawPtrMayDangle =
-    raw_ptr<T,
-            base::RawPtrTraits::kMayDangle |
-                base::RawPtrTraits::kUseCountingWrapperForTest |
-                base::RawPtrTraits::kAllowPtrArithmetic>;
-static_assert(std::is_same_v<CountingRawPtrMayDangle<int>::Impl,
-                             RawPtrCountingMayDangleImpl>);
-
-struct MyStruct {
-  int x;
-};
-
-struct Base1 {
-  explicit Base1(int b1) : b1(b1) {}
-  int b1;
-};
-
-struct Base2 {
-  explicit Base2(int b2) : b2(b2) {}
-  int b2;
-};
-
-struct Derived : Base1, Base2 {
-  Derived(int b1, int b2, int d) : Base1(b1), Base2(b2), d(d) {}
-  int d;
-};
-
-class RawPtrTest : public Test {
- protected:
-  void SetUp() override {
-    RawPtrCountingImpl::ClearCounters();
-    RawPtrCountingMayDangleImpl::ClearCounters();
-  }
-};
-
-// Use this instead of std::ignore, to prevent the instruction from getting
-// optimized out by the compiler.
-volatile int g_volatile_int_to_ignore;
-
-TEST_F(RawPtrTest, NullStarDereference) {
-  raw_ptr<int> ptr = nullptr;
-  EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = *ptr, "");
-}
-
-TEST_F(RawPtrTest, NullArrowDereference) {
-  raw_ptr<MyStruct> ptr = nullptr;
-  EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = ptr->x, "");
-}
-
-TEST_F(RawPtrTest, NullExtractNoDereference) {
-  CountingRawPtr<int> ptr = nullptr;
-  // No dereference hence shouldn't crash.
-  int* raw = ptr;
-  std::ignore = raw;
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 1,
-                  .get_for_comparison_cnt = 0}),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, InvalidExtractNoDereference) {
-  // Some code uses invalid pointer values as indicators, so those values must
-  // be accepted by raw_ptr and passed through unchanged during extraction.
-  int* inv_ptr = reinterpret_cast<int*>(~static_cast<uintptr_t>(0));
-  CountingRawPtr<int> ptr = inv_ptr;
-  int* raw = ptr;
-  EXPECT_EQ(raw, inv_ptr);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 1,
-                  .get_for_comparison_cnt = 0}),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, NullCmpExplicit) {
-  CountingRawPtr<int> ptr = nullptr;
-  EXPECT_TRUE(ptr == nullptr);
-  EXPECT_TRUE(nullptr == ptr);
-  EXPECT_FALSE(ptr != nullptr);
-  EXPECT_FALSE(nullptr != ptr);
-  // No need to unwrap pointer, just compare against 0.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, NullCmpBool) {
-  CountingRawPtr<int> ptr = nullptr;
-  EXPECT_FALSE(ptr);
-  EXPECT_TRUE(!ptr);
-  // No need to unwrap pointer, just compare against 0.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-void FuncThatAcceptsBool(bool b) {}
-
-bool IsValidNoCast(CountingRawPtr<int> ptr) {
-  return !!ptr;  // !! to avoid implicit cast
-}
-bool IsValidNoCast2(CountingRawPtr<int> ptr) {
-  return ptr && true;
-}
-
-TEST_F(RawPtrTest, BoolOpNotCast) {
-  CountingRawPtr<int> ptr = nullptr;
-  volatile bool is_valid = !!ptr;  // !! to avoid implicit cast
-  is_valid = ptr || is_valid;      // volatile, so won't be optimized
-  if (ptr) {
-    is_valid = true;
-  }
-  [[maybe_unused]] bool is_not_valid = !ptr;
-  if (!ptr) {
-    is_not_valid = true;
-  }
-  std::ignore = IsValidNoCast(ptr);
-  std::ignore = IsValidNoCast2(ptr);
-  FuncThatAcceptsBool(!ptr);
-  // No need to unwrap pointer, just compare against 0.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-bool IsValidWithCast(CountingRawPtr<int> ptr) {
-  return ptr;
-}
-
-// This test is mostly for documentation purposes. It demonstrates cases where
-// |operator T*| is called first and then the pointer is converted to bool,
-// as opposed to calling |operator bool| directly. The former may be more
-// costly, so the caller has to be careful not to trigger this path.
-TEST_F(RawPtrTest, CastNotBoolOp) {
-  CountingRawPtr<int> ptr = nullptr;
-  [[maybe_unused]] bool is_valid = ptr;
-  is_valid = IsValidWithCast(ptr);
-  FuncThatAcceptsBool(ptr);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 3,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, StarDereference) {
-  int foo = 42;
-  CountingRawPtr<int> ptr = &foo;
-  EXPECT_EQ(*ptr, 42);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 1,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, ArrowDereference) {
-  MyStruct foo = {42};
-  CountingRawPtr<MyStruct> ptr = &foo;
-  EXPECT_EQ(ptr->x, 42);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 1,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, Delete) {
-  CountingRawPtr<int> ptr = new int(42);
-  delete ptr.ExtractAsDangling();
-  // The pointer is first internally converted to the MayDangle kind, then
-  // extracted via an implicit cast before being passed to |delete|.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 0,
-                  .get_for_duplication_cnt = 1,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 1,
-                  .get_for_comparison_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 1,
-                  .get_for_duplication_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, ClearAndDelete) {
-  CountingRawPtr<int> ptr(new int);
-  ptr.ClearAndDelete();
-
-  // TODO(crbug.com/1346513): clang-format has a difficult time making
-  // sense of preprocessor arms mixed with designated initializers.
-  //
-  // clang-format off
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl> {
-                .wrap_raw_ptr_cnt = 1,
-                .release_wrapped_ptr_cnt = 1,
-                .get_for_dereference_cnt = 0,
-                .get_for_extraction_cnt = 1,
-                .wrapped_ptr_swap_cnt = 0,
-              }),
-              CountersMatch());
-  // clang-format on
-  EXPECT_EQ(ptr.get(), nullptr);
-}
-
-TEST_F(RawPtrTest, ClearAndDeleteArray) {
-  CountingRawPtr<int> ptr(new int[8]);
-  ptr.ClearAndDeleteArray();
-
-  // TODO(crbug.com/1346513): clang-format has a difficult time making
-  // sense of preprocessor arms mixed with designated initializers.
-  //
-  // clang-format off
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl> {
-                .wrap_raw_ptr_cnt = 1,
-                .release_wrapped_ptr_cnt = 1,
-                .get_for_dereference_cnt = 0,
-                .get_for_extraction_cnt = 1,
-                .wrapped_ptr_swap_cnt = 0,
-              }),
-              CountersMatch());
-  // clang-format on
-  EXPECT_EQ(ptr.get(), nullptr);
-}
-
-TEST_F(RawPtrTest, ExtractAsDangling) {
-  CountingRawPtr<int> ptr(new int);
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 1,
-                  .release_wrapped_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .wrapped_ptr_swap_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 0,
-                  .get_for_duplication_cnt = 0,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .release_wrapped_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .wrapped_ptr_swap_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 0,
-                  .get_for_duplication_cnt = 0,
-              }),
-              CountersMatch());
-
-  EXPECT_TRUE(ptr.get());
-
-  CountingRawPtrMayDangle<int> dangling = ptr.ExtractAsDangling();
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 1,
-                  .release_wrapped_ptr_cnt = 1,
-                  .get_for_dereference_cnt = 0,
-                  .wrapped_ptr_swap_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 0,
-                  .get_for_duplication_cnt = 1,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .release_wrapped_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .wrapped_ptr_swap_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 1,
-                  .get_for_duplication_cnt = 0,
-              }),
-              CountersMatch());
-
-  EXPECT_FALSE(ptr.get());
-  EXPECT_TRUE(dangling.get());
-
-  dangling.ClearAndDelete();
-}
-
-TEST_F(RawPtrTest, ExtractAsDanglingFromDangling) {
-  CountingRawPtrMayDangle<int> ptr(new int);
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 1,
-                  .release_wrapped_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .wrapped_ptr_swap_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 0,
-                  .get_for_duplication_cnt = 0,
-              }),
-              CountersMatch());
-
-  CountingRawPtrMayDangle<int> dangling = ptr.ExtractAsDangling();
-
-  // wrap_raw_ptr_cnt remains `1` because `ptr` is already a MayDangle pointer,
-  // so ExtractAsDangling() only moves it into `dangling`, avoiding extra cost.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 1,
-                  .release_wrapped_ptr_cnt = 1,
-                  .get_for_dereference_cnt = 0,
-                  .wrapped_ptr_swap_cnt = 0,
-                  .wrap_raw_ptr_for_dup_cnt = 0,
-                  .get_for_duplication_cnt = 0,
-              }),
-              CountersMatch());
-
-  dangling.ClearAndDelete();
-}
-
-TEST_F(RawPtrTest, ConstVolatileVoidPtr) {
-  int32_t foo[] = {1234567890};
-  CountingRawPtr<const volatile void> ptr = foo;
-  EXPECT_EQ(*static_cast<const volatile int32_t*>(ptr), 1234567890);
-  // Because we're using a cast, the extraction API kicks in, which doesn't
-  // know if the extracted pointer will be dereferenced or not.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 1,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, VoidPtr) {
-  int32_t foo[] = {1234567890};
-  CountingRawPtr<void> ptr = foo;
-  EXPECT_EQ(*static_cast<int32_t*>(ptr), 1234567890);
-  // Because we're using a cast, the extraction API kicks in, which doesn't
-  // know if the extracted pointer will be dereferenced or not.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 1,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, OperatorEQ) {
-  int foo;
-  CountingRawPtr<int> ptr1 = nullptr;
-  EXPECT_TRUE(ptr1 == ptr1);
-
-  CountingRawPtr<int> ptr2 = nullptr;
-  EXPECT_TRUE(ptr1 == ptr2);
-
-  CountingRawPtr<int> ptr3 = &foo;
-  EXPECT_TRUE(&foo == ptr3);
-  EXPECT_TRUE(ptr3 == &foo);
-  EXPECT_FALSE(ptr1 == ptr3);
-
-  ptr1 = &foo;
-  EXPECT_TRUE(ptr1 == ptr3);
-  EXPECT_TRUE(ptr3 == ptr1);
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 12,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, OperatorNE) {
-  int foo;
-  CountingRawPtr<int> ptr1 = nullptr;
-  EXPECT_FALSE(ptr1 != ptr1);
-
-  CountingRawPtr<int> ptr2 = nullptr;
-  EXPECT_FALSE(ptr1 != ptr2);
-
-  CountingRawPtr<int> ptr3 = &foo;
-  EXPECT_FALSE(&foo != ptr3);
-  EXPECT_FALSE(ptr3 != &foo);
-  EXPECT_TRUE(ptr1 != ptr3);
-
-  ptr1 = &foo;
-  EXPECT_FALSE(ptr1 != ptr3);
-  EXPECT_FALSE(ptr3 != ptr1);
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 12,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, OperatorEQCast) {
-  int foo = 42;
-  const int* raw_int_ptr = &foo;
-  volatile void* raw_void_ptr = &foo;
-  CountingRawPtr<volatile int> checked_int_ptr = &foo;
-  CountingRawPtr<const void> checked_void_ptr = &foo;
-  EXPECT_TRUE(checked_int_ptr == checked_int_ptr);
-  EXPECT_TRUE(checked_int_ptr == raw_int_ptr);
-  EXPECT_TRUE(raw_int_ptr == checked_int_ptr);
-  EXPECT_TRUE(checked_void_ptr == checked_void_ptr);
-  EXPECT_TRUE(checked_void_ptr == raw_void_ptr);
-  EXPECT_TRUE(raw_void_ptr == checked_void_ptr);
-  EXPECT_TRUE(checked_int_ptr == checked_void_ptr);
-  EXPECT_TRUE(checked_int_ptr == raw_void_ptr);
-  EXPECT_TRUE(raw_int_ptr == checked_void_ptr);
-  EXPECT_TRUE(checked_void_ptr == checked_int_ptr);
-  EXPECT_TRUE(checked_void_ptr == raw_int_ptr);
-  EXPECT_TRUE(raw_void_ptr == checked_int_ptr);
-  // Make sure that all cases are handled by operator== (faster) and none by the
-  // cast operator (slower).
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 16,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, OperatorEQCastHierarchy) {
-  Derived derived_val(42, 84, 1024);
-  Derived* raw_derived_ptr = &derived_val;
-  const Base1* raw_base1_ptr = &derived_val;
-  volatile Base2* raw_base2_ptr = &derived_val;
-  // Double check the basic understanding of pointers: even though the numeric
-  // values (i.e. the addresses) aren't equal, the pointers are still equal,
-  // because casting from derived to base adjusts the address.
-  // raw_ptr must behave the same, which is checked below.
-  ASSERT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr),
-            reinterpret_cast<uintptr_t>(raw_derived_ptr));
-  ASSERT_TRUE(raw_base2_ptr == raw_derived_ptr);
-
-  CountingRawPtr<const volatile Derived> checked_derived_ptr = &derived_val;
-  CountingRawPtr<volatile Base1> checked_base1_ptr = &derived_val;
-  CountingRawPtr<const Base2> checked_base2_ptr = &derived_val;
-  EXPECT_TRUE(checked_derived_ptr == checked_derived_ptr);
-  EXPECT_TRUE(checked_derived_ptr == raw_derived_ptr);
-  EXPECT_TRUE(raw_derived_ptr == checked_derived_ptr);
-  EXPECT_TRUE(checked_derived_ptr == checked_base1_ptr);
-  EXPECT_TRUE(checked_derived_ptr == raw_base1_ptr);
-  EXPECT_TRUE(raw_derived_ptr == checked_base1_ptr);
-  EXPECT_TRUE(checked_base1_ptr == checked_derived_ptr);
-  EXPECT_TRUE(checked_base1_ptr == raw_derived_ptr);
-  EXPECT_TRUE(raw_base1_ptr == checked_derived_ptr);
-  // |base2_ptr| points to the second base class of |derived|, so it will be
-  // located at an offset. While the stored raw uintptr_t values shouldn't match,
-  // ensure that the internal pointer manipulation correctly offsets when
-  // casting up and down the class hierarchy.
-  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
-            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
-  EXPECT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr),
-            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
-  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
-            reinterpret_cast<uintptr_t>(raw_derived_ptr));
-  EXPECT_TRUE(checked_derived_ptr == checked_base2_ptr);
-  EXPECT_TRUE(checked_derived_ptr == raw_base2_ptr);
-  EXPECT_TRUE(raw_derived_ptr == checked_base2_ptr);
-  EXPECT_TRUE(checked_base2_ptr == checked_derived_ptr);
-  EXPECT_TRUE(checked_base2_ptr == raw_derived_ptr);
-  EXPECT_TRUE(raw_base2_ptr == checked_derived_ptr);
-  // Make sure that all cases are handled by operator== (faster) and none by the
-  // cast operator (slower).
-  // The 4 extractions come from the .get() calls that compare raw addresses.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 4,
-                  .get_for_comparison_cnt = 20,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, OperatorNECast) {
-  int foo = 42;
-  volatile int* raw_int_ptr = &foo;
-  const void* raw_void_ptr = &foo;
-  CountingRawPtr<const int> checked_int_ptr = &foo;
-  CountingRawPtr<volatile void> checked_void_ptr = &foo;
-  EXPECT_FALSE(checked_int_ptr != checked_int_ptr);
-  EXPECT_FALSE(checked_int_ptr != raw_int_ptr);
-  EXPECT_FALSE(raw_int_ptr != checked_int_ptr);
-  EXPECT_FALSE(checked_void_ptr != checked_void_ptr);
-  EXPECT_FALSE(checked_void_ptr != raw_void_ptr);
-  EXPECT_FALSE(raw_void_ptr != checked_void_ptr);
-  EXPECT_FALSE(checked_int_ptr != checked_void_ptr);
-  EXPECT_FALSE(checked_int_ptr != raw_void_ptr);
-  EXPECT_FALSE(raw_int_ptr != checked_void_ptr);
-  EXPECT_FALSE(checked_void_ptr != checked_int_ptr);
-  EXPECT_FALSE(checked_void_ptr != raw_int_ptr);
-  EXPECT_FALSE(raw_void_ptr != checked_int_ptr);
-  // Make sure that all cases are handled by operator== (faster) and none by the
-  // cast operator (slower).
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 16,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, OperatorNECastHierarchy) {
-  Derived derived_val(42, 84, 1024);
-  const Derived* raw_derived_ptr = &derived_val;
-  volatile Base1* raw_base1_ptr = &derived_val;
-  const Base2* raw_base2_ptr = &derived_val;
-  CountingRawPtr<volatile Derived> checked_derived_ptr = &derived_val;
-  CountingRawPtr<const Base1> checked_base1_ptr = &derived_val;
-  CountingRawPtr<const volatile Base2> checked_base2_ptr = &derived_val;
-  EXPECT_FALSE(checked_derived_ptr != checked_derived_ptr);
-  EXPECT_FALSE(checked_derived_ptr != raw_derived_ptr);
-  EXPECT_FALSE(raw_derived_ptr != checked_derived_ptr);
-  EXPECT_FALSE(checked_derived_ptr != checked_base1_ptr);
-  EXPECT_FALSE(checked_derived_ptr != raw_base1_ptr);
-  EXPECT_FALSE(raw_derived_ptr != checked_base1_ptr);
-  EXPECT_FALSE(checked_base1_ptr != checked_derived_ptr);
-  EXPECT_FALSE(checked_base1_ptr != raw_derived_ptr);
-  EXPECT_FALSE(raw_base1_ptr != checked_derived_ptr);
-  // |base2_ptr| points to the second base class of |derived|, so it will be
-  // located at an offset. While the stored raw uintptr_t values shouldn't
-  // match, ensure that the internal pointer manipulation correctly offsets
-  // when casting up and down the class hierarchy.
-  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
-            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
-  EXPECT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr),
-            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
-  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
-            reinterpret_cast<uintptr_t>(raw_derived_ptr));
-  EXPECT_FALSE(checked_derived_ptr != checked_base2_ptr);
-  EXPECT_FALSE(checked_derived_ptr != raw_base2_ptr);
-  EXPECT_FALSE(raw_derived_ptr != checked_base2_ptr);
-  EXPECT_FALSE(checked_base2_ptr != checked_derived_ptr);
-  EXPECT_FALSE(checked_base2_ptr != raw_derived_ptr);
-  EXPECT_FALSE(raw_base2_ptr != checked_derived_ptr);
-  // Make sure that all cases are handled by operator== (faster) and none by the
-  // cast operator (slower).
-  // The 4 extractions come from the .get() calls that compare raw addresses.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 4,
-                  .get_for_comparison_cnt = 20,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, Cast) {
-  Derived derived_val(42, 84, 1024);
-  raw_ptr<Derived> checked_derived_ptr = &derived_val;
-  Base1* raw_base1_ptr = checked_derived_ptr;
-  EXPECT_EQ(raw_base1_ptr->b1, 42);
-  Base2* raw_base2_ptr = checked_derived_ptr;
-  EXPECT_EQ(raw_base2_ptr->b2, 84);
-
-  Derived* raw_derived_ptr = static_cast<Derived*>(raw_base1_ptr);
-  EXPECT_EQ(raw_derived_ptr->b1, 42);
-  EXPECT_EQ(raw_derived_ptr->b2, 84);
-  EXPECT_EQ(raw_derived_ptr->d, 1024);
-  raw_derived_ptr = static_cast<Derived*>(raw_base2_ptr);
-  EXPECT_EQ(raw_derived_ptr->b1, 42);
-  EXPECT_EQ(raw_derived_ptr->b2, 84);
-  EXPECT_EQ(raw_derived_ptr->d, 1024);
-
-  raw_ptr<Base1> checked_base1_ptr = raw_derived_ptr;
-  EXPECT_EQ(checked_base1_ptr->b1, 42);
-  raw_ptr<Base2> checked_base2_ptr = raw_derived_ptr;
-  EXPECT_EQ(checked_base2_ptr->b2, 84);
-
-  raw_ptr<Derived> checked_derived_ptr2 =
-      static_cast<Derived*>(checked_base1_ptr);
-  EXPECT_EQ(checked_derived_ptr2->b1, 42);
-  EXPECT_EQ(checked_derived_ptr2->b2, 84);
-  EXPECT_EQ(checked_derived_ptr2->d, 1024);
-  checked_derived_ptr2 = static_cast<Derived*>(checked_base2_ptr);
-  EXPECT_EQ(checked_derived_ptr2->b1, 42);
-  EXPECT_EQ(checked_derived_ptr2->b2, 84);
-  EXPECT_EQ(checked_derived_ptr2->d, 1024);
-
-  const Derived* raw_const_derived_ptr = checked_derived_ptr2;
-  EXPECT_EQ(raw_const_derived_ptr->b1, 42);
-  EXPECT_EQ(raw_const_derived_ptr->b2, 84);
-  EXPECT_EQ(raw_const_derived_ptr->d, 1024);
-
-  raw_ptr<const Derived> checked_const_derived_ptr = raw_const_derived_ptr;
-  EXPECT_EQ(checked_const_derived_ptr->b1, 42);
-  EXPECT_EQ(checked_const_derived_ptr->b2, 84);
-  EXPECT_EQ(checked_const_derived_ptr->d, 1024);
-
-  const Derived* raw_const_derived_ptr2 = checked_const_derived_ptr;
-  EXPECT_EQ(raw_const_derived_ptr2->b1, 42);
-  EXPECT_EQ(raw_const_derived_ptr2->b2, 84);
-  EXPECT_EQ(raw_const_derived_ptr2->d, 1024);
-
-  raw_ptr<const Derived> checked_const_derived_ptr2 = raw_derived_ptr;
-  EXPECT_EQ(checked_const_derived_ptr2->b1, 42);
-  EXPECT_EQ(checked_const_derived_ptr2->b2, 84);
-  EXPECT_EQ(checked_const_derived_ptr2->d, 1024);
-
-  raw_ptr<const Derived> checked_const_derived_ptr3 = checked_derived_ptr2;
-  EXPECT_EQ(checked_const_derived_ptr3->b1, 42);
-  EXPECT_EQ(checked_const_derived_ptr3->b2, 84);
-  EXPECT_EQ(checked_const_derived_ptr3->d, 1024);
-
-  volatile Derived* raw_volatile_derived_ptr = checked_derived_ptr2;
-  EXPECT_EQ(raw_volatile_derived_ptr->b1, 42);
-  EXPECT_EQ(raw_volatile_derived_ptr->b2, 84);
-  EXPECT_EQ(raw_volatile_derived_ptr->d, 1024);
-
-  raw_ptr<volatile Derived> checked_volatile_derived_ptr =
-      raw_volatile_derived_ptr;
-  EXPECT_EQ(checked_volatile_derived_ptr->b1, 42);
-  EXPECT_EQ(checked_volatile_derived_ptr->b2, 84);
-  EXPECT_EQ(checked_volatile_derived_ptr->d, 1024);
-
-  void* raw_void_ptr = checked_derived_ptr;
-  raw_ptr<void> checked_void_ptr = raw_derived_ptr;
-  raw_ptr<Derived> checked_derived_ptr3 = static_cast<Derived*>(raw_void_ptr);
-  raw_ptr<Derived> checked_derived_ptr4 =
-      static_cast<Derived*>(checked_void_ptr);
-  EXPECT_EQ(checked_derived_ptr3->b1, 42);
-  EXPECT_EQ(checked_derived_ptr3->b2, 84);
-  EXPECT_EQ(checked_derived_ptr3->d, 1024);
-  EXPECT_EQ(checked_derived_ptr4->b1, 42);
-  EXPECT_EQ(checked_derived_ptr4->b2, 84);
-  EXPECT_EQ(checked_derived_ptr4->d, 1024);
-}
-
-TEST_F(RawPtrTest, UpcastConvertible) {
-  {
-    Derived derived_val(42, 84, 1024);
-    raw_ptr<Derived> checked_derived_ptr = &derived_val;
-
-    raw_ptr<Base1> checked_base1_ptr(checked_derived_ptr);
-    EXPECT_EQ(checked_base1_ptr->b1, 42);
-    raw_ptr<Base2> checked_base2_ptr(checked_derived_ptr);
-    EXPECT_EQ(checked_base2_ptr->b2, 84);
-
-    checked_base1_ptr = checked_derived_ptr;
-    EXPECT_EQ(checked_base1_ptr->b1, 42);
-    checked_base2_ptr = checked_derived_ptr;
-    EXPECT_EQ(checked_base2_ptr->b2, 84);
-
-    EXPECT_EQ(checked_base1_ptr, checked_derived_ptr);
-    EXPECT_EQ(checked_base2_ptr, checked_derived_ptr);
-  }
-
-  {
-    Derived derived_val(42, 84, 1024);
-    raw_ptr<Derived> checked_derived_ptr1 = &derived_val;
-    raw_ptr<Derived> checked_derived_ptr2 = &derived_val;
-    raw_ptr<Derived> checked_derived_ptr3 = &derived_val;
-    raw_ptr<Derived> checked_derived_ptr4 = &derived_val;
-
-    raw_ptr<Base1> checked_base1_ptr(std::move(checked_derived_ptr1));
-    EXPECT_EQ(checked_base1_ptr->b1, 42);
-    raw_ptr<Base2> checked_base2_ptr(std::move(checked_derived_ptr2));
-    EXPECT_EQ(checked_base2_ptr->b2, 84);
-
-    checked_base1_ptr = std::move(checked_derived_ptr3);
-    EXPECT_EQ(checked_base1_ptr->b1, 42);
-    checked_base2_ptr = std::move(checked_derived_ptr4);
-    EXPECT_EQ(checked_base2_ptr->b2, 84);
-  }
-}
-
-TEST_F(RawPtrTest, UpcastNotConvertible) {
-  class Base {};
-  class Derived : private Base {};
-  class Unrelated {};
-  EXPECT_FALSE((std::is_convertible<raw_ptr<Derived>, raw_ptr<Base>>::value));
-  EXPECT_FALSE((std::is_convertible<raw_ptr<Unrelated>, raw_ptr<Base>>::value));
-  EXPECT_FALSE((std::is_convertible<raw_ptr<Unrelated>, raw_ptr<void>>::value));
-  EXPECT_FALSE((std::is_convertible<raw_ptr<void>, raw_ptr<Unrelated>>::value));
-  EXPECT_FALSE(
-      (std::is_convertible<raw_ptr<int64_t>, raw_ptr<int32_t>>::value));
-  EXPECT_FALSE(
-      (std::is_convertible<raw_ptr<int16_t>, raw_ptr<int32_t>>::value));
-}
-
-TEST_F(RawPtrTest, UpcastPerformance) {
-  {
-    Derived derived_val(42, 84, 1024);
-    CountingRawPtr<Derived> checked_derived_ptr = &derived_val;
-    CountingRawPtr<Base1> checked_base1_ptr(checked_derived_ptr);
-    CountingRawPtr<Base2> checked_base2_ptr(checked_derived_ptr);
-    checked_base1_ptr = checked_derived_ptr;
-    checked_base2_ptr = checked_derived_ptr;
-  }
-
-  {
-    Derived derived_val(42, 84, 1024);
-    CountingRawPtr<Derived> checked_derived_ptr = &derived_val;
-    CountingRawPtr<Base1> checked_base1_ptr(std::move(checked_derived_ptr));
-    CountingRawPtr<Base2> checked_base2_ptr(std::move(checked_derived_ptr));
-    checked_base1_ptr = std::move(checked_derived_ptr);
-    checked_base2_ptr = std::move(checked_derived_ptr);
-  }
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, CustomSwap) {
-  int foo1, foo2;
-  CountingRawPtr<int> ptr1(&foo1);
-  CountingRawPtr<int> ptr2(&foo2);
-  // Recommended use pattern.
-  using std::swap;
-  swap(ptr1, ptr2);
-  EXPECT_EQ(ptr1.get(), &foo2);
-  EXPECT_EQ(ptr2.get(), &foo1);
-  EXPECT_EQ(RawPtrCountingImpl::wrapped_ptr_swap_cnt, 1);
-}
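-
-// The "recommended use pattern" above is the standard ADL two-step. A minimal
-// sketch of how generic code usually spells it (illustrative only; the helper
-// below is hypothetical and not part of this suite):
-//
-//   template <typename T>
-//   void SwapValues(T& a, T& b) {
-//     using std::swap;  // Make std::swap the fallback candidate.
-//     swap(a, b);       // ADL can pick a type-specific swap, e.g. raw_ptr's.
-//   }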
-
-TEST_F(RawPtrTest, StdSwap) {
-  int foo1, foo2;
-  CountingRawPtr<int> ptr1(&foo1);
-  CountingRawPtr<int> ptr2(&foo2);
-  std::swap(ptr1, ptr2);
-  EXPECT_EQ(ptr1.get(), &foo2);
-  EXPECT_EQ(ptr2.get(), &foo1);
-  EXPECT_EQ(RawPtrCountingImpl::wrapped_ptr_swap_cnt, 0);
-}
-
-TEST_F(RawPtrTest, PostIncrementOperator) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[0];
-  for (int i = 0; i < 4; ++i) {
-    ASSERT_EQ(*ptr++, 42 + i);
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 4,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, PostDecrementOperator) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[3];
-  // Avoid decrementing out of the slot holding the vector's backing store.
-  for (int i = 3; i > 0; --i) {
-    ASSERT_EQ(*ptr--, 42 + i);
-  }
-  ASSERT_EQ(*ptr, 42);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 4,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, PreIncrementOperator) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[0];
-  for (int i = 0; i < 4; ++i, ++ptr) {
-    ASSERT_EQ(*ptr, 42 + i);
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 4,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, PreDecrementOperator) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[3];
-  // Avoid decrementing out of the slot holding the vector's backing store.
-  for (int i = 3; i > 0; --i, --ptr) {
-    ASSERT_EQ(*ptr, 42 + i);
-  }
-  ASSERT_EQ(*ptr, 42);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 4,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, PlusEqualOperator) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[0];
-  for (int i = 0; i < 4; i += 2, ptr += 2) {
-    ASSERT_EQ(*ptr, 42 + i);
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 2,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, PlusEqualOperatorTypes) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[0];
-  ASSERT_EQ(*ptr, 42);
-  ptr += 2;  // Positive literal.
-  ASSERT_EQ(*ptr, 44);
-  ptr -= 2;  // Negative literal.
-  ASSERT_EQ(*ptr, 42);
-  ptr += ptrdiff_t{1};  // ptrdiff_t.
-  ASSERT_EQ(*ptr, 43);
-  ptr += size_t{2};  // size_t.
-  ASSERT_EQ(*ptr, 45);
-}
-
-TEST_F(RawPtrTest, MinusEqualOperator) {
-  std::vector<int> foo({42, 43, 44, 45});
-  CountingRawPtr<int> ptr = &foo[3];
-  ASSERT_EQ(*ptr, 45);
-  ptr -= 2;
-  ASSERT_EQ(*ptr, 43);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 2,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, MinusEqualOperatorTypes) {
-  int foo[] = {42, 43, 44, 45};
-  CountingRawPtr<int> ptr = &foo[3];
-  ASSERT_EQ(*ptr, 45);
-  ptr -= 2;  // Positive literal.
-  ASSERT_EQ(*ptr, 43);
-  ptr -= -2;  // Negative literal.
-  ASSERT_EQ(*ptr, 45);
-  ptr -= ptrdiff_t{2};  // ptrdiff_t.
-  ASSERT_EQ(*ptr, 43);
-  ptr -= size_t{1};  // size_t.
-  ASSERT_EQ(*ptr, 42);
-}
-
-TEST_F(RawPtrTest, PlusOperator) {
-  int foo[] = {42, 43, 44, 45};
-  CountingRawPtr<int> ptr = foo;
-  for (int i = 0; i < 4; ++i) {
-    ASSERT_EQ(*(ptr + i), 42 + i);
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 4,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, MinusOperator) {
-  int foo[] = {42, 43, 44, 45};
-  CountingRawPtr<int> ptr = &foo[4];
-  for (int i = 1; i <= 4; ++i) {
-    ASSERT_EQ(*(ptr - i), 46 - i);
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 4,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, MinusDeltaOperator) {
-  int foo[] = {42, 43, 44, 45};
-  CountingRawPtr<int> ptrs[] = {&foo[0], &foo[1], &foo[2], &foo[3], &foo[4]};
-  for (int i = 0; i <= 4; ++i) {
-    for (int j = 0; j <= 4; ++j) {
-      ASSERT_EQ(ptrs[i] - ptrs[j], i - j);
-      ASSERT_EQ(ptrs[i] - &foo[j], i - j);
-      ASSERT_EQ(&foo[i] - ptrs[j], i - j);
-    }
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, AdvanceString) {
-  const char kChars[] = "Hello";
-  std::string str = kChars;
-  CountingRawPtr<const char> ptr = str.c_str();
-  for (size_t i = 0; i < str.size(); ++i, ++ptr) {
-    ASSERT_EQ(*ptr, kChars[i]);
-  }
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 5,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, AssignmentFromNullptr) {
-  CountingRawPtr<int> wrapped_ptr;
-  wrapped_ptr = nullptr;
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-void FunctionWithRawPtrParameter(raw_ptr<int> actual_ptr, int* expected_ptr) {
-  EXPECT_EQ(actual_ptr.get(), expected_ptr);
-  EXPECT_EQ(*actual_ptr, *expected_ptr);
-}
-
-// This test checks that raw_ptr<T> can be passed by value into function
-// parameters.  This is mostly a smoke test for the TRIVIAL_ABI attribute.
-TEST_F(RawPtrTest, FunctionParameters_ImplicitlyMovedTemporary) {
-  int x = 123;
-  FunctionWithRawPtrParameter(
-      raw_ptr<int>(&x),  // Temporary that will be moved into the function.
-      &x);
-}
-
-// This test checks that raw_ptr<T> can be passed by value into function
-// parameters.  This is mostly a smoke test for the TRIVIAL_ABI attribute.
-TEST_F(RawPtrTest, FunctionParameters_ExplicitlyMovedLValue) {
-  int x = 123;
-  raw_ptr<int> ptr(&x);
-  FunctionWithRawPtrParameter(std::move(ptr), &x);
-}
-
-// This test checks that raw_ptr<T> can be passed by value into function
-// parameters.  This is mostly a smoke test for the TRIVIAL_ABI attribute.
-TEST_F(RawPtrTest, FunctionParameters_Copy) {
-  int x = 123;
-  raw_ptr<int> ptr(&x);
-  FunctionWithRawPtrParameter(ptr,  // `ptr` will be copied into the function.
-                              &x);
-}
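-
-// The TRIVIAL_ABI attribute referenced above is what makes passing raw_ptr<T>
-// by value cheap: the object may be passed in registers much like a plain T*,
-// even though raw_ptr has non-trivial special member functions in some build
-// configurations. A minimal sketch (the helper is hypothetical and not part
-// of this suite):
-//
-//   int ReadThrough(raw_ptr<int> p) { return *p; }
-//   int v = 123;
-//   int copy = ReadThrough(raw_ptr<int>(&v));  // Temporary moved into call.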
-
-TEST_F(RawPtrTest, SetLookupUsesGetForComparison) {
-  int x = 123;
-  CountingRawPtr<int> ptr(&x);
-  std::set<CountingRawPtr<int>> set;
-
-  RawPtrCountingImpl::ClearCounters();
-  set.emplace(&x);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 1,
-                  // Nothing to compare to yet.
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 0,
-                  .wrapped_ptr_less_cnt = 0,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-  set.emplace(ptr);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  // 2 items to compare to => 4 calls.
-                  .get_for_comparison_cnt = 4,
-                  // 1 element to compare to => 2 calls.
-                  .wrapped_ptr_less_cnt = 2,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-  set.count(&x);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  // 2 comparisons => 2 GetForComparison() calls. Fewer than
-                  // before, because this time a raw pointer is one side of
-                  // the comparison.
-                  .get_for_comparison_cnt = 2,
-                  // 2 items to compare to => 4 calls.
-                  .wrapped_ptr_less_cnt = 2,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-  set.count(ptr);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  // 2 comparisons => 4 GetForComparison() calls.
-                  .get_for_comparison_cnt = 4,
-                  // 2 items to compare to => 4 calls.
-                  .wrapped_ptr_less_cnt = 2,
-              }),
-              CountersMatch());
-}
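-
-// A rough model of the counters above, assuming a typical tree-based
-// std::set: every std::less<CountingRawPtr<int>> invocation bumps
-// wrapped_ptr_less_cnt once, and each *wrapped* operand of that comparison
-// costs one GetForComparison() call. Schematically:
-//
-//   get_for_comparison_cnt ==
-//       less_invocations * wrapped_operands_per_invocation
-//
-// which is why looking up a raw `int*` key (one wrapped operand) reports half
-// the comparisons of looking up a CountingRawPtr key (two wrapped operands).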
-
-TEST_F(RawPtrTest, ComparisonOperatorUsesGetForComparison) {
-  int x = 123;
-  CountingRawPtr<int> ptr(&x);
-
-  RawPtrCountingImpl::ClearCounters();
-  EXPECT_FALSE(ptr < ptr);
-  EXPECT_FALSE(ptr > ptr);
-  EXPECT_TRUE(ptr <= ptr);
-  EXPECT_TRUE(ptr >= ptr);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 8,
-                  // < is used directly, not std::less().
-                  .wrapped_ptr_less_cnt = 0,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-  EXPECT_FALSE(ptr < &x);
-  EXPECT_FALSE(ptr > &x);
-  EXPECT_TRUE(ptr <= &x);
-  EXPECT_TRUE(ptr >= &x);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 4,
-                  .wrapped_ptr_less_cnt = 0,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-  EXPECT_FALSE(&x < ptr);
-  EXPECT_FALSE(&x > ptr);
-  EXPECT_TRUE(&x <= ptr);
-  EXPECT_TRUE(&x >= ptr);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .wrap_raw_ptr_cnt = 0,
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 4,
-                  .wrapped_ptr_less_cnt = 0,
-              }),
-              CountersMatch());
-}
-
-// Two `raw_ptr`s with different Traits should still hit `GetForComparison()`
-// (as opposed to `GetForExtraction()`) in their comparison operators. We use
-// `CountingRawPtr` and `CountingRawPtrMayDangle` to contrast two different
-// Traits.
-TEST_F(RawPtrTest, OperatorsUseGetForComparison) {
-  int x = 123;
-  CountingRawPtr<int> ptr1 = &x;
-  CountingRawPtrMayDangle<int> ptr2 = &x;
-
-  RawPtrCountingImpl::ClearCounters();
-  RawPtrCountingMayDangleImpl::ClearCounters();
-
-  EXPECT_TRUE(ptr1 == ptr2);
-  EXPECT_FALSE(ptr1 != ptr2);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 2,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 2,
-              }),
-              CountersMatch());
-
-  EXPECT_FALSE(ptr1 < ptr2);
-  EXPECT_FALSE(ptr1 > ptr2);
-  EXPECT_TRUE(ptr1 <= ptr2);
-  EXPECT_TRUE(ptr1 >= ptr2);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 6,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = 6,
-              }),
-              CountersMatch());
-}
-
-// This test checks how the std library handles collections like
-// std::vector<raw_ptr<T>>.
-//
-// At the time this test was written, reallocating std::vector's storage (e.g.
-// when growing the vector) requires calling raw_ptr's destructor on the
-// old storage (after std::move-ing the data to the new storage).  In
-// the future we hope that TRIVIAL_ABI (or [[trivially_relocatable]]
-// proposed by P1144 [1]) will allow memcpy-ing the elements into the
-// new storage (without invoking destructors and move constructors
-// and/or move assignment operators).  At that point, the assert in the
-// test should be modified to capture the new, better behavior.
-//
-// In the meantime, this test serves as a basic correctness test that
-// ensures that raw_ptr<T> stored in a std::vector passes basic smoke
-// tests.
-//
-// [1]
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1144r5.html#wording-attribute
-TEST_F(RawPtrTest, TrivialRelocability) {
-  std::vector<CountingRawPtr<int>> vector;
-  int x = 123;
-
-  // See how many times raw_ptr's destructor is called when std::vector
-  // needs to increase its capacity and reallocate the internal vector
-  // storage (moving the raw_ptr elements).
-  RawPtrCountingImpl::ClearCounters();
-  size_t number_of_capacity_changes = 0;
-  do {
-    size_t previous_capacity = vector.capacity();
-    while (vector.capacity() == previous_capacity) {
-      vector.emplace_back(&x);
-    }
-    number_of_capacity_changes++;
-  } while (number_of_capacity_changes < 10);
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-  // TODO(lukasza): In the future (once C++ language and std library
-  // support custom trivially relocatable objects) this #if branch can
-  // be removed (keeping only the right long-term expectation from the
-  // #else branch).
-  EXPECT_NE(0, RawPtrCountingImpl::release_wrapped_ptr_cnt);
-#else
-  // This is the right long-term expectation.
-  //
-  // (This EXPECT_EQ assertion is slightly misleading in
-  // !USE_BACKUP_REF_PTR mode, because RawPtrNoOpImpl has a default
-  // destructor that doesn't go through
-  // RawPtrCountingImpl::ReleaseWrappedPtr.  Nevertheless, the spirit of
-  // the EXPECT_EQ is correct + the assertion should be true in the
-  // long-term.)
-  EXPECT_EQ(0, RawPtrCountingImpl::release_wrapped_ptr_cnt);
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
-        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-  // Basic smoke test that raw_ptr elements in a vector work okay.
-  for (const auto& elem : vector) {
-    EXPECT_EQ(elem.get(), &x);
-    EXPECT_EQ(*elem, x);
-  }
-
-  // Verification that release_wrapped_ptr_cnt does capture how many times the
-  // destructors are called (e.g. that it is not always zero).
-  RawPtrCountingImpl::ClearCounters();
-  size_t number_of_cleared_elements = vector.size();
-  vector.clear();
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-  EXPECT_EQ((int)number_of_cleared_elements,
-            RawPtrCountingImpl::release_wrapped_ptr_cnt);
-#else
-  // TODO(lukasza): !USE_BACKUP_REF_PTR / RawPtrNoOpImpl has a default
-  // destructor that doesn't go through
-  // RawPtrCountingImpl::ReleaseWrappedPtr.  So we can't really depend
-  // on `g_release_wrapped_ptr_cnt`.  This #else branch should be
-  // deleted once USE_BACKUP_REF_PTR is removed (e.g. once BackupRefPtr
-  // ships to the Stable channel).
-  EXPECT_EQ(0, RawPtrCountingImpl::release_wrapped_ptr_cnt);
-  std::ignore = number_of_cleared_elements;
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
-        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-}
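-
-// The cost being measured above is not raw_ptr-specific; any element type
-// with a non-trivial destructor pays it when std::vector reallocates. A
-// minimal sketch, using a hypothetical counting type:
-//
-//   struct CountsDtor {
-//     static int dtor_calls;
-//     ~CountsDtor() { ++dtor_calls; }
-//   };
-//   int CountsDtor::dtor_calls = 0;
-//
-//   std::vector<CountsDtor> v;
-//   v.reserve(1);
-//   v.emplace_back();  // Fits in the current capacity.
-//   v.emplace_back();  // Grows capacity: the old element is copied/moved to
-//                      // the new storage and then destroyed, so dtor_calls
-//                      // becomes 1.
-//
-// A trivially relocatable element type could let the library memcpy instead,
-// which is the long-term expectation spelled out in the #else branch above.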
-
-struct BaseStruct {
-  explicit BaseStruct(int a) : a(a) {}
-  virtual ~BaseStruct() = default;
-
-  int a;
-};
-
-struct DerivedType1 : public BaseStruct {
-  explicit DerivedType1(int a, int b) : BaseStruct(a), b(b) {}
-  int b;
-};
-
-struct DerivedType2 : public BaseStruct {
-  explicit DerivedType2(int a, int c) : BaseStruct(a), c(c) {}
-  int c;
-};
-
-TEST_F(RawPtrTest, DerivedStructsComparison) {
-  DerivedType1 derived_1(42, 84);
-  raw_ptr<DerivedType1> checked_derived1_ptr = &derived_1;
-  DerivedType2 derived_2(21, 10);
-  raw_ptr<DerivedType2> checked_derived2_ptr = &derived_2;
-
-  // Make sure that comparing a |DerivedType2*| to a |DerivedType1*| casted
-  // as a |BaseStruct*| doesn't cause CFI errors.
-  EXPECT_NE(checked_derived1_ptr,
-            static_cast<BaseStruct*>(checked_derived2_ptr.get()));
-  EXPECT_NE(static_cast<BaseStruct*>(checked_derived1_ptr.get()),
-            checked_derived2_ptr);
-}
-
-class PmfTestBase {
- public:
-  int MemFunc(char, double) const { return 11; }
-};
-
-class PmfTestDerived : public PmfTestBase {
- public:
-  using PmfTestBase::MemFunc;
-  int MemFunc(float, double) { return 22; }
-};
-
-TEST_F(RawPtrTest, WorksWithOptional) {
-  int x = 0;
-  absl::optional<raw_ptr<int>> maybe_int;
-  EXPECT_FALSE(maybe_int.has_value());
-
-  maybe_int = nullptr;
-  ASSERT_TRUE(maybe_int.has_value());
-  EXPECT_EQ(nullptr, maybe_int.value());
-
-  maybe_int = &x;
-  ASSERT_TRUE(maybe_int.has_value());
-  EXPECT_EQ(&x, maybe_int.value());
-}
-
-TEST_F(RawPtrTest, WorksWithVariant) {
-  int x = 100;
-  absl::variant<int, raw_ptr<int>> vary;
-  ASSERT_EQ(0u, vary.index());
-  EXPECT_EQ(0, absl::get<int>(vary));
-
-  vary = x;
-  ASSERT_EQ(0u, vary.index());
-  EXPECT_EQ(100, absl::get<int>(vary));
-
-  vary = nullptr;
-  ASSERT_EQ(1u, vary.index());
-  EXPECT_EQ(nullptr, absl::get<raw_ptr<int>>(vary));
-
-  vary = &x;
-  ASSERT_EQ(1u, vary.index());
-  EXPECT_EQ(&x, absl::get<raw_ptr<int>>(vary));
-}
-
-TEST_F(RawPtrTest, CrossKindConversion) {
-  int x = 123;
-  CountingRawPtr<int> ptr1 = &x;
-
-  RawPtrCountingImpl::ClearCounters();
-  RawPtrCountingMayDangleImpl::ClearCounters();
-
-  CountingRawPtrMayDangle<int> ptr2(ptr1);
-  CountingRawPtrMayDangle<int> ptr3(std::move(ptr1));  // Falls back to copy.
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_duplication_cnt = 2}),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 0, .wrap_raw_ptr_for_dup_cnt = 2}),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, CrossKindAssignment) {
-  int x = 123;
-  CountingRawPtr<int> ptr1 = &x;
-
-  RawPtrCountingImpl::ClearCounters();
-  RawPtrCountingMayDangleImpl::ClearCounters();
-
-  CountingRawPtrMayDangle<int> ptr2;
-  CountingRawPtrMayDangle<int> ptr3;
-  ptr2 = ptr1;
-  ptr3 = std::move(ptr1);  // Falls back to copy.
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_duplication_cnt = 2}),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 0, .wrap_raw_ptr_for_dup_cnt = 2}),
-              CountersMatch());
-}
-
-// Without the explicitly customized `raw_ptr::to_address()`,
-// `base::to_address()` will use the dereference operator. This is not
-// what we want; this test enforces extraction semantics for
-// `to_address()`.
-TEST_F(RawPtrTest, ToAddressDoesNotDereference) {
-  CountingRawPtr<int> ptr = nullptr;
-  int* raw = base::to_address(ptr);
-  std::ignore = raw;
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 1,
-                  .get_for_comparison_cnt = 0,
-                  .get_for_duplication_cnt = 0}),
-              CountersMatch());
-}
-
-TEST_F(RawPtrTest, ToAddressGivesBackRawAddress) {
-  int* raw = nullptr;
-  raw_ptr<int> miracle = raw;
-  EXPECT_EQ(base::to_address(raw), base::to_address(miracle));
-}
-
-void InOutParamFuncWithPointer(int* in, int** out) {
-  *out = in;
-}
-
-TEST_F(RawPtrTest, EphemeralRawAddrPointerPointer) {
-  int v1 = 123;
-  int v2 = 456;
-  raw_ptr<int> ptr = &v1;
-  // The pointer-to-pointer should point to a pointer other than the one
-  // stored inside raw_ptr.
-  EXPECT_NE(&ptr.AsEphemeralRawAddr(),
-            reinterpret_cast<int**>(std::addressof(ptr)));
-  // But inner pointer should point to the same address.
-  EXPECT_EQ(*&ptr.AsEphemeralRawAddr(), &v1);
-
-  // The inner pointer can be rewritten via the returned pointer-to-pointer.
-  *&ptr.AsEphemeralRawAddr() = &v2;
-  EXPECT_EQ(ptr.get(), &v2);
-  InOutParamFuncWithPointer(&v1, &ptr.AsEphemeralRawAddr());
-  EXPECT_EQ(ptr.get(), &v1);
-}
-
-void InOutParamFuncWithReference(int* in, int*& out) {
-  out = in;
-}
-
-TEST_F(RawPtrTest, EphemeralRawAddrPointerReference) {
-  int v1 = 123;
-  int v2 = 456;
-  raw_ptr<int> ptr = &v1;
-  // The pointer reference should refer to a pointer other than the one
-  // stored inside raw_ptr.
-  EXPECT_NE(&static_cast<int*&>(ptr.AsEphemeralRawAddr()),
-            reinterpret_cast<int**>(std::addressof(ptr)));
-  // But inner pointer should point to the same address.
-  EXPECT_EQ(static_cast<int*&>(ptr.AsEphemeralRawAddr()), &v1);
-
-  // The inner pointer can be rewritten via the returned pointer reference.
-  static_cast<int*&>(ptr.AsEphemeralRawAddr()) = &v2;
-  EXPECT_EQ(ptr.get(), &v2);
-  InOutParamFuncWithReference(&v1, ptr.AsEphemeralRawAddr());
-  EXPECT_EQ(ptr.get(), &v1);
-}
-
-}  // namespace
-
-namespace base::internal {
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
-    !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-void HandleOOM(size_t unused_size) {
-  LOG(FATAL) << "Out of memory";
-}
-
-class BackupRefPtrTest : public testing::Test {
- protected:
-  void SetUp() override {
-    // TODO(bartekn): Avoid using PartitionAlloc API directly. Switch to
-    // new/delete once PartitionAlloc Everywhere is fully enabled.
-    partition_alloc::PartitionAllocGlobalInit(HandleOOM);
-  }
-
-  partition_alloc::PartitionAllocator allocator_ =
-      partition_alloc::PartitionAllocator(partition_alloc::PartitionOptions{
-          .backup_ref_ptr =
-              partition_alloc::PartitionOptions::BackupRefPtr::kEnabled,
-          .memory_tagging = {.enabled =
-                                 base::CPU::GetInstanceNoAllocation().has_mte()
-                                     ? partition_alloc::PartitionOptions::
-                                           MemoryTagging::kEnabled
-                                     : partition_alloc::PartitionOptions::
-                                           MemoryTagging::kDisabled}});
-};
-
-TEST_F(BackupRefPtrTest, Basic) {
-  base::CPU cpu;
-
-  int* raw_ptr1 =
-      reinterpret_cast<int*>(allocator_.root()->Alloc(sizeof(int), ""));
-  // Use the actual raw_ptr implementation, not a test substitute, to
-  // exercise real PartitionAlloc paths.
-  raw_ptr<int, DisableDanglingPtrDetection> wrapped_ptr1 = raw_ptr1;
-
-  *raw_ptr1 = 42;
-  EXPECT_EQ(*raw_ptr1, *wrapped_ptr1);
-
-  allocator_.root()->Free(raw_ptr1);
-#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-  // In debug builds, the use-after-free should be caught immediately.
-  EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = *wrapped_ptr1, "");
-#else   // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-  if (cpu.has_mte()) {
-    // If the hardware supports MTE, the use-after-free should also be caught.
-    EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = *wrapped_ptr1, "");
-  } else {
-    // The allocation should be poisoned since there's a raw_ptr alive.
-    EXPECT_NE(*wrapped_ptr1, 42);
-  }
-
-  // The allocator should not be able to reuse the slot at this point.
-  void* raw_ptr2 = allocator_.root()->Alloc(sizeof(int), "");
-  EXPECT_NE(partition_alloc::UntagPtr(raw_ptr1),
-            partition_alloc::UntagPtr(raw_ptr2));
-  allocator_.root()->Free(raw_ptr2);
-
-  // When the last reference is released, the slot should become reusable.
-  wrapped_ptr1 = nullptr;
-  void* raw_ptr3 = allocator_.root()->Alloc(sizeof(int), "");
-  EXPECT_EQ(partition_alloc::UntagPtr(raw_ptr1),
-            partition_alloc::UntagPtr(raw_ptr3));
-  allocator_.root()->Free(raw_ptr3);
-#endif  // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
-}
-
-TEST_F(BackupRefPtrTest, ZeroSized) {
-  std::vector<raw_ptr<void>> ptrs;
-  // Use a reasonable number of elements to fill up the slot span.
-  for (int i = 0; i < 128 * 1024; ++i) {
-    // Constructing a raw_ptr instance from a zero-sized allocation should
-    // not result in a crash.
-    ptrs.emplace_back(allocator_.root()->Alloc(0, ""));
-  }
-}
-
-TEST_F(BackupRefPtrTest, EndPointer) {
-  // This test requires a fresh partition with an empty free list.
-  // Check multiple size buckets and levels of slot filling.
-  for (int size = 0; size < 1024; size += sizeof(void*)) {
-    // Creating a raw_ptr from an address right past the end of an allocation
-    // should not result in a crash or corrupt the free list.
-    char* raw_ptr1 =
-        reinterpret_cast<char*>(allocator_.root()->Alloc(size, ""));
-    raw_ptr<char, AllowPtrArithmetic> wrapped_ptr = raw_ptr1 + size;
-    wrapped_ptr = nullptr;
-    // We need to make two more allocations to turn the possible free list
-    // corruption into an observable crash.
-    char* raw_ptr2 =
-        reinterpret_cast<char*>(allocator_.root()->Alloc(size, ""));
-    char* raw_ptr3 =
-        reinterpret_cast<char*>(allocator_.root()->Alloc(size, ""));
-
-    // Similarly for operator+=.
-    char* raw_ptr4 =
-        reinterpret_cast<char*>(allocator_.root()->Alloc(size, ""));
-    wrapped_ptr = raw_ptr4;
-    wrapped_ptr += size;
-    wrapped_ptr = nullptr;
-    char* raw_ptr5 =
-        reinterpret_cast<char*>(allocator_.root()->Alloc(size, ""));
-    char* raw_ptr6 =
-        reinterpret_cast<char*>(allocator_.root()->Alloc(size, ""));
-
-    allocator_.root()->Free(raw_ptr1);
-    allocator_.root()->Free(raw_ptr2);
-    allocator_.root()->Free(raw_ptr3);
-    allocator_.root()->Free(raw_ptr4);
-    allocator_.root()->Free(raw_ptr5);
-    allocator_.root()->Free(raw_ptr6);
-  }
-}
-
-TEST_F(BackupRefPtrTest, QuarantinedBytes) {
-  uint64_t* raw_ptr1 = reinterpret_cast<uint64_t*>(
-      allocator_.root()->Alloc(sizeof(uint64_t), ""));
-  raw_ptr<uint64_t, DisableDanglingPtrDetection> wrapped_ptr1 = raw_ptr1;
-  EXPECT_EQ(allocator_.root()->total_size_of_brp_quarantined_bytes.load(
-                std::memory_order_relaxed),
-            0U);
-  EXPECT_EQ(allocator_.root()->total_count_of_brp_quarantined_slots.load(
-                std::memory_order_relaxed),
-            0U);
-
-  // Memory should get quarantined.
-  allocator_.root()->Free(raw_ptr1);
-  EXPECT_GT(allocator_.root()->total_size_of_brp_quarantined_bytes.load(
-                std::memory_order_relaxed),
-            0U);
-  EXPECT_EQ(allocator_.root()->total_count_of_brp_quarantined_slots.load(
-                std::memory_order_relaxed),
-            1U);
-
-  // A non-quarantined free should not affect
-  // total_size_of_brp_quarantined_bytes.
-  void* raw_ptr2 = allocator_.root()->Alloc(sizeof(uint64_t), "");
-  allocator_.root()->Free(raw_ptr2);
-
-  // Freeing quarantined memory should bring the size back down to zero.
-  wrapped_ptr1 = nullptr;
-  EXPECT_EQ(allocator_.root()->total_size_of_brp_quarantined_bytes.load(
-                std::memory_order_relaxed),
-            0U);
-  EXPECT_EQ(allocator_.root()->total_count_of_brp_quarantined_slots.load(
-                std::memory_order_relaxed),
-            0U);
-}
-
-void RunBackupRefPtrImplAdvanceTest(
-    partition_alloc::PartitionAllocator& allocator,
-    size_t requested_size) {
-  char* ptr = static_cast<char*>(allocator.root()->Alloc(requested_size, ""));
-  raw_ptr<char, AllowPtrArithmetic> protected_ptr = ptr;
-  protected_ptr += 123;
-  protected_ptr -= 123;
-  protected_ptr = protected_ptr + 123;
-  protected_ptr = protected_ptr - 123;
-  protected_ptr += requested_size / 2;
-  // An end-of-allocation address should not cause an error immediately, but
-  // it may result in the pointer being poisoned.
-  protected_ptr = protected_ptr + requested_size / 2;
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr = ' ', "");
-  protected_ptr -= 1;  // This brings the pointer back within
-                       // bounds, which causes the poison to be removed.
-  *protected_ptr = ' ';
-  protected_ptr += 1;  // Reposition pointer back past end of allocation.
-#endif
-  EXPECT_CHECK_DEATH(protected_ptr = protected_ptr + 1);
-  EXPECT_CHECK_DEATH(protected_ptr += 1);
-  EXPECT_CHECK_DEATH(++protected_ptr);
-
-  // Even though |protected_ptr| is already pointing to the end of the
-  // allocation, assign it explicitly to make sure the underlying implementation
-  // doesn't "switch" to the next slot.
-  protected_ptr = ptr + requested_size;
-  protected_ptr -= requested_size / 2;
-  protected_ptr = protected_ptr - requested_size / 2;
-  EXPECT_CHECK_DEATH(protected_ptr = protected_ptr - 1);
-  EXPECT_CHECK_DEATH(protected_ptr -= 1);
-  EXPECT_CHECK_DEATH(--protected_ptr);
-
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  // An array type that should be more than a third the size of the available
-  // memory for the allocation such that incrementing a pointer to this type
-  // twice causes it to point to a memory location that is too small to fit a
-  // complete element of this type.
-  typedef int OverThirdArray[200 / sizeof(int)];
-  raw_ptr<OverThirdArray> protected_arr_ptr =
-      reinterpret_cast<OverThirdArray*>(ptr);
-
-  protected_arr_ptr++;
-  **protected_arr_ptr = 4;
-  protected_arr_ptr++;
-  EXPECT_DEATH_IF_SUPPORTED(** protected_arr_ptr = 4, "");
-#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-
-  protected_ptr = nullptr;
-  allocator.root()->Free(ptr);
-}
-
-TEST_F(BackupRefPtrTest, Advance) {
-  // This requires some internal PartitionAlloc knowledge, but for the test to
-  // work well the allocation + extras have to fill out the entire slot. That's
-  // because PartitionAlloc doesn't know the exact allocation size and bases
-  // the guards on the slot size.
-  //
-  // A power of two is a safe choice for a slot size; it then has to be
-  // adjusted for extras.
-  size_t slot_size = 512;
-  size_t requested_size =
-      allocator_.root()->AdjustSizeForExtrasSubtract(slot_size);
-  // Verify that we're indeed filling up the slot.
-  ASSERT_EQ(
-      requested_size,
-      allocator_.root()->AllocationCapacityFromRequestedSize(requested_size));
-  RunBackupRefPtrImplAdvanceTest(allocator_, requested_size);
-
-  // We don't have the same worry for single-slot spans, as PartitionAlloc knows
-  // exactly where the allocation ends.
-  size_t raw_size = 300003;
-  ASSERT_GT(raw_size, partition_alloc::internal::MaxRegularSlotSpanSize());
-  ASSERT_LE(raw_size, partition_alloc::internal::kMaxBucketed);
-  requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(raw_size);
-  RunBackupRefPtrImplAdvanceTest(allocator_, requested_size);
-
-  // Same for direct map.
-  raw_size = 1001001;
-  ASSERT_GT(raw_size, partition_alloc::internal::kMaxBucketed);
-  requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(raw_size);
-  RunBackupRefPtrImplAdvanceTest(allocator_, requested_size);
-}
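-
-// A concrete (assumed) instance of the sizing logic above: if E bytes of BRP
-// extras (ref-count, cookie, etc.) are carved out of the slot, then
-//
-//   requested_size = AdjustSizeForExtrasSubtract(512) == 512 - E
-//   AllocationCapacityFromRequestedSize(512 - E) == 512 - E
-//
-// i.e. the usable region exactly fills the 512-byte slot, so the OOB checks
-// in RunBackupRefPtrImplAdvanceTest trigger at requested_size rather than at
-// some larger slot remainder.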
-
-TEST_F(BackupRefPtrTest, AdvanceAcrossPools) {
-  char array1[1000];
-  char array2[1000];
-
-  char* in_pool_ptr = static_cast<char*>(allocator_.root()->Alloc(123, ""));
-
-  raw_ptr<char, AllowPtrArithmetic> protected_ptr = array1;
-  // Nothing bad happens. Both pointers are outside of the BRP pool, so no
-  // checks are triggered.
-  protected_ptr += (array2 - array1);
-  // A pointer is shifted from outside of the BRP pool into the BRP pool. This
-  // should trigger death to avoid ending up with an untracked pointer inside
-  // the BRP pool.
-  EXPECT_CHECK_DEATH(protected_ptr += (in_pool_ptr - array2));
-
-  protected_ptr = in_pool_ptr;
-  // Same when a pointer is shifted from inside the BRP pool out of it.
-  EXPECT_CHECK_DEATH(protected_ptr += (array1 - in_pool_ptr));
-
-  protected_ptr = nullptr;
-  allocator_.root()->Free(in_pool_ptr);
-}
-
-TEST_F(BackupRefPtrTest, GetDeltaElems) {
-  size_t requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(512);
-  char* ptr1 = static_cast<char*>(allocator_.root()->Alloc(requested_size, ""));
-  char* ptr2 = static_cast<char*>(allocator_.root()->Alloc(requested_size, ""));
-  ASSERT_LT(ptr1, ptr2);  // There should be a ref-count between slots.
-  raw_ptr<char> protected_ptr1 = ptr1;
-  raw_ptr<char> protected_ptr1_2 = ptr1 + 1;
-  raw_ptr<char> protected_ptr1_3 = ptr1 + requested_size - 1;
-  raw_ptr<char> protected_ptr1_4 = ptr1 + requested_size;
-  raw_ptr<char> protected_ptr2 = ptr2;
-  raw_ptr<char> protected_ptr2_2 = ptr2 + 1;
-
-  EXPECT_EQ(protected_ptr1_2 - protected_ptr1, 1);
-  EXPECT_EQ(protected_ptr1 - protected_ptr1_2, -1);
-  EXPECT_EQ(protected_ptr1_3 - protected_ptr1,
-            checked_cast<ptrdiff_t>(requested_size) - 1);
-  EXPECT_EQ(protected_ptr1 - protected_ptr1_3,
-            -checked_cast<ptrdiff_t>(requested_size) + 1);
-  EXPECT_EQ(protected_ptr1_4 - protected_ptr1,
-            checked_cast<ptrdiff_t>(requested_size));
-  EXPECT_EQ(protected_ptr1 - protected_ptr1_4,
-            -checked_cast<ptrdiff_t>(requested_size));
-#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
-  EXPECT_CHECK_DEATH(protected_ptr2 - protected_ptr1);
-  EXPECT_CHECK_DEATH(protected_ptr1 - protected_ptr2);
-  EXPECT_CHECK_DEATH(protected_ptr2 - protected_ptr1_4);
-  EXPECT_CHECK_DEATH(protected_ptr1_4 - protected_ptr2);
-  EXPECT_CHECK_DEATH(protected_ptr2_2 - protected_ptr1);
-  EXPECT_CHECK_DEATH(protected_ptr1 - protected_ptr2_2);
-  EXPECT_CHECK_DEATH(protected_ptr2_2 - protected_ptr1_4);
-  EXPECT_CHECK_DEATH(protected_ptr1_4 - protected_ptr2_2);
-#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
-  EXPECT_EQ(protected_ptr2_2 - protected_ptr2, 1);
-  EXPECT_EQ(protected_ptr2 - protected_ptr2_2, -1);
-
-  protected_ptr1 = nullptr;
-  protected_ptr1_2 = nullptr;
-  protected_ptr1_3 = nullptr;
-  protected_ptr1_4 = nullptr;
-  protected_ptr2 = nullptr;
-  protected_ptr2_2 = nullptr;
-
-  allocator_.root()->Free(ptr1);
-  allocator_.root()->Free(ptr2);
-}
-
-bool IsQuarantineEmpty(partition_alloc::PartitionAllocator& allocator) {
-  return allocator.root()->total_size_of_brp_quarantined_bytes.load(
-             std::memory_order_relaxed) == 0;
-}
-
-struct BoundRawPtrTestHelper {
-  static BoundRawPtrTestHelper* Create(
-      partition_alloc::PartitionAllocator& allocator) {
-    return new (allocator.root()->Alloc(sizeof(BoundRawPtrTestHelper), ""))
-        BoundRawPtrTestHelper(allocator);
-  }
-
-  explicit BoundRawPtrTestHelper(partition_alloc::PartitionAllocator& allocator)
-      : owning_allocator(allocator),
-        once_callback(
-            BindOnce(&BoundRawPtrTestHelper::DeleteItselfAndCheckIfInQuarantine,
-                     Unretained(this))),
-        repeating_callback(BindRepeating(
-            &BoundRawPtrTestHelper::DeleteItselfAndCheckIfInQuarantine,
-            Unretained(this))) {}
-
-  void DeleteItselfAndCheckIfInQuarantine() {
-    auto& allocator = *owning_allocator;
-    EXPECT_TRUE(IsQuarantineEmpty(allocator));
-
-    // Since we use a non-default partition, `delete` has to be simulated.
-    this->~BoundRawPtrTestHelper();
-    allocator.root()->Free(this);
-
-    EXPECT_FALSE(IsQuarantineEmpty(allocator));
-  }
-
-  const raw_ref<partition_alloc::PartitionAllocator> owning_allocator;
-  OnceClosure once_callback;
-  RepeatingClosure repeating_callback;
-};
-
-// Check that bound callback arguments remain protected by BRP for the
-// entire duration of a callback invocation.
-TEST_F(BackupRefPtrTest, Bind) {
-  // This test requires a separate partition; otherwise, unrelated allocations
-  // might interfere with `IsQuarantineEmpty`.
-  auto* object_for_once_callback1 = BoundRawPtrTestHelper::Create(allocator_);
-  std::move(object_for_once_callback1->once_callback).Run();
-  EXPECT_TRUE(IsQuarantineEmpty(allocator_));
-
-  auto* object_for_repeating_callback1 =
-      BoundRawPtrTestHelper::Create(allocator_);
-  std::move(object_for_repeating_callback1->repeating_callback).Run();
-  EXPECT_TRUE(IsQuarantineEmpty(allocator_));
-
-  // `RepeatingCallback` has both lvalue and rvalue versions of `Run`.
-  auto* object_for_repeating_callback2 =
-      BoundRawPtrTestHelper::Create(allocator_);
-  object_for_repeating_callback2->repeating_callback.Run();
-  EXPECT_TRUE(IsQuarantineEmpty(allocator_));
-}
-
-#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
-TEST_F(BackupRefPtrTest, ReinterpretCast) {
-  void* ptr = allocator_.root()->Alloc(16, "");
-  allocator_.root()->Free(ptr);
-
-  raw_ptr<void>* wrapped_ptr = reinterpret_cast<raw_ptr<void>*>(&ptr);
-  // The reference count cookie check should detect that the allocation has
-  // already been freed.
-  BASE_EXPECT_DEATH(*wrapped_ptr = nullptr, "");
-}
-#endif
-
-namespace {
-
-// Install dangling raw_ptr handlers and restore them when going out of scope.
-class ScopedInstallDanglingRawPtrChecks {
- public:
-  ScopedInstallDanglingRawPtrChecks() {
-    enabled_feature_list_.InitWithFeaturesAndParameters(
-        {{features::kPartitionAllocDanglingPtr, {{"mode", "crash"}}}},
-        {/* disabled_features */});
-    old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
-    old_dereferenced_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
-    allocator::InstallDanglingRawPtrChecks();
-  }
-  ~ScopedInstallDanglingRawPtrChecks() {
-    partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
-    partition_alloc::SetDanglingRawPtrReleasedFn(old_dereferenced_fn_);
-  }
-
- private:
-  test::ScopedFeatureList enabled_feature_list_;
-  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
-  partition_alloc::DanglingRawPtrReleasedFn* old_dereferenced_fn_;
-};
-
-}  // namespace
-
-TEST_F(BackupRefPtrTest, RawPtrMayDangle) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  void* ptr = allocator_.root()->Alloc(16, "");
-  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr = ptr;
-  allocator_.root()->Free(ptr);  // No dangling raw_ptr reported.
-  dangling_ptr = nullptr;        // No dangling raw_ptr reported.
-}
-
-TEST_F(BackupRefPtrTest, RawPtrNotDangling) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  void* ptr = allocator_.root()->Alloc(16, "");
-  raw_ptr<void> dangling_ptr = ptr;
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
-    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-  BASE_EXPECT_DEATH(
-      {
-        allocator_.root()->Free(ptr);  // Dangling raw_ptr detected.
-        dangling_ptr = nullptr;        // Dangling raw_ptr released.
-      },
-      AllOf(HasSubstr("Detected dangling raw_ptr"),
-            HasSubstr("The memory was freed at:"),
-            HasSubstr("The dangling raw_ptr was released at:")));
-#else
-  allocator_.root()->Free(ptr);
-  dangling_ptr = nullptr;
-#endif
-}
-
-// Check the comparator operators work, even across raw_ptr with different
-// dangling policies.
-TEST_F(BackupRefPtrTest, DanglingPtrComparison) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  void* ptr_1 = allocator_.root()->Alloc(16, "");
-  void* ptr_2 = allocator_.root()->Alloc(16, "");
-
-  if (ptr_1 > ptr_2) {
-    std::swap(ptr_1, ptr_2);
-  }
-
-  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr_1 = ptr_1;
-  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr_2 = ptr_2;
-  raw_ptr<void> not_dangling_ptr_1 = ptr_1;
-  raw_ptr<void> not_dangling_ptr_2 = ptr_2;
-
-  EXPECT_EQ(dangling_ptr_1, not_dangling_ptr_1);
-  EXPECT_EQ(dangling_ptr_2, not_dangling_ptr_2);
-  EXPECT_NE(dangling_ptr_1, not_dangling_ptr_2);
-  EXPECT_NE(dangling_ptr_2, not_dangling_ptr_1);
-  EXPECT_LT(dangling_ptr_1, not_dangling_ptr_2);
-  EXPECT_GT(dangling_ptr_2, not_dangling_ptr_1);
-  EXPECT_LT(not_dangling_ptr_1, dangling_ptr_2);
-  EXPECT_GT(not_dangling_ptr_2, dangling_ptr_1);
-
-  not_dangling_ptr_1 = nullptr;
-  not_dangling_ptr_2 = nullptr;
-
-  allocator_.root()->Free(ptr_1);
-  allocator_.root()->Free(ptr_2);
-}
-
-// Check the assignment operator works, even across raw_ptr with different
-// dangling policies (only `not dangling` -> `dangling` direction is supported).
-TEST_F(BackupRefPtrTest, DanglingPtrAssignment) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  void* ptr = allocator_.root()->Alloc(16, "");
-
-  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr;
-  raw_ptr<void> not_dangling_ptr;
-
-  not_dangling_ptr = ptr;
-  dangling_ptr = not_dangling_ptr;
-  not_dangling_ptr = nullptr;
-
-  allocator_.root()->Free(ptr);
-
-  dangling_ptr = nullptr;
-}
-
-// Check the copy constructor works, even across raw_ptr with different dangling
-// policies (only `not dangling` -> `dangling` direction is supported).
-TEST_F(BackupRefPtrTest, DanglingPtrCopyContructor) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  void* ptr = allocator_.root()->Alloc(16, "");
-
-  raw_ptr<void> not_dangling_ptr(ptr);
-  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr(not_dangling_ptr);
-
-  not_dangling_ptr = nullptr;
-  dangling_ptr = nullptr;
-
-  allocator_.root()->Free(ptr);
-}
-
-TEST_F(BackupRefPtrTest, RawPtrExtractAsDangling) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  raw_ptr<int> ptr =
-      static_cast<int*>(allocator_.root()->Alloc(sizeof(int), ""));
-  allocator_.root()->Free(
-      ptr.ExtractAsDangling());  // No dangling raw_ptr reported.
-  EXPECT_EQ(ptr, nullptr);
-}
-
-TEST_F(BackupRefPtrTest, RawPtrDeleteWithoutExtractAsDangling) {
-  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
-
-  raw_ptr<int> ptr =
-      static_cast<int*>(allocator_.root()->Alloc(sizeof(int), ""));
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
-    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-  BASE_EXPECT_DEATH(
-      {
-        allocator_.root()->Free(ptr.get());  // Dangling raw_ptr detected.
-        ptr = nullptr;                       // Dangling raw_ptr released.
-      },
-      AllOf(HasSubstr("Detected dangling raw_ptr"),
-            HasSubstr("The memory was freed at:"),
-            HasSubstr("The dangling raw_ptr was released at:")));
-#else
-  allocator_.root()->Free(ptr.get());
-  ptr = nullptr;
-#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
-        // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
-}
-
-TEST_F(BackupRefPtrTest, SpatialAlgoCompat) {
-  size_t slot_size = 512;
-  size_t requested_size =
-      allocator_.root()->AdjustSizeForExtrasSubtract(slot_size);
-  // Verify that we're indeed filling up the slot.
-  ASSERT_EQ(
-      requested_size,
-      allocator_.root()->AllocationCapacityFromRequestedSize(requested_size));
-  size_t requested_elements = requested_size / sizeof(int);
-
-  int* ptr =
-      reinterpret_cast<int*>(allocator_.root()->Alloc(requested_size, ""));
-  int* ptr_end = ptr + requested_elements;
-
-  CountingRawPtr<int> protected_ptr = ptr;
-  CountingRawPtr<int> protected_ptr_end = protected_ptr + requested_elements;
-
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr_end = 1, "");
-#endif
-
-  RawPtrCountingImpl::ClearCounters();
-
-  int gen_val = 1;
-  std::generate(protected_ptr, protected_ptr_end, [&gen_val]() {
-    gen_val ^= gen_val + 1;
-    return gen_val;
-  });
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = requested_elements,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = (requested_elements + 1) * 2,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-
-  for (CountingRawPtr<int> protected_ptr_i = protected_ptr;
-       protected_ptr_i < protected_ptr_end; protected_ptr_i++) {
-    *protected_ptr_i ^= *protected_ptr_i + 1;
-  }
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = requested_elements * 2,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = (requested_elements + 1) * 2,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-
-  for (CountingRawPtr<int> protected_ptr_i = protected_ptr;
-       protected_ptr_i < ptr_end; protected_ptr_i++) {
-    *protected_ptr_i ^= *protected_ptr_i + 1;
-  }
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = requested_elements * 2,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = requested_elements + 1,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-
-  for (int* ptr_i = ptr; ptr_i < protected_ptr_end; ptr_i++) {
-    *ptr_i ^= *ptr_i + 1;
-  }
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_comparison_cnt = requested_elements + 1,
-              }),
-              CountersMatch());
-
-  RawPtrCountingImpl::ClearCounters();
-
-  size_t iter_cnt = 0;
-  for (int *ptr_i = protected_ptr, *ptr_i_end = protected_ptr_end;
-       ptr_i < ptr_i_end; ptr_i++) {
-    *ptr_i ^= *ptr_i + 1;
-    iter_cnt++;
-  }
-  EXPECT_EQ(iter_cnt, requested_elements);
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 2,
-                  .get_for_comparison_cnt = 0,
-              }),
-              CountersMatch());
-
-  protected_ptr = nullptr;
-  protected_ptr_end = nullptr;
-  allocator_.root()->Free(ptr);
-}
-
-#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-TEST_F(BackupRefPtrTest, Duplicate) {
-  size_t requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(512);
-  char* ptr = static_cast<char*>(allocator_.root()->Alloc(requested_size, ""));
-  raw_ptr<char> protected_ptr1 = ptr;
-  protected_ptr1 += requested_size;  // Pointer should now be poisoned.
-
-  // Duplicating a poisoned pointer should be allowed.
-  raw_ptr<char> protected_ptr2 = protected_ptr1;
-
-  // The poison bit should be propagated to the duplicate such that the OOB
-  // access is disallowed:
-  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr2 = ' ', "");
-
-  // Assignment from a poisoned pointer should be allowed.
-  raw_ptr<char> protected_ptr3;
-  protected_ptr3 = protected_ptr1;
-
-  // The poison bit should be propagated via the assignment such that the OOB
-  // access is disallowed:
-  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr3 = ' ', "");
-
-  allocator_.root()->Free(ptr);
-}
-#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
-
-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-TEST_F(BackupRefPtrTest, WriteAfterFree) {
-  constexpr uint64_t kPayload = 0x1234567890ABCDEF;
-
-  raw_ptr<uint64_t, DisableDanglingPtrDetection> ptr =
-      static_cast<uint64_t*>(allocator_.root()->Alloc(sizeof(uint64_t), ""));
-
-  // Now |ptr| should be quarantined.
-  allocator_.root()->Free(ptr);
-
-  EXPECT_DEATH_IF_SUPPORTED(
-      {
-        // Write something different from |kQuarantinedByte|.
-        *ptr = kPayload;
-        // Write-after-Free should lead to a crash
-        // in |PartitionAllocFreeForRefCounting|.
-        ptr = nullptr;
-      },
-      "");
-}
-#endif  // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-
-namespace {
-constexpr uint8_t kCustomQuarantineByte = 0xff;
-static_assert(kCustomQuarantineByte !=
-              partition_alloc::internal::kQuarantinedByte);
-
-void CustomQuarantineHook(void* address, size_t size) {
-  partition_alloc::internal::SecureMemset(address, kCustomQuarantineByte, size);
-}
-}  // namespace
-
-TEST_F(BackupRefPtrTest, QuarantineHook) {
-  partition_alloc::PartitionAllocHooks::SetQuarantineOverrideHook(
-      CustomQuarantineHook);
-  uint8_t* native_ptr =
-      static_cast<uint8_t*>(allocator_.root()->Alloc(sizeof(uint8_t), ""));
-  *native_ptr = 0;
-  {
-    raw_ptr<uint8_t, DisableDanglingPtrDetection> smart_ptr = native_ptr;
-
-    allocator_.root()->Free(smart_ptr);
-    // Access the allocation through the native pointer to avoid triggering
-    // dereference checks in debug builds.
-    EXPECT_EQ(*partition_alloc::internal::TagPtr(native_ptr),
-              kCustomQuarantineByte);
-
-    // Leaving the quarantined allocation filled with |kCustomQuarantineByte|
-    // can cause a crash, because a DCHECK expects it to be filled with
-    // |kQuarantinedByte|. We need to ensure it is unquarantined before
-    // unregistering the hook.
-  }  // <- unquarantined here
-
-  partition_alloc::PartitionAllocHooks::SetQuarantineOverrideHook(nullptr);
-}
-
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
-        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-#if BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-namespace {
-#define FOR_EACH_RAW_PTR_OPERATION(F) \
-  F(wrap_ptr)                         \
-  F(release_wrapped_ptr)              \
-  F(safely_unwrap_for_dereference)    \
-  F(safely_unwrap_for_extraction)     \
-  F(unsafely_unwrap_for_comparison)   \
-  F(advance)                          \
-  F(duplicate)                        \
-  F(wrap_ptr_for_duplication)         \
-  F(unsafely_unwrap_for_duplication)
-
-// Can't use gMock to count the number of invocations because
-// gMock itself triggers raw_ptr<T> operations.
-struct CountingHooks {
-  void ResetCounts() {
-#define F(name) name##_count = 0;
-    FOR_EACH_RAW_PTR_OPERATION(F)
-#undef F
-  }
-
-  static CountingHooks* Get() {
-    static thread_local CountingHooks instance;
-    return &instance;
-  }
-
-// The adapter method is templated to accept any number of arguments.
-#define F(name)                      \
-  template <typename... T>           \
-  static void name##_adapter(T...) { \
-    Get()->name##_count++;           \
-  }                                  \
-  size_t name##_count = 0;
-  FOR_EACH_RAW_PTR_OPERATION(F)
-#undef F
-};
-
-constexpr RawPtrHooks raw_ptr_hooks{
-#define F(name) .name = CountingHooks::name##_adapter,
-    FOR_EACH_RAW_PTR_OPERATION(F)
-#undef F
-};
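-
-// For reference (explanatory note, not part of the original file), the X-macro
-// above expands, for e.g. `wrap_ptr`, to roughly:
-//
-//   template <typename... T>
-//   static void wrap_ptr_adapter(T...) { Get()->wrap_ptr_count++; }
-//   size_t wrap_ptr_count = 0;
-//
-// and the hooks table entry to `.wrap_ptr = CountingHooks::wrap_ptr_adapter,`.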
-}  // namespace
-
-class HookableRawPtrImplTest : public testing::Test {
- protected:
-  void SetUp() override { InstallRawPtrHooks(&raw_ptr_hooks); }
-  void TearDown() override { ResetRawPtrHooks(); }
-};
-
-TEST_F(HookableRawPtrImplTest, WrapPtr) {
-  // Can't call `ResetCounts` in `SetUp` because gTest triggers
-  // raw_ptr<T> operations between `SetUp` and the test body.
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    [[maybe_unused]] raw_ptr<int> interesting_ptr = ptr;
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->wrap_ptr_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, ReleaseWrappedPtr) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    [[maybe_unused]] raw_ptr<int> interesting_ptr = ptr;
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->release_wrapped_ptr_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, SafelyUnwrapForDereference) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    raw_ptr<int> interesting_ptr = ptr;
-    *interesting_ptr = 1;
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->safely_unwrap_for_dereference_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, SafelyUnwrapForExtraction) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    raw_ptr<int> interesting_ptr = ptr;
-    ptr = interesting_ptr;
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->safely_unwrap_for_extraction_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, UnsafelyUnwrapForComparison) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    raw_ptr<int> interesting_ptr = ptr;
-    EXPECT_EQ(interesting_ptr, ptr);
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->unsafely_unwrap_for_comparison_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, Advance) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int[10];
-    raw_ptr<int, AllowPtrArithmetic> interesting_ptr = ptr;
-    interesting_ptr += 1;
-    delete[] ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->advance_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, Duplicate) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    raw_ptr<int> interesting_ptr = ptr;
-    raw_ptr<int> interesting_ptr2 = interesting_ptr;
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->duplicate_count, 1u);
-}
-
-TEST_F(HookableRawPtrImplTest, CrossKindCopyConstruction) {
-  CountingHooks::Get()->ResetCounts();
-  {
-    int* ptr = new int;
-    raw_ptr<int> non_dangling_ptr = ptr;
-    raw_ptr<int, RawPtrTraits::kMayDangle> dangling_ptr(non_dangling_ptr);
-    delete ptr;
-  }
-  EXPECT_EQ(CountingHooks::Get()->duplicate_count, 0u);
-  EXPECT_EQ(CountingHooks::Get()->wrap_ptr_for_duplication_count, 1u);
-  EXPECT_EQ(CountingHooks::Get()->unsafely_unwrap_for_duplication_count, 1u);
-}
-
-#endif  // BUILDFLAG(USE_HOOKABLE_RAW_PTR)
-
-TEST(DanglingPtrTest, DetectAndReset) {
-  auto instrumentation = test::DanglingPtrInstrumentation::Create();
-  if (!instrumentation.has_value()) {
-    GTEST_SKIP() << instrumentation.error();
-  }
-
-  auto owned_ptr = std::make_unique<int>(42);
-  raw_ptr<int> dangling_ptr = owned_ptr.get();
-  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
-  EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
-  owned_ptr.reset();
-  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-  EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
-  dangling_ptr = nullptr;
-  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
-}
-
-TEST(DanglingPtrTest, DetectAndDestructor) {
-  auto instrumentation = test::DanglingPtrInstrumentation::Create();
-  if (!instrumentation.has_value()) {
-    GTEST_SKIP() << instrumentation.error();
-  }
-
-  auto owned_ptr = std::make_unique<int>(42);
-  {
-    [[maybe_unused]] raw_ptr<int> dangling_ptr = owned_ptr.get();
-    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
-    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
-    owned_ptr.reset();
-    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
-  }
-  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
-}
-
-TEST(DanglingPtrTest, DetectResetAndDestructor) {
-  auto instrumentation = test::DanglingPtrInstrumentation::Create();
-  if (!instrumentation.has_value()) {
-    GTEST_SKIP() << instrumentation.error();
-  }
-
-  auto owned_ptr = std::make_unique<int>(42);
-  {
-    raw_ptr<int> dangling_ptr = owned_ptr.get();
-    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
-    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
-    owned_ptr.reset();
-    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
-    dangling_ptr = nullptr;
-    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-    EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
-  }
-  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
-  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
-}
-
-}  // namespace base::internal
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_unittest.nc b/base/allocator/partition_allocator/pointers/raw_ptr_unittest.nc
deleted file mode 100644
index 22aede1..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ptr_unittest.nc
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include <memory>
-#include <tuple>  // for std::ignore
-#include <type_traits>  // for std::remove_pointer_t
-
-#include "base/functional/bind.h"
-#include "base/functional/callback.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-namespace {
-
-struct Producer {};
-struct DerivedProducer : Producer {};
-struct OtherDerivedProducer : Producer {};
-struct Unrelated {};
-struct DerivedUnrelated : Unrelated {};
-struct PmfTest {
- public:
-  int Func(char, double) const { return 11; }
-};
-
-#if defined(NCTEST_INVALID_RAW_PTR_TRAIT)  // [r"Unknown raw_ptr trait\(s\)"]
-
-void WontCompile() {
-  constexpr auto InvalidRawPtrTrait =
-      ~base::RawPtrTraits::kEmpty;
-  raw_ptr<int, InvalidRawPtrTrait> p;
-}
-
-#elif defined(NCTEST_INVALID_RAW_PTR_TRAIT_OF_MANY)  // [r"Unknown raw_ptr trait\(s\)"]
-
-void WontCompile() {
-  constexpr auto InvalidRawPtrTrait = ~base::RawPtrTraits::kEmpty;
-  raw_ptr<int, DisableDanglingPtrDetection | InvalidRawPtrTrait>
-      p;
-}
-
-#elif defined(NCTEST_AUTO_DOWNCAST)  // [r"no viable conversion from 'raw_ptr<Producer>' to 'raw_ptr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  raw_ptr<Producer> ptr = &f;
-  raw_ptr<DerivedProducer> derived_ptr = ptr;
-}
-
-#elif defined(NCTEST_STATIC_DOWNCAST)  // [r"no matching conversion for static_cast from 'raw_ptr<Producer>' to 'raw_ptr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  raw_ptr<Producer> ptr = &f;
-  raw_ptr<DerivedProducer> derived_ptr =
-      static_cast<raw_ptr<DerivedProducer>>(ptr);
-}
-
-#elif defined(NCTEST_AUTO_REF_DOWNCAST)  // [r"non-const lvalue reference to type 'raw_ptr<DerivedProducer>' cannot bind to a value of unrelated type 'raw_ptr<Producer>'"]
-
-void WontCompile() {
-  Producer f;
-  raw_ptr<Producer> ptr = &f;
-  raw_ptr<DerivedProducer>& derived_ptr = ptr;
-}
-
-#elif defined(NCTEST_STATIC_REF_DOWNCAST)  // [r"non-const lvalue reference to type 'raw_ptr<DerivedProducer>' cannot bind to a value of unrelated type 'raw_ptr<Producer>'"]
-
-void WontCompile() {
-  Producer f;
-  raw_ptr<Producer> ptr = &f;
-  raw_ptr<DerivedProducer>& derived_ptr =
-      static_cast<raw_ptr<DerivedProducer>&>(ptr);
-}
-
-#elif defined(NCTEST_AUTO_DOWNCAST_FROM_RAW) // [r"no viable conversion from 'Producer \*' to 'raw_ptr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  raw_ptr<DerivedProducer> ptr = &f;
-}
-
-#elif defined(NCTEST_UNRELATED_FROM_RAW) // [r"no viable conversion from 'DerivedProducer \*' to 'raw_ptr<Unrelated>'"]
-
-void WontCompile() {
-  DerivedProducer f;
-  raw_ptr<Unrelated> ptr = &f;
-}
-
-#elif defined(NCTEST_UNRELATED_STATIC_FROM_WRAPPED) // [r"static_cast from '\(anonymous namespace\)::DerivedProducer \*' to '\(anonymous namespace\)::Unrelated \*', which are not related by inheritance, is not allowed"]
-
-void WontCompile() {
-  DerivedProducer f;
-  raw_ptr<DerivedProducer> ptr = &f;
-  std::ignore = static_cast<Unrelated*>(ptr);
-}
-
-#elif defined(NCTEST_VOID_DEREFERENCE) // [r"indirection requires pointer operand \('raw_ptr<const void>' invalid\)"]
-
-void WontCompile() {
-  const char foo[] = "42";
-  raw_ptr<const void> ptr = foo;
-  std::ignore = *ptr;
-}
-
-#elif defined(NCTEST_FUNCTION_POINTER) // [r"raw_ptr<T> doesn't work with this kind of pointee type T"]
-
-void WontCompile() {
-  raw_ptr<void(int)> raw_ptr_var;
-  std::ignore = raw_ptr_var.get();
-}
-
-#elif defined(NCTEST_POINTER_TO_MEMBER) // [r"overload resolution selected deleted operator '->\*'"]
-
-void WontCompile() {
-  PmfTest object;
-  int (PmfTest::*pmf_func)(char, double) const = &PmfTest::Func;
-
-  raw_ptr<PmfTest> object_ptr = &object;
-  std::ignore = object_ptr->*pmf_func;
-}
-
-#elif defined(NCTEST_DANGLING_GSL) // [r"object backing the pointer will be destroyed at the end of the full-expression"]
-
-void WontCompile() {
-  [[maybe_unused]] raw_ptr<int> ptr = std::make_unique<int>(2).get();
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_PARAMETER) // [r"base::Bind\(\) target functor has a parameter of type raw_ptr<T>."]
-
-void WontCompile() {
-  raw_ptr<int> ptr = new int(3);
-
-  // Make sure that we are not allowed to bind a function with a raw_ptr<T>
-  // parameter type.
-  auto callback = base::BindOnce(
-      [](raw_ptr<int> ptr) {
-      },
-      ptr);
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_EQUALS_STRUCT) // [r"no viable overloaded '\+='"]
-
-void WontCompile() {
-  raw_ptr<int> ptr = new int(3);
-  struct {} s;
-  ptr += s;
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_EQUALS_STRUCT) // [r"no viable overloaded '-='"]
-
-void WontCompile() {
-  raw_ptr<int> ptr = new int(3);
-  struct {} s;
-  ptr -= s;
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_STRUCT) // [r"no viable overloaded '\+='"]
-
-void WontCompile() {
-  raw_ptr<int> ptr = new int(3);
-  struct {} s;
-  // Note, operator + exists, but it calls += which doesn't.
-  [[maybe_unused]] raw_ptr<int> ptr2 = ptr + s;
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_STRUCT) // [r"no viable overloaded '-='"]
-
-void WontCompile() {
-  raw_ptr<int> ptr = new int(3);
-  struct {} s;
-  // Note, operator - exists, but it calls -= which doesn't.
-  [[maybe_unused]] raw_ptr<int> ptr2 = ptr - s;
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_EQUALS_UINT64) // [r"no viable overloaded '\+='"]
-
-void WontCompile() {
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  raw_ptr<int> ptr = new int(3);
-  ptr += uint64_t{2};
-#else
-  // Fake error on 64-bit to match the expectation.
-  static_assert(false, "no viable overloaded '+='");
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_EQUALS_UINT64) // [r"no viable overloaded '-='"]
-
-void WontCompile() {
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  raw_ptr<int> ptr = new int(3);
-  ptr -= uint64_t{2};
-#else
-  // Fake error on 64-bit to match the expectation.
-  static_assert(false, "no viable overloaded '-='");
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_UINT64) // [r"no viable overloaded '\+='"]
-
-void WontCompile() {
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  raw_ptr<int> ptr = new int(3);
-  // Note, operator + exists, but it calls += which doesn't.
-  [[maybe_unused]] raw_ptr<int> ptr2 = ptr + uint64_t{2};
-#else
-  // Fake error on 64-bit to match the expectation.
-  static_assert(false, "no viable overloaded '+='");
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-}
-
-#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_UINT64) // [r"no viable overloaded '-='"]
-
-void WontCompile() {
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  raw_ptr<int> ptr = new int(3);
-  // Note, operator - exists, but it calls -= which doesn't.
-  [[maybe_unused]] raw_ptr<int> ptr2 = ptr - uint64_t{2};
-#else
-  // Fake error on 64-bit to match the expectation.
-  static_assert(false, "no viable overloaded '-='");
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-}
-
-#elif defined(NCTEST_CROSS_KIND_CONVERSION_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)0U == \(\(base::RawPtrTraits\)1U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
-  [[maybe_unused]] raw_ptr<int> ptr2(ptr);
-}
-
-#elif defined(NCTEST_CROSS_KIND_CONVERSION_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)0U == \(\(base::RawPtrTraits\)1U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
-  [[maybe_unused]] raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2(ptr);
-}
-
-#elif defined(NCTEST_CROSS_KIND_MOVE_CONVERSION_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)0U == \(\(base::RawPtrTraits\)1U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
-  [[maybe_unused]] raw_ptr<int> ptr2(std::move(ptr));
-}
-
-#elif defined(NCTEST_CROSS_KIND_MOVE_CONVERSION_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)0U == \(\(base::RawPtrTraits\)1U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
-  [[maybe_unused]] raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2(std::move(ptr));
-}
-
-#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)1U == \(\(base::RawPtrTraits\)16U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
-  raw_ptr<int> ptr2;
-  ptr2 = ptr;
-}
-
-#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)1U == \(\(base::RawPtrTraits\)16U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
-  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2;
-  ptr2 = ptr;
-}
-
-#elif defined(NCTEST_CROSS_KIND_MOVE_ASSIGNMENT_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)1U == \(\(base::RawPtrTraits\)16U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
-  raw_ptr<int> ptr2;
-  ptr2 = std::move(ptr);
-}
-
-#elif defined(NCTEST_CROSS_KIND_MOVE_ASSIGNMENT_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)1U == \(\(base::RawPtrTraits\)16U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
-  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2;
-  ptr2 = std::move(ptr);
-}
-
-#endif
-
-}  // namespace
diff --git a/base/allocator/partition_allocator/pointers/raw_ref.h b/base/allocator/partition_allocator/pointers/raw_ref.h
deleted file mode 100644
index a5f636c..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ref.h
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_
-
-#include <memory>
-#include <type_traits>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/augmentations/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"
-
-namespace base {
-
-template <class T, RawPtrTraits Traits>
-class raw_ref;
-
-namespace internal {
-
-template <class T>
-struct is_raw_ref : std::false_type {};
-
-template <class T, RawPtrTraits Traits>
-struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {};
-
-template <class T>
-constexpr inline bool is_raw_ref_v = is_raw_ref<T>::value;
-
-}  // namespace internal
-
-// A smart pointer for a pointer which cannot be null, and which provides
-// Use-after-Free protection in the same way as raw_ptr. This class acts like a
-// combination of std::reference_wrapper and raw_ptr.
-//
-// See raw_ptr and //base/memory/raw_ptr.md for more details on the
-// Use-after-Free protection.
-//
-// # Use after move
-//
-// The raw_ref type will abort if used after being moved.
-//
-// # Constness
-//
-// Use a `const raw_ref<T>` when the smart pointer should not be able to rebind
-// to a new reference. Use a `const raw_ref<const T>` to do the same for a
-// const reference, which is like `const T&`.
-//
-// Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed
-// independent of the underlying `T`, similar to `std::reference_wrapper`. That
-// means the reference inside it can be moved and reassigned.
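-//
-// # Example
-//
-// A minimal usage sketch (illustrative only, not part of the original header):
-//
-//   struct Widget { int value = 0; };
-//   Widget a, b;
-//   raw_ref<Widget> ref(a);   // Must be constructed from a live object.
-//   ref->value = 1;           // operator-> forwards to the referent.
-//   ref = b;                  // Rebinds the raw_ref itself, similar to
-//                             // std::reference_wrapper; `a` is unchanged.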
-template <class T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
-class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
-  // operator* is used with the expectation of GetForExtraction semantics:
-  //
-  // raw_ref<Foo> foo_raw_ref = something;
-  // Foo& foo_ref = *foo_raw_ref;
-  //
-  // The implementation of operator* provides GetForDereference semantics, and
-  // this results in spurious crashes in BRP-ASan builds, so we need to disable
-  // hooks that provide BRP-ASan instrumentation for raw_ref.
-  using Inner = raw_ptr<T, Traits | RawPtrTraits::kDisableHooks>;
-
-  // Some underlying implementations do not clear on move, which produces
-  // inconsistent behaviour. We want consistent behaviour such that using a
-  // raw_ref after move is caught and aborts, so do it when the underlying
-  // implementation doesn't. Failure to clear would be indicated by the related
-  // death tests not CHECKing appropriately.
-  static constexpr bool kNeedClearAfterMove = !Inner::kZeroOnMove;
-
- public:
-  using Impl = typename Inner::Impl;
-
-  // Construct a raw_ref from a pointer, which must not be null.
-  //
-  // This function is safe to use with any pointer: it will CHECK and terminate
-  // the process if the pointer is null. Do not dereference the pointer yourself
-  // to sidestep this CHECK, as you may be dereferencing null.
-  PA_ALWAYS_INLINE constexpr static raw_ref from_ptr(T* ptr) noexcept {
-    PA_RAW_PTR_CHECK(ptr);
-    return raw_ref(*ptr);
-  }
-
-  // Construct a raw_ref from a reference.
-  PA_ALWAYS_INLINE constexpr explicit raw_ref(T& p) noexcept
-      : inner_(std::addressof(p)) {}
-
-  // Assign a new reference to the raw_ref, replacing the existing reference.
-  PA_ALWAYS_INLINE constexpr raw_ref& operator=(T& p) noexcept {
-    inner_.operator=(&p);
-    return *this;
-  }
-
-  // Disallow holding references to temporaries.
-  raw_ref(const T&& p) = delete;
-  raw_ref& operator=(const T&& p) = delete;
-
-  PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref& p) noexcept
-      : inner_(p.inner_) {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ref(raw_ref&& p) noexcept
-      : inner_(std::move(p.inner_)) {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-    if constexpr (kNeedClearAfterMove) {
-      p.inner_ = nullptr;
-    }
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ref& operator=(const raw_ref& p) noexcept {
-    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
-    inner_.operator=(p.inner_);
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE constexpr raw_ref& operator=(raw_ref&& p) noexcept {
-    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
-    inner_.operator=(std::move(p.inner_));
-    if constexpr (kNeedClearAfterMove) {
-      p.inner_ = nullptr;
-    }
-    return *this;
-  }
-
-  // Deliberately implicit in order to support implicit upcast.
-  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
-  // allow it.
-  template <class U,
-            RawPtrTraits PassedTraits,
-            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref<U, PassedTraits>& p) noexcept
-      : inner_(p.inner_) {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-  }
-  // Deliberately implicit in order to support implicit upcast.
-  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
-  // allow it.
-  template <class U,
-            RawPtrTraits PassedTraits,
-            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PA_ALWAYS_INLINE constexpr raw_ref(raw_ref<U, PassedTraits>&& p) noexcept
-      : inner_(std::move(p.inner_)) {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-    if constexpr (kNeedClearAfterMove) {
-      p.inner_ = nullptr;
-    }
-  }
-
-  // Upcast assignment
-  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
-  // allow it.
-  template <class U,
-            RawPtrTraits PassedTraits,
-            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
-  PA_ALWAYS_INLINE constexpr raw_ref& operator=(
-      const raw_ref<U, PassedTraits>& p) noexcept {
-    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
-    inner_.operator=(p.inner_);
-    return *this;
-  }
-  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
-  // allow it.
-  template <class U,
-            RawPtrTraits PassedTraits,
-            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
-  PA_ALWAYS_INLINE constexpr raw_ref& operator=(
-      raw_ref<U, PassedTraits>&& p) noexcept {
-    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
-    inner_.operator=(std::move(p.inner_));
-    if constexpr (kNeedClearAfterMove) {
-      p.inner_ = nullptr;
-    }
-    return *this;
-  }
-
-  PA_ALWAYS_INLINE constexpr T& operator*() const {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-    return inner_.operator*();
-  }
-
-  // This is equivalent to operator*(), but provides GetForExtraction rather
-  // than GetForDereference semantics (see raw_ptr.h). This should be
-  // used in place of operator*() when the memory referred to by the reference
-  // is not immediately going to be accessed.
-  PA_ALWAYS_INLINE constexpr T& get() const {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-    return *inner_.get();
-  }
-
-  PA_ALWAYS_INLINE constexpr T* operator->() const
-      PA_ATTRIBUTE_RETURNS_NONNULL {
-    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
-    return inner_.operator->();
-  }
-
-  // This is used to verify callbacks are not invoked with dangling references.
-  // If the `raw_ref` references a deleted object, it will trigger an error.
-  // Depending on the PartitionAllocUnretainedDanglingPtr feature, this is
-  // either a DumpWithoutCrashing, a crash, or ignored.
-  PA_ALWAYS_INLINE void ReportIfDangling() const noexcept {
-    inner_.ReportIfDangling();
-  }
-
-  PA_ALWAYS_INLINE friend constexpr void swap(raw_ref& lhs,
-                                              raw_ref& rhs) noexcept {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    swap(lhs.inner_, rhs.inner_);
-  }
-
-  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-  friend bool operator==(const raw_ref<U, Traits1>& lhs,
-                         const raw_ref<V, Traits2>& rhs);
-  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-  friend bool operator!=(const raw_ref<U, Traits1>& lhs,
-                         const raw_ref<V, Traits2>& rhs);
-  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-  friend bool operator<(const raw_ref<U, Traits1>& lhs,
-                        const raw_ref<V, Traits2>& rhs);
-  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-  friend bool operator>(const raw_ref<U, Traits1>& lhs,
-                        const raw_ref<V, Traits2>& rhs);
-  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-  friend bool operator<=(const raw_ref<U, Traits1>& lhs,
-                         const raw_ref<V, Traits2>& rhs);
-  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-  friend bool operator>=(const raw_ref<U, Traits1>& lhs,
-                         const raw_ref<V, Traits2>& rhs);
-
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator==(const raw_ref& lhs, const U& rhs) {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    return lhs.inner_ == &rhs;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator!=(const raw_ref& lhs, const U& rhs) {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    return lhs.inner_ != &rhs;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator<(const raw_ref& lhs, const U& rhs) {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    return lhs.inner_ < &rhs;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator>(const raw_ref& lhs, const U& rhs) {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    return lhs.inner_ > &rhs;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator<=(const raw_ref& lhs, const U& rhs) {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    return lhs.inner_ <= &rhs;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator>=(const raw_ref& lhs, const U& rhs) {
-    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-    return lhs.inner_ >= &rhs;
-  }
-
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator==(const U& lhs, const raw_ref& rhs) {
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    return &lhs == rhs.inner_;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator!=(const U& lhs, const raw_ref& rhs) {
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    return &lhs != rhs.inner_;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator<(const U& lhs, const raw_ref& rhs) {
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    return &lhs < rhs.inner_;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator>(const U& lhs, const raw_ref& rhs) {
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    return &lhs > rhs.inner_;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator<=(const U& lhs, const raw_ref& rhs) {
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    return &lhs <= rhs.inner_;
-  }
-  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
-  PA_ALWAYS_INLINE friend bool operator>=(const U& lhs, const raw_ref& rhs) {
-    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-    return &lhs >= rhs.inner_;
-  }
-
- private:
-  template <class U, RawPtrTraits R>
-  friend class raw_ref;
-
-  Inner inner_;
-};
-
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
-                                 const raw_ref<V, Traits2>& rhs) {
-  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-  return lhs.inner_ == rhs.inner_;
-}
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
-                                 const raw_ref<V, Traits2>& rhs) {
-  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-  return lhs.inner_ != rhs.inner_;
-}
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
-                                const raw_ref<V, Traits2>& rhs) {
-  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-  return lhs.inner_ < rhs.inner_;
-}
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
-                                const raw_ref<V, Traits2>& rhs) {
-  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-  return lhs.inner_ > rhs.inner_;
-}
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
-                                 const raw_ref<V, Traits2>& rhs) {
-  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-  return lhs.inner_ <= rhs.inner_;
-}
-template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
-PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
-                                 const raw_ref<V, Traits2>& rhs) {
-  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
-  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
-  return lhs.inner_ >= rhs.inner_;
-}
-
-// CTAD deduction guide.
-template <class T>
-raw_ref(T&) -> raw_ref<T>;
-template <class T>
-raw_ref(const T&) -> raw_ref<const T>;
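-//
-// With these guides, `int i = 0; raw_ref r(i);` deduces raw_ref<int>, and
-// `const int c = 0; raw_ref cr(c);` deduces raw_ref<const int> (illustrative
-// note, not part of the original header).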
-
-// Template helpers for working with raw_ref<T>.
-template <typename T>
-struct IsRawRef : std::false_type {};
-
-template <typename T, RawPtrTraits Traits>
-struct IsRawRef<raw_ref<T, Traits>> : std::true_type {};
-
-template <typename T>
-inline constexpr bool IsRawRefV = IsRawRef<T>::value;
-
-template <typename T>
-struct RemoveRawRef {
-  using type = T;
-};
-
-template <typename T, RawPtrTraits Traits>
-struct RemoveRawRef<raw_ref<T, Traits>> {
-  using type = T;
-};
-
-template <typename T>
-using RemoveRawRefT = typename RemoveRawRef<T>::type;
-
-}  // namespace base
-
-using base::raw_ref;
-
-template <base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty, typename T>
-auto ToRawRef(T& ref) {
-  return raw_ref<T, Traits>(ref);
-}
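-
-// Usage sketch for ToRawRef (illustrative note, not part of the original
-// header), assuming a local lvalue:
-//
-//   int value = 0;
-//   auto ref = ToRawRef(value);  // raw_ref<int>
-//   auto may_dangle_ref = ToRawRef<base::RawPtrTraits::kMayDangle>(value);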
-
-namespace std {
-
-// Override so set/map lookups do not create extra raw_ref. This also
-// allows C++ references to be used for lookup.
-template <typename T, base::RawPtrTraits Traits>
-struct less<raw_ref<T, Traits>> {
-  using Impl = typename raw_ref<T, Traits>::Impl;
-  using is_transparent = void;
-
-  bool operator()(const raw_ref<T, Traits>& lhs,
-                  const raw_ref<T, Traits>& rhs) const {
-    Impl::IncrementLessCountForTest();
-    return lhs < rhs;
-  }
-
-  bool operator()(T& lhs, const raw_ref<T, Traits>& rhs) const {
-    Impl::IncrementLessCountForTest();
-    return lhs < rhs;
-  }
-
-  bool operator()(const raw_ref<T, Traits>& lhs, T& rhs) const {
-    Impl::IncrementLessCountForTest();
-    return lhs < rhs;
-  }
-};
-
-// Specialize std::pointer_traits. pointer_traits is required to obtain the
-// underlying raw pointer in the std::to_address(pointer) overload.
-// Implementing pointer_traits is the standard blessed way to customize
-// `std::to_address(pointer)` in C++20 [1].
-//
-// [1] https://wg21.link/pointer.traits.optmem
-
-template <typename T, ::base::RawPtrTraits Traits>
-struct pointer_traits<::raw_ref<T, Traits>> {
-  using pointer = ::raw_ref<T, Traits>;
-  using element_type = T;
-  using difference_type = ptrdiff_t;
-
-  template <typename U>
-  using rebind = ::raw_ref<U, Traits>;
-
-  static constexpr pointer pointer_to(element_type& r) noexcept {
-    return pointer(r);
-  }
-
-  static constexpr element_type* to_address(pointer p) noexcept {
-    // `raw_ref::get` is used instead of `raw_ref::operator*`. It provides
-    // GetForExtraction rather than GetForDereference semantics (see
-    // raw_ptr.h). This should be used when we don't know whether the memory
-    // will be accessed.
-    return &(p.get());
-  }
-};
-
-}  // namespace std
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_
diff --git a/base/allocator/partition_allocator/pointers/raw_ref_unittest.cc b/base/allocator/partition_allocator/pointers/raw_ref_unittest.cc
deleted file mode 100644
index 16e3672..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ref_unittest.cc
+++ /dev/null
@@ -1,1014 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/pointers/raw_ref.h"
-
-#include <functional>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr_counting_impl_wrapper_for_test.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr_test_support.h"
-#include "base/memory/raw_ptr.h"
-#include "base/test/gtest_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-#include "base/debug/asan_service.h"
-#include "base/memory/raw_ptr_asan_service.h"
-#endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-
-namespace {
-
-class BaseClass {};
-class SubClass : public BaseClass {};
-
-// raw_ref just wraps a raw_ptr and defers to it for the implementation, so it
-// must not add any extra data members.
-static_assert(sizeof(raw_ref<int>) == sizeof(raw_ptr<int>));
-
-// Since it can't hold null, raw_ref is not default-constructible.
-static_assert(!std::is_default_constructible_v<raw_ref<int>>);
-static_assert(!std::is_default_constructible_v<raw_ref<const int>>);
-
-// A mutable reference can only be constructed from a mutable lvalue reference.
-static_assert(!std::is_constructible_v<raw_ref<int>, const int>);
-static_assert(!std::is_constructible_v<raw_ref<int>, int>);
-static_assert(!std::is_constructible_v<raw_ref<int>, const int&>);
-static_assert(std::is_constructible_v<raw_ref<int>, int&>);
-static_assert(!std::is_constructible_v<raw_ref<int>, const int*>);
-static_assert(!std::is_constructible_v<raw_ref<int>, int*>);
-static_assert(!std::is_constructible_v<raw_ref<int>, const int&&>);
-static_assert(!std::is_constructible_v<raw_ref<int>, int&&>);
-// Same for assignment.
-static_assert(!std::is_assignable_v<raw_ref<int>, const int>);
-static_assert(!std::is_assignable_v<raw_ref<int>, int>);
-static_assert(!std::is_assignable_v<raw_ref<int>, const int&>);
-static_assert(std::is_assignable_v<raw_ref<int>, int&>);
-static_assert(!std::is_assignable_v<raw_ref<int>, const int*>);
-static_assert(!std::is_assignable_v<raw_ref<int>, int*>);
-static_assert(!std::is_assignable_v<raw_ref<int>, const int&&>);
-static_assert(!std::is_assignable_v<raw_ref<int>, int&&>);
-
-// A const reference can be constructed from a const or mutable lvalue
-// reference.
-static_assert(!std::is_constructible_v<raw_ref<const int>, const int>);
-static_assert(!std::is_constructible_v<raw_ref<const int>, int>);
-static_assert(std::is_constructible_v<raw_ref<const int>, const int&>);
-static_assert(std::is_constructible_v<raw_ref<const int>, int&>);
-static_assert(!std::is_constructible_v<raw_ref<const int>, const int*>);
-static_assert(!std::is_constructible_v<raw_ref<const int>, int*>);
-static_assert(!std::is_constructible_v<raw_ref<const int>, const int&&>);
-static_assert(!std::is_constructible_v<raw_ref<const int>, int&&>);
-// Same for assignment.
-static_assert(!std::is_assignable_v<raw_ref<const int>, const int>);
-static_assert(!std::is_assignable_v<raw_ref<const int>, int>);
-static_assert(std::is_assignable_v<raw_ref<const int>, const int&>);
-static_assert(std::is_assignable_v<raw_ref<const int>, int&>);
-static_assert(!std::is_assignable_v<raw_ref<const int>, const int*>);
-static_assert(!std::is_assignable_v<raw_ref<const int>, int*>);
-static_assert(!std::is_assignable_v<raw_ref<const int>, const int&&>);
-static_assert(!std::is_assignable_v<raw_ref<const int>, int&&>);
-
-// Same trivial operations (or not) as raw_ptr<T>.
-static_assert(std::is_trivially_constructible_v<raw_ref<int>, const int&> ==
-              std::is_trivially_constructible_v<raw_ptr<int>, const int&>);
-static_assert(std::is_trivially_destructible_v<raw_ref<int>> ==
-              std::is_trivially_destructible_v<raw_ptr<int>>);
-// But constructing from another raw_ref must check if it's internally null
-// (which indicates use-after-move).
-static_assert(!std::is_trivially_move_constructible_v<raw_ref<int>>);
-static_assert(!std::is_trivially_move_assignable_v<raw_ref<int>>);
-static_assert(!std::is_trivially_copy_constructible_v<raw_ref<int>>);
-static_assert(!std::is_trivially_copy_assignable_v<raw_ref<int>>);
-
-// A raw_ref can be copied or moved.
-static_assert(std::is_move_constructible_v<raw_ref<int>>);
-static_assert(std::is_copy_constructible_v<raw_ref<int>>);
-static_assert(std::is_move_assignable_v<raw_ref<int>>);
-static_assert(std::is_copy_assignable_v<raw_ref<int>>);
-
-// A SubClass can be converted to a BaseClass.
-static_assert(std::is_constructible_v<raw_ref<BaseClass>, raw_ref<SubClass>>);
-static_assert(
-    std::is_constructible_v<raw_ref<BaseClass>, const raw_ref<SubClass>&>);
-static_assert(std::is_constructible_v<raw_ref<BaseClass>, raw_ref<SubClass>&&>);
-static_assert(std::is_assignable_v<raw_ref<BaseClass>, raw_ref<SubClass>>);
-static_assert(
-    std::is_assignable_v<raw_ref<BaseClass>, const raw_ref<SubClass>&>);
-static_assert(std::is_assignable_v<raw_ref<BaseClass>, raw_ref<SubClass>&&>);
-// A BaseClass can't be implicitly downcasted.
-static_assert(!std::is_constructible_v<raw_ref<SubClass>, raw_ref<BaseClass>>);
-static_assert(
-    !std::is_constructible_v<raw_ref<SubClass>, const raw_ref<BaseClass>&>);
-static_assert(
-    !std::is_constructible_v<raw_ref<SubClass>, raw_ref<BaseClass>&&>);
-static_assert(!std::is_assignable_v<raw_ref<SubClass>, raw_ref<BaseClass>>);
-static_assert(
-    !std::is_assignable_v<raw_ref<SubClass>, const raw_ref<BaseClass>&>);
-static_assert(!std::is_assignable_v<raw_ref<SubClass>, raw_ref<BaseClass>&&>);
-
-// A raw_ref<BaseClass> can be constructed directly from a SubClass.
-static_assert(std::is_constructible_v<raw_ref<BaseClass>, SubClass&>);
-static_assert(std::is_assignable_v<raw_ref<BaseClass>, SubClass&>);
-static_assert(std::is_constructible_v<raw_ref<const BaseClass>, SubClass&>);
-static_assert(std::is_assignable_v<raw_ref<const BaseClass>, SubClass&>);
-static_assert(
-    std::is_constructible_v<raw_ref<const BaseClass>, const SubClass&>);
-static_assert(std::is_assignable_v<raw_ref<const BaseClass>, const SubClass&>);
-// But a raw_ref<SubClass> can't be constructed from an implicit downcast from a
-// BaseClass.
-static_assert(!std::is_constructible_v<raw_ref<SubClass>, BaseClass&>);
-static_assert(!std::is_assignable_v<raw_ref<SubClass>, BaseClass&>);
-static_assert(!std::is_constructible_v<raw_ref<const SubClass>, BaseClass&>);
-static_assert(!std::is_assignable_v<raw_ref<const SubClass>, BaseClass&>);
-static_assert(
-    !std::is_constructible_v<raw_ref<const SubClass>, const BaseClass&>);
-static_assert(!std::is_assignable_v<raw_ref<const SubClass>, const BaseClass&>);
-
-// A mutable reference can be converted to const reference.
-static_assert(std::is_constructible_v<raw_ref<const int>, raw_ref<int>>);
-static_assert(std::is_assignable_v<raw_ref<const int>, raw_ref<int>>);
-// A const reference can't be converted to mutable.
-static_assert(!std::is_constructible_v<raw_ref<int>, raw_ref<const int>>);
-static_assert(!std::is_assignable_v<raw_ref<int>, raw_ref<const int>>);
-
-// The deref operator gives the internal reference.
-static_assert(std::is_same_v<int&, decltype(*std::declval<raw_ref<int>>())>);
-static_assert(
-    std::is_same_v<int&, decltype(*std::declval<const raw_ref<int>>())>);
-static_assert(std::is_same_v<int&, decltype(*std::declval<raw_ref<int>&>())>);
-static_assert(
-    std::is_same_v<int&, decltype(*std::declval<const raw_ref<int>&>())>);
-static_assert(std::is_same_v<int&, decltype(*std::declval<raw_ref<int>&&>())>);
-static_assert(
-    std::is_same_v<int&, decltype(*std::declval<const raw_ref<int>&&>())>);
-// A const T is always returned as const.
-static_assert(
-    std::is_same_v<const int&, decltype(*std::declval<raw_ref<const int>>())>);
-
-// The arrow operator gives a (non-null) pointer to the internal reference.
-static_assert(
-    std::is_same_v<int*, decltype(std::declval<raw_ref<int>>().operator->())>);
-static_assert(
-    std::is_same_v<const int*,
-                   decltype(std::declval<raw_ref<const int>>().operator->())>);
-
-// Verify that raw_ref is a literal type, and its entire interface is constexpr.
-//
-// Constexpr destructors were introduced in C++20. PartitionAlloc's minimum
-// supported C++ version is C++17, so raw_ref is not a literal type in C++17.
-// Thus we only test for constexpr in C++20.
-#if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
-static_assert([]() constexpr {
-  struct IntBase {};
-  struct Int : public IntBase {
-    int i = 0;
-  };
-
-  Int* i = new Int();
-  {
-    raw_ref<Int> r(*i);              // raw_ref(T&)
-    r = *i;                          // operator=(T&)
-    raw_ref<Int> r2(r);              // raw_ref(const raw_ref&)
-    raw_ref<Int> r3(std::move(r2));  // raw_ref(raw_ref&&)
-    r2 = r;                          // operator=(const raw_ref&)
-    r3 = std::move(r2);              // operator=(raw_ref&&)
-    r2 = r;                          // Reset after move.
-    [[maybe_unused]] raw_ref<IntBase> r5(
-        r2);  // raw_ref(const raw_ref<Convertible>&)
-    [[maybe_unused]] raw_ref<IntBase> r6(
-        std::move(r2));         // raw_ref(raw_ref<Convertible>&&)
-    r2 = r;                     // Reset after move.
-    r5 = r2;                    // operator=(const raw_ref<Convertible>&)
-    r6 = std::move(r2);         // operator=(raw_ref<Convertible>&&)
-    raw_ref<Int>::from_ptr(i);  // from_ptr(T*)
-    (*r).i += 1;                // operator*()
-    r.get().i += 1;             // get()
-    r->i += 1;                  // operator->()
-    r2 = r;                     // Reset after move.
-    swap(r, r2);                // swap()
-  }
-  delete i;
-  return true;
-}());
-#endif
-
-TEST(RawRef, Construct) {
-  int i = 1;
-  auto r = raw_ref<int>(i);
-  EXPECT_EQ(&*r, &i);
-  auto cr = raw_ref<const int>(i);
-  EXPECT_EQ(&*cr, &i);
-  const int ci = 1;
-  auto cci = raw_ref<const int>(ci);
-  EXPECT_EQ(&*cci, &ci);
-}
-
-TEST(RawRef, CopyConstruct) {
-  {
-    int i = 1;
-    auto r = raw_ref<int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto r2 = raw_ref<int>(r);
-    EXPECT_EQ(&*r2, &i);
-  }
-  {
-    int i = 1;
-    auto r = raw_ref<const int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto r2 = raw_ref<const int>(r);
-    EXPECT_EQ(&*r2, &i);
-  }
-}
-
-TEST(RawRef, MoveConstruct) {
-  {
-    int i = 1;
-    auto r = raw_ref<int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto r2 = raw_ref<int>(std::move(r));
-    EXPECT_EQ(&*r2, &i);
-  }
-  {
-    int i = 1;
-    auto r = raw_ref<const int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto r2 = raw_ref<const int>(std::move(r));
-    EXPECT_EQ(&*r2, &i);
-  }
-}
-
-TEST(RawRef, CopyAssign) {
-  {
-    int i = 1;
-    int j = 2;
-    auto r = raw_ref<int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto rj = raw_ref<int>(j);
-    r = rj;
-    EXPECT_EQ(&*r, &j);
-  }
-  {
-    int i = 1;
-    int j = 2;
-    auto r = raw_ref<const int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto rj = raw_ref<const int>(j);
-    r = rj;
-    EXPECT_EQ(&*r, &j);
-  }
-  {
-    int i = 1;
-    int j = 2;
-    auto r = raw_ref<const int>(i);
-    EXPECT_EQ(&*r, &i);
-    auto rj = raw_ref<int>(j);
-    r = rj;
-    EXPECT_EQ(&*r, &j);
-  }
-}
-
-TEST(RawRef, CopyReassignAfterMove) {
-  int i = 1;
-  int j = 1;
-  auto r = raw_ref<int>(i);
-  auto r2 = std::move(r);
-  r2 = raw_ref<int>(j);
-  // Reassign to the moved-from `r` so it can be used again.
-  r = r2;
-  EXPECT_EQ(&*r, &j);
-}
-
-TEST(RawRef, MoveAssign) {
-  {
-    int i = 1;
-    int j = 2;
-    auto r = raw_ref<int>(i);
-    EXPECT_EQ(&*r, &i);
-    r = raw_ref<int>(j);
-    EXPECT_EQ(&*r, &j);
-  }
-  {
-    int i = 1;
-    int j = 2;
-    auto r = raw_ref<const int>(i);
-    EXPECT_EQ(&*r, &i);
-    r = raw_ref<const int>(j);
-    EXPECT_EQ(&*r, &j);
-  }
-  {
-    int i = 1;
-    int j = 2;
-    auto r = raw_ref<const int>(i);
-    EXPECT_EQ(&*r, &i);
-    r = raw_ref<int>(j);
-    EXPECT_EQ(&*r, &j);
-  }
-}
-
-TEST(RawRef, MoveReassignAfterMove) {
-  int i = 1;
-  int j = 1;
-  auto r = raw_ref<int>(i);
-  auto r2 = std::move(r);
-  // Reassign to the moved-from `r` so it can be used again.
-  r = raw_ref<int>(j);
-  EXPECT_EQ(&*r, &j);
-}
-
-TEST(RawRef, CopyConstructUpCast) {
-  {
-    auto s = SubClass();
-    auto r = raw_ref<SubClass>(s);
-    EXPECT_EQ(&*r, &s);
-    auto r2 = raw_ref<BaseClass>(r);
-    EXPECT_EQ(&*r2, &s);
-  }
-  {
-    auto s = SubClass();
-    auto r = raw_ref<const SubClass>(s);
-    EXPECT_EQ(&*r, &s);
-    auto r2 = raw_ref<const BaseClass>(r);
-    EXPECT_EQ(&*r2, &s);
-  }
-}
-
-TEST(RawRef, MoveConstructUpCast) {
-  {
-    auto s = SubClass();
-    auto r = raw_ref<SubClass>(s);
-    EXPECT_EQ(&*r, &s);
-    auto r2 = raw_ref<BaseClass>(std::move(r));
-    EXPECT_EQ(&*r2, &s);
-  }
-  {
-    auto s = SubClass();
-    auto r = raw_ref<const SubClass>(s);
-    EXPECT_EQ(&*r, &s);
-    auto r2 = raw_ref<const BaseClass>(std::move(r));
-    EXPECT_EQ(&*r2, &s);
-  }
-}
-
-TEST(RawRef, FromPtr) {
-  int i = 42;
-  auto ref = raw_ref<int>::from_ptr(&i);
-  EXPECT_EQ(&i, &*ref);
-}
-
-TEST(RawRef, CopyAssignUpCast) {
-  {
-    auto s = SubClass();
-    auto r = raw_ref<SubClass>(s);
-    auto t = BaseClass();
-    auto rt = raw_ref<BaseClass>(t);
-    rt = r;
-    EXPECT_EQ(&*rt, &s);
-  }
-  {
-    auto s = SubClass();
-    auto r = raw_ref<const SubClass>(s);
-    auto t = BaseClass();
-    auto rt = raw_ref<const BaseClass>(t);
-    rt = r;
-    EXPECT_EQ(&*rt, &s);
-  }
-  {
-    auto s = SubClass();
-    auto r = raw_ref<SubClass>(s);
-    auto t = BaseClass();
-    auto rt = raw_ref<const BaseClass>(t);
-    rt = r;
-    EXPECT_EQ(&*rt, &s);
-  }
-}
-
-TEST(RawRef, MoveAssignUpCast) {
-  {
-    auto s = SubClass();
-    auto r = raw_ref<SubClass>(s);
-    auto t = BaseClass();
-    auto rt = raw_ref<BaseClass>(t);
-    rt = std::move(r);
-    EXPECT_EQ(&*rt, &s);
-  }
-  {
-    auto s = SubClass();
-    auto r = raw_ref<const SubClass>(s);
-    auto t = BaseClass();
-    auto rt = raw_ref<const BaseClass>(t);
-    rt = std::move(r);
-    EXPECT_EQ(&*rt, &s);
-  }
-  {
-    auto s = SubClass();
-    auto r = raw_ref<SubClass>(s);
-    auto t = BaseClass();
-    auto rt = raw_ref<const BaseClass>(t);
-    rt = std::move(r);
-    EXPECT_EQ(&*rt, &s);
-  }
-}
-
-TEST(RawRef, Deref) {
-  int i;
-  auto r = raw_ref<int>(i);
-  EXPECT_EQ(&*r, &i);
-}
-
-TEST(RawRef, Arrow) {
-  int i;
-  auto r = raw_ref<int>(i);
-  EXPECT_EQ(r.operator->(), &i);
-}
-
-TEST(RawRef, Swap) {
-  int i;
-  int j;
-  auto ri = raw_ref<int>(i);
-  auto rj = raw_ref<int>(j);
-  swap(ri, rj);
-  EXPECT_EQ(&*ri, &j);
-  EXPECT_EQ(&*rj, &i);
-}
-
-TEST(RawRef, Equals) {
-  int i = 1;
-  auto r1 = raw_ref<int>(i);
-  auto r2 = raw_ref<int>(i);
-  EXPECT_TRUE(r1 == r1);
-  EXPECT_TRUE(r1 == r2);
-  EXPECT_TRUE(r1 == i);
-  EXPECT_TRUE(i == r1);
-  int j = 1;
-  auto r3 = raw_ref<int>(j);
-  EXPECT_FALSE(r1 == r3);
-  EXPECT_FALSE(r1 == j);
-  EXPECT_FALSE(j == r1);
-}
-
-TEST(RawRef, NotEquals) {
-  int i = 1;
-  auto r1 = raw_ref<int>(i);
-  int j = 1;
-  auto r2 = raw_ref<int>(j);
-  EXPECT_TRUE(r1 != r2);
-  EXPECT_TRUE(r1 != j);
-  EXPECT_TRUE(j != r1);
-  EXPECT_FALSE(r1 != r1);
-  EXPECT_FALSE(r2 != j);
-  EXPECT_FALSE(j != r2);
-}
-
-TEST(RawRef, LessThan) {
-  int i[] = {1, 1};
-  auto r1 = raw_ref<int>(i[0]);
-  auto r2 = raw_ref<int>(i[1]);
-  EXPECT_TRUE(r1 < r2);
-  EXPECT_TRUE(r1 < i[1]);
-  EXPECT_FALSE(i[1] < r1);
-  EXPECT_FALSE(r2 < r1);
-  EXPECT_FALSE(r2 < i[0]);
-  EXPECT_TRUE(i[0] < r2);
-  EXPECT_FALSE(r1 < r1);
-  EXPECT_FALSE(r1 < i[0]);
-  EXPECT_FALSE(i[0] < r1);
-}
-
-TEST(RawRef, GreaterThan) {
-  int i[] = {1, 1};
-  auto r1 = raw_ref<int>(i[0]);
-  auto r2 = raw_ref<int>(i[1]);
-  EXPECT_TRUE(r2 > r1);
-  EXPECT_FALSE(r1 > r2);
-  EXPECT_FALSE(r1 > i[1]);
-  EXPECT_TRUE(i[1] > r1);
-  EXPECT_FALSE(r2 > r2);
-  EXPECT_FALSE(r2 > i[1]);
-  EXPECT_FALSE(i[1] > r2);
-}
-
-TEST(RawRef, LessThanOrEqual) {
-  int i[] = {1, 1};
-  auto r1 = raw_ref<int>(i[0]);
-  auto r2 = raw_ref<int>(i[1]);
-  EXPECT_TRUE(r1 <= r2);
-  EXPECT_TRUE(r1 <= r1);
-  EXPECT_TRUE(r2 <= r2);
-  EXPECT_FALSE(r2 <= r1);
-  EXPECT_TRUE(r1 <= i[1]);
-  EXPECT_TRUE(r1 <= i[0]);
-  EXPECT_TRUE(r2 <= i[1]);
-  EXPECT_FALSE(r2 <= i[0]);
-  EXPECT_FALSE(i[1] <= r1);
-  EXPECT_TRUE(i[0] <= r1);
-  EXPECT_TRUE(i[1] <= r2);
-  EXPECT_TRUE(i[0] <= r2);
-}
-
-TEST(RawRef, GreaterThanOrEqual) {
-  int i[] = {1, 1};
-  auto r1 = raw_ref<int>(i[0]);
-  auto r2 = raw_ref<int>(i[1]);
-  EXPECT_TRUE(r2 >= r1);
-  EXPECT_TRUE(r1 >= r1);
-  EXPECT_TRUE(r2 >= r2);
-  EXPECT_FALSE(r1 >= r2);
-  EXPECT_TRUE(r2 >= i[0]);
-  EXPECT_TRUE(r1 >= i[0]);
-  EXPECT_TRUE(r2 >= i[1]);
-  EXPECT_FALSE(r1 >= i[1]);
-  EXPECT_FALSE(i[0] >= r2);
-  EXPECT_TRUE(i[0] >= r1);
-  EXPECT_TRUE(i[1] >= r2);
-  EXPECT_TRUE(i[1] >= r1);
-}
-
-// Death Tests: If we're only using the no-op version of `raw_ptr` and
-// have `!BUILDFLAG(PA_DCHECK_IS_ON)`, the `PA_RAW_PTR_CHECK()`s used in
-// `raw_ref` evaluate to nothing. Therefore, death tests relying on
-// these CHECKs firing are disabled in their absence.
-
-#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
-    BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) || BUILDFLAG(PA_DCHECK_IS_ON)
-
-TEST(RawRefDeathTest, CopyConstructAfterMove) {
-  int i = 1;
-  auto r = raw_ref<int>(i);
-  auto r2 = std::move(r);
-  EXPECT_CHECK_DEATH({ [[maybe_unused]] auto r3 = r; });
-}
-
-TEST(RawRefDeathTest, MoveConstructAfterMove) {
-  int i = 1;
-  auto r = raw_ref<int>(i);
-  auto r2 = std::move(r);
-  EXPECT_CHECK_DEATH({ [[maybe_unused]] auto r3 = std::move(r); });
-}
-
-TEST(RawRefDeathTest, CopyAssignAfterMove) {
-  int i = 1;
-  auto r = raw_ref<int>(i);
-  auto r2 = std::move(r);
-  EXPECT_CHECK_DEATH({ r2 = r; });
-}
-
-TEST(RawRefDeathTest, MoveAssignAfterMove) {
-  int i = 1;
-  auto r = raw_ref<int>(i);
-  auto r2 = std::move(r);
-  EXPECT_CHECK_DEATH({ r2 = std::move(r); });
-}
-
-TEST(RawRefDeathTest, CopyConstructAfterMoveUpCast) {
-  auto s = SubClass();
-  auto r = raw_ref<SubClass>(s);
-  auto moved = std::move(r);
-  EXPECT_CHECK_DEATH({ [[maybe_unused]] auto r2 = raw_ref<BaseClass>(r); });
-}
-
-TEST(RawRefDeathTest, MoveConstructAfterMoveUpCast) {
-  auto s = SubClass();
-  auto r = raw_ref<SubClass>(s);
-  auto moved = std::move(r);
-  EXPECT_CHECK_DEATH(
-      { [[maybe_unused]] auto r2 = raw_ref<BaseClass>(std::move(r)); });
-}
-
-TEST(RawRefDeathTest, FromPtrWithNullptr) {
-  EXPECT_CHECK_DEATH({ raw_ref<int>::from_ptr(nullptr); });
-}
-
-TEST(RawRefDeathTest, CopyAssignAfterMoveUpCast) {
-  auto s = SubClass();
-  auto r = raw_ref<const SubClass>(s);
-  auto t = BaseClass();
-  auto rt = raw_ref<const BaseClass>(t);
-  auto moved = std::move(r);
-  EXPECT_CHECK_DEATH({ rt = r; });
-}
-
-TEST(RawRefDeathTest, MoveAssignAfterMoveUpCast) {
-  auto s = SubClass();
-  auto r = raw_ref<const SubClass>(s);
-  auto t = BaseClass();
-  auto rt = raw_ref<const BaseClass>(t);
-  auto moved = std::move(r);
-  EXPECT_CHECK_DEATH({ rt = std::move(r); });
-}
-
-TEST(RawRefDeathTest, DerefAfterMove) {
-  int i;
-  auto r = raw_ref<int>(i);
-  auto moved = std::move(r);
-  EXPECT_CHECK_DEATH({ r.operator*(); });
-}
-
-TEST(RawRefDeathTest, ArrowAfterMove) {
-  int i;
-  auto r = raw_ref<int>(i);
-  auto moved = std::move(r);
-  EXPECT_CHECK_DEATH({ r.operator->(); });
-}
-
-TEST(RawRefDeathTest, SwapAfterMove) {
-  {
-    int i;
-    auto ri = raw_ref<int>(i);
-    int j;
-    auto rj = raw_ref<int>(j);
-
-    auto moved = std::move(ri);
-    EXPECT_CHECK_DEATH({ swap(ri, rj); });
-  }
-  {
-    int i;
-    auto ri = raw_ref<int>(i);
-    int j;
-    auto rj = raw_ref<int>(j);
-
-    auto moved = std::move(rj);
-    EXPECT_CHECK_DEATH({ swap(ri, rj); });
-  }
-}
-
-TEST(RawRefDeathTest, EqualsAfterMove) {
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 == r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r2);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 == r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 == r1; });
-  }
-}
-
-TEST(RawRefDeathTest, NotEqualsAfterMove) {
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 != r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r2);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 != r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 != r1; });
-  }
-}
-
-TEST(RawRefDeathTest, LessThanAfterMove) {
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 < r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r2);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 < r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 < r1; });
-  }
-}
-
-TEST(RawRefDeathTest, GreaterThanAfterMove) {
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 > r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r2);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 > r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 > r1; });
-  }
-}
-
-TEST(RawRefDeathTest, LessThanOrEqualAfterMove) {
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 <= r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r2);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 <= r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 <= r1; });
-  }
-}
-
-TEST(RawRefDeathTest, GreaterThanOrEqualAfterMove) {
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 >= r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto r2 = raw_ref<int>(i);
-    auto moved = std::move(r2);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 >= r2; });
-  }
-  {
-    int i = 1;
-    auto r1 = raw_ref<int>(i);
-    auto moved = std::move(r1);
-    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 >= r1; });
-  }
-}
-
-#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
-        // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) ||
-        // BUILDFLAG(PA_DCHECK_IS_ON)
-
-TEST(RawRef, CTAD) {
-  int i = 1;
-  auto r = raw_ref(i);
-  EXPECT_EQ(&*r, &i);
-}
-
-TEST(RawRefPtr, CTADWithConst) {
-  std::string str;
-  struct S {
-    const raw_ref<const std::string> r;
-  };
-  // Deduces as `raw_ref<std::string>`, for which the constructor call is
-  // valid and makes a mutable reference, which then converts to
-  // `raw_ref<const std::string>`.
-  S s1 = {.r = raw_ref(str)};
-  // Deduces as raw_ref<const std::string>, for which the constructor call is
-  // valid from a const ref.
-  S s2 = {.r = raw_ref(static_cast<const std::string&>(str))};
-  EXPECT_EQ(&*s1.r, &str);
-  EXPECT_EQ(&*s2.r, &str);
-}
-
-// `kDisableHooks` matches what `CountingRawRef` does internally.
-// `kUseCountingWrapperForTest` is removed, and `kDisableHooks` is added.
-using RawPtrCountingImpl = base::test::RawPtrCountingImplWrapperForTest<
-    base::RawPtrTraits::kDisableHooks>;
-
-// `kDisableHooks | kMayDangle` matches what `CountingRawRefMayDangle` does
-// internally. `kUseCountingWrapperForTest` is removed, `kDisableHooks` is
-// added, and `kMayDangle` is kept.
-using RawPtrCountingMayDangleImpl =
-    base::test::RawPtrCountingImplWrapperForTest<
-        base::RawPtrTraits::kMayDangle | base::RawPtrTraits::kDisableHooks>;
-
-template <typename T>
-using CountingRawRef =
-    raw_ref<T, base::RawPtrTraits::kUseCountingWrapperForTest>;
-static_assert(std::is_same_v<CountingRawRef<int>::Impl, RawPtrCountingImpl>);
-
-template <typename T>
-using CountingRawRefMayDangle =
-    raw_ref<T,
-            base::RawPtrTraits::kMayDangle |
-                base::RawPtrTraits::kUseCountingWrapperForTest>;
-static_assert(std::is_same_v<CountingRawRefMayDangle<int>::Impl,
-                             RawPtrCountingMayDangleImpl>);
-
-TEST(RawRef, StdLess) {
-  int i[] = {1, 1};
-  {
-    RawPtrCountingImpl::ClearCounters();
-    auto r1 = CountingRawRef<int>(i[0]);
-    auto r2 = CountingRawRef<int>(i[1]);
-    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, r2));
-    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, r1));
-    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
-  }
-  {
-    RawPtrCountingImpl::ClearCounters();
-    const auto r1 = CountingRawRef<int>(i[0]);
-    const auto r2 = CountingRawRef<int>(i[1]);
-    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, r2));
-    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, r1));
-    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
-  }
-  {
-    RawPtrCountingImpl::ClearCounters();
-    auto r1 = CountingRawRef<const int>(i[0]);
-    auto r2 = CountingRawRef<const int>(i[1]);
-    EXPECT_TRUE(std::less<CountingRawRef<const int>>()(r1, r2));
-    EXPECT_FALSE(std::less<CountingRawRef<const int>>()(r2, r1));
-    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
-  }
-  {
-    RawPtrCountingImpl::ClearCounters();
-    auto r1 = CountingRawRef<int>(i[0]);
-    auto r2 = CountingRawRef<int>(i[1]);
-    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, i[1]));
-    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, i[0]));
-    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
-  }
-  {
-    RawPtrCountingImpl::ClearCounters();
-    const auto r1 = CountingRawRef<int>(i[0]);
-    const auto r2 = CountingRawRef<int>(i[1]);
-    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, i[1]));
-    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, i[0]));
-    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
-  }
-  {
-    RawPtrCountingImpl::ClearCounters();
-    auto r1 = CountingRawRef<const int>(i[0]);
-    auto r2 = CountingRawRef<const int>(i[1]);
-    EXPECT_TRUE(std::less<CountingRawRef<const int>>()(r1, i[1]));
-    EXPECT_FALSE(std::less<CountingRawRef<const int>>()(r2, i[0]));
-    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
-  }
-}
-
-// Verifies that comparing `raw_ref`s with different underlying Traits
-// is a valid utterance and primarily uses the `GetForComparison()` methods.
-TEST(RawRef, OperatorsUseGetForComparison) {
-  int x = 123;
-  CountingRawRef<int> ref1(x);
-  CountingRawRefMayDangle<int> ref2(x);
-
-  RawPtrCountingImpl::ClearCounters();
-  RawPtrCountingMayDangleImpl::ClearCounters();
-
-  EXPECT_TRUE(ref1 == ref2);
-  EXPECT_FALSE(ref1 != ref2);
-  // The use of `PA_RAW_PTR_CHECK()`s to catch dangling references means
-  // that we can't actually readily specify whether there are 0
-  // extractions (`CHECK()`s compiled out) or 2 extractions.
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_comparison_cnt = 2,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .get_for_comparison_cnt = 2,
-              }),
-              CountersMatch());
-
-  EXPECT_FALSE(ref1 < ref2);
-  EXPECT_FALSE(ref1 > ref2);
-  EXPECT_TRUE(ref1 <= ref2);
-  EXPECT_TRUE(ref1 >= ref2);
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_comparison_cnt = 6,
-              }),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .get_for_comparison_cnt = 6,
-              }),
-              CountersMatch());
-}
-
-TEST(RawRef, CrossKindConversion) {
-  int x = 123;
-  CountingRawRef<int> ref1(x);
-
-  RawPtrCountingImpl::ClearCounters();
-  RawPtrCountingMayDangleImpl::ClearCounters();
-
-  CountingRawRefMayDangle<int> ref2(ref1);
-  CountingRawRefMayDangle<int> ref3(std::move(ref1));  // Falls back to copy.
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_duplication_cnt = 2}),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 0, .wrap_raw_ptr_for_dup_cnt = 2}),
-              CountersMatch());
-}
-
-TEST(RawRef, CrossKindAssignment) {
-  int x = 123;
-  CountingRawRef<int> ref1(x);
-
-  CountingRawRefMayDangle<int> ref2(x);
-  CountingRawRefMayDangle<int> ref3(x);
-
-  RawPtrCountingImpl::ClearCounters();
-  RawPtrCountingMayDangleImpl::ClearCounters();
-  ref2 = ref1;
-  ref3 = std::move(ref1);  // Falls back to copy.
-
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingImpl>{
-                  .get_for_dereference_cnt = 0,
-                  .get_for_extraction_cnt = 0,
-                  .get_for_duplication_cnt = 2}),
-              CountersMatch());
-  EXPECT_THAT((CountingRawPtrExpectations<RawPtrCountingMayDangleImpl>{
-                  .wrap_raw_ptr_cnt = 0, .wrap_raw_ptr_for_dup_cnt = 2}),
-              CountersMatch());
-}
-
-#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-
-TEST(AsanBackupRefPtrImpl, RawRefGet) {
-  base::debug::AsanService::GetInstance()->Initialize();
-
-  if (!base::RawPtrAsanService::GetInstance().IsEnabled()) {
-    base::RawPtrAsanService::GetInstance().Configure(
-        base::EnableDereferenceCheck(true), base::EnableExtractionCheck(true),
-        base::EnableInstantiationCheck(true));
-  } else {
-    ASSERT_TRUE(
-        base::RawPtrAsanService::GetInstance().is_dereference_check_enabled());
-    ASSERT_TRUE(
-        base::RawPtrAsanService::GetInstance().is_extraction_check_enabled());
-    ASSERT_TRUE(base::RawPtrAsanService::GetInstance()
-                    .is_instantiation_check_enabled());
-  }
-
-  auto ptr = ::std::make_unique<int>();
-  raw_ref<int> safe_ref(*ptr);
-  ptr.reset();
-
-  // This test is specifically to ensure that raw_ref.get() does not cause a
-  // dereference of the memory referred to by the reference. If there is a
-  // dereference, then this test will crash.
-  [[maybe_unused]] volatile int& ref = safe_ref.get();
-}
-
-TEST(AsanBackupRefPtrImpl, RawRefOperatorStar) {
-  base::debug::AsanService::GetInstance()->Initialize();
-
-  if (!base::RawPtrAsanService::GetInstance().IsEnabled()) {
-    base::RawPtrAsanService::GetInstance().Configure(
-        base::EnableDereferenceCheck(true), base::EnableExtractionCheck(true),
-        base::EnableInstantiationCheck(true));
-  } else {
-    ASSERT_TRUE(
-        base::RawPtrAsanService::GetInstance().is_dereference_check_enabled());
-    ASSERT_TRUE(
-        base::RawPtrAsanService::GetInstance().is_extraction_check_enabled());
-    ASSERT_TRUE(base::RawPtrAsanService::GetInstance()
-                    .is_instantiation_check_enabled());
-  }
-
-  auto ptr = ::std::make_unique<int>();
-  raw_ref<int> safe_ref(*ptr);
-  ptr.reset();
-
-  // This test is specifically to ensure that &*raw_ref does not cause a
-  // dereference of the memory referred to by the reference. If there is a
-  // dereference, then this test will crash.
-  [[maybe_unused]] volatile int& ref = *safe_ref;
-}
-
-#endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
-
-}  // namespace
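
Illustrative only: a minimal sketch of the move-invalidation behavior the death tests above exercise. It assumes a Chromium checkout (the raw_ref.h include path and the PA_RAW_PTR_CHECK() machinery referenced above); the function name is hypothetical.

#include <utility>

#include "base/allocator/partition_allocator/pointers/raw_ref.h"

void MoveInvalidationSketch() {
  int value = 1;
  auto ref = raw_ref<int>(value);  // Wraps `value` without owning it.
  auto moved = std::move(ref);     // `ref` is now in the moved-from state.
  // The moved-to ref still designates `value`.
  *moved = 2;
  // Any further use of the moved-from `ref` (dereference, swap, comparison)
  // trips PA_RAW_PTR_CHECK() when the checks are compiled in, per the
  // BUILDFLAG guard on the death tests above.
}
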
diff --git a/base/allocator/partition_allocator/pointers/raw_ref_unittest.nc b/base/allocator/partition_allocator/pointers/raw_ref_unittest.nc
deleted file mode 100644
index 0285e70..0000000
--- a/base/allocator/partition_allocator/pointers/raw_ref_unittest.nc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/allocator/partition_allocator/pointers/raw_ref.h"
-
-namespace {
-
-#if defined(NCTEST_CROSS_KIND_CONVERSION_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)4U == \(\(base::RawPtrTraits\)5U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
-  [[maybe_unused]] raw_ref<int> ref2(ref);
-}
-
-#elif defined(NCTEST_CROSS_KIND_CONVERSION_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)5U == \(\(base::RawPtrTraits\)36U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
-  [[maybe_unused]] raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(ref);
-}
-
-#elif defined(NCTEST_CROSS_KIND_CONVERSION_MOVE_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)4U == \(\(base::RawPtrTraits\)5U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
-  [[maybe_unused]] raw_ref<int> ref2(std::move(ref));
-}
-
-#elif defined(NCTEST_CROSS_KIND_CONVERSION_MOVE_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)5U == \(\(base::RawPtrTraits\)36U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
-  [[maybe_unused]] raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(std::move(ref));
-}
-
-#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)4U == \(\(base::RawPtrTraits\)5U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
-  raw_ref<int> ref2(x);
-  ref2 = ref;
-}
-
-#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)5U == \(\(base::RawPtrTraits\)36U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
-  raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(x);
-  ref2 = ref;
-}
-
-#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_MOVE_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)4U == \(\(base::RawPtrTraits\)5U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
-  raw_ref<int> ref2(x);
-  ref2 = std::move(ref);
-}
-
-#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_MOVE_FROM_DUMMY) // [r"static assertion failed due to requirement '\(base::RawPtrTraits\)5U == \(\(base::RawPtrTraits\)36U | RawPtrTraits::kMayDangle\)'"]
-
-void WontCompile() {
-  int x = 123;
-  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
-  raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(x);
-  ref2 = std::move(ref);
-}
-
-#endif
-
-}  // namespace
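
For contrast with the no-compile cases above, the direction that does compile is adding kMayDangle, as the CrossKindConversion test earlier in this diff also shows. A minimal sketch, assuming a Chromium checkout; the function name is hypothetical.

#include "base/allocator/partition_allocator/pointers/raw_ref.h"

void AllowedCrossKindConversion() {
  int x = 123;
  raw_ref<int> ref(x);
  // Adding kMayDangle to the traits is permitted; dropping it (or converting
  // away from kDummyForTest) is what the static_asserts quoted above reject.
  [[maybe_unused]] raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(ref);
}
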
diff --git a/base/allocator/partition_allocator/random.cc b/base/allocator/partition_allocator/random.cc
deleted file mode 100644
index 6916909..0000000
--- a/base/allocator/partition_allocator/random.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/random.h"
-
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-namespace partition_alloc {
-
-class RandomGenerator {
- public:
-  constexpr RandomGenerator() {}
-
-  uint32_t RandomValue() {
-    ::partition_alloc::internal::ScopedGuard guard(lock_);
-    return GetGenerator()->RandUint32();
-  }
-
-  void SeedForTesting(uint64_t seed) {
-    ::partition_alloc::internal::ScopedGuard guard(lock_);
-    GetGenerator()->ReseedForTesting(seed);
-  }
-
- private:
-  ::partition_alloc::internal::Lock lock_ = {};
-  bool initialized_ PA_GUARDED_BY(lock_) = false;
-  union {
-    internal::base::InsecureRandomGenerator instance_ PA_GUARDED_BY(lock_);
-    uint8_t instance_buffer_[sizeof(
-        internal::base::InsecureRandomGenerator)] PA_GUARDED_BY(lock_) = {};
-  };
-
-  internal::base::InsecureRandomGenerator* GetGenerator()
-      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
-    if (!initialized_) {
-      new (instance_buffer_) internal::base::InsecureRandomGenerator();
-      initialized_ = true;
-    }
-    return &instance_;
-  }
-};
-
-// Note: this is redundant, since the anonymous union is incompatible with a
-// non-trivial default destructor. Not meant to be destructed anyway.
-static_assert(std::is_trivially_destructible<RandomGenerator>::value, "");
-
-namespace {
-
-RandomGenerator g_generator = {};
-
-}  // namespace
-
-namespace internal {
-
-uint32_t RandomValue() {
-  return g_generator.RandomValue();
-}
-
-}  // namespace internal
-
-void SetMmapSeedForTesting(uint64_t seed) {
-  return g_generator.SeedForTesting(seed);
-}
-
-}  // namespace partition_alloc
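
The deleted generator above relies on an idiom worth noting: a constant-initialized singleton that stays trivially destructible by placement-new'ing the real generator into an anonymous union member on first use. Below is a self-contained, single-threaded sketch of the same idiom with a stand-in payload (all names are hypothetical; the original also holds a lock, omitted here).

#include <cstdint>
#include <new>
#include <type_traits>

// Stand-in for InsecureRandomGenerator: a type with a non-trivial constructor
// but a trivial destructor.
struct Payload {
  Payload() : state(0x12345678u) {}
  uint32_t Next() { return state = state * 1664525u + 1013904223u; }
  uint32_t state;
};

class LazySingleton {
 public:
  constexpr LazySingleton() {}

  uint32_t Value() { return GetPayload()->Next(); }

 private:
  Payload* GetPayload() {
    if (!initialized_) {
      new (buffer_) Payload();  // Construct in place on first use.
      initialized_ = true;
    }
    return &instance_;  // Union members share an address, as in the original.
  }

  bool initialized_ = false;
  union {
    Payload instance_;
    unsigned char buffer_[sizeof(Payload)] = {};
  };
};

// Trivially destructible despite holding a Payload, mirroring the
// static_assert in the deleted file.
static_assert(std::is_trivially_destructible<LazySingleton>::value, "");

// Constant-initialized at namespace scope and never destroyed.
LazySingleton g_singleton = {};
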
diff --git a/base/allocator/partition_allocator/random.h b/base/allocator/partition_allocator/random.h
deleted file mode 100644
index 8a3a85c..0000000
--- a/base/allocator/partition_allocator/random.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_
-
-#include <stdint.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc {
-
-namespace internal {
-
-// Returns a random value. The generator's internal state is initialized with
-// `base::RandUint64` which is very unpredictable, but which is expensive due to
-// the need to call into the kernel. Therefore this generator uses a fast,
-// entirely user-space function after initialization.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t RandomValue();
-
-}  // namespace internal
-
-// Sets the seed for the random number generator to a known value, to cause the
-// RNG to generate a predictable sequence of outputs. May be called multiple
-// times.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SetMmapSeedForTesting(uint64_t seed);
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_
diff --git a/base/allocator/partition_allocator/reservation_offset_table.cc b/base/allocator/partition_allocator/reservation_offset_table.cc
deleted file mode 100644
index 2d53cd2..0000000
--- a/base/allocator/partition_allocator/reservation_offset_table.cc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-ReservationOffsetTable ReservationOffsetTable::singleton_;
-#else
-ReservationOffsetTable::_ReservationOffsetTable
-    ReservationOffsetTable::reservation_offset_table_;
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/reservation_offset_table.h b/base/allocator/partition_allocator/reservation_offset_table.h
deleted file mode 100644
index 73ea576..0000000
--- a/base/allocator/partition_allocator/reservation_offset_table.h
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_
-
-#include <cstddef>
-#include <cstdint>
-#include <limits>
-#include <tuple>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_isolation/alignment.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal {
-
-static constexpr uint16_t kOffsetTagNotAllocated =
-    std::numeric_limits<uint16_t>::max();
-static constexpr uint16_t kOffsetTagNormalBuckets =
-    std::numeric_limits<uint16_t>::max() - 1;
-
-// The main purpose of the reservation offset table is to easily locate the
-// direct map reservation start address for any given address. There is one
-// entry in the table for each super page.
-//
-// When PartitionAlloc reserves an address region it is always aligned to a
-// super page boundary. However, in 32-bit mode, the size may not be
-// super-page-aligned, so it may look like this:
-//   |<--------- actual reservation size --------->|
-//   +----------+----------+-----+-----------+-----+ - - - +
-//   |SuperPage0|SuperPage1| ... |SuperPage K|SuperPage K+1|
-//   +----------+----------+-----+-----------+-----+ - - - +
-//                                           |<-X->|<-Y*)->|
-//
-// The table entry for each reserved super page says how many super pages away
-// from the start of the reservation that super page is:
-//   +----------+----------+-----+-----------+-------------+
-//   |Entry for |Entry for | ... |Entry for  |Entry for    |
-//   |SuperPage0|SuperPage1|     |SuperPage K|SuperPage K+1|
-//   +----------+----------+-----+-----------+-------------+
-//   |     0    |    1     | ... |     K     |   K + 1     |
-//   +----------+----------+-----+-----------+-------------+
-//
-// For an address Z, the reservation start can be found using this formula:
-//   ((Z >> kSuperPageShift) - (the entry for Z)) << kSuperPageShift
-//
-// kOffsetTagNotAllocated is a special tag denoting that the super page isn't
-// allocated by PartitionAlloc and kOffsetTagNormalBuckets denotes that it is
-// used for a normal-bucket allocation, not for a direct-map allocation.
-//
-// *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
-//    until X is unreserved, because PartitionAlloc always uses kSuperPageSize
-//    alignment when reserving address spaces. One can use an "is in pool?"
-//    check to further determine which part of the super page is used by
-//    PartitionAlloc. This isn't a problem in 64-bit mode, where allocation
-//    granularity is kSuperPageSize.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-    PA_THREAD_ISOLATED_ALIGN ReservationOffsetTable {
- public:
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  // There is one reservation offset table per Pool in 64-bit mode.
-  static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
-  static constexpr size_t kReservationOffsetTableLength =
-      kReservationOffsetTableCoverage >> kSuperPageShift;
-#else
-  // The size of the reservation offset table should cover the entire 32-bit
-  // address space, one element per super page.
-  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
-  static constexpr size_t kReservationOffsetTableLength =
-      4 * kGiB / kSuperPageSize;
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-  static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
-                "Offsets should be smaller than kOffsetTagNormalBuckets.");
-
-  struct _ReservationOffsetTable {
-    // The number of table elements is less than MAX_UINT16, so the element type
-    // can be uint16_t.
-    static_assert(
-        kReservationOffsetTableLength <= std::numeric_limits<uint16_t>::max(),
-        "Length of the reservation offset table must be less than MAX_UINT16");
-    uint16_t offsets[kReservationOffsetTableLength] = {};
-
-    constexpr _ReservationOffsetTable() {
-      for (uint16_t& offset : offsets) {
-        offset = kOffsetTagNotAllocated;
-      }
-    }
-  };
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  // If thread isolation support is enabled, we need to write-protect the tables
-  // of the thread isolated pool. For this, we need to pad the tables so that
-  // the thread isolated ones start on a page boundary.
-  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ(_ReservationOffsetTable,
-                                            kNumPools)] = {};
-  struct _ReservationOffsetTable tables[kNumPools];
-  static PA_CONSTINIT ReservationOffsetTable singleton_;
-#else
-  // A single table for the entire 32-bit address space.
-  static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-};
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
-  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
-  return ReservationOffsetTable::singleton_.tables[handle - 1].offsets;
-}
-
-PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
-    pool_handle handle) {
-  return GetReservationOffsetTable(handle) +
-         ReservationOffsetTable::kReservationOffsetTableLength;
-}
-
-PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
-  pool_handle handle = GetPool(address);
-  return GetReservationOffsetTable(handle);
-}
-
-PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
-    uintptr_t address) {
-  pool_handle handle = GetPool(address);
-  return GetReservationOffsetTableEnd(handle);
-}
-
-PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
-                                                    uintptr_t offset_in_pool) {
-  size_t table_index = offset_in_pool >> kSuperPageShift;
-  PA_DCHECK(table_index <
-            ReservationOffsetTable::kReservationOffsetTableLength);
-  return GetReservationOffsetTable(pool) + table_index;
-}
-#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
-PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
-  return ReservationOffsetTable::reservation_offset_table_.offsets;
-}
-
-PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
-    uintptr_t address) {
-  return ReservationOffsetTable::reservation_offset_table_.offsets +
-         ReservationOffsetTable::kReservationOffsetTableLength;
-}
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  // In 64-bit mode, find the owning Pool and compute the offset from its base.
-  auto [pool, offset] = GetPoolAndOffset(address);
-  return ReservationOffsetPointer(pool, offset);
-#else
-  size_t table_index = address >> kSuperPageShift;
-  PA_DCHECK(table_index <
-            ReservationOffsetTable::kReservationOffsetTableLength);
-  return GetReservationOffsetTable(address) + table_index;
-#endif
-}
-
-PA_ALWAYS_INLINE uintptr_t ComputeReservationStart(uintptr_t address,
-                                                   uint16_t* offset_ptr) {
-  return (address & kSuperPageBaseMask) -
-         (static_cast<size_t>(*offset_ptr) << kSuperPageShift);
-}
-
-// If the given address doesn't point to direct-map allocated memory,
-// returns 0.
-PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  bool is_in_brp_pool = IsManagedByPartitionAllocBRPPool(address);
-  bool is_in_regular_pool = IsManagedByPartitionAllocRegularPool(address);
-  bool is_in_configurable_pool =
-      IsManagedByPartitionAllocConfigurablePool(address);
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  bool is_in_thread_isolated_pool =
-      IsManagedByPartitionAllocThreadIsolatedPool(address);
-#endif
-
-  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
-#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
-  PA_DCHECK(!is_in_brp_pool);
-#endif
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-  uint16_t* offset_ptr = ReservationOffsetPointer(address);
-  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
-  if (*offset_ptr == kOffsetTagNormalBuckets) {
-    return 0;
-  }
-  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
-  // inside another macro (PA_DCHECK).
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-  constexpr size_t kBRPOffset =
-      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
-      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
-#else
-  constexpr size_t kBRPOffset = 0ull;
-#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
-  // Make sure the reservation start is in the same pool as |address|.
-  // In the 32-bit mode, the beginning of a reservation may be excluded
-  // from the BRP pool, so shift the pointer. The other pools don't have
-  // this logic.
-  PA_DCHECK(is_in_brp_pool ==
-            IsManagedByPartitionAllocBRPPool(reservation_start + kBRPOffset));
-  PA_DCHECK(is_in_regular_pool ==
-            IsManagedByPartitionAllocRegularPool(reservation_start));
-  PA_DCHECK(is_in_configurable_pool ==
-            IsManagedByPartitionAllocConfigurablePool(reservation_start));
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-  PA_DCHECK(is_in_thread_isolated_pool ==
-            IsManagedByPartitionAllocThreadIsolatedPool(reservation_start));
-#endif
-  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-  return reservation_start;
-}
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-// If the given address doesn't point to direct-map allocated memory,
-// returns 0.
-// This variant has better performance than the regular one on 64-bit builds if
-// the Pool that an allocation belongs to is known.
-PA_ALWAYS_INLINE uintptr_t
-GetDirectMapReservationStart(uintptr_t address,
-                             pool_handle pool,
-                             uintptr_t offset_in_pool) {
-  PA_DCHECK(AddressPoolManager::GetInstance().GetPoolBaseAddress(pool) +
-                offset_in_pool ==
-            address);
-  uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
-  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
-  if (*offset_ptr == kOffsetTagNormalBuckets) {
-    return 0;
-  }
-  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
-  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
-  return reservation_start;
-}
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-// Returns true if |address| is the beginning of the first super page of a
-// reservation, i.e. either a normal bucket super page, or the first super page
-// of direct map.
-// |address| must belong to an allocated super page.
-PA_ALWAYS_INLINE bool IsReservationStart(uintptr_t address) {
-  uint16_t* offset_ptr = ReservationOffsetPointer(address);
-  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
-  return ((*offset_ptr == kOffsetTagNormalBuckets) || (*offset_ptr == 0)) &&
-         (address % kSuperPageSize == 0);
-}
-
-// Returns true if |address| belongs to a normal bucket super page.
-PA_ALWAYS_INLINE bool IsManagedByNormalBuckets(uintptr_t address) {
-  uint16_t* offset_ptr = ReservationOffsetPointer(address);
-  return *offset_ptr == kOffsetTagNormalBuckets;
-}
-
-// Returns true if |address| belongs to a direct map region.
-PA_ALWAYS_INLINE bool IsManagedByDirectMap(uintptr_t address) {
-  uint16_t* offset_ptr = ReservationOffsetPointer(address);
-  return *offset_ptr != kOffsetTagNormalBuckets &&
-         *offset_ptr != kOffsetTagNotAllocated;
-}
-
-// Returns true if |address| belongs to a normal bucket super page or a direct
-// map region, i.e. belongs to an allocated super page.
-PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
-  uint16_t* offset_ptr = ReservationOffsetPointer(address);
-  return *offset_ptr != kOffsetTagNotAllocated;
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_
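
A self-contained worked example of the lookup formula documented above, with an assumed 2 MiB super page (kSuperPageShift == 21); the constant names and addresses are illustrative only, not taken from the build.

#include <cstdint>

// Mirrors ComputeReservationStart() above: the table entry for an address's
// super page says how many super pages it lies past the reservation start.
constexpr uintptr_t kSuperPageShiftExample = 21;  // 2 MiB super pages, assumed.
constexpr uintptr_t kSuperPageBaseMaskExample =
    ~((uintptr_t{1} << kSuperPageShiftExample) - 1);

constexpr uintptr_t ReservationStartFor(uintptr_t address, uint16_t entry) {
  return (address & kSuperPageBaseMaskExample) -
         (static_cast<uintptr_t>(entry) << kSuperPageShiftExample);
}

// An address inside the 4th super page of a direct-map reservation (table
// entry == 3) resolves back to the reservation's first super page.
static_assert(ReservationStartFor(0x10000000u + 3u * 0x200000u + 0x123u,
                                  /*entry=*/3) == 0x10000000u,
              "reservation start recovered from the offset-table entry");
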
diff --git a/base/allocator/partition_allocator/reverse_bytes.h b/base/allocator/partition_allocator/reverse_bytes.h
deleted file mode 100644
index 2443d47..0000000
--- a/base/allocator/partition_allocator/reverse_bytes.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_REVERSE_BYTES_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_REVERSE_BYTES_H_
-
-// This header defines drop-in constexpr replacements for the
-// byte-reversing routines that we used from `//base/sys_byteorder.h`.
-// They will be made moot by C++23's std::byteswap, which lives in the
-// C++20 <bit> header.
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal {
-
-constexpr uint32_t ReverseFourBytes(uint32_t value) {
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-  return value >> 24 | (value >> 8 & 0xff00) | (value & 0xff00) << 8 |
-         value << 24;
-#else
-  return __builtin_bswap32(value);
-#endif  // PA_CONFIG(IS_NONCLANG_MSVC)
-}
-
-constexpr uint64_t ReverseEightBytes(uint64_t value) {
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-  return value >> 56 | (value >> 40 & 0xff00) | (value >> 24 & 0xff0000) |
-         (value >> 8 & 0xff000000) | (value & 0xff000000) << 8 |
-         (value & 0xff0000) << 24 | (value & 0xff00) << 40 |
-         (value & 0xff) << 56;
-#else
-  return __builtin_bswap64(value);
-#endif  // PA_CONFIG(IS_NONCLANG_MSVC)
-}
-
-constexpr uintptr_t ReverseBytes(uintptr_t value) {
-  if (sizeof(uintptr_t) == 4) {
-    return ReverseFourBytes(static_cast<uint32_t>(value));
-  }
-  return ReverseEightBytes(static_cast<uint64_t>(value));
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_REVERSE_BYTES_H_
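
Because these helpers are constexpr, the check that the unit test below performs at run time can also be done at compile time; a small sketch, assuming a Chromium checkout.

#include "base/allocator/partition_allocator/reverse_bytes.h"

// Compile-time counterpart of the DeadBeefScramble test that follows in this
// diff; valid because ReverseFourBytes()/ReverseEightBytes() are constexpr.
static_assert(partition_alloc::internal::ReverseFourBytes(0xefbeaddeu) ==
                  0xdeadbeefu,
              "four-byte reversal");
static_assert(partition_alloc::internal::ReverseEightBytes(
                  0xffeeddccefbeaddeull) == 0xdeadbeefccddeeffull,
              "eight-byte reversal");
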
diff --git a/base/allocator/partition_allocator/reverse_bytes_unittest.cc b/base/allocator/partition_allocator/reverse_bytes_unittest.cc
deleted file mode 100644
index ecd2a16..0000000
--- a/base/allocator/partition_allocator/reverse_bytes_unittest.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/reverse_bytes.h"
-
-#include <cstdint>
-
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal {
-namespace {
-
-TEST(ReverseBytes, DeadBeefScramble) {
-  if (sizeof(uintptr_t) == 4) {
-    EXPECT_EQ(ReverseBytes(uintptr_t{0xefbeadde}), 0xdeadbeef);
-  } else {
-    // Hacky kludge to keep the compiler from immediately noticing that
-    // this won't fit into a uintptr_t when it's four bytes.
-    EXPECT_EQ(ReverseBytes(uint64_t{0xffeeddccefbeadde}), 0xdeadbeefccddeeff);
-  }
-}
-
-}  // namespace
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/shim/DEPS b/base/allocator/partition_allocator/shim/DEPS
deleted file mode 100644
index 7119eb2..0000000
--- a/base/allocator/partition_allocator/shim/DEPS
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2022 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# This file is meant to be temporary while we migrate allocator_shim code
-# into partition_allocator/. It will be removed once the migration is
-# done.
-
-specific_include_rules = {
-  "allocator_shim_unittest\.cc$": [
-    "+base/synchronization/waitable_event.h",
-    "+base/threading/platform_thread.h",
-    "+base/threading/thread_local.h",
-  ],
-  "allocator_interception_mac_unittest\.mm$": [
-    "+testing/gtest/include/gtest/gtest.h",
-  ],
-}
diff --git a/base/allocator/partition_allocator/shim/allocator_interception_mac.h b/base/allocator/partition_allocator/shim/allocator_interception_mac.h
deleted file mode 100644
index ac08b7c..0000000
--- a/base/allocator/partition_allocator/shim/allocator_interception_mac.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_INTERCEPTION_MAC_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_INTERCEPTION_MAC_H_
-
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/third_party/apple_apsl/malloc.h"
-
-namespace allocator_shim {
-
-struct MallocZoneFunctions;
-
-// This initializes AllocatorDispatch::default_dispatch by saving pointers to
-// the functions in the current default malloc zone. This must be called before
-// the default malloc zone is changed to have its intended effect.
-void InitializeDefaultDispatchToMacAllocator();
-
-// Saves the function pointers currently used by the default zone.
-void StoreFunctionsForDefaultZone();
-
-// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
-void StoreFunctionsForAllZones();
-
-// For all malloc zones that have been stored, replace their functions with
-// |functions|.
-void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern bool g_replaced_default_zone;
-
-// Calls the original implementation of malloc/calloc prior to interception.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool UncheckedMallocMac(size_t size, void** result);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool UncheckedCallocMac(size_t num_items, size_t size, void** result);
-
-// Intercepts calls to default and purgeable malloc zones. Intercepts Core
-// Foundation and Objective-C allocations.
-// Has no effect on the default malloc zone if the allocator shim already
-// performs that interception.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void InterceptAllocationsMac();
-
-// Updates all malloc zones to use their original functions.
-// Also calls ClearAllMallocZonesForTesting.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void UninterceptMallocZonesForTesting();
-
-// Returns true if allocations are successfully being intercepted for all malloc
-// zones.
-bool AreMallocZonesIntercepted();
-
-// heap_profiling::ProfilingClient needs to shim all malloc zones, even ones
-// that are registered after start-up. ProfilingClient periodically calls
-// this API to make sure that all malloc zones are shimmed.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void ShimNewMallocZones();
-
-// Exposed for testing.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void ReplaceZoneFunctions(ChromeMallocZone* zone,
-                          const MallocZoneFunctions* functions);
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_INTERCEPTION_MAC_H_
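
A sketch of how the unchecked entry points declared above are meant to be used: a caller that tolerates allocation failure instead of dying in the OOM handler installed by InterceptAllocationsMac(). Chromium checkout assumed; the wrapper function is hypothetical.

#include <cstddef>

#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"

bool TryAllocate(size_t size, void** out) {
  // Calls the pre-interception malloc; returns false on failure instead of
  // terminating the process, unlike a plain malloc() once the OOM killer
  // wrappers are installed.
  void* result = nullptr;
  if (!allocator_shim::UncheckedMallocMac(size, &result)) {
    return false;
  }
  *out = result;
  return true;
}
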
diff --git a/base/allocator/partition_allocator/shim/allocator_interception_mac.mm b/base/allocator/partition_allocator/shim/allocator_interception_mac.mm
deleted file mode 100644
index b208713..0000000
--- a/base/allocator/partition_allocator/shim/allocator_interception_mac.mm
+++ /dev/null
@@ -1,628 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains all the logic necessary to intercept allocations on
-// macOS. "malloc zones" are an abstraction that allows the process to intercept
-// all malloc-related functions.  There is no good mechanism [short of
-// interposition] to determine when new malloc zones are added, so there's no
-// clean mechanism to intercept all malloc zones. This file contains logic to
-// intercept the default and purgeable zones, which always exist. A cursory
-// review of Chrome seems to imply that non-default zones are almost never used.
-//
-// This file also contains logic to intercept Core Foundation and Objective-C
-// allocations. The implementations forward to the default malloc zone, so the
-// only reason to intercept these calls is to re-label OOM crashes with slightly
-// more details.
-
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-
-#include <CoreFoundation/CoreFoundation.h>
-#import <Foundation/Foundation.h>
-#include <errno.h>
-#include <mach/mach.h>
-#import <objc/runtime.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <new>
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"
-#include "base/allocator/partition_allocator/third_party/apple_apsl/CFBase.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_IOS)
-#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
-#else
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-#endif
-
-// The patching of Objective-C runtime bits must be done without any
-// interference from the ARC machinery.
-#if defined(__has_feature) && __has_feature(objc_arc)
-#error "This file must not be compiled with ARC."
-#endif
-
-namespace allocator_shim {
-
-bool g_replaced_default_zone = false;
-
-namespace {
-
-bool g_oom_killer_enabled;
-bool g_allocator_shims_failed_to_install;
-
-// Starting with Mac OS X 10.7, the zone allocators set up by the system are
-// read-only, to prevent them from being overwritten in an attack. However,
-// blindly unprotecting and reprotecting the zone allocators fails with
-// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
-// memory in its bss. Explicit saving/restoring of the protection is required.
-//
-// This function takes a pointer to a malloc zone, de-protects it if necessary,
-// and returns (in the out parameters) a region of memory (if any) to be
-// re-protected when modifications are complete. This approach assumes that
-// there is no contention for the protection of this memory.
-//
-// Returns true if the malloc zone was properly de-protected, or false
-// otherwise. If this function returns false, the out parameters are invalid and
-// the region does not need to be re-protected.
-bool DeprotectMallocZone(ChromeMallocZone* default_zone,
-                         vm_address_t* reprotection_start,
-                         vm_size_t* reprotection_length,
-                         vm_prot_t* reprotection_value) {
-  mach_port_t unused;
-  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
-  struct vm_region_basic_info_64 info;
-  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
-  kern_return_t result =
-      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
-                   VM_REGION_BASIC_INFO_64,
-                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
-  if (result != KERN_SUCCESS) {
-    PA_MACH_LOG(ERROR, result) << "vm_region_64";
-    return false;
-  }
-
-  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
-  // balance it with a deallocate in case this ever changes. See
-  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
-  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
-  mach_port_deallocate(mach_task_self(), unused);
-
-  if (!(info.max_protection & VM_PROT_WRITE)) {
-    PA_LOG(ERROR) << "Invalid max_protection " << info.max_protection;
-    return false;
-  }
-
-  // Does the region fully enclose the zone pointers? This possibly
-  // over-simplifies by using the size of a full version 10 malloc zone rather
-  // than the actual, smaller size when the passed-in zone is not version 10.
-  PA_DCHECK(*reprotection_start <=
-            reinterpret_cast<vm_address_t>(default_zone));
-  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
-                          reinterpret_cast<vm_address_t>(*reprotection_start);
-  PA_DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
-
-  if (info.protection & VM_PROT_WRITE) {
-    // No change needed; the zone is already writable.
-    *reprotection_start = 0;
-    *reprotection_length = 0;
-    *reprotection_value = VM_PROT_NONE;
-  } else {
-    *reprotection_value = info.protection;
-    result =
-        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
-                   false, info.protection | VM_PROT_WRITE);
-    if (result != KERN_SUCCESS) {
-      PA_MACH_LOG(ERROR, result) << "vm_protect";
-      return false;
-    }
-  }
-  return true;
-}
-
-#if !defined(ADDRESS_SANITIZER)
-
-MallocZoneFunctions g_old_zone;
-MallocZoneFunctions g_old_purgeable_zone;
-
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
-  void* result = g_old_zone.malloc(zone, size);
-  if (!result && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-void* oom_killer_calloc(struct _malloc_zone_t* zone,
-                        size_t num_items,
-                        size_t size) {
-  void* result = g_old_zone.calloc(zone, num_items, size);
-  if (!result && num_items && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(num_items * size);
-  }
-  return result;
-}
-
-void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
-  void* result = g_old_zone.valloc(zone, size);
-  if (!result && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
-  g_old_zone.free(zone, ptr);
-}
-
-void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
-  void* result = g_old_zone.realloc(zone, ptr, size);
-  if (!result && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-void* oom_killer_memalign(struct _malloc_zone_t* zone,
-                          size_t alignment,
-                          size_t size) {
-  void* result = g_old_zone.memalign(zone, alignment, size);
-  // Only die if posix_memalign would have returned ENOMEM, since there are
-  // other reasons why null might be returned. See posix_memalign() in 10.15's
-  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
-  if (!result && size && alignment >= sizeof(void*) &&
-      partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
-  void* result = g_old_purgeable_zone.malloc(zone, size);
-  if (!result && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
-                                  size_t num_items,
-                                  size_t size) {
-  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
-  if (!result && num_items && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(num_items * size);
-  }
-  return result;
-}
-
-void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
-  void* result = g_old_purgeable_zone.valloc(zone, size);
-  if (!result && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
-  g_old_purgeable_zone.free(zone, ptr);
-}
-
-void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
-                                   void* ptr,
-                                   size_t size) {
-  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
-  if (!result && size) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
-                                    size_t alignment,
-                                    size_t size) {
-  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
-  // Only die if posix_memalign would have returned ENOMEM, since there are
-  // other reasons why null might be returned. See posix_memalign() in 10.15's
-  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
-  if (!result && size && alignment >= sizeof(void*) &&
-      partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-  return result;
-}
-
-#endif  // !defined(ADDRESS_SANITIZER)
-
-#if !defined(ADDRESS_SANITIZER)
-
-// === Core Foundation CFAllocators ===
-
-bool CanGetContextForCFAllocator() {
-#if BUILDFLAG(IS_IOS)
-  return !partition_alloc::internal::base::ios::IsRunningOnOrLater(17, 0, 0);
-#else
-  // As of macOS 14, the allocators seem to be in read-only memory? See
-  // https://crbug.com/1454013.
-  return partition_alloc::internal::base::mac::IsAtMostOS13();
-#endif
-}
-
-CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
-  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
-      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
-  return &our_allocator->_context;
-}
-
-CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
-CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
-CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
-
-void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
-                                            CFOptionFlags hint,
-                                            void* info) {
-  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
-  if (!result) {
-    partition_alloc::TerminateBecauseOutOfMemory(
-        static_cast<size_t>(alloc_size));
-  }
-  return result;
-}
-
-void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
-                                    CFOptionFlags hint,
-                                    void* info) {
-  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
-  if (!result) {
-    partition_alloc::TerminateBecauseOutOfMemory(
-        static_cast<size_t>(alloc_size));
-  }
-  return result;
-}
-
-void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
-                                         CFOptionFlags hint,
-                                         void* info) {
-  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
-  if (!result) {
-    partition_alloc::TerminateBecauseOutOfMemory(
-        static_cast<size_t>(alloc_size));
-  }
-  return result;
-}
-
-#endif  // !defined(ADDRESS_SANITIZER)
-
-// === Cocoa NSObject allocation ===
-
-typedef id (*allocWithZone_t)(id, SEL, NSZone*);
-allocWithZone_t g_old_allocWithZone;
-
-id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
-  id result = g_old_allocWithZone(self, _cmd, zone);
-  if (!result) {
-    partition_alloc::TerminateBecauseOutOfMemory(0);
-  }
-  return result;
-}
-
-void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
-  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
-  if (!IsMallocZoneAlreadyStored(chrome_zone)) {
-    return;
-  }
-  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
-  ReplaceZoneFunctions(chrome_zone, &functions);
-}
-
-}  // namespace
-
-bool UncheckedMallocMac(size_t size, void** result) {
-#if defined(ADDRESS_SANITIZER)
-  *result = malloc(size);
-#else
-  if (g_old_zone.malloc) {
-    *result = g_old_zone.malloc(malloc_default_zone(), size);
-  } else {
-    *result = malloc(size);
-  }
-#endif  // defined(ADDRESS_SANITIZER)
-
-  return *result != NULL;
-}
-
-bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
-#if defined(ADDRESS_SANITIZER)
-  *result = calloc(num_items, size);
-#else
-  if (g_old_zone.calloc) {
-    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
-  } else {
-    *result = calloc(num_items, size);
-  }
-#endif  // defined(ADDRESS_SANITIZER)
-
-  return *result != NULL;
-}
-
-void InitializeDefaultDispatchToMacAllocator() {
-  StoreFunctionsForAllZones();
-}
-
-void StoreFunctionsForDefaultZone() {
-  ChromeMallocZone* default_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  StoreMallocZone(default_zone);
-}
-
-void StoreFunctionsForAllZones() {
-  // This ensures that the default zone is always at the front of the array,
-  // which is important for performance.
-  StoreFunctionsForDefaultZone();
-
-  vm_address_t* zones;
-  unsigned int count;
-  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
-  if (kr != KERN_SUCCESS) {
-    return;
-  }
-  for (unsigned int i = 0; i < count; ++i) {
-    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
-    StoreMallocZone(zone);
-  }
-}
-
-void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
-  // The default zone does not get returned in malloc_get_all_zones().
-  ChromeMallocZone* default_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
-    ReplaceZoneFunctions(default_zone, functions);
-  }
-
-  vm_address_t* zones;
-  unsigned int count;
-  kern_return_t kr =
-      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
-  if (kr != KERN_SUCCESS) {
-    return;
-  }
-  for (unsigned int i = 0; i < count; ++i) {
-    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
-    if (DoesMallocZoneNeedReplacing(zone, functions)) {
-      ReplaceZoneFunctions(zone, functions);
-    }
-  }
-  g_replaced_default_zone = true;
-}
-
-void InterceptAllocationsMac() {
-  if (g_oom_killer_enabled) {
-    return;
-  }
-
-  g_oom_killer_enabled = true;
-
-  // === C malloc/calloc/valloc/realloc/posix_memalign ===
-
-  // This approach is not perfect, as requests for amounts of memory larger than
-  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
-  // still fail with a NULL rather than dying (see malloc_zone_malloc() in
-  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
-  // for details). Unfortunately, it's the best we can do. Also note that this
-  // does not affect allocations from non-default zones.
-
-#if !defined(ADDRESS_SANITIZER)
-  // Don't do anything special on OOM for the malloc zones replaced by
-  // AddressSanitizer, as modifying or protecting them may not work correctly.
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  // The malloc zone backed by PartitionAlloc crashes by default, so there is
-  // no need to install the OOM killer.
-  ChromeMallocZone* default_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  if (!IsMallocZoneAlreadyStored(default_zone)) {
-    StoreZoneFunctions(default_zone, &g_old_zone);
-    MallocZoneFunctions new_functions = {};
-    new_functions.malloc = oom_killer_malloc;
-    new_functions.calloc = oom_killer_calloc;
-    new_functions.valloc = oom_killer_valloc;
-    new_functions.free = oom_killer_free;
-    new_functions.realloc = oom_killer_realloc;
-    new_functions.memalign = oom_killer_memalign;
-
-    ReplaceZoneFunctions(default_zone, &new_functions);
-    g_replaced_default_zone = true;
-  }
-#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-  ChromeMallocZone* purgeable_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
-  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
-    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
-    MallocZoneFunctions new_functions = {};
-    new_functions.malloc = oom_killer_malloc_purgeable;
-    new_functions.calloc = oom_killer_calloc_purgeable;
-    new_functions.valloc = oom_killer_valloc_purgeable;
-    new_functions.free = oom_killer_free_purgeable;
-    new_functions.realloc = oom_killer_realloc_purgeable;
-    new_functions.memalign = oom_killer_memalign_purgeable;
-    ReplaceZoneFunctions(purgeable_zone, &new_functions);
-  }
-#endif
-
-  // === C malloc_zone_batch_malloc ===
-
-  // batch_malloc is omitted because the default malloc zone's implementation
-  // only supports batch_malloc for "tiny" allocations from the free list. It
-  // will fail for allocations larger than "tiny", and will only allocate as
-  // many blocks as it's able to from the free list. These factors mean that it
-  // can return less than the requested memory even in a non-out-of-memory
-  // situation. There's no good way to detect whether a batch_malloc failure is
-  // due to these other factors, or due to genuine memory or address space
-  // exhaustion. The fact that it only allocates space from the "tiny" free list
-  // means that it's likely that a failure will not be due to memory exhaustion.
-  // Similarly, these constraints on batch_malloc mean that callers must always
-  // be expecting to receive less memory than was requested, even in situations
-  // where memory pressure is not a concern. Finally, the only public interface
-  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
-  // system's malloc implementation. It's unlikely that anyone's even heard of
-  // it.
-
-#ifndef ADDRESS_SANITIZER
-  // === Core Foundation CFAllocators ===
-
-  // This will not catch allocations done by custom allocators, but will catch
-  // all allocations done by system-provided ones.
-
-  PA_CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
-           !g_old_cfallocator_malloc_zone)
-      << "Old allocators unexpectedly non-null";
-
-  bool cf_allocator_internals_known = CanGetContextForCFAllocator();
-
-  if (cf_allocator_internals_known) {
-    CFAllocatorContext* context =
-        ContextForCFAllocator(kCFAllocatorSystemDefault);
-    PA_CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
-    g_old_cfallocator_system_default = context->allocate;
-    PA_CHECK(g_old_cfallocator_system_default)
-        << "Failed to get kCFAllocatorSystemDefault allocation function.";
-    context->allocate = oom_killer_cfallocator_system_default;
-
-    context = ContextForCFAllocator(kCFAllocatorMalloc);
-    PA_CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
-    g_old_cfallocator_malloc = context->allocate;
-    PA_CHECK(g_old_cfallocator_malloc)
-        << "Failed to get kCFAllocatorMalloc allocation function.";
-    context->allocate = oom_killer_cfallocator_malloc;
-
-    context = ContextForCFAllocator(kCFAllocatorMallocZone);
-    PA_CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
-    g_old_cfallocator_malloc_zone = context->allocate;
-    PA_CHECK(g_old_cfallocator_malloc_zone)
-        << "Failed to get kCFAllocatorMallocZone allocation function.";
-    context->allocate = oom_killer_cfallocator_malloc_zone;
-  }
-#endif
-
-  // === Cocoa NSObject allocation ===
-
-  // Note that both +[NSObject new] and +[NSObject alloc] call through to
-  // +[NSObject allocWithZone:].
-
-  PA_CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";
-
-  Class nsobject_class = [NSObject class];
-  Method orig_method =
-      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
-  g_old_allocWithZone =
-      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
-  PA_CHECK(g_old_allocWithZone)
-      << "Failed to get allocWithZone allocation function.";
-  method_setImplementation(orig_method,
-                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
-}
-
-void UninterceptMallocZonesForTesting() {
-  UninterceptMallocZoneForTesting(malloc_default_zone());  // IN-TEST
-  vm_address_t* zones;
-  unsigned int count;
-  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
-  PA_CHECK(kr == KERN_SUCCESS);
-  for (unsigned int i = 0; i < count; ++i) {
-    UninterceptMallocZoneForTesting(  // IN-TEST
-        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
-  }
-
-  ClearAllMallocZonesForTesting();  // IN-TEST
-}
-
-bool AreMallocZonesIntercepted() {
-  return !g_allocator_shims_failed_to_install;
-}
-
-void ShimNewMallocZones() {
-  StoreFunctionsForAllZones();
-
-  // Use the functions for the default zone as a template to replace those of
-  // the new zones.
-  ChromeMallocZone* default_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  PA_DCHECK(IsMallocZoneAlreadyStored(default_zone));
-
-  MallocZoneFunctions new_functions;
-  StoreZoneFunctions(default_zone, &new_functions);
-  ReplaceFunctionsForStoredZones(&new_functions);
-}
-
-void ReplaceZoneFunctions(ChromeMallocZone* zone,
-                          const MallocZoneFunctions* functions) {
-  // Remove protection.
-  vm_address_t reprotection_start = 0;
-  vm_size_t reprotection_length = 0;
-  vm_prot_t reprotection_value = VM_PROT_NONE;
-  bool success = DeprotectMallocZone(zone, &reprotection_start,
-                                     &reprotection_length, &reprotection_value);
-  if (!success) {
-    g_allocator_shims_failed_to_install = true;
-    return;
-  }
-
-  PA_CHECK(functions->malloc && functions->calloc && functions->valloc &&
-           functions->free && functions->realloc);
-  zone->malloc = functions->malloc;
-  zone->calloc = functions->calloc;
-  zone->valloc = functions->valloc;
-  zone->free = functions->free;
-  zone->realloc = functions->realloc;
-  if (functions->batch_malloc) {
-    zone->batch_malloc = functions->batch_malloc;
-  }
-  if (functions->batch_free) {
-    zone->batch_free = functions->batch_free;
-  }
-  if (functions->size) {
-    zone->size = functions->size;
-  }
-  if (zone->version >= 5 && functions->memalign) {
-    zone->memalign = functions->memalign;
-  }
-  if (zone->version >= 6 && functions->free_definite_size) {
-    zone->free_definite_size = functions->free_definite_size;
-  }
-  if (zone->version >= 10 && functions->claimed_address) {
-    zone->claimed_address = functions->claimed_address;
-  }
-  if (zone->version >= 13 && functions->try_free_default) {
-    zone->try_free_default = functions->try_free_default;
-  }
-
-  // Cap the version to the max supported to ensure malloc doesn't try to call
-  // functions that weren't replaced.
-#if (__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
-    (__IPHONE_OS_VERSION_MAX_ALLOWED >= 160100)
-  zone->version = std::min(zone->version, 13U);
-#else
-  zone->version = std::min(zone->version, 12U);
-#endif
-
-  // Restore protection if it was active.
-  if (reprotection_start) {
-    kern_return_t result =
-        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
-                   false, reprotection_value);
-    PA_MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
-  }
-}
-
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/shim/allocator_interception_mac_unittest.mm b/base/allocator/partition_allocator/shim/allocator_interception_mac_unittest.mm
deleted file mode 100644
index 5efe0b4..0000000
--- a/base/allocator/partition_allocator/shim/allocator_interception_mac_unittest.mm
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-
-#include <mach/mach.h>
-
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace allocator_shim {
-
-namespace {
-void ResetMallocZone(ChromeMallocZone* zone) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
-  ReplaceZoneFunctions(zone, &functions);
-}
-
-void ResetAllMallocZones() {
-  ChromeMallocZone* default_malloc_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  ResetMallocZone(default_malloc_zone);
-
-  vm_address_t* zones;
-  unsigned int count;
-  kern_return_t kr = malloc_get_all_zones(mach_task_self(), /*reader=*/nullptr,
-                                          &zones, &count);
-  if (kr != KERN_SUCCESS) {
-    return;
-  }
-  for (unsigned int i = 0; i < count; ++i) {
-    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
-    ResetMallocZone(zone);
-  }
-}
-}  // namespace
-
-class AllocatorInterceptionTest : public testing::Test {
- protected:
-  void TearDown() override {
-    ResetAllMallocZones();
-    ClearAllMallocZonesForTesting();
-  }
-};
-
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-TEST_F(AllocatorInterceptionTest, ShimNewMallocZones) {
-  InitializeAllocatorShim();
-  ChromeMallocZone* default_malloc_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-
-  malloc_zone_t new_zone;
-  memset(&new_zone, 1, sizeof(malloc_zone_t));
-  malloc_zone_register(&new_zone);
-  EXPECT_NE(new_zone.malloc, default_malloc_zone->malloc);
-  ShimNewMallocZones();
-  EXPECT_EQ(new_zone.malloc, default_malloc_zone->malloc);
-
-  malloc_zone_unregister(&new_zone);
-}
-#endif
-
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/shim/allocator_shim.cc b/base/allocator/partition_allocator/shim/allocator_shim.cc
deleted file mode 100644
index 3c20b95..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim.cc
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-
-#include <errno.h>
-
-#include <atomic>
-#include <new>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/notreached.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(IS_WIN)
-#include <unistd.h>
-#else
-#include "base/allocator/partition_allocator/shim/winheap_stubs_win.h"
-#endif
-
-#if BUILDFLAG(IS_APPLE)
-#include <malloc/malloc.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h"
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-#endif
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-#endif
-
-// No calls to malloc / new in this file. They would cause re-entrancy of
-// the shim, which is hard to deal with. Keep this code as simple as possible
-// and don't use any external C++ object here, not even //base ones. Even if
-// they are safe to use today, in future they might be refactored.
-
-namespace {
-
-std::atomic<const allocator_shim::AllocatorDispatch*> g_chain_head{
-    &allocator_shim::AllocatorDispatch::default_dispatch};
-
-bool g_call_new_handler_on_malloc_failure = false;
-
-PA_ALWAYS_INLINE size_t GetCachedPageSize() {
-  static size_t pagesize = 0;
-  if (!pagesize) {
-    pagesize = partition_alloc::internal::base::GetPageSize();
-  }
-  return pagesize;
-}
-
-// Calls the std::new handler thread-safely. Returns true if a new_handler was
-// set and called, false if no new_handler was set.
-bool CallNewHandler(size_t size) {
-#if BUILDFLAG(IS_WIN)
-  return allocator_shim::WinCallNewHandler(size);
-#else
-  std::new_handler nh = std::get_new_handler();
-  if (!nh) {
-    return false;
-  }
-  (*nh)();
-  // Assume the new_handler will abort if it fails. Exceptions are disabled and
-  // we don't support the case of a new_handler throwing std::bad_alloc.
-  return true;
-#endif
-}
-
-PA_ALWAYS_INLINE const allocator_shim::AllocatorDispatch* GetChainHead() {
-  return g_chain_head.load(std::memory_order_relaxed);
-}
-
-}  // namespace
-
-namespace allocator_shim {
-
-void SetCallNewHandlerOnMallocFailure(bool value) {
-  g_call_new_handler_on_malloc_failure = value;
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  allocator_shim::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(
-      value);
-#endif
-}
-
-void* UncheckedAlloc(size_t size) {
-  const AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
-}
-
-void UncheckedFree(void* ptr) {
-  const AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_function(chain_head, ptr, nullptr);
-}
-
-void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
-  // Loop in case of (an unlikely) race on setting the list head.
-  size_t kMaxRetries = 7;
-  for (size_t i = 0; i < kMaxRetries; ++i) {
-    const AllocatorDispatch* chain_head = GetChainHead();
-    dispatch->next = chain_head;
-
-    // This function guarantees to be thread-safe w.r.t. concurrent
-    // insertions. It also has to guarantee that all the threads always
-    // see a consistent chain, hence the atomic_thread_fence() below.
-    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
-    // we don't really want this to be a release-store with a corresponding
-    // acquire-load during malloc().
-    std::atomic_thread_fence(std::memory_order_seq_cst);
-    // Set the chain head to the new dispatch atomically. If we lose the race,
-    // retry.
-    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
-                                             std::memory_order_relaxed,
-                                             std::memory_order_relaxed)) {
-      // Success.
-      return;
-    }
-  }
-
-  PA_CHECK(false);  // Too many retries, this shouldn't happen.
-}
-
-void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
-  PA_DCHECK(GetChainHead() == dispatch);
-  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
-}
-
-#if BUILDFLAG(IS_APPLE)
-void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr) {
-  unsigned int zone_count = 0;
-  vm_address_t* zones = nullptr;
-  kern_return_t result =
-      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
-  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
-
-  // "find_zone_and_free" expected by try_free_default.
-  //
-  // libmalloc's zones call find_registered_zone() in case the default one
-  // doesn't handle the allocation. We can't, so we try to emulate it. See the
-  // implementation in libmalloc/src/malloc.c for details.
-  // https://github.com/apple-oss-distributions/libmalloc/blob/main/src/malloc.c
-  for (unsigned int i = 0; i < zone_count; ++i) {
-    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
-    if (size_t size = zone->size(zone, ptr)) {
-      if (zone->version >= 6 && zone->free_definite_size) {
-        zone->free_definite_size(zone, ptr, size);
-      } else {
-        zone->free(zone, ptr);
-      }
-      return;
-    }
-  }
-
-  // There must be an owner zone.
-  PA_CHECK(false);
-}
-#endif  // BUILDFLAG(IS_APPLE)
-
-}  // namespace allocator_shim
-
-// The Shim* functions below are the entry-points into the shim-layer and
-// are supposed to be invoked by the allocator_shim_override_*
-// headers to route the malloc / new symbols through the shim layer.
-// They are defined as ALWAYS_INLINE in order to remove a level of indirection
-// between the system-defined entry points and the shim implementations.
-extern "C" {
-
-// The general pattern for allocations is:
-// - Try to allocate; if it succeeds, return the pointer.
-// - If the allocation failed:
-//   - Call the std::new_handler if it was a C++ allocation.
-//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
-//     AND SetCallNewHandlerOnMallocFailure(true).
-//   - If the std::new_handler is NOT set, just return nullptr.
-//   - If the std::new_handler is set:
-//     - Assume it will abort() if it fails (very likely the new_handler will
-//       just terminate the process after printing a message).
-//     - Assume it did succeed if it returns, in which case reattempt the alloc.
-
-PA_ALWAYS_INLINE void* ShimCppNew(size_t size) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr;
-  do {
-    void* context = nullptr;
-#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-    context = malloc_default_zone();
-#endif
-    ptr = chain_head->alloc_function(chain_head, size, context);
-  } while (!ptr && CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
-  void* context = nullptr;
-#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  context = malloc_default_zone();
-#endif
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->alloc_unchecked_function(chain_head, size, context);
-}
-
-PA_ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr;
-  do {
-    void* context = nullptr;
-#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-    context = malloc_default_zone();
-#endif
-    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
-                                             context);
-  } while (!ptr && CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void ShimCppDelete(void* address) {
-  void* context = nullptr;
-#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  context = malloc_default_zone();
-#endif
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_function(chain_head, address, context);
-}
-
-PA_ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr;
-  do {
-    ptr = chain_head->alloc_function(chain_head, size, context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr;
-  do {
-    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
-                                                      context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
-  // realloc(size == 0) means free() and might return a nullptr. We should
-  // not call the std::new_handler in that case, though.
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr;
-  do {
-    ptr = chain_head->realloc_function(chain_head, address, size, context);
-  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void* ShimMemalign(size_t alignment,
-                                    size_t size,
-                                    void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr;
-  do {
-    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
-                                             context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE int ShimPosixMemalign(void** res,
-                                       size_t alignment,
-                                       size_t size) {
-  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
-  // in tc_malloc.cc.
-  if (((alignment % sizeof(void*)) != 0) ||
-      !partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
-    return EINVAL;
-  }
-  void* ptr = ShimMemalign(alignment, size, nullptr);
-  *res = ptr;
-  return ptr ? 0 : ENOMEM;
-}
-
-PA_ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
-  return ShimMemalign(GetCachedPageSize(), size, context);
-}
-
-PA_ALWAYS_INLINE void* ShimPvalloc(size_t size) {
-  // pvalloc(0) should allocate one page, according to its man page.
-  if (size == 0) {
-    size = GetCachedPageSize();
-  } else {
-    size = partition_alloc::internal::base::bits::AlignUp(size,
-                                                          GetCachedPageSize());
-  }
-  // The third argument is nullptr because pvalloc is glibc only and does not
-  // exist on OSX/BSD systems.
-  return ShimMemalign(GetCachedPageSize(), size, nullptr);
-}
-
-PA_ALWAYS_INLINE void ShimFree(void* address, void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_function(chain_head, address, context);
-}
-
-PA_ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address,
-                                            void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->get_size_estimate_function(
-      chain_head, const_cast<void*>(address), context);
-}
-
-PA_ALWAYS_INLINE bool ShimClaimedAddress(void* address, void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->claimed_address_function(chain_head, address, context);
-}
-
-PA_ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
-                                          void** results,
-                                          unsigned num_requested,
-                                          void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->batch_malloc_function(chain_head, size, results,
-                                           num_requested, context);
-}
-
-PA_ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
-                                    unsigned num_to_be_freed,
-                                    void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->batch_free_function(chain_head, to_be_freed,
-                                         num_to_be_freed, context);
-}
-
-PA_ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr,
-                                           size_t size,
-                                           void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_definite_size_function(chain_head, ptr, size,
-                                                 context);
-}
-
-PA_ALWAYS_INLINE void ShimTryFreeDefault(void* ptr, void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->try_free_default_function(chain_head, ptr, context);
-}
-
-PA_ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
-                                         size_t alignment,
-                                         void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr = nullptr;
-  do {
-    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
-                                              context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
-                                          size_t size,
-                                          size_t alignment,
-                                          void* context) {
-  // _aligned_realloc(size == 0) means _aligned_free() and might return a
-  // nullptr. We should not call the std::new_handler in that case, though.
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  void* ptr = nullptr;
-  do {
-    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
-                                               alignment, context);
-  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
-  return ptr;
-}
-
-PA_ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
-  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->aligned_free_function(chain_head, address, context);
-}
-
-}  // extern "C"
-
-#if !BUILDFLAG(IS_WIN) && \
-    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
-// Cpp symbols (new / delete) should always be routed through the shim layer
-// except on Windows and macOS (except for PartitionAlloc-Everywhere) where the
-// malloc intercept is deep enough that it also catches the cpp calls.
-//
-// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
-// allocator_shim::internal::PartitionMalloc crashes on OOM, and we need to
-// avoid crashes in case of operator new() noexcept.  Thus, operator new()
-// noexcept needs to be routed to
-// allocator_shim::internal::PartitionMallocUnchecked through the shim layer.
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_cpp_symbols.h"
-#endif
-
-#if BUILDFLAG(IS_ANDROID)
-// Android does not support symbol interposition. The way malloc symbols are
-// intercepted on Android is by using link-time -wrap flags.
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h"
-#elif BUILDFLAG(IS_WIN)
-// On Windows we use plain link-time overriding of the CRT symbols.
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_ucrt_symbols_win.h"
-#elif BUILDFLAG(IS_APPLE)
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_mac_default_zone.h"
-#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_mac_symbols.h"
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#else
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_libc_symbols.h"
-#endif
-
-// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
-// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
-// glibc 2.23 for instance), and free() to free it. This causes issues for us,
-// as we are then asked to free memory we didn't allocate.
-//
-// This only happened in glibc to allocate TLS storage metadata, and there are
-// no other callers of __libc_memalign() there as of September 2020. To work
-// around this issue, intercept this internal libc symbol to make sure that both
-// the allocation and the free() are caught by the shim.
-//
-// This seems fragile, and is, but there is ample precedent for it, making it
-// quite likely to keep working in the future. For instance, LLVM for LSAN uses
-// this mechanism.
-
-#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h"
-#endif
-
-#if BUILDFLAG(IS_APPLE)
-namespace allocator_shim {
-
-void InitializeAllocatorShim() {
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  // Prepares the default dispatch. After the intercepted malloc calls have
-  // traversed the shim this will route them to the default malloc zone.
-  InitializeDefaultDispatchToMacAllocator();
-
-  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
-
-  // This replaces the default malloc zone, causing calls to malloc & friends
-  // from the codebase to be routed to ShimMalloc() above.
-  ReplaceFunctionsForStoredZones(&functions);
-#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-}
-
-}  // namespace allocator_shim
-#endif
-
-// Cross-checks.
-
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-#error The allocator shim should not be compiled when building for memory tools.
-#endif
-
-#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
-    (defined(_MSC_VER) && defined(_CPPUNWIND))
-#error This code cannot be used when exceptions are turned on.
-#endif
diff --git a/base/allocator/partition_allocator/shim/allocator_shim.h b/base/allocator/partition_allocator/shim/allocator_shim.h
deleted file mode 100644
index 0292321..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim.h
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/types/strong_alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-
-namespace allocator_shim {
-
-// Allocator Shim API. It allows callers to:
-//  - Configure the behavior of the allocator (what to do on OOM failures).
-//  - Install new hooks (AllocatorDispatch) in the allocator chain.
-
-// When this shim layer is enabled, the route of an allocation is as follows:
-//
-// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
-//   The override_* headers define the symbols required to intercept calls to
-//   malloc() and operator new (if not overridden by specific C++ classes).
-//
-// [allocator_shim.cc] Routing allocation calls to the shim:
-//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
-//   ShimCppNew() etc. methods defined in allocator_shim.cc.
-//   These methods will: (1) forward the allocation call to the front of the
-//   AllocatorDispatch chain. (2) perform security hardenings (e.g., might
-//   call std::new_handler on OOM failure).
-//
-// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
-//   It is a singly linked list where each element is a struct with function
-//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
-//   consists of a single AllocatorDispatch element, herein called
-//   the "default dispatch", which is statically defined at build time and
-//   ultimately routes the calls to the actual allocator defined by the build
-//   config (glibc, ...).
-//
-// It is possible to dynamically insert further AllocatorDispatch stages
-// to the front of the chain, for debugging / profiling purposes.
-//
-// All the functions must be thread safe. The shim does not enforce any
-// serialization. This is to route to thread-aware allocators without
-// introducing unnecessary perf hits.
-
-struct AllocatorDispatch {
-  using AllocFn = void*(const AllocatorDispatch* self,
-                        size_t size,
-                        void* context);
-  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
-                                 size_t size,
-                                 void* context);
-  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
-                                       size_t n,
-                                       size_t size,
-                                       void* context);
-  using AllocAlignedFn = void*(const AllocatorDispatch* self,
-                               size_t alignment,
-                               size_t size,
-                               void* context);
-  using ReallocFn = void*(const AllocatorDispatch* self,
-                          void* address,
-                          size_t size,
-                          void* context);
-  using FreeFn = void(const AllocatorDispatch* self,
-                      void* address,
-                      void* context);
-  // Returns the allocated size of user data (not including heap overhead).
-  // Can be larger than the requested size.
-  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
-                                   void* address,
-                                   void* context);
-  using ClaimedAddressFn = bool(const AllocatorDispatch* self,
-                                void* address,
-                                void* context);
-  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
-                                 size_t size,
-                                 void** results,
-                                 unsigned num_requested,
-                                 void* context);
-  using BatchFreeFn = void(const AllocatorDispatch* self,
-                           void** to_be_freed,
-                           unsigned num_to_be_freed,
-                           void* context);
-  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
-                                  void* ptr,
-                                  size_t size,
-                                  void* context);
-  using TryFreeDefaultFn = void(const AllocatorDispatch* self,
-                                void* ptr,
-                                void* context);
-  using AlignedMallocFn = void*(const AllocatorDispatch* self,
-                                size_t size,
-                                size_t alignment,
-                                void* context);
-  using AlignedReallocFn = void*(const AllocatorDispatch* self,
-                                 void* address,
-                                 size_t size,
-                                 size_t alignment,
-                                 void* context);
-  using AlignedFreeFn = void(const AllocatorDispatch* self,
-                             void* address,
-                             void* context);
-
-  AllocFn* const alloc_function;
-  AllocUncheckedFn* const alloc_unchecked_function;
-  AllocZeroInitializedFn* const alloc_zero_initialized_function;
-  AllocAlignedFn* const alloc_aligned_function;
-  ReallocFn* const realloc_function;
-  FreeFn* const free_function;
-  GetSizeEstimateFn* const get_size_estimate_function;
-  // claimed_address, batch_malloc, batch_free, free_definite_size and
-  // try_free_default are specific to the OSX and iOS allocators.
-  ClaimedAddressFn* const claimed_address_function;
-  BatchMallocFn* const batch_malloc_function;
-  BatchFreeFn* const batch_free_function;
-  FreeDefiniteSizeFn* const free_definite_size_function;
-  TryFreeDefaultFn* const try_free_default_function;
-  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
-  // Windows allocator.
-  AlignedMallocFn* const aligned_malloc_function;
-  AlignedReallocFn* const aligned_realloc_function;
-  AlignedFreeFn* const aligned_free_function;
-
-  const AllocatorDispatch* next;
-
-  // |default_dispatch| is statically defined by one (and only one) of the
-  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
-  // configuration.
-  static const AllocatorDispatch default_dispatch;
-};
-
-// When true makes malloc behave like new, w.r.t calling the new_handler if
-// the allocation fails (see set_new_mode() in Windows).
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void SetCallNewHandlerOnMallocFailure(bool value);
-
-// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
-// regardless of SetCallNewHandlerOnMallocFailure().
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* UncheckedAlloc(size_t size);
-
-// Frees memory allocated with UncheckedAlloc().
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void UncheckedFree(void* ptr);
-
-// Inserts |dispatch| in front of the allocator chain. This method is
-// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
-// Callers are responsible for not inserting any given dispatch more than
-// once.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
-
-// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
-// removal of arbitrary elements from a singly linked list would require a lock
-// in malloc(), which we really don't want.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
-
-#if BUILDFLAG(IS_APPLE)
-// The fallback function to be called when try_free_default_function receives a
-// pointer which doesn't belong to the allocator.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr);
-#endif  // BUILDFLAG(IS_APPLE)
-
-#if BUILDFLAG(IS_APPLE)
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void InitializeDefaultAllocatorPartitionRoot();
-bool IsDefaultAllocatorPartitionRootInitialized();
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-// On macOS, the allocator shim needs to be turned on during runtime.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void InitializeAllocatorShim();
-#endif  // BUILDFLAG(IS_APPLE)
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void EnablePartitionAllocMemoryReclaimer();
-
-using EnableBrp =
-    partition_alloc::internal::base::StrongAlias<class EnableBrpTag, bool>;
-using EnableMemoryTagging =
-    partition_alloc::internal::base::StrongAlias<class EnableMemoryTaggingTag,
-                                                 bool>;
-using SplitMainPartition =
-    partition_alloc::internal::base::StrongAlias<class SplitMainPartitionTag,
-                                                 bool>;
-using UseDedicatedAlignedPartition = partition_alloc::internal::base::
-    StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
-enum class BucketDistribution : uint8_t { kNeutral, kDenser };
-
-// If |thread_cache_on_non_quarantinable_partition| is specified, the
-// thread-cache will be enabled on the non-quarantinable partition. The
-// thread-cache on the main (malloc) partition will be disabled.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void ConfigurePartitions(
-    EnableBrp enable_brp,
-    EnableMemoryTagging enable_memory_tagging,
-    partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode,
-    SplitMainPartition split_main_partition,
-    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
-    size_t ref_count_size,
-    BucketDistribution distribution);
-
-// If |thread_cache_on_non_quarantinable_partition| is specified, the
-// thread-cache will be enabled on the non-quarantinable partition. The
-// thread-cache on the main (malloc) partition will be disabled.
-// This is the deprecated version of ConfigurePartitions, kept for compatibility
-// with pdfium's test setup, see
-// third_party/pdfium/testing/allocator_shim_config.cpp.
-// TODO(crbug.com/1137393): Remove this function once pdfium has switched to
-// the new version.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void ConfigurePartitions(
-    EnableBrp enable_brp,
-    EnableMemoryTagging enable_memory_tagging,
-    SplitMainPartition split_main_partition,
-    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
-    size_t ref_count_size,
-    BucketDistribution distribution);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetMainPartitionRootExtrasSize();
-
-#if BUILDFLAG(USE_STARSCAN)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
-#endif
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_H_
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_glibc.cc
deleted file mode 100644
index 4f95a3c..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_glibc.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-
-#include <dlfcn.h>
-#include <malloc.h>
-
-// This translation unit defines a default dispatch for the allocator shim which
-// routes allocations to libc functions.
-// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
-
-extern "C" {
-void* __libc_malloc(size_t size);
-void* __libc_calloc(size_t n, size_t size);
-void* __libc_realloc(void* address, size_t size);
-void* __libc_memalign(size_t alignment, size_t size);
-void __libc_free(void* ptr);
-}  // extern "C"
-
-namespace {
-
-using allocator_shim::AllocatorDispatch;
-
-// Strictly speaking, it would make more sense to not subtract anything, but
-// other shims limit to something lower than INT_MAX (which is 0x7FFFFFFF on
-// most platforms), and tests expect that.
-constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
-
-void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
-  // Cannot force glibc's malloc() to crash when a large size is requested, do
-  // it in the shim instead.
-  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-
-  return __libc_malloc(size);
-}
-
-void* GlibcUncheckedMalloc(const AllocatorDispatch*,
-                           size_t size,
-                           void* context) {
-  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
-    return nullptr;
-  }
-
-  return __libc_malloc(size);
-}
-
-void* GlibcCalloc(const AllocatorDispatch*,
-                  size_t n,
-                  size_t size,
-                  void* context) {
-  const auto total = partition_alloc::internal::base::CheckMul(n, size);
-  if (PA_UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize)) {
-    partition_alloc::TerminateBecauseOutOfMemory(size * n);
-  }
-
-  return __libc_calloc(n, size);
-}
-
-void* GlibcRealloc(const AllocatorDispatch*,
-                   void* address,
-                   size_t size,
-                   void* context) {
-  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-
-  return __libc_realloc(address, size);
-}
-
-void* GlibcMemalign(const AllocatorDispatch*,
-                    size_t alignment,
-                    size_t size,
-                    void* context) {
-  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
-    partition_alloc::TerminateBecauseOutOfMemory(size);
-  }
-
-  return __libc_memalign(alignment, size);
-}
-
-void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
-  __libc_free(address);
-}
-
-PA_NO_SANITIZE("cfi-icall")
-size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
-                            void* address,
-                            void* context) {
-  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
-  // resolve it instead. This should be safe because glibc (and hence dlfcn)
-  // does not use malloc_usable_size internally, so there should not be a risk
-  // of recursion.
-  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
-  static MallocUsableSizeFunction fn_ptr =
-      reinterpret_cast<MallocUsableSizeFunction>(
-          dlsym(RTLD_NEXT, "malloc_usable_size"));
-
-  return fn_ptr(address);
-}
-
-}  // namespace
-
-const AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &GlibcMalloc,          /* alloc_function */
-    &GlibcUncheckedMalloc, /* alloc_unchecked_function */
-    &GlibcCalloc,          /* alloc_zero_initialized_function */
-    &GlibcMemalign,        /* alloc_aligned_function */
-    &GlibcRealloc,         /* realloc_function */
-    &GlibcFree,            /* free_function */
-    &GlibcGetSizeEstimate, /* get_size_estimate_function */
-    nullptr,               /* claimed_address */
-    nullptr,               /* batch_malloc_function */
-    nullptr,               /* batch_free_function */
-    nullptr,               /* free_definite_size_function */
-    nullptr,               /* try_free_default_function */
-    nullptr,               /* aligned_malloc_function */
-    nullptr,               /* aligned_realloc_function */
-    nullptr,               /* aligned_free_function */
-    nullptr,               /* next */
-};
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
deleted file mode 100644
index 6a73d8a..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <malloc.h>
-
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-#include "build/build_config.h"
-
-// This translation unit defines a default dispatch for the allocator shim which
-// routes allocations to the original libc functions when using the link-time
-// -Wl,-wrap,malloc approach (see README.md).
-// The __real_X functions here are special symbols that the linker will relocate
-// against the real "X" undefined symbol, so that __real_malloc becomes the
-// equivalent of what an undefined malloc symbol reference would have been.
-// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
-// which routes the __wrap_X functions into the shim.
-
-extern "C" {
-void* __real_malloc(size_t);
-void* __real_calloc(size_t, size_t);
-void* __real_realloc(void*, size_t);
-void* __real_memalign(size_t, size_t);
-void __real_free(void*);
-size_t __real_malloc_usable_size(void*);
-}  // extern "C"
-
-namespace {
-
-using allocator_shim::AllocatorDispatch;
-
-void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
-  return __real_malloc(size);
-}
-
-void* RealCalloc(const AllocatorDispatch*,
-                 size_t n,
-                 size_t size,
-                 void* context) {
-  return __real_calloc(n, size);
-}
-
-void* RealRealloc(const AllocatorDispatch*,
-                  void* address,
-                  size_t size,
-                  void* context) {
-  return __real_realloc(address, size);
-}
-
-void* RealMemalign(const AllocatorDispatch*,
-                   size_t alignment,
-                   size_t size,
-                   void* context) {
-  return __real_memalign(alignment, size);
-}
-
-void RealFree(const AllocatorDispatch*, void* address, void* context) {
-  __real_free(address);
-}
-
-size_t RealSizeEstimate(const AllocatorDispatch*,
-                        void* address,
-                        void* context) {
-  return __real_malloc_usable_size(address);
-}
-
-}  // namespace
-
-const AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &RealMalloc,       /* alloc_function */
-    &RealMalloc,       /* alloc_unchecked_function */
-    &RealCalloc,       /* alloc_zero_initialized_function */
-    &RealMemalign,     /* alloc_aligned_function */
-    &RealRealloc,      /* realloc_function */
-    &RealFree,         /* free_function */
-    &RealSizeEstimate, /* get_size_estimate_function */
-    nullptr,           /* claimed_address */
-    nullptr,           /* batch_malloc_function */
-    nullptr,           /* batch_free_function */
-    nullptr,           /* free_definite_size_function */
-    nullptr,           /* try_free_default_function */
-    nullptr,           /* aligned_malloc_function */
-    nullptr,           /* aligned_realloc_function */
-    nullptr,           /* aligned_free_function */
-    nullptr,           /* next */
-};
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc
deleted file mode 100644
index 76b275c..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <utility>
-
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"
-
-namespace allocator_shim {
-namespace {
-
-void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
-                          size);
-}
-
-void* CallocImpl(const AllocatorDispatch*,
-                 size_t n,
-                 size_t size,
-                 void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
-                          size);
-}
-
-void* MemalignImpl(const AllocatorDispatch*,
-                   size_t alignment,
-                   size_t size,
-                   void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
-                            alignment, size);
-}
-
-void* ReallocImpl(const AllocatorDispatch*,
-                  void* ptr,
-                  size_t size,
-                  void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
-                           ptr, size);
-}
-
-void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
-}
-
-size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
-}
-
-bool ClaimedAddressImpl(const AllocatorDispatch*, void* ptr, void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  if (functions.claimed_address) {
-    return functions.claimed_address(
-        reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
-  }
-  // If the fast API 'claimed_address' is not implemented in the specified zone,
-  // fall back to the 'size' function, which also tells whether the given
-  // address belongs to the zone, although it is slower.
-  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
-}
-
-unsigned BatchMallocImpl(const AllocatorDispatch* self,
-                         size_t size,
-                         void** results,
-                         unsigned num_requested,
-                         void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  return functions.batch_malloc(
-      reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
-      num_requested);
-}
-
-void BatchFreeImpl(const AllocatorDispatch* self,
-                   void** to_be_freed,
-                   unsigned num_to_be_freed,
-                   void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
-                       to_be_freed, num_to_be_freed);
-}
-
-void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
-                          void* ptr,
-                          size_t size,
-                          void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  functions.free_definite_size(
-      reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
-}
-
-void TryFreeDefaultImpl(const AllocatorDispatch* self,
-                        void* ptr,
-                        void* context) {
-  MallocZoneFunctions& functions = GetFunctionsForZone(context);
-  if (functions.try_free_default) {
-    return functions.try_free_default(
-        reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
-  }
-  allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(ptr);
-}
-
-}  // namespace
-
-const AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &MallocImpl,           /* alloc_function */
-    &MallocImpl,           /* alloc_unchecked_function */
-    &CallocImpl,           /* alloc_zero_initialized_function */
-    &MemalignImpl,         /* alloc_aligned_function */
-    &ReallocImpl,          /* realloc_function */
-    &FreeImpl,             /* free_function */
-    &GetSizeEstimateImpl,  /* get_size_estimate_function */
-    &ClaimedAddressImpl,   /* claimed_address_function */
-    &BatchMallocImpl,      /* batch_malloc_function */
-    &BatchFreeImpl,        /* batch_free_function */
-    &FreeDefiniteSizeImpl, /* free_definite_size_function */
-    &TryFreeDefaultImpl,   /* try_free_default_function */
-    nullptr,               /* aligned_malloc_function */
-    nullptr,               /* aligned_realloc_function */
-    nullptr,               /* aligned_free_function */
-    nullptr,               /* next */
-};
-
-}  // namespace allocator_shim
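For reference, the dispatch table in the file deleted above simply forwards each shim call to the malloc zone passed in `context`; the only non-trivial logic is the `claimed_address` fallback, which uses the zone's `size()` query when the fast API is absent. A minimal, self-contained C++ sketch of that fallback pattern follows; the struct and function names are illustrative stand-ins, not the real malloc-zone API.

#include <cstddef>

// Stand-in for the per-zone function table (the real code uses
// MallocZoneFunctions and _malloc_zone_t*).
struct FakeZoneFunctions {
  bool (*claimed_address)(void* zone, void* ptr);  // May be null on older zones.
  size_t (*size)(void* zone, void* ptr);           // Returns 0 if not owned.
};

// Prefer the fast claimed_address() query; otherwise fall back to size(),
// which also reports ownership (0 means "not owned") but is slower.
bool ZoneClaimsAddress(const FakeZoneFunctions& f, void* zone, void* ptr) {
  if (f.claimed_address) {
    return f.claimed_address(zone, ptr);
  }
  return f.size(zone, ptr) != 0;
}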
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.cc b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.cc
deleted file mode 100644
index ee20931..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.cc
+++ /dev/null
@@ -1,856 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-
-#include <atomic>
-#include <cstddef>
-#include <map>
-#include <string>
-#include <tuple>
-
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/chromecast_buildflags.h"
-#include "base/allocator/partition_allocator/memory_reclaimer.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/partition_stats.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
-#include "base/allocator/partition_allocator/shim/nonscannable_allocator.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-#include <malloc.h>
-#endif
-
-using allocator_shim::AllocatorDispatch;
-
-namespace {
-
-class SimpleScopedSpinLocker {
- public:
-  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
-    // Lock. Semantically equivalent to base::Lock::Acquire().
-    bool expected = false;
-    // Weak CAS since we are in a retry loop, relaxed ordering for failure since
-    // in this case we don't imply any ordering.
-    //
-    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
-    while (!lock_.compare_exchange_weak(
-        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
-      expected = false;
-    }
-  }
-
-  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }
-
- private:
-  std::atomic<bool>& lock_;
-};
-
-// We can't use a "static local" or a base::LazyInstance, as:
-// - static local variables call into the runtime on Windows, which is not
-//   prepared to handle it, as the first allocation happens during CRT init.
-// - We don't want to depend on base::LazyInstance, which may be converted to
-//   static locals one day.
-//
-// Nevertheless, this provides essentially the same thing.
-template <typename T, typename Constructor>
-class LeakySingleton {
- public:
-  constexpr LeakySingleton() = default;
-
-  PA_ALWAYS_INLINE T* Get() {
-    auto* instance = instance_.load(std::memory_order_acquire);
-    if (PA_LIKELY(instance)) {
-      return instance;
-    }
-
-    return GetSlowPath();
-  }
-
-  // Replaces the instance pointer with a new one.
-  void Replace(T* new_instance) {
-    SimpleScopedSpinLocker scoped_lock{initialization_lock_};
-
-    // Modify under the lock to avoid race between |if (instance)| and
-    // |instance_.store()| in GetSlowPath().
-    instance_.store(new_instance, std::memory_order_release);
-  }
-
- private:
-  T* GetSlowPath();
-
-  std::atomic<T*> instance_;
-  // Before C++20, having an initializer here causes a "variable does not have a
-  // constant initializer" error.  In C++20, omitting it causes a similar error.
-  // Presumably this is due to the C++20 changes to make atomic initialization
-  // (of the other members of this class) sane, so guarding under that
-  // feature-test.
-#if !defined(__cpp_lib_atomic_value_initialization) || \
-    __cpp_lib_atomic_value_initialization < 201911L
-  alignas(T) uint8_t instance_buffer_[sizeof(T)];
-#else
-  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
-#endif
-  std::atomic<bool> initialization_lock_;
-};
-
-template <typename T, typename Constructor>
-T* LeakySingleton<T, Constructor>::GetSlowPath() {
-  // The instance has not been set, the proper way to proceed (correct
-  // double-checked locking) is:
-  //
-  // auto* instance = instance_.load(std::memory_order_acquire);
-  // if (!instance) {
-  //   ScopedLock initialization_lock;
-  //   root = instance_.load(std::memory_order_relaxed);
-  //   if (root)
-  //     return root;
-  //   instance = Create new root;
-  //   instance_.store(instance, std::memory_order_release);
-  //   return instance;
-  // }
-  //
-  // However, we don't want to use a base::Lock here, so instead we use
-  // compare-and-exchange on a lock variable, which provides the same
-  // guarantees.
-  SimpleScopedSpinLocker scoped_lock{initialization_lock_};
-
-  T* instance = instance_.load(std::memory_order_relaxed);
-  // Someone beat us.
-  if (instance) {
-    return instance;
-  }
-
-  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
-  instance_.store(instance, std::memory_order_release);
-
-  return instance;
-}
-
-class MainPartitionConstructor {
- public:
-  static partition_alloc::PartitionRoot* New(void* buffer) {
-    constexpr partition_alloc::PartitionOptions::ThreadCache thread_cache =
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-        // Additional partitions may be created in ConfigurePartitions(). Since
-        // only one partition can have thread cache enabled, postpone the
-        // decision to turn the thread cache on until after that call.
-        // TODO(bartekn): Enable it here by default, once the "split-only" mode
-        // is no longer needed.
-        partition_alloc::PartitionOptions::ThreadCache::kDisabled;
-#else   // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-        // Other tests, such as the ThreadCache tests, create a thread cache,
-        // and only one is supported at a time.
-        partition_alloc::PartitionOptions::ThreadCache::kDisabled;
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-    auto* new_root = new (buffer)
-        partition_alloc::PartitionRoot(partition_alloc::PartitionOptions{
-            .aligned_alloc =
-                partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
-            .thread_cache = thread_cache,
-            .star_scan_quarantine =
-                partition_alloc::PartitionOptions::StarScanQuarantine::kAllowed,
-            .backup_ref_ptr =
-                partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
-        });
-
-    return new_root;
-  }
-};
-
-LeakySingleton<partition_alloc::PartitionRoot, MainPartitionConstructor> g_root
-    PA_CONSTINIT = {};
-partition_alloc::PartitionRoot* Allocator() {
-  return g_root.Get();
-}
-
-// Original g_root_ if it was replaced by ConfigurePartitions().
-std::atomic<partition_alloc::PartitionRoot*> g_original_root(nullptr);
-
-std::atomic<bool> g_roots_finalized = false;
-
-class AlignedPartitionConstructor {
- public:
-  static partition_alloc::PartitionRoot* New(void* buffer) {
-    return g_root.Get();
-  }
-};
-
-LeakySingleton<partition_alloc::PartitionRoot, AlignedPartitionConstructor>
-    g_aligned_root PA_CONSTINIT = {};
-
-partition_alloc::PartitionRoot* OriginalAllocator() {
-  return g_original_root.load(std::memory_order_relaxed);
-}
-
-partition_alloc::PartitionRoot* AlignedAllocator() {
-  return g_aligned_root.Get();
-}
-
-bool AllocatorConfigurationFinalized() {
-  return g_roots_finalized.load();
-}
-
-void* AllocateAlignedMemory(size_t alignment, size_t size) {
-  // Memory returned by the regular allocator *always* respects |kAlignment|,
-  // which is a power of two, and any valid alignment is also a power of two. So
-  // we can directly fulfill these requests with the main allocator.
-  //
-  // This has several advantages:
-  // - The thread cache is supported on the main partition
-  // - Reduced fragmentation
-  // - Better coverage for MiraclePtr variants requiring extras
-  //
-  // There are several call sites in Chromium where base::AlignedAlloc is called
-  // with a small alignment. Some may be due to overly-careful code, some are
-  // because the client code doesn't know the required alignment at compile
-  // time.
-  //
-  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
-  // instance) directly call PartitionFree(), so there is no risk of
-  // mismatch. (see below the default_dispatch definition).
-  if (alignment <= partition_alloc::internal::kAlignment) {
-    // This is mandated by |posix_memalign()| and friends, so should never fire.
-    PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
-    // TODO(bartekn): See if the compiler optimizes branches down the stack on
-    // Mac, where PartitionPageSize() isn't constexpr.
-    return Allocator()->AllocWithFlagsNoHooks(
-        0, size, partition_alloc::PartitionPageSize());
-  }
-
-  return AlignedAllocator()->AlignedAllocWithFlags(
-      partition_alloc::AllocFlags::kNoHooks, alignment, size);
-}
-
-}  // namespace
-
-namespace allocator_shim::internal {
-
-namespace {
-#if BUILDFLAG(IS_APPLE)
-unsigned int g_alloc_flags = 0;
-#else
-constexpr unsigned int g_alloc_flags = 0;
-#endif
-}  // namespace
-
-void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
-#if BUILDFLAG(IS_APPLE)
-  // We generally prefer to always crash rather than returning nullptr for
-  // OOM. However, on some macOS releases, we have to locally allow it due to
-  // weirdness in OS code. See https://crbug.com/654695 for details.
-  //
-  // Apple only since it's not needed elsewhere, and there is a performance
-  // penalty.
-
-  if (value) {
-    g_alloc_flags = 0;
-  } else {
-    g_alloc_flags = partition_alloc::AllocFlags::kReturnNull;
-  }
-#endif
-}
-
-void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  return Allocator()->AllocWithFlagsNoHooks(
-      g_alloc_flags, size, partition_alloc::PartitionPageSize());
-}
-
-void* PartitionMallocUnchecked(const AllocatorDispatch*,
-                               size_t size,
-                               void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  return Allocator()->AllocWithFlagsNoHooks(
-      partition_alloc::AllocFlags::kReturnNull | g_alloc_flags, size,
-      partition_alloc::PartitionPageSize());
-}
-
-void* PartitionCalloc(const AllocatorDispatch*,
-                      size_t n,
-                      size_t size,
-                      void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  const size_t total =
-      partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
-  return Allocator()->AllocWithFlagsNoHooks(
-      partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
-      partition_alloc::PartitionPageSize());
-}
-
-void* PartitionMemalign(const AllocatorDispatch*,
-                        size_t alignment,
-                        size_t size,
-                        void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  return AllocateAlignedMemory(alignment, size);
-}
-
-void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
-                            size_t size,
-                            size_t alignment,
-                            void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  return AllocateAlignedMemory(alignment, size);
-}
-
-// aligned_realloc documentation is
-// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
-// TODO(tasak): Expand the given memory block to the given size if possible.
-// This realloc always frees the original memory block and allocates a new memory
-// block.
-// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocWithFlags
-// and use it.
-void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
-                              void* address,
-                              size_t size,
-                              size_t alignment,
-                              void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  void* new_ptr = nullptr;
-  if (size > 0) {
-    new_ptr = AllocateAlignedMemory(alignment, size);
-  } else {
-    // size == 0 and address != null means just "free(address)".
-    if (address) {
-      partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
-    }
-  }
-  // The original memory block (specified by address) is unchanged if ENOMEM.
-  if (!new_ptr) {
-    return nullptr;
-  }
-  // TODO(tasak): Need to compare the new alignment with the address' alignment.
-  // If the two alignments are not the same, need to return nullptr with EINVAL.
-  if (address) {
-    size_t usage = partition_alloc::PartitionRoot::GetUsableSize(address);
-    size_t copy_size = usage > size ? size : usage;
-    memcpy(new_ptr, address, copy_size);
-
-    partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
-  }
-  return new_ptr;
-}
-
-void* PartitionRealloc(const AllocatorDispatch*,
-                       void* address,
-                       size_t size,
-                       void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-#if BUILDFLAG(IS_APPLE)
-  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
-                      reinterpret_cast<uintptr_t>(address)) &&
-                  address)) {
-    // A memory region allocated by the system allocator is passed in this
-    // function.  Forward the request to `realloc` which supports zone-
-    // dispatching so that it appropriately selects the right zone.
-    return realloc(address, size);
-  }
-#endif  // BUILDFLAG(IS_APPLE)
-
-  return Allocator()->ReallocWithFlags(
-      partition_alloc::AllocFlags::kNoHooks | g_alloc_flags, address, size, "");
-}
-
-#if BUILDFLAG(PA_IS_CAST_ANDROID)
-extern "C" {
-void __real_free(void*);
-}  // extern "C"
-#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)
-
-void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-#if BUILDFLAG(IS_APPLE)
-  // TODO(bartekn): Add MTE unmasking here (and below).
-  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
-                      reinterpret_cast<uintptr_t>(object)) &&
-                  object)) {
-    // A memory region allocated by the system allocator is passed in this
-    // function.  Forward the request to `free` which supports zone-
-    // dispatching so that it appropriately selects the right zone.
-    return free(object);
-  }
-#endif  // BUILDFLAG(IS_APPLE)
-
-  // On Android Chromecast devices, there is at least one case where a system
-  // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
-  // the pointer, pass it along. This should not have a runtime cost vs regular
-  // Android, since on Android we have a PA_CHECK() rather than the branch here.
-#if BUILDFLAG(PA_IS_CAST_ANDROID)
-  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
-                      reinterpret_cast<uintptr_t>(object)) &&
-                  object)) {
-    // A memory region allocated by the system allocator is passed in this
-    // function.  Forward the request to `free()`, which is `__real_free()`
-    // here.
-    return __real_free(object);
-  }
-#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)
-
-  partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(object);
-}
-
-#if BUILDFLAG(IS_APPLE)
-// Normal free() path on Apple OSes:
-// 1. size = GetSizeEstimate(ptr);
-// 2. if (size) FreeDefiniteSize(ptr, size)
-//
-// So we don't need to re-check that the pointer is owned in Free(), and we
-// can use the size.
-void PartitionFreeDefiniteSize(const AllocatorDispatch*,
-                               void* address,
-                               size_t size,
-                               void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
-  // still useful though, as we avoid double-checking that the address is owned.
-  partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
-}
-#endif  // BUILDFLAG(IS_APPLE)
-
-size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
-                                void* address,
-                                void* context) {
-  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
-  // is NULL, 0 is returned".
-  if (!address) {
-    return 0;
-  }
-
-#if BUILDFLAG(IS_APPLE)
-  if (!partition_alloc::IsManagedByPartitionAlloc(
-          reinterpret_cast<uintptr_t>(address))) {
-    // The object pointed to by `address` is not allocated by
-    // PartitionAlloc.  The return value `0` means that the pointer does not
-    // belong to this malloc zone.
-    return 0;
-  }
-#endif  // BUILDFLAG(IS_APPLE)
-
-  // TODO(lizeb): Returns incorrect values for aligned allocations.
-  const size_t size =
-      partition_alloc::PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(
-          address);
-#if BUILDFLAG(IS_APPLE)
-  // The object pointed to by `address` is allocated by PartitionAlloc.
-  // So, this function must not return zero so that the malloc zone dispatcher
-  // finds the appropriate malloc zone.
-  PA_DCHECK(size);
-#endif  // BUILDFLAG(IS_APPLE)
-  return size;
-}
-
-#if BUILDFLAG(IS_APPLE)
-bool PartitionClaimedAddress(const AllocatorDispatch*,
-                             void* address,
-                             void* context) {
-  return partition_alloc::IsManagedByPartitionAlloc(
-      reinterpret_cast<uintptr_t>(address));
-}
-#endif  // BUILDFLAG(IS_APPLE)
-
-unsigned PartitionBatchMalloc(const AllocatorDispatch*,
-                              size_t size,
-                              void** results,
-                              unsigned num_requested,
-                              void* context) {
-  // No real batching: we could, for instance, acquire the lock only once; keep
-  // it simple for now.
-  for (unsigned i = 0; i < num_requested; i++) {
-    // No need to check the results, we crash if it fails.
-    results[i] = PartitionMalloc(nullptr, size, nullptr);
-  }
-
-  // Either all succeeded, or we crashed.
-  return num_requested;
-}
-
-void PartitionBatchFree(const AllocatorDispatch*,
-                        void** to_be_freed,
-                        unsigned num_to_be_freed,
-                        void* context) {
-  // No real batching: we could, for instance, acquire the lock only once; keep
-  // it simple for now.
-  for (unsigned i = 0; i < num_to_be_freed; i++) {
-    PartitionFree(nullptr, to_be_freed[i], nullptr);
-  }
-}
-
-#if BUILDFLAG(IS_APPLE)
-void PartitionTryFreeDefault(const AllocatorDispatch*,
-                             void* address,
-                             void* context) {
-  partition_alloc::ScopedDisallowAllocations guard{};
-
-  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
-          reinterpret_cast<uintptr_t>(address)))) {
-    // The object pointed to by `address` is not allocated by
-    // PartitionAlloc. Call find_zone_and_free.
-    return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
-  }
-
-  partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
-}
-#endif  // BUILDFLAG(IS_APPLE)
-
-// static
-bool PartitionAllocMalloc::AllocatorConfigurationFinalized() {
-  return ::AllocatorConfigurationFinalized();
-}
-
-// static
-partition_alloc::PartitionRoot* PartitionAllocMalloc::Allocator() {
-  return ::Allocator();
-}
-
-// static
-partition_alloc::PartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
-  return ::OriginalAllocator();
-}
-
-// static
-partition_alloc::PartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
-  return ::AlignedAllocator();
-}
-
-}  // namespace allocator_shim::internal
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-namespace allocator_shim {
-
-void EnablePartitionAllocMemoryReclaimer() {
-  // Unlike other partitions, Allocator() does not register its PartitionRoot to
-  // the memory reclaimer, because doing so may allocate memory. Thus, the
-  // registration to the memory reclaimer has to be done some time later, when
-  // the main root is fully configured.
-  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
-      Allocator());
-
-  // There is only one PartitionAlloc-Everywhere partition at the moment. Any
-  // additional partitions will be created in ConfigurePartitions() and
-  // registered for memory reclaimer there.
-  PA_DCHECK(!AllocatorConfigurationFinalized());
-  PA_DCHECK(OriginalAllocator() == nullptr);
-  PA_DCHECK(AlignedAllocator() == Allocator());
-}
-
-void ConfigurePartitions(
-    EnableBrp enable_brp,
-    EnableMemoryTagging enable_memory_tagging,
-    partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode,
-    SplitMainPartition split_main_partition,
-    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
-    size_t ref_count_size,
-    BucketDistribution distribution) {
-  // BRP cannot be enabled without splitting the main partition. Furthermore, in
-  // the "before allocation" mode, it can't be enabled without further splitting
-  // out the aligned partition.
-  PA_CHECK(!enable_brp || split_main_partition);
-#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-  PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
-#endif
-  // Can't split out the aligned partition, without splitting the main one.
-  PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);
-
-  // Calling Get() is actually important, even if the return values aren't
-  // used, because it has the side effect of initializing the variables, if they
-  // weren't already.
-  auto* current_root = g_root.Get();
-  auto* current_aligned_root = g_aligned_root.Get();
-  PA_DCHECK(current_root == current_aligned_root);
-
-  if (!split_main_partition) {
-    switch (distribution) {
-      case BucketDistribution::kNeutral:
-        // We start in the 'default' case.
-        break;
-      case BucketDistribution::kDenser:
-        current_root->SwitchToDenserBucketDistribution();
-        break;
-    }
-    PA_DCHECK(!enable_brp);
-    PA_DCHECK(!use_dedicated_aligned_partition);
-    PA_DCHECK(!current_root->settings.with_thread_cache);
-    PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
-    return;
-  }
-
-  // We've been bitten before by using a static local when initializing a
-  // partition. For synchronization, static local variables call into the
-  // runtime on Windows, which may not be ready to handle it, if the path is
-  // invoked on an allocation during the runtime initialization.
-  // ConfigurePartitions() is invoked explicitly from Chromium code, so this
-  // shouldn't bite us here. Mentioning just in case we move this code earlier.
-  static partition_alloc::internal::base::NoDestructor<
-      partition_alloc::PartitionAllocator>
-      new_main_allocator(partition_alloc::PartitionOptions{
-          .aligned_alloc =
-              !use_dedicated_aligned_partition
-                  ? partition_alloc::PartitionOptions::AlignedAlloc::kAllowed
-                  : partition_alloc::PartitionOptions::AlignedAlloc::
-                        kDisallowed,
-          .thread_cache =
-              partition_alloc::PartitionOptions::ThreadCache::kDisabled,
-          .star_scan_quarantine =
-              partition_alloc::PartitionOptions::StarScanQuarantine::kAllowed,
-          .backup_ref_ptr =
-              enable_brp
-                  ? partition_alloc::PartitionOptions::BackupRefPtr::kEnabled
-                  : partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
-          .ref_count_size = ref_count_size,
-          .memory_tagging = {
-              .enabled = enable_memory_tagging
-                             ? partition_alloc::PartitionOptions::
-                                   MemoryTagging::kEnabled
-                             : partition_alloc::PartitionOptions::
-                                   MemoryTagging::kDisabled,
-              .reporting_mode = memory_tagging_reporting_mode}});
-  partition_alloc::PartitionRoot* new_root = new_main_allocator->root();
-
-  partition_alloc::PartitionRoot* new_aligned_root;
-  if (use_dedicated_aligned_partition) {
-    // TODO(bartekn): Use the original root instead of creating a new one. It'd
-    // result in one less partition, but come at a cost of commingling types.
-    static partition_alloc::internal::base::NoDestructor<
-        partition_alloc::PartitionAllocator>
-        new_aligned_allocator(partition_alloc::PartitionOptions{
-            .aligned_alloc =
-                partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
-            .thread_cache =
-                partition_alloc::PartitionOptions::ThreadCache::kDisabled,
-            .star_scan_quarantine =
-                partition_alloc::PartitionOptions::StarScanQuarantine::kAllowed,
-            .backup_ref_ptr =
-                partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
-        });
-    new_aligned_root = new_aligned_allocator->root();
-  } else {
-    // The new main root can also support AlignedAlloc.
-    new_aligned_root = new_root;
-  }
-
-  // Now switch traffic to the new partitions.
-  g_original_root = current_root;
-  g_aligned_root.Replace(new_aligned_root);
-  g_root.Replace(new_root);
-
-  // No need for g_original_aligned_root, because in cases where g_aligned_root
-  // is replaced, it must've been g_original_root.
-  PA_CHECK(current_aligned_root == g_original_root);
-
-  // Purge memory, now that the traffic to the original partition is cut off.
-  current_root->PurgeMemory(
-      partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
-      partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);
-
-  switch (distribution) {
-    case BucketDistribution::kNeutral:
-      // We start in the 'default' case.
-      break;
-    case BucketDistribution::kDenser:
-      new_root->SwitchToDenserBucketDistribution();
-      if (new_aligned_root != new_root) {
-        new_aligned_root->SwitchToDenserBucketDistribution();
-      }
-      break;
-  }
-
-  PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void ConfigurePartitions(
-    EnableBrp enable_brp,
-    EnableMemoryTagging enable_memory_tagging,
-    SplitMainPartition split_main_partition,
-    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
-    size_t ref_count_size,
-    BucketDistribution distribution) {
-  // Since the only user of this function is a test function, we use synchronous
-  // testing mode.
-  const partition_alloc::TagViolationReportingMode
-      memory_tagging_reporting_mode =
-          enable_memory_tagging
-              ? partition_alloc::TagViolationReportingMode::kSynchronous
-              : partition_alloc::TagViolationReportingMode::kDisabled;
-
-  ConfigurePartitions(enable_brp, enable_memory_tagging,
-                      memory_tagging_reporting_mode, split_main_partition,
-                      use_dedicated_aligned_partition, ref_count_size,
-                      distribution);
-}
-
-// No synchronization provided: `PartitionRoot.flags` is only written
-// to in `PartitionRoot::Init()`.
-uint32_t GetMainPartitionRootExtrasSize() {
-#if PA_CONFIG(EXTRAS_REQUIRED)
-  return g_root.Get()->settings.extras_size;
-#else
-  return 0;
-#endif  // PA_CONFIG(EXTRAS_REQUIRED)
-}
-
-#if BUILDFLAG(USE_STARSCAN)
-void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
-  partition_alloc::internal::PCScan::Initialize(config);
-
-  PA_CHECK(AllocatorConfigurationFinalized());
-  partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
-  if (OriginalAllocator() != nullptr) {
-    partition_alloc::internal::PCScan::RegisterScannableRoot(
-        OriginalAllocator());
-  }
-  if (Allocator() != AlignedAllocator()) {
-    partition_alloc::internal::PCScan::RegisterScannableRoot(
-        AlignedAllocator());
-  }
-
-  allocator_shim::NonScannableAllocator::Instance().NotifyPCScanEnabled();
-  allocator_shim::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
-}
-#endif  // BUILDFLAG(USE_STARSCAN)
-}  // namespace allocator_shim
-
-const AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &allocator_shim::internal::PartitionMalloc,  // alloc_function
-    &allocator_shim::internal::
-        PartitionMallocUnchecked,  // alloc_unchecked_function
-    &allocator_shim::internal::
-        PartitionCalloc,  // alloc_zero_initialized_function
-    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
-    &allocator_shim::internal::PartitionRealloc,   // realloc_function
-    &allocator_shim::internal::PartitionFree,      // free_function
-    &allocator_shim::internal::
-        PartitionGetSizeEstimate,  // get_size_estimate_function
-#if BUILDFLAG(IS_APPLE)
-    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
-#else
-    nullptr,  // claimed_address
-#endif
-    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
-    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
-#if BUILDFLAG(IS_APPLE)
-    // On Apple OSes, free_definite_size() is always called from free(), since
-    // get_size_estimate() is used to determine whether an allocation belongs to
-    // the current zone. It makes sense to optimize for it.
-    &allocator_shim::internal::PartitionFreeDefiniteSize,
-    // On Apple OSes, try_free_default() is sometimes called as an optimization
-    // of free().
-    &allocator_shim::internal::PartitionTryFreeDefault,
-#else
-    nullptr,  // free_definite_size_function
-    nullptr,  // try_free_default_function
-#endif
-    &allocator_shim::internal::
-        PartitionAlignedAlloc,  // aligned_malloc_function
-    &allocator_shim::internal::
-        PartitionAlignedRealloc,               // aligned_realloc_function
-    &allocator_shim::internal::PartitionFree,  // aligned_free_function
-    nullptr,                                   // next
-};
-
-// Intercept diagnostics symbols as well, even though they are not part of the
-// unified shim layer.
-//
-// TODO(lizeb): Implement the ones that are doable.
-
-extern "C" {
-
-#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
-
-SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
-
-SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
-  return 0;
-}
-
-#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
-  partition_alloc::SimplePartitionStatsDumper allocator_dumper;
-  Allocator()->DumpStats("malloc", true, &allocator_dumper);
-  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
-
-  partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
-  if (AlignedAllocator() != Allocator()) {
-    AlignedAllocator()->DumpStats("posix_memalign", true,
-                                  &aligned_allocator_dumper);
-  }
-
-  // Dump stats for nonscannable and nonquarantinable allocators.
-  auto& nonscannable_allocator =
-      allocator_shim::NonScannableAllocator::Instance();
-  partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
-  if (auto* nonscannable_root = nonscannable_allocator.root()) {
-    nonscannable_root->DumpStats("malloc", true,
-                                 &nonscannable_allocator_dumper);
-  }
-  auto& nonquarantinable_allocator =
-      allocator_shim::NonQuarantinableAllocator::Instance();
-  partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
-  if (auto* nonquarantinable_root = nonquarantinable_allocator.root()) {
-    nonquarantinable_root->DumpStats("malloc", true,
-                                     &nonquarantinable_allocator_dumper);
-  }
-
-  struct mallinfo info = {0};
-  info.arena = 0;  // Memory *not* allocated with mmap().
-
-  // Memory allocated with mmap(), aka virtual size.
-  info.hblks =
-      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
-          allocator_dumper.stats().total_mmapped_bytes +
-          aligned_allocator_dumper.stats().total_mmapped_bytes +
-          nonscannable_allocator_dumper.stats().total_mmapped_bytes +
-          nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
-  // Resident bytes.
-  info.hblkhd =
-      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
-          allocator_dumper.stats().total_resident_bytes +
-          aligned_allocator_dumper.stats().total_resident_bytes +
-          nonscannable_allocator_dumper.stats().total_resident_bytes +
-          nonquarantinable_allocator_dumper.stats().total_resident_bytes);
-  // Allocated bytes.
-  info.uordblks =
-      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
-          allocator_dumper.stats().total_active_bytes +
-          aligned_allocator_dumper.stats().total_active_bytes +
-          nonscannable_allocator_dumper.stats().total_active_bytes +
-          nonquarantinable_allocator_dumper.stats().total_active_bytes);
-
-  return info;
-}
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-}  // extern "C"
-
-#if BUILDFLAG(IS_APPLE)
-
-namespace allocator_shim {
-
-void InitializeDefaultAllocatorPartitionRoot() {
-  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
-  // internally, e.g. __builtin_available, and it's not easy to avoid it.
-  // Thus, we initialize the PartitionRoot using the system default
-  // allocator before we intercept the system default allocator.
-  std::ignore = Allocator();
-}
-
-}  // namespace allocator_shim
-
-#endif  // BUILDFLAG(IS_APPLE)
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
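The LeakySingleton helper in the file deleted above avoids static locals and base::LazyInstance by combining a spin lock with double-checked locking, so that the very first allocation can safely initialize the root even before the CRT is ready. A minimal, self-contained sketch of that pattern, simplified and with illustrative names (the real class also works around C++20 atomic-initialization quirks):

#include <atomic>
#include <new>

template <typename T>
class TinyLeakySingleton {
 public:
  T* Get() {
    T* instance = instance_.load(std::memory_order_acquire);
    if (instance) {
      return instance;  // Fast path: already constructed.
    }
    return GetSlowPath();
  }

 private:
  T* GetSlowPath() {
    // Spin until we own the initialization lock (acquire on success, matching
    // the SimpleScopedSpinLocker in the deleted file).
    bool expected = false;
    while (!lock_.compare_exchange_weak(expected, true,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
      expected = false;
    }
    T* instance = instance_.load(std::memory_order_relaxed);
    if (!instance) {  // Re-check under the lock: someone may have beaten us.
      instance = new (buffer_) T();  // Placement-new; intentionally leaked.
      instance_.store(instance, std::memory_order_release);
    }
    lock_.store(false, std::memory_order_release);
    return instance;
  }

  std::atomic<T*> instance_{nullptr};
  std::atomic<bool> lock_{false};
  alignas(T) unsigned char buffer_[sizeof(T)];
};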
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h
deleted file mode 100644
index c0d9e79..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
-
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-
-namespace allocator_shim::internal {
-
-void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocMalloc {
- public:
-  // Returns true if ConfigurePartitions() has completed, meaning that the
-  // allocators are effectively set in stone.
-  static bool AllocatorConfigurationFinalized();
-
-  static partition_alloc::PartitionRoot* Allocator();
-  // May return |nullptr|, will never return the same pointer as |Allocator()|.
-  static partition_alloc::PartitionRoot* OriginalAllocator();
-  // May return the same pointer as |Allocator()|.
-  static partition_alloc::PartitionRoot* AlignedAllocator();
-};
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionMallocUnchecked(const AllocatorDispatch*,
-                               size_t size,
-                               void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionCalloc(const AllocatorDispatch*,
-                      size_t n,
-                      size_t size,
-                      void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionMemalign(const AllocatorDispatch*,
-                        size_t alignment,
-                        size_t size,
-                        void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
-                            size_t size,
-                            size_t alignment,
-                            void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
-                              void* address,
-                              size_t size,
-                              size_t alignment,
-                              void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* PartitionRealloc(const AllocatorDispatch*,
-                       void* address,
-                       size_t size,
-                       void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void PartitionFree(const AllocatorDispatch*, void* object, void* context);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
-                                void* address,
-                                void* context);
-
-}  // namespace allocator_shim::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc
deleted file mode 100644
index bf00055..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-
-#include <cstdlib>
-#include <cstring>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-#include <malloc.h>
-#endif
-
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && BUILDFLAG(USE_PARTITION_ALLOC)
-namespace allocator_shim::internal {
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-// Platforms on which we override weak libc symbols.
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-PA_NOINLINE void FreeForTest(void* data) {
-  free(data);
-}
-
-TEST(PartitionAllocAsMalloc, Mallinfo) {
-  // mallinfo was deprecated in glibc 2.33. The Chrome OS device sysroot has
-  // a new-enough glibc, but the Linux one doesn't yet, so we can't switch to
-  // the replacement mallinfo2 yet.
-  // Once we update the Linux sysroot to be new enough, this warning will
-  // start firing on Linux too. At that point, s/mallinfo/mallinfo2/ in this
-  // file and remove the pragma here and at the end of this function.
-#if BUILDFLAG(IS_CHROMEOS)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-#endif
-  constexpr int kLargeAllocSize = 10 * 1024 * 1024;
-  struct mallinfo before = mallinfo();
-  void* data = malloc(1000);
-  ASSERT_TRUE(data);
-  void* aligned_data;
-  ASSERT_EQ(0, posix_memalign(&aligned_data, 1024, 1000));
-  ASSERT_TRUE(aligned_data);
-  void* direct_mapped_data = malloc(kLargeAllocSize);
-  ASSERT_TRUE(direct_mapped_data);
-  struct mallinfo after_alloc = mallinfo();
-
-  // Something is reported.
-  EXPECT_GT(after_alloc.hblks, 0);
-  EXPECT_GT(after_alloc.hblkhd, 0);
-  EXPECT_GT(after_alloc.uordblks, 0);
-
-  EXPECT_GT(after_alloc.hblks, kLargeAllocSize);
-
-  // malloc() can reuse memory, so sizes are not necessarily changing, which
-  // would mean that we need EXPECT_G*E*() rather than EXPECT_GT().
-  //
-  // However since we allocate direct-mapped memory, this increases the total.
-  EXPECT_GT(after_alloc.hblks, before.hblks);
-  EXPECT_GT(after_alloc.hblkhd, before.hblkhd);
-  EXPECT_GT(after_alloc.uordblks, before.uordblks);
-
-  // A simple malloc() / free() pair can be (and is) discarded by the compiler,
-  // making the test fail. Making |FreeForTest()| a PA_NOINLINE function is
-  // sufficient to keep the calls from being eliminated, and it is also
-  // required.
-  FreeForTest(data);
-  FreeForTest(aligned_data);
-  FreeForTest(direct_mapped_data);
-  struct mallinfo after_free = mallinfo();
-
-  EXPECT_LT(after_free.hblks, after_alloc.hblks);
-  EXPECT_LT(after_free.hblkhd, after_alloc.hblkhd);
-  EXPECT_LT(after_free.uordblks, after_alloc.uordblks);
-#if BUILDFLAG(IS_CHROMEOS)
-#pragma clang diagnostic pop
-#endif
-}
-
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-// Note: the tests below are quite simple; they are used as smoke tests
-// for PartitionAlloc-Everywhere. Most of these directly dispatch to
-// PartitionAlloc, which has much more extensive tests.
-TEST(PartitionAllocAsMalloc, Simple) {
-  void* data = PartitionMalloc(nullptr, 10, nullptr);
-  EXPECT_TRUE(data);
-  PartitionFree(nullptr, data, nullptr);
-}
-
-TEST(PartitionAllocAsMalloc, MallocUnchecked) {
-  void* data = PartitionMallocUnchecked(nullptr, 10, nullptr);
-  EXPECT_TRUE(data);
-  PartitionFree(nullptr, data, nullptr);
-
-  void* too_large = PartitionMallocUnchecked(nullptr, 4e9, nullptr);
-  EXPECT_FALSE(too_large);  // No crash.
-}
-
-TEST(PartitionAllocAsMalloc, Calloc) {
-  constexpr size_t alloc_size = 100;
-  void* data = PartitionCalloc(nullptr, 1, alloc_size, nullptr);
-  EXPECT_TRUE(data);
-
-  char zeroes[alloc_size];
-  memset(zeroes, 0, alloc_size);
-
-  EXPECT_EQ(0, memcmp(zeroes, data, alloc_size));
-  PartitionFree(nullptr, data, nullptr);
-}
-
-TEST(PartitionAllocAsMalloc, Memalign) {
-  constexpr size_t alloc_size = 100;
-  constexpr size_t alignment = 1024;
-  void* data = PartitionMemalign(nullptr, alignment, alloc_size, nullptr);
-  EXPECT_TRUE(data);
-  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(data) % alignment);
-  PartitionFree(nullptr, data, nullptr);
-}
-
-TEST(PartitionAllocAsMalloc, AlignedAlloc) {
-  for (size_t alloc_size : {100, 100000, 10000000}) {
-    for (size_t alignment = 1;
-         alignment <= partition_alloc::kMaxSupportedAlignment;
-         alignment <<= 1) {
-      void* data =
-          PartitionAlignedAlloc(nullptr, alloc_size, alignment, nullptr);
-      EXPECT_TRUE(data);
-      EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(data) % alignment);
-      PartitionFree(nullptr, data, nullptr);
-    }
-  }
-}
-
-TEST(PartitionAllocAsMalloc, AlignedRealloc) {
-  for (size_t alloc_size : {100, 100000, 10000000}) {
-    for (size_t alignment = 1;
-         alignment <= partition_alloc::kMaxSupportedAlignment;
-         alignment <<= 1) {
-      void* data =
-          PartitionAlignedAlloc(nullptr, alloc_size, alignment, nullptr);
-      EXPECT_TRUE(data);
-
-      void* data2 = PartitionAlignedRealloc(nullptr, data, alloc_size,
-                                            alignment, nullptr);
-      EXPECT_TRUE(data2);
-
-      // Aligned realloc always relocates.
-      EXPECT_NE(reinterpret_cast<uintptr_t>(data),
-                reinterpret_cast<uintptr_t>(data2));
-      PartitionFree(nullptr, data2, nullptr);
-    }
-  }
-}
-
-TEST(PartitionAllocAsMalloc, Realloc) {
-  constexpr size_t alloc_size = 100;
-  void* data = PartitionMalloc(nullptr, alloc_size, nullptr);
-  EXPECT_TRUE(data);
-  void* data2 = PartitionRealloc(nullptr, data, 2 * alloc_size, nullptr);
-  EXPECT_TRUE(data2);
-  EXPECT_NE(data2, data);
-  PartitionFree(nullptr, data2, nullptr);
-}
-
-// crbug.com/1141752
-TEST(PartitionAllocAsMalloc, Alignment) {
-  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(PartitionAllocMalloc::Allocator()) %
-                    alignof(partition_alloc::PartitionRoot));
-  // This works fine even if nullptr is returned.
-  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
-                    PartitionAllocMalloc::OriginalAllocator()) %
-                    alignof(partition_alloc::PartitionRoot));
-  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
-                    PartitionAllocMalloc::AlignedAllocator()) %
-                    alignof(partition_alloc::PartitionRoot));
-}
-
-// crbug.com/1297945
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
-TEST(PartitionAllocAsMalloc, DisableCrashOnOom) {
-  PartitionAllocSetCallNewHandlerOnMallocFailure(false);
-  // Smaller than the max size to avoid overflow checks with padding.
-  void* ptr =
-      PartitionMalloc(nullptr,
-                      std::numeric_limits<size_t>::max() -
-                          10 * partition_alloc::internal::base::GetPageSize(),
-                      nullptr);
-  // Should not crash.
-  EXPECT_FALSE(ptr);
-  PartitionAllocSetCallNewHandlerOnMallocFailure(true);
-}
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
-
-}  // namespace allocator_shim::internal
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
-        // BUILDFLAG(USE_PARTITION_ALLOC)
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_winheap.cc b/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_winheap.cc
deleted file mode 100644
index 23d498e..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_winheap.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-
-#include <ostream>
-
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/shim/winheap_stubs_win.h"
-
-namespace {
-
-using allocator_shim::AllocatorDispatch;
-
-void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
-                               size_t size,
-                               void* context) {
-  return allocator_shim::WinHeapMalloc(size);
-}
-
-void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
-                               size_t n,
-                               size_t elem_size,
-                               void* context) {
-  // Overflow check.
-  const size_t size = n * elem_size;
-  if (elem_size != 0 && size / elem_size != n) {
-    return nullptr;
-  }
-
-  void* result = DefaultWinHeapMallocImpl(self, size, context);
-  if (result) {
-    memset(result, 0, size);
-  }
-  return result;
-}
-
-void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
-                                 size_t alignment,
-                                 size_t size,
-                                 void* context) {
-  PA_CHECK(false) << "The windows heap does not support memalign.";
-  return nullptr;
-}
-
-void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
-                                void* address,
-                                size_t size,
-                                void* context) {
-  return allocator_shim::WinHeapRealloc(address, size);
-}
-
-void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
-                            void* address,
-                            void* context) {
-  allocator_shim::WinHeapFree(address);
-}
-
-size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
-                                         void* address,
-                                         void* context) {
-  return allocator_shim::WinHeapGetSizeEstimate(address);
-}
-
-void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
-                                      size_t size,
-                                      size_t alignment,
-                                      void* context) {
-  return allocator_shim::WinHeapAlignedMalloc(size, alignment);
-}
-
-void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
-                                       void* ptr,
-                                       size_t size,
-                                       size_t alignment,
-                                       void* context) {
-  return allocator_shim::WinHeapAlignedRealloc(ptr, size, alignment);
-}
-
-void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
-                                   void* ptr,
-                                   void* context) {
-  allocator_shim::WinHeapAlignedFree(ptr);
-}
-
-}  // namespace
-
-// Guarantee that default_dispatch is compile-time initialized to avoid using
-// it before initialization (allocations before main in release builds with
-// optimizations disabled).
-constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &DefaultWinHeapMallocImpl,
-    &DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
-    &DefaultWinHeapCallocImpl,
-    &DefaultWinHeapMemalignImpl,
-    &DefaultWinHeapReallocImpl,
-    &DefaultWinHeapFreeImpl,
-    &DefaultWinHeapGetSizeEstimateImpl,
-    nullptr, /* claimed_address */
-    nullptr, /* batch_malloc_function */
-    nullptr, /* batch_free_function */
-    nullptr, /* free_definite_size_function */
-    nullptr, /* try_free_default_function */
-    &DefaultWinHeapAlignedMallocImpl,
-    &DefaultWinHeapAlignedReallocImpl,
-    &DefaultWinHeapAlignedFreeImpl,
-    nullptr, /* next */
-};
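The calloc path in the file deleted above guards the n * elem_size multiplication by dividing back and comparing, which is the portable way to detect size_t overflow without a wider integer type. A standalone illustration of that check; the helper name here is made up for this sketch.

#include <cstddef>

// Writes n * elem_size to *out and returns true, or returns false if the
// multiplication would wrap around size_t (the division-based check above).
bool CheckedAllocSize(size_t n, size_t elem_size, size_t* out) {
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) {
    return false;
  }
  *out = size;
  return true;
}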
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_internals.h b/base/allocator/partition_allocator/shim/allocator_shim_internals.h
deleted file mode 100644
index 8bddea7..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_internals.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
-
-#include "build/build_config.h"
-
-#if defined(__GNUC__)
-
-#if BUILDFLAG(IS_POSIX)
-#include <sys/cdefs.h>  // for __THROW
-#endif
-
-#ifndef __THROW   // Not a glibc system
-#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
-#define __THROW _NOEXCEPT
-#else
-#define __THROW
-#endif  // !_NOEXCEPT
-#endif
-
-// Shim layer symbols need to be ALWAYS exported, regardless of component build.
-//
-// If an exported symbol is linked into a DSO, it may be preempted by a
-// definition in the main executable. If this happens to an allocator symbol, it
-// will mean that the DSO will use the main executable's allocator. This is
-// normally relatively harmless -- regular allocations should all use the same
-// allocator, but if the DSO tries to hook the allocator it will not see any
-// allocations.
-//
-// However, if LLVM LTO is enabled, the compiler may inline the shim layer
-// symbols into callers. The end result is that allocator calls in DSOs may use
-// either the main executable's allocator or the DSO's allocator, depending on
-// whether the call was inlined. This is arguably a bug in LLVM caused by its
-// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
-// To work around the bug we use noinline to prevent the symbols from being
-// inlined.
-//
-// In the long run we probably want to avoid linking the allocator bits into
-// DSOs altogether. This will save a little space and stop giving DSOs the false
-// impression that they can hook the allocator.
-#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
-
-#elif BUILDFLAG(IS_WIN)  // __GNUC__
-
-#define __THROW
-#define SHIM_ALWAYS_EXPORT __declspec(noinline)
-
-#endif  // __GNUC__
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
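A hypothetical usage sketch for the macros defined in the header deleted above: a shim entry point given default visibility and marked noinline so that LTO cannot fold it into callers across a DSO boundary. The function name is illustrative only; a real shim would route through the allocator dispatch chain rather than libc.

#include <cstddef>
#include <cstdlib>

#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))

// Exported, out-of-line shim symbol (GCC/Clang only, mirroring the __GNUC__
// branch of the deleted header).
extern "C" SHIM_ALWAYS_EXPORT void* shim_malloc_example(size_t size) {
  return std::malloc(size);  // Placeholder; not the real dispatch.
}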
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_cpp_symbols.h b/base/allocator/partition_allocator/shim/allocator_shim_override_cpp_symbols.h
deleted file mode 100644
index 33bfeda..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_cpp_symbols.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
-
-// Preempt the default new/delete C++ symbols so they call the shim entry
-// points. This file is strongly inspired by tcmalloc's
-// libc_override_redefine.h.
-
-#include <new>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(IS_APPLE)
-#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
-#else
-// On Apple OSes, prefer not to export these symbols (this reverts to the
-// default behavior; they are still exported in e.g. component builds). This is
-// partly due to intentional limits on symbols exported from the main library,
-// and partly because it is needless: no library used on macOS imports these.
-//
-// TODO(lizeb): It may not be necessary anywhere to export these.
-#define SHIM_CPP_SYMBOLS_EXPORT PA_NOINLINE
-#endif
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {
-  return ShimCppNew(size);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size) {
-  return ShimCppNew(size);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size,
-                                           const std::nothrow_t&) __THROW {
-  return ShimCppNewNoThrow(size);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size,
-                                             const std::nothrow_t&) __THROW {
-  return ShimCppNewNoThrow(size);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
-                                             const std::nothrow_t&) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
-                                               const std::nothrow_t&) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p, size_t) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
-                                           std::align_val_t alignment) {
-  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
-                                           std::align_val_t alignment,
-                                           const std::nothrow_t&) __THROW {
-  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
-                                             std::align_val_t) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
-                                             std::size_t size,
-                                             std::align_val_t) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
-                                             std::align_val_t,
-                                             const std::nothrow_t&) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
-                                             std::align_val_t alignment) {
-  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
-                                             std::align_val_t alignment,
-                                             const std::nothrow_t&) __THROW {
-  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
-                                               std::align_val_t) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
-                                               std::size_t size,
-                                               std::align_val_t) __THROW {
-  ShimCppDelete(p);
-}
-
-SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
-                                               std::align_val_t,
-                                               const std::nothrow_t&) __THROW {
-  ShimCppDelete(p);
-}
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
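As context for the header above, a standalone sketch (not part of the shim) of the global-replacement mechanism it relies on: a strong definition of ::operator new / ::operator delete anywhere in the program replaces the library versions, so plain new/delete throughout the process is routed through it. The counter below is illustrative only and not thread-safe, and the new-handler retry loop that the real shim implements is omitted:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    static std::size_t g_live_allocations = 0;

    void* operator new(std::size_t size) {
      void* ptr = std::malloc(size);
      if (!ptr) {
        throw std::bad_alloc();
      }
      ++g_live_allocations;
      return ptr;
    }

    void operator delete(void* ptr) noexcept {
      if (ptr) {
        --g_live_allocations;
      }
      std::free(ptr);
    }

    int main() {
      int* value = new int(42);
      delete value;
      std::printf("live allocations: %zu\n", g_live_allocations);
      return 0;
    }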
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h b/base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h
deleted file mode 100644
index 257a911..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_glibc_weak_symbols.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
-
-// Alias the internal Glibc symbols to the shim entry points.
-// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
-// Effectively this file does two things:
-//  1) Re-define the __malloc_hook family of symbols. Those symbols are defined
-//     as weak in glibc and are meant to be defined strongly by client processes
-//     to hook calls initiated from within glibc.
-//  2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
-//     is that in the past (in RedHat 9) we had instances of libraries that were
-//     allocating via malloc() and freeing using __libc_free().
-//     See tcmalloc's libc_override_glibc.h for more context.
-
-#include <features.h>  // for __GLIBC__
-#include <malloc.h>
-#include <unistd.h>
-
-#include <new>
-
-#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
-
-// __MALLOC_HOOK_VOLATILE not defined in all Glibc headers.
-#if !defined(__MALLOC_HOOK_VOLATILE)
-#define MALLOC_HOOK_MAYBE_VOLATILE /**/
-#else
-#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
-#endif
-
-extern "C" {
-
-// 1) Re-define malloc_hook weak symbols.
-namespace {
-
-void* GlibcMallocHook(size_t size, const void* caller) {
-  return ShimMalloc(size, nullptr);
-}
-
-void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
-  return ShimRealloc(ptr, size, nullptr);
-}
-
-void GlibcFreeHook(void* ptr, const void* caller) {
-  return ShimFree(ptr, nullptr);
-}
-
-void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
-  return ShimMemalign(align, size, nullptr);
-}
-
-}  // namespace
-
-__attribute__((visibility("default"))) void* (
-    *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
-                                               const void*) = &GlibcMallocHook;
-
-__attribute__((visibility("default"))) void* (
-    *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
-    &GlibcReallocHook;
-
-__attribute__((visibility("default"))) void (
-    *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
-                                             const void*) = &GlibcFreeHook;
-
-__attribute__((visibility("default"))) void* (
-    *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
-    &GlibcMemalignHook;
-
-// 2) Redefine libc symbols themselves.
-
-SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
-  return ShimMalloc(size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
-  ShimFree(ptr, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
-  return ShimRealloc(ptr, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
-  return ShimCalloc(n, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
-  return ShimFree(ptr, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
-  return ShimMemalign(align, s, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
-  return ShimValloc(size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
-  return ShimPvalloc(size);
-}
-
-SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
-  return ShimPosixMemalign(r, a, s);
-}
-
-}  // extern "C"
-
-// Safety check.
-#if !defined(__GLIBC__)
-#error The target platform does not seem to use Glibc. Disable the allocator \
-shim by setting use_allocator_shim=false in GN args.
-#endif
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
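For background on the mechanism above, a standalone sketch (glibc/Linux only, not part of the shim) of how a strong definition in the main executable preempts glibc's weak malloc/free, while __libc_malloc/__libc_free keep a path to the real allocator:

    #include <unistd.h>
    #include <cstdlib>

    extern "C" void* __libc_malloc(size_t size);  // glibc's internal entry point
    extern "C" void __libc_free(void* ptr);

    // glibc exports malloc/free as weak symbols, so these strong definitions
    // win at link time and every allocation in the process goes through them.
    extern "C" void* malloc(size_t size) {
      // write(2) instead of printf to avoid re-entering the allocator.
      static const char kMessage[] = "intercepted malloc\n";
      (void)!write(2, kMessage, sizeof(kMessage) - 1);
      return __libc_malloc(size);
    }

    extern "C" void free(void* ptr) {
      __libc_free(ptr);
    }

    int main() {
      void* ptr = malloc(32);
      free(ptr);
      return 0;
    }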
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_libc_symbols.h b/base/allocator/partition_allocator/shim/allocator_shim_override_libc_symbols.h
deleted file mode 100644
index bb07170..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_libc_symbols.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This header's purpose is to preempt the libc symbols for malloc/new so they
-// call the shim layer entry points.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
-
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE)
-#include <malloc/malloc.h>
-#else
-#include <malloc.h>
-#endif
-
-#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
-
-extern "C" {
-
-// WARNING: Whenever a new function is added here (which, surprisingly enough,
-// happens; for instance, glibc 2.33 introduced mallinfo2(), which we don't
-// support yet), it MUST be added to build/linux/chrome.map.
-//
-// Otherwise the new symbol is not exported from Chromium's main binary, which
-// is necessary to override libc's weak symbol, which in turn is necessary to
-// intercept calls made by dynamic libraries. See crbug.com/1292206 for such
-// an example.
-
-SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
-  return ShimMalloc(size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
-  ShimFree(ptr, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
-  return ShimRealloc(ptr, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
-  return ShimCalloc(n, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
-  ShimFree(ptr, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
-  return ShimMemalign(align, s, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
-  return ShimMemalign(align, s, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
-  return ShimValloc(size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
-  return ShimPvalloc(size);
-}
-
-SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
-  return ShimPosixMemalign(r, a, s);
-}
-
-SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
-  return ShimGetSizeEstimate(address, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
-  return ShimGetSizeEstimate(address, nullptr);
-}
-
-// The default dispatch translation unit has to define also the following
-// symbols (unless they are ultimately routed to the system symbols):
-//   void malloc_stats(void);
-//   int mallopt(int, int);
-//   struct mallinfo mallinfo(void);
-
-}  // extern "C"
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h
deleted file mode 100644
index f074961..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
-
-// This header overrides the __wrap_X symbols when using the link-time
-// -Wl,-wrap,malloc shim-layer approach (see README.md).
-// All references to malloc, free, etc. within the linker unit that gets the
-// -wrap linker flags (e.g., libchrome.so) are rewritten by the linker into
-// references to __wrap_malloc, __wrap_free, etc., which are defined here.
-
-#include <algorithm>
-#include <cstring>
-
-#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
-
-extern "C" {
-
-SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
-  return ShimCalloc(n, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
-  ShimFree(ptr, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
-  return ShimMalloc(size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
-  return ShimMemalign(align, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
-                                             size_t align,
-                                             size_t size) {
-  return ShimPosixMemalign(res, align, size);
-}
-
-SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
-  return ShimPvalloc(size);
-}
-
-SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
-  return ShimRealloc(address, size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
-  return ShimValloc(size, nullptr);
-}
-
-SHIM_ALWAYS_EXPORT size_t __wrap_malloc_usable_size(void* address) {
-  return ShimGetSizeEstimate(address, nullptr);
-}
-
-const size_t kPathMaxSize = 8192;
-static_assert(kPathMaxSize >= PATH_MAX, "");
-
-extern char* __wrap_strdup(const char* str);
-
-// Override <stdlib.h>
-
-extern char* __real_realpath(const char* path, char* resolved_path);
-
-SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
-                                         char* resolved_path) {
-  if (resolved_path) {
-    return __real_realpath(path, resolved_path);
-  }
-
-  char buffer[kPathMaxSize];
-  if (!__real_realpath(path, buffer)) {
-    return nullptr;
-  }
-  return __wrap_strdup(buffer);
-}
-
-// Override <string.h> functions
-
-SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
-  std::size_t length = std::strlen(str) + 1;
-  void* buffer = ShimMalloc(length, nullptr);
-  if (!buffer) {
-    return nullptr;
-  }
-  return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
-}
-
-SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
-  std::size_t length = std::min(std::strlen(str), n);
-  char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
-  if (!buffer) {
-    return nullptr;
-  }
-  std::memcpy(buffer, str, length);
-  buffer[length] = '\0';
-  return buffer;
-}
-
-// Override <unistd.h>
-
-extern char* __real_getcwd(char* buffer, size_t size);
-
-SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
-  if (buffer) {
-    return __real_getcwd(buffer, size);
-  }
-
-  if (!size) {
-    size = kPathMaxSize;
-  }
-  char local_buffer[size];
-  if (!__real_getcwd(local_buffer, size)) {
-    return nullptr;
-  }
-  return __wrap_strdup(local_buffer);
-}
-
-// Override stdio.h
-
-// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
-// Android, and used by libc++.
-SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
-                                        const char* fmt,
-                                        va_list va_args) {
-  // There are cases where we need to use the list of arguments twice, namely
-  // when the original buffer is too small. It is not allowed to walk the list
-  // twice, so make a copy for the second invocation of vsnprintf().
-  va_list va_args_copy;
-  va_copy(va_args_copy, va_args);
-
-  constexpr int kInitialSize = 128;
-  *strp = static_cast<char*>(
-      malloc(kInitialSize));  // Our malloc() doesn't return nullptr.
-
-  int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
-  if (actual_size < 0) {
-    va_end(va_args_copy);
-    return actual_size;
-  }
-  *strp =
-      static_cast<char*>(realloc(*strp, static_cast<size_t>(actual_size + 1)));
-
-  // Now we know the size. This is not very efficient, but we cannot really do
-  // better without accessing internal libc functions, or reimplementing
-  // *printf().
-  //
-  // This is very lightly used in Chromium in practice, see crbug.com/116558 for
-  // details.
-  if (actual_size >= kInitialSize) {
-    int ret = vsnprintf(*strp, static_cast<size_t>(actual_size + 1), fmt,
-                        va_args_copy);
-    va_end(va_args_copy);
-    return ret;
-  }
-
-  va_end(va_args_copy);
-  return actual_size;
-}
-
-SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
-  va_list va_args;
-  va_start(va_args, fmt);
-  int retval = vasprintf(strp, fmt, va_args);
-  va_end(va_args);
-  return retval;
-}
-
-}  // extern "C"
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
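For background on the -Wl,-wrap approach the header above targets, a standalone sketch (not part of the shim; the file name is only an example): when the link is performed with --wrap=malloc, undefined references to malloc are redirected to __wrap_malloc, and the original definition remains reachable as __real_malloc.

    // Build with e.g.: g++ wrap_demo.cc -o wrap_demo -Wl,--wrap=malloc
    #include <unistd.h>
    #include <cstdlib>

    extern "C" void* __real_malloc(size_t size);  // resolved by the linker

    extern "C" void* __wrap_malloc(size_t size) {
      static const char kMessage[] = "malloc routed through __wrap_malloc\n";
      (void)!write(2, kMessage, sizeof(kMessage) - 1);
      return __real_malloc(size);
    }

    int main() {
      void* ptr = malloc(64);  // rewritten by the linker into __wrap_malloc(64)
      free(ptr);
      return 0;
    }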
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_mac_default_zone.h b/base/allocator/partition_allocator/shim/allocator_shim_override_mac_default_zone.h
deleted file mode 100644
index cfa2742..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_mac_default_zone.h
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
-
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#error This header must be included iff PartitionAlloc-Everywhere is enabled.
-#endif
-
-#include <string.h>
-
-#include <atomic>
-#include <tuple>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/mac/mach_logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/shim/early_zone_registration_constants.h"
-
-namespace partition_alloc {
-
-// Defined in base/allocator/partition_allocator/partition_root.cc
-void PartitionAllocMallocHookOnBeforeForkInParent();
-void PartitionAllocMallocHookOnAfterForkInParent();
-void PartitionAllocMallocHookOnAfterForkInChild();
-
-}  // namespace partition_alloc
-
-namespace allocator_shim {
-
-namespace {
-
-// malloc_introspection_t's callback functions for our own zone
-
-kern_return_t MallocIntrospectionEnumerator(task_t task,
-                                            void*,
-                                            unsigned type_mask,
-                                            vm_address_t zone_address,
-                                            memory_reader_t reader,
-                                            vm_range_recorder_t recorder) {
-  // Should enumerate all memory regions allocated by this allocator, but this
-  // is not implemented because there is no use case for it yet.
-  return KERN_FAILURE;
-}
-
-size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
-  return partition_alloc::internal::base::bits::AlignUp(
-      size, partition_alloc::internal::kAlignment);
-}
-
-boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
-  // Should check the consistency of the allocator implementing this malloc
-  // zone, but this is not implemented because there is no use case for it yet.
-  return true;
-}
-
-void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
-  // Should print the current state of the zone for debugging / investigation
-  // purposes, but this is not implemented because there is no use case for it
-  // yet.
-}
-
-void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
-  // Should enable logging of the activities on the given `address`, but this
-  // is not implemented because there is no use case for it yet.
-}
-
-void MallocIntrospectionForceLock(malloc_zone_t* zone) {
-  // Called before fork(2) to acquire the lock.
-  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
-}
-
-void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
-  // Called in the parent process after fork(2) to release the lock.
-  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
-}
-
-void MallocIntrospectionStatistics(malloc_zone_t* zone,
-                                   malloc_statistics_t* stats) {
-  // Should report the memory usage correctly, but this is not implemented
-  // because there is no use case for it yet.
-  stats->blocks_in_use = 0;
-  stats->size_in_use = 0;
-  stats->max_size_in_use = 0;  // High water mark of touched memory
-  stats->size_allocated = 0;   // Reserved in memory
-}
-
-boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
-  // Should return true if the underlying PartitionRoot is locked, but this is
-  // not implemented because the function does not appear to be used in
-  // practice.
-  return false;
-}
-
-boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
-  // 'discharge' is not supported.
-  return false;
-}
-
-void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
-  // 'discharge' is not supported.
-}
-
-void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
-  // 'discharge' is not supported.
-}
-
-void MallocIntrospectionEnumerateDischargedPointers(
-    malloc_zone_t* zone,
-    void (^report_discharged)(void* memory, void* info)) {
-  // 'discharge' is not supported.
-}
-
-void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
-  // Called in a child process after fork(2) to re-initialize the lock.
-  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
-}
-
-void MallocIntrospectionPrintTask(task_t task,
-                                  unsigned level,
-                                  vm_address_t zone_address,
-                                  memory_reader_t reader,
-                                  print_task_printer_t printer) {
-  // Should print the current state of another process's zone for debugging /
-  // investigation purposes, but this is not implemented because there is no
-  // use case for it yet.
-}
-
-void MallocIntrospectionTaskStatistics(task_t task,
-                                       vm_address_t zone_address,
-                                       memory_reader_t reader,
-                                       malloc_statistics_t* stats) {
-  // Should report the memory usage in another process's zone, but this is not
-  // implemented because there is no use case for it yet.
-  stats->blocks_in_use = 0;
-  stats->size_in_use = 0;
-  stats->max_size_in_use = 0;  // High water mark of touched memory
-  stats->size_allocated = 0;   // Reserved in memory
-}
-
-// malloc_zone_t's callback functions for our own zone
-
-size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
-  return ShimGetSizeEstimate(ptr, nullptr);
-}
-
-void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
-  return ShimMalloc(size, nullptr);
-}
-
-void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
-  return ShimCalloc(n, size, nullptr);
-}
-
-void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
-  return ShimValloc(size, nullptr);
-}
-
-void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
-  return ShimFree(ptr, nullptr);
-}
-
-void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
-  return ShimRealloc(ptr, size, nullptr);
-}
-
-void MallocZoneDestroy(malloc_zone_t* zone) {
-  // No support to destroy the zone for now.
-}
-
-void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
-  return ShimMemalign(alignment, size, nullptr);
-}
-
-void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
-  return ShimFreeDefiniteSize(ptr, size, nullptr);
-}
-
-unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
-                               size_t size,
-                               void** results,
-                               unsigned num_requested) {
-  return ShimBatchMalloc(size, results, num_requested, nullptr);
-}
-
-void MallocZoneBatchFree(malloc_zone_t* zone,
-                         void** to_be_freed,
-                         unsigned num) {
-  return ShimBatchFree(to_be_freed, num, nullptr);
-}
-
-boolean_t MallocZoneClaimedAddress(malloc_zone_t* zone, void* ptr) {
-  return static_cast<boolean_t>(ShimClaimedAddress(ptr, nullptr));
-}
-
-#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
-void MallocZoneTryFreeDefault(malloc_zone_t* zone, void* ptr) {
-  return ShimTryFreeDefault(ptr, nullptr);
-}
-#endif
-
-malloc_introspection_t g_mac_malloc_introspection{};
-malloc_zone_t g_mac_malloc_zone{};
-
-malloc_zone_t* GetDefaultMallocZone() {
-  // malloc_default_zone() does not actually return the default zone, but the
-  // initial one. The default zone is the first element of the zone array.
-  unsigned int zone_count = 0;
-  vm_address_t* zones = nullptr;
-  kern_return_t result =
-      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
-  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
-  return reinterpret_cast<malloc_zone_t*>(zones[0]);
-}
-
-bool IsAlreadyRegistered() {
-  // HACK: This should really only be called once, but it is not.
-  //
-  // This function is a static constructor of its binary. If it is included in a
-  // dynamic library, then the same process may end up executing this code
-  // multiple times, once per library. As a consequence, each new library will
-  // add its own allocator as the default zone. Aside from splitting the heap
-  // further, the main issue arises if/when the last library to be loaded
-  // (dlopen()-ed) gets dlclose()-ed.
-  //
-  // See crbug.com/1271139 for details.
-  //
-  // In this case, subsequent free() will be routed by libmalloc to the deleted
-  // zone (since its code has been unloaded from memory), and crash inside
-  // libsystem's free(). This in practice happens as soon as dlclose() is
-  // called, inside the dynamic linker (dyld).
-  //
-  // Since we are dealing with different libraries, and with issues inside the
-  // dynamic linker, we cannot use a global static variable (which would be
-  // per-library), or anything from pthread.
-  //
-  // The solution used here is to check whether the current default zone is
-  // already ours, in which case we are not the first dynamic library here, and
-  // should do nothing. This is racy, and hacky.
-  vm_address_t* zones = nullptr;
-  unsigned int zone_count = 0;
-  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
-  // something other than the default zone. See the difference between
-  // malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
-  // (in libmalloc).
-  kern_return_t result =
-      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
-  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
-  // Checking all the zones, in case someone registered their own zone on top of
-  // our own.
-  for (unsigned int i = 0; i < zone_count; i++) {
-    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
-
-    // Use strcmp() and not a pointer comparison: since the zone was registered
-    // from another library, the pointers don't match.
-    if (zone->zone_name &&
-        (strcmp(zone->zone_name, kPartitionAllocZoneName) == 0)) {
-      // This zone is provided by PartitionAlloc, so this function has been
-      // called from another library (or the main executable), nothing to do.
-      //
-      // Ideally this would be a crash, but since callers do this, only warn
-      // for now.
-      PA_RAW_LOG(ERROR,
-                 "Trying to load the allocator multiple times. This is *not* "
-                 "supported.");
-      return true;
-    }
-  }
-
-  return false;
-}
-
-void InitializeZone() {
-  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
-  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
-  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
-  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
-  g_mac_malloc_introspection.log = MallocIntrospectionLog;
-  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
-  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
-  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
-  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
-  g_mac_malloc_introspection.enable_discharge_checking =
-      MallocIntrospectionEnableDischargeChecking;
-  g_mac_malloc_introspection.disable_discharge_checking =
-      MallocIntrospectionDisableDischargeChecking;
-  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
-  g_mac_malloc_introspection.enumerate_discharged_pointers =
-      MallocIntrospectionEnumerateDischargedPointers;
-  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
-  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
-  g_mac_malloc_introspection.task_statistics =
-      MallocIntrospectionTaskStatistics;
-  // `version` member indicates which APIs are supported in this zone.
-  //   version >= 5: memalign is supported
-  //   version >= 6: free_definite_size is supported
-  //   version >= 7: introspect's discharge family is supported
-  //   version >= 8: pressure_relief is supported
-  //   version >= 9: introspect.reinit_lock is supported
-  //   version >= 10: claimed_address is supported
-  //   version >= 11: introspect.print_task is supported
-  //   version >= 12: introspect.task_statistics is supported
-  //   version >= 13: try_free_default is supported
-  g_mac_malloc_zone.version = kZoneVersion;
-  g_mac_malloc_zone.zone_name = kPartitionAllocZoneName;
-  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
-  g_mac_malloc_zone.size = MallocZoneSize;
-  g_mac_malloc_zone.malloc = MallocZoneMalloc;
-  g_mac_malloc_zone.calloc = MallocZoneCalloc;
-  g_mac_malloc_zone.valloc = MallocZoneValloc;
-  g_mac_malloc_zone.free = MallocZoneFree;
-  g_mac_malloc_zone.realloc = MallocZoneRealloc;
-  g_mac_malloc_zone.destroy = MallocZoneDestroy;
-  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
-  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
-  g_mac_malloc_zone.memalign = MallocZoneMemalign;
-  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
-  g_mac_malloc_zone.pressure_relief = nullptr;
-  g_mac_malloc_zone.claimed_address = MallocZoneClaimedAddress;
-#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
-  g_mac_malloc_zone.try_free_default = MallocZoneTryFreeDefault;
-#endif
-}
-
-namespace {
-static std::atomic<bool> g_initialization_is_done;
-}
-
-// Replaces the default malloc zone with our own malloc zone backed by
-// PartitionAlloc.  Since we'd like to make as much code as possible use our
-// own memory allocator (and reduce bugs caused by mixed use of the system
-// allocator and our own allocator), run the following function
-// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
-//
-// Note that, despite the highest priority in the initialization order,
-// [NSThread init] unfortunately runs before
-// InitializeDefaultMallocZoneWithPartitionAlloc and allocates memory with the
-// system allocator.  In addition, that memory is deallocated with the default
-// zone's `free` at that point, without going through a zone dispatcher.  Hence,
-// our own `free` function receives an address allocated by the system allocator.
-__attribute__((constructor(0))) void
-InitializeDefaultMallocZoneWithPartitionAlloc() {
-  if (IsAlreadyRegistered()) {
-    return;
-  }
-
-  // Instantiate the existing regular and purgeable zones in order to make the
-  // existing purgeable zone use the existing regular zone since PartitionAlloc
-  // doesn't support a purgeable zone.
-  std::ignore = malloc_default_zone();
-  std::ignore = malloc_default_purgeable_zone();
-
-  // Initialize the default allocator's PartitionRoot with the existing zone.
-  InitializeDefaultAllocatorPartitionRoot();
-
-  // Create our own malloc zone.
-  InitializeZone();
-
-  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
-  if (strcmp(system_default_zone->zone_name, kDelegatingZoneName) == 0) {
-    // The first zone is our zone, we can unregister it, replacing it with the
-    // new one. This relies on a precise zone setup, done in
-    // |EarlyMallocZoneRegistration()|.
-    malloc_zone_register(&g_mac_malloc_zone);
-    malloc_zone_unregister(system_default_zone);
-    g_initialization_is_done.store(true, std::memory_order_release);
-    return;
-  }
-
-  // Not in the path where the zone was registered early. This is either racy,
-  // or fine if the current process is not hosting multiple threads.
-  //
-  // This path is fine for e.g. most unit tests.
-  //
-  // Make our own zone the default zone.
-  //
-  // Put our own zone at the last position, so that it promotes to the default
-  // zone.  The implementation logic of malloc_zone_unregister is:
-  //   zone_table.swap(unregistered_zone, last_zone);
-  //   zone_table.shrink_size_by_1();
-  malloc_zone_register(&g_mac_malloc_zone);
-  malloc_zone_unregister(system_default_zone);
-  // Between malloc_zone_unregister(system_default_zone) (above) and
-  // malloc_zone_register(system_default_zone) (below), i.e. while
-  // system_default_zone is absent, it is possible that another thread calls
-  // free(ptr), hits a "no zone found" error, and crashes the process.
-  malloc_zone_register(system_default_zone);
-
-  // Confirm that our own zone is now the default zone.
-  PA_CHECK(GetDefaultMallocZone() == &g_mac_malloc_zone);
-  g_initialization_is_done.store(true, std::memory_order_release);
-}
-
-}  // namespace
-
-bool IsDefaultAllocatorPartitionRootInitialized() {
-  // Even though zone registration is not thread-safe, let's not make it worse,
-  // and use acquire/release ordering.
-  return g_initialization_is_done.load(std::memory_order_acquire);
-}
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
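As a small aid for the zone juggling above, a standalone macOS-only sketch (not part of the shim) that lists the registered zones the same way GetDefaultMallocZone() finds them; the first entry returned by malloc_get_all_zones() is the zone malloc() currently dispatches to:

    #include <malloc/malloc.h>
    #include <mach/mach.h>
    #include <cstdio>

    int main() {
      vm_address_t* zones = nullptr;
      unsigned int zone_count = 0;
      kern_return_t result = malloc_get_all_zones(
          mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
      if (result != KERN_SUCCESS) {
        return 1;
      }
      for (unsigned int i = 0; i < zone_count; i++) {
        const malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
        std::printf("zone %u: %s%s\n", i,
                    zone->zone_name ? zone->zone_name : "(unnamed)",
                    i == 0 ? "  <- default" : "");
      }
      return 0;
    }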
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_mac_symbols.h b/base/allocator/partition_allocator/shim/allocator_shim_override_mac_symbols.h
deleted file mode 100644
index d10156c..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_mac_symbols.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
-
-#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"
-#include "base/allocator/partition_allocator/third_party/apple_apsl/malloc.h"
-
-namespace allocator_shim {
-
-MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
-  MallocZoneFunctions new_functions;
-  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
-  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
-    return ShimGetSizeEstimate(ptr, zone);
-  };
-  new_functions.claimed_address = [](malloc_zone_t* zone,
-                                     void* ptr) -> boolean_t {
-    return ShimClaimedAddress(ptr, zone);
-  };
-  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
-    return ShimMalloc(size, zone);
-  };
-  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
-                            size_t size) -> void* {
-    return ShimCalloc(n, size, zone);
-  };
-  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
-    return ShimValloc(size, zone);
-  };
-  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
-    ShimFree(ptr, zone);
-  };
-  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
-                             size_t size) -> void* {
-    return ShimRealloc(ptr, size, zone);
-  };
-  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
-                                  void** results,
-                                  unsigned num_requested) -> unsigned {
-    return ShimBatchMalloc(size, results, num_requested, zone);
-  };
-  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
-                                unsigned num_to_be_freed) -> void {
-    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
-  };
-  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
-                              size_t size) -> void* {
-    return ShimMemalign(alignment, size, zone);
-  };
-  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
-                                        size_t size) {
-    ShimFreeDefiniteSize(ptr, size, zone);
-  };
-  new_functions.try_free_default = [](malloc_zone_t* zone, void* ptr) {
-    ShimTryFreeDefault(ptr, zone);
-  };
-  return new_functions;
-}
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_override_ucrt_symbols_win.h b/base/allocator/partition_allocator/shim/allocator_shim_override_ucrt_symbols_win.h
deleted file mode 100644
index 9b45219..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_override_ucrt_symbols_win.h
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This header defines symbols to override the same functions in the Visual C++
-// CRT implementation.
-
-#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
-#error This header is meant to be included only once by allocator_shim.cc
-#endif
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
-
-#include <malloc.h>
-
-#include <windows.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
-
-// Even though most C++ allocation operators can be left alone since the
-// interception works at a lower level, these ones should be
-// overridden. Otherwise they redirect to malloc(), which is configured to crash
-// with an OOM in failure cases, such as allocation requests that are too large.
-SHIM_ALWAYS_EXPORT void* operator new(size_t size,
-                                      const std::nothrow_t&) noexcept {
-  return ShimCppNewNoThrow(size);
-}
-
-SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
-                                        const std::nothrow_t&) noexcept {
-  return ShimCppNewNoThrow(size);
-}
-
-extern "C" {
-
-void* (*malloc_unchecked)(size_t) = &allocator_shim::UncheckedAlloc;
-
-namespace {
-
-int win_new_mode = 0;
-
-}  // namespace
-
-// This function behaves similarly to MSVC's _set_new_mode.
-// If flag is 0 (default), calls to malloc will behave normally.
-// If flag is 1, calls to malloc will behave like calls to new,
-// and the std_new_handler will be invoked on failure.
-// Returns the previous mode.
-//
-// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
-int _set_new_mode(int flag) {
-  // The MS CRT calls this function early on in startup, so this serves as a low
-  // overhead proof that the allocator shim is in place for this process.
-  allocator_shim::g_is_win_shim_layer_initialized = true;
-  int old_mode = win_new_mode;
-  win_new_mode = flag;
-
-  allocator_shim::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);
-
-  return old_mode;
-}
-
-// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
-int _query_new_mode() {
-  return win_new_mode;
-}
-
-// These symbols override the CRT's implementation of the same functions.
-__declspec(restrict) void* malloc(size_t size) {
-  return ShimMalloc(size, nullptr);
-}
-
-void free(void* ptr) {
-  ShimFree(ptr, nullptr);
-}
-
-__declspec(restrict) void* realloc(void* ptr, size_t size) {
-  return ShimRealloc(ptr, size, nullptr);
-}
-
-__declspec(restrict) void* calloc(size_t n, size_t size) {
-  return ShimCalloc(n, size, nullptr);
-}
-
-// _msize() is the Windows equivalent of malloc_size().
-size_t _msize(void* memblock) {
-  return ShimGetSizeEstimate(memblock, nullptr);
-}
-
-__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
-  return ShimAlignedMalloc(size, alignment, nullptr);
-}
-
-__declspec(restrict) void* _aligned_realloc(void* address,
-                                            size_t size,
-                                            size_t alignment) {
-  return ShimAlignedRealloc(address, size, alignment, nullptr);
-}
-
-void _aligned_free(void* address) {
-  ShimAlignedFree(address, nullptr);
-}
-
-// _recalloc_base is called by CRT internally.
-__declspec(restrict) void* _recalloc_base(void* block,
-                                          size_t count,
-                                          size_t size) {
-  const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
-  partition_alloc::internal::base::CheckedNumeric<size_t>
-      new_block_size_checked = count;
-  new_block_size_checked *= size;
-  const size_t new_block_size = new_block_size_checked.ValueOrDie();
-
-  void* const new_block = realloc(block, new_block_size);
-
-  if (new_block != nullptr && old_block_size < new_block_size) {
-    memset(static_cast<char*>(new_block) + old_block_size, 0,
-           new_block_size - old_block_size);
-  }
-
-  return new_block;
-}
-
-__declspec(restrict) void* _malloc_base(size_t size) {
-  return malloc(size);
-}
-
-__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
-  return calloc(n, size);
-}
-
-void _free_base(void* block) {
-  free(block);
-}
-
-__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
-  return _recalloc_base(block, count, size);
-}
-
-// The following uncommon _aligned_* routines are not used in Chromium and have
-// been shimmed to immediately crash to ensure that implementations are added if
-// uses are introduced.
-__declspec(restrict) void* _aligned_recalloc(void* address,
-                                             size_t num,
-                                             size_t size,
-                                             size_t alignment) {
-  PA_CHECK(false) << "This routine has not been implemented";
-  __builtin_unreachable();
-}
-
-size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
-  PA_CHECK(false) << "This routine has not been implemented";
-  __builtin_unreachable();
-}
-
-__declspec(restrict) void* _aligned_offset_malloc(size_t size,
-                                                  size_t alignment,
-                                                  size_t offset) {
-  PA_CHECK(false) << "This routine has not been implemented";
-  __builtin_unreachable();
-}
-
-__declspec(restrict) void* _aligned_offset_realloc(void* address,
-                                                   size_t size,
-                                                   size_t alignment,
-                                                   size_t offset) {
-  PA_CHECK(false) << "This routine has not been implemented";
-  __builtin_unreachable();
-}
-
-__declspec(restrict) void* _aligned_offset_recalloc(void* address,
-                                                    size_t num,
-                                                    size_t size,
-                                                    size_t alignment,
-                                                    size_t offset) {
-  PA_CHECK(false) << "This routine has not been implemented";
-  __builtin_unreachable();
-}
-
-}  // extern "C"
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
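As a usage illustration of the _set_new_mode() semantics documented above (Windows only, not part of the shim): with mode 1, a failing malloc() goes through the new-handler machinery, so a handler installed via std::set_new_handler gets a chance to run.

    #include <malloc.h>
    #include <new.h>

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    void OnAllocationFailure() {
      std::fputs("allocation failed, new handler invoked\n", stderr);
      std::abort();  // give up rather than retrying forever
    }

    int main() {
      std::set_new_handler(&OnAllocationFailure);
      _set_new_mode(1);  // route malloc() failures through the new handler
      void* ptr = malloc(static_cast<size_t>(-1) / 2);  // almost certainly fails
      free(ptr);
      return 0;
    }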
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc b/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc
deleted file mode 100644
index 697e9a8..0000000
--- a/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc
+++ /dev/null
@@ -1,791 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <atomic>
-#include <iomanip>
-#include <memory>
-#include <new>
-#include <sstream>
-#include <vector>
-
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/page_size.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/platform_thread.h"
-#include "build/build_config.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <malloc.h>
-#include <windows.h>
-#elif BUILDFLAG(IS_APPLE)
-#include <malloc/malloc.h>
-
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-#include "base/allocator/partition_allocator/third_party/apple_apsl/malloc.h"
-#else
-#include <malloc.h>
-#endif
-
-#if !BUILDFLAG(IS_WIN)
-#include <unistd.h>
-#endif
-
-#if defined(LIBC_GLIBC)
-extern "C" void* __libc_memalign(size_t align, size_t s);
-#endif
-
-namespace allocator_shim {
-namespace {
-
-using testing::_;
-using testing::MockFunction;
-
-// Special sentinel values used for testing GetSizeEstimate() interception.
-const char kTestSizeEstimateData[] = "test_value";
-constexpr void* kTestSizeEstimateAddress = (void*)kTestSizeEstimateData;
-constexpr size_t kTestSizeEstimate = 1234;
-
-class AllocatorShimTest : public testing::Test {
- public:
-  AllocatorShimTest() : testing::Test() {}
-
-  static size_t Hash(const void* ptr) {
-    return reinterpret_cast<uintptr_t>(ptr) % MaxSizeTracked();
-  }
-
-  static void* MockAlloc(const AllocatorDispatch* self,
-                         size_t size,
-                         void* context) {
-    if (instance_ && size < MaxSizeTracked()) {
-      ++(instance_->allocs_intercepted_by_size[size]);
-    }
-    return self->next->alloc_function(self->next, size, context);
-  }
-
-  static void* MockAllocUnchecked(const AllocatorDispatch* self,
-                                  size_t size,
-                                  void* context) {
-    if (instance_ && size < MaxSizeTracked()) {
-      ++(instance_->allocs_intercepted_by_size[size]);
-    }
-    return self->next->alloc_unchecked_function(self->next, size, context);
-  }
-
-  static void* MockAllocZeroInit(const AllocatorDispatch* self,
-                                 size_t n,
-                                 size_t size,
-                                 void* context) {
-    const size_t real_size = n * size;
-    if (instance_ && real_size < MaxSizeTracked()) {
-      ++(instance_->zero_allocs_intercepted_by_size[real_size]);
-    }
-    return self->next->alloc_zero_initialized_function(self->next, n, size,
-                                                       context);
-  }
-
-  static void* MockAllocAligned(const AllocatorDispatch* self,
-                                size_t alignment,
-                                size_t size,
-                                void* context) {
-    if (instance_) {
-      if (size < MaxSizeTracked()) {
-        ++(instance_->aligned_allocs_intercepted_by_size[size]);
-      }
-      if (alignment < MaxSizeTracked()) {
-        ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
-      }
-    }
-    return self->next->alloc_aligned_function(self->next, alignment, size,
-                                              context);
-  }
-
-  static void* MockRealloc(const AllocatorDispatch* self,
-                           void* address,
-                           size_t size,
-                           void* context) {
-    if (instance_) {
-      // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
-      // The first time it is hit, the allocation fails, which triggers the
-      // invocation of the std::new_handler.
-      if (size == 0xFEED) {
-        thread_local bool did_fail_realloc_0xfeed_once = false;
-        if (!did_fail_realloc_0xfeed_once) {
-          did_fail_realloc_0xfeed_once = true;
-          return nullptr;
-        }
-        return address;
-      }
-
-      if (size < MaxSizeTracked()) {
-        ++(instance_->reallocs_intercepted_by_size[size]);
-      }
-      ++instance_->reallocs_intercepted_by_addr[Hash(address)];
-    }
-    return self->next->realloc_function(self->next, address, size, context);
-  }
-
-  static void MockFree(const AllocatorDispatch* self,
-                       void* address,
-                       void* context) {
-    if (instance_) {
-      ++instance_->frees_intercepted_by_addr[Hash(address)];
-    }
-    self->next->free_function(self->next, address, context);
-  }
-
-  static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
-                                    void* address,
-                                    void* context) {
-    // Special testing values for GetSizeEstimate() interception.
-    if (address == kTestSizeEstimateAddress) {
-      return kTestSizeEstimate;
-    }
-    return self->next->get_size_estimate_function(self->next, address, context);
-  }
-
-  static bool MockClaimedAddress(const AllocatorDispatch* self,
-                                 void* address,
-                                 void* context) {
-    // The same as MockGetSizeEstimate.
-    if (address == kTestSizeEstimateAddress) {
-      return true;
-    }
-    return self->next->claimed_address_function(self->next, address, context);
-  }
-
-  static unsigned MockBatchMalloc(const AllocatorDispatch* self,
-                                  size_t size,
-                                  void** results,
-                                  unsigned num_requested,
-                                  void* context) {
-    if (instance_) {
-      instance_->batch_mallocs_intercepted_by_size[size] =
-          instance_->batch_mallocs_intercepted_by_size[size] + num_requested;
-    }
-    return self->next->batch_malloc_function(self->next, size, results,
-                                             num_requested, context);
-  }
-
-  static void MockBatchFree(const AllocatorDispatch* self,
-                            void** to_be_freed,
-                            unsigned num_to_be_freed,
-                            void* context) {
-    if (instance_) {
-      for (unsigned i = 0; i < num_to_be_freed; ++i) {
-        ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
-      }
-    }
-    self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
-                                    context);
-  }
-
-  static void MockFreeDefiniteSize(const AllocatorDispatch* self,
-                                   void* ptr,
-                                   size_t size,
-                                   void* context) {
-    if (instance_) {
-      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
-      ++instance_->free_definite_sizes_intercepted_by_size[size];
-    }
-    self->next->free_definite_size_function(self->next, ptr, size, context);
-  }
-
-  static void MockTryFreeDefault(const AllocatorDispatch* self,
-                                 void* ptr,
-                                 void* context) {
-    if (instance_) {
-      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
-    }
-    self->next->try_free_default_function(self->next, ptr, context);
-  }
-
-  static void* MockAlignedMalloc(const AllocatorDispatch* self,
-                                 size_t size,
-                                 size_t alignment,
-                                 void* context) {
-    if (instance_ && size < MaxSizeTracked()) {
-      ++instance_->aligned_mallocs_intercepted_by_size[size];
-    }
-    return self->next->aligned_malloc_function(self->next, size, alignment,
-                                               context);
-  }
-
-  static void* MockAlignedRealloc(const AllocatorDispatch* self,
-                                  void* address,
-                                  size_t size,
-                                  size_t alignment,
-                                  void* context) {
-    if (instance_) {
-      if (size < MaxSizeTracked()) {
-        ++instance_->aligned_reallocs_intercepted_by_size[size];
-      }
-      ++instance_->aligned_reallocs_intercepted_by_addr[Hash(address)];
-    }
-    return self->next->aligned_realloc_function(self->next, address, size,
-                                                alignment, context);
-  }
-
-  static void MockAlignedFree(const AllocatorDispatch* self,
-                              void* address,
-                              void* context) {
-    if (instance_) {
-      ++instance_->aligned_frees_intercepted_by_addr[Hash(address)];
-    }
-    self->next->aligned_free_function(self->next, address, context);
-  }
-
-  static void NewHandler() {
-    if (!instance_) {
-      return;
-    }
-    instance_->num_new_handler_calls.fetch_add(1, std::memory_order_relaxed);
-  }
-
-  int32_t GetNumberOfNewHandlerCalls() {
-    return instance_->num_new_handler_calls.load(std::memory_order_acquire);
-  }
-
-  void SetUp() override {
-    allocs_intercepted_by_size.resize(MaxSizeTracked());
-    zero_allocs_intercepted_by_size.resize(MaxSizeTracked());
-    aligned_allocs_intercepted_by_size.resize(MaxSizeTracked());
-    aligned_allocs_intercepted_by_alignment.resize(MaxSizeTracked());
-    reallocs_intercepted_by_size.resize(MaxSizeTracked());
-    reallocs_intercepted_by_addr.resize(MaxSizeTracked());
-    frees_intercepted_by_addr.resize(MaxSizeTracked());
-    batch_mallocs_intercepted_by_size.resize(MaxSizeTracked());
-    batch_frees_intercepted_by_addr.resize(MaxSizeTracked());
-    free_definite_sizes_intercepted_by_size.resize(MaxSizeTracked());
-    aligned_mallocs_intercepted_by_size.resize(MaxSizeTracked());
-    aligned_reallocs_intercepted_by_size.resize(MaxSizeTracked());
-    aligned_reallocs_intercepted_by_addr.resize(MaxSizeTracked());
-    aligned_frees_intercepted_by_addr.resize(MaxSizeTracked());
-    num_new_handler_calls.store(0, std::memory_order_release);
-    instance_ = this;
-
-#if BUILDFLAG(IS_APPLE)
-    InitializeAllocatorShim();
-#endif
-  }
-
-  void TearDown() override {
-    instance_ = nullptr;
-#if BUILDFLAG(IS_APPLE)
-    UninterceptMallocZonesForTesting();
-#endif
-  }
-
-  static size_t MaxSizeTracked() {
-#if BUILDFLAG(IS_IOS)
-    // TODO(crbug.com/1077271): 64-bit iOS uses a page size that is larger than
-    // SystemPageSize(), causing this test to make larger allocations, relative
-    // to SystemPageSize().
-    return 6 * partition_alloc::internal::SystemPageSize();
-#else
-    return 2 * partition_alloc::internal::SystemPageSize();
-#endif
-  }
-
- protected:
-  std::vector<size_t> allocs_intercepted_by_size;
-  std::vector<size_t> zero_allocs_intercepted_by_size;
-  std::vector<size_t> aligned_allocs_intercepted_by_size;
-  std::vector<size_t> aligned_allocs_intercepted_by_alignment;
-  std::vector<size_t> reallocs_intercepted_by_size;
-  std::vector<size_t> reallocs_intercepted_by_addr;
-  std::vector<size_t> frees_intercepted_by_addr;
-  std::vector<size_t> batch_mallocs_intercepted_by_size;
-  std::vector<size_t> batch_frees_intercepted_by_addr;
-  std::vector<size_t> free_definite_sizes_intercepted_by_size;
-  std::vector<size_t> aligned_mallocs_intercepted_by_size;
-  std::vector<size_t> aligned_reallocs_intercepted_by_size;
-  std::vector<size_t> aligned_reallocs_intercepted_by_addr;
-  std::vector<size_t> aligned_frees_intercepted_by_addr;
-  std::atomic<uint32_t> num_new_handler_calls;
-
- private:
-  static AllocatorShimTest* instance_;
-};
-
-struct TestStruct1 {
-  uint32_t ignored;
-  uint8_t ignored_2;
-};
-
-struct TestStruct2 {
-  uint64_t ignored;
-  uint8_t ignored_3;
-};
-
-class ThreadDelegateForNewHandlerTest : public base::PlatformThread::Delegate {
- public:
-  explicit ThreadDelegateForNewHandlerTest(base::WaitableEvent* event)
-      : event_(event) {}
-
-  void ThreadMain() override {
-    event_->Wait();
-    void* temp = malloc(1);
-    void* res = realloc(temp, 0xFEED);
-    EXPECT_EQ(temp, res);
-  }
-
- private:
-  base::WaitableEvent* event_;
-};
-
-AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;
-
-AllocatorDispatch g_mock_dispatch = {
-    &AllocatorShimTest::MockAlloc,          /* alloc_function */
-    &AllocatorShimTest::MockAllocUnchecked, /* alloc_unchecked_function */
-    &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
-    &AllocatorShimTest::MockAllocAligned,  /* alloc_aligned_function */
-    &AllocatorShimTest::MockRealloc,       /* realloc_function */
-    &AllocatorShimTest::MockFree,          /* free_function */
-    &AllocatorShimTest::MockGetSizeEstimate,  /* get_size_estimate_function */
-    &AllocatorShimTest::MockClaimedAddress,   /* claimed_address_function */
-    &AllocatorShimTest::MockBatchMalloc,      /* batch_malloc_function */
-    &AllocatorShimTest::MockBatchFree,        /* batch_free_function */
-    &AllocatorShimTest::MockFreeDefiniteSize, /* free_definite_size_function */
-    &AllocatorShimTest::MockTryFreeDefault,   /* try_free_default_function */
-    &AllocatorShimTest::MockAlignedMalloc,    /* aligned_malloc_function */
-    &AllocatorShimTest::MockAlignedRealloc,   /* aligned_realloc_function */
-    &AllocatorShimTest::MockAlignedFree,      /* aligned_free_function */
-    nullptr,                                  /* next */
-};
-
-TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
-  InsertAllocatorDispatch(&g_mock_dispatch);
-
-  void* alloc_ptr = malloc(19);
-  ASSERT_NE(nullptr, alloc_ptr);
-  ASSERT_GE(allocs_intercepted_by_size[19], 1u);
-
-  void* zero_alloc_ptr = calloc(2, 23);
-  ASSERT_NE(nullptr, zero_alloc_ptr);
-  ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);
-
-#if !BUILDFLAG(IS_WIN)
-  void* posix_memalign_ptr = nullptr;
-  int res = posix_memalign(&posix_memalign_ptr, 256, 59);
-  ASSERT_EQ(0, res);
-  ASSERT_NE(nullptr, posix_memalign_ptr);
-  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
-  ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
-  ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);
-
-  // (p)valloc() are not defined on Android. pvalloc() is a GNU extension,
-  // valloc() is not in POSIX.
-#if !BUILDFLAG(IS_ANDROID)
-  const size_t kPageSize = partition_alloc::internal::base::GetPageSize();
-  void* valloc_ptr = valloc(61);
-  ASSERT_NE(nullptr, valloc_ptr);
-  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
-  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
-  ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);
-#endif  // !BUILDFLAG(IS_ANDROID)
-
-#endif  // !BUILDFLAG(IS_WIN)
-
-#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
-  void* memalign_ptr = memalign(128, 53);
-  ASSERT_NE(nullptr, memalign_ptr);
-  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
-  ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
-  ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
-
-#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
-  void* pvalloc_ptr = pvalloc(67);
-  ASSERT_NE(nullptr, pvalloc_ptr);
-  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
-  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
-  // pvalloc rounds the size up to the next page.
-  ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);
-#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
-
-#endif  // !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
-
-// See allocator_shim_override_glibc_weak_symbols.h for why we intercept
-// internal libc symbols.
-#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  void* libc_memalign_ptr = __libc_memalign(512, 56);
-  ASSERT_NE(nullptr, libc_memalign_ptr);
-  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(libc_memalign_ptr) % 512);
-  ASSERT_GE(aligned_allocs_intercepted_by_alignment[512], 1u);
-  ASSERT_GE(aligned_allocs_intercepted_by_size[56], 1u);
-#endif
-
-  char* realloc_ptr = static_cast<char*>(malloc(10));
-  strcpy(realloc_ptr, "foobar");
-  void* old_realloc_ptr = realloc_ptr;
-  realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
-  ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
-  ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u);
-  ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));
-
-  free(alloc_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);
-
-  free(zero_alloc_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);
-
-#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
-  free(memalign_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);
-
-#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
-  free(pvalloc_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
-#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
-
-#endif  // !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
-
-#if !BUILDFLAG(IS_WIN)
-  free(posix_memalign_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);
-
-#if !BUILDFLAG(IS_ANDROID)
-  free(valloc_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);
-#endif  // !BUILDFLAG(IS_ANDROID)
-
-#endif  // !BUILDFLAG(IS_WIN)
-
-#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  free(libc_memalign_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(libc_memalign_ptr)], 1u);
-#endif
-
-  free(realloc_ptr);
-  ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);
-
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-
-  void* non_hooked_ptr = malloc(4095);
-  ASSERT_NE(nullptr, non_hooked_ptr);
-  ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
-  free(non_hooked_ptr);
-}
-
-// PartitionAlloc-Everywhere does not support batch_malloc / batch_free.
-#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
-  InsertAllocatorDispatch(&g_mock_dispatch);
-
-  unsigned count = 13;
-  std::vector<void*> results;
-  results.resize(count);
-  unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
-                                                   results.data(), count);
-  ASSERT_EQ(count, result_count);
-
-  // TODO(erikchen): On macOS 10.12+, batch_malloc in the default zone may
-  // forward to another zone, which we've also shimmed, resulting in
-  // MockBatchMalloc getting called twice as often as we'd expect. This
-  // re-entrancy into the allocator shim is a bug that needs to be fixed.
-  // https://crbug.com/693237.
-  // ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
-
-  std::vector<void*> results_copy(results);
-  malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
-  for (void* result : results_copy) {
-    ASSERT_GE(batch_frees_intercepted_by_addr[Hash(result)], 1u);
-  }
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-}
-
-TEST_F(AllocatorShimTest, InterceptLibcSymbolsFreeDefiniteSize) {
-  InsertAllocatorDispatch(&g_mock_dispatch);
-
-  void* alloc_ptr = malloc(19);
-  ASSERT_NE(nullptr, alloc_ptr);
-  ASSERT_GE(allocs_intercepted_by_size[19], 1u);
-
-  ChromeMallocZone* default_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  default_zone->free_definite_size(malloc_default_zone(), alloc_ptr, 19);
-  ASSERT_GE(free_definite_sizes_intercepted_by_size[19], 1u);
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-}
-#endif  // BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-#if BUILDFLAG(IS_WIN)
-TEST_F(AllocatorShimTest, InterceptUcrtAlignedAllocationSymbols) {
-  InsertAllocatorDispatch(&g_mock_dispatch);
-
-  constexpr size_t kAlignment = 32;
-  void* alloc_ptr = _aligned_malloc(123, kAlignment);
-  EXPECT_GE(aligned_mallocs_intercepted_by_size[123], 1u);
-
-  void* new_alloc_ptr = _aligned_realloc(alloc_ptr, 1234, kAlignment);
-  EXPECT_GE(aligned_reallocs_intercepted_by_size[1234], 1u);
-  EXPECT_GE(aligned_reallocs_intercepted_by_addr[Hash(alloc_ptr)], 1u);
-
-  _aligned_free(new_alloc_ptr);
-  EXPECT_GE(aligned_frees_intercepted_by_addr[Hash(new_alloc_ptr)], 1u);
-
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-}
-
-TEST_F(AllocatorShimTest, AlignedReallocSizeZeroFrees) {
-  void* alloc_ptr = _aligned_malloc(123, 16);
-  ASSERT_TRUE(alloc_ptr);
-  alloc_ptr = _aligned_realloc(alloc_ptr, 0, 16);
-  ASSERT_TRUE(!alloc_ptr);
-}
-#endif  // BUILDFLAG(IS_WIN)
-
-TEST_F(AllocatorShimTest, InterceptCppSymbols) {
-  InsertAllocatorDispatch(&g_mock_dispatch);
-
-  TestStruct1* new_ptr = new TestStruct1;
-  ASSERT_NE(nullptr, new_ptr);
-  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);
-
-  TestStruct1* new_array_ptr = new TestStruct1[3];
-  ASSERT_NE(nullptr, new_array_ptr);
-  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);
-
-  TestStruct2* new_nt_ptr = new (std::nothrow) TestStruct2;
-  ASSERT_NE(nullptr, new_nt_ptr);
-  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2)], 1u);
-
-  TestStruct2* new_array_nt_ptr = new TestStruct2[3];
-  ASSERT_NE(nullptr, new_array_nt_ptr);
-  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2) * 3], 1u);
-
-  delete new_ptr;
-  ASSERT_GE(frees_intercepted_by_addr[Hash(new_ptr)], 1u);
-
-  delete[] new_array_ptr;
-  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_ptr)], 1u);
-
-  delete new_nt_ptr;
-  ASSERT_GE(frees_intercepted_by_addr[Hash(new_nt_ptr)], 1u);
-
-  delete[] new_array_nt_ptr;
-  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);
-
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-}
-
-// PartitionAlloc disallows large allocations to avoid errors with int
-// overflows.
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-struct TooLarge {
-  char padding1[1UL << 31];
-  int padding2;
-};
-
-TEST_F(AllocatorShimTest, NewNoThrowTooLarge) {
-  char* too_large_array = new (std::nothrow) char[(1UL << 31) + 100];
-  EXPECT_EQ(nullptr, too_large_array);
-
-  TooLarge* too_large_struct = new (std::nothrow) TooLarge;
-  EXPECT_EQ(nullptr, too_large_struct);
-}
-#endif
-
-// This test exercises the case of concurrent OOM failure, which would end up
-// invoking std::new_handler concurrently. This is to cover the CallNewHandler()
-// paths of allocator_shim.cc and smoke-test its thread safety.
-// The test creates kNumThreads threads. Each of them mallocs some memory, and
-// then does a realloc(<new memory>, 0xFEED).
-// The shim intercepts such realloc and makes it fail only once on each thread.
-// We expect to see exactly kNumThreads invocations of the new_handler.
-TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
-  const int kNumThreads = 32;
-  base::PlatformThreadHandle threads[kNumThreads];
-
-  // The WaitableEvent here is used to attempt to trigger all the threads at
-  // the same time, after they have been initialized.
-  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
-                            base::WaitableEvent::InitialState::NOT_SIGNALED);
-
-  ThreadDelegateForNewHandlerTest mock_thread_main(&event);
-
-  for (auto& thread : threads) {
-    base::PlatformThread::Create(0, &mock_thread_main, &thread);
-  }
-
-  std::set_new_handler(&AllocatorShimTest::NewHandler);
-  SetCallNewHandlerOnMallocFailure(true);  // It's going to fail on realloc().
-  InsertAllocatorDispatch(&g_mock_dispatch);
-  event.Signal();
-  for (auto& thread : threads) {
-    base::PlatformThread::Join(thread);
-  }
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-  ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
-}
-
-#if BUILDFLAG(IS_WIN)
-TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
-  ASSERT_EQ(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
-}
-#endif  // BUILDFLAG(IS_WIN)
-
-#if BUILDFLAG(IS_WIN)
-static size_t GetUsableSize(void* ptr) {
-  return _msize(ptr);
-}
-#elif BUILDFLAG(IS_APPLE)
-static size_t GetUsableSize(void* ptr) {
-  return malloc_size(ptr);
-}
-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
-static size_t GetUsableSize(void* ptr) {
-  return malloc_usable_size(ptr);
-}
-#else
-#define NO_MALLOC_SIZE
-#endif
-
-#if !defined(NO_MALLOC_SIZE)
-TEST_F(AllocatorShimTest, ShimReplacesMallocSizeWhenEnabled) {
-  InsertAllocatorDispatch(&g_mock_dispatch);
-  EXPECT_EQ(GetUsableSize(kTestSizeEstimateAddress), kTestSizeEstimate);
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-}
-
-TEST_F(AllocatorShimTest, ShimDoesntChangeMallocSizeWhenEnabled) {
-  void* alloc = malloc(16);
-  size_t sz = GetUsableSize(alloc);
-  EXPECT_GE(sz, 16U);
-
-  InsertAllocatorDispatch(&g_mock_dispatch);
-  EXPECT_EQ(GetUsableSize(alloc), sz);
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-
-  free(alloc);
-}
-#endif  // !defined(NO_MALLOC_SIZE)
-
-#if BUILDFLAG(IS_ANDROID)
-TEST_F(AllocatorShimTest, InterceptCLibraryFunctions) {
-  auto total_counts = [](const std::vector<size_t>& counts) {
-    size_t total = 0;
-    for (const auto count : counts) {
-      total += count;
-    }
-    return total;
-  };
-  size_t counts_before;
-  size_t counts_after = total_counts(allocs_intercepted_by_size);
-  void* ptr;
-
-  InsertAllocatorDispatch(&g_mock_dispatch);
-
-  // <stdlib.h>
-  counts_before = counts_after;
-  ptr = realpath(".", nullptr);
-  EXPECT_NE(nullptr, ptr);
-  free(ptr);
-  counts_after = total_counts(allocs_intercepted_by_size);
-  EXPECT_GT(counts_after, counts_before);
-
-  // <string.h>
-  counts_before = counts_after;
-  ptr = strdup("hello, world");
-  EXPECT_NE(nullptr, ptr);
-  free(ptr);
-  counts_after = total_counts(allocs_intercepted_by_size);
-  EXPECT_GT(counts_after, counts_before);
-
-  counts_before = counts_after;
-  ptr = strndup("hello, world", 5);
-  EXPECT_NE(nullptr, ptr);
-  free(ptr);
-  counts_after = total_counts(allocs_intercepted_by_size);
-  EXPECT_GT(counts_after, counts_before);
-
-  // <unistd.h>
-  counts_before = counts_after;
-  ptr = getcwd(nullptr, 0);
-  EXPECT_NE(nullptr, ptr);
-  free(ptr);
-  counts_after = total_counts(allocs_intercepted_by_size);
-  EXPECT_GT(counts_after, counts_before);
-
-  // With component builds on Android, we cannot intercept calls to functions
-  // inside another component, in this instance the call to vasprintf() inside
-  // libc++. This is not necessarily an issue for allocator shims, as long as we
-  // accept that allocations and deallocations will not be matched at all times.
-  // It is, however, essential for PartitionAlloc, which is exercised in the test
-  // below.
-#ifndef COMPONENT_BUILD
-  // Calls vasprintf() indirectly, see below.
-  counts_before = counts_after;
-  std::stringstream stream;
-  stream << std::setprecision(1) << std::showpoint << std::fixed << 1.e38;
-  EXPECT_GT(stream.str().size(), 30u);
-  counts_after = total_counts(allocs_intercepted_by_size);
-  EXPECT_GT(counts_after, counts_before);
-#endif  // COMPONENT_BUILD
-
-  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
-}
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-// Non-regression test for crbug.com/1166558.
-TEST_F(AllocatorShimTest, InterceptVasprintf) {
-  // Printing a float which expands to >=30 characters calls vasprintf() in
-  // libc, which we should intercept.
-  std::stringstream stream;
-  stream << std::setprecision(1) << std::showpoint << std::fixed << 1.e38;
-  EXPECT_GT(stream.str().size(), 30u);
-  // Should not crash.
-}
-
-TEST_F(AllocatorShimTest, InterceptLongVasprintf) {
-  char* str = nullptr;
-  const char* lorem_ipsum =
-      "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed non risus. "
-      "Suspendisse lectus tortor, dignissim sit amet, adipiscing nec, "
-      "ultricies sed, dolor. Cras elementum ultrices diam. Maecenas ligula "
-      "massa, varius a, semper congue, euismod non, mi. Proin porttitor, orci "
-      "nec nonummy molestie, enim est eleifend mi, non fermentum diam nisl sit "
-      "amet erat. Duis semper. Duis arcu massa, scelerisque vitae, consequat "
-      "in, pretium a, enim. Pellentesque congue. Ut in risus volutpat libero "
-      "pharetra tempor. Cras vestibulum bibendum augue. Praesent egestas leo "
-      "in pede. Praesent blandit odio eu enim. Pellentesque sed dui ut augue "
-      "blandit sodales. Vestibulum ante ipsum primis in faucibus orci luctus "
-      "et ultrices posuere cubilia Curae; Aliquam nibh. Mauris ac mauris sed "
-      "pede pellentesque fermentum. Maecenas adipiscing ante non diam sodales "
-      "hendrerit.";
-  int err = asprintf(&str, "%s", lorem_ipsum);
-  EXPECT_EQ(err, static_cast<int>(strlen(lorem_ipsum)));
-  EXPECT_TRUE(str);
-  free(str);
-}
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-#endif  // BUILDFLAG(IS_ANDROID)
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
-
-// Non-regression test for crbug.com/1291885.
-TEST_F(AllocatorShimTest, BatchMalloc) {
-  constexpr unsigned kNumToAllocate = 20;
-  void* pointers[kNumToAllocate];
-
-  EXPECT_EQ(kNumToAllocate, malloc_zone_batch_malloc(malloc_default_zone(), 10,
-                                                     pointers, kNumToAllocate));
-  malloc_zone_batch_free(malloc_default_zone(), pointers, kNumToAllocate);
-  // Should not crash.
-}
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
-
-}  // namespace
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/shim/early_zone_registration_constants.h b/base/allocator/partition_allocator/shim/early_zone_registration_constants.h
deleted file mode 100644
index 25fcfb3..0000000
--- a/base/allocator/partition_allocator/shim/early_zone_registration_constants.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_EARLY_ZONE_REGISTRATION_CONSTANTS_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_EARLY_ZONE_REGISTRATION_CONSTANTS_H_
-
-// This is an Apple-only file, used to register PartitionAlloc's zone *before*
-// the process becomes multi-threaded. These constants are shared between the
-// allocator shim which installs the PartitionAlloc's malloc zone and the
-// application which installs the "early malloc zone" to reserve the zone slot.
-
-namespace allocator_shim {
-
-static constexpr char kDelegatingZoneName[] =
-    "DelegatingDefaultZoneForPartitionAlloc";
-static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";
-
-// Zone version. Determines which callbacks are set in the various malloc_zone_t
-// structs.
-#if (__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
-    (__IPHONE_OS_VERSION_MAX_ALLOWED >= 160100)
-#define PA_TRY_FREE_DEFAULT_IS_AVAILABLE 1
-#endif
-#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
-constexpr int kZoneVersion = 13;
-#else
-constexpr int kZoneVersion = 9;
-#endif
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_EARLY_ZONE_REGISTRATION_CONSTANTS_H_
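A hedged sketch of how these shared constants might be consulted; the helper below is hypothetical and not part of the deleted header. An embedder walking the registered malloc zones could recognize the reserved delegating zone by name:

#include <malloc/malloc.h>
#include <string.h>

// Hypothetical helper, for illustration only: true if |zone| is the
// placeholder zone registered early to reserve the default-zone slot.
bool IsDelegatingZoneSketch(const malloc_zone_t* zone) {
  return zone->zone_name != nullptr &&
         strcmp(zone->zone_name, allocator_shim::kDelegatingZoneName) == 0;
}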
diff --git a/base/allocator/partition_allocator/shim/malloc_zone_functions_mac.cc b/base/allocator/partition_allocator/shim/malloc_zone_functions_mac.cc
deleted file mode 100644
index e80aa03..0000000
--- a/base/allocator/partition_allocator/shim/malloc_zone_functions_mac.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"
-
-#include <atomic>
-#include <type_traits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-namespace allocator_shim {
-
-MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
-static_assert(std::is_pod<MallocZoneFunctions>::value,
-              "MallocZoneFunctions must be POD");
-
-void StoreZoneFunctions(const ChromeMallocZone* zone,
-                        MallocZoneFunctions* functions) {
-  memset(functions, 0, sizeof(MallocZoneFunctions));
-  functions->malloc = zone->malloc;
-  functions->calloc = zone->calloc;
-  functions->valloc = zone->valloc;
-  functions->free = zone->free;
-  functions->realloc = zone->realloc;
-  functions->size = zone->size;
-  PA_BASE_CHECK(functions->malloc && functions->calloc && functions->valloc &&
-                functions->free && functions->realloc && functions->size);
-
-  // These functions might be nullptr.
-  functions->batch_malloc = zone->batch_malloc;
-  functions->batch_free = zone->batch_free;
-
-  if (zone->version >= 5) {
-    // Not all custom malloc zones have a memalign.
-    functions->memalign = zone->memalign;
-  }
-  if (zone->version >= 6) {
-    // This may be nullptr.
-    functions->free_definite_size = zone->free_definite_size;
-  }
-  if (zone->version >= 10) {
-    functions->claimed_address = zone->claimed_address;
-  }
-  if (zone->version >= 13) {
-    functions->try_free_default = zone->try_free_default;
-  }
-
-  // Note that zone version 8 introduced a pressure relief callback, and version
-  // 10 introduced a claimed address callback, but neither is an allocation or
-  // deallocation callback and so isn't important to intercept.
-
-  functions->context = zone;
-}
-
-namespace {
-
-// All modifications to g_malloc_zones are gated behind this lock.
-// Dispatch to a malloc zone does not need to acquire this lock.
-partition_alloc::internal::Lock& GetLock() {
-  static partition_alloc::internal::Lock s_lock;
-  return s_lock;
-}
-
-void EnsureMallocZonesInitializedLocked() {
-  GetLock().AssertAcquired();
-}
-
-int g_zone_count = 0;
-
-bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
-  EnsureMallocZonesInitializedLocked();
-  for (int i = 0; i < g_zone_count; ++i) {
-    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-}  // namespace
-
-bool StoreMallocZone(ChromeMallocZone* zone) {
-  partition_alloc::internal::ScopedGuard guard(GetLock());
-  if (IsMallocZoneAlreadyStoredLocked(zone)) {
-    return false;
-  }
-
-  if (g_zone_count == kMaxZoneCount) {
-    return false;
-  }
-
-  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
-  ++g_zone_count;
-
-  // No other thread can possibly see these stores at this point. The code that
-  // reads these values is triggered after this function returns, so we want to
-  // guarantee that they are committed at this stage.
-  std::atomic_thread_fence(std::memory_order_seq_cst);
-  return true;
-}
-
-bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
-  partition_alloc::internal::ScopedGuard guard(GetLock());
-  return IsMallocZoneAlreadyStoredLocked(zone);
-}
-
-bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
-                                 const MallocZoneFunctions* functions) {
-  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
-}
-
-int GetMallocZoneCountForTesting() {
-  partition_alloc::internal::ScopedGuard guard(GetLock());
-  return g_zone_count;
-}
-
-void ClearAllMallocZonesForTesting() {
-  partition_alloc::internal::ScopedGuard guard(GetLock());
-  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
-  g_zone_count = 0;
-}
-
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h b/base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h
deleted file mode 100644
index 05c331b..0000000
--- a/base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_MALLOC_ZONE_FUNCTIONS_MAC_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_MALLOC_ZONE_FUNCTIONS_MAC_H_
-
-#include <malloc/malloc.h>
-#include <stddef.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/third_party/apple_apsl/malloc.h"
-
-namespace allocator_shim {
-
-typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
-typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
-                             size_t num_items,
-                             size_t size);
-typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
-typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
-typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
-                              void* ptr,
-                              size_t size);
-typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
-                               size_t alignment,
-                               size_t size);
-typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
-                                      size_t size,
-                                      void** results,
-                                      unsigned num_requested);
-typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
-                                void** to_be_freed,
-                                unsigned num_to_be_freed);
-typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
-                                        void* ptr,
-                                        size_t size);
-typedef void (*try_free_default_type)(struct _malloc_zone_t* zone, void* ptr);
-typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);
-typedef boolean_t (*claimed_address_type)(struct _malloc_zone_t* zone,
-                                          void* ptr);
-
-struct MallocZoneFunctions {
-  malloc_type malloc;
-  calloc_type calloc;
-  valloc_type valloc;
-  free_type free;
-  realloc_type realloc;
-  memalign_type memalign;
-  batch_malloc_type batch_malloc;
-  batch_free_type batch_free;
-  free_definite_size_type free_definite_size;
-  try_free_default_type try_free_default;
-  size_fn_type size;
-  claimed_address_type claimed_address;
-  const ChromeMallocZone* context;
-};
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void StoreZoneFunctions(const ChromeMallocZone* zone,
-                        MallocZoneFunctions* functions);
-static constexpr int kMaxZoneCount = 30;
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
-
-// The array g_malloc_zones stores all information about malloc zones before
-// they are shimmed. This information needs to be accessed during dispatch back
-// into the zone, and additional zones may be added later in the execution of
-// the program, so the array needs to be both thread-safe and high-performance.
-//
-// We begin by creating an array of MallocZoneFunctions of fixed size. We will
-// never modify the container, which provides thread-safety to iterators.  When
-// we want to add a MallocZoneFunctions to the container, we:
-//   1. Fill in all the fields.
-//   2. Update the total zone count.
-//   3. Insert a memory barrier.
-//   4. Insert our shim.
-//
-// Each MallocZoneFunctions is uniquely identified by |context|, which is a
-// pointer to the original malloc zone. When we wish to dispatch back to the
-// original malloc zones, we iterate through the array, looking for a matching
-// |context|.
-//
-// Most allocations go through the default allocator. We will ensure that the
-// default allocator is stored as the first MallocZoneFunctions.
-//
-// Returns whether the zone was successfully stored.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool StoreMallocZone(ChromeMallocZone* zone);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
-                                 const MallocZoneFunctions* functions);
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) int GetMallocZoneCountForTesting();
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void ClearAllMallocZonesForTesting();
-
-inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
-  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
-    if (g_malloc_zones[i].context == zone) {
-      return g_malloc_zones[i];
-    }
-  }
-  PA_IMMEDIATE_CRASH();
-}
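A hedged sketch of the dispatch-back path that this lookup supports; the function below is hypothetical and not part of the deleted header. A shimmed zone callback looks up the original functions for its zone and forwards to them:

// Hypothetical forwarding shim, assuming |zone| was stored earlier via
// StoreMallocZone(); GetFunctionsForZone() crashes if it was not.
void* ShimMallocSketch(struct _malloc_zone_t* zone, size_t size) {
  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
  return functions.malloc(zone, size);
}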
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_MALLOC_ZONE_FUNCTIONS_MAC_H_
diff --git a/base/allocator/partition_allocator/shim/malloc_zone_functions_mac_unittest.cc b/base/allocator/partition_allocator/shim/malloc_zone_functions_mac_unittest.cc
deleted file mode 100644
index a5e61b5..0000000
--- a/base/allocator/partition_allocator/shim/malloc_zone_functions_mac_unittest.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace allocator_shim {
-
-class MallocZoneFunctionsTest : public testing::Test {
- protected:
-  void TearDown() override { ClearAllMallocZonesForTesting(); }
-};
-
-TEST_F(MallocZoneFunctionsTest, TestDefaultZoneMallocFree) {
-  ChromeMallocZone* malloc_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  StoreMallocZone(malloc_zone);
-  int* test = reinterpret_cast<int*>(
-      g_malloc_zones[0].malloc(malloc_default_zone(), 33));
-  test[0] = 1;
-  test[1] = 2;
-  g_malloc_zones[0].free(malloc_default_zone(), test);
-}
-
-TEST_F(MallocZoneFunctionsTest, IsZoneAlreadyStored) {
-  ChromeMallocZone* malloc_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  EXPECT_FALSE(IsMallocZoneAlreadyStored(malloc_zone));
-  StoreMallocZone(malloc_zone);
-  EXPECT_TRUE(IsMallocZoneAlreadyStored(malloc_zone));
-}
-
-TEST_F(MallocZoneFunctionsTest, CannotDoubleStoreZone) {
-  ChromeMallocZone* malloc_zone =
-      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
-  StoreMallocZone(malloc_zone);
-  StoreMallocZone(malloc_zone);
-  EXPECT_EQ(1, GetMallocZoneCountForTesting());
-}
-
-TEST_F(MallocZoneFunctionsTest, CannotStoreMoreThanMaxZones) {
-  std::vector<ChromeMallocZone> zones;
-  zones.resize(kMaxZoneCount * 2);
-  for (int i = 0; i < kMaxZoneCount * 2; ++i) {
-    ChromeMallocZone& zone = zones[i];
-    memcpy(&zone, malloc_default_zone(), sizeof(ChromeMallocZone));
-    StoreMallocZone(&zone);
-  }
-
-  int max_zone_count = kMaxZoneCount;
-  EXPECT_EQ(max_zone_count, GetMallocZoneCountForTesting());
-}
-
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/shim/nonscannable_allocator.cc b/base/allocator/partition_allocator/shim/nonscannable_allocator.cc
deleted file mode 100644
index ed1a097..0000000
--- a/base/allocator/partition_allocator/shim/nonscannable_allocator.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/nonscannable_allocator.h"
-
-#include "base/allocator/partition_allocator/partition_root.h"
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#endif
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-namespace allocator_shim::internal {
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-template <bool quarantinable>
-NonScannableAllocatorImpl<quarantinable>::NonScannableAllocatorImpl() = default;
-template <bool quarantinable>
-NonScannableAllocatorImpl<quarantinable>::~NonScannableAllocatorImpl() =
-    default;
-
-template <bool quarantinable>
-NonScannableAllocatorImpl<quarantinable>&
-NonScannableAllocatorImpl<quarantinable>::Instance() {
-  static partition_alloc::internal::base::NoDestructor<
-      NonScannableAllocatorImpl>
-      instance;
-  return *instance;
-}
-
-template <bool quarantinable>
-void* NonScannableAllocatorImpl<quarantinable>::Alloc(size_t size) {
-#if BUILDFLAG(USE_STARSCAN)
-  // TODO(bikineev): Change to LIKELY once PCScan is enabled by default.
-  if (PA_UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
-    PA_DCHECK(allocator_.get());
-    return allocator_->root()->AllocWithFlagsNoHooks(
-        0, size, partition_alloc::PartitionPageSize());
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-  // Otherwise, dispatch to default partition.
-  return allocator_shim::internal::PartitionAllocMalloc::Allocator()
-      ->AllocWithFlagsNoHooks(0, size, partition_alloc::PartitionPageSize());
-}
-
-template <bool quarantinable>
-void NonScannableAllocatorImpl<quarantinable>::Free(void* ptr) {
-#if BUILDFLAG(USE_STARSCAN)
-  if (PA_UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
-    allocator_->root()->FreeNoHooks(ptr);
-    return;
-  }
-#endif  // BUILDFLAG(USE_STARSCAN)
-  partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(ptr);
-}
-
-template <bool quarantinable>
-void NonScannableAllocatorImpl<quarantinable>::NotifyPCScanEnabled() {
-#if BUILDFLAG(USE_STARSCAN)
-  allocator_.reset(partition_alloc::internal::MakePCScanMetadata<
-                   partition_alloc::PartitionAllocator>(
-      partition_alloc::PartitionOptions{
-          .star_scan_quarantine = quarantinable
-                                      ? partition_alloc::PartitionOptions::
-                                            StarScanQuarantine::kAllowed
-                                      : partition_alloc::PartitionOptions::
-                                            StarScanQuarantine::kDisallowed,
-          .backup_ref_ptr =
-              partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
-      }));
-  if constexpr (quarantinable) {
-    partition_alloc::internal::PCScan::RegisterNonScannableRoot(
-        allocator_->root());
-  }
-  pcscan_enabled_.store(true, std::memory_order_release);
-#endif  // BUILDFLAG(USE_STARSCAN)
-}
-
-template class NonScannableAllocatorImpl<true>;
-template class NonScannableAllocatorImpl<false>;
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-}  // namespace allocator_shim::internal
diff --git a/base/allocator/partition_allocator/shim/nonscannable_allocator.h b/base/allocator/partition_allocator/shim/nonscannable_allocator.h
deleted file mode 100644
index 862e962..0000000
--- a/base/allocator/partition_allocator/shim/nonscannable_allocator.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_NONSCANNABLE_ALLOCATOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_NONSCANNABLE_ALLOCATOR_H_
-
-#include <atomic>
-#include <cstddef>
-#include <memory>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/partition_alloc.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-#endif
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-namespace allocator_shim {
-
-#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-namespace internal {
-
-// Represents an allocator that contains memory for data-like objects (that don't
-// contain pointers/references) and therefore doesn't require scanning by
-// PCScan. An example would be strings or socket/IPC/file buffers. Use with
-// caution.
-template <bool quarantinable>
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) NonScannableAllocatorImpl final {
- public:
-  static NonScannableAllocatorImpl& Instance();
-
-  NonScannableAllocatorImpl(const NonScannableAllocatorImpl&) = delete;
-  NonScannableAllocatorImpl& operator=(const NonScannableAllocatorImpl&) =
-      delete;
-
-  void* Alloc(size_t size);
-  void Free(void*);
-
-  // Returns PartitionRoot corresponding to the allocator, or nullptr if the
-  // allocator is not enabled.
-  partition_alloc::PartitionRoot* root() {
-#if BUILDFLAG(USE_STARSCAN)
-    if (!allocator_.get()) {
-      return nullptr;
-    }
-    return allocator_->root();
-#else
-    return nullptr;
-#endif  // BUILDFLAG(USE_STARSCAN)
-  }
-
-  void NotifyPCScanEnabled();
-
- private:
-  template <typename>
-  friend class partition_alloc::internal::base::NoDestructor;
-
-  NonScannableAllocatorImpl();
-  ~NonScannableAllocatorImpl();
-
-#if BUILDFLAG(USE_STARSCAN)
-  std::unique_ptr<partition_alloc::PartitionAllocator,
-                  partition_alloc::internal::PCScanMetadataDeleter>
-      allocator_;
-  std::atomic_bool pcscan_enabled_{false};
-#endif  // BUILDFLAG(USE_STARSCAN)
-};
-
-extern template class NonScannableAllocatorImpl<true>;
-extern template class NonScannableAllocatorImpl<false>;
-
-}  // namespace internal
-
-using NonScannableAllocator = internal::NonScannableAllocatorImpl<true>;
-using NonQuarantinableAllocator = internal::NonScannableAllocatorImpl<false>;
-
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_NONSCANNABLE_ALLOCATOR_H_
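A hedged usage sketch of the singleton interface above (illustrative only, assuming a build with USE_PARTITION_ALLOC_AS_MALLOC enabled):

// Allocate a pointer-free buffer that PCScan never needs to scan, then release
// it through the same allocator.
void* buffer = allocator_shim::NonScannableAllocator::Instance().Alloc(4096);
// ... fill |buffer| with data that contains no pointers ...
allocator_shim::NonScannableAllocator::Instance().Free(buffer);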
diff --git a/base/allocator/partition_allocator/shim/winheap_stubs_win.cc b/base/allocator/partition_allocator/shim/winheap_stubs_win.cc
deleted file mode 100644
index b8cccdc..0000000
--- a/base/allocator/partition_allocator/shim/winheap_stubs_win.cc
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This code should move into the default Windows shim once the win-specific
-// allocation shim has been removed, and the generic shim has become the
-// default.
-
-#include "winheap_stubs_win.h"
-
-#include <limits.h>
-#include <malloc.h>
-#include <new.h>
-#include <windows.h>
-#include <algorithm>
-#include <limits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace allocator_shim {
-
-bool g_is_win_shim_layer_initialized = false;
-
-namespace {
-
-const size_t kWindowsPageSize = 4096;
-const size_t kMaxWindowsAllocation = INT_MAX - kWindowsPageSize;
-
-inline HANDLE get_heap_handle() {
-  return reinterpret_cast<HANDLE>(_get_heap_handle());
-}
-
-}  // namespace
-
-void* WinHeapMalloc(size_t size) {
-  if (size < kMaxWindowsAllocation) {
-    return HeapAlloc(get_heap_handle(), 0, size);
-  }
-  return nullptr;
-}
-
-void WinHeapFree(void* ptr) {
-  if (!ptr) {
-    return;
-  }
-
-  HeapFree(get_heap_handle(), 0, ptr);
-}
-
-void* WinHeapRealloc(void* ptr, size_t size) {
-  if (!ptr) {
-    return WinHeapMalloc(size);
-  }
-  if (!size) {
-    WinHeapFree(ptr);
-    return nullptr;
-  }
-  if (size < kMaxWindowsAllocation) {
-    return HeapReAlloc(get_heap_handle(), 0, ptr, size);
-  }
-  return nullptr;
-}
-
-size_t WinHeapGetSizeEstimate(void* ptr) {
-  if (!ptr) {
-    return 0;
-  }
-
-  return HeapSize(get_heap_handle(), 0, ptr);
-}
-
-// Call the new handler, if one has been set.
-// Returns true on successfully calling the handler, false otherwise.
-bool WinCallNewHandler(size_t size) {
-#ifdef _CPPUNWIND
-#error "Exceptions in allocator shim are not supported!"
-#endif  // _CPPUNWIND
-  // Get the current new handler.
-  _PNH nh = _query_new_handler();
-  if (!nh) {
-    return false;
-  }
-  // Since exceptions are disabled, we don't really know if new_handler
-  // failed.  Assume it will abort if it fails.
-  return nh(size) ? true : false;
-}
-
-// The Windows _aligned_* functions are implemented by creating an allocation
-// with enough space to create an aligned allocation internally. The offset to
-// the original allocation is prefixed to the aligned allocation so that it can
-// be correctly freed.
-
-namespace {
-
-struct AlignedPrefix {
-  // Offset to the original allocation point.
-  unsigned int original_allocation_offset;
-  // Make sure an unsigned int is enough to store the offset
-  static_assert(
-      kMaxWindowsAllocation < std::numeric_limits<unsigned int>::max(),
-      "original_allocation_offset must be able to fit into an unsigned int");
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  // Magic value used to check that _aligned_free() and _aligned_realloc() are
-  // only ever called on an aligned allocated chunk.
-  static constexpr unsigned int kMagic = 0x12003400;
-  unsigned int magic;
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-};
-
-// Compute how large an allocation we need to fit an allocation with the given
-// size and alignment and space for a prefix pointer.
-size_t AdjustedSize(size_t size, size_t alignment) {
-  // Minimal alignment is the prefix size so the prefix is properly aligned.
-  alignment = std::max(alignment, alignof(AlignedPrefix));
-  return size + sizeof(AlignedPrefix) + alignment - 1;
-}
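A hedged worked example of this size computation (illustrative, assuming the 4-byte AlignedPrefix of a non-DCHECK build):

// AdjustedSize(100, 64) = 100 + sizeof(AlignedPrefix) + 64 - 1
//                       = 100 + 4 + 63 = 167 bytes.
// Any 167-byte block returned by HeapAlloc() is large enough to place a
// 64-byte-aligned 100-byte region with the prefix immediately before it.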
-
-// Align the allocation and write the prefix.
-void* AlignAllocation(void* ptr, size_t alignment) {
-  // Minimal alignment is the prefix size so the prefix is properly aligned.
-  alignment = std::max(alignment, alignof(AlignedPrefix));
-
-  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  address = partition_alloc::internal::base::bits::AlignUp(
-      address + sizeof(AlignedPrefix), alignment);
-
-  // Write the prefix.
-  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
-  prefix->original_allocation_offset =
-      partition_alloc::internal::base::checked_cast<unsigned int>(
-          address - reinterpret_cast<uintptr_t>(ptr));
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  prefix->magic = AlignedPrefix::kMagic;
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-  return reinterpret_cast<void*>(address);
-}
-
-// Return the original allocation from an aligned allocation.
-void* UnalignAllocation(void* ptr) {
-  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(ptr) - 1;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  PA_DCHECK(prefix->magic == AlignedPrefix::kMagic);
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-  void* unaligned =
-      static_cast<uint8_t*>(ptr) - prefix->original_allocation_offset;
-  PA_CHECK(unaligned < ptr);
-  PA_CHECK(reinterpret_cast<uintptr_t>(ptr) -
-               reinterpret_cast<uintptr_t>(unaligned) <=
-           kMaxWindowsAllocation);
-  return unaligned;
-}
-
-}  // namespace
-
-void* WinHeapAlignedMalloc(size_t size, size_t alignment) {
-  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
-
-  size_t adjusted = AdjustedSize(size, alignment);
-  if (adjusted >= kMaxWindowsAllocation) {
-    return nullptr;
-  }
-
-  void* ptr = WinHeapMalloc(adjusted);
-  if (!ptr) {
-    return nullptr;
-  }
-
-  return AlignAllocation(ptr, alignment);
-}
-
-void* WinHeapAlignedRealloc(void* ptr, size_t size, size_t alignment) {
-  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
-
-  if (!ptr) {
-    return WinHeapAlignedMalloc(size, alignment);
-  }
-  if (!size) {
-    WinHeapAlignedFree(ptr);
-    return nullptr;
-  }
-
-  size_t adjusted = AdjustedSize(size, alignment);
-  if (adjusted >= kMaxWindowsAllocation) {
-    return nullptr;
-  }
-
-  // Try to resize the allocation in place first.
-  void* unaligned = UnalignAllocation(ptr);
-  if (HeapReAlloc(get_heap_handle(), HEAP_REALLOC_IN_PLACE_ONLY, unaligned,
-                  adjusted)) {
-    return ptr;
-  }
-
-  // Otherwise manually perform an _aligned_malloc() and copy since an
-  // unaligned allocation from HeapReAlloc() would force us to copy the
-  // allocation twice.
-  void* new_ptr = WinHeapAlignedMalloc(size, alignment);
-  if (!new_ptr) {
-    return nullptr;
-  }
-
-  size_t gap =
-      reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(unaligned);
-  size_t old_size = WinHeapGetSizeEstimate(unaligned) - gap;
-  memcpy(new_ptr, ptr, std::min(size, old_size));
-  WinHeapAlignedFree(ptr);
-  return new_ptr;
-}
-
-void WinHeapAlignedFree(void* ptr) {
-  if (!ptr) {
-    return;
-  }
-
-  void* original_allocation = UnalignAllocation(ptr);
-  WinHeapFree(original_allocation);
-}
-
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/shim/winheap_stubs_win.h b/base/allocator/partition_allocator/shim/winheap_stubs_win.h
deleted file mode 100644
index acbb00c..0000000
--- a/base/allocator/partition_allocator/shim/winheap_stubs_win.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Thin allocation wrappers for the Windows heap. This file should be deleted
-// once the win-specific allocation shim has been removed, and the generic shim
-// has become the default.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_WINHEAP_STUBS_WIN_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_WINHEAP_STUBS_WIN_H_
-
-#include <stdint.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace allocator_shim {
-
-// Set to true if the link-time magic has successfully hooked into the CRT's
-// heap initialization.
-extern bool g_is_win_shim_layer_initialized;
-
-// Thin wrappers to implement the standard C allocation semantics on the
-// CRT's Windows heap.
-void* WinHeapMalloc(size_t size);
-void WinHeapFree(void* ptr);
-void* WinHeapRealloc(void* ptr, size_t size);
-
-// Returns a lower-bound estimate for the full amount of memory consumed by
-// the allocation |ptr|.
-size_t WinHeapGetSizeEstimate(void* ptr);
-
-// Call the new handler, if one has been set.
-// Returns true on successfully calling the handler, false otherwise.
-bool WinCallNewHandler(size_t size);
-
-// Wrappers to implement the interface for the _aligned_* functions on top of
-// the CRT's Windows heap. Exported for tests.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* WinHeapAlignedMalloc(size_t size, size_t alignment);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* WinHeapAlignedRealloc(void* ptr, size_t size, size_t alignment);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void WinHeapAlignedFree(void* ptr);
-
-}  // namespace allocator_shim
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_WINHEAP_STUBS_WIN_H_
diff --git a/base/allocator/partition_allocator/shim/winheap_stubs_win_unittest.cc b/base/allocator/partition_allocator/shim/winheap_stubs_win_unittest.cc
deleted file mode 100644
index 5155918..0000000
--- a/base/allocator/partition_allocator/shim/winheap_stubs_win_unittest.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/shim/winheap_stubs_win.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace allocator_shim {
-namespace {
-
-bool IsPtrAligned(void* ptr, size_t alignment) {
-  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
-  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  return partition_alloc::internal::base::bits::AlignUp(address, alignment) ==
-         address;
-}
-
-}  // namespace
-
-TEST(WinHeapStubs, AlignedAllocationAreAligned) {
-  for (size_t alignment = 1; alignment < 65536; alignment *= 2) {
-    SCOPED_TRACE(alignment);
-
-    void* ptr = WinHeapAlignedMalloc(10, alignment);
-    ASSERT_NE(ptr, nullptr);
-    EXPECT_TRUE(IsPtrAligned(ptr, alignment));
-
-    ptr = WinHeapAlignedRealloc(ptr, 1000, alignment);
-    ASSERT_NE(ptr, nullptr);
-    EXPECT_TRUE(IsPtrAligned(ptr, alignment));
-
-    WinHeapAlignedFree(ptr);
-  }
-}
-
-TEST(WinHeapStubs, AlignedReallocationsCorrectlyCopyData) {
-  constexpr size_t kAlignment = 64;
-  constexpr uint8_t kMagicByte = 0xab;
-
-  size_t old_size = 8;
-  void* ptr = WinHeapAlignedMalloc(old_size, kAlignment);
-  ASSERT_NE(ptr, nullptr);
-
-  // Cause allocations to grow and shrink and confirm allocation contents are
-  // copied regardless.
-  constexpr size_t kSizes[] = {10, 1000, 50, 3000, 30, 9000};
-
-  for (size_t size : kSizes) {
-    SCOPED_TRACE(size);
-
-    memset(ptr, kMagicByte, old_size);
-    ptr = WinHeapAlignedRealloc(ptr, size, kAlignment);
-    ASSERT_NE(ptr, nullptr);
-
-    for (size_t i = 0; i < std::min(size, old_size); i++) {
-      SCOPED_TRACE(i);
-      ASSERT_EQ(reinterpret_cast<uint8_t*>(ptr)[i], kMagicByte);
-    }
-
-    old_size = size;
-  }
-
-  WinHeapAlignedFree(ptr);
-}
-
-}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/spinning_mutex.cc b/base/allocator/partition_allocator/spinning_mutex.cc
deleted file mode 100644
index b4d4f61..0000000
--- a/base/allocator/partition_allocator/spinning_mutex.cc
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/spinning_mutex.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#endif
-
-#if BUILDFLAG(IS_POSIX)
-#include <pthread.h>
-#endif
-
-#if PA_CONFIG(HAS_LINUX_KERNEL)
-#include <errno.h>
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif  // PA_CONFIG(HAS_LINUX_KERNEL)
-
-#if !PA_CONFIG(HAS_FAST_MUTEX)
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-
-#if BUILDFLAG(IS_POSIX)
-#include <sched.h>
-
-#define PA_YIELD_THREAD sched_yield()
-
-#else  // Other OS
-
-#warning "Thread yield not supported on this OS."
-#define PA_YIELD_THREAD ((void)0)
-#endif
-
-#endif  // !PA_CONFIG(HAS_FAST_MUTEX)
-
-namespace partition_alloc::internal {
-
-void SpinningMutex::Reinit() {
-#if !BUILDFLAG(IS_APPLE)
-  // On most platforms, no need to re-init the lock, can just unlock it.
-  Release();
-#else
-  unfair_lock_ = OS_UNFAIR_LOCK_INIT;
-#endif  // BUILDFLAG(IS_APPLE)
-}
-
-void SpinningMutex::AcquireSpinThenBlock() {
-  int tries = 0;
-  int backoff = 1;
-  do {
-    if (PA_LIKELY(Try())) {
-      return;
-    }
-    // Note: Per the intel optimization manual
-    // (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf),
-    // the "pause" instruction is more costly on Skylake Client than on previous
-    // architectures. The latency is found to be 141 cycles
-    // there (from ~10 on previous ones, a nice 14x).
-    //
-    // According to Agner Fog's instruction tables, the latency is still >100
-    // cycles on Ice Lake, and from other sources, seems to be high as well on
-    // Alder Lake. Separately, it is (from
-    // https://agner.org/optimize/instruction_tables.pdf) also high on AMD Zen 3
-    // (~65). So just assume that it's this way for most x86_64 architectures.
-    //
-    // Also, loop several times here, following the guidelines in section 2.3.4
-    // of the manual, "Pause latency in Skylake Client Microarchitecture".
-    for (int yields = 0; yields < backoff; yields++) {
-      PA_YIELD_PROCESSOR;
-      tries++;
-    }
-    constexpr int kMaxBackoff = 16;
-    backoff = std::min(kMaxBackoff, backoff << 1);
-  } while (tries < kSpinCount);
-
-  LockSlow();
-}
-
-#if PA_CONFIG(HAS_FAST_MUTEX)
-
-#if PA_CONFIG(HAS_LINUX_KERNEL)
-
-void SpinningMutex::FutexWait() {
-  // Save and restore errno.
-  int saved_errno = errno;
-  // Don't check the return value, as we will not be awakened by a timeout, since
-  // none is specified.
-  //
-  // Ignoring the return value doesn't impact correctness, as this acts as an
-  // immediate wakeup. For completeness, the possible errors for FUTEX_WAIT are:
-  // - EACCES: state_ is not readable. Should not happen.
-  // - EAGAIN: the value is not as expected, that is not |kLockedContended|, in
-  //           which case retrying the loop is the right behavior.
-  // - EINTR: signal, looping is the right behavior.
-  // - EINVAL: invalid argument.
-  //
-  // Note: not checking the return value is the approach used in bionic and
-  // glibc as well.
-  //
-  // Will return immediately if |state_| is no longer equal to
-  // |kLockedContended|. Otherwise, sleeps and wakes up when |state_| may not be
-  // |kLockedContended| anymore. Note that even without spurious wakeups, the
-  // value of |state_| is not guaranteed when this returns, as another thread
-  // may get the lock before we get to run.
-  int err = syscall(SYS_futex, &state_, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
-                    kLockedContended, nullptr, nullptr, 0);
-
-  if (err) {
-    // These are programming errors; check them.
-    PA_DCHECK(errno != EACCES);
-    PA_DCHECK(errno != EINVAL);
-  }
-  errno = saved_errno;
-}
-
-void SpinningMutex::FutexWake() {
-  int saved_errno = errno;
-  long retval = syscall(SYS_futex, &state_, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
-                        1 /* wake up a single waiter */, nullptr, nullptr, 0);
-  PA_CHECK(retval != -1);
-  errno = saved_errno;
-}
-
-void SpinningMutex::LockSlow() {
-  // If this thread gets awakened but another one got the lock first, then go back
-  // to sleeping. See comments in |FutexWait()| to see why a loop is required.
-  while (state_.exchange(kLockedContended, std::memory_order_acquire) !=
-         kUnlocked) {
-    FutexWait();
-  }
-}
-
-#elif BUILDFLAG(IS_WIN)
-
-void SpinningMutex::LockSlow() {
-  ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
-}
-
-#elif BUILDFLAG(IS_APPLE)
-
-void SpinningMutex::LockSlow() {
-  return os_unfair_lock_lock(&unfair_lock_);
-}
-
-#elif BUILDFLAG(IS_POSIX)
-
-void SpinningMutex::LockSlow() {
-  int retval = pthread_mutex_lock(&lock_);
-  PA_DCHECK(retval == 0);
-}
-
-#elif BUILDFLAG(IS_FUCHSIA)
-
-void SpinningMutex::LockSlow() {
-  sync_mutex_lock(&lock_);
-}
-
-#endif
-
-#else  // PA_CONFIG(HAS_FAST_MUTEX)
-
-void SpinningMutex::LockSlowSpinLock() {
-  int yield_thread_count = 0;
-  do {
-    if (yield_thread_count < 10) {
-      PA_YIELD_THREAD;
-      yield_thread_count++;
-    } else {
-      // At this point, it's likely that the lock is held by a lower priority
-      // thread that is unable to finish its work because of higher
-      // priority threads spinning here. Sleeping should ensure that they make
-      // progress.
-      base::PlatformThread::Sleep(base::Milliseconds(1));
-    }
-  } while (!TrySpinLock());
-}
-
-#endif  // PA_CONFIG(HAS_FAST_MUTEX)
-
-}  // namespace partition_alloc::internal
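As a hedged illustration of how callers typically scope this lock (the guard below is a sketch, not the ScopedGuard helper PartitionAlloc actually uses):

// Minimal RAII guard over SpinningMutex, for illustration only.
class SpinningMutexGuardSketch {
 public:
  explicit SpinningMutexGuardSketch(
      partition_alloc::internal::SpinningMutex& lock)
      : lock_(lock) {
    lock_.Acquire();
  }
  ~SpinningMutexGuardSketch() { lock_.Release(); }

 private:
  partition_alloc::internal::SpinningMutex& lock_;
};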
diff --git a/base/allocator/partition_allocator/spinning_mutex.h b/base/allocator/partition_allocator/spinning_mutex.h
deleted file mode 100644
index 8ded2e2..0000000
--- a/base/allocator/partition_allocator/spinning_mutex.h
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPINNING_MUTEX_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPINNING_MUTEX_H_
-
-#include <algorithm>
-#include <atomic>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/yield_processor.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
-#endif
-
-#if BUILDFLAG(IS_POSIX)
-#include <errno.h>
-#include <pthread.h>
-#endif
-
-#if BUILDFLAG(IS_APPLE)
-#include <os/lock.h>
-#endif  // BUILDFLAG(IS_APPLE)
-
-#if BUILDFLAG(IS_FUCHSIA)
-#include <lib/sync/mutex.h>
-#endif
-
-namespace partition_alloc::internal {
-
-// The behavior of this class depends on whether PA_CONFIG(HAS_FAST_MUTEX) is set.
-// 1. When it is defined:
-//
-// Simple spinning lock. It will spin in user space a set number of times before
-// going into the kernel to sleep.
-//
-// This is intended to give "the best of both worlds" between a SpinLock and
-// base::Lock:
-// - SpinLock: Inlined fast path, no external function calls, just
-//   compare-and-swap. Short waits do not go into the kernel. Good behavior in
-//   low contention cases.
-// - base::Lock: Good behavior in case of contention.
-//
-// We don't rely on base::Lock, which we could make spin (by calling Try() in
-// a loop), as its performance is below that of a custom spinlock, as seen in
-// high-level benchmarks. Instead, this implements a simple non-recursive
-// mutex on top of
-// the futex() syscall on Linux, SRWLock on Windows, os_unfair_lock on macOS,
-// and pthread_mutex on POSIX. The main difference between this and a libc
-// implementation is that it only supports the simplest path: private (to a
-// process), non-recursive mutexes with no priority inheritance, no timed waits.
-//
-// As a property that makes it usable inside the allocator, this code does not
-// make any allocations; locks are small, with a constexpr constructor and no
-// destructor.
-//
-// 2. Otherwise: This is a simple SpinLock, in the sense that it does not have
-// any awareness of other threads' behavior.
-class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
- public:
-  inline constexpr SpinningMutex();
-  PA_ALWAYS_INLINE void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION();
-  PA_ALWAYS_INLINE void Release() PA_UNLOCK_FUNCTION();
-  PA_ALWAYS_INLINE bool Try() PA_EXCLUSIVE_TRYLOCK_FUNCTION(true);
-  void AssertAcquired() const {}  // Not supported.
-  void Reinit() PA_UNLOCK_FUNCTION();
-
- private:
-  PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION();
-#if PA_CONFIG(HAS_FAST_MUTEX)
-  void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
-#else
-  PA_ALWAYS_INLINE void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
-#endif
-
-  // See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150
-  // cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz would
-  // cost 150 * 64 / 3e9 ~= 3.2us.
-  //
-  // This applies to Linux kernels, on x86_64. On ARM we might want to spin
-  // more.
-  static constexpr int kSpinCount = 64;
-
-#if PA_CONFIG(HAS_FAST_MUTEX)
-
-#if PA_CONFIG(HAS_LINUX_KERNEL)
-  void FutexWait();
-  void FutexWake();
-
-  static constexpr int kUnlocked = 0;
-  static constexpr int kLockedUncontended = 1;
-  static constexpr int kLockedContended = 2;
-
-  std::atomic<int32_t> state_{kUnlocked};
-#elif BUILDFLAG(IS_WIN)
-  PA_CHROME_SRWLOCK lock_ = SRWLOCK_INIT;
-#elif BUILDFLAG(IS_APPLE)
-  os_unfair_lock unfair_lock_ = OS_UNFAIR_LOCK_INIT;
-#elif BUILDFLAG(IS_POSIX)
-  pthread_mutex_t lock_ = PTHREAD_MUTEX_INITIALIZER;
-#elif BUILDFLAG(IS_FUCHSIA)
-  sync_mutex lock_;
-#endif
-
-#else   // PA_CONFIG(HAS_FAST_MUTEX)
-  std::atomic<bool> lock_{false};
-
-  // Spinlock-like, fallback.
-  PA_ALWAYS_INLINE bool TrySpinLock();
-  PA_ALWAYS_INLINE void ReleaseSpinLock();
-  void LockSlowSpinLock();
-#endif  // PA_CONFIG(HAS_FAST_MUTEX)
-};
-
-PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
-  // Not marked PA_LIKELY(), as:
-  // 1. We don't know how much contention the lock would experience
-  // 2. This may lead to weird-looking code layout when inlined into a caller
-  // with PA_(UN)LIKELY() annotations.
-  if (Try()) {
-    return;
-  }
-
-  return AcquireSpinThenBlock();
-}
-
-inline constexpr SpinningMutex::SpinningMutex() = default;
-
-#if PA_CONFIG(HAS_FAST_MUTEX)
-
-#if PA_CONFIG(HAS_LINUX_KERNEL)
-
-PA_ALWAYS_INLINE bool SpinningMutex::Try() {
-  // Using the weak variant of compare_exchange(), which may fail spuriously. On
-  // some architectures such as ARM, CAS is typically performed as a LDREX/STREX
-  // pair, where the store may fail. In the strong version, there is a loop
-  // inserted by the compiler to retry in these cases.
-  //
-  // Since we are retrying in Lock() anyway, there is no point having two nested
-  // loops.
-  int expected = kUnlocked;
-  return (state_.load(std::memory_order_relaxed) == expected) &&
-         state_.compare_exchange_weak(expected, kLockedUncontended,
-                                      std::memory_order_acquire,
-                                      std::memory_order_relaxed);
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::Release() {
-  if (PA_UNLIKELY(state_.exchange(kUnlocked, std::memory_order_release) ==
-                  kLockedContended)) {
-    // |kLockedContended|: there is a waiter to wake up.
-    //
-    // Here there is a window where the lock is unlocked, since we just set it
-    // to |kUnlocked| above. Meaning that another thread can grab the lock
-    // in-between now and |FutexWake()| waking up a waiter. Aside from
-    // potential fairness concerns, this is not an issue, as the newly-awakened
-    // thread will check that the lock is still free.
-    //
-    // There is a small pessimization here though: if we have a single waiter,
-    // then when it wakes up, the lock will be set to |kLockedContended|, so
-    // when this waiter releases the lock, it will needlessly call
-    // |FutexWake()|, even though there are no waiters. This is supported by the
-    // kernel, and is what bionic (Android's libc) also does.
-    FutexWake();
-  }
-}
-
-#elif BUILDFLAG(IS_WIN)
-
-PA_ALWAYS_INLINE bool SpinningMutex::Try() {
-  return !!::TryAcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::Release() {
-  ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
-}
-
-#elif BUILDFLAG(IS_APPLE)
-
-PA_ALWAYS_INLINE bool SpinningMutex::Try() {
-  return os_unfair_lock_trylock(&unfair_lock_);
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::Release() {
-  return os_unfair_lock_unlock(&unfair_lock_);
-}
-
-#elif BUILDFLAG(IS_POSIX)
-
-PA_ALWAYS_INLINE bool SpinningMutex::Try() {
-  int retval = pthread_mutex_trylock(&lock_);
-  PA_DCHECK(retval == 0 || retval == EBUSY);
-  return retval == 0;
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::Release() {
-  int retval = pthread_mutex_unlock(&lock_);
-  PA_DCHECK(retval == 0);
-}
-
-#elif BUILDFLAG(IS_FUCHSIA)
-
-PA_ALWAYS_INLINE bool SpinningMutex::Try() {
-  return sync_mutex_trylock(&lock_) == ZX_OK;
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::Release() {
-  sync_mutex_unlock(&lock_);
-}
-
-#endif
-
-#else  // PA_CONFIG(HAS_FAST_MUTEX)
-
-PA_ALWAYS_INLINE bool SpinningMutex::Try() {
-  // Possibly faster than CAS. The theory is that if the cacheline is shared,
-  // then it can stay shared, for the contended case.
-  return !lock_.load(std::memory_order_relaxed) &&
-         !lock_.exchange(true, std::memory_order_acquire);
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::Release() {
-  lock_.store(false, std::memory_order_release);
-}
-
-PA_ALWAYS_INLINE void SpinningMutex::LockSlow() {
-  return LockSlowSpinLock();
-}
-
-#endif  // PA_CONFIG(HAS_FAST_MUTEX)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPINNING_MUTEX_H_
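The header above describes a "spin a bounded number of times in user space, then block in the kernel" acquisition, with kSpinCount = 64 and AcquireSpinThenBlock() as the out-of-line slow path. The body of AcquireSpinThenBlock() is not shown in this hunk; the sketch below only illustrates the general shape implied by that description, with placeholder names:

// Illustrative spin-then-block acquisition, assuming a lock type with the
// Try()/LockSlow() split described above. Not the PartitionAlloc code.
constexpr int kIllustrativeSpinCount = 64;

template <typename Lock>
void AcquireSpinThenBlockSketch(Lock& lock) {
  for (int i = 0; i < kIllustrativeSpinCount; ++i) {
    if (lock.Try()) {
      return;  // Short waits never enter the kernel.
    }
    // The real code issues a processor yield hint (PA_YIELD_PROCESSOR) here;
    // it is omitted in this sketch for portability.
  }
  lock.LockSlow();  // Contended: fall back to futex/SRWLock/os_unfair_lock/...
}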
diff --git a/base/allocator/partition_allocator/src/partition_alloc/BUILD.gn b/base/allocator/partition_allocator/src/partition_alloc/BUILD.gn
new file mode 100644
index 0000000..d66a79c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/BUILD.gn
@@ -0,0 +1,819 @@
+# Copyright 2022 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+import("//build/config/android/config.gni")
+import("//build/config/chromecast_build.gni")
+import("//build/config/chromeos/ui_mode.gni")
+import("//build/config/compiler/compiler.gni")
+import("//build/config/dcheck_always_on.gni")
+import("//build/config/logging.gni")
+import("../../partition_alloc.gni")
+
+# Add partition_alloc.gni and import it for partition_alloc configs.
+
+config("partition_alloc_implementation") {
+  # See also: `partition_alloc_base/component_export.h`
+  defines = [ "IS_PARTITION_ALLOC_IMPL" ]
+}
+
+config("partition_alloc_base_implementation") {
+  # See also: `partition_alloc_base/component_export.h`
+  defines = [ "IS_PARTITION_ALLOC_BASE_IMPL" ]
+}
+
+# TODO(https://crbug.com/1467773): Split PartitionAlloc into public and
+# private parts. The public config would add the "./include" dir and the
+# private config would add the "./src" dir.
+# TODO(https://crbug.com/1467773): Move this config and several targets into
+# "../..".
+config("public_includes") {
+  include_dirs = [
+    "..",
+    "$root_gen_dir/" + rebase_path("..", "//"),
+  ]
+}
+
+config("memory_tagging") {
+  if (current_cpu == "arm64" && is_clang &&
+      (is_linux || is_chromeos || is_android || is_fuchsia)) {
+    # base/ has access to the MTE intrinsics because it needs to use them,
+    # but they're not backwards compatible. Use base::CPU::has_mte()
+    # beforehand to confirm or use indirect functions (ifuncs) to select
+    # an MTE-specific implementation at dynamic link-time.
+    cflags = [
+      "-Xclang",
+      "-target-feature",
+      "-Xclang",
+      "+mte",
+    ]
+  }
+}
+
+# Used to shim malloc symbols on Android; see //base/allocator/README.md.
+config("wrap_malloc_symbols") {
+  ldflags = [
+    "-Wl,-wrap,calloc",
+    "-Wl,-wrap,free",
+    "-Wl,-wrap,malloc",
+    "-Wl,-wrap,memalign",
+    "-Wl,-wrap,posix_memalign",
+    "-Wl,-wrap,pvalloc",
+    "-Wl,-wrap,realloc",
+    "-Wl,-wrap,valloc",
+
+    # Not allocating memory, but part of the API
+    "-Wl,-wrap,malloc_usable_size",
+
+    # <stdlib.h> functions
+    "-Wl,-wrap,realpath",
+
+    # <string.h> functions
+    "-Wl,-wrap,strdup",
+    "-Wl,-wrap,strndup",
+
+    # <unistd.h> functions
+    "-Wl,-wrap,getcwd",
+
+    # <stdio.h> functions
+    "-Wl,-wrap,asprintf",
+    "-Wl,-wrap,vasprintf",
+  ]
+}
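For context on the config above: `-Wl,-wrap,foo` makes the linker route calls to `foo` through `__wrap_foo`, while the original definition stays reachable as `__real_foo`. A hypothetical shim illustrating the mechanism (the actual shims live under shim/ and dispatch to PartitionAlloc) could look like:

// Hypothetical illustration of the -Wl,-wrap,malloc mechanism used above.
#include <cstddef>

extern "C" {
// Provided by the linker when -Wl,-wrap,malloc is in effect: the original
// libc implementation.
void* __real_malloc(size_t size);

// All calls to malloc() in wrapped translation units land here instead.
void* __wrap_malloc(size_t size) {
  // A real shim would dispatch to PartitionAlloc; this one just forwards.
  return __real_malloc(size);
}
}  // extern "C"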
+
+config("mac_no_default_new_delete_symbols") {
+  if (!is_component_build) {
+    # This is already set when we compile libc++, see
+    # buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as
+    # well, since the shim defines the symbols, to prevent them being exported.
+    cflags = [ "-fvisibility-global-new-delete-hidden" ]
+  }
+}
+
+if (is_fuchsia) {
+  config("fuchsia_sync_lib") {
+    libs = [
+      "sync",  # Used by spinning_mutex.h.
+    ]
+  }
+}
+
+if (enable_pkeys && is_debug) {
+  config("no_stack_protector") {
+    cflags = [ "-fno-stack-protector" ]
+  }
+}
+
+_remove_configs = []
+_add_configs = []
+if (!is_debug || partition_alloc_optimized_debug) {
+  _remove_configs += [ "//build/config/compiler:default_optimization" ]
+
+  # Partition alloc is relatively hot (>1% of cycles for users of CrOS).
+  # Use speed-focused optimizations for it.
+  _add_configs += [ "//build/config/compiler:optimize_speed" ]
+} else {
+  _remove_configs += [ "//build/config/compiler:default_optimization" ]
+  _add_configs += [ "//build/config/compiler:no_optimize" ]
+}
+
+component("partition_alloc") {
+  public_deps = [
+    ":allocator_base",
+    ":allocator_core",
+    ":allocator_shim",
+  ]
+}
+
+# Changes the freelist implementation to use pointer offsets in lieu
+# of full-on pointers. Defaults to false, which implies the use of
+# "encoded next" freelist entry.
+#
+# Only usable when pointers are 64-bit.
+use_freelist_pool_offsets = has_64_bit_pointers && false
+
+source_set("allocator_core") {
+  visibility = [ ":*" ]
+
+  sources = [
+    "address_pool_manager.cc",
+    "address_pool_manager.h",
+    "address_pool_manager_bitmap.cc",
+    "address_pool_manager_bitmap.h",
+    "address_pool_manager_types.h",
+    "address_space_randomization.cc",
+    "address_space_randomization.h",
+    "address_space_stats.h",
+    "allocation_guard.cc",
+    "allocation_guard.h",
+    "compressed_pointer.cc",
+    "compressed_pointer.h",
+    "dangling_raw_ptr_checks.cc",
+    "dangling_raw_ptr_checks.h",
+    "flags.h",
+    "freeslot_bitmap.h",
+    "freeslot_bitmap_constants.h",
+    "gwp_asan_support.cc",
+    "gwp_asan_support.h",
+    "lightweight_quarantine.cc",
+    "lightweight_quarantine.h",
+    "memory_reclaimer.cc",
+    "memory_reclaimer.h",
+    "oom.cc",
+    "oom.h",
+    "oom_callback.cc",
+    "oom_callback.h",
+    "page_allocator.cc",
+    "page_allocator.h",
+    "page_allocator_constants.h",
+    "page_allocator_internal.h",
+    "partition_address_space.cc",
+    "partition_address_space.h",
+    "partition_alloc-inl.h",
+    "partition_alloc.cc",
+    "partition_alloc.h",
+    "partition_alloc_allocation_data.h",
+    "partition_alloc_check.h",
+    "partition_alloc_config.h",
+    "partition_alloc_constants.h",
+    "partition_alloc_forward.h",
+    "partition_alloc_hooks.cc",
+    "partition_alloc_hooks.h",
+    "partition_bucket.cc",
+    "partition_bucket.h",
+    "partition_bucket_lookup.h",
+    "partition_cookie.h",
+    "partition_dcheck_helper.cc",
+    "partition_dcheck_helper.h",
+    "partition_direct_map_extent.h",
+    "partition_freelist_entry.cc",
+    "partition_freelist_entry.h",
+    "partition_lock.h",
+    "partition_oom.cc",
+    "partition_oom.h",
+    "partition_page.cc",
+    "partition_page.h",
+    "partition_page_constants.h",
+    "partition_ref_count.h",
+    "partition_root.cc",
+    "partition_root.h",
+    "partition_stats.cc",
+    "partition_stats.h",
+    "partition_superpage_extent_entry.h",
+    "partition_tls.h",
+    "random.cc",
+    "random.h",
+    "reservation_offset_table.cc",
+    "reservation_offset_table.h",
+    "reverse_bytes.h",
+    "spinning_mutex.cc",
+    "spinning_mutex.h",
+    "tagging.cc",
+    "tagging.h",
+    "thread_cache.cc",
+    "thread_cache.h",
+    "thread_isolation/alignment.h",
+    "thread_isolation/pkey.cc",
+    "thread_isolation/pkey.h",
+    "thread_isolation/thread_isolation.cc",
+    "thread_isolation/thread_isolation.h",
+    "yield_processor.h",
+  ]
+
+  if (use_starscan) {
+    sources += [
+      "starscan/logging.h",
+      "starscan/metadata_allocator.cc",
+      "starscan/metadata_allocator.h",
+      "starscan/pcscan.cc",
+      "starscan/pcscan.h",
+      "starscan/pcscan_internal.cc",
+      "starscan/pcscan_internal.h",
+      "starscan/pcscan_scheduling.cc",
+      "starscan/pcscan_scheduling.h",
+      "starscan/raceful_worklist.h",
+      "starscan/scan_loop.h",
+      "starscan/snapshot.cc",
+      "starscan/snapshot.h",
+      "starscan/stack/stack.cc",
+      "starscan/stack/stack.h",
+      "starscan/starscan_fwd.h",
+      "starscan/state_bitmap.h",
+      "starscan/stats_collector.cc",
+      "starscan/stats_collector.h",
+      "starscan/stats_reporter.h",
+      "starscan/write_protector.cc",
+      "starscan/write_protector.h",
+    ]
+  }
+
+  defines = []
+  if (is_win) {
+    sources += [
+      "page_allocator_internals_win.h",
+      "partition_tls_win.cc",
+    ]
+  } else if (is_posix) {
+    sources += [
+      "page_allocator_internals_posix.cc",
+      "page_allocator_internals_posix.h",
+    ]
+  } else if (is_fuchsia) {
+    sources += [ "page_allocator_internals_fuchsia.h" ]
+  }
+  if (is_android) {
+    # The Android NDK supports PR_MTE_* macros as of NDK r23.
+    if (android_ndk_major_version >= 23) {
+      defines += [ "HAS_PR_MTE_MACROS" ]
+    }
+  }
+  if (use_starscan) {
+    if (current_cpu == "x64") {
+      assert(pcscan_stack_supported)
+      sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
+    } else if (current_cpu == "x86") {
+      assert(pcscan_stack_supported)
+      sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
+    } else if (current_cpu == "arm") {
+      assert(pcscan_stack_supported)
+      sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
+    } else if (current_cpu == "arm64") {
+      assert(pcscan_stack_supported)
+      sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
+    } else if (current_cpu == "riscv64") {
+      assert(pcscan_stack_supported)
+      sources += [ "starscan/stack/asm/riscv64/push_registers_asm.cc" ]
+    } else {
+      # To support a trampoline for another arch, please refer to v8/src/heap/base.
+      assert(!pcscan_stack_supported)
+    }
+  }
+  if (use_freelist_pool_offsets) {
+    sources += [ "pool_offset_freelist.h" ]
+  } else {
+    sources += [ "encoded_next_freelist.h" ]
+  }
+
+  public_deps = [
+    ":chromecast_buildflags",
+    ":chromeos_buildflags",
+    ":debugging_buildflags",
+    ":partition_alloc_buildflags",
+  ]
+
+  configs += [
+    ":partition_alloc_implementation",
+    ":memory_tagging",
+    "//build/config/compiler:wexit_time_destructors",
+  ]
+  deps = [ ":allocator_base" ]
+  public_configs = []
+  if (is_android) {
+    # tagging.cc requires __arm_mte_set_* functions.
+    deps += [ "//third_party/cpu_features:ndk_compat" ]
+  }
+  if (is_fuchsia) {
+    deps += [
+      "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
+      "//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp",
+    ]
+    public_deps += [
+      "//third_party/fuchsia-sdk/sdk/pkg/sync",
+      "//third_party/fuchsia-sdk/sdk/pkg/zx",
+    ]
+
+    # Needed for users of spinning_mutex.h which, for performance reasons,
+    # contains inlined calls to `libsync` inside the header file.
+    # It appends an entry to the "libs" section of the dependent target.
+    public_configs += [ ":fuchsia_sync_lib" ]
+  }
+
+  frameworks = []
+  if (is_mac) {
+    # SecTaskGetCodeSignStatus needs:
+    frameworks += [ "Security.framework" ]
+  }
+
+  if (is_apple) {
+    frameworks += [
+      "CoreFoundation.framework",
+      "Foundation.framework",
+    ]
+  }
+
+  configs -= _remove_configs
+  configs += _add_configs
+
+  # We want to be able to test pkey mode without access to the default pkey.
+  # This is incompatible with stack protectors since the TLS won't be pkey-tagged.
+  if (enable_pkeys && is_debug) {
+    configs += [ ":no_stack_protector" ]
+  }
+}
+
+component("allocator_base") {
+  visibility = [ ":*" ]
+
+  sources = [
+    "partition_alloc_base/atomic_ref_count.h",
+    "partition_alloc_base/augmentations/compiler_specific.h",
+    "partition_alloc_base/bit_cast.h",
+    "partition_alloc_base/bits.h",
+    "partition_alloc_base/check.cc",
+    "partition_alloc_base/check.h",
+    "partition_alloc_base/compiler_specific.h",
+    "partition_alloc_base/component_export.h",
+    "partition_alloc_base/cpu.cc",
+    "partition_alloc_base/cpu.h",
+    "partition_alloc_base/cxx20_is_constant_evaluated.h",
+    "partition_alloc_base/debug/alias.cc",
+    "partition_alloc_base/debug/alias.h",
+    "partition_alloc_base/debug/stack_trace.cc",
+    "partition_alloc_base/debug/stack_trace.h",
+    "partition_alloc_base/export_template.h",
+    "partition_alloc_base/gtest_prod_util.h",
+    "partition_alloc_base/immediate_crash.h",
+    "partition_alloc_base/log_message.cc",
+    "partition_alloc_base/log_message.h",
+    "partition_alloc_base/logging.cc",
+    "partition_alloc_base/logging.h",
+    "partition_alloc_base/memory/page_size.h",
+    "partition_alloc_base/memory/ref_counted.cc",
+    "partition_alloc_base/memory/ref_counted.h",
+    "partition_alloc_base/memory/scoped_policy.h",
+    "partition_alloc_base/memory/scoped_refptr.h",
+    "partition_alloc_base/no_destructor.h",
+    "partition_alloc_base/notreached.h",
+    "partition_alloc_base/numerics/checked_math.h",
+    "partition_alloc_base/numerics/checked_math_impl.h",
+    "partition_alloc_base/numerics/clamped_math.h",
+    "partition_alloc_base/numerics/clamped_math_impl.h",
+    "partition_alloc_base/numerics/safe_conversions.h",
+    "partition_alloc_base/numerics/safe_conversions_arm_impl.h",
+    "partition_alloc_base/numerics/safe_conversions_impl.h",
+    "partition_alloc_base/numerics/safe_math.h",
+    "partition_alloc_base/numerics/safe_math_arm_impl.h",
+    "partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
+    "partition_alloc_base/numerics/safe_math_shared_impl.h",
+    "partition_alloc_base/posix/eintr_wrapper.h",
+    "partition_alloc_base/process/process_handle.h",
+    "partition_alloc_base/rand_util.cc",
+    "partition_alloc_base/rand_util.h",
+    "partition_alloc_base/scoped_clear_last_error.h",
+    "partition_alloc_base/strings/cstring_builder.cc",
+    "partition_alloc_base/strings/cstring_builder.h",
+    "partition_alloc_base/strings/safe_sprintf.cc",
+    "partition_alloc_base/strings/safe_sprintf.h",
+    "partition_alloc_base/strings/string_util.cc",
+    "partition_alloc_base/strings/string_util.h",
+    "partition_alloc_base/strings/stringprintf.cc",
+    "partition_alloc_base/strings/stringprintf.h",
+    "partition_alloc_base/system/sys_info.h",
+    "partition_alloc_base/thread_annotations.h",
+    "partition_alloc_base/threading/platform_thread.cc",
+    "partition_alloc_base/threading/platform_thread.h",
+    "partition_alloc_base/threading/platform_thread_ref.h",
+    "partition_alloc_base/time/time.cc",
+    "partition_alloc_base/time/time.h",
+    "partition_alloc_base/time/time_override.cc",
+    "partition_alloc_base/time/time_override.h",
+    "partition_alloc_base/types/strong_alias.h",
+    "partition_alloc_base/win/win_handle_types.h",
+    "partition_alloc_base/win/win_handle_types_list.inc",
+    "partition_alloc_base/win/windows_types.h",
+  ]
+
+  if (is_win) {
+    sources += [
+      "partition_alloc_base/debug/stack_trace_win.cc",
+      "partition_alloc_base/memory/page_size_win.cc",
+      "partition_alloc_base/process/process_handle_win.cc",
+      "partition_alloc_base/rand_util_win.cc",
+      "partition_alloc_base/scoped_clear_last_error_win.cc",
+      "partition_alloc_base/threading/platform_thread_win.cc",
+      "partition_alloc_base/time/time_win.cc",
+    ]
+  } else if (is_posix) {
+    sources += [
+      "partition_alloc_base/debug/stack_trace_posix.cc",
+      "partition_alloc_base/files/file_util.h",
+      "partition_alloc_base/files/file_util_posix.cc",
+      "partition_alloc_base/memory/page_size_posix.cc",
+      "partition_alloc_base/posix/safe_strerror.cc",
+      "partition_alloc_base/posix/safe_strerror.h",
+      "partition_alloc_base/process/process_handle_posix.cc",
+      "partition_alloc_base/rand_util_posix.cc",
+      "partition_alloc_base/threading/platform_thread_internal_posix.h",
+      "partition_alloc_base/threading/platform_thread_posix.cc",
+      "partition_alloc_base/time/time_conversion_posix.cc",
+    ]
+
+    if (is_linux || is_chromeos) {
+      sources += [ "partition_alloc_base/debug/stack_trace_linux.cc" ]
+    }
+
+    if (is_android || is_chromeos_ash) {
+      sources += [ "partition_alloc_base/time/time_android.cc" ]
+    }
+    if (is_apple) {
+      sources += [
+        "partition_alloc_base/debug/stack_trace_mac.cc",
+        "partition_alloc_base/time/time_apple.mm",
+      ]
+    } else {
+      sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
+    }
+  } else if (is_fuchsia) {
+    sources += [
+      "partition_alloc_base/fuchsia/fuchsia_logging.cc",
+      "partition_alloc_base/fuchsia/fuchsia_logging.h",
+      "partition_alloc_base/memory/page_size_posix.cc",
+      "partition_alloc_base/posix/safe_strerror.cc",
+      "partition_alloc_base/posix/safe_strerror.h",
+      "partition_alloc_base/rand_util_fuchsia.cc",
+      "partition_alloc_base/threading/platform_thread_internal_posix.h",
+      "partition_alloc_base/threading/platform_thread_posix.cc",
+      "partition_alloc_base/time/time_conversion_posix.cc",
+      "partition_alloc_base/time/time_fuchsia.cc",
+    ]
+  }
+  if (is_android) {
+    # Only the Android build requires native_library, and native_library
+    # depends on file_path, so file_path is added only when is_android is true.
+    sources += [
+      "partition_alloc_base/debug/stack_trace_android.cc",
+      "partition_alloc_base/files/file_path.cc",
+      "partition_alloc_base/files/file_path.h",
+      "partition_alloc_base/native_library.cc",
+      "partition_alloc_base/native_library.h",
+      "partition_alloc_base/native_library_posix.cc",
+    ]
+  }
+  if (is_apple) {
+    # Apple-specific utilities
+    sources += [
+      "partition_alloc_base/apple/foundation_util.h",
+      "partition_alloc_base/apple/foundation_util.mm",
+      "partition_alloc_base/apple/mach_logging.cc",
+      "partition_alloc_base/apple/mach_logging.h",
+      "partition_alloc_base/apple/scoped_cftyperef.h",
+      "partition_alloc_base/apple/scoped_typeref.h",
+    ]
+    if (is_ios) {
+      sources += [
+        "partition_alloc_base/ios/ios_util.h",
+        "partition_alloc_base/ios/ios_util.mm",
+        "partition_alloc_base/system/sys_info_ios.mm",
+      ]
+    }
+    if (is_mac) {
+      sources += [
+        "partition_alloc_base/mac/mac_util.h",
+        "partition_alloc_base/mac/mac_util.mm",
+        "partition_alloc_base/system/sys_info_mac.mm",
+      ]
+    }
+  }
+
+  public_deps = [
+    ":chromecast_buildflags",
+    ":chromeos_buildflags",
+    ":debugging_buildflags",
+    ":partition_alloc_buildflags",
+  ]
+  public_configs = [ ":public_includes" ]
+  configs += [
+    ":partition_alloc_base_implementation",
+    "//build/config/compiler:wexit_time_destructors",
+  ]
+
+  deps = []
+  if (is_fuchsia) {
+    public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
+  }
+
+  frameworks = []
+  if (is_apple) {
+    frameworks += [
+      "CoreFoundation.framework",
+      "Foundation.framework",
+    ]
+  }
+
+  configs -= _remove_configs
+  configs += _add_configs
+}
+
+source_set("allocator_shim") {
+  visibility = [ ":*" ]
+
+  sources = []
+  deps = [ ":allocator_base" ]
+  all_dependent_configs = []
+  public_configs = [ ":public_includes" ]
+  configs += [
+    ":partition_alloc_implementation",
+    "//build/config/compiler:wexit_time_destructors",
+  ]
+
+  configs -= _remove_configs
+  configs += _add_configs
+
+  if (use_allocator_shim) {
+    sources += [
+      "shim/allocator_shim.cc",
+      "shim/allocator_shim.h",
+      "shim/allocator_shim_internals.h",
+    ]
+    if (use_partition_alloc) {
+      sources += [
+        "shim/allocator_shim_default_dispatch_to_partition_alloc.cc",
+        "shim/allocator_shim_default_dispatch_to_partition_alloc.h",
+        "shim/nonscannable_allocator.cc",
+        "shim/nonscannable_allocator.h",
+      ]
+    }
+    if (is_android) {
+      sources += [
+        "shim/allocator_shim_override_cpp_symbols.h",
+        "shim/allocator_shim_override_linker_wrapped_symbols.h",
+      ]
+      all_dependent_configs += [ ":wrap_malloc_symbols" ]
+    }
+    if (is_apple) {
+      sources += [
+        "shim/allocator_shim_override_apple_default_zone.h",
+        "shim/allocator_shim_override_apple_symbols.h",
+        "shim/early_zone_registration_constants.h",
+      ]
+      configs += [ ":mac_no_default_new_delete_symbols" ]
+    }
+    if (is_chromeos || is_linux) {
+      sources += [
+        "shim/allocator_shim_override_cpp_symbols.h",
+        "shim/allocator_shim_override_glibc_weak_symbols.h",
+        "shim/allocator_shim_override_libc_symbols.h",
+      ]
+    }
+    if (is_win) {
+      sources += [
+        "shim/allocator_shim_override_ucrt_symbols_win.h",
+        "shim/winheap_stubs_win.cc",
+        "shim/winheap_stubs_win.h",
+      ]
+    }
+
+    if (!use_partition_alloc_as_malloc) {
+      if (is_android) {
+        sources += [
+          "shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+        ]
+      }
+      if (is_apple) {
+        sources +=
+            [ "shim/allocator_shim_default_dispatch_to_apple_zoned_malloc.cc" ]
+      }
+      if (is_chromeos || is_linux) {
+        sources += [ "shim/allocator_shim_default_dispatch_to_glibc.cc" ]
+      }
+      if (is_win) {
+        sources += [ "shim/allocator_shim_default_dispatch_to_winheap.cc" ]
+      }
+    }
+
+    deps += [
+      ":allocator_base",
+      ":allocator_core",
+      ":buildflags",
+    ]
+  }
+
+  if (is_apple) {
+    sources += [
+      "shim/allocator_interception_apple.h",
+      "shim/allocator_interception_apple.mm",
+      "shim/malloc_zone_functions_apple.cc",
+      "shim/malloc_zone_functions_apple.h",
+    ]
+
+    # Do not compile with ARC because this target has to interface with
+    # low-level Objective-C and having ARC would interfere.
+    configs -= [ "//build/config/compiler:enable_arc" ]
+    deps += [
+      ":allocator_base",
+      ":allocator_core",
+      ":buildflags",
+    ]
+  }
+}
+
+source_set("raw_ptr") {
+  # `gn check` is unhappy with most `#includes` when PA isn't
+  # actually built.
+  check_includes = use_partition_alloc
+  public = [
+    "pointers/raw_ptr.h",
+    "pointers/raw_ptr_cast.h",
+    "pointers/raw_ptr_exclusion.h",
+    "pointers/raw_ptr_noop_impl.h",
+    "pointers/raw_ref.h",
+  ]
+  sources = []
+  public_configs = [ ":public_includes" ]
+  configs += [ "//build/config/compiler:wexit_time_destructors" ]
+
+  if (enable_backup_ref_ptr_support) {
+    sources += [
+      "pointers/raw_ptr_backup_ref_impl.cc",
+      "pointers/raw_ptr_backup_ref_impl.h",
+    ]
+  } else if (use_hookable_raw_ptr) {
+    sources += [
+      "pointers/raw_ptr_hookable_impl.cc",
+      "pointers/raw_ptr_hookable_impl.h",
+    ]
+  } else if (use_asan_unowned_ptr) {
+    sources += [
+      "pointers/raw_ptr_asan_unowned_impl.cc",
+      "pointers/raw_ptr_asan_unowned_impl.h",
+    ]
+  } else {
+    sources += [ "pointers/raw_ptr_noop_impl.h" ]
+  }
+  if (use_partition_alloc) {
+    public_deps = [ ":partition_alloc" ]
+  }
+  deps = [ ":buildflags" ]
+
+  # See also: `partition_alloc_base/component_export.h`
+  defines = [ "IS_RAW_PTR_IMPL" ]
+
+  configs -= _remove_configs
+  configs += _add_configs
+}
+
+buildflag_header("partition_alloc_buildflags") {
+  header = "partition_alloc_buildflags.h"
+
+  _record_alloc_info = false
+
+  # GWP-ASan is tied to BRP's "refcount in previous slot" mode, whose
+  # enablement is already gated on BRP enablement.
+  _enable_gwp_asan_support = put_ref_count_in_previous_slot
+
+  # Pools are a logical concept when address space is 32-bit.
+  _glue_core_pools = glue_core_pools && has_64_bit_pointers
+
+  # Pointer compression requires 64-bit pointers.
+  _enable_pointer_compression =
+      enable_pointer_compression_support && has_64_bit_pointers
+
+  # Force-enable live BRP in all processes, ignoring the canonical
+  # experiment state of `PartitionAllocBackupRefPtr`.
+  #
+  # This is not exposed as a GN arg as it is not meant to be used by
+  # developers - it is simply a compile-time hinge that should be
+  # set in the experimental build and then reverted immediately.
+  _force_all_process_brp = false
+
+  # TODO(crbug.com/1151236): Need to refactor the following buildflags.
+  # The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
+  # partition alloc. For partition alloc, the flags are defined in
+  # gen/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h
+  # and that header is what partition alloc includes. For chrome, they are
+  # defined in gen/base/allocator/buildflags.h, which is what chrome includes.
+  flags = [
+    "HAS_64_BIT_POINTERS=$has_64_bit_pointers",
+
+    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
+    "USE_PARTITION_ALLOC=$use_partition_alloc",
+    "USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
+
+    "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
+    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
+    "ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
+    "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
+    "ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
+    "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
+    "ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
+    "ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
+    "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
+    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
+    "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
+    "USE_ASAN_UNOWNED_PTR=$use_asan_unowned_ptr",
+    "USE_HOOKABLE_RAW_PTR=$use_hookable_raw_ptr",
+    "ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
+    "FORCIBLY_ENABLE_BACKUP_REF_PTR_IN_ALL_PROCESSES=$_force_all_process_brp",
+
+    "FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
+
+    "RECORD_ALLOC_INFO=$_record_alloc_info",
+    "USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
+    "GLUE_CORE_POOLS=$_glue_core_pools",
+    "ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
+    "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
+    "USE_FREELIST_POOL_OFFSETS=$use_freelist_pool_offsets",
+
+    "USE_STARSCAN=$use_starscan",
+    "PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",
+
+    "ENABLE_PKEYS=$enable_pkeys",
+    "ENABLE_THREAD_ISOLATION=$enable_pkeys",
+  ]
+}
+
+buildflag_header("raw_ptr_buildflags") {
+  header = "raw_ptr_buildflags.h"
+
+  flags = [
+    "RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
+    "RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
+    "RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
+  ]
+}
+
+buildflag_header("chromecast_buildflags") {
+  header = "chromecast_buildflags.h"
+
+  flags = [
+    "PA_IS_CAST_ANDROID=$is_cast_android",
+    "PA_IS_CASTOS=$is_castos",
+  ]
+}
+
+buildflag_header("chromeos_buildflags") {
+  header = "chromeos_buildflags.h"
+
+  flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
+}
+
+buildflag_header("debugging_buildflags") {
+  header = "debugging_buildflags.h"
+  header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
+
+  # Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
+  # but avails it as a buildflag.
+  _dcheck_is_on = is_debug || dcheck_always_on
+
+  flags = [
+    "PA_DCHECK_IS_ON=$_dcheck_is_on",
+    "PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
+    "PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
+    "PA_CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
+  ]
+}
+
+group("buildflags") {
+  public_deps = [
+    ":chromecast_buildflags",
+    ":chromeos_buildflags",
+    ":debugging_buildflags",
+    ":partition_alloc_buildflags",
+    ":raw_ptr_buildflags",
+  ]
+  public_configs = [ ":public_includes" ]
+}
+# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
+# move test code here. i.e. test("partition_alloc_tests") { ... } and
+# test("partition_alloc_perftests").
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.cc b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.cc
new file mode 100644
index 0000000..8b35d2a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.cc
@@ -0,0 +1,571 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
+#include <sys/mman.h>
+#endif
+
+namespace partition_alloc::internal {
+
+AddressPoolManager AddressPoolManager::singleton_;
+
+// static
+AddressPoolManager& AddressPoolManager::GetInstance() {
+  return singleton_;
+}
+
+namespace {
+// Allocations are all performed on behalf of PartitionAlloc.
+constexpr PageTag kPageTag = PageTag::kPartitionAlloc;
+
+}  // namespace
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+
+namespace {
+
+// This will crash if the range cannot be decommitted.
+void DecommitPages(uintptr_t address, size_t size) {
+  // Callers rely on the pages being zero-initialized when recommitting them.
+  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
+  // particular on macOS, but |DecommitAndZeroSystemPages| does.
+  DecommitAndZeroSystemPages(address, size, kPageTag);
+}
+
+}  // namespace
+
+void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
+  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
+  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
+  PA_CHECK(handle > 0 && handle <= std::size(pools_));
+
+  Pool* pool = GetPool(handle);
+  PA_CHECK(!pool->IsInitialized());
+  pool->Initialize(ptr, length);
+}
+
+void AddressPoolManager::GetPoolUsedSuperPages(
+    pool_handle handle,
+    std::bitset<kMaxSuperPagesInPool>& used) {
+  Pool* pool = GetPool(handle);
+  if (!pool) {
+    return;
+  }
+
+  pool->GetUsedSuperPages(used);
+}
+
+uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
+  Pool* pool = GetPool(handle);
+  if (!pool) {
+    return 0;
+  }
+
+  return pool->GetBaseAddress();
+}
+
+void AddressPoolManager::ResetForTesting() {
+  for (size_t i = 0; i < std::size(pools_); ++i) {
+    pools_[i].Reset();
+  }
+}
+
+void AddressPoolManager::Remove(pool_handle handle) {
+  Pool* pool = GetPool(handle);
+  PA_DCHECK(pool->IsInitialized());
+  pool->Reset();
+}
+
+uintptr_t AddressPoolManager::Reserve(pool_handle handle,
+                                      uintptr_t requested_address,
+                                      size_t length) {
+  Pool* pool = GetPool(handle);
+  if (!requested_address) {
+    return pool->FindChunk(length);
+  }
+  const bool is_available = pool->TryReserveChunk(requested_address, length);
+  if (is_available) {
+    return requested_address;
+  }
+  return pool->FindChunk(length);
+}
+
+void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
+                                              uintptr_t address,
+                                              size_t length) {
+  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
+  Pool* pool = GetPool(handle);
+  PA_DCHECK(pool->IsInitialized());
+  DecommitPages(address, length);
+  pool->FreeChunk(address, length);
+}
+
+void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
+  PA_CHECK(ptr != 0);
+  PA_CHECK(!(ptr & kSuperPageOffsetMask));
+  PA_CHECK(!(length & kSuperPageOffsetMask));
+  address_begin_ = ptr;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  address_end_ = ptr + length;
+  PA_DCHECK(address_begin_ < address_end_);
+#endif
+
+  total_bits_ = length / kSuperPageSize;
+  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);
+
+  ScopedGuard scoped_lock(lock_);
+  alloc_bitset_.reset();
+  bit_hint_ = 0;
+}
+
+bool AddressPoolManager::Pool::IsInitialized() {
+  return address_begin_ != 0;
+}
+
+void AddressPoolManager::Pool::Reset() {
+  address_begin_ = 0;
+}
+
+void AddressPoolManager::Pool::GetUsedSuperPages(
+    std::bitset<kMaxSuperPagesInPool>& used) {
+  ScopedGuard scoped_lock(lock_);
+
+  PA_DCHECK(IsInitialized());
+  used = alloc_bitset_;
+}
+
+uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
+  PA_DCHECK(IsInitialized());
+  return address_begin_;
+}
+
+uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
+  ScopedGuard scoped_lock(lock_);
+
+  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
+  const size_t need_bits = requested_size >> kSuperPageShift;
+
+  // Use first-fit policy to find an available chunk from free chunks. Start
+  // from |bit_hint_|, because we know there are no free chunks before.
+  size_t beg_bit = bit_hint_;
+  size_t curr_bit = bit_hint_;
+  while (true) {
+    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
+    // |total_bits_|, return 0 to signal that no free chunk was found.
+    size_t end_bit = beg_bit + need_bits;
+    if (end_bit > total_bits_) {
+      return 0;
+    }
+
+    bool found = true;
+    for (; curr_bit < end_bit; ++curr_bit) {
+      if (alloc_bitset_.test(curr_bit)) {
+        // The bit was set, so this chunk isn't entirely free. Set |found=false|
+        // to ensure the outer loop continues. However, continue the inner loop
+        // to set |beg_bit| just past the last set bit in the investigated
+        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
+        // next outer loop pass from checking the same bits.
+        beg_bit = curr_bit + 1;
+        found = false;
+        if (bit_hint_ == curr_bit) {
+          ++bit_hint_;
+        }
+      }
+    }
+
+    // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
+    // mark as allocated) and return the allocated address.
+    if (found) {
+      for (size_t i = beg_bit; i < end_bit; ++i) {
+        PA_DCHECK(!alloc_bitset_.test(i));
+        alloc_bitset_.set(i);
+      }
+      if (bit_hint_ == beg_bit) {
+        bit_hint_ = end_bit;
+      }
+      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+      PA_DCHECK(address + requested_size <= address_end_);
+#endif
+      return address;
+    }
+  }
+
+  PA_NOTREACHED();
+}
+
+bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
+                                               size_t requested_size) {
+  ScopedGuard scoped_lock(lock_);
+  PA_DCHECK(!(address & kSuperPageOffsetMask));
+  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
+  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
+  const size_t need_bits = requested_size / kSuperPageSize;
+  const size_t end_bit = begin_bit + need_bits;
+  // Check that requested address is not too high.
+  if (end_bit > total_bits_) {
+    return false;
+  }
+  // Check if any bit of the requested region is set already.
+  for (size_t i = begin_bit; i < end_bit; ++i) {
+    if (alloc_bitset_.test(i)) {
+      return false;
+    }
+  }
+  // Otherwise, set the bits.
+  for (size_t i = begin_bit; i < end_bit; ++i) {
+    alloc_bitset_.set(i);
+  }
+  return true;
+}
+
+void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
+  ScopedGuard scoped_lock(lock_);
+
+  PA_DCHECK(!(address & kSuperPageOffsetMask));
+  PA_DCHECK(!(free_size & kSuperPageOffsetMask));
+
+  PA_DCHECK(address_begin_ <= address);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  PA_DCHECK(address + free_size <= address_end_);
+#endif
+
+  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
+  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
+  for (size_t i = beg_bit; i < end_bit; ++i) {
+    PA_DCHECK(alloc_bitset_.test(i));
+    alloc_bitset_.reset(i);
+  }
+  bit_hint_ = std::min(bit_hint_, beg_bit);
+}
+
+void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
+  std::bitset<kMaxSuperPagesInPool> pages;
+  size_t i;
+  {
+    ScopedGuard scoped_lock(lock_);
+    pages = alloc_bitset_;
+    i = bit_hint_;
+  }
+
+  stats->usage = pages.count();
+
+  size_t largest_run = 0;
+  size_t current_run = 0;
+  for (; i < total_bits_; ++i) {
+    if (!pages[i]) {
+      current_run += 1;
+      continue;
+    } else if (current_run > largest_run) {
+      largest_run = current_run;
+    }
+    current_run = 0;
+  }
+
+  // Fell out of the loop with the last bit being zero. Check once more.
+  if (current_run > largest_run) {
+    largest_run = current_run;
+  }
+  stats->largest_available_reservation = largest_run;
+}
+
+void AddressPoolManager::GetPoolStats(const pool_handle handle,
+                                      PoolStats* stats) {
+  Pool* pool = GetPool(handle);
+  if (!pool->IsInitialized()) {
+    return;
+  }
+  pool->GetStats(stats);
+}
+
+bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
+  // Get 64-bit pool stats.
+  GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (IsConfigurablePoolAvailable()) {
+    GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
+  }
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats);
+#endif
+  return true;
+}
+
+#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+static_assert(
+    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
+        0,
+    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
+static_assert(
+    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
+    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
+static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
+                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
+              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
+              "kGuardOffsetOfBRPPoolBitmap.");
+
+template <size_t bitsize>
+void SetBitmap(std::bitset<bitsize>& bitmap,
+               size_t start_bit,
+               size_t bit_length) {
+  const size_t end_bit = start_bit + bit_length;
+  PA_DCHECK(start_bit <= bitsize);
+  PA_DCHECK(end_bit <= bitsize);
+
+  for (size_t i = start_bit; i < end_bit; ++i) {
+    PA_DCHECK(!bitmap.test(i));
+    bitmap.set(i);
+  }
+}
+
+template <size_t bitsize>
+void ResetBitmap(std::bitset<bitsize>& bitmap,
+                 size_t start_bit,
+                 size_t bit_length) {
+  const size_t end_bit = start_bit + bit_length;
+  PA_DCHECK(start_bit <= bitsize);
+  PA_DCHECK(end_bit <= bitsize);
+
+  for (size_t i = start_bit; i < end_bit; ++i) {
+    PA_DCHECK(bitmap.test(i));
+    bitmap.reset(i);
+  }
+}
+
+uintptr_t AddressPoolManager::Reserve(pool_handle handle,
+                                      uintptr_t requested_address,
+                                      size_t length) {
+  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
+  uintptr_t address =
+      AllocPages(requested_address, length, kSuperPageSize,
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 kPageTag);
+  return address;
+}
+
+void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
+                                              uintptr_t address,
+                                              size_t length) {
+  PA_DCHECK(!(address & kSuperPageOffsetMask));
+  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
+  FreePages(address, length);
+}
+
+void AddressPoolManager::MarkUsed(pool_handle handle,
+                                  uintptr_t address,
+                                  size_t length) {
+  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
+  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (handle == kBRPPoolHandle) {
+    PA_DCHECK(
+        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
+
+    // Make IsManagedByPartitionAllocBRPPool() return false when an address
+    // inside the first or the last PartitionPageSize()-bytes block is given:
+    //
+    //          ------+---+---------------+---+----
+    // memory   ..... | B | managed by PA | B | ...
+    // regions  ------+---+---------------+---+----
+    //
+    // B: PartitionPageSize()-bytes block. This is used internally by the
+    // allocator and is not available for callers.
+    //
+    // This is required to avoid a crash caused by the following code:
+    //   {
+    //     // Assume this allocation happens outside of PartitionAlloc.
+    //     raw_ptr<T> ptr = new T[20];
+    //     for (size_t i = 0; i < 20; i ++) { ptr++; }
+    //     // |ptr| may point to an address inside 'B'.
+    //   }
+    //
+    // Suppose that |ptr| points to an address inside B after the loop. If
+    // IsManagedByPartitionAllocBRPPool(ptr) were to return true, ~raw_ptr<T>()
+    // would crash, since the memory is not allocated by PartitionAlloc.
+    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
+              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
+                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
+              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
+                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
+  } else
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  {
+    PA_DCHECK(handle == kRegularPoolHandle);
+    PA_DCHECK(
+        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
+        0);
+    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
+              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
+              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
+  }
+}
+
+void AddressPoolManager::MarkUnused(pool_handle handle,
+                                    uintptr_t address,
+                                    size_t length) {
+  // Address regions allocated for normal buckets are never released, so this
+  // function can only be called for direct map. However, do not DCHECK on
+  // IsManagedByDirectMap(address), because many tests test this function using
+  // small allocations.
+
+  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
+  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (handle == kBRPPoolHandle) {
+    PA_DCHECK(
+        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
+
+    // Make IsManagedByPartitionAllocBRPPool() return false when an address
+    // inside the first or the last PartitionPageSize()-bytes block is given.
+    // (See MarkUsed comment)
+    ResetBitmap(
+        AddressPoolManagerBitmap::brp_pool_bits_,
+        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
+            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
+        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
+            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
+  } else
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  {
+    PA_DCHECK(handle == kRegularPoolHandle);
+    PA_DCHECK(
+        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
+        0);
+    ResetBitmap(
+        AddressPoolManagerBitmap::regular_pool_bits_,
+        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
+        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
+  }
+}
+
+void AddressPoolManager::ResetForTesting() {
+  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
+  AddressPoolManagerBitmap::regular_pool_bits_.reset();
+  AddressPoolManagerBitmap::brp_pool_bits_.reset();
+}
+
+namespace {
+
+// Counts super pages in use represented by `bitmap`.
+template <size_t bitsize>
+size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
+                           const size_t bits_per_super_page) {
+  size_t count = 0;
+  size_t bit_index = 0;
+
+  // Stride over super pages.
+  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
+    // Stride over the bits comprising the super page.
+    for (bit_index = super_page_index * bits_per_super_page;
+         bit_index < (super_page_index + 1) * bits_per_super_page &&
+         bit_index < bitsize;
+         ++bit_index) {
+      if (bitmap[bit_index]) {
+        count += 1;
+        // Move on to the next super page.
+        break;
+      }
+    }
+  }
+  return count;
+}
+
+}  // namespace
+
+bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
+  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
+  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
+  {
+    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
+    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
+    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
+  }  // scoped_lock
+
+  // Pool usage is read out from the address pool bitmaps.
+  // The output stats are sized in super pages, so we interpret
+  // the bitmaps into super page usage.
+  static_assert(
+      kSuperPageSize %
+              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
+          0,
+      "information loss when calculating metrics");
+  constexpr size_t kRegularPoolBitsPerSuperPage =
+      kSuperPageSize /
+      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
+
+  // Get 32-bit pool usage.
+  stats->regular_pool_stats.usage =
+      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  static_assert(
+      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
+          0,
+      "information loss when calculating metrics");
+  constexpr size_t kBRPPoolBitsPerSuperPage =
+      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
+  stats->brp_pool_stats.usage =
+      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);
+
+  // Get blocklist size.
+  for (const auto& blocked :
+       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
+    if (blocked.load(std::memory_order_relaxed)) {
+      stats->blocklist_size += 1;
+    }
+  }
+
+  // Count failures in finding non-blocklisted addresses.
+  stats->blocklist_hit_count =
+      AddressPoolManagerBitmap::blocklist_hit_count_.load(
+          std::memory_order_relaxed);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  return true;
+}
+
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
+  AddressSpaceStats stats{};
+  if (GetStats(&stats)) {
+    dumper->DumpStats(&stats);
+  }
+}
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+// This function just exists to static_assert the layout of the private fields
+// in Pool.
+void AddressPoolManager::AssertThreadIsolatedLayout() {
+  constexpr size_t last_pool_offset =
+      offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);
+  constexpr size_t alloc_bitset_offset =
+      last_pool_offset + offsetof(Pool, alloc_bitset_);
+  static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
+  static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
+}
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+}  // namespace partition_alloc::internal
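Pool::FindChunk above walks a bitset with a first-fit policy, restarting just past the last used bit it saw and keeping |bit_hint_| as the first possibly-free bit. The stand-alone sketch below mirrors that search; names and the hint handling are simplified for illustration:

// Stand-alone first-fit search over a bitset, mirroring Pool::FindChunk.
// Returns the first index of |need| consecutive clear bits, or SIZE_MAX.
#include <bitset>
#include <cstddef>
#include <cstdint>

template <size_t N>
size_t FindFirstFit(const std::bitset<N>& used, size_t hint, size_t need) {
  size_t beg = hint;
  while (beg + need <= N) {
    size_t i = beg;
    // Scan the candidate window; stop at the first used bit.
    while (i < beg + need && !used.test(i)) {
      ++i;
    }
    if (i == beg + need) {
      return beg;  // Found |need| consecutive free bits.
    }
    beg = i + 1;  // Restart just past the used bit, like FindChunk does.
  }
  return SIZE_MAX;  // No free run large enough.
}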
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h
new file mode 100644
index 0000000..2fc8ca2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h
@@ -0,0 +1,202 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
+
+#include <bitset>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h"
+#endif
+
+namespace partition_alloc {
+
+class AddressSpaceStatsDumper;
+struct AddressSpaceStats;
+struct PoolStats;
+
+}  // namespace partition_alloc
+
+namespace partition_alloc::internal {
+
+// (64-bit version)
+// AddressPoolManager takes a reserved virtual address space and manages address
+// space allocation.
+//
+// AddressPoolManager (currently) supports up to 4 pools. Each pool manages a
+// contiguous reserved address space. Alloc() takes a pool_handle and returns
+// address regions from the specified pool. Free() also takes a pool_handle and
+// returns the address region back to the manager.
+//
+// (32-bit version)
+// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
+// address regions using bitmaps. IsManagedByPartitionAlloc*Pool use the bitmaps
+// to judge whether a given address is in a pool that supports BackupRefPtr or
+// in a pool that doesn't. All PartitionAlloc allocations must be in either of
+// the pools.
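+//
+// A rough usage sketch (using the handle and size constants exercised in
+// address_pool_manager_unittest.cc); error handling omitted:
+//
+//   uintptr_t base = AddressPoolManager::GetInstance().Reserve(
+//       kRegularPoolHandle, 0, kSuperPageSize);
+//   // ... use the reserved region ...
+//   AddressPoolManager::GetInstance().UnreserveAndDecommit(
+//       kRegularPoolHandle, base, kSuperPageSize);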
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+    PA_THREAD_ISOLATED_ALIGN AddressPoolManager {
+ public:
+  static AddressPoolManager& GetInstance();
+
+  AddressPoolManager(const AddressPoolManager&) = delete;
+  AddressPoolManager& operator=(const AddressPoolManager&) = delete;
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  void Add(pool_handle handle, uintptr_t address, size_t length);
+  void Remove(pool_handle handle);
+
+  // Populate a |used| bitset of superpages currently in use.
+  void GetPoolUsedSuperPages(pool_handle handle,
+                             std::bitset<kMaxSuperPagesInPool>& used);
+
+  // Return the base address of a pool.
+  uintptr_t GetPoolBaseAddress(pool_handle handle);
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  // Reserves address space from the pool.
+  uintptr_t Reserve(pool_handle handle,
+                    uintptr_t requested_address,
+                    size_t length);
+
+  // Frees address space back to the pool and decommits underlying system pages.
+  void UnreserveAndDecommit(pool_handle handle,
+                            uintptr_t address,
+                            size_t length);
+  void ResetForTesting();
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
+  void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
+
+  static bool IsManagedByRegularPool(uintptr_t address) {
+    return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
+  }
+
+  static bool IsManagedByBRPPool(uintptr_t address) {
+    return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
+  }
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  void DumpStats(AddressSpaceStatsDumper* dumper);
+
+ private:
+  friend class AddressPoolManagerForTesting;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // If we use a thread isolated pool, we need to write-protect its metadata.
+  // Allow the function to get access to the pool pointer.
+  friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
+#endif
+
+  constexpr AddressPoolManager() = default;
+  ~AddressPoolManager() = default;
+
+  // Populates `stats` if applicable.
+  // Returns whether `stats` was populated. (They might not be, e.g.
+  // if PartitionAlloc is wholly unused in this process.)
+  bool GetStats(AddressSpaceStats* stats);
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  static void AssertThreadIsolatedLayout();
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  class Pool {
+   public:
+    constexpr Pool() = default;
+    ~Pool() = default;
+
+    Pool(const Pool&) = delete;
+    Pool& operator=(const Pool&) = delete;
+
+    void Initialize(uintptr_t ptr, size_t length);
+    bool IsInitialized();
+    void Reset();
+
+    uintptr_t FindChunk(size_t size);
+    void FreeChunk(uintptr_t address, size_t size);
+
+    bool TryReserveChunk(uintptr_t address, size_t size);
+
+    void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
+    uintptr_t GetBaseAddress();
+
+    void GetStats(PoolStats* stats);
+
+   private:
+    // The lock needs to be the first field in this class.
+    // We write-protect the pool in the ThreadIsolated case, except that the
+    // lock can be used without acquiring write-permission first (via
+    // DumpStats()). So instead of protecting the whole variable, we only
+    // protect the memory after the lock.
+    // See the padding computation for `pad_` in AddressPoolManager below.
+    Lock lock_;
+
+    // The bitset stores the allocation state of the address pool. 1 bit per
+    // super-page: 1 = allocated, 0 = free.
+    std::bitset<kMaxSuperPagesInPool> alloc_bitset_ PA_GUARDED_BY(lock_);
+
+    // The index of a bit in the bitset before which we know for sure all bits
+    // are 1. This is a best-effort hint in the sense that there still may be
+    // lots of 1s after this index, but at least we know there is no point in
+    // starting the search before it.
+    size_t bit_hint_ PA_GUARDED_BY(lock_) = 0;
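+    // For example, if bits [0, 42) are all set and bit_hint_ == 42, a search
+    // for a free slot can safely start at index 42.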
+
+    size_t total_bits_ = 0;
+    uintptr_t address_begin_ = 0;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    uintptr_t address_end_ = 0;
+#endif
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    friend class AddressPoolManager;
+    friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  };
+
+  PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
+    PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
+    return &pools_[handle - 1];
+  }
+
+  // Gets the stats for the pool identified by `handle`, if
+  // initialized.
+  void GetPoolStats(pool_handle handle, PoolStats* stats);
+
+  // If thread isolation support is enabled, we need to write-protect the
+  // isolated pool (which needs to be last). For this, we need to add padding in
+  // front of the pools so that the isolated one starts on a page boundary.
+  // We also skip the Lock at the beginning of the pool, since it needs to be
+  // usable in contexts where we haven't enabled write access to the pool
+  // memory.
+  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(
+      Pool,
+      kNumPools,
+      offsetof(Pool, alloc_bitset_))] = {};
+  Pool pools_[kNumPools];
+
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  static PA_CONSTINIT AddressPoolManager singleton_;
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.cc b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.cc
new file mode 100644
index 0000000..84884ad
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.cc
@@ -0,0 +1,37 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+
+namespace partition_alloc::internal {
+
+namespace {
+
+Lock g_lock;
+
+}  // namespace
+
+Lock& AddressPoolManagerBitmap::GetLock() {
+  return g_lock;
+}
+
+std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
+    AddressPoolManagerBitmap::regular_pool_bits_;  // GUARDED_BY(GetLock())
+std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
+    AddressPoolManagerBitmap::brp_pool_bits_;  // GUARDED_BY(GetLock())
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+std::array<std::atomic_bool,
+           AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
+    AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
+std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+}  // namespace partition_alloc::internal
+
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h
new file mode 100644
index 0000000..ebbe17e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h
@@ -0,0 +1,189 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
+
+#include <array>
+#include <atomic>
+#include <bitset>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+
+namespace partition_alloc {
+
+namespace internal {
+
+// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
+// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
+// support it. All PartitionAlloc allocations must be in either of the pools.
+//
+// This code is specific to 32-bit systems.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
+ public:
+  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
+  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;
+
+  // For BRP pool, we use partition page granularity to eliminate the guard
+  // pages from the bitmap at the ends:
+  // - Eliminating the guard page at the beginning is needed so that pointers
+  //   to the end of an allocation that immediately precede a super page in BRP
+  //   pool don't accidentally fall into that pool.
+  // - Eliminating the guard page at the end is to ensure that the last page
+  //   of the address space isn't in the BRP pool. This allows using sentinels
+  //   like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
+  //   on an invalid address. (Note, 64-bit systems don't have this problem as
+  //   the upper half of the address space always belongs to the OS.)
+  //
+  // Note, direct map allocations also belong to this pool. The same logic as
+  // above applies. It is important to note, however, that the granularity used
+  // here has to be the minimum of the partition page size and the direct map
+  // allocation granularity. Since DirectMapAllocationGranularity() is no
+  // smaller than PageAllocationGranularity(), we don't need to decrease the
+  // bitmap granularity any further.
+  static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
+  static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
+  static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
+                "");
+  static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
+  static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
+  static constexpr size_t kBRPPoolBits =
+      kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
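+  // For instance, assuming a typical 16 KiB partition page, this works out to
+  // 4 GiB / 16 KiB = 256 Ki bits of bitmap (32 KiB of storage).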
+
+  // Regular pool may include both normal bucket and direct map allocations, so
+  // the bitmap granularity has to be at least as small as
+  // DirectMapAllocationGranularity(). No need to eliminate guard pages at the
+  // ends, as this is a BackupRefPtr-specific concern, hence no need to lower
+  // the granularity to partition page size.
+  static constexpr size_t kBitShiftOfRegularPoolBitmap =
+      DirectMapAllocationGranularityShift();
+  static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
+      DirectMapAllocationGranularity();
+  static_assert(kBytesPer1BitOfRegularPoolBitmap ==
+                    1 << kBitShiftOfRegularPoolBitmap,
+                "");
+  static constexpr size_t kRegularPoolBits =
+      kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
+
+  // Returns false for nullptr.
+  static bool IsManagedByRegularPool(uintptr_t address) {
+    static_assert(
+        std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
+            regular_pool_bits_.size(),
+        "The bitmap is too small, will result in unchecked out of bounds "
+        "accesses.");
+    // It is safe to read |regular_pool_bits_| without a lock since the caller
+    // is responsible for guaranteeing that the address is inside a valid
+    // allocation and the deallocation call won't race with this call.
+    return PA_TS_UNCHECKED_READ(
+        regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
+  }
+
+  // Returns false for nullptr.
+  static bool IsManagedByBRPPool(uintptr_t address) {
+    static_assert(std::numeric_limits<uintptr_t>::max() >>
+                      kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
+                  "The bitmap is too small, will result in unchecked out of "
+                  "bounds accesses.");
+    // It is safe to read |brp_pool_bits_| without a lock since the caller
+    // is responsible for guaranteeing that the address is inside a valid
+    // allocation and the deallocation call won't race with this call.
+    return PA_TS_UNCHECKED_READ(
+        brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
+  }
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  static void BanSuperPageFromBRPPool(uintptr_t address) {
+    brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
+        true, std::memory_order_relaxed);
+  }
+
+  static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
+    // The only potentially dangerous scenario in which this check is used is
+    // when the assignment of the first raw_ptr<T> object for an address
+    // allocated outside the BRP pool is racing with the allocation of a new
+    // super page at the same address. We assume that if raw_ptr<T> is being
+    // initialized with a raw pointer, the associated allocation is "alive";
+    // otherwise, the issue should be fixed by rewriting the raw pointer
+    // variable as raw_ptr<T>. In the worst case, when such a fix is
+    // impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite of
+    // the problematic field. If the above assumption holds, the existing
+    // allocation will prevent us from reserving the super-page region and,
+    // thus, from hitting the race condition. Since we rely on that external
+    // synchronization, the relaxed memory ordering should be sufficient.
+    return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
+        std::memory_order_relaxed);
+  }
+
+  static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+ private:
+  friend class AddressPoolManager;
+
+  static Lock& GetLock();
+
+  static std::bitset<kRegularPoolBits> regular_pool_bits_
+      PA_GUARDED_BY(GetLock());
+  static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
+      brp_forbidden_super_page_map_;
+  static std::atomic_size_t blocklist_hit_count_;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+};
+
+}  // namespace internal
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
+  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
+  // No need to add IsManagedByConfigurablePool, because Configurable Pool
+  // doesn't exist on 32-bit.
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
+#endif
+  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+         || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
+#endif
+      ;
+}
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
+  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
+}
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
+  return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
+}
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
+    uintptr_t address) {
+  // The Configurable Pool is only available on 64-bit builds.
+  return false;
+}
+
+PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
+  // The Configurable Pool is only available on 64-bit builds.
+  return false;
+}
+
+}  // namespace partition_alloc
+
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h
new file mode 100644
index 0000000..8c1b20f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h
@@ -0,0 +1,14 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
+
+namespace partition_alloc::internal {
+
+enum pool_handle : unsigned;
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_unittest.cc
new file mode 100644
index 0000000..f2501f0
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_unittest.cc
@@ -0,0 +1,405 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal {
+
+class AddressSpaceStatsDumperForTesting final : public AddressSpaceStatsDumper {
+ public:
+  AddressSpaceStatsDumperForTesting() = default;
+  ~AddressSpaceStatsDumperForTesting() final = default;
+
+  void DumpStats(
+      const partition_alloc::AddressSpaceStats* address_space_stats) override {
+    regular_pool_usage_ = address_space_stats->regular_pool_stats.usage;
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+    regular_pool_largest_reservation_ =
+        address_space_stats->regular_pool_stats.largest_available_reservation;
+#endif
+#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    blocklist_size_ = address_space_stats->blocklist_size;
+#endif
+  }
+
+  size_t regular_pool_usage_ = 0;
+  size_t regular_pool_largest_reservation_ = 0;
+  size_t blocklist_size_ = 0;
+};
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+
+class AddressPoolManagerForTesting : public AddressPoolManager {
+ public:
+  AddressPoolManagerForTesting() = default;
+  ~AddressPoolManagerForTesting() = default;
+};
+
+class PartitionAllocAddressPoolManagerTest : public testing::Test {
+ protected:
+  PartitionAllocAddressPoolManagerTest() = default;
+  ~PartitionAllocAddressPoolManagerTest() override = default;
+
+  void SetUp() override {
+    manager_ = std::make_unique<AddressPoolManagerForTesting>();
+    base_address_ =
+        AllocPages(kPoolSize, kSuperPageSize,
+                   PageAccessibilityConfiguration(
+                       PageAccessibilityConfiguration::kInaccessible),
+                   PageTag::kPartitionAlloc);
+    ASSERT_TRUE(base_address_);
+    manager_->Add(kRegularPoolHandle, base_address_, kPoolSize);
+    pool_ = kRegularPoolHandle;
+  }
+
+  void TearDown() override {
+    manager_->Remove(pool_);
+    FreePages(base_address_, kPoolSize);
+    manager_.reset();
+  }
+
+  AddressPoolManager* GetAddressPoolManager() { return manager_.get(); }
+
+  static constexpr size_t kPoolSize = kPoolMaxSize;
+  static constexpr size_t kPageCnt = kPoolSize / kSuperPageSize;
+
+  std::unique_ptr<AddressPoolManagerForTesting> manager_;
+  uintptr_t base_address_;
+  pool_handle pool_;
+};
+
+TEST_F(PartitionAllocAddressPoolManagerTest, TooLargePool) {
+  uintptr_t base_addr = 0x4200000;
+  const pool_handle extra_pool = static_cast<pool_handle>(2u);
+  static_assert(kNumPools >= 2);
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      GetAddressPoolManager()->Add(extra_pool, base_addr,
+                                   kPoolSize + kSuperPageSize),
+      "");
+}
+
+TEST_F(PartitionAllocAddressPoolManagerTest, ManyPages) {
+  EXPECT_EQ(
+      GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize),
+      base_address_);
+  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_,
+                                                kPageCnt * kSuperPageSize);
+
+  EXPECT_EQ(
+      GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize),
+      base_address_);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_,
+                                                kPageCnt * kSuperPageSize);
+}
+
+TEST_F(PartitionAllocAddressPoolManagerTest, PagesFragmented) {
+  uintptr_t addrs[kPageCnt];
+  for (size_t i = 0; i < kPageCnt; ++i) {
+    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
+    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
+  }
+  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
+  // Free every other super page, so that we have plenty of free space, but
+  // none of the empty spaces can fit 2 super pages.
+  for (size_t i = 1; i < kPageCnt; i += 2) {
+    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
+                                                  kSuperPageSize);
+  }
+  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u);
+  // Reserve freed super pages back, so that there are no free ones.
+  for (size_t i = 1; i < kPageCnt; i += 2) {
+    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
+    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
+  }
+  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
+  // Lastly, clean up.
+  for (uintptr_t addr : addrs) {
+    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addr, kSuperPageSize);
+  }
+}
+
+TEST_F(PartitionAllocAddressPoolManagerTest, GetUsedSuperpages) {
+  uintptr_t addrs[kPageCnt];
+  for (size_t i = 0; i < kPageCnt; ++i) {
+    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
+    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
+  }
+  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
+
+  std::bitset<kMaxSuperPagesInPool> used_super_pages;
+  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);
+
+  // We expect every bit to be set.
+  for (size_t i = 0; i < kPageCnt; ++i) {
+    ASSERT_TRUE(used_super_pages.test(i));
+  }
+
+  // Free every other super page, so that we have plenty of free space, but none
+  // of the empty spaces can fit 2 super pages.
+  for (size_t i = 1; i < kPageCnt; i += 2) {
+    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
+                                                  kSuperPageSize);
+  }
+
+  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u);
+
+  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);
+
+  // We expect every other bit to be set.
+  for (size_t i = 0; i < kPageCnt; i++) {
+    if (i % 2 == 0) {
+      ASSERT_TRUE(used_super_pages.test(i));
+    } else {
+      ASSERT_FALSE(used_super_pages.test(i));
+    }
+  }
+
+  // Free the even numbered super pages.
+  for (size_t i = 0; i < kPageCnt; i += 2) {
+    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
+                                                  kSuperPageSize);
+  }
+
+  // Finally check to make sure all bits are zero in the used superpage bitset.
+  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);
+
+  for (size_t i = 0; i < kPageCnt; i++) {
+    ASSERT_FALSE(used_super_pages.test(i));
+  }
+}
+
+TEST_F(PartitionAllocAddressPoolManagerTest, IrregularPattern) {
+  uintptr_t a1 = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
+  EXPECT_EQ(a1, base_address_);
+  uintptr_t a2 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize);
+  EXPECT_EQ(a2, base_address_ + 1 * kSuperPageSize);
+  uintptr_t a3 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize);
+  EXPECT_EQ(a3, base_address_ + 3 * kSuperPageSize);
+  uintptr_t a4 = GetAddressPoolManager()->Reserve(pool_, 0, 4 * kSuperPageSize);
+  EXPECT_EQ(a4, base_address_ + 6 * kSuperPageSize);
+  uintptr_t a5 = GetAddressPoolManager()->Reserve(pool_, 0, 5 * kSuperPageSize);
+  EXPECT_EQ(a5, base_address_ + 10 * kSuperPageSize);
+
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a4, 4 * kSuperPageSize);
+  uintptr_t a6 = GetAddressPoolManager()->Reserve(pool_, 0, 6 * kSuperPageSize);
+  EXPECT_EQ(a6, base_address_ + 15 * kSuperPageSize);
+
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a5, 5 * kSuperPageSize);
+  uintptr_t a7 = GetAddressPoolManager()->Reserve(pool_, 0, 7 * kSuperPageSize);
+  EXPECT_EQ(a7, base_address_ + 6 * kSuperPageSize);
+  uintptr_t a8 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize);
+  EXPECT_EQ(a8, base_address_ + 21 * kSuperPageSize);
+  uintptr_t a9 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize);
+  EXPECT_EQ(a9, base_address_ + 13 * kSuperPageSize);
+
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a7, 7 * kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a9, 2 * kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a6, 6 * kSuperPageSize);
+  uintptr_t a10 =
+      GetAddressPoolManager()->Reserve(pool_, 0, 15 * kSuperPageSize);
+  EXPECT_EQ(a10, base_address_ + 6 * kSuperPageSize);
+
+  // Clean up.
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a1, kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a2, 2 * kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a3, 3 * kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a8, 3 * kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a10,
+                                                15 * kSuperPageSize);
+}
+
+TEST_F(PartitionAllocAddressPoolManagerTest, DecommittedDataIsErased) {
+  uintptr_t address =
+      GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
+  ASSERT_TRUE(address);
+  RecommitSystemPages(address, kSuperPageSize,
+                      PageAccessibilityConfiguration(
+                          PageAccessibilityConfiguration::kReadWrite),
+                      PageAccessibilityDisposition::kRequireUpdate);
+
+  memset(reinterpret_cast<void*>(address), 42, kSuperPageSize);
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, address, kSuperPageSize);
+
+  uintptr_t address2 =
+      GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
+  ASSERT_EQ(address, address2);
+  RecommitSystemPages(address2, kSuperPageSize,
+                      PageAccessibilityConfiguration(
+                          PageAccessibilityConfiguration::kReadWrite),
+                      PageAccessibilityDisposition::kRequireUpdate);
+
+  uint32_t sum = 0;
+  for (size_t i = 0; i < kSuperPageSize; i++) {
+    sum += reinterpret_cast<uint8_t*>(address2)[i];
+  }
+  EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed";
+
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, address2,
+                                                kSuperPageSize);
+}
+
+TEST_F(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
+  AddressSpaceStatsDumperForTesting dumper{};
+
+  GetAddressPoolManager()->DumpStats(&dumper);
+  ASSERT_EQ(dumper.regular_pool_usage_, 0ull);
+  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt);
+
+  // Bisect the pool by reserving a super page in the middle.
+  const uintptr_t midpoint_address =
+      base_address_ + (kPageCnt / 2) * kSuperPageSize;
+  ASSERT_EQ(
+      GetAddressPoolManager()->Reserve(pool_, midpoint_address, kSuperPageSize),
+      midpoint_address);
+
+  GetAddressPoolManager()->DumpStats(&dumper);
+  ASSERT_EQ(dumper.regular_pool_usage_, 1ull);
+  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt / 2);
+
+  GetAddressPoolManager()->UnreserveAndDecommit(pool_, midpoint_address,
+                                                kSuperPageSize);
+
+  GetAddressPoolManager()->DumpStats(&dumper);
+  ASSERT_EQ(dumper.regular_pool_usage_, 0ull);
+  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt);
+}
+
+#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+TEST(PartitionAllocAddressPoolManagerTest, IsManagedByRegularPool) {
+  constexpr size_t kAllocCount = 8;
+  static const size_t kNumPages[kAllocCount] = {1, 4, 7, 8, 13, 16, 31, 60};
+  uintptr_t addrs[kAllocCount];
+  for (size_t i = 0; i < kAllocCount; ++i) {
+    addrs[i] = AddressPoolManager::GetInstance().Reserve(
+        kRegularPoolHandle, 0,
+        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
+            kNumPages[i]);
+    EXPECT_TRUE(addrs[i]);
+    EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
+    AddressPoolManager::GetInstance().MarkUsed(
+        kRegularPoolHandle, addrs[i],
+        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
+            kNumPages[i]);
+  }
+  for (size_t i = 0; i < kAllocCount; ++i) {
+    uintptr_t address = addrs[i];
+    size_t num_pages =
+        base::bits::AlignUp(
+            kNumPages[i] *
+                AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap,
+            kSuperPageSize) /
+        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
+    for (size_t j = 0; j < num_pages; ++j) {
+      if (j < kNumPages[i]) {
+        EXPECT_TRUE(AddressPoolManager::IsManagedByRegularPool(address));
+      } else {
+        EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
+      }
+      EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
+      address += AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
+    }
+  }
+  for (size_t i = 0; i < kAllocCount; ++i) {
+    AddressPoolManager::GetInstance().MarkUnused(
+        kRegularPoolHandle, addrs[i],
+        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
+            kNumPages[i]);
+    AddressPoolManager::GetInstance().UnreserveAndDecommit(
+        kRegularPoolHandle, addrs[i],
+        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
+            kNumPages[i]);
+    EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
+    EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
+  }
+}
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+TEST(PartitionAllocAddressPoolManagerTest, IsManagedByBRPPool) {
+  constexpr size_t kAllocCount = 4;
+  // In total, (1+3+7+11) * 2MB = 44MB is allocated.
+  static const size_t kNumPages[kAllocCount] = {1, 3, 7, 11};
+  uintptr_t addrs[kAllocCount];
+  for (size_t i = 0; i < kAllocCount; ++i) {
+    addrs[i] = AddressPoolManager::GetInstance().Reserve(
+        kBRPPoolHandle, 0, kSuperPageSize * kNumPages[i]);
+    EXPECT_TRUE(addrs[i]);
+    EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
+    AddressPoolManager::GetInstance().MarkUsed(kBRPPoolHandle, addrs[i],
+                                               kSuperPageSize * kNumPages[i]);
+  }
+
+  constexpr size_t first_guard_size =
+      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
+      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
+  constexpr size_t last_guard_size =
+      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
+      (AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap -
+       AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap);
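+  // With kGuardOffsetOfBRPPoolBitmap == 1 and kGuardBitsOfBRPPoolBitmap == 2
+  // (see address_pool_manager_bitmap.h), each guard region is exactly one
+  // partition page.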
+
+  for (size_t i = 0; i < kAllocCount; ++i) {
+    uintptr_t address = addrs[i];
+    size_t num_allocated_size = kNumPages[i] * kSuperPageSize;
+    size_t num_system_pages = num_allocated_size / SystemPageSize();
+    for (size_t j = 0; j < num_system_pages; ++j) {
+      size_t offset = address - addrs[i];
+      if (offset < first_guard_size ||
+          offset >= (num_allocated_size - last_guard_size)) {
+        EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
+      } else {
+        EXPECT_TRUE(AddressPoolManager::IsManagedByBRPPool(address));
+      }
+      EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
+      address += SystemPageSize();
+    }
+  }
+  for (size_t i = 0; i < kAllocCount; ++i) {
+    AddressPoolManager::GetInstance().MarkUnused(kBRPPoolHandle, addrs[i],
+                                                 kSuperPageSize * kNumPages[i]);
+    AddressPoolManager::GetInstance().UnreserveAndDecommit(
+        kBRPPoolHandle, addrs[i], kSuperPageSize * kNumPages[i]);
+    EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
+    EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
+  }
+}
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+TEST(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
+  AddressSpaceStatsDumperForTesting dumper{};
+  AddressPoolManager::GetInstance().DumpStats(&dumper);
+  const size_t usage_before = dumper.regular_pool_usage_;
+
+  const uintptr_t address = AddressPoolManager::GetInstance().Reserve(
+      kRegularPoolHandle, 0, kSuperPageSize);
+  ASSERT_TRUE(address);
+  AddressPoolManager::GetInstance().MarkUsed(kRegularPoolHandle, address,
+                                             kSuperPageSize);
+
+  AddressPoolManager::GetInstance().DumpStats(&dumper);
+  EXPECT_GT(dumper.regular_pool_usage_, usage_before);
+
+  AddressPoolManager::GetInstance().MarkUnused(kRegularPoolHandle, address,
+                                               kSuperPageSize);
+  AddressPoolManager::GetInstance().UnreserveAndDecommit(
+      kRegularPoolHandle, address, kSuperPageSize);
+
+  AddressPoolManager::GetInstance().DumpStats(&dumper);
+  EXPECT_EQ(dumper.regular_pool_usage_, usage_before);
+}
+
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.cc b/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.cc
new file mode 100644
index 0000000..f069c1e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.cc
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/random.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#endif
+
+namespace partition_alloc {
+
+uintptr_t GetRandomPageBase() {
+  uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  random <<= 32ULL;
+  random |= static_cast<uintptr_t>(internal::RandomValue());
+
+  // The ASLRMask() and ASLROffset() constants will be suitable for the
+  // OS and build configuration.
+  random &= internal::ASLRMask();
+  random += internal::ASLROffset();
+#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
+#if BUILDFLAG(IS_WIN)
+  // On win32 host systems the randomization plus huge alignment causes
+  // excessive fragmentation. Plus most of these systems lack ASLR, so the
+  // randomization isn't buying anything. In that case we just skip it.
+  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
+  static BOOL is_wow64 = -1;
+  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) {
+    is_wow64 = FALSE;
+  }
+  if (!is_wow64) {
+    return 0;
+  }
+#endif  // BUILDFLAG(IS_WIN)
+  random &= internal::ASLRMask();
+  random += internal::ASLROffset();
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
+  return random;
+}
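+// For example, on 64-bit Linux x86_64, where ASLRMask() is AslrMask(46) and
+// ASLROffset() is 0 (see address_space_randomization.h), the hint returned
+// above is an address below 2^46, aligned to the page allocation granularity.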
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h b/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h
new file mode 100644
index 0000000..c336c54
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h
@@ -0,0 +1,286 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
+
+namespace internal {
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+AslrAddress(uintptr_t mask) {
+  return mask & PageAllocationGranularityBaseMask();
+}
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+AslrMask(uintptr_t bits) {
+  return AslrAddress((1ULL << bits) - 1ULL);
+}
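+// For illustration: assuming a 4 KiB page allocation granularity,
+// AslrMask(30) is 0x3FFFF000, i.e. a ~1 GiB range with the low 12 bits
+// cleared so results stay aligned to the allocation granularity.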
+
+// Turn off formatting, because the thicket of nested ifdefs below is
+// incomprehensible without indentation. It is also incomprehensible with
+// indentation, but the only other option is a combinatorial explosion of
+// *_{win,linux,mac,foo}_{32,64}.h files.
+//
+// clang-format off
+
+#if defined(ARCH_CPU_64_BITS)
+
+  #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+    // We shouldn't allocate system pages at all for sanitizer builds. However,
+    // we do, and if random hint addresses interfere with address ranges
+    // hard-coded in those tools, bad things happen. This address range is
+    // copied from TSAN source but works with all tools. See
+    // https://crbug.com/539863.
+    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+    ASLRMask() {
+      return AslrAddress(0x007fffffffffULL);
+    }
+    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+    ASLROffset() {
+      return AslrAddress(0x7e8000000000ULL);
+    }
+
+  #elif BUILDFLAG(IS_WIN)
+
+    // Windows 8.1 and newer support the full 48 bit address range. Since
+    // ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
+    // http://www.alex-ionescu.com/?p=246
+    PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+      return AslrMask(47);
+    }
+    // Try not to map pages into the range where Windows loads DLLs by default.
+    PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+      return 0x80000000ULL;
+    }
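+    // Taken together with GetRandomPageBase(), the hint therefore lands
+    // roughly in [0x80000000, 0x80000000 + 2^47), aligned to the page
+    // allocation granularity.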
+
+  #elif BUILDFLAG(IS_APPLE)
+
+    // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
+    // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
+    // is destroyed. Using a virtual address space that is too large causes a
+    // leak of about 1 wired [can never be paged out] page per call to mmap. The
+    // page is only reclaimed when the process is killed. Confine the hint to a
+    // 39-bit section of the virtual address space.
+    //
+    // This implementation adapted from
+    // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
+    // is that here we clamp to 39 bits, not 32.
+    //
+    // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
+    // changes.
+    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+    ASLRMask() {
+      return AslrMask(38);
+    }
+    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+    ASLROffset() {
+      // Be careful, there is a zone where macOS will not map memory, at least
+      // on ARM64. From an ARM64 machine running 12.3, the range seems to be
+      // [0x1000000000, 0x7000000000). Make sure that the range we use is
+      // outside these bounds. In 12.3, there is a reserved area between
+      // MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
+      // which is reserved on ARM64. See these constants in XNU's source code
+      // for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
+      return AslrAddress(0x10000000000ULL);
+    }
+
+  #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+
+    #if defined(ARCH_CPU_X86_64)
+
+      // Linux (and macOS) support the full 47-bit user space of x64 processors.
+      // Use only 46 to allow the kernel a chance to fulfill the request.
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+        return AslrMask(46);
+      }
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+        return AslrAddress(0);
+      }
+
+    #elif defined(ARCH_CPU_ARM64)
+
+      #if BUILDFLAG(IS_ANDROID)
+
+      // Restrict the address range on Android to avoid a large performance
+      // regression in single-process WebViews. See https://crbug.com/837640.
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+        return AslrMask(30);
+      }
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+        return AslrAddress(0x20000000ULL);
+      }
+
+      #elif BUILDFLAG(IS_LINUX)
+
+      // Linux on arm64 can use a 39, 42, 48, or 52-bit user space, depending
+      // on the page size and the number of translation table levels used. We
+      // use 39 bits as the baseline, since all setups should support it, and
+      // lower it to 38 bits because ASLROffset() could cause a carry.
+      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+      ASLRMask() {
+        return AslrMask(38);
+      }
+      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+      ASLROffset() {
+        return AslrAddress(0x1000000000ULL);
+      }
+
+      #else
+
+      // ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
+      // could cause a carry.
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+        return AslrMask(38);
+      }
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+        return AslrAddress(0x1000000000ULL);
+      }
+
+      #endif
+
+    #elif defined(ARCH_CPU_PPC64)
+
+      #if BUILDFLAG(IS_AIX)
+
+        // AIX has 64 bits of virtual addressing, but we limit the address range
+        // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
+        // extra address space to isolate the mmap regions.
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+          return AslrMask(30);
+        }
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+          return AslrAddress(0x400000000000ULL);
+        }
+
+      #elif defined(ARCH_CPU_BIG_ENDIAN)
+
+        // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+          return AslrMask(42);
+        }
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+          return AslrAddress(0);
+        }
+
+      #else  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+        // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+          return AslrMask(46);
+        }
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+          return AslrAddress(0);
+        }
+
+      #endif  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+    #elif defined(ARCH_CPU_S390X)
+
+      // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
+      // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
+      // chance to fulfill the request.
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+        return AslrMask(40);
+      }
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+        return AslrAddress(0);
+      }
+
+    #elif defined(ARCH_CPU_S390)
+
+      // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
+      // a chance to fulfill the request.
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+        return AslrMask(29);
+      }
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+        return AslrAddress(0);
+      }
+
+    #else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+           // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+      // For all other POSIX variants, use 30 bits.
+      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+        return AslrMask(30);
+      }
+
+      #if BUILDFLAG(IS_SOLARIS)
+
+        // For our Solaris/illumos mmap hint, we pick a random address in the
+        // bottom half of the top half of the address space (that is, the third
+        // quarter). Because we do not MAP_FIXED, this will be treated only as a
+        // hint -- the system will not fail to mmap because something else
+        // happens to already be mapped at our random address. We deliberately
+        // set the hint high enough to get well above the system's break (that
+        // is, the heap); Solaris and illumos will try the hint and if that
+        // fails allocate as if there were no hint at all. The high hint
+        // prevents the break from getting hemmed in at low values, ceding half
+        // of the address space to the system heap.
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+          return AslrAddress(0x80000000ULL);
+        }
+
+      #elif BUILDFLAG(IS_AIX)
+
+        // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
+        // upper range.
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+          return AslrAddress(0x90000000ULL);
+        }
+
+      #else  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
+
+        // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+        // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
+        // 10.6 and 10.7.
+        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+          return AslrAddress(0x20000000ULL);
+        }
+
+      #endif  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
+
+    #endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+            // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+  #endif  // BUILDFLAG(IS_POSIX)
+
+#elif defined(ARCH_CPU_32_BITS)
+
+  // This is a good range on 32-bit Windows and Android (the only platforms on
+  // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
+  // is no issue with carries here.
+  PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
+    return AslrMask(30);
+  }
+  PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
+    return AslrAddress(0x20000000ULL);
+  }
+
+#else
+
+  #error Please tell us about your exotic hardware! Sounds interesting.
+
+#endif  // defined(ARCH_CPU_32_BITS)
+
+// clang-format on
+
+}  // namespace internal
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization_unittest.cc
new file mode 100644
index 0000000..afec439
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_space_randomization_unittest.cc
@@ -0,0 +1,283 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/random.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#include "base/win/windows_version.h"
+#endif
+
+namespace partition_alloc {
+
+namespace {
+
+uintptr_t GetMask() {
+  uintptr_t mask = internal::ASLRMask();
+#if defined(ARCH_CPU_64_BITS)
+#elif defined(ARCH_CPU_32_BITS)
+#if BUILDFLAG(IS_WIN)
+  BOOL is_wow64 = FALSE;
+  if (!IsWow64Process(GetCurrentProcess(), &is_wow64)) {
+    is_wow64 = FALSE;
+  }
+  if (!is_wow64) {
+    mask = 0;
+  }
+#endif  // BUILDFLAG(IS_WIN)
+#endif  // defined(ARCH_CPU_32_BITS)
+  return mask;
+}
+
+const size_t kSamples = 100;
+
+uintptr_t GetAddressBits() {
+  return GetRandomPageBase();
+}
+
+uintptr_t GetRandomBits() {
+  return GetAddressBits() - internal::ASLROffset();
+}
+
+}  // namespace
+
+// Configurations without ASLR are tested here.
+TEST(PartitionAllocAddressSpaceRandomizationTest, DisabledASLR) {
+  uintptr_t mask = GetMask();
+  if (!mask) {
+#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_32_BITS)
+    // ASLR should be turned off on 32-bit Windows.
+    EXPECT_EQ(0u, GetRandomPageBase());
+#else
+    // Otherwise, 0 is very unexpected.
+    EXPECT_NE(0u, GetRandomPageBase());
+#endif
+  }
+}
+
+TEST(PartitionAllocAddressSpaceRandomizationTest, Alignment) {
+  uintptr_t mask = GetMask();
+  if (!mask) {
+    return;
+  }
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    uintptr_t address = GetAddressBits();
+    EXPECT_EQ(0ULL,
+              (address & internal::PageAllocationGranularityOffsetMask()));
+  }
+}
+
+TEST(PartitionAllocAddressSpaceRandomizationTest, Range) {
+  uintptr_t mask = GetMask();
+  if (!mask) {
+    return;
+  }
+
+  uintptr_t min = internal::ASLROffset();
+  uintptr_t max = internal::ASLROffset() + internal::ASLRMask();
+  for (size_t i = 0; i < kSamples; ++i) {
+    uintptr_t address = GetAddressBits();
+    EXPECT_LE(min, address);
+    EXPECT_GE(max + mask, address);
+  }
+}
+
+TEST(PartitionAllocAddressSpaceRandomizationTest, Predictable) {
+  uintptr_t mask = GetMask();
+  if (!mask) {
+    return;
+  }
+
+  const uint64_t kInitialSeed = 0xfeed5eedULL;
+  SetMmapSeedForTesting(kInitialSeed);
+
+  std::vector<uintptr_t> sequence;
+  for (size_t i = 0; i < kSamples; ++i) {
+    sequence.push_back(GetRandomPageBase());
+  }
+
+  SetMmapSeedForTesting(kInitialSeed);
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    EXPECT_EQ(GetRandomPageBase(), sequence[i]);
+  }
+}
+
+// This randomness test is adapted from V8's PRNG tests.
+
+// Chi squared for getting m 0s out of n bits.
+double ChiSquared(int m, int n) {
+  double ys_minus_np1 = (m - n / 2.0);
+  double chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n;
+  double ys_minus_np2 = ((n - m) - n / 2.0);
+  double chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n;
+  return chi_squared_1 + chi_squared_2;
+}
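+// For example, ChiSquared(5000, 10000) == 0 for a perfectly balanced sample,
+// while ChiSquared(5100, 10000) == 4.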
+
+// Test for correlations between recent bits from the PRNG, or bits that are
+// biased.
+void RandomBitCorrelation(int random_bit) {
+  uintptr_t mask = GetMask();
+  if ((mask & (1ULL << random_bit)) == 0) {
+    return;  // bit is always 0.
+  }
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Do fewer checks when BUILDFLAG(PA_DCHECK_IS_ON). The exercised code only
+  // changes when the random number generator does, which should be almost
+  // never. However, it's expensive to run all the tests, so keep iterations
+  // fast for local development builds, while having the stricter version run
+  // on official build testers.
+  constexpr int kHistory = 2;
+  constexpr int kRepeats = 1000;
+#else
+  constexpr int kHistory = 8;
+  constexpr int kRepeats = 10000;
+#endif
+  constexpr int kPointerBits = 8 * sizeof(void*);
+  uintptr_t history[kHistory];
+  // The predictor bit is either constant 0 or 1, or one of the bits from the
+  // history.
+  for (int predictor_bit = -2; predictor_bit < kPointerBits; predictor_bit++) {
+    // The predicted bit is one of the bits from the PRNG.
+    for (int ago = 0; ago < kHistory; ago++) {
+      // We don't want to check whether each bit predicts itself.
+      if (ago == 0 && predictor_bit == random_bit) {
+        continue;
+      }
+
+      // Enter the new random value into the history.
+      for (int i = ago; i >= 0; i--) {
+        history[i] = GetRandomBits();
+      }
+
+      // Find out how many of the bits are the same as the prediction bit.
+      int m = 0;
+      for (int i = 0; i < kRepeats; i++) {
+        uintptr_t random = GetRandomBits();
+        for (int j = ago - 1; j >= 0; j--) {
+          history[j + 1] = history[j];
+        }
+        history[0] = random;
+
+        int predicted;
+        if (predictor_bit >= 0) {
+          predicted = (history[ago] >> predictor_bit) & 1;
+        } else {
+          predicted = predictor_bit == -2 ? 0 : 1;
+        }
+        int bit = (random >> random_bit) & 1;
+        if (bit == predicted) {
+          m++;
+        }
+      }
+
+      // Chi squared analysis for k = 2 (two states: same/not-same) and one
+      // degree of freedom (k - 1).
+      double chi_squared = ChiSquared(m, kRepeats);
+      // For k=2, probability of Chi^2 > 35 is p=3.338e-9. This condition is
+      // tested ~19000 times, so probability of it failing randomly per one
+      // base_unittests run is (1 - (1 - p) ^ 19000) ~= 6e-5.
+      PA_CHECK(chi_squared <= 35.0);
+      // If the predictor bit is a fixed 0 or 1 then it makes no sense to
+      // repeat the test with a different age.
+      if (predictor_bit < 0) {
+        break;
+      }
+    }
+  }
+}
+
+// Tests are fairly slow, so give each random bit its own test.
+#define TEST_RANDOM_BIT(BIT)                        \
+  TEST(PartitionAllocAddressSpaceRandomizationTest, \
+       RandomBitCorrelations##BIT) {                \
+    RandomBitCorrelation(BIT);                      \
+  }
+
+// The first 12 bits on all platforms are always 0.
+TEST_RANDOM_BIT(12)
+TEST_RANDOM_BIT(13)
+TEST_RANDOM_BIT(14)
+TEST_RANDOM_BIT(15)
+TEST_RANDOM_BIT(16)
+TEST_RANDOM_BIT(17)
+TEST_RANDOM_BIT(18)
+TEST_RANDOM_BIT(19)
+TEST_RANDOM_BIT(20)
+TEST_RANDOM_BIT(21)
+TEST_RANDOM_BIT(22)
+TEST_RANDOM_BIT(23)
+TEST_RANDOM_BIT(24)
+TEST_RANDOM_BIT(25)
+TEST_RANDOM_BIT(26)
+TEST_RANDOM_BIT(27)
+TEST_RANDOM_BIT(28)
+TEST_RANDOM_BIT(29)
+TEST_RANDOM_BIT(30)
+TEST_RANDOM_BIT(31)
+#if defined(ARCH_CPU_64_BITS)
+TEST_RANDOM_BIT(32)
+TEST_RANDOM_BIT(33)
+TEST_RANDOM_BIT(34)
+TEST_RANDOM_BIT(35)
+TEST_RANDOM_BIT(36)
+TEST_RANDOM_BIT(37)
+TEST_RANDOM_BIT(38)
+TEST_RANDOM_BIT(39)
+TEST_RANDOM_BIT(40)
+TEST_RANDOM_BIT(41)
+TEST_RANDOM_BIT(42)
+TEST_RANDOM_BIT(43)
+TEST_RANDOM_BIT(44)
+TEST_RANDOM_BIT(45)
+TEST_RANDOM_BIT(46)
+TEST_RANDOM_BIT(47)
+TEST_RANDOM_BIT(48)
+// No platforms have more than 48 address bits.
+#endif  // defined(ARCH_CPU_64_BITS)
+
+#undef TEST_RANDOM_BIT
+
+// Checks that we can actually map memory in the requested range.
+// TODO(crbug.com/1318466): Extend to all operating systems once they are fixed.
+#if BUILDFLAG(IS_MAC)
+TEST(PartitionAllocAddressSpaceRandomizationTest, CanMapInAslrRange) {
+  int tries = 0;
+  // This is overly generous, but we really don't want to make the test flaky.
+  constexpr int kMaxTries = 1000;
+
+  for (tries = 0; tries < kMaxTries; tries++) {
+    uintptr_t requested_address = GetRandomPageBase();
+    size_t size = internal::PageAllocationGranularity();
+
+    uintptr_t address = AllocPages(
+        requested_address, size, internal::PageAllocationGranularity(),
+        PageAccessibilityConfiguration(
+            PageAccessibilityConfiguration::kReadWrite),
+        PageTag::kPartitionAlloc);
+    ASSERT_NE(address, 0u);
+    FreePages(address, size);
+
+    if (address == requested_address) {
+      break;
+    }
+  }
+
+  EXPECT_LT(tries, kMaxTries);
+}
+#endif  // BUILDFLAG(IS_MAC)
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h b/base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h
new file mode 100644
index 0000000..5b28417
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h
@@ -0,0 +1,55 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
+
+#include <cstddef>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+namespace partition_alloc {
+
+// All members are measured in super pages.
+struct PoolStats {
+  size_t usage = 0;
+
+  // On 32-bit platforms, pools are mainly logical entities, intermingled with
+  // allocations not managed by PartitionAlloc. The "largest available
+  // reservation" is not possible to measure in that case.
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  size_t largest_available_reservation = 0;
+#endif
+};
+
+struct AddressSpaceStats {
+  PoolStats regular_pool_stats;
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  PoolStats brp_pool_stats;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  PoolStats configurable_pool_stats;
+#else
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  size_t blocklist_size;  // measured in super pages
+  size_t blocklist_hit_count;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  PoolStats thread_isolated_pool_stats;
+#endif
+};
+
+// Interface passed to `AddressPoolManager::DumpStats()` to mediate
+// for `AddressSpaceDumpProvider`.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
+ public:
+  virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
+  virtual ~AddressSpaceStatsDumper() = default;
+};
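+
+// A minimal sketch of a concrete dumper (illustrative only; the class name
+// and member are hypothetical):
+//
+//   class CountingDumper final : public AddressSpaceStatsDumper {
+//    public:
+//     void DumpStats(const AddressSpaceStats* stats) override {
+//       regular_usage_ = stats->regular_pool_stats.usage;  // In super pages.
+//     }
+//
+//    private:
+//     size_t regular_usage_ = 0;
+//   };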
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/allocation_guard.cc b/base/allocator/partition_allocator/src/partition_alloc/allocation_guard.cc
new file mode 100644
index 0000000..64e9410
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/allocation_guard.cc
@@ -0,0 +1,42 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+
+#if PA_CONFIG(HAS_ALLOCATION_GUARD)
+
+namespace partition_alloc {
+
+namespace {
+thread_local bool g_disallow_allocations;
+}  // namespace
+
+ScopedDisallowAllocations::ScopedDisallowAllocations() {
+  if (g_disallow_allocations) {
+    PA_IMMEDIATE_CRASH();
+  }
+
+  g_disallow_allocations = true;
+}
+
+ScopedDisallowAllocations::~ScopedDisallowAllocations() {
+  g_disallow_allocations = false;
+}
+
+ScopedAllowAllocations::ScopedAllowAllocations() {
+  // Save the previous value, as ScopedAllowAllocations is used in all
+  // partitions, not just the malloc() one(s).
+  saved_value_ = g_disallow_allocations;
+  g_disallow_allocations = false;
+}
+
+ScopedAllowAllocations::~ScopedAllowAllocations() {
+  g_disallow_allocations = saved_value_;
+}
+
+}  // namespace partition_alloc
+
+#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h b/base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h
new file mode 100644
index 0000000..1a69198
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+#if PA_CONFIG(HAS_ALLOCATION_GUARD)
+
+// Disallow allocations in the scope. Does not nest.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations {
+ public:
+  ScopedDisallowAllocations();
+  ~ScopedDisallowAllocations();
+};
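+
+// A minimal usage sketch (illustrative only): the constructor crashes if
+// allocations are already disallowed, so code on an allocation path that
+// constructs a guard catches re-entrant allocations.
+//
+//   {
+//     ScopedDisallowAllocations guard;
+//     // Constructing another guard here (e.g. from a re-entrant allocation)
+//     // would hit PA_IMMEDIATE_CRASH().
+//   }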
+
+// Allow allocations in the scope; the previous state is restored on exit.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
+ public:
+  ScopedAllowAllocations();
+  ~ScopedAllowAllocations();
+
+ private:
+  bool saved_value_;
+};
+
+#else
+
+struct [[maybe_unused]] ScopedDisallowAllocations {};
+struct [[maybe_unused]] ScopedAllowAllocations {};
+
+#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
+
+}  // namespace partition_alloc
+
+namespace base::internal {
+
+using ::partition_alloc::ScopedAllowAllocations;
+using ::partition_alloc::ScopedDisallowAllocations;
+
+}  // namespace base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_
diff --git a/base/allocator/partition_allocator/arm_bti_test_functions.S b/base/allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.S
similarity index 100%
rename from base/allocator/partition_allocator/arm_bti_test_functions.S
rename to base/allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.S
diff --git a/base/allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.h b/base/allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.h
new file mode 100644
index 0000000..d419b35
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.h
@@ -0,0 +1,31 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
+
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_ARM64)
+extern "C" {
+/**
+ * A valid BTI function. Jumping to this function should not cause any problem
+ * in a BTI-enabled environment.
+ **/
+int64_t arm_bti_test_function(int64_t);
+
+/**
+ * A function without a proper BTI landing pad. Jumping here should crash the
+ * program on systems which support BTI.
+ **/
+int64_t arm_bti_test_function_invalid_offset(int64_t);
+
+/**
+ * A simple function which immediately returns to its caller.
+ **/
+void arm_bti_test_function_end(void);
+}
+#endif  // defined(ARCH_CPU_ARM64)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.cc b/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.cc
new file mode 100644
index 0000000..1b2e9d9
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.cc
@@ -0,0 +1,29 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+namespace partition_alloc::internal {
+
+// We keep the useful part in |g_base_| as 1s to speed up decompression.
+alignas(kPartitionCachelineSize)
+    PA_COMPONENT_EXPORT(PARTITION_ALLOC) CompressedPointerBaseGlobal::Base
+    CompressedPointerBaseGlobal::g_base_ = {.base = kUsefulBitsMask};
+
+void CompressedPointerBaseGlobal::SetBase(uintptr_t base) {
+  PA_DCHECK(!IsSet());
+  PA_DCHECK((base & kUsefulBitsMask) == 0);
+  g_base_.base = base | kUsefulBitsMask;
+}
+
+void CompressedPointerBaseGlobal::ResetBaseForTesting() {
+  g_base_.base = kUsefulBitsMask;
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.h b/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.h
new file mode 100644
index 0000000..1802c1d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.h
@@ -0,0 +1,668 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_
+
+#include <climits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+#if !BUILDFLAG(GLUE_CORE_POOLS)
+#error "Pointer compression only works with glued pools"
+#endif
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+#error "Pointer compression currently supports constant pool size"
+#endif
+
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+namespace partition_alloc {
+
+namespace internal {
+
+template <typename T1, typename T2>
+constexpr bool IsDecayedSame =
+    std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+// Pointer compression works by storing only the 'useful' 32-bit part of the
+// pointer. The other half (the base) is stored in a global variable
+// (CompressedPointerBaseGlobal::g_base_), which is used on decompression. To
+// support fast branchless decompression of nullptr, we use the most significant
+// bit in the compressed pointer to leverage sign-extension (for non-nullptr
+// pointers, the most significant bit is set, whereas for nullptr it's not).
+// Using this bit and supporting heaps larger than 4GB relies on having
+// alignment bits in pointers. Assuming that all pointers point to objects
+// with at least 8-byte alignment, pointer compression can support heaps of
+// size <= 16GB.
+// ((3 alignment bits) = (1 bit for sign-extension) + (2 bits for 16GB heap)).
+//
+// Example: heap base: 0x4b0'00000000
+//  - g_base: 0x4b3'ffffffff (lower 34 bits set)
+//  - normal pointer: 0x4b2'a08b6480
+//    - compression:
+//      - shift right by 3:        0x96'54116c90
+//      - truncate:                   0x54116c90
+//      - mark MSB:                   0xd4116c90
+//    - decompression:
+//      - sign-extend:       0xffffffff'd4116c90
+//      - shift left by 3:   0xfffffffe'a08b6480
+//      - 'and' with g_base: 0x000004b2'a08b6480
+//
+//  - nullptr: 0x00000000'00000000
+//    - compression:
+//      - shift right by 3:  0x00000000'00000000
+//      - truncate:                   0x00000000
+//      - (don't mark MSB for nullptr)
+//    - decompression:
+//      - sign-extend:       0x00000000'00000000
+//      - shift left by 3:   0x00000000'00000000
+//      - 'and' with g_base: 0x00000000'00000000
+//
+// Pointer compression relies on having both the regular and the BRP pool (core
+// pools) 'glued', so that the same base can be used for both. For simplicity,
+// configurations with a dynamically selected pool size are not supported.
+// However, they could be supported at the cost of performing an extra load of
+// the core-pools shift size on both compression and decompression.
+
+class CompressedPointerBaseGlobal final {
+ public:
+  static constexpr size_t kUsefulBits =
+      base::bits::CountTrailingZeroBits(PartitionAddressSpace::CorePoolsSize());
+  static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
+  static constexpr size_t kBitsToShift =
+      kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
+
+  CompressedPointerBaseGlobal() = delete;
+
+  // Attribute const allows the compiler to assume that
+  // CompressedPointerBaseGlobal::g_base_ doesn't change (e.g. across calls) and
+  // thereby avoid redundant loads.
+  PA_ALWAYS_INLINE __attribute__((const)) static uintptr_t Get() {
+    PA_DCHECK(IsBaseConsistent());
+    return g_base_.base;
+  }
+
+  PA_ALWAYS_INLINE static bool IsSet() {
+    PA_DCHECK(IsBaseConsistent());
+    return (g_base_.base & ~kUsefulBitsMask) != 0;
+  }
+
+ private:
+  static constexpr uintptr_t kUsefulBitsMask =
+      PartitionAddressSpace::CorePoolsSize() - 1;
+
+  static union alignas(kPartitionCachelineSize)
+      PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
+    uintptr_t base;
+    char cache_line[kPartitionCachelineSize];
+  } g_base_ PA_CONSTINIT;
+
+  PA_ALWAYS_INLINE static bool IsBaseConsistent() {
+    return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
+  }
+
+  static void SetBase(uintptr_t base);
+  static void ResetBaseForTesting();
+
+  friend class PartitionAddressSpace;
+};
+
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+}  // namespace internal
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+template <typename T>
+class PA_TRIVIAL_ABI CompressedPointer final {
+ public:
+  using UnderlyingType = uint32_t;
+
+  PA_ALWAYS_INLINE constexpr CompressedPointer() = default;
+  PA_ALWAYS_INLINE explicit CompressedPointer(T* ptr) : value_(Compress(ptr)) {}
+  PA_ALWAYS_INLINE constexpr explicit CompressedPointer(std::nullptr_t)
+      : value_(0u) {}
+
+  PA_ALWAYS_INLINE constexpr CompressedPointer(const CompressedPointer&) =
+      default;
+  PA_ALWAYS_INLINE constexpr CompressedPointer(
+      CompressedPointer&& other) noexcept = default;
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr CompressedPointer(
+      const CompressedPointer<U>& other) {
+    if constexpr (internal::IsDecayedSame<T, U>) {
+      // When pointers have the same type modulo constness, avoid the
+      // compress-decompress round.
+      value_ = other.value_;
+    } else {
+      // When the types are different, perform the round, because the pointer
+      // may need to be adjusted.
+      // TODO(1376980): Avoid the cycle here.
+      value_ = Compress(other.get());
+    }
+  }
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr CompressedPointer(
+      CompressedPointer<U>&& other) noexcept
+      : CompressedPointer(other) {}
+
+  ~CompressedPointer() = default;
+
+  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
+      const CompressedPointer&) = default;
+  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
+      CompressedPointer&& other) noexcept = default;
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
+      const CompressedPointer<U>& other) {
+    CompressedPointer copy(other);
+    value_ = copy.value_;
+    return *this;
+  }
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
+      CompressedPointer<U>&& other) noexcept {
+    *this = other;
+    return *this;
+  }
+
+  // Don't perform compression when assigning to nullptr.
+  PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(std::nullptr_t) {
+    value_ = 0u;
+    return *this;
+  }
+
+  PA_ALWAYS_INLINE T* get() const { return Decompress(value_); }
+
+  PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return value_; }
+
+  PA_ALWAYS_INLINE constexpr UnderlyingType GetAsIntegral() const {
+    return value_;
+  }
+
+  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
+    return is_nonnull();
+  }
+
+  template <typename U = T,
+            std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
+  PA_ALWAYS_INLINE U& operator*() const {
+    PA_DCHECK(is_nonnull());
+    return *get();
+  }
+
+  PA_ALWAYS_INLINE T* operator->() const {
+    PA_DCHECK(is_nonnull());
+    return get();
+  }
+
+  PA_ALWAYS_INLINE constexpr void swap(CompressedPointer& other) {
+    std::swap(value_, other.value_);
+  }
+
+ private:
+  template <typename>
+  friend class CompressedPointer;
+
+  static constexpr size_t kBitsForSignExtension = 1;
+  static constexpr size_t kOverallBitsToShift =
+      internal::CompressedPointerBaseGlobal::kBitsToShift +
+      kBitsForSignExtension;
+
+  PA_ALWAYS_INLINE static UnderlyingType Compress(T* ptr) {
+    static constexpr size_t kMinimalRequiredAlignment = 8;
+    static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
+              0);
+    PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
+
+    const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
+    static constexpr size_t kCorePoolsBaseMask =
+        ~(internal::PartitionAddressSpace::CorePoolsSize() - 1);
+    PA_DCHECK(!ptr ||
+              (base & kCorePoolsBaseMask) ==
+                  (reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+    const auto uptr = reinterpret_cast<uintptr_t>(ptr);
+    // Shift the pointer and truncate.
+    auto compressed = static_cast<UnderlyingType>(uptr >> kOverallBitsToShift);
+    // If the pointer is non-null, mark the most-significant-bit to sign-extend
+    // it on decompression. Assuming compression is a significantly less
+    // frequent operation, we do more work here in favor of faster
+    // decompression.
+    // TODO(1376980): Avoid this by overreserving the heap.
+    if (compressed) {
+      compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
+    }
+
+    return compressed;
+  }
+
+  PA_ALWAYS_INLINE static T* Decompress(UnderlyingType ptr) {
+    PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
+    const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
+    // Treat the compressed pointer as signed and cast it to uint64_t, which
+    // will sign-extend it. Then, shift the result left by kOverallBitsToShift.
+    // It's important to shift the already-unsigned value, as otherwise the
+    // shift would result in undefined behavior.
+    const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr))
+                          << (kOverallBitsToShift);
+    return reinterpret_cast<T*>(mask & base);
+  }
+
+  UnderlyingType value_;
+};
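+
+// Usage sketch (illustrative only; the pointee must be allocated from
+// PartitionAlloc so that it lies within the glued core pools and the global
+// base has been set):
+//
+//   double* raw = ...;                   // Some PartitionAlloc-backed object.
+//   CompressedPointer<double> ptr(raw);  // Stored as 32 bits.
+//   double value = *ptr;                 // Decompressed on access.
+//   PA_DCHECK(ptr.get() == raw);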
+
+template <typename T>
+PA_ALWAYS_INLINE constexpr void swap(CompressedPointer<T>& a,
+                                     CompressedPointer<T>& b) {
+  a.swap(b);
+}
+
+// operators==.
+template <typename T, typename U>
+PA_ALWAYS_INLINE bool operator==(CompressedPointer<T> a,
+                                 CompressedPointer<U> b) {
+  if constexpr (internal::IsDecayedSame<T, U>) {
+    // When pointers have the same type modulo constness, simply compare
+    // compressed values.
+    return a.GetAsIntegral() == b.GetAsIntegral();
+  } else {
+    // When the types are different, compare decompressed pointers, because the
+    // pointers may need to be adjusted.
+    // TODO(1376980): Avoid decompression here.
+    return a.get() == b.get();
+  }
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a, U* b) {
+  // Do compression, since it is less expensive.
+  return a == static_cast<CompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(T* a, CompressedPointer<U> b) {
+  return b == a;
+}
+
+template <typename T>
+PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a,
+                                           std::nullptr_t) {
+  return !a.is_nonnull();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
+                                           CompressedPointer<U> b) {
+  return b == nullptr;
+}
+
+// operators!=.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
+                                           CompressedPointer<U> b) {
+  return !(a == b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a, U* b) {
+  // Do compression, since it is less expensive.
+  return a != static_cast<CompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(T* a, CompressedPointer<U> b) {
+  return b != a;
+}
+
+template <typename T>
+PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
+                                           std::nullptr_t) {
+  return a.is_nonnull();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
+                                           CompressedPointer<U> b) {
+  return b != nullptr;
+}
+
+// operators<.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a,
+                                          CompressedPointer<U> b) {
+  if constexpr (internal::IsDecayedSame<T, U>) {
+    // When pointers have the same type modulo constness, simply compare
+    // compressed values.
+    return a.GetAsIntegral() < b.GetAsIntegral();
+  } else {
+    // When the types are different, compare decompressed pointers, because the
+    // pointers may need to be adjusted.
+    // TODO(1376980): Avoid decompression here.
+    return a.get() < b.get();
+  }
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a, U* b) {
+  // Do compression, since it is less expensive.
+  return a < static_cast<CompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<(T* a, CompressedPointer<U> b) {
+  // Do compression, since it is less expensive.
+  return static_cast<CompressedPointer<T>>(a) < b;
+}
+
+// operators<=.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a,
+                                           CompressedPointer<U> b) {
+  if constexpr (internal::IsDecayedSame<T, U>) {
+    // When pointers have the same type modulo constness, simply compare
+    // compressed values.
+    return a.GetAsIntegral() <= b.GetAsIntegral();
+  } else {
+    // When the types are different, compare decompressed pointers, because the
+    // pointers may need to be adjusted.
+    // TODO(1376980): Avoid decompression here.
+    return a.get() <= b.get();
+  }
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a, U* b) {
+  // Do compression, since it is less expensive.
+  return a <= static_cast<CompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<=(T* a, CompressedPointer<U> b) {
+  // Do compression, since it is less expensive.
+  return static_cast<CompressedPointer<T>>(a) <= b;
+}
+
+// operators>.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a,
+                                          CompressedPointer<U> b) {
+  return !(a <= b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a, U* b) {
+  // Do compression, since it is less expensive.
+  return a > static_cast<CompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>(T* a, CompressedPointer<U> b) {
+  // Do compression, since it is less expensive.
+  return static_cast<CompressedPointer<T>>(a) > b;
+}
+
+// operators>=.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a,
+                                           CompressedPointer<U> b) {
+  return !(a < b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a, U* b) {
+  // Do compression, since it is less expensive.
+  return a >= static_cast<CompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>=(T* a, CompressedPointer<U> b) {
+  // Do compression, since it is less expensive.
+  return static_cast<CompressedPointer<T>>(a) >= b;
+}
+
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+// Simple wrapper over the raw pointer.
+template <typename T>
+class PA_TRIVIAL_ABI UncompressedPointer final {
+ public:
+  PA_ALWAYS_INLINE constexpr UncompressedPointer() = default;
+  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(T* ptr) : ptr_(ptr) {}
+  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(std::nullptr_t)
+      : ptr_(nullptr) {}
+
+  PA_ALWAYS_INLINE constexpr UncompressedPointer(const UncompressedPointer&) =
+      default;
+  PA_ALWAYS_INLINE constexpr UncompressedPointer(
+      UncompressedPointer&& other) noexcept = default;
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
+      const UncompressedPointer<U>& other)
+      : ptr_(other.ptr_) {}
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
+      UncompressedPointer<U>&& other) noexcept
+      : ptr_(std::move(other.ptr_)) {}
+
+  ~UncompressedPointer() = default;
+
+  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
+      const UncompressedPointer&) = default;
+  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
+      UncompressedPointer&& other) noexcept = default;
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
+      const UncompressedPointer<U>& other) {
+    ptr_ = other.ptr_;
+    return *this;
+  }
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
+      UncompressedPointer<U>&& other) noexcept {
+    ptr_ = std::move(other.ptr_);
+    return *this;
+  }
+
+  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(std::nullptr_t) {
+    ptr_ = nullptr;
+    return *this;
+  }
+
+  PA_ALWAYS_INLINE constexpr T* get() const { return ptr_; }
+
+  PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return ptr_; }
+
+  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
+    return is_nonnull();
+  }
+
+  template <typename U = T,
+            std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr U& operator*() const {
+    PA_DCHECK(is_nonnull());
+    return *get();
+  }
+
+  PA_ALWAYS_INLINE constexpr T* operator->() const {
+    PA_DCHECK(is_nonnull());
+    return get();
+  }
+
+  PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer& other) {
+    std::swap(ptr_, other.ptr_);
+  }
+
+ private:
+  template <typename>
+  friend class UncompressedPointer;
+
+  T* ptr_;
+};
+
+template <typename T>
+PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer<T>& a,
+                                     UncompressedPointer<T>& b) {
+  a.swap(b);
+}
+
+// operators==.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
+                                           UncompressedPointer<U> b) {
+  return a.get() == b.get();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a, U* b) {
+  return a == static_cast<UncompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(T* a, UncompressedPointer<U> b) {
+  return b == a;
+}
+
+template <typename T>
+PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
+                                           std::nullptr_t) {
+  return !a.is_nonnull();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
+                                           UncompressedPointer<U> b) {
+  return b == nullptr;
+}
+
+// operators!=.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
+                                           UncompressedPointer<U> b) {
+  return !(a == b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a, U* b) {
+  return a != static_cast<UncompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(T* a, UncompressedPointer<U> b) {
+  return b != a;
+}
+
+template <typename T>
+PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
+                                           std::nullptr_t) {
+  return a.is_nonnull();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
+                                           UncompressedPointer<U> b) {
+  return b != nullptr;
+}
+
+// operators<.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a,
+                                          UncompressedPointer<U> b) {
+  return a.get() < b.get();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a, U* b) {
+  return a < static_cast<UncompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<(T* a, UncompressedPointer<U> b) {
+  return static_cast<UncompressedPointer<T>>(a) < b;
+}
+
+// operators<=.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a,
+                                           UncompressedPointer<U> b) {
+  return a.get() <= b.get();
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a, U* b) {
+  return a <= static_cast<UncompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator<=(T* a, UncompressedPointer<U> b) {
+  return static_cast<UncompressedPointer<T>>(a) <= b;
+}
+
+// operators>.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a,
+                                          UncompressedPointer<U> b) {
+  return !(a <= b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a, U* b) {
+  return a > static_cast<UncompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>(T* a, UncompressedPointer<U> b) {
+  return static_cast<UncompressedPointer<T>>(a) > b;
+}
+
+// operators>=.
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a,
+                                           UncompressedPointer<U> b) {
+  return !(a < b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a, U* b) {
+  return a >= static_cast<UncompressedPointer<U>>(b);
+}
+
+template <typename T, typename U>
+PA_ALWAYS_INLINE constexpr bool operator>=(T* a, UncompressedPointer<U> b) {
+  return static_cast<UncompressedPointer<T>>(a) >= b;
+}
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer_unittest.cc
new file mode 100644
index 0000000..c009bbb
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/compressed_pointer_unittest.cc
@@ -0,0 +1,431 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc {
+
+namespace {
+
+struct Base {
+  double a;
+};
+struct Derived : Base {
+  double b;
+};
+struct Mixin {
+  double c;
+};
+struct DerivedWithMixin : Base, Mixin {
+  double d;
+};
+
+struct PADeleter final {
+  void operator()(void* ptr) const { allocator_.root()->Free(ptr); }
+  PartitionAllocator& allocator_;
+};
+
+template <typename T, typename... Args>
+std::unique_ptr<T, PADeleter> make_pa_unique(PartitionAllocator& alloc,
+                                             Args&&... args) {
+  T* result = new (alloc.root()->Alloc(sizeof(T), nullptr))
+      T(std::forward<Args>(args)...);
+  return std::unique_ptr<T, PADeleter>(result, PADeleter{alloc});
+}
+
+template <typename T>
+std::unique_ptr<T[], PADeleter> make_pa_array_unique(PartitionAllocator& alloc,
+                                                     size_t num) {
+  T* result = new (alloc.root()->Alloc(sizeof(T) * num, nullptr)) T();
+  return std::unique_ptr<T[], PADeleter>(result, PADeleter{alloc});
+}
+
+// Test that pointer types are trivial.
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+static_assert(
+    std::is_trivially_default_constructible_v<CompressedPointer<Base>>);
+static_assert(std::is_trivially_copy_constructible_v<CompressedPointer<Base>>);
+static_assert(std::is_trivially_move_constructible_v<CompressedPointer<Base>>);
+static_assert(std::is_trivially_copy_assignable_v<CompressedPointer<Base>>);
+static_assert(std::is_trivially_move_assignable_v<CompressedPointer<Base>>);
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+static_assert(
+    std::is_trivially_default_constructible_v<UncompressedPointer<Base>>);
+static_assert(
+    std::is_trivially_copy_constructible_v<UncompressedPointer<Base>>);
+static_assert(
+    std::is_trivially_move_constructible_v<UncompressedPointer<Base>>);
+static_assert(std::is_trivially_copy_assignable_v<UncompressedPointer<Base>>);
+static_assert(std::is_trivially_move_assignable_v<UncompressedPointer<Base>>);
+
+}  // namespace
+
+struct UncompressedTypeTag {};
+struct CompressedTypeTag {};
+
+template <typename TagType>
+class CompressedPointerTest : public ::testing::Test {
+ public:
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+  template <typename T>
+  using PointerType =
+      std::conditional_t<std::is_same_v<TagType, CompressedTypeTag>,
+                         CompressedPointer<T>,
+                         UncompressedPointer<T>>;
+#else   // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+  template <typename T>
+  using PointerType = UncompressedPointer<T>;
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+  CompressedPointerTest() : allocator_(PartitionOptions{}) {}
+
+ protected:
+  PartitionAllocator allocator_;
+};
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+using ObjectTypes = ::testing::Types<UncompressedTypeTag, CompressedTypeTag>;
+#else
+using ObjectTypes = ::testing::Types<UncompressedTypeTag>;
+#endif
+
+TYPED_TEST_SUITE(CompressedPointerTest, ObjectTypes);
+
+TYPED_TEST(CompressedPointerTest, NullConstruction) {
+  using DoublePointer = typename TestFixture::template PointerType<double>;
+  {
+    DoublePointer p = static_cast<DoublePointer>(nullptr);
+    EXPECT_FALSE(p.is_nonnull());
+    EXPECT_FALSE(p.get());
+    EXPECT_EQ(p, nullptr);
+  }
+  {
+    DoublePointer p1 = static_cast<DoublePointer>(nullptr);
+    DoublePointer p2 = p1;
+    EXPECT_FALSE(p2.is_nonnull());
+    EXPECT_FALSE(p2.get());
+    EXPECT_EQ(p2, nullptr);
+  }
+  {
+    DoublePointer p1 = static_cast<DoublePointer>(nullptr);
+    DoublePointer p2 = std::move(p1);
+    EXPECT_FALSE(p2.is_nonnull());
+    EXPECT_FALSE(p2.get());
+    EXPECT_EQ(p2, nullptr);
+  }
+}
+
+TYPED_TEST(CompressedPointerTest, NullAssignment) {
+  using DoublePointer = typename TestFixture::template PointerType<double>;
+  {
+    DoublePointer p;
+    p = static_cast<DoublePointer>(nullptr);
+    EXPECT_FALSE(p.is_nonnull());
+    EXPECT_FALSE(p.get());
+    EXPECT_EQ(p.get(), nullptr);
+    EXPECT_EQ(p, nullptr);
+  }
+  {
+    DoublePointer p1 = DoublePointer(nullptr), p2;
+    p2 = p1;
+    EXPECT_FALSE(p2.is_nonnull());
+    EXPECT_FALSE(p2.get());
+    EXPECT_EQ(p2.get(), nullptr);
+    EXPECT_EQ(p2, nullptr);
+  }
+  {
+    DoublePointer p1 = DoublePointer(nullptr), p2;
+    p2 = std::move(p1);
+    EXPECT_FALSE(p2.is_nonnull());
+    EXPECT_FALSE(p2.get());
+    EXPECT_EQ(p2.get(), nullptr);
+    EXPECT_EQ(p2, nullptr);
+  }
+}
+
+TYPED_TEST(CompressedPointerTest, SameTypeValueConstruction) {
+  using DoublePointer = typename TestFixture::template PointerType<double>;
+  auto d = make_pa_unique<double>(this->allocator_);
+  {
+    DoublePointer p = static_cast<DoublePointer>(d.get());
+    EXPECT_TRUE(p.is_nonnull());
+    EXPECT_EQ(p.get(), d.get());
+    EXPECT_EQ(p, d.get());
+  }
+  {
+    DoublePointer p1 = static_cast<DoublePointer>(d.get());
+    DoublePointer p2 = p1;
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, p1);
+    EXPECT_EQ(p2, d.get());
+  }
+  {
+    DoublePointer p1 = static_cast<DoublePointer>(d.get());
+    DoublePointer p2 = std::move(p1);
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, d.get());
+  }
+}
+
+TYPED_TEST(CompressedPointerTest, SameTypeValueAssignment) {
+  using DoublePointer = typename TestFixture::template PointerType<double>;
+  auto d = make_pa_unique<double>(this->allocator_);
+  {
+    DoublePointer p;
+    p = static_cast<DoublePointer>(d.get());
+    EXPECT_TRUE(p.is_nonnull());
+    EXPECT_EQ(p.get(), d.get());
+    EXPECT_EQ(p, d.get());
+  }
+  {
+    DoublePointer p1 = static_cast<DoublePointer>(d.get());
+    DoublePointer p2;
+    p2 = p1;
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, p1);
+    EXPECT_EQ(p2, d.get());
+  }
+  {
+    DoublePointer p1 = static_cast<DoublePointer>(d.get());
+    DoublePointer p2;
+    p2 = std::move(p1);
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, d.get());
+  }
+}
+
+TYPED_TEST(CompressedPointerTest,
+           HeterogeneousValueConstructionSamePointerValue) {
+  using BasePointer = typename TestFixture::template PointerType<Base>;
+  auto d = make_pa_unique<Derived>(this->allocator_);
+  {
+    BasePointer p = static_cast<BasePointer>(d.get());
+    EXPECT_TRUE(p.is_nonnull());
+    EXPECT_EQ(p.get(), d.get());
+  }
+  {
+    BasePointer p1 = static_cast<BasePointer>(d.get());
+    BasePointer p2 = p1;
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, p1);
+    EXPECT_EQ(p2, d.get());
+  }
+  {
+    BasePointer p1 = static_cast<BasePointer>(d.get());
+    BasePointer p2 = std::move(p1);
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, d.get());
+  }
+}
+
+TYPED_TEST(CompressedPointerTest,
+           HeterogeneousValueAssignmentSamePointerValue) {
+  using BasePointer = typename TestFixture::template PointerType<Base>;
+  auto d = make_pa_unique<Derived>(this->allocator_);
+  {
+    BasePointer p;
+    p = static_cast<BasePointer>(d.get());
+    EXPECT_TRUE(p.is_nonnull());
+    EXPECT_EQ(p.get(), d.get());
+  }
+  {
+    BasePointer p1 = static_cast<BasePointer>(d.get());
+    BasePointer p2;
+    p2 = p1;
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, p1);
+    EXPECT_EQ(p2, d.get());
+  }
+  {
+    BasePointer p1 = static_cast<BasePointer>(d.get());
+    BasePointer p2;
+    p2 = std::move(p1);
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, d.get());
+  }
+}
+
+TYPED_TEST(CompressedPointerTest,
+           HeterogeneousValueConstructionDifferentPointerValues) {
+  using MixinPointer = typename TestFixture::template PointerType<Mixin>;
+  auto d = make_pa_unique<DerivedWithMixin>(this->allocator_);
+  {
+    MixinPointer p = static_cast<MixinPointer>(d.get());
+    ASSERT_NE(static_cast<void*>(p.get()), static_cast<void*>(d.get()));
+  }
+  {
+    MixinPointer p = static_cast<MixinPointer>(d.get());
+    EXPECT_TRUE(p.is_nonnull());
+    EXPECT_EQ(p.get(), d.get());
+  }
+  {
+    MixinPointer p1 = static_cast<MixinPointer>(d.get());
+    MixinPointer p2 = p1;
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, p1);
+    EXPECT_EQ(p2, d.get());
+  }
+  {
+    MixinPointer p1 = static_cast<MixinPointer>(d.get());
+    MixinPointer p2 = std::move(p1);
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, d.get());
+  }
+}
+
+TYPED_TEST(CompressedPointerTest,
+           HeterogeneousValueAssignmentDifferentPointerValue) {
+  using MixinPointer = typename TestFixture::template PointerType<Mixin>;
+  auto d = make_pa_unique<DerivedWithMixin>(this->allocator_);
+  {
+    MixinPointer p;
+    p = static_cast<MixinPointer>(d.get());
+    ASSERT_NE(static_cast<void*>(p.get()), static_cast<void*>(d.get()));
+  }
+  {
+    MixinPointer p;
+    p = static_cast<MixinPointer>(d.get());
+    EXPECT_TRUE(p.is_nonnull());
+    EXPECT_EQ(p.get(), d.get());
+  }
+  {
+    MixinPointer p1 = static_cast<MixinPointer>(d.get());
+    MixinPointer p2;
+    p2 = p1;
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, p1);
+    EXPECT_EQ(p2, d.get());
+  }
+  {
+    MixinPointer p1 = static_cast<MixinPointer>(d.get());
+    MixinPointer p2;
+    p2 = std::move(p1);
+    EXPECT_TRUE(p2.is_nonnull());
+    EXPECT_EQ(p2.get(), d.get());
+    EXPECT_EQ(p2, d.get());
+  }
+}
+
+namespace {
+
+template <template <typename> class PointerType,
+          typename T1,
+          typename T2,
+          typename U>
+void EqualityTest(U* raw) {
+  PointerType<T1> p1 = static_cast<PointerType<T1>>(raw);
+  PointerType<T2> p2 = static_cast<PointerType<T2>>(raw);
+  EXPECT_EQ(p1, raw);
+  EXPECT_EQ(p2, raw);
+  EXPECT_EQ(raw, p1);
+  EXPECT_EQ(raw, p2);
+  EXPECT_EQ(p1, p2);
+}
+
+template <template <typename> class PointerType,
+          typename T1,
+          typename T2,
+          typename U>
+void CompareTest(U* array) {
+  PointerType<T1> p0 = static_cast<PointerType<T1>>(&array[0]);
+  PointerType<T2> p1 = static_cast<PointerType<T2>>(&array[1]);
+  {
+    EXPECT_NE(p0, &array[1]);
+    EXPECT_NE(p0, p1);
+    EXPECT_NE(p1, &array[0]);
+    EXPECT_NE(p1, p0);
+  }
+  {
+    EXPECT_LT(p0, &array[1]);
+    EXPECT_LT(&array[0], p1);
+    EXPECT_LT(p0, p1);
+  }
+  {
+    EXPECT_LE(p0, &array[0]);
+    EXPECT_LE(p0, &array[1]);
+    EXPECT_LE(&array[0], p0);
+
+    EXPECT_LE(&array[1], p1);
+    EXPECT_LE(p1, &array[1]);
+
+    auto p2 = p0;
+    EXPECT_LE(p0, p2);
+    EXPECT_LE(p2, p1);
+  }
+  {
+    EXPECT_GT(&array[1], p0);
+    EXPECT_GT(p1, &array[0]);
+    EXPECT_GT(p1, p0);
+  }
+  {
+    EXPECT_GE(&array[0], p0);
+    EXPECT_GE(&array[1], p0);
+    EXPECT_GE(p0, &array[0]);
+
+    EXPECT_GE(p1, &array[1]);
+    EXPECT_GE(&array[1], p1);
+
+    auto p2 = p1;
+    EXPECT_GE(p1, p2);
+    EXPECT_GE(p1, p0);
+  }
+}
+
+}  // namespace
+
+TYPED_TEST(CompressedPointerTest, EqualitySamePointerValue) {
+  auto d = make_pa_unique<Derived>(this->allocator_);
+  EqualityTest<TestFixture::template PointerType, Base, Base>(d.get());
+  EqualityTest<TestFixture::template PointerType, Base, Derived>(d.get());
+  EqualityTest<TestFixture::template PointerType, Derived, Base>(d.get());
+  EqualityTest<TestFixture::template PointerType, Derived, Derived>(d.get());
+}
+
+TYPED_TEST(CompressedPointerTest, EqualityDifferentPointerValues) {
+  auto d = make_pa_unique<DerivedWithMixin>(this->allocator_);
+  EqualityTest<TestFixture::template PointerType, Mixin, Mixin>(d.get());
+  EqualityTest<TestFixture::template PointerType, Mixin, DerivedWithMixin>(
+      d.get());
+  EqualityTest<TestFixture::template PointerType, DerivedWithMixin, Mixin>(
+      d.get());
+  EqualityTest<TestFixture::template PointerType, DerivedWithMixin,
+               DerivedWithMixin>(d.get());
+}
+
+TYPED_TEST(CompressedPointerTest, CompareSamePointerValue) {
+  auto d = make_pa_array_unique<Derived>(this->allocator_, 2);
+  CompareTest<TestFixture::template PointerType, Base, Base>(d.get());
+  CompareTest<TestFixture::template PointerType, Base, Derived>(d.get());
+  CompareTest<TestFixture::template PointerType, Derived, Base>(d.get());
+  CompareTest<TestFixture::template PointerType, Derived, Derived>(d.get());
+}
+
+TYPED_TEST(CompressedPointerTest, CompareDifferentPointerValues) {
+  auto d = make_pa_array_unique<DerivedWithMixin>(this->allocator_, 2);
+  CompareTest<TestFixture::template PointerType, Mixin, Mixin>(d.get());
+  CompareTest<TestFixture::template PointerType, Mixin, DerivedWithMixin>(
+      d.get());
+  CompareTest<TestFixture::template PointerType, DerivedWithMixin, Mixin>(
+      d.get());
+  CompareTest<TestFixture::template PointerType, DerivedWithMixin,
+              DerivedWithMixin>(d.get());
+}
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.cc b/base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.cc
new file mode 100644
index 0000000..4946b9b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.cc
@@ -0,0 +1,75 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc {
+
+namespace {
+DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
+DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
+DanglingRawPtrDetectedFn* g_unretained_dangling_raw_ptr_detected_fn =
+    [](uintptr_t) {};
+bool g_unretained_dangling_raw_ptr_check_enabled = false;
+}  // namespace
+
+DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
+  PA_DCHECK(g_dangling_raw_ptr_detected_fn);
+  return g_dangling_raw_ptr_detected_fn;
+}
+
+DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn() {
+  PA_DCHECK(g_dangling_raw_ptr_released_fn);
+  return g_dangling_raw_ptr_released_fn;
+}
+
+void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
+  PA_DCHECK(fn);
+  g_dangling_raw_ptr_detected_fn = fn;
+}
+
+void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
+  PA_DCHECK(fn);
+  g_dangling_raw_ptr_released_fn = fn;
+}
+
+DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn() {
+  return g_unretained_dangling_raw_ptr_detected_fn;
+}
+
+void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn* fn) {
+  PA_DCHECK(fn);
+  g_unretained_dangling_raw_ptr_detected_fn = fn;
+}
+
+bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled) {
+  bool old = g_unretained_dangling_raw_ptr_check_enabled;
+  g_unretained_dangling_raw_ptr_check_enabled = enabled;
+  return old;
+}
+
+namespace internal {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
+  g_dangling_raw_ptr_detected_fn(id);
+}
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
+  g_dangling_raw_ptr_released_fn(id);
+}
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void UnretainedDanglingRawPtrDetected(uintptr_t id) {
+  g_unretained_dangling_raw_ptr_detected_fn(id);
+}
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool IsUnretainedDanglingRawPtrCheckEnabled() {
+  return g_unretained_dangling_raw_ptr_check_enabled;
+}
+
+}  // namespace internal
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h b/base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h
new file mode 100644
index 0000000..0e11826
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h
@@ -0,0 +1,67 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+// When compiled with the build flag `enable_dangling_raw_ptr_checks`,
+// dangling raw_ptrs are reported. The reporting behavior can be configured
+// here.
+//
+// Purpose of this level of indirection:
+// - Ease testing.
+// - Keep partition_alloc/ independent from base/. In most cases, when a
+//   dangling raw_ptr is detected/released, this involves recording a
+//   base::debug::StackTrace, which isn't desirable inside partition_alloc/.
+// - Be able (potentially) to turn this feature on/off at runtime based on
+//   the dependent's flags.
+namespace partition_alloc {
+
+// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
+// memory region and the allocator is asked to release it.
+//
+// It won't be called again with the same `id`, up until (potentially) a call to
+// DanglingRawPtrReleased(`id`) is made.
+//
+// This function is called from within the allocator, and is not allowed to
+// allocate memory.
+using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn*);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled);
+
+// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
+// last dangling raw_ptr stops referencing the memory region.
+//
+// This function is allowed to allocate memory.
+using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);
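+
+// A minimal sketch of installing the hooks (illustrative only; real embedders
+// typically record and report stack traces keyed by |id|):
+//
+//   partition_alloc::SetDanglingRawPtrDetectedFn(
+//       [](uintptr_t id) { /* Remember that |id| became dangling. */ });
+//   partition_alloc::SetDanglingRawPtrReleasedFn(
+//       [](uintptr_t id) { /* Report the dangling pointer for |id|. */ });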
+
+namespace internal {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void UnretainedDanglingRawPtrDetected(uintptr_t id);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool IsUnretainedDanglingRawPtrCheckEnabled();
+
+}  // namespace internal
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/address-space.dot b/base/allocator/partition_allocator/src/partition_alloc/dot/address-space.dot
new file mode 100644
index 0000000..2293e55
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/address-space.dot
@@ -0,0 +1,33 @@
+digraph {
+  node[shape=box]
+  edge[dir=both]
+  compound = true
+  dpi = 192
+  nodesep = 0.91
+  // Allows aligning nodes in different subgraphs.
+  newrank = true
+
+  subgraph cluster_0 {
+    label = "Address Space"
+    reg[label="Regular Pool"]
+    brp[label="BRP Pool"]
+    add[label="Additional Pools"]
+    reg->brp->add[style=invis]
+  }
+
+  manager[label="AddressPoolManager"]
+  manager->reg[constraint=false]
+  manager->brp
+  manager->add[constraint=false]
+
+  subgraph cluster_1 {
+    label = "PartitionRoots"
+    pae[label="PA-E Root"]
+    blink[label="Blink Roots"]
+    etc[style=dotted, label="Other Roots"]
+    pae->blink->etc[style=invis]
+  }
+
+  manager->blink[lhead=cluster_1]
+  {rank=same manager brp blink}
+}
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/address-space.png b/base/allocator/partition_allocator/src/partition_alloc/dot/address-space.png
new file mode 100644
index 0000000..aee032a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/address-space.png
Binary files differ
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/bucket.dot b/base/allocator/partition_allocator/src/partition_alloc/dot/bucket.dot
new file mode 100644
index 0000000..d40d38c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/bucket.dot
@@ -0,0 +1,59 @@
+digraph {
+  node[shape=plaintext]
+  edge[style=dashed, color=crimson]
+
+  page1[label=<
+    <table border="0" cellborder="1" cellspacing="0"><tr>
+      <!-- head partition page -->
+      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="160"></td>
+      <!-- a slot span in this bucket -->
+   <td port="slotspan" bgcolor="crimson" width="80"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="320"></td>
+      <!-- tail partition page -->
+      <td bgcolor="darkgrey" width="40"></td>
+    </tr></table>
+  >]
+  page2[label=<
+    <table border="0" cellborder="1" cellspacing="0"><tr>
+      <!-- head partition page -->
+      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="280"></td>
+      <!-- a slot span in this bucket -->
+      <td port="slotspan" bgcolor="crimson" width="80"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="200"></td>
+      <!-- tail partition page -->
+      <td bgcolor="darkgrey" width="40"></td>
+    </tr></table>
+  >]
+  page3[label=<
+    <table border="0" cellborder="1" cellspacing="0"><tr>
+      <!-- head partition page -->
+      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="40"></td>
+      <!-- a slot span in this bucket -->
+      <td port="slotspan1" bgcolor="crimson" width="80"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="120"></td>
+      <!-- a slot span in this bucket -->
+      <td port="slotspan2" bgcolor="crimson" width="80"></td>
+      <!-- bucket-external memory - not depicted -->
+      <td width="240"></td>
+      <!-- tail partition page -->
+      <td bgcolor="darkgrey" width="40"></td>
+    </tr></table>
+  >]
+
+  // Invisibly link the head partition pages to force alignment.
+  page1:head->page2:head->page3:head[style=invis]
+
+  // Inter-super-page links disable constraints so as to let the above
+  // fully control alignment.
+  page1:slotspan->page2:slotspan->page3:slotspan1[constraint=false]
+  page3:slotspan1:s->page3:slotspan2:sw
+}
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/bucket.png b/base/allocator/partition_allocator/src/partition_alloc/dot/bucket.png
new file mode 100644
index 0000000..4ed7b75
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/bucket.png
Binary files differ
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/layers.dot b/base/allocator/partition_allocator/src/partition_alloc/dot/layers.dot
new file mode 100644
index 0000000..bf6eea5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/layers.dot
@@ -0,0 +1,22 @@
+digraph G {
+  node[shape=box,style="filled,rounded",color=deepskyblue]
+
+  subgraph cluster_tc {
+    label = "Thread Cache"
+    rankdir = LR
+    {rank=same;TLS1,TLS2,TLSn}
+    TLS1->TLS2[style=invisible,dir=none]
+    TLS2->TLSn[style=dotted,dir=none]
+  }
+
+  subgraph cluster_central {
+    label = "Central Allocator (per-partition lock)"
+    fast[label="slot span freelists (fast path)"]
+    slow[label="slot span management (slow path)"]
+    # Forces slow path node beneath fast path node.
+    fast->slow[style=invisible,dir=none]
+  }
+
+  # Forces thread-external subgraph beneath thread cache subgraph.
+  TLS2->fast[style=invisible,dir=none]
+}
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/layers.png b/base/allocator/partition_allocator/src/partition_alloc/dot/layers.png
new file mode 100644
index 0000000..c2794f1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/layers.png
Binary files differ
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/super-page.dot b/base/allocator/partition_allocator/src/partition_alloc/dot/super-page.dot
new file mode 100644
index 0000000..7f82c9c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/super-page.dot
@@ -0,0 +1,94 @@
+digraph G {
+  node[shape=plaintext]
+  edge[style=dashed]
+
+  invisible_a[label=<
+    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
+      <TR>
+        <TD PORT="red" WIDTH="100"></TD>
+        <TD PORT="green" WIDTH="20"></TD>
+        <TD PORT="blue" WIDTH="40"></TD>
+        <TD PORT="gold" WIDTH="300"></TD>
+        <TD PORT="pink" WIDTH="60"></TD>
+      </TR>
+    </TABLE>
+  >]
+  superpage[xlabel="Super Page",label=<
+    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
+      <TR>
+        <!-- Head Partition Page -->
+        <TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
+        <TD PORT="metadata"></TD>
+        <TD BGCOLOR="darkgrey" WIDTH="18"></TD>
+        <!-- Bitmaps -->
+        <TD WIDTH="100">Bitmaps(?)</TD>
+        <!-- Several Slot Spans -->
+        <TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
+        <TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
+        <TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
+        <TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
+        <TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
+        <TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
+        <TD WIDTH="79">...</TD>
+        <!-- Tail Partition Page -->
+        <TD BGCOLOR="darkgrey" WIDTH="39"></TD>
+      </TR>
+    </TABLE>
+  >]
+  invisible_b[label=<
+    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
+      <TR>
+        <TD PORT="green" WIDTH="30"></TD>
+        <TD PORT="blue" WIDTH="60"></TD>
+        <TD PORT="gold" WIDTH="180"></TD>
+        <TD PORT="red" WIDTH="90"></TD>
+        <TD PORT="pink" WIDTH="90"></TD>
+      </TR>
+    </TABLE>
+  >]
+  metadata_page[xlabel="Metadata",label=<
+    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
+      <TR>
+        <!-- Guard Page Metadata -->
+        <TD BGCOLOR="darkgrey"> </TD>
+        <!-- Bitmaps Offset -->
+        <TD> B? </TD>
+        <!-- Red Slot Span Metadata -->
+        <TD BGCOLOR="crimson">v</TD>
+        <TD BGCOLOR="crimson">+</TD>
+        <TD BGCOLOR="crimson">+</TD>
+        <!-- Green Slot Span Metadata -->
+        <TD BGCOLOR="palegreen">v</TD>
+        <!-- Blue Slot Span Metadata -->
+        <TD BGCOLOR="cornflowerblue">v</TD>
+        <TD BGCOLOR="cornflowerblue">+</TD>
+        <!-- Gold Slot Span Metadata -->
+        <TD BGCOLOR="gold">v</TD>
+        <TD BGCOLOR="gold">+</TD>
+        <TD BGCOLOR="gold">+</TD>
+        <TD BGCOLOR="gold">+</TD>
+        <TD BGCOLOR="gold">+</TD>
+        <TD BGCOLOR="gold">+</TD>
+        <!-- Red Slot Span Metadata -->
+        <TD BGCOLOR="crimson">v</TD>
+        <TD BGCOLOR="crimson">+</TD>
+        <TD BGCOLOR="crimson">+</TD>
+        <!-- Pink Slot Span Metadata -->
+        <TD BGCOLOR="deeppink">v</TD>
+        <!-- etc. -->
+        <TD WIDTH="64">...</TD>
+        <!-- Guard Page Metadata -->
+        <TD BGCOLOR="darkgrey"> </TD>
+      </TR>
+    </TABLE>
+  >]
+
+  invisible_a:red->superpage:red->superpage:red2[color=crimson]
+  superpage:red2->invisible_b:red[color=crimson]
+  invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
+  invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
+  invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
+  invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]
+
+  superpage:metadata->metadata_page[style="",arrowhead=odot]
+}
diff --git a/base/allocator/partition_allocator/src/partition_alloc/dot/super-page.png b/base/allocator/partition_allocator/src/partition_alloc/dot/super-page.png
new file mode 100644
index 0000000..0d1a696
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/dot/super-page.png
Binary files differ
diff --git a/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h b/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h
new file mode 100644
index 0000000..89f5845
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h
@@ -0,0 +1,320 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "build/build_config.h"
+
+#if !defined(ARCH_CPU_BIG_ENDIAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/reverse_bytes.h"
+#endif  // !defined(ARCH_CPU_BIG_ENDIAN)
+
+namespace partition_alloc::internal {
+
+class EncodedNextFreelistEntry;
+
+class EncodedFreelistPtr {
+ private:
+  PA_ALWAYS_INLINE constexpr explicit EncodedFreelistPtr(std::nullptr_t)
+      : encoded_(Transform(0)) {}
+  PA_ALWAYS_INLINE explicit EncodedFreelistPtr(void* ptr)
+      // The encoded pointer stays MTE-tagged.
+      : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
+
+  PA_ALWAYS_INLINE EncodedNextFreelistEntry* Decode() const {
+    return reinterpret_cast<EncodedNextFreelistEntry*>(Transform(encoded_));
+  }
+
+  PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }
+
+  PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
+    encoded_ = encoded;
+  }
+
+  PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }
+
+  // Transform() works the same in both directions, so it can be used for
+  // encoding and decoding.
+  PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
+    // We use bswap on little endian as a fast transformation for two reasons:
+    // 1) On 64-bit architectures, the swapped pointer is very unlikely to be a
+    //    canonical address. Therefore, if an object is freed and its vtable is
+    //    used where the attacker doesn't get the chance to run allocations
+    //    between the free and use, the vtable dereference is likely to fault.
+    // 2) If the attacker has a linear buffer overflow and elects to try and
+    //    corrupt a freelist pointer, partial pointer overwrite attacks are
+    //    thwarted.
+    // For big endian, similar guarantees are arrived at with a negation.
+#if defined(ARCH_CPU_BIG_ENDIAN)
+    uintptr_t transformed = ~address;
+#else
+    uintptr_t transformed = ReverseBytes(address);
+#endif
+    return transformed;
+  }
+
+  uintptr_t encoded_;
+
+  friend EncodedNextFreelistEntry;
+};
+
+// Freelist entries are encoded for security reasons. See
+// //base/allocator/partition_allocator/PartitionAlloc.md
+// and |Transform()| for the rationale and mechanism, respectively.
+class EncodedNextFreelistEntry {
+ private:
+  constexpr explicit EncodedNextFreelistEntry(std::nullptr_t)
+      : encoded_next_(EncodedFreelistPtr(nullptr))
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+        ,
+        shadow_(encoded_next_.Inverted())
+#endif
+  {
+  }
+  explicit EncodedNextFreelistEntry(EncodedNextFreelistEntry* next)
+      : encoded_next_(EncodedFreelistPtr(next))
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+        ,
+        shadow_(encoded_next_.Inverted())
+#endif
+  {
+  }
+  // For testing only.
+  EncodedNextFreelistEntry(void* next, bool make_shadow_match)
+      : encoded_next_(EncodedFreelistPtr(next))
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+        ,
+        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
+#endif
+  {
+  }
+
+ public:
+  ~EncodedNextFreelistEntry() = delete;
+
+  // Emplaces the freelist entry at the beginning of the given slot span, and
+  // initializes it as null-terminated.
+  PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
+      void* slot_start_tagged) {
+    // |slot_start_tagged| is MTE-tagged.
+    auto* entry = new (slot_start_tagged) EncodedNextFreelistEntry(nullptr);
+    return entry;
+  }
+  PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
+      uintptr_t slot_start) {
+    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
+  }
+
+  // Emplaces the freelist entry at the beginning of the given slot span, and
+  // initializes it with the given |next| pointer, but encoded.
+  //
+  // This freelist is built for the thread cache. This means that we can't
+  // check that this entry and the next pointer belong to the same super page,
+  // as thread-cache spans may chain slots across super pages.
+  PA_ALWAYS_INLINE static EncodedNextFreelistEntry*
+  EmplaceAndInitForThreadCache(uintptr_t slot_start,
+                               EncodedNextFreelistEntry* next) {
+    auto* entry =
+        new (SlotStartAddr2Ptr(slot_start)) EncodedNextFreelistEntry(next);
+    return entry;
+  }
+
+  // Emplaces the freelist entry at the beginning of the given slot span, and
+  // initializes it with the given |next| pointer.
+  //
+  // This is for testing purposes only! |make_shadow_match| allows you to
+  // choose whether the shadow matches the next pointer properly or is trash.
+  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
+                                                     void* next,
+                                                     bool make_shadow_match) {
+    new (SlotStartAddr2Ptr(slot_start))
+        EncodedNextFreelistEntry(next, make_shadow_match);
+  }
+
+  void CorruptNextForTesting(uintptr_t v) {
+    // We just need a value that can never be a valid pointer here.
+    encoded_next_.Override(EncodedFreelistPtr::Transform(v));
+  }
+
+  // Puts `slot_size` on the stack before crashing in case of memory
+  // corruption. Meant to be used to report the failed allocation size.
+  template <bool crash_on_corruption>
+  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextForThreadCache(
+      size_t slot_size) const;
+  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNext(size_t slot_size) const;
+
+  PA_NOINLINE void CheckFreeList(size_t slot_size) const {
+    for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
+      // |GetNext()| checks freelist integrity.
+    }
+  }
+
+  PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
+    for (auto* entry = this; entry;
+         entry = entry->GetNextForThreadCache<true>(slot_size)) {
+      // |GetNextForThreadCache()| checks freelist integrity.
+    }
+  }
+
+  PA_ALWAYS_INLINE void SetNext(EncodedNextFreelistEntry* entry) {
+    // SetNext() is either called on the freelist head, when provisioning new
+    // slots, or after GetNext() has already been called, so there is no need
+    // to pass the size.
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    // Regular freelists always point to an entry within the same super page.
+    //
+    // This is most likely a PartitionAlloc bug if this triggers.
+    if (PA_UNLIKELY(entry &&
+                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
+                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
+      FreelistCorruptionDetected(0);
+    }
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+    encoded_next_ = EncodedFreelistPtr(entry);
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+    shadow_ = encoded_next_.Inverted();
+#endif
+  }
+
+  // Zeroes out |this| before returning the slot. The pointer to this memory
+  // will be returned to the user (the caller of Alloc()), so it can't retain
+  // internal data.
+  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
+    encoded_next_.Override(0);
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+    shadow_ = 0;
+#endif
+    return SlotStartPtr2Addr(this);
+  }
+
+  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
+    return !encoded_next_;
+  }
+
+ private:
+  template <bool crash_on_corruption>
+  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextInternal(
+      size_t slot_size,
+      bool for_thread_cache) const;
+
+  PA_ALWAYS_INLINE static bool IsSane(const EncodedNextFreelistEntry* here,
+                                      const EncodedNextFreelistEntry* next,
+                                      bool for_thread_cache) {
+    // Don't allow the freelist to be blindly followed to any location.
+    // Checks two constraints:
+    // - here and next must belong to the same superpage, unless this is in the
+    //   thread cache (they even always belong to the same slot span).
+    // - next cannot point inside the metadata area.
+    //
+    // Also, the lightweight UaF detection (pointer shadow) is checked.
+
+    uintptr_t here_address = SlotStartPtr2Addr(here);
+    uintptr_t next_address = SlotStartPtr2Addr(next);
+
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
+#else
+    bool shadow_ptr_ok = true;
+#endif
+
+    bool same_superpage = (here_address & kSuperPageBaseMask) ==
+                          (next_address & kSuperPageBaseMask);
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+    bool marked_as_free_in_bitmap =
+        for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
+#else
+    bool marked_as_free_in_bitmap = true;
+#endif
+
+    // This is necessary but not sufficient when quarantine is enabled; see
+    // SuperPagePayloadBegin() in partition_page.h. However, we don't want to
+    // fetch anything from the root in this function.
+    bool not_in_metadata =
+        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();
+
+    if (for_thread_cache) {
+      return shadow_ptr_ok & not_in_metadata;
+    } else {
+      return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
+             not_in_metadata;
+    }
+  }
+
+  EncodedFreelistPtr encoded_next_;
+  // This is intended to detect unintentional corruptions of the freelist.
+  // These can happen due to a Use-after-Free, or overflow of the previous
+  // allocation in the slot span.
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+  uintptr_t shadow_;
+#endif
+};
+
+template <bool crash_on_corruption>
+PA_ALWAYS_INLINE EncodedNextFreelistEntry*
+EncodedNextFreelistEntry::GetNextInternal(size_t slot_size,
+                                          bool for_thread_cache) const {
+  // GetNext() can be called on discarded memory, in which case |encoded_next_|
+  // is 0, and none of the checks apply. Don't prefetch nullptr either.
+  if (IsEncodedNextPtrZero()) {
+    return nullptr;
+  }
+
+  auto* ret = encoded_next_.Decode();
+  // We rely on constant propagation to remove the branches coming from
+  // |for_thread_cache|, since the argument is always a compile-time constant.
+  if (PA_UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
+    if constexpr (crash_on_corruption) {
+      // Put the corrupted data on the stack, it may give us more information
+      // about what kind of corruption that was.
+      PA_DEBUG_DATA_ON_STACK("first",
+                             static_cast<size_t>(encoded_next_.encoded_));
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+      PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
+#endif
+      FreelistCorruptionDetected(slot_size);
+    } else {
+      return nullptr;
+    }
+  }
+
+  // In real-world profiles, the load of |encoded_next_| above is responsible
+  // for a large fraction of the allocation cost. However, we cannot prefetch
+  // it early enough, since its address is only known right before the access.
+  //
+  // In the case of repeated allocations, we can instead prefetch the access
+  // that will be done at the *next* allocation, which will touch *ret, so
+  // prefetch it now.
+  PA_PREFETCH(ret);
+
+  return ret;
+}
+
+template <bool crash_on_corruption>
+PA_ALWAYS_INLINE EncodedNextFreelistEntry*
+EncodedNextFreelistEntry::GetNextForThreadCache(size_t slot_size) const {
+  return GetNextInternal<crash_on_corruption>(slot_size, true);
+}
+
+PA_ALWAYS_INLINE EncodedNextFreelistEntry* EncodedNextFreelistEntry::GetNext(
+    size_t slot_size) const {
+  return GetNextInternal<true>(slot_size, false);
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
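As a standalone illustration of the little-endian Transform() above (assuming a GCC/Clang toolchain for __builtin_bswap64; this sketch is not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // A plausible user-space pointer value.
  uint64_t address = 0x00007ffde0c0ffee;
  // On little endian, Transform() is a byte swap...
  uint64_t encoded = __builtin_bswap64(address);
  // ...so applying it twice restores the original value.
  assert(__builtin_bswap64(encoded) == address);
  // The encoded value (0xeeffc0e0fd7f0000) is not a canonical x86-64 address,
  // so dereferencing a stale or corrupted freelist pointer is likely to fault.
  return 0;
}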
diff --git a/base/allocator/partition_allocator/src/partition_alloc/extended_api.cc b/base/allocator/partition_allocator/src/partition_alloc/extended_api.cc
new file mode 100644
index 0000000..5ca0c0f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/extended_api.cc
@@ -0,0 +1,135 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/extended_api.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+
+namespace partition_alloc::internal {
+
+#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+namespace {
+
+void DisableThreadCacheForRootIfEnabled(PartitionRoot* root) {
+  // Some platforms don't have a thread cache, or it could already have been
+  // disabled.
+  if (!root || !root->settings.with_thread_cache) {
+    return;
+  }
+
+  ThreadCacheRegistry::Instance().PurgeAll();
+  root->settings.with_thread_cache = false;
+  // This doesn't destroy the thread cache object(s). For background threads,
+  // they will be collected (and their cached memory freed) at thread
+  // destruction time. For the main thread, we leak it.
+}
+
+void EnablePartitionAllocThreadCacheForRootIfDisabled(PartitionRoot* root) {
+  if (!root) {
+    return;
+  }
+  root->settings.with_thread_cache = true;
+}
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+void DisablePartitionAllocThreadCacheForProcess() {
+  PA_CHECK(allocator_shim::internal::PartitionAllocMalloc::
+               AllocatorConfigurationFinalized());
+  auto* regular_allocator =
+      allocator_shim::internal::PartitionAllocMalloc::Allocator();
+  auto* aligned_allocator =
+      allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
+  DisableThreadCacheForRootIfEnabled(regular_allocator);
+  if (aligned_allocator != regular_allocator) {
+    DisableThreadCacheForRootIfEnabled(aligned_allocator);
+  }
+  DisableThreadCacheForRootIfEnabled(
+      allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
+}
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+}  // namespace
+
+#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+ThreadAllocStats GetAllocStatsForCurrentThread() {
+  ThreadCache* thread_cache = ThreadCache::Get();
+  if (ThreadCache::IsValid(thread_cache)) {
+    return thread_cache->thread_alloc_stats();
+  }
+  return {};
+}
+
+#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
+ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
+    PartitionRoot* root)
+    : root_(root) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  auto* regular_allocator =
+      allocator_shim::internal::PartitionAllocMalloc::Allocator();
+  regular_was_enabled_ =
+      regular_allocator && regular_allocator->settings.with_thread_cache;
+
+  if (root_ != regular_allocator) {
+    // A different root currently backs the ThreadCache. Disable the thread
+    // cache for the whole process first.
+    DisablePartitionAllocThreadCacheForProcess();
+    EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
+    // Replace ThreadCache's PartitionRoot.
+    ThreadCache::SwapForTesting(root_);
+  } else {
+    if (!regular_was_enabled_) {
+      EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
+      ThreadCache::SwapForTesting(root_);
+    }
+  }
+#else
+  PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
+  EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
+  ThreadCache::SwapForTesting(root_);
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+  PA_CHECK(ThreadCache::Get());
+}
+
+ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  auto* regular_allocator =
+      allocator_shim::internal::PartitionAllocMalloc::Allocator();
+  bool regular_enabled =
+      regular_allocator && regular_allocator->settings.with_thread_cache;
+
+  if (regular_was_enabled_) {
+    if (!regular_enabled) {
+      // Need to re-enable ThreadCache for the process.
+      EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
+      // In this case, |regular_allocator| must be ThreadCache's root.
+      ThreadCache::SwapForTesting(regular_allocator);
+    } else {
+      // ThreadCache is enabled for the process, but we need to be
+      // careful about ThreadCache's PartitionRoot. If it is different from
+      // |regular_allocator|, we need to invoke SwapForTesting().
+      if (regular_allocator != root_) {
+        ThreadCache::SwapForTesting(regular_allocator);
+      }
+    }
+  } else {
+    // ThreadCache was disabled for the whole process.
+    DisableThreadCacheForRootIfEnabled(regular_allocator);
+    ThreadCache::SwapForTesting(nullptr);
+  }
+#else
+  // First, disable the test thread cache we have.
+  DisableThreadCacheForRootIfEnabled(root_);
+
+  ThreadCache::SwapForTesting(nullptr);
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+}
+#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/extended_api.h b/base/allocator/partition_allocator/src/partition_alloc/extended_api.h
new file mode 100644
index 0000000..7b154b6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/extended_api.h
@@ -0,0 +1,42 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+
+namespace partition_alloc::internal {
+// Get allocation stats for the thread cache partition on the current
+// thread. See the documentation of ThreadAllocStats for details.
+ThreadAllocStats GetAllocStatsForCurrentThread();
+
+// Creates a scope for testing which:
+// - if the given |root| is a default malloc root for the entire process,
+//   enables the thread cache for the entire process.
+//   (This may happen if UsePartitionAllocAsMalloc is enabled.)
+// - otherwise, disables the thread cache for the entire process, and
+//   replaces it with a thread cache for |root|.
+// This class is unsafe to use while multiple threads are running in the
+// process.
+class ThreadCacheProcessScopeForTesting {
+ public:
+  explicit ThreadCacheProcessScopeForTesting(PartitionRoot* root);
+  ~ThreadCacheProcessScopeForTesting();
+
+  ThreadCacheProcessScopeForTesting() = delete;
+
+ private:
+  PartitionRoot* root_ = nullptr;
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  bool regular_was_enabled_ = false;
+#endif
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_
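A rough sketch of the intended test-side usage (the PartitionRoot construction is illustrative; real tests obtain their root however the test fixture dictates):

#include "base/allocator/partition_allocator/src/partition_alloc/extended_api.h"

void ExampleTestBody() {
  partition_alloc::PartitionRoot root(partition_alloc::PartitionOptions{});
  {
    partition_alloc::internal::ThreadCacheProcessScopeForTesting scope(&root);
    // Within this scope, the current thread's cache is backed by |root| (or,
    // if |root| already is the process-wide malloc root, simply kept enabled).
  }
  // On destruction, the previous thread-cache configuration is restored.
}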
diff --git a/base/allocator/partition_allocator/src/partition_alloc/flags.h b/base/allocator/partition_allocator/src/partition_alloc/flags.h
new file mode 100644
index 0000000..a1174d3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/flags.h
@@ -0,0 +1,101 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header provides a type-safe way of storing OR-combinations of enum
+// values.
+//
+// The traditional C++ approach for storing OR-combinations of enum values is to
+// use an int or unsigned int variable. The inconvenience with this approach is
+// that there's no type checking at all; any enum value can be OR'd with any
+// other enum value and passed on to a function that takes an int or unsigned
+// int.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
+
+#include <type_traits>
+
+namespace partition_alloc::internal {
+// Returns `T` if and only if `EnumType` is a scoped enum.
+template <typename EnumType, typename T = EnumType>
+using IfEnum = std::enable_if_t<
+    std::is_enum_v<EnumType> &&
+        !std::is_convertible_v<EnumType, std::underlying_type_t<EnumType>>,
+    T>;
+
+// We assume `EnumType` defines `kMaxValue`, holding the largest value, and
+// that every power of two up to it is representable in `EnumType`.
+template <typename EnumType>
+constexpr inline EnumType kAllFlags = static_cast<IfEnum<EnumType>>(
+    (static_cast<std::underlying_type_t<EnumType>>(EnumType::kMaxValue) << 1) -
+    1);
+
+template <typename EnumType>
+constexpr inline IfEnum<EnumType, bool> AreValidFlags(EnumType flags) {
+  const auto raw_flags = static_cast<std::underlying_type_t<EnumType>>(flags);
+  const auto raw_all_flags =
+      static_cast<std::underlying_type_t<EnumType>>(kAllFlags<EnumType>);
+  return (raw_flags & ~raw_all_flags) == 0;
+}
+
+// Checks whether `subset` is a subset of `superset`.
+template <typename EnumType>
+constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
+                                                      EnumType subset) {
+  return (superset & subset) == subset;
+}
+
+// Removes flags `target` from `from`.
+template <typename EnumType>
+constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
+  return from & ~target;
+}
+
+// A macro to define binary arithmetic over `EnumType`.
+// Use inside `namespace partition_alloc::internal`.
+#define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType)                              \
+  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator&(        \
+      const EnumType& lhs, const EnumType& rhs) {                            \
+    return static_cast<EnumType>(                                            \
+        static_cast<std::underlying_type_t<EnumType>>(lhs) &                 \
+        static_cast<std::underlying_type_t<EnumType>>(rhs));                 \
+  }                                                                          \
+  [[maybe_unused]] inline constexpr EnumType& operator&=(                    \
+      EnumType& lhs, const EnumType& rhs) {                                  \
+    lhs = lhs & rhs;                                                         \
+    return lhs;                                                              \
+  }                                                                          \
+  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator|(        \
+      const EnumType& lhs, const EnumType& rhs) {                            \
+    return static_cast<EnumType>(                                            \
+        static_cast<std::underlying_type_t<EnumType>>(lhs) |                 \
+        static_cast<std::underlying_type_t<EnumType>>(rhs));                 \
+  }                                                                          \
+  [[maybe_unused]] inline constexpr EnumType& operator|=(                    \
+      EnumType& lhs, const EnumType& rhs) {                                  \
+    lhs = lhs | rhs;                                                         \
+    return lhs;                                                              \
+  }                                                                          \
+  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator^(        \
+      const EnumType& lhs, const EnumType& rhs) {                            \
+    return static_cast<EnumType>(                                            \
+        static_cast<std::underlying_type_t<EnumType>>(lhs) ^                 \
+        static_cast<std::underlying_type_t<EnumType>>(rhs));                 \
+  }                                                                          \
+  [[maybe_unused]] inline constexpr EnumType& operator^=(                    \
+      EnumType& lhs, const EnumType& rhs) {                                  \
+    lhs = lhs ^ rhs;                                                         \
+    return lhs;                                                              \
+  }                                                                          \
+  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator~(        \
+      const EnumType& val) {                                                 \
+    return static_cast<EnumType>(                                            \
+        static_cast<std::underlying_type_t<EnumType>>(kAllFlags<EnumType>) & \
+        ~static_cast<std::underlying_type_t<EnumType>>(val));                \
+  }                                                                          \
+  static_assert(true) /* semicolon here */
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
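For illustration, a hypothetical scoped enum wired up with the helpers above. ExampleFlags and its enumerators are made up; everything else comes from this header, and the snippet must live inside namespace partition_alloc::internal so the generated operators are found.

enum class ExampleFlags : unsigned {
  kZeroFill = 1 << 0,
  kNoHooks = 1 << 1,
  kMaxValue = kNoHooks,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(ExampleFlags);

// kAllFlags<ExampleFlags> is (kMaxValue << 1) - 1 == 0b11.
constexpr ExampleFlags kBoth = ExampleFlags::kZeroFill | ExampleFlags::kNoHooks;
static_assert(AreValidFlags(kBoth));
static_assert(ContainsFlags(kBoth, ExampleFlags::kZeroFill));
static_assert(RemoveFlags(kBoth, ExampleFlags::kNoHooks) ==
              ExampleFlags::kZeroFill);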
diff --git a/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h b/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h
new file mode 100644
index 0000000..c11b78d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h
@@ -0,0 +1,140 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
+
+#include <climits>
+#include <cstdint>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+
+namespace partition_alloc::internal {
+
+PA_ALWAYS_INLINE uintptr_t GetFreeSlotBitmapAddressForPointer(uintptr_t ptr) {
+  uintptr_t super_page = ptr & kSuperPageBaseMask;
+  return SuperPageFreeSlotBitmapAddr(super_page);
+}
+
+// Calculates the cell address and the offset inside the cell corresponding to
+// the |slot_start|.
+PA_ALWAYS_INLINE std::pair<FreeSlotBitmapCellType*, size_t>
+GetFreeSlotBitmapCellPtrAndBitIndex(uintptr_t slot_start) {
+  uintptr_t slot_superpage_offset = slot_start & kSuperPageOffsetMask;
+  uintptr_t superpage_bitmap_start =
+      GetFreeSlotBitmapAddressForPointer(slot_start);
+  uintptr_t cell_addr = base::bits::AlignDown(
+      superpage_bitmap_start +
+          (slot_superpage_offset / kSmallestBucket) / CHAR_BIT,
+      sizeof(FreeSlotBitmapCellType));
+  PA_DCHECK(cell_addr < superpage_bitmap_start + kFreeSlotBitmapSize);
+  size_t bit_index =
+      (slot_superpage_offset / kSmallestBucket) & kFreeSlotBitmapOffsetMask;
+  PA_DCHECK(bit_index < kFreeSlotBitmapBitsPerCell);
+  return {reinterpret_cast<FreeSlotBitmapCellType*>(cell_addr), bit_index};
+}
+
+// This bitmap marks a used slot as 0 and a free one as 1. Slots default to
+// "used" so that a used slot cannot be handed out again if the freelist entry
+// is overwritten. The state of the bitmap is expected to stay in sync with the
+// freelist (i.e. a bit is set to 1 if and only if the slot is in the
+// freelist).
+
+PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithAOne(size_t n) {
+  return static_cast<FreeSlotBitmapCellType>(1) << n;
+}
+
+PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithTrailingOnes(size_t n) {
+  return (static_cast<FreeSlotBitmapCellType>(1) << n) -
+         static_cast<FreeSlotBitmapCellType>(1);
+}
+
+// Returns true if the bit corresponding to |slot_start| is used (= 0).
+PA_ALWAYS_INLINE bool FreeSlotBitmapSlotIsUsed(uintptr_t slot_start) {
+  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
+  return (*cell & CellWithAOne(bit_index)) == 0;
+}
+
+// Marks the bit corresponding to |slot_start| as used (= 0).
+PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsUsed(uintptr_t slot_start) {
+  PA_CHECK(!FreeSlotBitmapSlotIsUsed(slot_start));
+  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
+  *cell &= ~CellWithAOne(bit_index);
+}
+
+// Marks the bit corresponding to |slot_start| as free (= 1).
+PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsFree(uintptr_t slot_start) {
+  PA_CHECK(FreeSlotBitmapSlotIsUsed(slot_start));
+  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
+  *cell |= CellWithAOne(bit_index);
+}
+
+// Resets (= sets to 0) all the bits corresponding to the slot-start addresses
+// within [begin_addr, end_addr). |begin_addr| has to be the beginning of a
+// slot, but |end_addr| does not have to be.
+PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
+                                          uintptr_t end_addr,
+                                          uintptr_t slot_size) {
+  PA_DCHECK(begin_addr <= end_addr);
+  // |end_addr| has to be kSmallestBucket-aligned.
+  PA_DCHECK((end_addr & (kSmallestBucket - 1)) == 0u);
+  for (uintptr_t slot_start = begin_addr; slot_start < end_addr;
+       slot_start += slot_size) {
+    auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
+    *cell &= ~CellWithAOne(bit_index);
+  }
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Checks if the cells that are meant to contain only unset bits are really 0.
+  auto [begin_cell, begin_bit_index] =
+      GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
+  auto [end_cell, end_bit_index] =
+      GetFreeSlotBitmapCellPtrAndBitIndex(end_addr);
+
+  // The bits that should be set to 0 range from the |begin_bit_index|th bit
+  // of |begin_cell| to the |end_bit_index - 1|th bit of |end_cell|. We verify
+  // that all the bits are 0 for the cells in [begin_cell + 1, end_cell). For
+  // |begin_cell| and |end_cell| themselves, we have to handle them separately
+  // and check only the partial bits.
+  // | begin_cell |     |...|     | end_cell |
+  // |11...100...0|0...0|...|0...0|0...01...1|
+  //        ^                           ^
+  //        |                           |
+  //    begin_addr                   end_addr
+
+  if (begin_cell == end_cell) {
+    PA_DCHECK((*begin_cell & (~CellWithTrailingOnes(begin_bit_index) &
+                              CellWithTrailingOnes(end_bit_index))) == 0u);
+  }
+
+  if (begin_bit_index != 0) {
+    // Checks the bits between [begin_bit_index, kFreeSlotBitmapBitsPerCell) in
+    // the begin_cell are 0
+    PA_DCHECK((*begin_cell & ~CellWithTrailingOnes(begin_bit_index)) == 0u);
+    ++begin_cell;
+  }
+
+  if (end_bit_index != 0) {
+    // Checks the bits between [0, end_bit_index) in the end_cell are 0
+    PA_DCHECK((*end_cell & CellWithTrailingOnes(end_bit_index)) == 0u);
+  }
+
+  for (FreeSlotBitmapCellType* cell = begin_cell; cell != end_cell; ++cell) {
+    PA_DCHECK(*cell == 0u);
+  }
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h b/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h
new file mode 100644
index 0000000..00e0a5d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h
@@ -0,0 +1,61 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+
+namespace partition_alloc::internal {
+
+using FreeSlotBitmapCellType = uint64_t;
+constexpr size_t kFreeSlotBitmapBitsPerCell =
+    sizeof(FreeSlotBitmapCellType) * CHAR_BIT;
+constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;
+
+// The bitmap needs one bit per slot, i.e. as many bits as the maximum number
+// of slots in a super page; kFreeSlotBitmapSize is that count in bytes.
+constexpr size_t kFreeSlotBitmapSize =
+    (kSuperPageSize / kSmallestBucket) / CHAR_BIT;
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+ReservedFreeSlotBitmapSize() {
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
+#else
+  return 0;
+#endif
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+CommittedFreeSlotBitmapSize() {
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
+#else
+  return 0;
+#endif
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+NumPartitionPagesPerFreeSlotBitmap() {
+  return ReservedFreeSlotBitmapSize() / PartitionPageSize();
+}
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
+  PA_DCHECK(!(super_page % kSuperPageAlignment));
+  return super_page + PartitionPageSize();
+}
+#endif
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
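A quick sizing sanity check under illustrative numbers (a 2 MiB super page and a 16-byte smallest bucket; the real values come from partition_alloc_constants.h and may differ by configuration):

#include <climits>
#include <cstddef>

// One bit per smallest-bucket-sized slot in a super page, expressed in bytes.
constexpr size_t kIllustrativeSuperPageSize = 2 * 1024 * 1024;
constexpr size_t kIllustrativeSmallestBucket = 16;
constexpr size_t kIllustrativeBitmapSize =
    (kIllustrativeSuperPageSize / kIllustrativeSmallestBucket) / CHAR_BIT;
static_assert(kIllustrativeBitmapSize == 16 * 1024,
              "131072 slots -> 131072 bits -> 16 KiB of bitmap");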
diff --git a/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_unittest.cc
new file mode 100644
index 0000000..31920e1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_unittest.cc
@@ -0,0 +1,158 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+
+#include <cstdint>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// This test is disabled when MEMORY_TOOL_REPLACES_ALLOCATOR is defined because
+// we cannot locate the freeslot bitmap address in that case.
+#if BUILDFLAG(USE_FREESLOT_BITMAP) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace partition_alloc::internal {
+
+namespace {
+
+class PartitionAllocFreeSlotBitmapTest : public ::testing::Test {
+ protected:
+  static constexpr FreeSlotBitmapCellType kAllUsed = 0u;
+  static constexpr FreeSlotBitmapCellType kAllFree =
+      std::numeric_limits<FreeSlotBitmapCellType>::max();
+
+  void SetUp() override {
+    // Allocates memory and creates a pseudo superpage in it. We need to
+    // allocate |2 * kSuperPageSize| so that a whole superpage is contained in
+    // the allocated region.
+    allocator_.init(PartitionOptions{});
+    allocated_ptr_ = reinterpret_cast<uintptr_t>(
+        allocator_.root()->Alloc(2 * kSuperPageSize));
+    super_page_ = (allocated_ptr_ + kSuperPageSize) & kSuperPageBaseMask;
+
+    // Checks that the whole superpage is in the allocated region.
+    PA_DCHECK(super_page_ + kSuperPageSize <=
+              allocated_ptr_ + 2 * kSuperPageSize);
+  }
+
+  void TearDown() override {
+    allocator_.root()->Free(reinterpret_cast<void*>(allocated_ptr_));
+  }
+
+  // Returns the |index|-th slot address in the virtual superpage. It assumes
+  // that there are no slot spans and that the superpage is filled only with
+  // slots of size |kSmallestBucket|.
+  uintptr_t SlotAddr(size_t index) {
+    return SuperPagePayloadBegin(super_page_, false) + index * kSmallestBucket;
+  }
+
+  // Returns the last slot address in the virtual superpage. It assumes that
+  // there are no slot spans and that the superpage is filled only with slots
+  // of size |kSmallestBucket|.
+  uintptr_t LastSlotAddr() {
+    return super_page_ + kSuperPageSize - PartitionPageSize() - kSmallestBucket;
+  }
+
+ private:
+  uintptr_t allocated_ptr_;
+  uintptr_t super_page_;
+  PartitionAllocator allocator_;
+};
+
+}  // namespace
+
+TEST_F(PartitionAllocFreeSlotBitmapTest, MarkFirstSlotAsUsed) {
+  uintptr_t slot_addr = SlotAddr(0);
+  FreeSlotBitmapMarkSlotAsFree(slot_addr);
+  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_addr));
+
+  FreeSlotBitmapMarkSlotAsUsed(slot_addr);
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_addr));
+}
+
+TEST_F(PartitionAllocFreeSlotBitmapTest, MarkFirstSlotAsFree) {
+  uintptr_t slot_addr = SlotAddr(0);
+  // All slots are set to "used" by default.
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_addr));
+
+  FreeSlotBitmapMarkSlotAsFree(slot_addr);
+  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_addr));
+}
+
+TEST_F(PartitionAllocFreeSlotBitmapTest, MarkAllBitsInCellAsUsed) {
+  const size_t kFirstSlotAddr = SlotAddr(0);
+  const size_t kLastSlotAddr = SlotAddr(kFreeSlotBitmapBitsPerCell);
+
+  auto [cell_first_slot, bit_index_first_slot] =
+      GetFreeSlotBitmapCellPtrAndBitIndex(kFirstSlotAddr);
+  auto [cell_last_slot, bit_index_last_slot] =
+      GetFreeSlotBitmapCellPtrAndBitIndex(kLastSlotAddr);
+
+  // Check that the bit corresponding to |kFirstSlotAddr| is the first bit in
+  // some cell (= |cell_first_slot|), and the bit for |kLastSlotAddr| is the
+  // first bit in the next cell. This means that we are manipulating all the
+  // bits in |cell_first_slot| in this test.
+  EXPECT_EQ(0u, bit_index_first_slot);
+  EXPECT_EQ(0u, bit_index_last_slot);
+  EXPECT_NE(cell_first_slot, cell_last_slot);
+
+  for (size_t slot_addr = kFirstSlotAddr; slot_addr < kLastSlotAddr;
+       slot_addr += kSmallestBucket) {
+    FreeSlotBitmapMarkSlotAsFree(slot_addr);
+  }
+
+  // Check all the bits in |cell_first_slot| are 1 (= free).
+  EXPECT_EQ(kAllFree, *cell_first_slot);
+
+  for (size_t slot_addr = kFirstSlotAddr; slot_addr < kLastSlotAddr;
+       slot_addr += kSmallestBucket) {
+    FreeSlotBitmapMarkSlotAsUsed(slot_addr);
+  }
+
+  // Check all the bits in |cell_first_slot| are 0 (= used).
+  EXPECT_EQ(kAllUsed, *cell_first_slot);
+}
+
+TEST_F(PartitionAllocFreeSlotBitmapTest, MarkLastSlotAsUsed) {
+  uintptr_t last_slot_addr = LastSlotAddr();
+  FreeSlotBitmapMarkSlotAsFree(last_slot_addr);
+  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(last_slot_addr));
+
+  FreeSlotBitmapMarkSlotAsUsed(last_slot_addr);
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(last_slot_addr));
+}
+
+TEST_F(PartitionAllocFreeSlotBitmapTest, ResetBitmap) {
+  const size_t kNumSlots = 3 * kFreeSlotBitmapBitsPerCell;
+  for (size_t i = 0; i < kNumSlots; ++i) {
+    FreeSlotBitmapMarkSlotAsFree(SlotAddr(i));
+  }
+
+  auto [cell_first_slot, bit_index_first_slot] =
+      GetFreeSlotBitmapCellPtrAndBitIndex(SlotAddr(0));
+  EXPECT_EQ(0u, bit_index_first_slot);
+  EXPECT_EQ(kAllFree, *cell_first_slot);
+  EXPECT_EQ(kAllFree, *(cell_first_slot + 1));
+  EXPECT_EQ(kAllFree, *(cell_first_slot + 2));
+
+  FreeSlotBitmapReset(SlotAddr(kFreeSlotBitmapBitsPerCell),
+                      SlotAddr(2 * kFreeSlotBitmapBitsPerCell),
+                      kSmallestBucket);
+  EXPECT_EQ(kAllFree, *cell_first_slot);
+  EXPECT_EQ(kAllUsed, *(cell_first_slot + 1));
+  EXPECT_EQ(kAllFree, *(cell_first_slot + 2));
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(USE_FREESLOT_BITMAP) &&
+        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.cc b/base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.cc
new file mode 100644
index 0000000..31b8569
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.cc
@@ -0,0 +1,125 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.h"
+
+#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
+
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+// static
+void* GwpAsanSupport::MapRegion(size_t slot_count,
+                                std::vector<uint16_t>& free_list) {
+  PA_CHECK(slot_count > 0);
+
+  constexpr PartitionOptions kConfig{
+      .backup_ref_ptr = PartitionOptions::kEnabled,
+  };
+  static internal::base::NoDestructor<PartitionRoot> root(kConfig);
+
+  const size_t kSlotSize = 2 * internal::SystemPageSize();
+  uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
+      kSlotSize, root->GetBucketDistribution());
+  auto* bucket = root->buckets + bucket_index;
+
+  const size_t kSuperPagePayloadStartOffset =
+      internal::SuperPagePayloadStartOffset(
+          /* is_managed_by_normal_buckets = */ true,
+          /* with_quarantine = */ false);
+  PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
+  const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
+      kSuperPagePayloadStartOffset;
+  const size_t kSuperPageGwpAsanSlotAreaEndOffset =
+      internal::SuperPagePayloadEndOffset();
+  const size_t kSuperPageGwpAsanSlotAreaSize =
+      kSuperPageGwpAsanSlotAreaEndOffset - kSuperPageGwpAsanSlotAreaBeginOffset;
+  const size_t kSlotsPerSlotSpan = bucket->get_bytes_per_span() / kSlotSize;
+  const size_t kSlotsPerSuperPage =
+      kSuperPageGwpAsanSlotAreaSize / (kSlotsPerSlotSpan * kSlotSize);
+
+  size_t super_page_count = 1 + ((slot_count - 1) / kSlotsPerSuperPage);
+  PA_CHECK(super_page_count <=
+           std::numeric_limits<size_t>::max() / kSuperPageSize);
+  uintptr_t super_page_span_start;
+  {
+    internal::ScopedGuard locker{internal::PartitionRootLock(root.get())};
+    super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
+        root.get(), super_page_count, AllocFlags::kNone);
+
+    if (!super_page_span_start) {
+      return nullptr;
+    }
+
+#if defined(ARCH_CPU_64_BITS)
+    // Mapping the GWP-ASan region into the lower 32 bits of the address space
+    // makes it much more likely that a bad pointer dereference points into
+    // our region and triggers a false positive report. We rely on the fact
+    // that PA address pools are never allocated in the first 4GB due to
+    // their alignment requirements.
+    PA_CHECK(super_page_span_start >= (1ULL << 32));
+#endif  // defined(ARCH_CPU_64_BITS)
+
+    uintptr_t super_page_span_end =
+        super_page_span_start + super_page_count * kSuperPageSize;
+    PA_CHECK(super_page_span_start < super_page_span_end);
+
+    for (uintptr_t super_page = super_page_span_start;
+         super_page < super_page_span_end; super_page += kSuperPageSize) {
+      auto* page_metadata =
+          internal::PartitionSuperPageToMetadataArea(super_page);
+
+      // Index 0 is invalid because it is the super page extent metadata.
+      for (size_t partition_page_idx =
+               1 + internal::NumPartitionPagesPerFreeSlotBitmap();
+           partition_page_idx + bucket->get_pages_per_slot_span() <
+           internal::NumPartitionPagesPerSuperPage();
+           partition_page_idx += bucket->get_pages_per_slot_span()) {
+        auto* slot_span_metadata =
+            &page_metadata[partition_page_idx].slot_span_metadata;
+        bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
+        auto slot_span_start =
+            internal::SlotSpanMetadata::ToSlotSpanStart(slot_span_metadata);
+
+        for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
+          auto slot_start = slot_span_start + slot_idx * kSlotSize;
+          internal::PartitionRefCountPointer(slot_start)->InitalizeForGwpAsan();
+          size_t global_slot_idx = (slot_start - super_page_span_start -
+                                    kSuperPageGwpAsanSlotAreaBeginOffset) /
+                                   kSlotSize;
+          PA_DCHECK(global_slot_idx < std::numeric_limits<uint16_t>::max());
+          free_list.push_back(global_slot_idx);
+          if (free_list.size() == slot_count) {
+            return reinterpret_cast<void*>(
+                super_page_span_start + kSuperPageGwpAsanSlotAreaBeginOffset -
+                internal::SystemPageSize());  // Depends on the PA guard region
+                                              // in front of the super page
+                                              // payload area.
+          }
+        }
+      }
+    }
+  }
+
+  PA_NOTREACHED();
+}
+
+// static
+bool GwpAsanSupport::CanReuse(uintptr_t slot_start) {
+  return internal::PartitionRefCountPointer(slot_start)->CanBeReusedByGwpAsan();
+}
+
+}  // namespace partition_alloc
+
+#endif  // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.h b/base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.h
new file mode 100644
index 0000000..697acbb
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/gwp_asan_support.h
@@ -0,0 +1,120 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace partition_alloc {
+
+// This class allows GWP-ASan allocations to be backed by PartitionAlloc and,
+// consequently, protected by MiraclePtr.
+//
+// GWP-ASan mainly operates at the system memory page granularity. During
+// process startup, it reserves a certain number of consecutive system pages.
+//
+// The standard layout is as follows:
+//
+//   +-------------------+--------
+//   |                   | ▲   ▲
+//   |   system page 0   |(a) (c)
+//   |                   | ▼   ▼
+//   +-------------------+--------
+//   |                   | ▲   ▲
+//   |   system page 1   |(b)  |
+//   |                   | ▼   |
+//   +-------------------+--- (d)    (a) inaccessible
+//   |                   | ▲   |     (b) accessible
+//   |   system page 2   |(a)  |     (c) initial guard page
+//   |                   | ▼   ▼     (d) allocation slot
+//   +-------------------+--------
+//   |                   | ▲   ▲
+//   |   system page 3   |(b)  |
+//   |                   | ▼   |
+//   +-------------------+--- (d)
+//   |                   | ▲   |
+//   |   system page 4   |(a)  |
+//   |                   | ▼   ▼
+//   |-------------------|--------
+//   |                   | ▲   ▲
+//   |        ...        |(a) (d)
+//
+// Unfortunately, PartitionAlloc can't provide GWP-ASan an arbitrary number of
+// consecutive allocation slots. Allocations need to be grouped into 2MB super
+// pages so that the allocation metadata can be easily located.
+//
+// Below is the new layout:
+//
+//   +-----------------------------------
+//   |                   |         ▲   ▲
+//   |   system page 0   |         |   |
+//   |                   |         |   |
+//   +-------------------+         |   |
+//   |                   |         |   |
+//   |        ...        |        (e)  |
+//   |                   |         |   |
+//   +-------------------+-------  |   |
+//   |                   | ▲   ▲   |   |
+//   |  system page k-1  |(a) (c)  |   |
+//   |                   | ▼   ▼   ▼   |
+//   +-------------------+----------- (f)
+//   |                   | ▲   ▲       |
+//   |   system page k   |(b)  |       |
+//   |                   | ▼   |       |
+//   +-------------------+--- (d)      |
+//   |                   | ▲   |       |
+//   |  system page k+1  |(a)  |       |
+//   |                   | ▼   ▼       |
+//   +-------------------+-----------  |
+//   |                   |             |    (a) inaccessible
+//   |        ...        |             |    (b) accessible
+//   |                   |             ▼    (c) initial guard page
+//   +-----------------------------------   (d) allocation slot
+//   |                   |         ▲   ▲    (e) super page metadata
+//   |   system page m   |         |   |    (f) super page
+//   |                   |         |   |    (g) pseudo allocation slot
+//   +-------------------+-------  |   |
+//   |                   |     ▲   |   |
+//   |        ...        |     |  (e)  |
+//   |                   |     |   |   |
+//   +-------------------+--- (g)  |   |
+//   |                   | ▲   |   |   |
+//   | system page m+k-1 |(a)  |   |   |
+//   |                   | ▼   ▼   ▼   |
+//   +-------------------+----------- (f)
+//   |                   | ▲   ▲       |
+//   |  system page m+k  |(b)  |       |
+//   |                   | ▼   |       |
+//   +-------------------+--- (d)      |
+//   |                   | ▲   |       |
+//   | system page m+k+1 |(a)  |       |
+//   |                   | ▼   ▼       |
+//   +-------------------+-----------  |
+//   |                   |             |
+//   |        ...        |             |
+//   |                   |             ▼
+//   +-------------------+---------------
+//
+// This means some allocation slots will be reserved to hold PA
+// metadata. We exclude these pseudo slots from the GWP-ASan free list so that
+// they are never used for anything other than storing the metadata.
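+//
+// A minimal usage sketch (assuming a GWP-ASan hook that requests 128 slots;
+// the surrounding code is illustrative only):
+//
+//   std::vector<uint16_t> free_list;
+//   void* region = partition_alloc::GwpAsanSupport::MapRegion(
+//       /* slot_count= */ 128, free_list);
+//   // On success, `free_list` holds the usable slot indices for the mapped
+//   // region; pseudo slots backing PA metadata are excluded.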
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) GwpAsanSupport {
+ public:
+  static void* MapRegion(size_t slot_count, std::vector<uint16_t>& free_list);
+  static bool CanReuse(uintptr_t slot_start);
+};
+
+}  // namespace partition_alloc
+
+#endif  // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/hardening_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/hardening_unittest.cc
new file mode 100644
index 0000000..f2a8392
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/hardening_unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// With *SAN, PartitionAlloc is rerouted to malloc().
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace partition_alloc::internal {
+namespace {
+
+// Death tests misbehave on Android, crbug.com/1240184
+#if !BUILDFLAG(IS_ANDROID) && defined(GTEST_HAS_DEATH_TEST) && \
+    PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+
+TEST(HardeningTest, PartialCorruption) {
+  std::string important_data("very important");
+  char* to_corrupt = const_cast<char*>(important_data.c_str());
+
+  PartitionRoot root(PartitionOptions{
+      .aligned_alloc = PartitionOptions::kAllowed,
+  });
+  root.UncapEmptySlotSpanMemoryForTesting();
+
+  const size_t kAllocSize = 100;
+  void* data = root.Alloc(kAllocSize);
+  void* data2 = root.Alloc(kAllocSize);
+  root.Free(data2);
+  root.Free(data);
+
+  // root->bucket->active_slot_span_head->freelist_head points to data, next_
+  // points to data2. We can corrupt *data to overwrite the next_ pointer.
+  // Even if it looks reasonable (valid encoded pointer), freelist corruption
+  // detection will make the code crash, because shadow_ doesn't match
+  // encoded_next_.
+  EncodedNextFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
+                                                  to_corrupt, false);
+  EXPECT_DEATH(root.Alloc(kAllocSize), "");
+}
+
+TEST(HardeningTest, OffHeapPointerCrashing) {
+  std::string important_data("very important");
+  char* to_corrupt = const_cast<char*>(important_data.c_str());
+
+  PartitionRoot root(PartitionOptions{
+      .aligned_alloc = PartitionOptions::kAllowed,
+  });
+  root.UncapEmptySlotSpanMemoryForTesting();
+
+  const size_t kAllocSize = 100;
+  void* data = root.Alloc(kAllocSize);
+  void* data2 = root.Alloc(kAllocSize);
+  root.Free(data2);
+  root.Free(data);
+
+  // See "PartialCorruption" above for details. This time, make shadow_
+  // consistent.
+  EncodedNextFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
+                                                  to_corrupt, true);
+
+  // Crashes, because |to_corrupt| is not on the same superpage as data.
+  EXPECT_DEATH(root.Alloc(kAllocSize), "");
+}
+
+TEST(HardeningTest, MetadataPointerCrashing) {
+  PartitionRoot root(PartitionOptions{
+      .aligned_alloc = PartitionOptions::kAllowed,
+  });
+  root.UncapEmptySlotSpanMemoryForTesting();
+
+  const size_t kAllocSize = 100;
+  void* data = root.Alloc(kAllocSize);
+  void* data2 = root.Alloc(kAllocSize);
+  root.Free(data2);
+  root.Free(data);
+
+  uintptr_t slot_start = root.ObjectToSlotStart(data);
+  auto* metadata = SlotSpanMetadata::FromSlotStart(slot_start);
+  EncodedNextFreelistEntry::EmplaceAndInitForTest(slot_start, metadata, true);
+
+  // Crashes, because |metadata| points inside the metadata area.
+  EXPECT_DEATH(root.Alloc(kAllocSize), "");
+}
+#endif  // !BUILDFLAG(IS_ANDROID) && defined(GTEST_HAS_DEATH_TEST) &&
+        // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+
+// The test below also misbehaves on Android; as above, death tests don't
+// quite work (crbug.com/1240184), and having free slot bitmaps enabled
+// forces the expectations below to crash.
+#if !BUILDFLAG(IS_ANDROID)
+
+TEST(HardeningTest, SuccessfulCorruption) {
+  PartitionRoot root(PartitionOptions{
+      .aligned_alloc = PartitionOptions::kAllowed,
+  });
+  root.UncapEmptySlotSpanMemoryForTesting();
+
+  uintptr_t* zero_vector = reinterpret_cast<uintptr_t*>(
+      root.Alloc<AllocFlags::kZeroFill>(100 * sizeof(uintptr_t), ""));
+  ASSERT_TRUE(zero_vector);
+  // Pointer to the middle of an existing allocation.
+  uintptr_t* to_corrupt = zero_vector + 20;
+
+  const size_t kAllocSize = 100;
+  void* data = root.Alloc(kAllocSize);
+  void* data2 = root.Alloc(kAllocSize);
+  root.Free(data2);
+  root.Free(data);
+
+  EncodedNextFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
+                                                  to_corrupt, true);
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  // This part crashes with the freeslot bitmap because it detects freelist
+  // corruption, which is rather desirable behavior.
+  EXPECT_DEATH_IF_SUPPORTED(root.Alloc(kAllocSize), "");
+#else
+  // Next allocation is what was in
+  // root->bucket->active_slot_span_head->freelist_head, so not the corrupted
+  // pointer.
+  void* new_data = root.Alloc(kAllocSize);
+  ASSERT_EQ(new_data, data);
+
+  // Not crashing, because a zeroed area is a "valid" freelist entry.
+  void* new_data2 = root.Alloc(kAllocSize);
+  // Now we have a pointer to the middle of an existing allocation.
+  EXPECT_EQ(new_data2, to_corrupt);
+#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)
+}
+#endif  // !BUILDFLAG(IS_ANDROID)
+
+}  // namespace
+}  // namespace partition_alloc::internal
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.cc b/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.cc
new file mode 100644
index 0000000..28bba39
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.cc
@@ -0,0 +1,140 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+size_t GetObjectSize(void* object) {
+  const auto* entry_slot_span = SlotSpanMetadata::FromObject(object);
+  return entry_slot_span->GetUtilizedSlotSize();
+}
+}  // namespace
+
+template <typename QuarantineEntry, size_t CapacityCount>
+uint32_t LightweightQuarantineList<QuarantineEntry, CapacityCount>::Quarantine(
+    QuarantineEntry&& entry) {
+  const auto entry_size = GetObjectSize(entry.GetObject());
+
+  const size_t capacity_in_bytes =
+      capacity_in_bytes_.load(std::memory_order_relaxed);
+  if (capacity_in_bytes < entry_size) {
+    // Even this single entry does not fit within the capacity.
+    root_->Free(entry.GetObject());
+    quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
+    return kInvalidEntryID;
+  }
+
+  size_t entry_id;
+  {
+    // It may be possible to narrow down the locked section, but we will not
+    // make any detailed adjustments for now, as we aim to create a lock-free
+    // implementation by having a thread-local list.
+    ScopedGuard guard(lock_);
+
+    // Dequarantine some entries as required.
+    size_t count = count_.load(std::memory_order_acquire);
+    size_t size_in_bytes = size_in_bytes_.load(std::memory_order_acquire);
+    while (kCapacityCount < count + 1 ||
+           capacity_in_bytes < size_in_bytes + entry_size) {
+      PA_DCHECK(0 < count);
+      // As quarantined entries are shuffled, picking the last entry is
+      // equivalent to picking a random entry.
+      void* to_free =
+          slots_[entry_ids_[count - 1] & kSlotIndexMask].GetObject();
+      size_t to_free_size = GetObjectSize(to_free);
+
+      PA_DCHECK(to_free);
+      // We don't guarantee the deferred `Free()` has the same `FreeFlags`.
+      root_->Free<FreeFlags::kNoHooks>(to_free);
+
+      // Increment the counter embedded in the entry id.
+      // This helps to identify the entry associated with this slot.
+      entry_ids_[count - 1] += kCapacityCount;
+      if (PA_UNLIKELY(entry_ids_[count - 1] == kInvalidEntryID)) {
+        // Increment again so that it does not collide with the invalid id.
+        entry_ids_[count - 1] += kCapacityCount;
+      }
+
+      count--;
+      size_in_bytes -= to_free_size;
+      // The contents of `slots_[...]` remain as-is, to keep as much of the
+      // free-time information as possible.
+    }
+
+    // Obtain an entry id.
+    PA_DCHECK(count < kCapacityCount);
+    entry_id = entry_ids_[count];
+    count++;
+    size_in_bytes += entry_size;
+
+    // Update stats (locked).
+    count_.store(count, std::memory_order_release);
+    size_in_bytes_.store(size_in_bytes, std::memory_order_release);
+
+    // Swap randomly so that the quarantine indices remain shuffled.
+    // This is not uniformly random, but sufficiently random.
+    const size_t random_index = random_.RandUint32() % count;
+    std::swap(entry_ids_[random_index], entry_ids_[count - 1]);
+
+    auto& slot = slots_[entry_id & kSlotIndexMask];
+    slot.entry_id = entry_id;
+    slot.entry = std::move(entry);
+  }
+
+  // Update stats (not locked).
+  cumulative_count_.fetch_add(1, std::memory_order_relaxed);
+  cumulative_size_in_bytes_.fetch_add(entry_size, std::memory_order_relaxed);
+  return entry_id;
+}
+
+template <typename QuarantineEntry, size_t CapacityCount>
+void LightweightQuarantineList<QuarantineEntry, CapacityCount>::AccumulateStats(
+    LightweightQuarantineStats& stats) const {
+  stats.count += count_.load(std::memory_order_relaxed);
+  stats.size_in_bytes += size_in_bytes_.load(std::memory_order_relaxed);
+  stats.cumulative_count += cumulative_count_.load(std::memory_order_relaxed);
+  stats.cumulative_size_in_bytes +=
+      cumulative_size_in_bytes_.load(std::memory_order_relaxed);
+  stats.quarantine_miss_count +=
+      quarantine_miss_count_.load(std::memory_order_relaxed);
+}
+
+template <typename QuarantineEntry, size_t CapacityCount>
+bool LightweightQuarantineList<QuarantineEntry, CapacityCount>::
+    IsQuarantinedForTesting(void* object) {
+  ScopedGuard guard(lock_);
+  for (size_t i = 0; i < count_; i++) {
+    if (slots_[entry_ids_[i] & kSlotIndexMask].GetObject() == object) {
+      return true;
+    }
+  }
+  return false;
+}
+
+template <typename QuarantineEntry, size_t CapacityCount>
+void LightweightQuarantineList<QuarantineEntry, CapacityCount>::Purge() {
+  ScopedGuard guard(lock_);
+
+  size_t count = count_.load(std::memory_order_acquire);
+  while (0 < count) {
+    void* to_free = slots_[entry_ids_[count - 1] & kSlotIndexMask].GetObject();
+    PA_DCHECK(to_free);
+    root_->Free<FreeFlags::kNoHooks>(to_free);
+    count--;
+  }
+  count_.store(0, std::memory_order_release);
+  size_in_bytes_.store(0, std::memory_order_release);
+  std::iota(entry_ids_.begin(), entry_ids_.end(), 0);
+}
+
+template class PA_EXPORT_TEMPLATE_DEFINE(PA_COMPONENT_EXPORT(PARTITION_ALLOC))
+    LightweightQuarantineList<LightweightQuarantineEntry, 1024>;
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h b/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h
new file mode 100644
index 0000000..b34759d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h
@@ -0,0 +1,191 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Lightweight Quarantine (LQ) provides a low-cost quarantine mechanism with
+// the following characteristics:
+//
+// - Built on PartitionAlloc: only supports allocations in a known root
+// - As fast as PA: LQ just defers `Free()` handling and may benefit from the
+//   thread cache etc.
+// - Thread-safe
+// - No allocation-time information: triggered on `Free()`
+// - Does not use quarantined objects' payload - it remains available for
+//   zapping
+// - Does not allocate heap memory.
+// - Flexible to support several applications
+//   - TODO(crbug.com/1462223): Implement Miracle Object quarantine with LQ.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_LIGHTWEIGHT_QUARANTINE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_LIGHTWEIGHT_QUARANTINE_H_
+
+#include <stdint.h>
+#include <array>
+#include <atomic>
+#include <limits>
+#include <numeric>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/export_template.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace partition_alloc {
+
+struct PartitionRoot;
+struct LightweightQuarantineStats;
+
+namespace internal {
+
+// `LightweightQuarantineEntry` represents one quarantine entry,
+// with the original `Free()` request information.
+struct LightweightQuarantineEntry {
+  LightweightQuarantineEntry() = default;
+  explicit LightweightQuarantineEntry(void* object) : object(object) {}
+  PA_ALWAYS_INLINE void* GetObject() const { return object; }
+
+  void* object = nullptr;
+};
+
+template <typename QuarantineEntry, size_t CapacityCount>
+class LightweightQuarantineList {
+ public:
+  // `CapacityCount` must be a power of two.
+  static constexpr uint32_t kCapacityCount = CapacityCount;
+  static_assert(base::bits::IsPowerOfTwo(kCapacityCount));
+
+  // "Entry" is an object that holds free-time information, created for each
+  // quarantined object.
+  // An application may replace `QuarantineEntry` with its own custom entry
+  // type to record more `Free()`-time information.
+  using Entry = QuarantineEntry;
+  // To be accessed from a crash handler, it must be trivially copyable.
+  static_assert(std::is_trivially_copyable_v<Entry>);
+
+  // Entry ids are a concatenation of a "slot index" and a "counter".
+  // Their lower bits store the "slot index", an index into `slots_`.
+  // Their upper bits store the "counter", which is incremented every time the
+  // id is used (and may overflow). It is used to verify that the slot is
+  // occupied by that entry.
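+  // For example, with `kCapacityCount == 1024` (so `kSlotIndexMask == 0x3FF`),
+  // the id 0xC05 denotes slot index 5 with counter value 3.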
+  static constexpr uint32_t kSlotIndexMask = kCapacityCount - 1;
+  static constexpr uint32_t kInvalidEntryID =
+      std::numeric_limits<uint32_t>::max();
+
+  // "Slot" is a place to put an entry. Each slot owns at most one entry.
+  struct Slot {
+    void* GetObject() const {
+      // We assume `Entry` has `GetObject()` member function.
+      return entry.GetObject();
+    }
+
+    // Used to make sure the metadata entry isn't stale.
+    uint32_t entry_id = kInvalidEntryID;
+    Entry entry;
+  };
+  static_assert(std::is_trivially_copyable_v<Slot>);
+
+  explicit LightweightQuarantineList(PartitionRoot* root,
+                                     size_t capacity_in_bytes = 0)
+      : root_(root), capacity_in_bytes_(capacity_in_bytes) {
+    PA_CHECK(root);
+    // Initialize entry ids with iota.
+    // They can be initialized with any value as long as
+    // `entry_ids_[i] & kSlotIndexMask` are unique.
+    std::iota(entry_ids_.begin(), entry_ids_.end(), 0);
+  }
+  LightweightQuarantineList(const LightweightQuarantineList&) = delete;
+  ~LightweightQuarantineList() { Purge(); }
+
+  // Quarantines an object. This list holds as much of the information you put
+  // into `Entry` as possible.
+  // If the object is too large, this may return `kInvalidEntryID`, meaning
+  // that the quarantine request has failed (and the object has been freed
+  // immediately). Otherwise, returns an entry id for the quarantine.
+  uint32_t Quarantine(Entry&& entry);
+
+  void AccumulateStats(LightweightQuarantineStats& stats) const;
+
+  // Determines whether this list contains an entry with
+  // `entry.GetObject() == object`.
+  bool IsQuarantinedForTesting(void* object);
+
+  // Dequarantines all entries.
+  void Purge();
+
+  // Returns the address of an array of `Slot`.
+  // Do not dereference it directly, to avoid harmful races.
+  // You can save this address and the entry id returned by `Quarantine()`
+  // somewhere, and use `GetEntryByID()` to obtain the free-time information.
+  // E.g. embed an entry id into a zapping pattern and detect the pattern in
+  // a crash handler to report the free-time information.
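+  //
+  // A sketch of that flow (illustrative only; it assumes a
+  // `SchedulerLoopQuarantine` named `list` and a pointer `object`; the zapping
+  // and crash-handler plumbing are not part of this class):
+  //
+  //   uintptr_t slots = list.GetSlotsAddress();
+  //   uint32_t id = list.Quarantine(Entry(object));
+  //   // ... later, e.g. in a crash handler ...
+  //   if (const Entry* e = SchedulerLoopQuarantine::GetEntryByID(slots, id)) {
+  //     // Report the free-time information held in `*e`.
+  //   }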
+  uintptr_t GetSlotsAddress() {
+    ScopedGuard guard(lock_);
+    return reinterpret_cast<uintptr_t>(slots_.data());
+  }
+
+  // Returns the `Entry` associated with the id.
+  // May return `nullptr` if the slot has been overwritten by another entry. In
+  // rare cases this can return the wrong entry if the id collides with another
+  // entry's id.
+  // Not thread-safe; use only in crash handling or in tests.
+  static const Entry* GetEntryByID(uintptr_t slots_address, uint32_t entry_id) {
+    const auto* slots = reinterpret_cast<Slot*>(slots_address);
+    const auto& slot = slots[entry_id & kSlotIndexMask];
+    if (slot.entry_id != entry_id) {
+      return nullptr;
+    }
+    return &slot.entry;
+  }
+
+  size_t GetCapacityInBytes() const {
+    return capacity_in_bytes_.load(std::memory_order_relaxed);
+  }
+  void SetCapacityInBytesForTesting(size_t capacity_in_bytes) {
+    capacity_in_bytes_.store(capacity_in_bytes, std::memory_order_relaxed);
+    // Purge to maintain invariant.
+    Purge();
+  }
+
+ private:
+  Lock lock_;
+  // Not `PA_GUARDED_BY` as they have another lock.
+  PartitionRoot* const root_;
+  std::atomic_size_t capacity_in_bytes_;
+
+  // Non-cryptographic random number generator.
+  // Thread-unsafe so guarded by `lock_`.
+  base::InsecureRandomGenerator random_ PA_GUARDED_BY(lock_);
+
+  // `slots_` holds an array of quarantined entries.
+  // The contents of empty slots are undefined and reads should not occur.
+  // There is no guarantee that non-empty slots will be placed consecutively.
+  std::array<Slot, kCapacityCount> slots_ PA_GUARDED_BY(lock_);
+
+  // Number of quarantined entries, capped by `kCapacityCount`.
+  std::atomic_size_t count_ = 0;
+  // Total size of quarantined entries, capped by `capacity_in_bytes_`.
+  std::atomic_size_t size_in_bytes_ = 0;
+  // `entry_ids_` is a supplementary data store to access slots quickly.
+  // Its first `count_` elements represent quarantined entry ids and are
+  // used to choose an entry to dequarantine quickly.
+  // The other elements represent empty slot indices, to find an empty slot to
+  // fill in quickly. All elements are also responsible for managing the upper
+  // bits of entry ids so that they are as unique as possible.
+  std::array<uint32_t, kCapacityCount> entry_ids_ PA_GUARDED_BY(lock_);
+
+  // Stats.
+  std::atomic_size_t cumulative_count_ = 0;
+  std::atomic_size_t cumulative_size_in_bytes_ = 0;
+  std::atomic_size_t quarantine_miss_count_ = 0;
+};
+
+using SchedulerLoopQuarantine =
+    LightweightQuarantineList<LightweightQuarantineEntry, 1024>;
+extern template class PA_EXPORT_TEMPLATE_DECLARE(
+    PA_COMPONENT_EXPORT(PARTITION_ALLOC))
+    LightweightQuarantineList<LightweightQuarantineEntry, 1024>;
+
+}  // namespace internal
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_LIGHTWEIGHT_QUARANTINE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine_unittest.cc
new file mode 100644
index 0000000..89f8b57
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine_unittest.cc
@@ -0,0 +1,132 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc {
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace {
+
+size_t GetObjectSize(void* object) {
+  const auto* entry_slot_span = internal::SlotSpanMetadata::FromObject(object);
+  return entry_slot_span->GetUtilizedSlotSize();
+}
+
+struct LightweightQuarantineTestParam {
+  size_t capacity_in_bytes;
+};
+
+using QuarantineList =
+    internal::LightweightQuarantineList<internal::LightweightQuarantineEntry,
+                                        1024>;
+constexpr LightweightQuarantineTestParam kSmallQuarantineList = {
+    .capacity_in_bytes = 256};
+constexpr LightweightQuarantineTestParam kLargeQuarantineList = {
+    .capacity_in_bytes = 4096};
+
+class PartitionAllocLightweightQuarantineTest
+    : public testing::TestWithParam<LightweightQuarantineTestParam> {
+ protected:
+  void SetUp() override {
+    const auto param = GetParam();
+
+    allocator_ =
+        std::make_unique<PartitionAllocatorForTesting>(PartitionOptions{});
+    list_ = std::make_unique<QuarantineList>(allocator_->root(),
+                                             param.capacity_in_bytes);
+
+    auto stats = GetStats();
+    ASSERT_EQ(0u, stats.size_in_bytes);
+    ASSERT_EQ(0u, stats.count);
+    ASSERT_EQ(0u, stats.cumulative_size_in_bytes);
+    ASSERT_EQ(0u, stats.cumulative_count);
+  }
+
+  void TearDown() override {
+    // |Purge()|d here.
+    list_ = nullptr;
+    allocator_ = nullptr;
+  }
+
+  PartitionRoot* GetRoot() const { return allocator_->root(); }
+
+  QuarantineList* GetList() const { return list_.get(); }
+
+  LightweightQuarantineStats GetStats() const {
+    LightweightQuarantineStats stats{};
+    list_->AccumulateStats(stats);
+    return stats;
+  }
+
+  std::unique_ptr<PartitionAllocatorForTesting> allocator_;
+  std::unique_ptr<QuarantineList> list_;
+};
+INSTANTIATE_TEST_SUITE_P(
+    PartitionAllocLightweightQuarantineTestMultipleQuarantineSizeInstantiation,
+    PartitionAllocLightweightQuarantineTest,
+    ::testing::Values(kSmallQuarantineList, kLargeQuarantineList));
+
+}  // namespace
+
+TEST_P(PartitionAllocLightweightQuarantineTest, Basic) {
+  constexpr size_t kObjectSize = 1;
+
+  uintptr_t slots_address = GetList()->GetSlotsAddress();
+  const size_t capacity_in_bytes = GetList()->GetCapacityInBytes();
+
+  constexpr size_t kCount = 100;
+  for (size_t i = 1; i <= kCount; i++) {
+    void* object = GetRoot()->Alloc(kObjectSize);
+    const size_t size = GetObjectSize(object);
+    const size_t max_count = capacity_in_bytes / size;
+
+    auto entry = QuarantineList::Entry(object);
+    const uint32_t entry_id = GetList()->Quarantine(std::move(entry));
+    const auto* entry_ptr =
+        QuarantineList::GetEntryByID(slots_address, entry_id);
+
+    ASSERT_NE(entry_ptr, nullptr);
+    ASSERT_EQ(object, entry_ptr->GetObject());
+    ASSERT_TRUE(GetList()->IsQuarantinedForTesting(object));
+
+    const auto expected_count = std::min(i, max_count);
+    auto stats = GetStats();
+    ASSERT_EQ(expected_count * size, stats.size_in_bytes);
+    ASSERT_EQ(expected_count, stats.count);
+    ASSERT_EQ(i * size, stats.cumulative_size_in_bytes);
+    ASSERT_EQ(i, stats.cumulative_count);
+  }
+}
+
+TEST_P(PartitionAllocLightweightQuarantineTest, TooLargeAllocation) {
+  constexpr size_t kObjectSize = 1 << 26;  // 64 MiB.
+  const size_t capacity_in_bytes = GetList()->GetCapacityInBytes();
+
+  void* object = GetRoot()->Alloc(kObjectSize);
+  const size_t size = GetObjectSize(object);
+  ASSERT_GT(size, capacity_in_bytes);
+
+  auto entry = QuarantineList::Entry(object);
+  GetList()->Quarantine(std::move(entry));
+
+  ASSERT_FALSE(GetList()->IsQuarantinedForTesting(object));
+
+  auto stats = GetStats();
+  ASSERT_EQ(0u, stats.size_in_bytes);
+  ASSERT_EQ(0u, stats.count);
+  ASSERT_EQ(0u, stats.cumulative_size_in_bytes);
+  ASSERT_EQ(0u, stats.cumulative_count);
+}
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.cc b/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.cc
new file mode 100644
index 0000000..d700cb5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.cc
@@ -0,0 +1,98 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+
+namespace partition_alloc {
+
+// static
+MemoryReclaimer* MemoryReclaimer::Instance() {
+  static internal::base::NoDestructor<MemoryReclaimer> instance;
+  return instance.get();
+}
+
+void MemoryReclaimer::RegisterPartition(PartitionRoot* partition) {
+  internal::ScopedGuard lock(lock_);
+  PA_DCHECK(partition);
+  auto it_and_whether_inserted = partitions_.insert(partition);
+  PA_DCHECK(it_and_whether_inserted.second);
+}
+
+void MemoryReclaimer::UnregisterPartition(PartitionRoot* partition) {
+  internal::ScopedGuard lock(lock_);
+  PA_DCHECK(partition);
+  size_t erased_count = partitions_.erase(partition);
+  PA_DCHECK(erased_count == 1u);
+}
+
+MemoryReclaimer::MemoryReclaimer() = default;
+MemoryReclaimer::~MemoryReclaimer() = default;
+
+void MemoryReclaimer::ReclaimAll() {
+  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
+                         PurgeFlags::kDiscardUnusedSystemPages |
+                         PurgeFlags::kAggressiveReclaim;
+  Reclaim(kFlags);
+}
+
+void MemoryReclaimer::ReclaimNormal() {
+  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
+                         PurgeFlags::kDiscardUnusedSystemPages;
+  Reclaim(kFlags);
+}
+
+void MemoryReclaimer::Reclaim(int flags) {
+  internal::ScopedGuard lock(
+      lock_);  // Has to protect from concurrent (Un)Register calls.
+
+  // PCScan quarantines freed slots. Trigger the scan first to let it call
+  // FreeNoHooksImmediate on slots that pass the quarantine.
+  //
+  // In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
+  // so that the slots are actually freed. (This is done synchronously only for
+  // the current thread.)
+  //
+  // Lastly, decommit empty slot spans and try to discard unused pages at the
+  // end of the remaining active slots.
+#if PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) && BUILDFLAG(USE_STARSCAN)
+  {
+    using PCScan = internal::PCScan;
+    const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
+                                     ? PCScan::InvocationMode::kForcedBlocking
+                                     : PCScan::InvocationMode::kBlocking;
+    PCScan::PerformScanIfNeeded(invocation_mode);
+  }
+#endif  // PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) &&
+        // BUILDFLAG(USE_STARSCAN)
+
+#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
+  // Don't completely empty the thread cache outside of low memory situations,
+  // as there is periodic purge which makes sure that it doesn't take too much
+  // space.
+  if (flags & PurgeFlags::kAggressiveReclaim) {
+    ThreadCacheRegistry::Instance().PurgeAll();
+  }
+#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+  for (auto* partition : partitions_) {
+    partition->PurgeMemory(flags);
+  }
+}
+
+void MemoryReclaimer::ResetForTesting() {
+  internal::ScopedGuard lock(lock_);
+  partitions_.clear();
+}
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h b/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h
new file mode 100644
index 0000000..82a2347
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h
@@ -0,0 +1,71 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_MEMORY_RECLAIMER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_MEMORY_RECLAIMER_H_
+
+#include <memory>
+#include <set>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace partition_alloc {
+
+// Posts and handles memory reclaim tasks for PartitionAlloc.
+//
+// PartitionAlloc users are responsible for scheduling and calling the
+// reclamation methods with their own timers / event loops.
+//
+// This is a singleton, as it runs for as long as the process is alive, and
+// having multiple instances would be wasteful.
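+//
+// A periodic-reclaim sketch (the timer / task-posting mechanism belongs to the
+// embedder; the names below are illustrative only):
+//
+//   auto* reclaimer = ::partition_alloc::MemoryReclaimer::Instance();
+//   const int64_t interval_us =
+//       reclaimer->GetRecommendedReclaimIntervalInMicroseconds();
+//   // Post a repeating task with period `interval_us` that calls:
+//   reclaimer->ReclaimNormal();
+//   // Under memory pressure, reclaim more aggressively:
+//   reclaimer->ReclaimAll();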
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
+ public:
+  static MemoryReclaimer* Instance();
+
+  MemoryReclaimer(const MemoryReclaimer&) = delete;
+  MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;
+
+  // Internal. Do not use.
+  // Registers a partition to be tracked by the reclaimer.
+  void RegisterPartition(PartitionRoot* partition);
+  // Internal. Do not use.
+  // Unregisters a partition so that it is no longer tracked by the reclaimer.
+  void UnregisterPartition(PartitionRoot* partition);
+
+  // Triggers an explicit reclaim now to reclaim as much free memory as
+  // possible. API callers need to invoke this method periodically
+  // if they want to use the memory reclaimer.
+  // See also the comment on GetRecommendedReclaimIntervalInMicroseconds().
+  void ReclaimNormal();
+
+  // Returns a recommended interval to invoke ReclaimNormal.
+  int64_t GetRecommendedReclaimIntervalInMicroseconds() {
+    return internal::base::Seconds(4).InMicroseconds();
+  }
+
+  // Triggers an explicit reclaim now, reclaiming all free memory.
+  void ReclaimAll();
+
+ private:
+  MemoryReclaimer();
+  ~MemoryReclaimer();
+  // |flags| is an OR of PurgeFlags.
+  void Reclaim(int flags);
+  void ResetForTesting();
+
+  internal::Lock lock_;
+  std::set<PartitionRoot*> partitions_ PA_GUARDED_BY(lock_);
+
+  friend class internal::base::NoDestructor<MemoryReclaimer>;
+  friend class MemoryReclaimerTest;
+};
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_MEMORY_RECLAIMER_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer_unittest.cc
new file mode 100644
index 0000000..38b51a7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer_unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
+    PA_CONFIG(THREAD_CACHE_SUPPORTED)
+#include "base/allocator/partition_allocator/src/partition_alloc/extended_api.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+#endif
+
+// Otherwise, PartitionAlloc doesn't allocate any memory, and the tests are
+// meaningless.
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace partition_alloc {
+
+namespace {
+
+void HandleOOM(size_t unused_size) {
+  PA_LOG(FATAL) << "Out of memory";
+}
+
+}  // namespace
+
+class MemoryReclaimerTest : public ::testing::Test {
+ public:
+  MemoryReclaimerTest() {
+    // Since MemoryReclaimer::ResetForTesting() clears partitions_,
+    // we need to create the PartitionAllocator after this ResetForTesting().
+    // Otherwise, no PartitionAllocator would be registered.
+    MemoryReclaimer::Instance()->ResetForTesting();
+
+    allocator_ =
+        std::make_unique<PartitionAllocatorForTesting>(PartitionOptions{
+            .star_scan_quarantine = PartitionOptions::kAllowed,
+        });
+    allocator_->root()->UncapEmptySlotSpanMemoryForTesting();
+    PartitionAllocGlobalInit(HandleOOM);
+  }
+
+  ~MemoryReclaimerTest() override {
+    // Since MemoryReclaimer::UnregisterPartition() checks whether
+    // the given partition is managed by MemoryReclaimer, we need to
+    // destroy |allocator_| before ResetForTesting().
+    allocator_ = nullptr;
+    PartitionAllocGlobalUninitForTesting();
+  }
+
+  void Reclaim() { MemoryReclaimer::Instance()->ReclaimNormal(); }
+
+  void AllocateAndFree() {
+    void* data = allocator_->root()->Alloc(1);
+    allocator_->root()->Free(data);
+  }
+
+  std::unique_ptr<PartitionAllocatorForTesting> allocator_;
+};
+
+TEST_F(MemoryReclaimerTest, FreesMemory) {
+  PartitionRoot* root = allocator_->root();
+
+  size_t committed_initially = root->get_total_size_of_committed_pages();
+  AllocateAndFree();
+  size_t committed_before = root->get_total_size_of_committed_pages();
+
+  EXPECT_GT(committed_before, committed_initially);
+
+  Reclaim();
+  size_t committed_after = root->get_total_size_of_committed_pages();
+  EXPECT_LT(committed_after, committed_before);
+  EXPECT_LE(committed_initially, committed_after);
+}
+
+TEST_F(MemoryReclaimerTest, Reclaim) {
+  PartitionRoot* root = allocator_->root();
+  size_t committed_initially = root->get_total_size_of_committed_pages();
+
+  {
+    AllocateAndFree();
+
+    size_t committed_before = root->get_total_size_of_committed_pages();
+    EXPECT_GT(committed_before, committed_initially);
+    MemoryReclaimer::Instance()->ReclaimAll();
+    size_t committed_after = root->get_total_size_of_committed_pages();
+
+    EXPECT_LT(committed_after, committed_before);
+    EXPECT_LE(committed_initially, committed_after);
+  }
+}
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
+    PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+namespace {
+// malloc() / free() pairs can be removed by the compiler; this is enough (for
+// now) to prevent that.
+PA_NOINLINE void FreeForTest(void* data) {
+  free(data);
+}
+}  // namespace
+
+TEST_F(MemoryReclaimerTest, DoNotAlwaysPurgeThreadCache) {
+  // Make sure the thread cache is enabled in the main partition.
+  internal::ThreadCacheProcessScopeForTesting scope(
+      allocator_shim::internal::PartitionAllocMalloc::Allocator());
+
+  for (size_t i = 0; i < ThreadCache::kDefaultSizeThreshold; i++) {
+    void* data = malloc(i);
+    FreeForTest(data);
+  }
+
+  auto* tcache = ThreadCache::Get();
+  ASSERT_TRUE(tcache);
+  size_t cached_size = tcache->CachedMemory();
+
+  Reclaim();
+
+  // No thread cache purging during periodic purge, but there is with
+  // ReclaimAll().
+  //
+  // Cannot assert on the exact size of the thread cache, since it can shrink
+  // when a buffer is overfull, and this may happen through other malloc()
+  // allocations in the test harness.
+  EXPECT_GT(tcache->CachedMemory(), cached_size / 2);
+
+  Reclaim();
+  EXPECT_GT(tcache->CachedMemory(), cached_size / 2);
+
+  MemoryReclaimer::Instance()->ReclaimAll();
+  EXPECT_LT(tcache->CachedMemory(), cached_size / 2);
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
+        // PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+}  // namespace partition_alloc
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/oom.cc b/base/allocator/partition_allocator/src/partition_alloc/oom.cc
new file mode 100644
index 0000000..1d69568
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/oom.cc
@@ -0,0 +1,81 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom_callback.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+
+#include <stdlib.h>
+
+#include <array>
+#endif  // BUILDFLAG(IS_WIN)
+
+namespace partition_alloc {
+
+size_t g_oom_size = 0U;
+
+namespace internal {
+
+// Crash server classifies base::internal::OnNoMemoryInternal as OOM.
+// TODO(crbug.com/1151236): Update to
+// partition_alloc::internal::base::internal::OnNoMemoryInternal
+PA_NOINLINE void OnNoMemoryInternal(size_t size) {
+  g_oom_size = size;
+#if BUILDFLAG(IS_WIN)
+  // Kill the process. This is important for security, since most code
+  // does not check the result of memory allocation.
+  // https://msdn.microsoft.com/en-us/library/het71c37.aspx
+  // Pass the size of the failed request in an exception argument.
+  ULONG_PTR exception_args[] = {size};
+  ::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
+                   std::size(exception_args), exception_args);
+
+  // Safety check, make sure process exits here.
+  _exit(win::kOomExceptionCode);
+#else
+  size_t tmp_size = size;
+  internal::base::debug::Alias(&tmp_size);
+
+  // Note: Don't add anything that may allocate here. Depending on the
+  // allocator, this may be called from within the allocator (e.g. with
+  // PartitionAlloc), and would deadlock as our locks are not recursive.
+  //
+  // Additionally, this is unlikely to work, since allocating from an OOM
+  // handler is likely to fail.
+  //
+  // Use PA_IMMEDIATE_CRASH() so that the top frame in the crash is our code,
+  // rather than using abort() or similar; this avoids the crash server needing
+  // to be able to successfully unwind through libc to get to the correct
+  // address, which is particularly an issue on Android.
+  PA_IMMEDIATE_CRASH();
+#endif  // BUILDFLAG(IS_WIN)
+}
+
+}  // namespace internal
+
+void TerminateBecauseOutOfMemory(size_t size) {
+  internal::OnNoMemoryInternal(size);
+}
+
+namespace internal {
+
+// The crash is generated in a PA_NOINLINE function so that we can classify the
+// crash as an OOM solely by analyzing the stack trace. It is tagged as
+// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
+[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemory(size_t size) {
+  RunPartitionAllocOomCallback();
+  TerminateBecauseOutOfMemory(size);
+  PA_IMMEDIATE_CRASH();
+}
+
+}  // namespace internal
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/oom.h b/base/allocator/partition_allocator/src/partition_alloc/oom.h
new file mode 100644
index 0000000..a6657f6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/oom.h
@@ -0,0 +1,70 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_OOM_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_OOM_H_
+
+#include <cstddef>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+#endif
+
+namespace partition_alloc {
+
+// Terminates process. Should be called only for out of memory errors.
+// |size| is the size of the failed allocation, or 0 if not known.
+// Crash reporting classifies such crashes as OOM.
+// Must be allocation-safe.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void TerminateBecauseOutOfMemory(size_t size);
+
+// Records the size of the allocation that caused the current OOM crash, for
+// consumption by Breakpad.
+// TODO: this can be removed when Breakpad is no longer supported.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size;
+
+#if BUILDFLAG(IS_WIN)
+namespace win {
+
+// Custom Windows exception code chosen to indicate an out of memory error.
+// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
+// "To make sure that you do not define a code that conflicts with an existing
+// exception code" ... "The resulting error code should therefore have the
+// highest four bits set to hexadecimal E."
+// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
+const DWORD kOomExceptionCode = 0xe0000008;
+
+}  // namespace win
+#endif
+
+namespace internal {
+
+// The crash is generated in a PA_NOINLINE function so that we can classify the
+// crash as an OOM solely by analyzing the stack trace. It is tagged as
+// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
+[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC) void OnNoMemory(size_t size);
+
+// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
+// exception on Windows to signal this is OOM and not a normal assert.
+// OOM_CRASH(size) is called by users of PageAllocator (including
+// PartitionAlloc) to signify an allocation failure from the platform.
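+//
+// A typical call site (sketch; `TryReserve()` is illustrative, not a real
+// function) checks a platform allocation result and reports the requested
+// size on failure:
+//
+//   uintptr_t address = TryReserve(length);
+//   if (!address) {
+//     OOM_CRASH(length);
+//   }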
+#define OOM_CRASH(size)                                     \
+  do {                                                      \
+    /* Raising an exception might allocate, allow that.  */ \
+    ::partition_alloc::ScopedAllowAllocations guard{};      \
+    ::partition_alloc::internal::OnNoMemory(size);          \
+  } while (0)
+
+}  // namespace internal
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_OOM_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/oom_callback.cc b/base/allocator/partition_allocator/src/partition_alloc/oom_callback.cc
new file mode 100644
index 0000000..3528489
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/oom_callback.cc
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom_callback.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc {
+
+namespace {
+PartitionAllocOomCallback g_oom_callback;
+}  // namespace
+
+void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
+  PA_DCHECK(!g_oom_callback);
+  g_oom_callback = callback;
+}
+
+namespace internal {
+void RunPartitionAllocOomCallback() {
+  if (g_oom_callback) {
+    g_oom_callback();
+  }
+}
+}  // namespace internal
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/oom_callback.h b/base/allocator/partition_allocator/src/partition_alloc/oom_callback.h
new file mode 100644
index 0000000..5f816f4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/oom_callback.h
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_OOM_CALLBACK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_OOM_CALLBACK_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc {
+
+using PartitionAllocOomCallback = void (*)();
+
+// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
+// invoked by users of PageAllocator (including PartitionAlloc) to signify an
+// allocation failure from the platform.
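+//
+// Example (sketch; the callback body is the embedder's own):
+//
+//   void OnPartitionAllocOom() {
+//     // Record diagnostic state; avoid allocating, as allocations are likely
+//     // to fail at this point.
+//   }
+//   ...
+//   partition_alloc::SetPartitionAllocOomCallback(&OnPartitionAllocOom);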
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback);
+
+namespace internal {
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RunPartitionAllocOomCallback();
+}  // namespace internal
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_OOM_CALLBACK_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator.cc b/base/allocator/partition_allocator/src/partition_alloc/page_allocator.cc
new file mode 100644
index 0000000..d8134e6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator.cc
@@ -0,0 +1,418 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+
+#include <atomic>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#endif
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_win.h"
+#elif BUILDFLAG(IS_POSIX)
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.h"
+#elif BUILDFLAG(IS_FUCHSIA)
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_fuchsia.h"
+#else
+#error Platform not supported.
+#endif
+
+namespace partition_alloc {
+
+namespace {
+
+internal::Lock g_reserve_lock;
+
+// We may reserve/release address space on different threads.
+internal::Lock& GetReserveLock() {
+  return g_reserve_lock;
+}
+
+std::atomic<size_t> g_total_mapped_address_space;
+
+// We only support a single block of reserved address space.
+uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
+size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
+
+uintptr_t AllocPagesIncludingReserved(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageTag page_tag,
+    int file_descriptor_for_shared_alloc = -1) {
+  uintptr_t ret =
+      internal::SystemAllocPages(address, length, accessibility, page_tag,
+                                 file_descriptor_for_shared_alloc);
+  if (!ret) {
+    const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
+    if (cant_alloc_length) {
+      // The system cannot allocate |length| bytes. Release any reserved address
+      // space and try once more.
+      ReleaseReservation();
+      ret = internal::SystemAllocPages(address, length, accessibility, page_tag,
+                                       file_descriptor_for_shared_alloc);
+    }
+  }
+  return ret;
+}
+
+// Trims memory at |base_address| to the given |trim_length| and |alignment|.
+//
+// On failure, on Windows, this function returns 0 and frees memory at
+// |base_address|.
+uintptr_t TrimMapping(uintptr_t base_address,
+                      size_t base_length,
+                      size_t trim_length,
+                      uintptr_t alignment,
+                      uintptr_t alignment_offset,
+                      PageAccessibilityConfiguration accessibility) {
+  PA_DCHECK(base_length >= trim_length);
+  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
+  PA_DCHECK(alignment_offset < alignment);
+  uintptr_t new_base =
+      NextAlignedWithOffset(base_address, alignment, alignment_offset);
+  PA_DCHECK(new_base >= base_address);
+  size_t pre_slack = new_base - base_address;
+  size_t post_slack = base_length - pre_slack - trim_length;
+  PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
+  PA_DCHECK(pre_slack < base_length);
+  PA_DCHECK(post_slack < base_length);
+  return internal::TrimMappingInternal(base_address, base_length, trim_length,
+                                       accessibility, pre_slack, post_slack);
+}
+
+}  // namespace
+
+// Aligns |address| up to the closest non-smaller address that gives a
+// remainder of |requested_offset| modulo |alignment|.
+//
+// Examples for alignment=1024 and requested_offset=64:
+//   64 -> 64
+//   65 -> 1088
+//   1024 -> 1088
+//   1088 -> 1088
+//   1089 -> 2112
+//   2048 -> 2112
+uintptr_t NextAlignedWithOffset(uintptr_t address,
+                                uintptr_t alignment,
+                                uintptr_t requested_offset) {
+  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
+  PA_DCHECK(requested_offset < alignment);
+
+  uintptr_t actual_offset = address & (alignment - 1);
+  uintptr_t new_address;
+  if (actual_offset <= requested_offset) {
+    new_address = address + requested_offset - actual_offset;
+  } else {
+    new_address = address + alignment + requested_offset - actual_offset;
+  }
+  PA_DCHECK(new_address >= address);
+  PA_DCHECK(new_address - address < alignment);
+  PA_DCHECK(new_address % alignment == requested_offset);
+
+  return new_address;
+}
+
+namespace internal {
+
+uintptr_t SystemAllocPages(uintptr_t hint,
+                           size_t length,
+                           PageAccessibilityConfiguration accessibility,
+                           PageTag page_tag,
+                           int file_descriptor_for_shared_alloc) {
+  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
+  uintptr_t ret = internal::SystemAllocPagesInternal(
+      hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
+  if (ret) {
+    g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
+  }
+
+  return ret;
+}
+
+}  // namespace internal
+
+uintptr_t AllocPages(size_t length,
+                     size_t align,
+                     PageAccessibilityConfiguration accessibility,
+                     PageTag page_tag,
+                     int file_descriptor_for_shared_alloc) {
+  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility, page_tag,
+                                   file_descriptor_for_shared_alloc);
+}
+uintptr_t AllocPages(uintptr_t address,
+                     size_t length,
+                     size_t align,
+                     PageAccessibilityConfiguration accessibility,
+                     PageTag page_tag) {
+  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
+                                   page_tag);
+}
+void* AllocPages(void* address,
+                 size_t length,
+                 size_t align,
+                 PageAccessibilityConfiguration accessibility,
+                 PageTag page_tag) {
+  return reinterpret_cast<void*>(
+      AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
+                 accessibility, page_tag));
+}
+
+uintptr_t AllocPagesWithAlignOffset(
+    uintptr_t address,
+    size_t length,
+    size_t align,
+    size_t align_offset,
+    PageAccessibilityConfiguration accessibility,
+    PageTag page_tag,
+    int file_descriptor_for_shared_alloc) {
+  PA_DCHECK(length >= internal::PageAllocationGranularity());
+  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(align >= internal::PageAllocationGranularity());
+  // Alignment must be power of 2 for masking math to work.
+  PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
+  PA_DCHECK(align_offset < align);
+  PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
+  uintptr_t align_offset_mask = align - 1;
+  uintptr_t align_base_mask = ~align_offset_mask;
+  PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
+
+  // If the client passed null as the address, choose a good one.
+  if (!address) {
+    address = (GetRandomPageBase() & align_base_mask) + align_offset;
+  }
+
+  // First try to force an exact-size, aligned allocation from our random base.
+#if defined(ARCH_CPU_32_BITS)
+  // On 32 bit systems, first try one random aligned address, and then try an
+  // aligned address derived from the value of |ret|.
+  constexpr int kExactSizeTries = 2;
+#else
+  // On 64 bit systems, try 3 random aligned addresses.
+  constexpr int kExactSizeTries = 3;
+#endif
+
+  for (int i = 0; i < kExactSizeTries; ++i) {
+    uintptr_t ret =
+        AllocPagesIncludingReserved(address, length, accessibility, page_tag,
+                                    file_descriptor_for_shared_alloc);
+    if (ret) {
+      // If the alignment is to our liking, we're done.
+      if ((ret & align_offset_mask) == align_offset) {
+        return ret;
+      }
+      // Free the memory and try again.
+      FreePages(ret, length);
+    } else {
+      // |ret| is null; if this try was unhinted, we're OOM.
+      if (internal::kHintIsAdvisory || !address) {
+        return 0;
+      }
+    }
+
+#if defined(ARCH_CPU_32_BITS)
+    // For small address spaces, try the first aligned address >= |ret|. Note
+    // |ret| may be null, in which case |address| becomes null. If
+    // |align_offset| is non-zero, this calculation may get us not the first,
+    // but the next matching address.
+    address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
+#else  // defined(ARCH_CPU_64_BITS)
+    // Keep trying random addresses on systems that have a large address space.
+    address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
+#endif
+  }
+
+  // Make a larger allocation so we can force alignment.
+  size_t try_length = length + (align - internal::PageAllocationGranularity());
+  PA_CHECK(try_length >= length);
+  uintptr_t ret;
+
+  do {
+    // Continue randomizing only on POSIX.
+    address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
+    ret =
+        AllocPagesIncludingReserved(address, try_length, accessibility,
+                                    page_tag, file_descriptor_for_shared_alloc);
+    // The retries are for Windows, where a race can steal our mapping on
+    // resize.
+  } while (ret && (ret = TrimMapping(ret, try_length, length, align,
+                                     align_offset, accessibility)) == 0);
+
+  return ret;
+}
+
+void FreePages(uintptr_t address, size_t length) {
+  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
+  internal::FreePagesInternal(address, length);
+  PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
+  g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
+}
+void FreePages(void* address, size_t length) {
+  FreePages(reinterpret_cast<uintptr_t>(address), length);
+}
+
+bool TrySetSystemPagesAccess(uintptr_t address,
+                             size_t length,
+                             PageAccessibilityConfiguration accessibility) {
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  return internal::TrySetSystemPagesAccessInternal(address, length,
+                                                   accessibility);
+}
+bool TrySetSystemPagesAccess(void* address,
+                             size_t length,
+                             PageAccessibilityConfiguration accessibility) {
+  return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
+                                 accessibility);
+}
+
+void SetSystemPagesAccess(uintptr_t address,
+                          size_t length,
+                          PageAccessibilityConfiguration accessibility) {
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  internal::SetSystemPagesAccessInternal(address, length, accessibility);
+}
+
+void DecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  internal::DecommitSystemPagesInternal(address, length,
+                                        accessibility_disposition);
+}
+void DecommitSystemPages(
+    void* address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
+                      accessibility_disposition);
+}
+
+void DecommitAndZeroSystemPages(uintptr_t address,
+                                size_t length,
+                                PageTag page_tag) {
+  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  internal::DecommitAndZeroSystemPagesInternal(address, length, page_tag);
+}
+
+void DecommitAndZeroSystemPages(void* address,
+                                size_t length,
+                                PageTag page_tag) {
+  DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length,
+                             page_tag);
+}
+
+void RecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  PA_DCHECK(accessibility.permissions !=
+            PageAccessibilityConfiguration::kInaccessible);
+  internal::RecommitSystemPagesInternal(address, length, accessibility,
+                                        accessibility_disposition);
+}
+
+bool TryRecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // Duplicated because we want errors to be reported at a lower level in the
+  // crashing case.
+  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  PA_DCHECK(accessibility.permissions !=
+            PageAccessibilityConfiguration::kInaccessible);
+  return internal::TryRecommitSystemPagesInternal(
+      address, length, accessibility, accessibility_disposition);
+}
+
+void DiscardSystemPages(uintptr_t address, size_t length) {
+  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
+  internal::DiscardSystemPagesInternal(address, length);
+}
+void DiscardSystemPages(void* address, size_t length) {
+  DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
+}
+
+bool ReserveAddressSpace(size_t size) {
+  // To avoid deadlock, call only SystemAllocPages.
+  internal::ScopedGuard guard(GetReserveLock());
+  if (!s_reservation_address) {
+    uintptr_t mem = internal::SystemAllocPages(
+        0, size,
+        PageAccessibilityConfiguration(
+            PageAccessibilityConfiguration::kInaccessible),
+        PageTag::kChromium);
+    if (mem) {
+      // We guarantee this alignment when reserving address space.
+      PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
+      s_reservation_address = mem;
+      s_reservation_size = size;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool ReleaseReservation() {
+  // To avoid deadlock, call only FreePages.
+  internal::ScopedGuard guard(GetReserveLock());
+  if (!s_reservation_address) {
+    return false;
+  }
+
+  FreePages(s_reservation_address, s_reservation_size);
+  s_reservation_address = 0;
+  s_reservation_size = 0;
+  return true;
+}
+
+bool HasReservationForTesting() {
+  internal::ScopedGuard guard(GetReserveLock());
+  return s_reservation_address;
+}
+
+uint32_t GetAllocPageErrorCode() {
+  return internal::s_allocPageErrorCode;
+}
+
+size_t GetTotalMappedSize() {
+  return g_total_mapped_address_space;
+}
+
+#if BUILDFLAG(IS_WIN)
+namespace {
+bool g_retry_on_commit_failure = false;
+}
+
+void SetRetryOnCommitFailure(bool retry_on_commit_failure) {
+  g_retry_on_commit_failure = retry_on_commit_failure;
+}
+
+bool GetRetryOnCommitFailure() {
+  return g_retry_on_commit_failure;
+}
+#endif
+
+}  // namespace partition_alloc
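A minimal, self-contained sketch of the NextAlignedWithOffset() arithmetic above, replaying the alignment=1024, requested_offset=64 examples from its comment. The helper name NextAlignedWithOffsetSketch is illustrative only; the two-branch computation is repeated locally so the snippet compiles without PartitionAlloc.

// Illustrative only; mirrors the branch logic in NextAlignedWithOffset().
#include <cassert>
#include <cstdint>

namespace {

uintptr_t NextAlignedWithOffsetSketch(uintptr_t address,
                                      uintptr_t alignment,
                                      uintptr_t requested_offset) {
  // |alignment| must be a power of two and |requested_offset| < |alignment|.
  uintptr_t actual_offset = address & (alignment - 1);
  return actual_offset <= requested_offset
             ? address + requested_offset - actual_offset
             : address + alignment + requested_offset - actual_offset;
}

}  // namespace

int main() {
  // The examples from the comment above, with alignment=1024 and
  // requested_offset=64.
  assert(NextAlignedWithOffsetSketch(64, 1024, 64) == 64);
  assert(NextAlignedWithOffsetSketch(65, 1024, 64) == 1088);
  assert(NextAlignedWithOffsetSketch(1024, 1024, 64) == 1088);
  assert(NextAlignedWithOffsetSketch(1088, 1024, 64) == 1088);
  assert(NextAlignedWithOffsetSketch(1089, 1024, 64) == 2112);
  assert(NextAlignedWithOffsetSketch(2048, 1024, 64) == 2112);
  return 0;
}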
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator.h b/base/allocator/partition_allocator/src/partition_alloc/page_allocator.h
new file mode 100644
index 0000000..17569be
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator.h
@@ -0,0 +1,393 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+struct PageAccessibilityConfiguration {
+  enum Permissions {
+    kInaccessible,
+    // This flag is valid only with AllocPages(), where it creates kInaccessible
+    // pages that may later be re-mapped as executable, on platforms which
+    // distinguish never-executable and maybe-executable pages.
+    kInaccessibleWillJitLater,
+    kRead,
+    kReadWrite,
+    // This flag is mapped to kReadWrite on systems that
+    // don't support MTE.
+    kReadWriteTagged,
+    // This flag is mapped to kReadExecute on systems
+    // that don't support Arm's BTI.
+    kReadExecuteProtected,
+    kReadExecute,
+    // This flag is deprecated and will go away soon.
+    // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
+    kReadWriteExecute,
+  };
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
+      : permissions(permissions) {}
+  constexpr PageAccessibilityConfiguration(
+      Permissions permissions,
+      ThreadIsolationOption thread_isolation)
+      : permissions(permissions), thread_isolation(thread_isolation) {}
+#else
+  constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
+      : permissions(permissions) {}
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+  Permissions permissions;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // Tag the page with a Memory Protection Key. Use 0 for none.
+  ThreadIsolationOption thread_isolation;
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+};
+
+// Use for De/RecommitSystemPages API.
+enum class PageAccessibilityDisposition {
+  // Enforces permission update (Decommit will set to
+  // PageAccessibilityConfiguration::kInaccessible;
+  // Recommit will set to whatever was requested, other than
+  // PageAccessibilityConfiguration::kInaccessible).
+  kRequireUpdate,
+  // Will not update permissions if the platform supports that (POSIX & Fuchsia
+  // only).
+  kAllowKeepForPerf,
+};
+
+// Some platforms (including macOS and some Linux-based ones) support tagged
+// memory regions, to help in debugging. On Android, these tags are used to name
+// anonymous mappings.
+//
+// kChromium is the default value, used to distinguish general
+// Chromium-originated allocations from other ones (e.g. from platform
+// libraries).
+enum class PageTag {
+  kSimulation = 251,      // Memory simulator tool.
+  kBlinkGC = 252,         // Blink GC pages.
+  kPartitionAlloc = 253,  // PartitionAlloc, no matter the partition.
+  kChromium = 254,        // Chromium page.
+  kV8 = 255,              // V8 heap pages.
+
+  kFirst = kSimulation,  // Minimum tag value.
+  kLast = kV8            // Maximum tag value.
+};
+
+// See
+// https://github.com/apple-oss-distributions/xnu/blob/5c2921b07a2480ab43ec66f5b9e41cb872bc554f/osfmk/mach/vm_statistics.h#L687
+static_assert(static_cast<int>(PageTag::kLast) >= 240,
+              "The first application-reserved tag on macOS is 240, see "
+              "vm_statistics.h in XNU.");
+static_assert(
+    static_cast<int>(PageTag::kLast) < 256,
+    "Tags are only 1 byte long on macOS, see vm_statistics.h in XNU.");
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+uintptr_t NextAlignedWithOffset(uintptr_t ptr,
+                                uintptr_t alignment,
+                                uintptr_t requested_offset);
+
+// Allocates one or more pages.
+//
+// The requested |address| is just a hint; the actual address returned may
+// differ. The returned address will be aligned to |align_offset| modulo |align|
+// bytes.
+//
+// |length|, |align| and |align_offset| are in bytes, and must each be a
+// multiple of |PageAllocationGranularity()|. |length| and |align| must be
+// non-zero. |align_offset| must be less than |align|. |align| must be a power
+// of two.
+//
+// If |address| is 0/nullptr, then a suitable and randomized address will be
+// chosen automatically.
+//
+// |accessibility| controls the permission of the allocated pages.
+// PageAccessibilityConfiguration::kInaccessible means uncommitted.
+//
+// |page_tag| is used on some platforms to identify the source of the
+// allocation.
+//
+// |file_descriptor_for_shared_alloc| is only used in mapping the shadow
+// pools to the same physical address as the real one in
+// PartitionAddressSpace::Init(). It should be ignored in other cases.
+//
+// This call will return 0/nullptr if the allocation cannot be satisfied.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+uintptr_t AllocPages(size_t length,
+                     size_t align,
+                     PageAccessibilityConfiguration accessibility,
+                     PageTag page_tag = PageTag::kChromium,
+                     int file_descriptor_for_shared_alloc = -1);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+uintptr_t AllocPages(uintptr_t address,
+                     size_t length,
+                     size_t align,
+                     PageAccessibilityConfiguration accessibility,
+                     PageTag page_tag = PageTag::kChromium);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* AllocPages(void* address,
+                 size_t length,
+                 size_t align,
+                 PageAccessibilityConfiguration accessibility,
+                 PageTag page_tag = PageTag::kChromium);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+uintptr_t AllocPagesWithAlignOffset(
+    uintptr_t address,
+    size_t length,
+    size_t align,
+    size_t align_offset,
+    PageAccessibilityConfiguration page_accessibility,
+    PageTag page_tag = PageTag::kChromium,
+    int file_descriptor_for_shared_alloc = -1);
+
+// Frees one or more pages starting at |address| and continuing for |length|
+// bytes.
+//
+// |address| and |length| must match a previous call to |AllocPages|. Therefore,
+// |address| must be aligned to |PageAllocationGranularity()| bytes, and
+// |length| must be a multiple of |PageAllocationGranularity()|.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void FreePages(uintptr_t address, size_t length);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void FreePages(void* address, size_t length);
+
+// Marks one or more system pages, starting at |address| with the given
+// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
+// bytes.
+//
+// Returns true if the permission change succeeded. In most cases you must
+// |CHECK| the result.
+[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility);
+[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
+    void* address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility);
+
+// Marks one or more system pages, starting at |address| with the given
+// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
+// bytes.
+//
+// Performs a CHECK that the operation succeeds.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetSystemPagesAccess(uintptr_t address,
+                          size_t length,
+                          PageAccessibilityConfiguration page_accessibility);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetSystemPagesAccess(void* address,
+                          size_t length,
+                          PageAccessibilityConfiguration page_accessibility);
+
+// Decommits one or more system pages starting at |address| and continuing for
+// |length| bytes. |address| and |length| must be aligned to a system page
+// boundary.
+//
+// This API will crash if the operation cannot be performed!
+//
+// If disposition is PageAccessibilityDisposition::kRequireUpdate (recommended),
+// the decommitted pages will be made inaccessible before the call returns.
+// While it is always a programming error to access decommitted pages without
+// first recommitting them, callers may use
+// PageAccessibilityDisposition::kAllowKeepForPerf to allow the implementation
+// to skip changing permissions (use with care), for performance reasons (see
+// crrev.com/c/2567282 and crrev.com/c/2563038 for perf regressions encountered
+// in the past). Implementations may choose to always modify permissions, hence
+// accessing those pages may or may not trigger a fault.
+//
+// Decommitting means that physical resources (RAM or swap/pagefile) backing the
+// allocated virtual address range may be released back to the system, but the
+// address space is still allocated to the process (possibly using up page table
+// entries or other accounting resources). There is no guarantee that the pages
+// are zeroed, unless |DecommittedMemoryIsAlwaysZeroed()| is true.
+//
+// This operation may not be atomic on some platforms.
+//
+// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
+// processes will not fault when touching a committed memory region. There is
+// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
+// best-effort allocated resources on the first touch. If
+// PageAccessibilityDisposition::kRequireUpdate disposition is used, this API
+// behaves in a platform-agnostic way by simulating the Windows "decommit" state
+// by both discarding the region (allowing the OS to avoid swap operations)
+// *and* changing the page protections so accesses fault.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DecommitSystemPages(
+    void* address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition);
+
+// Decommits one or more system pages starting at |address| and continuing for
+// |length| bytes. |address| and |length| must be aligned to a system page
+// boundary.
+//
+// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
+// zeroed and will always mark the region as inaccessible (the equivalent of
+// setting them to PageAccessibilityConfiguration::kInaccessible).
+//
+// This API will crash if the operation cannot be performed.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DecommitAndZeroSystemPages(uintptr_t address,
+                                size_t length,
+                                PageTag page_tag = PageTag::kChromium);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DecommitAndZeroSystemPages(void* address,
+                                size_t length,
+                                PageTag page_tag = PageTag::kChromium);
+
+// Whether decommitted memory is guaranteed to be zeroed when it is
+// recommitted. Do not assume that this will not change over time.
+constexpr PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC) bool DecommittedMemoryIsAlwaysZeroed() {
+#if BUILDFLAG(IS_APPLE)
+  return false;
+#else
+  return true;
+#endif
+}
+
+// (Re)Commits one or more system pages, starting at |address| and continuing
+// for |length| bytes with the given |page_accessibility| (must not be
+// PageAccessibilityConfiguration::kInaccessible). |address| and |length|
+// must be aligned to a system page boundary.
+//
+// This API will crash if the operation cannot be performed!
+//
+// If disposition is PageAccessibilityDisposition::kRequireUpdate, the call
+// updates the pages to |page_accessibility|. This can be used regardless of
+// what disposition was used to decommit the pages.
+// PageAccessibilityDisposition::kAllowKeepForPerf allows the implementation
+// to leave the page permissions, if that improves performance. This option can
+// only be used if the pages were previously accessible and decommitted with
+// that same option.
+//
+// The memory will be zeroed when it is committed for the first time. However,
+// there is no such guarantee when memory is recommitted, unless
+// |DecommittedMemoryIsAlwaysZeroed()| is true.
+//
+// This operation may not be atomic on some platforms.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void RecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility,
+    PageAccessibilityDisposition accessibility_disposition);
+
+// Like RecommitSystemPages(), but returns false instead of crashing.
+[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TryRecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility,
+    PageAccessibilityDisposition accessibility_disposition);
+
+// Discards one or more system pages starting at |address| and continuing for
+// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
+//
+// Discarding is a hint to the system that the page is no longer required. The
+// hint may:
+//   - Do nothing.
+//   - Discard the page immediately, freeing up physical pages.
+//   - Discard the page at some time in the future in response to memory
+//   pressure.
+//
+// Only committed pages should be discarded. Discarding a page does not decommit
+// it, and it is valid to discard an already-discarded page. A read or write to
+// a discarded page will not fault.
+//
+// Reading from a discarded page may return the original page content, or a page
+// full of zeroes.
+//
+// Writing to a discarded page is the only guaranteed way to tell the system
+// that the page is required again. Once written to, the content of the page is
+// guaranteed stable once more. After being written to, the page content may be
+// based on the original page content, or a page of zeroes.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DiscardSystemPages(uintptr_t address, size_t length);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DiscardSystemPages(void* address, size_t length);
+
+// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
+// 0 for an |address| of 0.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+RoundUpToSystemPage(uintptr_t address) {
+  return (address + internal::SystemPageOffsetMask()) &
+         internal::SystemPageBaseMask();
+}
+
+// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
+// 0 for an |address| of 0.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+RoundDownToSystemPage(uintptr_t address) {
+  return address & internal::SystemPageBaseMask();
+}
+
+// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
+// Returns 0 for an |address| of 0.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+RoundUpToPageAllocationGranularity(uintptr_t address) {
+  return (address + internal::PageAllocationGranularityOffsetMask()) &
+         internal::PageAllocationGranularityBaseMask();
+}
+
+// Rounds down |address| to the previous multiple of
+// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
+RoundDownToPageAllocationGranularity(uintptr_t address) {
+  return address & internal::PageAllocationGranularityBaseMask();
+}
+
+// Reserves (at least) |size| bytes of address space, aligned to
+// |PageAllocationGranularity()|. This can be called early on to make it more
+// likely that large allocations will succeed. Returns true if the reservation
+// succeeded, false if the reservation failed or a reservation was already made.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReserveAddressSpace(size_t size);
+
+// Releases any reserved address space. |AllocPages| calls this automatically on
+// an allocation failure. External allocators may also call this on failure.
+//
+// Returns true when an existing reservation was released.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReleaseReservation();
+
+// Returns true if there is currently an address space reservation.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool HasReservationForTesting();
+
+// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
+// (POSIX) or |VirtualAlloc| (Windows) fails.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetAllocPageErrorCode();
+
+// Returns the total size, in bytes, of pages mapped by all clients of
+// PageAllocator. These pages may or may not be committed. This is mostly useful
+// to assess address space pressure.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();
+
+#if BUILDFLAG(IS_WIN)
+// Sets whether to retry the allocation of pages when a commit failure
+// happens. This doesn't cover cases where the system is out of address space,
+// or reaches another limit.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetRetryOnCommitFailure(bool retry_on_commit_failure);
+bool GetRetryOnCommitFailure();
+#endif  // BUILDFLAG(IS_WIN)
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_H_
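A hedged usage sketch of the API declared in this header: allocate an aligned, committed read-write region, decommit and recommit its first system page, then unmap it. The function name PageAllocatorSketch is illustrative; the calls and signatures are the ones declared above, and error handling is reduced to asserts.

#include <cassert>

#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"

void PageAllocatorSketch() {
  using partition_alloc::PageAccessibilityConfiguration;
  using partition_alloc::PageAccessibilityDisposition;

  const size_t granularity =
      partition_alloc::internal::PageAllocationGranularity();
  const size_t length = 4 * granularity;

  // Allocate |length| bytes of committed read-write memory, aligned to the
  // allocation granularity, at an address chosen by the allocator.
  uintptr_t region = partition_alloc::AllocPages(
      length, granularity,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadWrite),
      partition_alloc::PageTag::kChromium);
  assert(region);

  // Release the physical backing of the first system page while keeping the
  // address range reserved, then commit it again as read-write.
  const size_t page = partition_alloc::internal::SystemPageSize();
  partition_alloc::DecommitSystemPages(
      region, page, PageAccessibilityDisposition::kRequireUpdate);
  partition_alloc::RecommitSystemPages(
      region, page,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadWrite),
      PageAccessibilityDisposition::kRequireUpdate);

  // Unmap the whole region; |region| and |length| must match the AllocPages()
  // call above.
  partition_alloc::FreePages(region, length);
}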
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h
new file mode 100644
index 0000000..cb6116c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h
@@ -0,0 +1,180 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_CONSTANTS_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
+
+#include <mach/vm_page_size.h>
+
+// Although page allocator constants are not constexpr, they are run-time
+// constant. Because the underlying variables they access, such as vm_page_size,
+// are not marked const, the compiler normally has no way to know that they
+// don’t change and must obtain their values whenever it can't prove that they
+// haven't been modified, even if they had already been obtained previously.
+// Attaching __attribute__((const)) to these declarations allows these redundant
+// accesses to be omitted under optimization such as common subexpression
+// elimination.
+#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
+
+#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+// This should work for all POSIX (if needed), but currently all other
+// supported OS/architecture combinations use either hard-coded values
+// (such as x86) or have means to determine these values without needing
+// atomics (such as macOS on arm64).
+
+// Page allocator constants are run-time constant
+#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
+
+#include <unistd.h>
+#include <atomic>
+
+namespace partition_alloc::internal {
+
+// Holds the current page size and shift, where size = 1 << shift. Use
+// PageAllocationGranularity() and PageAllocationGranularityShift() to
+// initialize and retrieve these values safely.
+struct PageCharacteristics {
+  std::atomic<size_t> size;
+  std::atomic<size_t> shift;
+};
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+extern PageCharacteristics page_characteristics;
+
+}  // namespace partition_alloc::internal
+
+#else
+
+// When defined, page size constants are fixed at compile time. When not
+// defined, they may vary at run time.
+#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
+
+// Use this macro to declare a function as constexpr or not based on whether
+// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
+#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
+
+#endif
+
+// Ability to name anonymous VMAs is available on some, but not all Linux-based
+// systems.
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
+#include <sys/prctl.h>
+
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+#define LINUX_NAME_REGION 1
+#endif
+
+#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
+
+namespace partition_alloc::internal {
+
+// Forward declaration; the implementation is below.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PageAllocationGranularity();
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PageAllocationGranularityShift() {
+#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
+  // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
+  // sizes.  Since 64kB is the de facto standard on the platform and binaries
+  // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
+  // here.
+  return 16;  // 64kB
+#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64)
+  return 14;  // 16kB
+#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
+  return static_cast<size_t>(vm_page_shift);
+#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+  // arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
+  // page sizes. Retrieve from or initialize cache.
+  size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
+  if (PA_UNLIKELY(shift == 0)) {
+    shift = static_cast<size_t>(
+        __builtin_ctz((unsigned int)PageAllocationGranularity()));
+    page_characteristics.shift.store(shift, std::memory_order_relaxed);
+  }
+  return shift;
+#else
+  return 12;  // 4kB
+#endif
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PageAllocationGranularity() {
+#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
+  // This is literally equivalent to |1 << PageAllocationGranularityShift()|
+  // below, but was separated out for IS_APPLE to avoid << on a non-constexpr.
+  return vm_page_size;
+#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+  // arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
+  // initialize cache.
+  size_t size = page_characteristics.size.load(std::memory_order_relaxed);
+  if (PA_UNLIKELY(size == 0)) {
+    size = static_cast<size_t>(getpagesize());
+    page_characteristics.size.store(size, std::memory_order_relaxed);
+  }
+  return size;
+#else
+  return 1 << PageAllocationGranularityShift();
+#endif
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PageAllocationGranularityOffsetMask() {
+  return PageAllocationGranularity() - 1;
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PageAllocationGranularityBaseMask() {
+  return ~PageAllocationGranularityOffsetMask();
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+SystemPageShift() {
+  // On Windows allocation granularity is higher than the page size. This comes
+  // into play when reserving address space range (allocation granularity),
+  // compared to committing pages into memory (system page granularity).
+#if BUILDFLAG(IS_WIN)
+  return 12;  // 4096=1<<12
+#else
+  return PageAllocationGranularityShift();
+#endif
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+SystemPageSize() {
+#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
+    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
+  // This is literally equivalent to |1 << SystemPageShift()| below, but was
+  // separated out for 64-bit IS_APPLE and arm64 on Linux to avoid << on a
+  // non-constexpr.
+  return PageAllocationGranularity();
+#else
+  return 1 << SystemPageShift();
+#endif
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+SystemPageOffsetMask() {
+  return SystemPageSize() - 1;
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+SystemPageBaseMask() {
+  return ~SystemPageOffsetMask();
+}
+
+constexpr size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
+constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_CONSTANTS_H_
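A small worked example of the mask arithmetic defined above, assuming the common 4 KiB system page (SystemPageShift() == 12). The constants are hard-coded so the snippet stands alone; it mirrors RoundUpToSystemPage()/RoundDownToSystemPage() from page_allocator.h.

#include <cassert>
#include <cstdint>

int main() {
  // Assumes the default 4 KiB system page, i.e. SystemPageShift() == 12.
  const uintptr_t kPageSize = uintptr_t{1} << 12;
  const uintptr_t kOffsetMask = kPageSize - 1;  // as SystemPageOffsetMask()
  const uintptr_t kBaseMask = ~kOffsetMask;     // as SystemPageBaseMask()

  // Same arithmetic as RoundUpToSystemPage() / RoundDownToSystemPage() in
  // page_allocator.h.
  assert(((0x1234 + kOffsetMask) & kBaseMask) == 0x2000);  // round up
  assert((0x1234 & kBaseMask) == 0x1000);                  // round down
  assert(((0x2000 + kOffsetMask) & kBaseMask) == 0x2000);  // already aligned
  return 0;
}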
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internal.h b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internal.h
new file mode 100644
index 0000000..0a2b13c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internal.h
@@ -0,0 +1,23 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNAL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNAL_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+
+namespace partition_alloc::internal {
+
+uintptr_t SystemAllocPages(uintptr_t hint,
+                           size_t length,
+                           PageAccessibilityConfiguration accessibility,
+                           PageTag page_tag,
+                           int file_descriptor_for_shared_alloc = -1);
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNAL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_fuchsia.h b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_fuchsia.h
new file mode 100644
index 0000000..256d783
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_fuchsia.h
@@ -0,0 +1,267 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file implements memory allocation primitives for PageAllocator using
+// Fuchsia's VMOs (Virtual Memory Objects). VMO API is documented in
+// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
+// object that corresponds to a set of memory pages. VMO pages may be mapped
+// to an address space. The code below creates a VMO for each memory allocation
+// and maps it into the default address space of the current process.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
+
+#include <fidl/fuchsia.kernel/cpp/fidl.h>
+#include <lib/component/incoming/cpp/protocol.h>
+#include <lib/zx/resource.h>
+#include <lib/zx/vmar.h>
+#include <lib/zx/vmo.h>
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+
+zx::resource GetVmexResource() {
+  auto vmex_resource_client =
+      component::Connect<fuchsia_kernel::VmexResource>();
+  if (vmex_resource_client.is_error()) {
+    PA_LOG(ERROR) << "Connect(VmexResource):"
+                  << vmex_resource_client.status_string();
+    return {};
+  }
+
+  fidl::SyncClient sync_vmex_resource_client(
+      std::move(vmex_resource_client.value()));
+  auto result = sync_vmex_resource_client->Get();
+  if (result.is_error()) {
+    PA_LOG(ERROR) << "VmexResource.Get():"
+                  << result.error_value().FormatDescription().c_str();
+    return {};
+  }
+
+  return std::move(result->resource());
+}
+
+const zx::resource& VmexResource() {
+  static base::NoDestructor<zx::resource> vmex_resource(GetVmexResource());
+  return *vmex_resource;
+}
+
+// Returns VMO name for a PageTag.
+const char* PageTagToName(PageTag tag) {
+  switch (tag) {
+    case PageTag::kBlinkGC:
+      return "cr_blink_gc";
+    case PageTag::kPartitionAlloc:
+      return "cr_partition_alloc";
+    case PageTag::kChromium:
+      return "cr_chromium";
+    case PageTag::kV8:
+      return "cr_v8";
+    case PageTag::kSimulation:
+      PA_NOTREACHED();
+  }
+  PA_NOTREACHED();
+}
+
+zx_vm_option_t PageAccessibilityToZxVmOptions(
+    PageAccessibilityConfiguration accessibility) {
+  switch (accessibility.permissions) {
+    case PageAccessibilityConfiguration::kRead:
+      return ZX_VM_PERM_READ;
+    case PageAccessibilityConfiguration::kReadWrite:
+    case PageAccessibilityConfiguration::kReadWriteTagged:
+      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+    case PageAccessibilityConfiguration::kReadExecuteProtected:
+    case PageAccessibilityConfiguration::kReadExecute:
+      return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
+    case PageAccessibilityConfiguration::kReadWriteExecute:
+      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
+    case PageAccessibilityConfiguration::kInaccessible:
+    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
+      return 0;
+  };
+  PA_NOTREACHED();
+}
+
+}  // namespace
+
+// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
+// |hint| is not advisory.
+constexpr bool kHintIsAdvisory = false;
+
+std::atomic<int32_t> s_allocPageErrorCode{0};
+
+uintptr_t SystemAllocPagesInternal(
+    uintptr_t hint,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageTag page_tag,
+    [[maybe_unused]] int file_descriptor_for_shared_alloc) {
+  zx::vmo vmo;
+  zx_status_t status = zx::vmo::create(length, 0, &vmo);
+  if (status != ZX_OK) {
+    PA_ZX_DLOG(INFO, status) << "zx_vmo_create";
+    return 0;
+  }
+
+  const char* vmo_name = PageTagToName(page_tag);
+  status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));
+
+  // VMO names are used only for debugging, so failure to set a name is not
+  // fatal.
+  PA_ZX_DCHECK(status == ZX_OK, status);
+
+  if (accessibility.permissions ==
+          PageAccessibilityConfiguration::kInaccessibleWillJitLater ||
+      accessibility.permissions ==
+          PageAccessibilityConfiguration::kReadWriteExecute) {
+    // V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
+    // in the new VMO.
+    status = vmo.replace_as_executable(VmexResource(), &vmo);
+    if (status != ZX_OK) {
+      PA_ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
+      return 0;
+    }
+  }
+
+  zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);
+
+  uint64_t vmar_offset = 0;
+  if (hint) {
+    vmar_offset = hint;
+    options |= ZX_VM_SPECIFIC;
+  }
+
+  uint64_t address;
+  status = zx::vmar::root_self()->map(options, vmar_offset, vmo,
+                                      /*vmo_offset=*/0, length, &address);
+  if (status != ZX_OK) {
+    // map() is expected to fail if |hint| is set to an already-in-use location.
+    if (!hint) {
+      PA_ZX_DLOG(ERROR, status) << "zx_vmar_map";
+    }
+    return 0;
+  }
+
+  return address;
+}
+
+uintptr_t TrimMappingInternal(uintptr_t base_address,
+                              size_t base_length,
+                              size_t trim_length,
+                              PageAccessibilityConfiguration accessibility,
+                              size_t pre_slack,
+                              size_t post_slack) {
+  PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
+
+  // Unmap head if necessary.
+  if (pre_slack) {
+    zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
+    PA_ZX_CHECK(status == ZX_OK, status);
+  }
+
+  // Unmap tail if necessary.
+  if (post_slack) {
+    zx_status_t status = zx::vmar::root_self()->unmap(
+        base_address + pre_slack + trim_length, post_slack);
+    PA_ZX_CHECK(status == ZX_OK, status);
+  }
+
+  return base_address + pre_slack;
+}
+
+bool TrySetSystemPagesAccessInternal(
+    uint64_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  zx_status_t status = zx::vmar::root_self()->protect(
+      PageAccessibilityToZxVmOptions(accessibility), address, length);
+  return status == ZX_OK;
+}
+
+void SetSystemPagesAccessInternal(
+    uint64_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  zx_status_t status = zx::vmar::root_self()->protect(
+      PageAccessibilityToZxVmOptions(accessibility), address, length);
+  PA_ZX_CHECK(status == ZX_OK, status);
+}
+
+void FreePagesInternal(uint64_t address, size_t length) {
+  zx_status_t status = zx::vmar::root_self()->unmap(address, length);
+  PA_ZX_CHECK(status == ZX_OK, status);
+}
+
+void DiscardSystemPagesInternal(uint64_t address, size_t length) {
+  zx_status_t status = zx::vmar::root_self()->op_range(
+      ZX_VMO_OP_DECOMMIT, address, length, nullptr, 0);
+  PA_ZX_CHECK(status == ZX_OK, status);
+}
+
+void DecommitSystemPagesInternal(
+    uint64_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  if (accessibility_disposition ==
+      PageAccessibilityDisposition::kRequireUpdate) {
+    SetSystemPagesAccess(address, length,
+                         PageAccessibilityConfiguration(
+                             PageAccessibilityConfiguration::kInaccessible));
+  }
+
+  DiscardSystemPagesInternal(address, length);
+}
+
+void DecommitAndZeroSystemPagesInternal(uintptr_t address,
+                                        size_t length,
+                                        PageTag page_tag) {
+  SetSystemPagesAccess(address, length,
+                       PageAccessibilityConfiguration(
+                           PageAccessibilityConfiguration::kInaccessible));
+
+  DiscardSystemPagesInternal(address, length);
+}
+
+void RecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // On Fuchsia systems, the caller needs to simply read the memory to recommit
+  // it. However, if decommit changed the permissions, recommit has to change
+  // them back.
+  if (accessibility_disposition ==
+      PageAccessibilityDisposition::kRequireUpdate) {
+    SetSystemPagesAccess(address, length, accessibility);
+  }
+}
+
+bool TryRecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // On Fuchsia systems, the caller needs to simply read the memory to recommit
+  // it. However, if decommit changed the permissions, recommit has to change
+  // them back.
+  if (accessibility_disposition ==
+      PageAccessibilityDisposition::kRequireUpdate) {
+    return TrySetSystemPagesAccess(address, length, accessibility);
+  }
+  return true;
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
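A reduced sketch of the VMO-backed allocation flow that the file comment describes, using only the Zircon calls that appear in this header (zx::vmo::create(), zx::vmar::root_self()->map() and unmap()). The helper names are illustrative and the permissions are fixed to read-write.

#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>

#include <cstddef>
#include <cstdint>

// Creates a VMO of |length| bytes and maps it read-write into the current
// process' root VMAR at a kernel-chosen address. Returns 0 on failure.
uintptr_t MapAnonymousSketch(size_t length) {
  zx::vmo vmo;
  if (zx::vmo::create(length, 0, &vmo) != ZX_OK) {
    return 0;
  }
  zx_vaddr_t address = 0;
  if (zx::vmar::root_self()->map(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                 /*vmar_offset=*/0, vmo, /*vmo_offset=*/0,
                                 length, &address) != ZX_OK) {
    return 0;
  }
  return address;
}

// Freeing is a plain unmap of the mapped range, as in FreePagesInternal().
void UnmapSketch(uintptr_t address, size_t length) {
  zx::vmar::root_self()->unmap(address, length);
}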
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.cc
new file mode 100644
index 0000000..e96514f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.cc
@@ -0,0 +1,44 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+
+#include <sys/mman.h>
+
+// PA_PROT_BTI requests a page that supports BTI landing pads.
+#define PA_PROT_BTI 0x10
+// PA_PROT_MTE requests a page that's suitable for memory tagging.
+#define PA_PROT_MTE 0x20
+
+namespace partition_alloc::internal {
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+  switch (accessibility.permissions) {
+    case PageAccessibilityConfiguration::kRead:
+      return PROT_READ;
+    case PageAccessibilityConfiguration::kReadWriteTagged:
+#if defined(ARCH_CPU_ARM64)
+      return PROT_READ | PROT_WRITE |
+             (base::CPU::GetInstanceNoAllocation().has_mte() ? PA_PROT_MTE : 0);
+#else
+      [[fallthrough]];
+#endif
+    case PageAccessibilityConfiguration::kReadWrite:
+      return PROT_READ | PROT_WRITE;
+    case PageAccessibilityConfiguration::kReadExecuteProtected:
+      return PROT_READ | PROT_EXEC |
+             (base::CPU::GetInstanceNoAllocation().has_bti() ? PA_PROT_BTI : 0);
+    case PageAccessibilityConfiguration::kReadExecute:
+      return PROT_READ | PROT_EXEC;
+    case PageAccessibilityConfiguration::kReadWriteExecute:
+      return PROT_READ | PROT_WRITE | PROT_EXEC;
+    case PageAccessibilityConfiguration::kInaccessible:
+    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
+      return PROT_NONE;
+  }
+}
+
+}  // namespace partition_alloc::internal
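A sketch of how GetAccessFlags() is meant to be consumed, mirroring the non-thread-isolated branch of TrySetSystemPagesAccessInternal() in page_allocator_internals_posix.h. The helper name MakeReadOnlySketch is illustrative, and the sketch assumes it is built in a target that can link against this translation unit.

#include <sys/mman.h>

#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"

namespace partition_alloc::internal {
// Declared in page_allocator_internals_posix.h and defined above.
int GetAccessFlags(PageAccessibilityConfiguration accessibility);
}  // namespace partition_alloc::internal

// Re-protects |length| bytes at |address| as read-only, the same way the
// non-thread-isolated branch of TrySetSystemPagesAccessInternal() does.
bool MakeReadOnlySketch(void* address, size_t length) {
  using partition_alloc::PageAccessibilityConfiguration;
  const int prot = partition_alloc::internal::GetAccessFlags(
      PageAccessibilityConfiguration(PageAccessibilityConfiguration::kRead));
  // For kRead this is PROT_READ.
  return mprotect(address, length, prot) == 0;
}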
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.h b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.h
new file mode 100644
index 0000000..75b45a1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.h
@@ -0,0 +1,426 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+#include <cstring>
+
+#include <sys/mman.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.h"
+#if BUILDFLAG(IS_IOS)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.h"
+#elif BUILDFLAG(IS_MAC)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"
+#else
+#error "Unknown platform"
+#endif
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_cftyperef.h"
+
+#include <Availability.h>
+#include <Security/Security.h>
+#include <mach/mach.h>
+#endif
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
+#include <sys/prctl.h>
+#endif
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#include <sys/resource.h>
+#endif
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#if BUILDFLAG(IS_MAC)
+
+// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it’s
+// available on iOS and other Apple operating systems. It is, in fact, present
+// on the system since macOS 10.12.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wavailability"
+uint32_t SecTaskGetCodeSignStatus(SecTaskRef task) API_AVAILABLE(macos(10.12));
+#pragma clang diagnostic pop
+
+#endif  // BUILDFLAG(IS_MAC)
+
+namespace partition_alloc::internal {
+
+namespace {
+
+#if defined(LINUX_NAME_REGION)
+
+void NameRegion(void* start, size_t length, PageTag page_tag) {
+  // Important: All the names should be string literals. As per prctl.h in
+  // //third_party/android_toolchain/ndk the kernel keeps a pointer to the name
+  // instead of copying it.
+  //
+  // Having the name in .rodata ensures that the pointer remains valid as
+  // long as the mapping is alive.
+  const char* name = nullptr;
+  switch (page_tag) {
+    case PageTag::kSimulation:
+      name = "simulation";
+      break;
+    case PageTag::kBlinkGC:
+      name = "blink_gc";
+      break;
+    case PageTag::kPartitionAlloc:
+      name = "partition_alloc";
+      break;
+    case PageTag::kChromium:
+      name = "chromium";
+      break;
+    case PageTag::kV8:
+      name = "v8";
+      break;
+    default:
+      PA_NOTREACHED();
+      break;
+  }
+
+  // No error checking on purpose; the name is only a debugging aid.
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, length, name);
+}
+
+#endif  // defined(LINUX_NAME_REGION)
+
+#if BUILDFLAG(IS_MAC)
+// Tests whether the version of macOS supports the MAP_JIT flag and if the
+// current process is signed with the hardened runtime and the allow-jit
+// entitlement, returning whether MAP_JIT should be used to allocate regions
+// that will contain JIT-compiled executable code.
+bool UseMapJit() {
+  // Until determining that the hardened runtime is enabled, early returns will
+  // return true, so that MAP_JIT will be used. This is important on arm64,
+  // which only allows pages to be simultaneously writable and executable when
+  // in a region allocated with MAP_JIT, regardless of code signing options. On
+  // arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
+  // executable fails with EPERM. Although this is not enforced on x86_64,
+  // MAP_JIT is harmless in that case.
+
+  base::apple::ScopedCFTypeRef<SecTaskRef> task(
+      SecTaskCreateFromSelf(kCFAllocatorDefault));
+  if (!task) {
+    return true;
+  }
+
+  uint32_t flags = SecTaskGetCodeSignStatus(task);
+  if (!(flags & kSecCodeSignatureRuntime)) {
+    // The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
+    // == CS_RUNTIME.
+    return true;
+  }
+
+  // The hardened runtime is enabled. From this point on, early returns must
+  // return false, indicating that MAP_JIT is not to be used. It’s an error
+  // (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
+  // entitlement is specified.
+
+  base::apple::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
+      SecTaskCopyValueForEntitlement(
+          task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
+  if (!jit_entitlement) {
+    return false;
+  }
+
+  return base::apple::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
+         kCFBooleanTrue;
+}
+#elif BUILDFLAG(IS_IOS)
+bool UseMapJit() {
+// Always enable MAP_JIT in simulator as it is supported unconditionally.
+#if TARGET_IPHONE_SIMULATOR
+  return true;
+#else
+  // TODO(https://crbug.com/1413818): Fill this out when the API is
+  // available.
+  return false;
+#endif  // TARGET_IPHONE_SIMULATOR
+}
+#endif  // BUILDFLAG(IS_IOS)
+}  // namespace
+
+// |mmap| uses a nearby address if the hint address is blocked.
+constexpr bool kHintIsAdvisory = true;
+std::atomic<int32_t> s_allocPageErrorCode{0};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility);
+
+uintptr_t SystemAllocPagesInternal(uintptr_t hint,
+                                   size_t length,
+                                   PageAccessibilityConfiguration accessibility,
+                                   PageTag page_tag,
+                                   int file_descriptor_for_shared_alloc) {
+#if BUILDFLAG(IS_APPLE)
+  // Use a custom tag to make it easier to distinguish Partition Alloc regions
+  // in vmmap(1). Tags between 240-255 are supported.
+  int fd = file_descriptor_for_shared_alloc == -1
+               ? VM_MAKE_TAG(static_cast<int>(page_tag))
+               : file_descriptor_for_shared_alloc;
+#else
+  int fd = file_descriptor_for_shared_alloc;
+#endif
+
+  int access_flag = GetAccessFlags(accessibility);
+  int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
+
+#if BUILDFLAG(IS_APPLE)
+  // On macOS, executables that are code signed with the "runtime" option cannot
+  // execute writable memory by default. They can opt into this capability by
+  // specifying the "com.apple.security.cs.allow-jit" code signing entitlement
+  // and allocating the region with the MAP_JIT flag.
+  static const bool kUseMapJit = UseMapJit();
+  if (accessibility.permissions ==
+          PageAccessibilityConfiguration::kInaccessibleWillJitLater &&
+      kUseMapJit) {
+    map_flags |= MAP_JIT;
+  }
+#endif
+
+  void* ret = mmap(reinterpret_cast<void*>(hint), length, access_flag,
+                   map_flags, fd, 0);
+  if (ret == MAP_FAILED) {
+    s_allocPageErrorCode = errno;
+    ret = nullptr;
+  }
+
+#if defined(LINUX_NAME_REGION)
+  if (ret) {
+    NameRegion(ret, length, page_tag);
+  }
+#endif
+
+  return reinterpret_cast<uintptr_t>(ret);
+}
+
+bool TrySetSystemPagesAccessInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  if (accessibility.thread_isolation.enabled) {
+    return 0 == MprotectWithThreadIsolation(reinterpret_cast<void*>(address),
+                                            length,
+                                            GetAccessFlags(accessibility),
+                                            accessibility.thread_isolation);
+  }
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
+                                       GetAccessFlags(accessibility)));
+}
+
+void SetSystemPagesAccessInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  int access_flags = GetAccessFlags(accessibility);
+  int ret;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  if (accessibility.thread_isolation.enabled) {
+    ret = MprotectWithThreadIsolation(reinterpret_cast<void*>(address), length,
+                                      GetAccessFlags(accessibility),
+                                      accessibility.thread_isolation);
+  } else
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  {
+    ret = PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
+                                   GetAccessFlags(accessibility)));
+  }
+
+  // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
+  // kernel data structures cannot be allocated, (2) the address range is
+  // invalid, or (3) this would split an existing mapping in a way that would
+  // exceed the maximum number of allowed mappings.
+  //
+  // None of these is very likely, but we still get a lot of crashes here.
+  // This is because setrlimit(RLIMIT_DATA)'s limit is checked and enforced
+  // here when the access flags match a "data" mapping, which in our case would
+  // be MAP_PRIVATE | MAP_ANONYMOUS with PROT_WRITE. See the call to
+  // may_expand_vm() in mm/mprotect.c in the kernel for details.
+  //
+  // In this case, we are almost certainly bumping into the sandbox limit, so
+  // mark the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
+  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE)) {
+    OOM_CRASH(length);
+  }
+
+  PA_PCHECK(0 == ret);
+}
+
+void FreePagesInternal(uintptr_t address, size_t length) {
+  PA_PCHECK(0 == munmap(reinterpret_cast<void*>(address), length));
+}
+
+uintptr_t TrimMappingInternal(uintptr_t base_address,
+                              size_t base_length,
+                              size_t trim_length,
+                              PageAccessibilityConfiguration accessibility,
+                              size_t pre_slack,
+                              size_t post_slack) {
+  uintptr_t ret = base_address;
+  // We can resize the allocation run. Release unneeded memory before and after
+  // the aligned range.
+  if (pre_slack) {
+    FreePages(base_address, pre_slack);
+    ret = base_address + pre_slack;
+  }
+  if (post_slack) {
+    FreePages(ret + trim_length, post_slack);
+  }
+  return ret;
+}
+
+void DecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // In POSIX, there is no decommit concept. Discarding is an effective way of
+  // implementing the Windows semantics where the OS is allowed to not swap the
+  // pages in the region.
+  DiscardSystemPages(address, length);
+
+  bool change_permissions =
+      accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // This is not guaranteed; show that we're serious.
+  //
+  // More specifically, several callers have had issues with assuming that
+  // memory is zeroed, this would hopefully make these bugs more visible.  We
+  // don't memset() everything, because ranges can be very large, and doing it
+  // over the entire range could make Chrome unusable with
+  // BUILDFLAG(PA_DCHECK_IS_ON).
+  //
+  // Only do it when we are about to change the permissions, since we don't know
+  // the previous permissions, and cannot restore them.
+  if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
+    // Memory may not be writable.
+    size_t size = std::min(length, 2 * SystemPageSize());
+    void* ptr = reinterpret_cast<void*>(address);
+    PA_CHECK(mprotect(ptr, size, PROT_WRITE) == 0);
+    memset(ptr, 0xcc, size);
+  }
+#endif
+
+  // Make pages inaccessible, unless the caller requested to keep permissions.
+  //
+  // Note, there is a small window between these calls when the pages can be
+  // incorrectly touched and brought back to memory. Not ideal, but doing those
+  // operations in the opposite order resulted in PMF regression on Mac (see
+  // crbug.com/1153021).
+  if (change_permissions) {
+    SetSystemPagesAccess(address, length,
+                         PageAccessibilityConfiguration(
+                             PageAccessibilityConfiguration::kInaccessible));
+  }
+}
+
+void DecommitAndZeroSystemPagesInternal(uintptr_t address,
+                                        size_t length,
+                                        PageTag page_tag) {
+  int fd = -1;
+#if BUILDFLAG(IS_APPLE)
+  fd = VM_MAKE_TAG(static_cast<int>(page_tag));
+#endif
+
+  // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
+  // a MAP_FIXED request is successful, then any previous mappings [...] for
+  // those whole pages containing any part of the address range [pa,pa+len)
+  // shall be removed, as if by an appropriate call to munmap(), before the
+  // new mapping is established." As a consequence, the memory will be
+  // zero-initialized on next access.
+  void* ptr = reinterpret_cast<void*>(address);
+  void* ret = mmap(ptr, length, PROT_NONE,
+                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+  PA_CHECK(ptr == ret);
+  // Since we just remapped the region, we need to set its name again.
+#if defined(LINUX_NAME_REGION)
+  NameRegion(ret, length, page_tag);
+#endif
+}
+
+void RecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // On POSIX systems, the caller needs to simply read the memory to recommit
+  // it. However, if decommit changed the permissions, recommit has to change
+  // them back.
+  if (accessibility_disposition ==
+      PageAccessibilityDisposition::kRequireUpdate) {
+    SetSystemPagesAccess(address, length, accessibility);
+  }
+
+#if BUILDFLAG(IS_APPLE)
+  // On macOS, to update accounting, we need to make another syscall. For more
+  // details, see https://crbug.com/823915.
+  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
+#endif
+}
+
+bool TryRecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // On POSIX systems, the caller needs to simply read the memory to recommit
+  // it. However, if decommit changed the permissions, recommit has to change
+  // them back.
+  if (accessibility_disposition ==
+      PageAccessibilityDisposition::kRequireUpdate) {
+    bool ok = TrySetSystemPagesAccess(address, length, accessibility);
+    if (!ok) {
+      return false;
+    }
+  }
+
+#if BUILDFLAG(IS_APPLE)
+  // On macOS, to update accounting, we need to make another syscall. For more
+  // details, see https://crbug.com/823915.
+  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
+#endif
+
+  return true;
+}
+
+void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
+  void* ptr = reinterpret_cast<void*>(address);
+#if BUILDFLAG(IS_APPLE)
+  int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
+  if (ret) {
+    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
+    ret = madvise(ptr, length, MADV_DONTNEED);
+  }
+  PA_PCHECK(ret == 0);
+#else   // BUILDFLAG(IS_APPLE)
+  // We have experimented with other flags, but with suboptimal results.
+  //
+  // MADV_FREE (Linux): Makes our memory measurements less predictable;
+  // performance benefits unclear.
+  //
+  // Therefore, we just do the simple thing: MADV_DONTNEED.
+  PA_PCHECK(0 == madvise(ptr, length, MADV_DONTNEED));
+#endif  // BUILDFLAG(IS_APPLE)
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_win.h b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_win.h
new file mode 100644
index 0000000..4c5469b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_win.h
@@ -0,0 +1,240 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc::internal {
+
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
+constexpr bool kHintIsAdvisory = false;
+std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
+
+bool IsOutOfMemory(DWORD error) {
+  // From
+  // https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
+  switch (error) {
+    // Page file is being extended.
+    case ERROR_COMMITMENT_MINIMUM:
+    // Page file is too small.
+    case ERROR_COMMITMENT_LIMIT:
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+    // Not enough memory resources are available to process this command.
+    //
+    // It is not entirely clear whether this error pertains to out of address
+    // space errors, or the kernel being out of memory. Only include it for 64
+    // bit architectures, since address space issues are unlikely there.
+    case ERROR_NOT_ENOUGH_MEMORY:
+#endif
+    // Insufficient quota to complete the requested service.
+    case ERROR_PAGEFILE_QUOTA:
+      return true;
+    default:
+      return false;
+  }
+}
+
+void* VirtualAllocWithRetry(void* address,
+                            size_t size,
+                            DWORD type_flags,
+                            DWORD access_flags) {
+  void* ret = nullptr;
+  // Failure to commit memory can be temporary, in at least two cases:
+  // - The page file is getting extended.
+  // - Another process terminates (most likely because of OOM)
+  //
+  // Wait and retry, since the alternative is crashing. Note that if we
+  // selectively apply this... hum... beautiful hack to some process types only,
+  // "some process crashing" may very well be one of ours, which may be
+  // desirable (e.g. some processes like the browser are more important than
+  // others).
+  //
+  // This approach has been shown to be effective for Firefox, see
+  // crbug.com/1392738 for context. Constants below are accordingly taken from
+  // Firefox as well.
+  constexpr int kMaxTries = 10;
+  constexpr int kDelayMs = 50;
+
+  bool should_retry = GetRetryOnCommitFailure() && (type_flags & MEM_COMMIT) &&
+                      (access_flags != PAGE_NOACCESS);
+  for (int tries = 0; tries < kMaxTries; tries++) {
+    ret = VirtualAlloc(address, size, type_flags, access_flags);
+    // Only retry for commit failures. If this is an address space problem
+    // (e.g. caller asked for an address which is not available), this is
+    // unlikely to be resolved by waiting.
+    if (ret || !should_retry || !IsOutOfMemory(GetLastError())) {
+      break;
+    }
+
+    Sleep(kDelayMs);
+  }
+  return ret;
+}
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+  switch (accessibility.permissions) {
+    case PageAccessibilityConfiguration::kRead:
+      return PAGE_READONLY;
+    case PageAccessibilityConfiguration::kReadWrite:
+    case PageAccessibilityConfiguration::kReadWriteTagged:
+      return PAGE_READWRITE;
+    case PageAccessibilityConfiguration::kReadExecute:
+    case PageAccessibilityConfiguration::kReadExecuteProtected:
+      return PAGE_EXECUTE_READ;
+    case PageAccessibilityConfiguration::kReadWriteExecute:
+      return PAGE_EXECUTE_READWRITE;
+    case PageAccessibilityConfiguration::kInaccessible:
+    case PageAccessibilityConfiguration::kInaccessibleWillJitLater:
+      return PAGE_NOACCESS;
+  }
+  PA_NOTREACHED();
+}
+
+uintptr_t SystemAllocPagesInternal(
+    uintptr_t hint,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageTag page_tag,
+    [[maybe_unused]] int file_descriptor_for_shared_alloc) {
+  const DWORD access_flag = GetAccessFlags(accessibility);
+  const DWORD type_flags =
+      (access_flag == PAGE_NOACCESS) ? MEM_RESERVE : (MEM_RESERVE | MEM_COMMIT);
+  void* ret = VirtualAllocWithRetry(reinterpret_cast<void*>(hint), length,
+                                    type_flags, access_flag);
+  if (ret == nullptr) {
+    s_allocPageErrorCode = GetLastError();
+  }
+  return reinterpret_cast<uintptr_t>(ret);
+}
+
+uintptr_t TrimMappingInternal(uintptr_t base_address,
+                              size_t base_length,
+                              size_t trim_length,
+                              PageAccessibilityConfiguration accessibility,
+                              size_t pre_slack,
+                              size_t post_slack) {
+  uintptr_t ret = base_address;
+  if (pre_slack || post_slack) {
+    // We cannot resize the allocation run. Free it and retry at the aligned
+    // address within the freed range.
+    ret = base_address + pre_slack;
+    FreePages(base_address, base_length);
+    ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium);
+  }
+  return ret;
+}
+
+bool TrySetSystemPagesAccessInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  void* ptr = reinterpret_cast<void*>(address);
+  if (GetAccessFlags(accessibility) == PAGE_NOACCESS) {
+    return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
+  }
+  // Call the retry path even though this function can fail, because callers of
+  // this are likely to crash the process when this function fails, and we don't
+  // want that for transient failures.
+  return nullptr != VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
+                                          GetAccessFlags(accessibility));
+}
+
+void SetSystemPagesAccessInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  void* ptr = reinterpret_cast<void*>(address);
+  const DWORD access_flag = GetAccessFlags(accessibility);
+  if (access_flag == PAGE_NOACCESS) {
+    if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
+      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
+      // report we get the error number.
+      PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
+    }
+  } else {
+    if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT, access_flag)) {
+      int32_t error = GetLastError();
+      if (error == ERROR_COMMITMENT_LIMIT) {
+        OOM_CRASH(length);
+      }
+      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
+      // report we get the error number.
+      PA_CHECK(ERROR_SUCCESS == error);
+    }
+  }
+}
+
+void FreePagesInternal(uintptr_t address, size_t length) {
+  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), 0, MEM_RELEASE));
+}
+
+void DecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // Ignore accessibility_disposition, because decommitting is equivalent to
+  // making pages inaccessible.
+  SetSystemPagesAccess(address, length,
+                       PageAccessibilityConfiguration(
+                           PageAccessibilityConfiguration::kInaccessible));
+}
+
+void DecommitAndZeroSystemPagesInternal(uintptr_t address,
+                                        size_t length,
+                                        PageTag page_tag) {
+  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
+  // "If a page is decommitted but not released, its state changes to reserved.
+  // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
+  // release it. Attempts to read from or write to a reserved page results in an
+  // access violation exception."
+  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
+  // for MEM_COMMIT: "The function also guarantees that when the caller later
+  // initially accesses the memory, the contents will be zero."
+  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), length, MEM_DECOMMIT));
+}
+
+void RecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // Ignore accessibility_disposition, because decommitting is equivalent to
+  // making pages inaccessible.
+  SetSystemPagesAccess(address, length, accessibility);
+}
+
+bool TryRecommitSystemPagesInternal(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageAccessibilityDisposition accessibility_disposition) {
+  // Ignore accessibility_disposition, because decommitting is equivalent to
+  // making pages inaccessible.
+  return TrySetSystemPagesAccess(address, length, accessibility);
+}
+
+void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
+  void* ptr = reinterpret_cast<void*>(address);
+  // Use DiscardVirtualMemory when available because it releases faster than
+  // MEM_RESET.
+  DWORD ret = DiscardVirtualMemory(ptr, length);
+  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+  // failure.
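+  // (DiscardVirtualMemory() returns ERROR_SUCCESS, i.e. 0, on success, so a
+  // non-zero return value is treated as failure here.)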
+  if (ret) {
+    PA_CHECK(VirtualAllocWithRetry(ptr, length, MEM_RESET, PAGE_READWRITE));
+  }
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PAGE_ALLOCATOR_INTERNALS_WIN_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/page_allocator_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_unittest.cc
new file mode 100644
index 0000000..772a3d1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/page_allocator_unittest.cc
@@ -0,0 +1,680 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+
+#if defined(LINUX_NAME_REGION)
+#include "base/debug/proc_maps_linux.h"
+#endif
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(IS_POSIX)
+#include <setjmp.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#endif  // BUILDFLAG(IS_POSIX)
+
+#include "base/allocator/partition_allocator/src/partition_alloc/arm_bti_test_functions.h"
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+#include <arm_acle.h>
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
+#define MTE_KILLED_BY_SIGNAL_AVAILABLE
+#endif
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace partition_alloc::internal {
+
+namespace {
+
+// Any number of bytes that can be allocated with no trouble.
+size_t EasyAllocSize() {
+  return (1024 * 1024) & ~(PageAllocationGranularity() - 1);
+}
+
+// A huge amount of memory, greater than or equal to the ASLR space.
+size_t HugeMemoryAmount() {
+  return std::max(::partition_alloc::internal::ASLRMask(),
+                  std::size_t{2} * ::partition_alloc::internal::ASLRMask());
+}
+
+}  // namespace
+
+TEST(PartitionAllocPageAllocatorTest, Rounding) {
+  EXPECT_EQ(0u, RoundUpToSystemPage(0u));
+  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(1));
+  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize() - 1));
+  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize()));
+  EXPECT_EQ(2 * SystemPageSize(), RoundUpToSystemPage(SystemPageSize() + 1));
+  EXPECT_EQ(0u, RoundDownToSystemPage(0u));
+  EXPECT_EQ(0u, RoundDownToSystemPage(SystemPageSize() - 1));
+  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize()));
+  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize() + 1));
+  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(2 * SystemPageSize() - 1));
+  EXPECT_EQ(0u, RoundUpToPageAllocationGranularity(0u));
+  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(1));
+  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(
+                                             PageAllocationGranularity() - 1));
+  EXPECT_EQ(PageAllocationGranularity(),
+            RoundUpToPageAllocationGranularity(PageAllocationGranularity()));
+  EXPECT_EQ(
+      2 * PageAllocationGranularity(),
+      RoundUpToPageAllocationGranularity(PageAllocationGranularity() + 1));
+  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(0u));
+  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(
+                    PageAllocationGranularity() - 1));
+  EXPECT_EQ(PageAllocationGranularity(),
+            RoundDownToPageAllocationGranularity(PageAllocationGranularity()));
+  EXPECT_EQ(PageAllocationGranularity(), RoundDownToPageAllocationGranularity(
+                                             PageAllocationGranularity() + 1));
+  EXPECT_EQ(PageAllocationGranularity(),
+            RoundDownToPageAllocationGranularity(
+                2 * PageAllocationGranularity() - 1));
+}
+
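+// The expectations below are consistent with NextAlignedWithOffset(address,
+// alignment, offset) returning the smallest value >= |address| that is
+// congruent to |offset| modulo |alignment|; e.g. for (1024, 2048, 100) that
+// is 2048 + 100 = 2148.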
+TEST(PartitionAllocPageAllocatorTest, NextAlignedWithOffset) {
+  EXPECT_EQ(1024u, NextAlignedWithOffset(1024, 1, 0));
+  EXPECT_EQ(2024u, NextAlignedWithOffset(1024, 1024, 1000));
+  EXPECT_EQ(2024u, NextAlignedWithOffset(2024, 1024, 1000));
+  EXPECT_EQ(3048u, NextAlignedWithOffset(2025, 1024, 1000));
+  EXPECT_EQ(2048u, NextAlignedWithOffset(1024, 2048, 0));
+  EXPECT_EQ(2148u, NextAlignedWithOffset(1024, 2048, 100));
+  EXPECT_EQ(2000u, NextAlignedWithOffset(1024, 2048, 2000));
+}
+
+// Test that failed page allocations invoke base::ReleaseReservation().
+// We detect this by making a reservation and ensuring that after failure, we
+// can make a new reservation.
+TEST(PartitionAllocPageAllocatorTest, AllocFailure) {
+  // Release any reservation made by another test.
+  ReleaseReservation();
+
+  // We can make a reservation.
+  EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
+
+  // We can't make another reservation until we trigger an allocation failure.
+  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
+
+  size_t size = HugeMemoryAmount();
+  // Skip the test for sanitizers and platforms with ASLR turned off.
+  if (size == 0) {
+    return;
+  }
+
+  uintptr_t result =
+      AllocPages(size, PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kChromium);
+  if (!result) {
+    // We triggered allocation failure. Our reservation should have been
+    // released, and we should be able to make a new reservation.
+    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
+    ReleaseReservation();
+    return;
+  }
+  // We couldn't fail. Make sure reservation is still there.
+  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
+}
+
+// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
+#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
+#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
+#else
+#define MAYBE_ReserveAddressSpace ReserveAddressSpace
+#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
+
+// Test that reserving address space can fail.
+TEST(PartitionAllocPageAllocatorTest, MAYBE_ReserveAddressSpace) {
+  // Release any reservation made by another test.
+  ReleaseReservation();
+
+  size_t size = HugeMemoryAmount();
+  // Skip the test for sanitizers and platforms with ASLR turned off.
+  if (size == 0) {
+    return;
+  }
+
+  bool success = ReserveAddressSpace(size);
+  if (!success) {
+    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
+    return;
+  }
+  // We couldn't fail. Make sure reservation is still there.
+  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
+}
+
+TEST(PartitionAllocPageAllocatorTest, AllocAndFreePages) {
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWrite),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  *buffer0 = 42;
+  EXPECT_EQ(42, *buffer0);
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocPageAllocatorTest, AllocPagesAligned) {
+  size_t alignment = 8 * PageAllocationGranularity();
+  size_t sizes[] = {PageAllocationGranularity(),
+                    alignment - PageAllocationGranularity(), alignment,
+                    alignment + PageAllocationGranularity(), alignment * 4};
+  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
+                      alignment - PageAllocationGranularity()};
+  for (size_t size : sizes) {
+    for (size_t offset : offsets) {
+      uintptr_t buffer = AllocPagesWithAlignOffset(
+          0, size, alignment, offset,
+          PageAccessibilityConfiguration(
+              PageAccessibilityConfiguration::kReadWrite),
+          PageTag::kChromium);
+      EXPECT_TRUE(buffer);
+      EXPECT_EQ(buffer % alignment, offset);
+      FreePages(buffer, size);
+    }
+  }
+}
+
+TEST(PartitionAllocPageAllocatorTest,
+     AllocAndFreePagesWithPageReadWriteTagged) {
+  // This test checks that a page allocated with
+  // PageAccessibilityConfiguration::kReadWriteTagged is safe to use on all
+  // systems (even those which don't support MTE).
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  *buffer0 = 42;
+  EXPECT_EQ(42, *buffer0);
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocPageAllocatorTest,
+     AllocAndFreePagesWithPageReadExecuteConfirmCFI) {
+  // This test checks that indirect branches to anything other than a valid
+  // branch target in a PageAccessibilityConfiguration::kReadExecute-mapped
+  // region crash on systems which support the Armv8.5 Branch Target
+  // Identification extension.
+  base::CPU cpu;
+  if (!cpu.has_bti()) {
+#if BUILDFLAG(IS_IOS)
+    // Workaround for iOS tests that incorrectly fail with GTEST_SKIP;
+    // see crbug.com/912138 for details.
+    return;
+#else
+    GTEST_SKIP();
+#endif
+  }
+#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
+  // Next, map some read-write memory and copy the BTI-enabled function there.
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWrite),
+                 PageTag::kChromium);
+  ptrdiff_t function_range =
+      reinterpret_cast<char*>(arm_bti_test_function_end) -
+      reinterpret_cast<char*>(arm_bti_test_function);
+  ptrdiff_t invalid_offset =
+      reinterpret_cast<char*>(arm_bti_test_function_invalid_offset) -
+      reinterpret_cast<char*>(arm_bti_test_function);
+  memcpy(reinterpret_cast<void*>(buffer),
+         reinterpret_cast<void*>(arm_bti_test_function), function_range);
+
+  // Next re-protect the page.
+  SetSystemPagesAccess(
+      buffer, PageAllocationGranularity(),
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kReadExecuteProtected));
+
+  using BTITestFunction = int64_t (*)(int64_t);
+
+  // Attempt to call the function through the BTI-enabled entrypoint. Confirm
+  // that it works.
+  BTITestFunction bti_enabled_fn = reinterpret_cast<BTITestFunction>(buffer);
+  BTITestFunction bti_invalid_fn =
+      reinterpret_cast<BTITestFunction>(buffer + invalid_offset);
+  EXPECT_EQ(bti_enabled_fn(15), 18);
+  // Next, attempt to call the function without the entrypoint.
+  EXPECT_EXIT({ bti_invalid_fn(15); }, testing::KilledBySignal(SIGILL),
+              "");  // Should crash with SIGILL.
+  FreePages(buffer, PageAllocationGranularity());
+#else
+  PA_NOTREACHED();
+#endif
+}
+
+TEST(PartitionAllocPageAllocatorTest,
+     AllocAndFreePagesWithPageReadWriteTaggedSynchronous) {
+  // This test checks that a page allocated with
+  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
+  // if allocated on a system which supports the
+  // Armv8.5 Memory Tagging Extension.
+  base::CPU cpu;
+  if (!cpu.has_mte()) {
+    // Skip this test if there's no MTE.
+#if BUILDFLAG(IS_IOS)
+    return;
+#else
+    GTEST_SKIP();
+#endif
+  }
+
+#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  // Assign an 0x1 tag to the first granule of buffer.
+  int* buffer1 = __arm_mte_increment_tag(buffer0, 0x1);
+  EXPECT_NE(buffer0, buffer1);
+  __arm_mte_set_tag(buffer1);
+  // Retrieve the tag to ensure that it's set.
+  buffer1 = __arm_mte_get_tag(buffer0);
+  // Prove that the tag is different (if they're the same, the test won't work).
+  ASSERT_NE(buffer0, buffer1);
+  TagViolationReportingMode parent_tagging_mode =
+      GetMemoryTaggingModeForCurrentThread();
+  EXPECT_EXIT(
+      {
+  // Switch to synchronous mode.
+#if BUILDFLAG(IS_ANDROID)
+        ChangeMemoryTaggingModeForAllThreadsPerProcess(
+            TagViolationReportingMode::kSynchronous);
+#else
+        ChangeMemoryTaggingModeForCurrentThread(
+            TagViolationReportingMode::kSynchronous);
+#endif  // BUILDFLAG(IS_ANDROID)
+        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+                  TagViolationReportingMode::kSynchronous);
+        // Write to the buffer using its previous tag. A segmentation fault
+        // should be delivered.
+        *buffer0 = 42;
+      },
+      testing::KilledBySignal(SIGSEGV), "");
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
+  FreePages(buffer, PageAllocationGranularity());
+#else
+  PA_NOTREACHED();
+#endif
+}
+
+TEST(PartitionAllocPageAllocatorTest,
+     AllocAndFreePagesWithPageReadWriteTaggedAsynchronous) {
+  // This test checks that a page allocated with
+  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
+  // if allocated on a system which supports MTE.
+  base::CPU cpu;
+  if (!cpu.has_mte()) {
+    // Skip this test if there's no MTE.
+#if BUILDFLAG(IS_IOS)
+    return;
+#else
+    GTEST_SKIP();
+#endif
+  }
+
+#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  __arm_mte_set_tag(__arm_mte_increment_tag(buffer0, 0x1));
+  int* buffer1 = __arm_mte_get_tag(buffer0);
+  EXPECT_NE(buffer0, buffer1);
+  TagViolationReportingMode parent_tagging_mode =
+      GetMemoryTaggingModeForCurrentThread();
+  EXPECT_EXIT(
+      {
+  // Switch to asynchronous mode.
+#if BUILDFLAG(IS_ANDROID)
+        ChangeMemoryTaggingModeForAllThreadsPerProcess(
+            TagViolationReportingMode::kAsynchronous);
+#else
+        ChangeMemoryTaggingModeForCurrentThread(
+            TagViolationReportingMode::kAsynchronous);
+#endif  // BUILDFLAG(IS_ANDROID)
+        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+                  TagViolationReportingMode::kAsynchronous);
+        // Write to the buffer using its previous tag. A fault should be
+        // generated at this point but we may not notice straight away...
+        *buffer0 = 42;
+        EXPECT_EQ(42, *buffer0);
+        PA_LOG(ERROR) << "=";  // Until we receive control back from the kernel
+                               // (e.g. on a system call).
+      },
+      testing::KilledBySignal(SIGSEGV), "");
+  FreePages(buffer, PageAllocationGranularity());
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
+#else
+  PA_NOTREACHED();
+#endif
+}
+
+// Test permission setting on POSIX, where we can set a trap handler.
+#if BUILDFLAG(IS_POSIX)
+
+namespace {
+sigjmp_buf g_continuation;
+
+void SignalHandler(int signal, siginfo_t* info, void*) {
+  siglongjmp(g_continuation, 1);
+}
+}  // namespace
+
+// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
+#if BUILDFLAG(IS_APPLE)
+#define EXTRA_FAULT_BEGIN_ACTION() \
+  struct sigaction old_bus_action; \
+  sigaction(SIGBUS, &action, &old_bus_action);
+#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
+#else
+#define EXTRA_FAULT_BEGIN_ACTION()
+#define EXTRA_FAULT_END_ACTION()
+#endif
+
+// Install a signal handler so we can catch the fault we're about to trigger.
+#define FAULT_TEST_BEGIN()                  \
+  struct sigaction action = {};             \
+  struct sigaction old_action = {};         \
+  action.sa_sigaction = SignalHandler;      \
+  sigemptyset(&action.sa_mask);             \
+  action.sa_flags = SA_SIGINFO;             \
+  sigaction(SIGSEGV, &action, &old_action); \
+  EXTRA_FAULT_BEGIN_ACTION();               \
+  int const save_sigs = 1;                  \
+  if (!sigsetjmp(g_continuation, save_sigs)) {
+// Fault generating code goes here...
+
+// Handle when sigsetjmp returns nonzero (we are returning from our handler).
+#define FAULT_TEST_END()                      \
+  }                                           \
+  else {                                      \
+    sigaction(SIGSEGV, &old_action, nullptr); \
+    EXTRA_FAULT_END_ACTION();                 \
+  }
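+
+// Usage pattern, as in the tests below: FAULT_TEST_BEGIN(), then code that
+// is expected to fault followed by EXPECT_TRUE(false), then FAULT_TEST_END().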
+
+TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+
+  FAULT_TEST_BEGIN()
+
+  // Reading from buffer should fault.
+  // Volatile prevents the compiler from eliminating the load by folding
+  // buffer0_contents == *buffer0.
+  volatile int* buffer0 = reinterpret_cast<int*>(buffer);
+  int buffer0_contents = *buffer0;
+  EXPECT_EQ(buffer0_contents, *buffer0);
+  EXPECT_TRUE(false);
+
+  FAULT_TEST_END()
+
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+// TODO(crbug.com/1291888): Understand why we can't read from Read-Execute pages
+// on iOS.
+#if BUILDFLAG(IS_IOS)
+#define MAYBE_ReadExecutePages DISABLED_ReadExecutePages
+#else
+#define MAYBE_ReadExecutePages ReadExecutePages
+#endif  // BUILDFLAG(IS_IOS)
+TEST(PartitionAllocPageAllocatorTest, MAYBE_ReadExecutePages) {
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadExecute),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  // Reading from buffer should succeed.
+  int buffer0_contents = *buffer0;
+
+  FAULT_TEST_BEGIN()
+
+  // Writing to buffer should fault.
+  *buffer0 = ~buffer0_contents;
+  EXPECT_TRUE(false);
+
+  FAULT_TEST_END()
+
+  // Make sure no write occurred.
+  EXPECT_EQ(buffer0_contents, *buffer0);
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+#endif  // BUILDFLAG(IS_POSIX)
+
+#if defined(LINUX_NAME_REGION)
+TEST(PartitionAllocPageAllocatorTest, PageTagging) {
+  size_t size = PageAllocationGranularity();
+  uintptr_t buffer =
+      AllocPages(size, PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kChromium);
+  ASSERT_TRUE(buffer);
+
+  auto is_region_named = [](uintptr_t start_address) {
+    std::string proc_maps;
+    EXPECT_TRUE(::base::debug::ReadProcMaps(&proc_maps));
+    std::vector<::base::debug::MappedMemoryRegion> regions;
+    EXPECT_TRUE(::base::debug::ParseProcMaps(proc_maps, &regions));
+
+    bool found = false;
+    for (const auto& region : regions) {
+      if (region.start == start_address) {
+        found = true;
+        return "[anon:chromium]" == region.path;
+      }
+    }
+    EXPECT_TRUE(found);
+    return false;
+  };
+
+  bool before = is_region_named(buffer);
+  DecommitAndZeroSystemPages(buffer, size);
+  bool after = is_region_named(buffer);
+
+#if BUILDFLAG(IS_ANDROID)
+  EXPECT_TRUE(before) << "VMA tagging should always work on Android";
+#endif
+  // When not running on Android, the prctl() command may be defined in the
+  // headers, but not be implemented by the host kernel.
+  EXPECT_EQ(before, after);
+
+  FreePages(buffer, size);
+}
+#endif  // defined(LINUX_NAME_REGION)
+
+TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
+  if (!DecommittedMemoryIsAlwaysZeroed()) {
+    return;
+  }
+
+  size_t size = PageAllocationGranularity();
+  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
+                                PageAccessibilityConfiguration(
+                                    PageAccessibilityConfiguration::kReadWrite),
+                                PageTag::kChromium);
+  ASSERT_TRUE(buffer);
+
+  memset(reinterpret_cast<void*>(buffer), 42, size);
+
+  DecommitSystemPages(buffer, size,
+                      PageAccessibilityDisposition::kAllowKeepForPerf);
+  RecommitSystemPages(buffer, size,
+                      PageAccessibilityConfiguration(
+                          PageAccessibilityConfiguration::kReadWrite),
+                      PageAccessibilityDisposition::kAllowKeepForPerf);
+
+  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
+  uint32_t sum = 0;
+  for (size_t i = 0; i < size; i++) {
+    sum += recommitted_buffer[i];
+  }
+  EXPECT_EQ(0u, sum) << "Data was not erased";
+
+  FreePages(buffer, size);
+}
+
+TEST(PartitionAllocPageAllocatorTest, DecommitAndZero) {
+  size_t size = PageAllocationGranularity();
+  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
+                                PageAccessibilityConfiguration(
+                                    PageAccessibilityConfiguration::kReadWrite),
+                                PageTag::kChromium);
+  ASSERT_TRUE(buffer);
+
+  memset(reinterpret_cast<void*>(buffer), 42, size);
+
+  DecommitAndZeroSystemPages(buffer, size);
+
+// Test permission setting on POSIX, where we can set a trap handler.
+#if BUILDFLAG(IS_POSIX)
+
+  FAULT_TEST_BEGIN()
+
+  // Reading from buffer should now fault.
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  int buffer0_contents = *buffer0;
+  EXPECT_EQ(buffer0_contents, *buffer0);
+  EXPECT_TRUE(false);
+
+  FAULT_TEST_END()
+
+#endif
+
+  // Clients of the DecommitAndZero API (in particular, V8) currently just
+  // call SetSystemPagesAccess to mark the region as accessible again, so we
+  // use that here as well.
+  SetSystemPagesAccess(buffer, size,
+                       PageAccessibilityConfiguration(
+                           PageAccessibilityConfiguration::kReadWrite));
+
+  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
+  uint32_t sum = 0;
+  for (size_t i = 0; i < size; i++) {
+    sum += recommitted_buffer[i];
+  }
+  EXPECT_EQ(0u, sum) << "Data was not erased";
+
+  FreePages(buffer, size);
+}
+
+TEST(PartitionAllocPageAllocatorTest, MappedPagesAccounting) {
+  size_t size = PageAllocationGranularity();
+  // Ask for a large alignment to make sure that trimming doesn't change the
+  // accounting.
+  size_t alignment = 128 * PageAllocationGranularity();
+  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
+                      alignment - PageAllocationGranularity()};
+
+  size_t mapped_size_before = GetTotalMappedSize();
+
+  for (size_t offset : offsets) {
+    uintptr_t data = AllocPagesWithAlignOffset(
+        0, size, alignment, offset,
+        PageAccessibilityConfiguration(
+            PageAccessibilityConfiguration::kInaccessible),
+        PageTag::kChromium);
+    ASSERT_TRUE(data);
+
+    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());
+
+    DecommitSystemPages(data, size,
+                        PageAccessibilityDisposition::kAllowKeepForPerf);
+    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());
+
+    FreePages(data, size);
+    EXPECT_EQ(mapped_size_before, GetTotalMappedSize());
+  }
+}
+
+TEST(PartitionAllocPageAllocatorTest, AllocInaccessibleWillJitLater) {
+  // Verify that kInaccessibleWillJitLater allows read/write, and read/execute
+  // permissions to be set.
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessibleWillJitLater),
+                 PageTag::kChromium);
+  EXPECT_TRUE(
+      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
+                              PageAccessibilityConfiguration(
+                                  PageAccessibilityConfiguration::kReadWrite)));
+  EXPECT_TRUE(TrySetSystemPagesAccess(
+      buffer, PageAllocationGranularity(),
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kReadExecute)));
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+#if BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
+// TODO(crbug.com/1452151): Fix test to GTEST_SKIP() if MAP_JIT is in-use,
+// or to be run otherwise, since kReadWriteExecute is used in some other
+// configurations.
+#define MAYBE_AllocReadWriteExecute DISABLED_AllocReadWriteExecute
+#else
+#define MAYBE_AllocReadWriteExecute AllocReadWriteExecute
+#endif  // BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
+TEST(PartitionAllocPageAllocatorTest, MAYBE_AllocReadWriteExecute) {
+  // Verify that kReadWriteExecute is similarly functional.
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteExecute),
+                 PageTag::kChromium);
+  EXPECT_TRUE(
+      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
+                              PageAccessibilityConfiguration(
+                                  PageAccessibilityConfiguration::kReadWrite)));
+  EXPECT_TRUE(TrySetSystemPagesAccess(
+      buffer, PageAllocationGranularity(),
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kReadExecute)));
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_address_space.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_address_space.cc
new file mode 100644
index 0000000..8df4cc0
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_address_space.cc
@@ -0,0 +1,431 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <ostream>
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/compressed_pointer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_IOS)
+#include <mach-o/dyld.h>
+#endif
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#endif  // BUILDFLAG(IS_WIN)
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
+#include <sys/mman.h>
+#endif
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+
+namespace {
+
+#if BUILDFLAG(IS_WIN)
+
+PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
+  PA_NO_CODE_FOLDING();
+  PA_CHECK(false);
+}
+
+PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
+  PA_NO_CODE_FOLDING();
+  PA_CHECK(false);
+}
+#endif  // BUILDFLAG(IS_WIN)
+
+PA_NOINLINE void HandlePoolAllocFailure() {
+  PA_NO_CODE_FOLDING();
+  uint32_t alloc_page_error_code = GetAllocPageErrorCode();
+  PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
+  // It's important to easily differentiate these two failures on Windows, so
+  // crash with different stacks.
+#if BUILDFLAG(IS_WIN)
+  if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
+    // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
+    // it must be VA space exhaustion.
+    HandlePoolAllocFailureOutOfVASpace();
+  } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
+    // Should not happen, since as of Windows 8.1+, reserving address space
+    // should not be charged against the commit limit, aside from a very small
+    // amount per 64kiB block. Keep this path anyway, to check in crash reports.
+    HandlePoolAllocFailureOutOfCommitCharge();
+  } else
+#endif  // BUILDFLAG(IS_WIN)
+  {
+    PA_CHECK(false);
+  }
+}
+
+}  // namespace
+
+PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
+std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
+#endif
+
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+#if !BUILDFLAG(IS_IOS)
+#error Dynamic pool size is only supported on iOS.
+#endif
+
+namespace {
+bool IsIOSTestProcess() {
+  // On iOS, only applications with the extended virtual addressing entitlement
+  // can use a large address space. Since Earl Grey test runner apps cannot get
+  // entitlements, they must use a much smaller pool size. Similarly,
+  // integration tests for ChromeWebView end up with two PartitionRoots since
+  // both the integration tests and ChromeWebView have a copy of base/. Even
+  // with the entitlement, there is insufficient address space for two
+  // PartitionRoots, so a smaller pool size is needed.
+
+  // Use a fixed buffer size to avoid allocation inside the allocator.
+  constexpr size_t path_buffer_size = 8192;
+  char executable_path[path_buffer_size];
+
+  uint32_t executable_length = path_buffer_size;
+  int rv = _NSGetExecutablePath(executable_path, &executable_length);
+  PA_CHECK(!rv);
+  size_t executable_path_length =
+      std::char_traits<char>::length(executable_path);
+
+  auto has_suffix = [&](const char* suffix) -> bool {
+    size_t suffix_length = std::char_traits<char>::length(suffix);
+    if (executable_path_length < suffix_length) {
+      return false;
+    }
+    return std::char_traits<char>::compare(
+               executable_path + (executable_path_length - suffix_length),
+               suffix, suffix_length) == 0;
+  };
+
+  return has_suffix("Runner") || has_suffix("ios_web_view_inttests");
+}
+}  // namespace
+
+PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
+  return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
+                            : kRegularPoolSize;
+}
+PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
+  return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
+}
+#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+
+void PartitionAddressSpace::Init() {
+  if (IsInitialized()) {
+    return;
+  }
+
+  size_t regular_pool_size = RegularPoolSize();
+  size_t brp_pool_size = BRPPoolSize();
+
+#if BUILDFLAG(GLUE_CORE_POOLS)
+  // Gluing core pools (regular & BRP) makes sense only when both pools are of
+  // the same size. This is the only way we can check belonging to either of
+  // the two with a single bitmask operation.
+  PA_CHECK(regular_pool_size == brp_pool_size);
+
+  // TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
+  int pools_fd = -1;
+
+  size_t glued_pool_sizes = regular_pool_size * 2;
+  // Note, the BRP pool must be preceded by a "forbidden zone", which is
+  // conveniently taken care of by the last guard page of the regular pool.
+  setup_.regular_pool_base_address_ =
+      AllocPages(glued_pool_sizes, glued_pool_sizes,
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kPartitionAlloc, pools_fd);
+  if (!setup_.regular_pool_base_address_) {
+    HandlePoolAllocFailure();
+  }
+  setup_.brp_pool_base_address_ =
+      setup_.regular_pool_base_address_ + regular_pool_size;
+#else  // BUILDFLAG(GLUE_CORE_POOLS)
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+  int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
+#else
+  int regular_pool_fd = -1;
+#endif
+  setup_.regular_pool_base_address_ =
+      AllocPages(regular_pool_size, regular_pool_size,
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kPartitionAlloc, regular_pool_fd);
+  if (!setup_.regular_pool_base_address_) {
+    HandlePoolAllocFailure();
+  }
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+  int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
+#else
+  int brp_pool_fd = -1;
+#endif
+  // Reserve an extra allocation granularity unit before the BRP pool, but keep
+  // the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
+  // is a valid pointer, and having a "forbidden zone" before the BRP pool
+  // prevents such a pointer from "sneaking into" the pool.
+  const size_t kForbiddenZoneSize = PageAllocationGranularity();
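+  // With alignment = brp_pool_size and align_offset = brp_pool_size -
+  // kForbiddenZoneSize, the reservation starts one granularity unit before a
+  // brp_pool_size boundary, so the pool itself (base_address +
+  // kForbiddenZoneSize, set below) remains aligned at BRPPoolSize().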
+  uintptr_t base_address = AllocPagesWithAlignOffset(
+      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
+      brp_pool_size - kForbiddenZoneSize,
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kInaccessible),
+      PageTag::kPartitionAlloc, brp_pool_fd);
+  if (!base_address) {
+    HandlePoolAllocFailure();
+  }
+  setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
+#endif  // BUILDFLAG(GLUE_CORE_POOLS)
+
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
+  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
+#if BUILDFLAG(GLUE_CORE_POOLS)
+  // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
+  // regular pool, effectively forming one virtual pool of twice the size.
+  // Adjust the mask appropriately.
+  setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
+  PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
+#endif
+#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+
+  AddressPoolManager::GetInstance().Add(
+      kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
+  AddressPoolManager::GetInstance().Add(
+      kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
+
+  // Sanity check pool alignment.
+  PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
+  PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
+#if BUILDFLAG(GLUE_CORE_POOLS)
+  PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
+#endif
+
+  // Sanity check pool belonging.
+  PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
+  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
+  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
+                            regular_pool_size - 1));
+  PA_DCHECK(
+      !IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
+  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
+  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
+  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
+  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
+#if BUILDFLAG(GLUE_CORE_POOLS)
+  PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
+  PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
+  PA_DCHECK(
+      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size - 1));
+  PA_DCHECK(
+      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
+  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
+  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
+  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
+  PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
+#endif  // BUILDFLAG(GLUE_CORE_POOLS)
+
+#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  // Reserve memory for PCScan quarantine card table.
+  uintptr_t requested_address = setup_.regular_pool_base_address_;
+  uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
+      kRegularPoolHandle, requested_address, kSuperPageSize);
+  PA_CHECK(requested_address == actual_address)
+      << "QuarantineCardTable is required to be allocated at the beginning of "
+         "the regular pool";
+#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+  // Reserve memory for the shadow pools.
+  uintptr_t regular_pool_shadow_address =
+      AllocPages(regular_pool_size, regular_pool_size,
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kPartitionAlloc, regular_pool_fd);
+  regular_pool_shadow_offset_ =
+      regular_pool_shadow_address - setup_.regular_pool_base_address_;
+
+  uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
+      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
+      brp_pool_size - kForbiddenZoneSize,
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kInaccessible),
+      PageTag::kPartitionAlloc, brp_pool_fd);
+  brp_pool_shadow_offset_ =
+      brp_pool_shadow_address - setup_.brp_pool_base_address_;
+#endif
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+  CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+}
+
+void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
+                                                 size_t size) {
+  // The ConfigurablePool must only be initialized once.
+  PA_CHECK(!IsConfigurablePoolInitialized());
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // It's possible that the thread isolated pool has been initialized first, in
+  // which case the setup_ memory has been made read-only. Remove the protection
+  // temporarily.
+  if (IsThreadIsolatedPoolInitialized()) {
+    UnprotectThreadIsolatedGlobals();
+  }
+#endif
+
+  PA_CHECK(pool_base);
+  PA_CHECK(size <= kConfigurablePoolMaxSize);
+  PA_CHECK(size >= kConfigurablePoolMinSize);
+  PA_CHECK(base::bits::IsPowerOfTwo(size));
+  PA_CHECK(pool_base % size == 0);
+
+  setup_.configurable_pool_base_address_ = pool_base;
+  setup_.configurable_pool_base_mask_ = ~(size - 1);
+
+  AddressPoolManager::GetInstance().Add(
+      kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // Put the metadata protection back in place.
+  if (IsThreadIsolatedPoolInitialized()) {
+    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
+  }
+#endif
+}
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+void PartitionAddressSpace::InitThreadIsolatedPool(
+    ThreadIsolationOption thread_isolation) {
+  // The ThreadIsolated pool can't be initialized with conflicting settings.
+  if (IsThreadIsolatedPoolInitialized()) {
+    PA_CHECK(setup_.thread_isolation_ == thread_isolation);
+    return;
+  }
+
+  size_t pool_size = ThreadIsolatedPoolSize();
+  setup_.thread_isolated_pool_base_address_ =
+      AllocPages(pool_size, pool_size,
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kPartitionAlloc);
+  if (!setup_.thread_isolated_pool_base_address_) {
+    HandlePoolAllocFailure();
+  }
+
+  PA_DCHECK(!(setup_.thread_isolated_pool_base_address_ & (pool_size - 1)));
+  setup_.thread_isolation_ = thread_isolation;
+  AddressPoolManager::GetInstance().Add(
+      kThreadIsolatedPoolHandle, setup_.thread_isolated_pool_base_address_,
+      pool_size);
+
+  PA_DCHECK(
+      !IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ - 1));
+  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_));
+  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
+                                   pool_size - 1));
+  PA_DCHECK(!IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
+                                    pool_size));
+
+  // TODO(1362969): support PA_ENABLE_SHADOW_METADATA
+}
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+void PartitionAddressSpace::UninitForTesting() {
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  UninitThreadIsolatedPoolForTesting();  // IN-TEST
+#endif
+#if BUILDFLAG(GLUE_CORE_POOLS)
+  // The core pools (regular & BRP) were allocated using a single allocation of
+  // double size.
+  FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
+#else   // BUILDFLAG(GLUE_CORE_POOLS)
+  FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
+  // For BRP pool, the allocation region includes a "forbidden zone" before the
+  // pool.
+  const size_t kForbiddenZoneSize = PageAllocationGranularity();
+  FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
+            BRPPoolSize() + kForbiddenZoneSize);
+#endif  // BUILDFLAG(GLUE_CORE_POOLS)
+  // Do not free pages for the configurable pool, because its memory is owned
+  // by someone else, but deinitialize it nonetheless.
+  setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
+  setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
+  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
+  setup_.configurable_pool_base_mask_ = 0;
+  AddressPoolManager::GetInstance().ResetForTesting();
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+  CompressedPointerBaseGlobal::ResetBaseForTesting();
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+}
+
+void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // It's possible that the thread isolated pool has been initialized first, in
+  // which case the setup_ memory has been made read-only. Remove the protection
+  // temporarily.
+  if (IsThreadIsolatedPoolInitialized()) {
+    UnprotectThreadIsolatedGlobals();
+  }
+#endif
+  AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
+  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
+  setup_.configurable_pool_base_mask_ = 0;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // Put the metadata protection back in place.
+  if (IsThreadIsolatedPoolInitialized()) {
+    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
+  }
+#endif
+}
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
+  if (IsThreadIsolatedPoolInitialized()) {
+    UnprotectThreadIsolatedGlobals();
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    ThreadIsolationSettings::settings.enabled = false;
+#endif
+
+    FreePages(setup_.thread_isolated_pool_base_address_,
+              ThreadIsolatedPoolSize());
+    AddressPoolManager::GetInstance().Remove(kThreadIsolatedPoolHandle);
+    setup_.thread_isolated_pool_base_address_ = kUninitializedPoolBaseAddress;
+    setup_.thread_isolation_.enabled = false;
+  }
+}
+#endif
+
+#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+
+PageCharacteristics page_characteristics;
+
+#endif  // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h b/base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h
new file mode 100644
index 0000000..dec9695
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h
@@ -0,0 +1,454 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
+
+#include <cstddef>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#endif
+
+// The feature is not applicable to a 32-bit address space.
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+
+namespace partition_alloc {
+
+namespace internal {
+
+// Manages PartitionAlloc address space, which is split into pools.
+// See `glossary.md`.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
+ public:
+  // Represents pool-specific information about a given address.
+  struct PoolInfo {
+    pool_handle handle;
+    uintptr_t base;
+    uintptr_t offset;
+  };
+
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+  PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
+    return setup_.regular_pool_base_mask_;
+  }
+#else
+  PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
+    return kRegularPoolBaseMask;
+  }
+#endif
+
+  PA_ALWAYS_INLINE static PoolInfo GetPoolInfo(uintptr_t address) {
+    // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    PA_DCHECK(!IsInBRPPool(address));
+#endif
+    pool_handle pool = kNullPoolHandle;
+    uintptr_t base = 0;
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    if (IsInBRPPool(address)) {
+      pool = kBRPPoolHandle;
+      base = setup_.brp_pool_base_address_;
+    } else
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+      if (IsInRegularPool(address)) {
+        pool = kRegularPoolHandle;
+        base = setup_.regular_pool_base_address_;
+      } else if (IsInConfigurablePool(address)) {
+        PA_DCHECK(IsConfigurablePoolInitialized());
+        pool = kConfigurablePoolHandle;
+        base = setup_.configurable_pool_base_address_;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+      } else if (IsInThreadIsolatedPool(address)) {
+        pool = kThreadIsolatedPoolHandle;
+        base = setup_.thread_isolated_pool_base_address_;
+#endif
+      } else {
+        PA_NOTREACHED();
+      }
+    return PoolInfo{.handle = pool, .base = base, .offset = address - base};
+  }
+  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() {
+    return kConfigurablePoolMaxSize;
+  }
+  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() {
+    return kConfigurablePoolMinSize;
+  }
+
+  // Initialize pools (except for the configurable one).
+  //
+  // This function must only be called from the main thread.
+  static void Init();
+  // Initialize the ConfigurablePool at the given address |pool_base|. It must
+  // be aligned to the size of the pool. The size must be a power of two and
+  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
+  //
+  // This function must only be called from the main thread.
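+  //
+  // A minimal sketch of a call, assuming the embedder has already reserved a
+  // suitably sized and aligned |mapping| (the name is illustrative only):
+  //   PartitionAddressSpace::InitConfigurablePool(
+  //       reinterpret_cast<uintptr_t>(mapping), kConfigurablePoolMinSize);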
+  static void InitConfigurablePool(uintptr_t pool_base, size_t size);
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  static void InitThreadIsolatedPool(ThreadIsolationOption thread_isolation);
+  static void UninitThreadIsolatedPoolForTesting();
+#endif
+  static void UninitForTesting();
+  static void UninitConfigurablePoolForTesting();
+
+  PA_ALWAYS_INLINE static bool IsInitialized() {
+    // Either both the regular and BRP pools are initialized, or neither is.
+    // The configurable and thread-isolated pools are initialized separately.
+    if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
+      PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
+      return true;
+    }
+
+    PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
+    return false;
+  }
+
+  PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() {
+    return setup_.configurable_pool_base_address_ !=
+           kUninitializedPoolBaseAddress;
+  }
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  PA_ALWAYS_INLINE static bool IsThreadIsolatedPoolInitialized() {
+    return setup_.thread_isolated_pool_base_address_ !=
+           kUninitializedPoolBaseAddress;
+  }
+#endif
+
+  // Returns false for nullptr.
+  PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+    const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
+#else
+    constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
+#endif
+    return (address & regular_pool_base_mask) ==
+           setup_.regular_pool_base_address_;
+  }
+
+  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
+    return setup_.regular_pool_base_address_;
+  }
+
+  // Returns false for nullptr.
+  PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+    const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
+#else
+    constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
+#endif
+    return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
+  }
+
+#if BUILDFLAG(GLUE_CORE_POOLS)
+  // Checks whether the address belongs to either regular or BRP pool.
+  // Returns false for nullptr.
+  PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+    const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
+#else
+    // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
+    // regular pool, effectively forming one virtual pool of twice the size.
+    // Adjust the mask appropriately.
+    constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
+#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+    bool ret =
+        (address & core_pools_base_mask) == setup_.regular_pool_base_address_;
+    PA_DCHECK(ret == (IsInRegularPool(address) || IsInBRPPool(address)));
+    return ret;
+  }
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+  PA_ALWAYS_INLINE static size_t CorePoolsSize() {
+    return RegularPoolSize() * 2;
+  }
+#else
+  PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
+    return RegularPoolSize() * 2;
+  }
+#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+#endif  // BUILDFLAG(GLUE_CORE_POOLS)
+
+  PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
+    PA_DCHECK(IsInBRPPool(address));
+    return address - setup_.brp_pool_base_address_;
+  }
+
+  // Returns false for nullptr.
+  PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) {
+    return (address & setup_.configurable_pool_base_mask_) ==
+           setup_.configurable_pool_base_address_;
+  }
+
+  PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() {
+    return setup_.configurable_pool_base_address_;
+  }
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // Returns false for nullptr.
+  PA_ALWAYS_INLINE static bool IsInThreadIsolatedPool(uintptr_t address) {
+    return (address & kThreadIsolatedPoolBaseMask) ==
+           setup_.thread_isolated_pool_base_address_;
+  }
+#endif
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+  PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
+    if (pool == kRegularPoolHandle) {
+      return regular_pool_shadow_offset_;
+    } else if (pool == kBRPPoolHandle) {
+      return brp_pool_shadow_offset_;
+    } else {
+      // TODO(crbug.com/1362969): Add shadow for configurable pool as well.
+      // Shadow is not created for ConfigurablePool for now, so this part should
+      // be unreachable.
+      PA_NOTREACHED();
+    }
+  }
+#endif
+
+  // PartitionAddressSpace is a static-only class.
+  PartitionAddressSpace() = delete;
+  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
+  void* operator new(size_t) = delete;
+  void* operator new(size_t, void*) = delete;
+
+ private:
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+  PA_ALWAYS_INLINE static size_t RegularPoolSize();
+  PA_ALWAYS_INLINE static size_t BRPPoolSize();
+#else
+  // The pool sizes should be as large as the maximum whenever possible.
+  PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
+    return kRegularPoolSize;
+  }
+  PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
+    return kBRPPoolSize;
+  }
+#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  PA_ALWAYS_INLINE static constexpr size_t ThreadIsolatedPoolSize() {
+    return kThreadIsolatedPoolSize;
+  }
+#endif
+
+  // On 64-bit systems, PA allocates from several contiguous, mutually disjoint
+  // pools. The BRP pool is where all allocations have a BRP ref-count, thus
+  // pointers into it can use BRP protection against UaF. Allocations in the
+  // other pools don't have that.
+  //
+  // Pool sizes have to be a power of two. Each pool will be aligned at its
+  // own size boundary.
+  //
+  // NOTE! The BRP pool must be preceded by an inaccessible region. This is to
+  // prevent a pointer to the end of a non-BRP-pool allocation from falling into
+  // the BRP pool, thus triggering the BRP mechanism and likely crashing. This
+  // "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
+  // allocation granularity unit.
+  //
+  // The ConfigurablePool is an optional Pool that can be created inside an
+  // existing mapping provided by the embedder. This Pool can be used when
+  // certain PA allocations must be located inside a given virtual address
+  // region. One use case for this Pool is V8 Sandbox, which requires that
+  // ArrayBuffers be located inside of it.
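+  //
+  // A rough sketch of the core-pool layout when GLUE_CORE_POOLS is on (sizes
+  // are configuration-dependent; the configurable and thread-isolated pools
+  // live in separate reservations):
+  //
+  //   base                          base + RegularPoolSize()
+  //   |        regular pool         |           BRP pool          |
+  //
+  // with |base| aligned to CorePoolsSize() == 2 * RegularPoolSize().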
+  static constexpr size_t kRegularPoolSize = kPoolMaxSize;
+  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
+  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize));
+  static_assert(base::bits::IsPowerOfTwo(kBRPPoolSize));
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  static constexpr size_t kThreadIsolatedPoolSize = kGiB / 4;
+  static_assert(base::bits::IsPowerOfTwo(kThreadIsolatedPoolSize));
+#endif
+  static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
+  static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
+  static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
+  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize));
+  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));
+
+#if BUILDFLAG(IS_IOS)
+
+#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+#error iOS is only supported with a dynamically sized GigaCage.
+#endif
+
+  // We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
+  // since the test process cannot use an extended virtual address space (see
+  // crbug.com/1250788).
+  static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
+  static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
+  static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
+  static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
+  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess));
+  static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
+#endif  // BUILDFLAG(IS_IOS)
+
+#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+  // Masks used to easily determine whether an address belongs to a pool.
+  static constexpr uintptr_t kRegularPoolOffsetMask =
+      static_cast<uintptr_t>(kRegularPoolSize) - 1;
+  static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
+  static constexpr uintptr_t kBRPPoolOffsetMask =
+      static_cast<uintptr_t>(kBRPPoolSize) - 1;
+  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
+#endif  // !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  static constexpr uintptr_t kThreadIsolatedPoolOffsetMask =
+      static_cast<uintptr_t>(kThreadIsolatedPoolSize) - 1;
+  static constexpr uintptr_t kThreadIsolatedPoolBaseMask =
+      ~kThreadIsolatedPoolOffsetMask;
+#endif
+
+  // This must be set to such a value that IsIn*Pool() always returns false when
+  // the pool isn't initialized.
+  static constexpr uintptr_t kUninitializedPoolBaseAddress =
+      static_cast<uintptr_t>(-1);
+
+  struct alignas(kPartitionCachelineSize) PA_THREAD_ISOLATED_ALIGN PoolSetup {
+    // Before PartitionAddressSpace::Init(), no allocations are made from a
+    // reserved address space. Therefore, set *_pool_base_address_ initially to
+    // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
+    constexpr PoolSetup() = default;
+
+    // Using a struct to enforce alignment and padding
+    uintptr_t regular_pool_base_address_ = kUninitializedPoolBaseAddress;
+    uintptr_t brp_pool_base_address_ = kUninitializedPoolBaseAddress;
+    uintptr_t configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    uintptr_t thread_isolated_pool_base_address_ =
+        kUninitializedPoolBaseAddress;
+#endif
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+    uintptr_t regular_pool_base_mask_ = 0;
+    uintptr_t brp_pool_base_mask_ = 0;
+#if BUILDFLAG(GLUE_CORE_POOLS)
+    uintptr_t core_pools_base_mask_ = 0;
+#endif
+#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+    uintptr_t configurable_pool_base_mask_ = 0;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    ThreadIsolationOption thread_isolation_;
+#endif
+  };
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  static_assert(sizeof(PoolSetup) % SystemPageSize() == 0,
+                "PoolSetup has to fill a page(s)");
+#else
+  static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
+                "PoolSetup has to fill a cacheline(s)");
+#endif
+
+  // See the comment describing the address layout above.
+  //
+  // These are write-once fields, frequently accessed thereafter. Make sure they
+  // don't share a cacheline with other, potentially writeable data, through
+  // alignment and padding.
+  static PoolSetup setup_ PA_CONSTINIT;
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+  static std::ptrdiff_t regular_pool_shadow_offset_;
+  static std::ptrdiff_t brp_pool_shadow_offset_;
+#endif
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // If we use thread isolation, we need to write-protect its metadata.
+  // Allow the function to get access to the PoolSetup.
+  friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
+#endif
+};
+
+PA_ALWAYS_INLINE PartitionAddressSpace::PoolInfo GetPoolInfo(
+    uintptr_t address) {
+  return PartitionAddressSpace::GetPoolInfo(address);
+}
+
+PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
+  return GetPoolInfo(address).handle;
+}
+
+PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
+  return PartitionAddressSpace::OffsetInBRPPool(address);
+}
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
+  return PartitionAddressSpace::ShadowPoolOffset(pool);
+}
+#endif
+
+}  // namespace internal
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
+  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
+#endif
+  return internal::PartitionAddressSpace::IsInRegularPool(address)
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+         || internal::PartitionAddressSpace::IsInBRPPool(address)
+#endif
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+         || internal::PartitionAddressSpace::IsInThreadIsolatedPool(address)
+#endif
+         || internal::PartitionAddressSpace::IsInConfigurablePool(address);
+}
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
+  return internal::PartitionAddressSpace::IsInRegularPool(address);
+}
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
+  return internal::PartitionAddressSpace::IsInBRPPool(address);
+}
+
+#if BUILDFLAG(GLUE_CORE_POOLS)
+// Checks whether the address belongs to either regular or BRP pool.
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
+  return internal::PartitionAddressSpace::IsInCorePools(address);
+}
+#endif  // BUILDFLAG(GLUE_CORE_POOLS)
+
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
+    uintptr_t address) {
+  return internal::PartitionAddressSpace::IsInConfigurablePool(address);
+}
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+// Returns false for nullptr.
+PA_ALWAYS_INLINE bool IsManagedByPartitionAllocThreadIsolatedPool(
+    uintptr_t address) {
+  return internal::PartitionAddressSpace::IsInThreadIsolatedPool(address);
+}
+#endif
+
+PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
+  return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
+}
+
+}  // namespace partition_alloc
+
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h
new file mode 100644
index 0000000..e808347
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h
@@ -0,0 +1,104 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_INL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_INL_H_
+
+#include <algorithm>
+#include <cstring>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/random.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+// Prefetch *x into memory.
+#if defined(__clang__) || defined(COMPILER_GCC)
+#define PA_PREFETCH(x) __builtin_prefetch(x)
+#else
+#define PA_PREFETCH(x)
+#endif
+
+namespace partition_alloc::internal {
+
+// This is a `memset` that resists being optimized away. Adapted from
+// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
+// depend on //third_party, and this is small enough.)
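+//
+// Typical use is scrubbing memory that is about to be freed or reused, e.g.
+// SecureMemset(ptr, 0, size). The empty asm statement below is intended to
+// keep the compiler from treating the memset as dead and eliding it. (Call
+// shown for illustration only.)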
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+// MSVC only supports inline assembly on x86. This preprocessor directive
+// is intended to be a replacement for the same.
+//
+// TODO(crbug.com/1351310): Make sure inlining doesn't degrade this into
+// a no-op or similar. The documentation doesn't say.
+#pragma optimize("", off)
+#endif
+PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
+  memset(ptr, value, size);
+
+#if !PA_CONFIG(IS_NONCLANG_MSVC)
+  // As best as we can tell, this is sufficient to break any optimisations that
+  // might try to eliminate "superfluous" memsets. If there's an easy way to
+  // detect memset_s, it would be better to use that.
+  __asm__ __volatile__("" : : "r"(ptr) : "memory");
+#endif  // !PA_CONFIG(IS_NONCLANG_MSVC)
+}
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+#pragma optimize("", on)
+#endif
+
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+// Used to memset() memory for debugging purposes only.
+PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
+  // Only set the first 512 kiB of the allocation. This is enough to detect
+  // uses of uninitialized / freed memory, and makes tests run significantly
+  // faster. Note that for direct-mapped allocations, memory is decommitted at
+  // free() time, so use of freed memory cannot happen.
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  LiftThreadIsolationScope lift_thread_isolation_restrictions;
+#endif
+  size_t size_to_memset = std::min(size, size_t{1} << 19);
+  memset(ptr, value, size_to_memset);
+}
+#endif  // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+
+// Returns true if we've hit the end of a random-length period. We don't want to
+// invoke `RandomValue` too often, because we call this function in a hot spot
+// (`Free`), and `RandomValue` incurs the cost of atomics.
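+//
+// A caller would typically use it to sample an expensive operation, e.g.
+// (purely illustrative, DoOccasionalCheck() is a placeholder):
+//   if (PA_UNLIKELY(RandomPeriod())) {
+//     DoOccasionalCheck();
+//   }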
+#if !BUILDFLAG(PA_DCHECK_IS_ON)
+PA_ALWAYS_INLINE bool RandomPeriod() {
+  static thread_local uint8_t counter = 0;
+  if (PA_UNLIKELY(counter == 0)) {
+    // It's OK to truncate this value.
+    counter = static_cast<uint8_t>(RandomValue());
+  }
+  // If `counter` is 0, this will wrap. That is intentional and OK.
+  counter--;
+  return counter == 0;
+}
+#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)
+
+PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
+  return UntagPtr(ptr);
+}
+PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(const void* object) {
+  // TODO(bartekn): Check that |object| is indeed an object start.
+  return ObjectInnerPtr2Addr(object);
+}
+PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
+  // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+  return TagAddr(slot_start);
+}
+PA_ALWAYS_INLINE uintptr_t SlotStartPtr2Addr(const void* slot_start) {
+  // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+  return UntagPtr(slot_start);
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_INL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.cc
new file mode 100644
index 0000000..780aa65
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.cc
@@ -0,0 +1,133 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+
+namespace partition_alloc {
+
+void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
+  // This is from page_allocator_constants.h and doesn't really fit here, but
+  // there isn't a centralized initialization function in page_allocator.cc, so
+  // there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
+  STATIC_ASSERT_OR_PA_CHECK(
+      (internal::SystemPageSize() & internal::SystemPageOffsetMask()) == 0,
+      "SystemPageSize() must be power of 2");
+
+  // Two partition pages are used as guard / metadata pages, so make sure the
+  // super page size is bigger.
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::PartitionPageSize() * 4 <= internal::kSuperPageSize,
+      "ok super page size");
+  STATIC_ASSERT_OR_PA_CHECK(
+      (internal::kSuperPageSize & internal::SystemPageOffsetMask()) == 0,
+      "ok super page multiple");
+  // Four system pages give us room to hack out a still-guard-paged piece
+  // of metadata in the middle of a guard partition page.
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::SystemPageSize() * 4 <= internal::PartitionPageSize(),
+      "ok partition page size");
+  STATIC_ASSERT_OR_PA_CHECK(
+      (internal::PartitionPageSize() & internal::SystemPageOffsetMask()) == 0,
+      "ok partition page multiple");
+  static_assert(sizeof(internal::PartitionPage) <= internal::kPageMetadataSize,
+                "PartitionPage should not be too big");
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::kPageMetadataSize * internal::NumPartitionPagesPerSuperPage() <=
+          internal::SystemPageSize(),
+      "page metadata fits in hole");
+
+  // Limit to prevent callers accidentally overflowing an int size.
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::MaxDirectMapped() <=
+          (1UL << 31) + internal::DirectMapAllocationGranularity(),
+      "maximum direct mapped allocation");
+
+  // Check that some of our zanier calculations worked out as expected.
+  static_assert(internal::kSmallestBucket == internal::kAlignment,
+                "generic smallest bucket");
+  static_assert(internal::kMaxBucketed == 983040, "generic max bucketed");
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
+      "System pages per slot span must be no greater than 16.");
+
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::GetPartitionRefCountIndexMultiplierShift() <
+          std::numeric_limits<size_t>::max() / 2,
+      "Calculation in GetPartitionRefCountIndexMultiplierShift() must not "
+      "underflow.");
+  // Check that the GetPartitionRefCountIndexMultiplierShift() calculation is
+  // correct.
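+  // For example, with 4 KiB system pages, 2 MiB super pages and a 4-byte
+  // PartitionRefCount (illustrative values only, not guaranteed by this
+  // build), the multiplier is 4096 / (4 * 512) = 2, i.e. a shift of 1.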
+  STATIC_ASSERT_OR_PA_CHECK(
+      (1 << internal::GetPartitionRefCountIndexMultiplierShift()) ==
+          (internal::SystemPageSize() /
+           (sizeof(internal::PartitionRefCount) *
+            (internal::kSuperPageSize / internal::SystemPageSize()))),
+      "Bitshift must match the intended multiplication.");
+  STATIC_ASSERT_OR_PA_CHECK(
+      ((sizeof(internal::PartitionRefCount) *
+        (internal::kSuperPageSize / internal::SystemPageSize()))
+       << internal::GetPartitionRefCountIndexMultiplierShift()) <=
+          internal::SystemPageSize(),
+      "PartitionRefCount Bitmap size must be smaller than or equal to "
+      "<= SystemPageSize().");
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+
+  PA_DCHECK(on_out_of_memory);
+  internal::g_oom_handling_function = on_out_of_memory;
+}
+
+void PartitionAllocGlobalUninitForTesting() {
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  internal::PartitionAddressSpace::UninitThreadIsolatedPoolForTesting();
+#endif
+  internal::g_oom_handling_function = nullptr;
+}
+
+PartitionAllocator::PartitionAllocator() = default;
+
+PartitionAllocator::~PartitionAllocator() {
+  MemoryReclaimer::Instance()->UnregisterPartition(&partition_root_);
+}
+
+void PartitionAllocator::init(PartitionOptions opts) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  PA_CHECK(opts.thread_cache == PartitionOptions::kDisabled)
+      << "Cannot use a thread cache when PartitionAlloc is malloc().";
+#endif
+  partition_root_.Init(opts);
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // The MemoryReclaimer won't have write access to the partition, so skip
+  // registration.
+  const bool use_memory_reclaimer = !opts.thread_isolation.enabled;
+#else
+  constexpr bool use_memory_reclaimer = true;
+#endif
+  if (use_memory_reclaimer) {
+    MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
+  }
+}
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h
new file mode 100644
index 0000000..c88946f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h
@@ -0,0 +1,69 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+
+// *** HOUSEKEEPING RULES ***
+//
+// Throughout PartitionAlloc code, we avoid using generic variable names like
+// |ptr| or |address|, and prefer names like |object| or |slot_start| instead.
+// This helps emphasize that terms like "object" and "slot" represent two
+// different worlds. "Slot" is an indivisible allocation unit, internal to
+// PartitionAlloc. It is generally represented as an address (uintptr_t), since
+// arithmetic operations on it aren't uncommon, and for that reason it isn't
+// MTE-tagged either. "Object" is the allocated memory that the app is given via
+// interfaces like Alloc(), Free(), etc. An object is fully contained within a
+// slot, and may be surrounded by internal PartitionAlloc structures or empty
+// space. It is generally represented as a pointer to its beginning (most
+// commonly void*), and is MTE-tagged so it's safe to access.
+//
+// The best way to transition between these two worlds is via
+// PartitionRoot::ObjectToSlotStart() and ::SlotStartToObject(). These take care
+// of shifting between slot/object start, MTE-tagging/untagging and the cast for
+// you. There are cases where these functions are insufficient. Internal
+// PartitionAlloc structures, like free-list pointers, BRP ref-count, cookie,
+// etc. are located in-slot, thus accessing them requires an MTE tag.
+// SlotStartPtr2Addr() and SlotStartAddr2Ptr() take care of this.
+// There are cases where we have to do pointer arithmetic on an object pointer
+// (like checking whether it belongs to a pool, etc.), in which case we want to
+// strip the MTE tag. ObjectInnerPtr2Addr() and ObjectPtr2Addr() take care of
+// that.
+//
+// Avoid using UntagPtr/Addr() and TagPtr/Addr() directly, if possible. And
+// definitely avoid using reinterpret_cast between uintptr_t and pointer worlds.
+// When you do, add a comment explaining why it's safe from the point of MTE
+// tagging.
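+//
+// An illustrative sketch of the conversions described above (|root| and
+// |object| are hypothetical variables):
+//   uintptr_t slot_start = root->ObjectToSlotStart(object);   // object->slot
+//   void* in_slot = internal::SlotStartAddr2Ptr(slot_start);  // tagged access
+//   uintptr_t addr = internal::ObjectPtr2Addr(object);        // untagged addr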
+
+namespace partition_alloc {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void PartitionAllocGlobalUninitForTesting();
+
+struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocator {
+  PartitionAllocator();
+  explicit PartitionAllocator(PartitionOptions opts) { init(opts); }
+  ~PartitionAllocator();
+
+  void init(PartitionOptions);
+
+  PA_ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
+  PA_ALWAYS_INLINE const PartitionRoot* root() const {
+    return &partition_root_;
+  }
+
+ private:
+  PartitionRoot partition_root_;
+};
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_allocation_data.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_allocation_data.h
new file mode 100644
index 0000000..ec47adf
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_allocation_data.h
@@ -0,0 +1,93 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_ALLOCATION_DATA_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_ALLOCATION_DATA_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+
+namespace partition_alloc {
+
+// Definitions of various parameters of override and observer hooks. The
+// allocation and free paths differ in that the allocation override provides
+// data to the caller (we have an out parameter there), whereas the free
+// override just consumes the data.
+
+// AllocationNotificationData is the in-parameter of an allocation observer
+// hook.
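+// An observer would typically read it along these lines (the hook name and
+// RecordSample() are placeholders, not part of this API):
+//   void OnAllocation(const AllocationNotificationData& data) {
+//     RecordSample(data.address(), data.size(), data.type_name());
+//   }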
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AllocationNotificationData {
+ public:
+  AllocationNotificationData(void* address, size_t size, const char* type_name)
+      : address_(address), size_(size), type_name_(type_name) {}
+
+  void* address() const { return address_; }
+  size_t size() const { return size_; }
+  const char* type_name() const { return type_name_; }
+
+  // In the allocation observer path, it's interesting which reporting mode is
+  // enabled.
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  AllocationNotificationData& SetMteReportingMode(
+      TagViolationReportingMode mode) {
+    mte_reporting_mode_ = mode;
+    return *this;
+  }
+
+  TagViolationReportingMode mte_reporting_mode() const {
+    return mte_reporting_mode_;
+  }
+#else
+  constexpr TagViolationReportingMode mte_reporting_mode() const {
+    return TagViolationReportingMode::kUndefined;
+  }
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+ private:
+  void* address_ = nullptr;
+  size_t size_ = 0;
+  const char* type_name_ = nullptr;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  TagViolationReportingMode mte_reporting_mode_ =
+      TagViolationReportingMode::kUndefined;
+#endif
+};
+
+// FreeNotificationData is the in-parameter of a free observer hook.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) FreeNotificationData {
+ public:
+  constexpr explicit FreeNotificationData(void* address) : address_(address) {}
+
+  void* address() const { return address_; }
+
+  // In the free observer path, it's interesting which reporting mode is
+  // enabled.
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  FreeNotificationData& SetMteReportingMode(TagViolationReportingMode mode) {
+    mte_reporting_mode_ = mode;
+    return *this;
+  }
+
+  TagViolationReportingMode mte_reporting_mode() const {
+    return mte_reporting_mode_;
+  }
+#else
+  constexpr TagViolationReportingMode mte_reporting_mode() const {
+    return TagViolationReportingMode::kUndefined;
+  }
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+ private:
+  void* address_ = nullptr;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  TagViolationReportingMode mte_reporting_mode_ =
+      TagViolationReportingMode::kUndefined;
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+};
+
+}  // namespace partition_alloc
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_ALLOCATION_DATA_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/README.md b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/README.md
similarity index 100%
rename from base/allocator/partition_allocator/partition_alloc_base/README.md
rename to base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/README.md
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.h
new file mode 100644
index 0000000..516538b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.h
@@ -0,0 +1,62 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_FOUNDATION_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_FOUNDATION_UTIL_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base::apple {
+
+// CFCast<>() and CFCastStrict<>() cast a basic CFTypeRef to a more
+// specific CoreFoundation type. The compatibility of the passed
+// object is found by comparing its opaque type against the
+// requested type identifier. If the supplied object is not
+// compatible with the requested return type, CFCast<>() returns
+// NULL and CFCastStrict<>() will DCHECK. Providing a NULL pointer
+// to either variant results in NULL being returned without
+// triggering any DCHECK.
+//
+// Example usage:
+// CFNumberRef some_number = base::apple::CFCast<CFNumberRef>(
+//     CFArrayGetValueAtIndex(array, index));
+//
+// CFTypeRef hello = CFSTR("hello world");
+// CFStringRef some_string = base::apple::CFCastStrict<CFStringRef>(hello);
+
+template <typename T>
+T CFCast(const CFTypeRef& cf_val);
+
+template <typename T>
+T CFCastStrict(const CFTypeRef& cf_val);
+
+#define PA_CF_CAST_DECL(TypeCF)                             \
+  template <>                                               \
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)                 \
+  TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val); \
+                                                            \
+  template <>                                               \
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)                 \
+  TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val)
+
+PA_CF_CAST_DECL(CFArray);
+PA_CF_CAST_DECL(CFBag);
+PA_CF_CAST_DECL(CFBoolean);
+PA_CF_CAST_DECL(CFData);
+PA_CF_CAST_DECL(CFDate);
+PA_CF_CAST_DECL(CFDictionary);
+PA_CF_CAST_DECL(CFNull);
+PA_CF_CAST_DECL(CFNumber);
+PA_CF_CAST_DECL(CFSet);
+PA_CF_CAST_DECL(CFString);
+PA_CF_CAST_DECL(CFURL);
+PA_CF_CAST_DECL(CFUUID);
+
+#undef PA_CF_CAST_DECL
+
+}  // namespace partition_alloc::internal::base::apple
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_FOUNDATION_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.mm
new file mode 100644
index 0000000..e49906b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.mm
@@ -0,0 +1,45 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/foundation_util.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+namespace partition_alloc::internal::base::apple {
+
+#define PA_CF_CAST_DEFN(TypeCF)                                    \
+  template <>                                                      \
+  TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val) {       \
+    if (cf_val == NULL) {                                          \
+      return NULL;                                                 \
+    }                                                              \
+    if (CFGetTypeID(cf_val) == TypeCF##GetTypeID()) {              \
+      return (TypeCF##Ref)(cf_val);                                \
+    }                                                              \
+    return NULL;                                                   \
+  }                                                                \
+                                                                   \
+  template <>                                                      \
+  TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val) { \
+    TypeCF##Ref rv = CFCast<TypeCF##Ref>(cf_val);                  \
+    PA_BASE_DCHECK(cf_val == NULL || rv);                          \
+    return rv;                                                     \
+  }
+
+PA_CF_CAST_DEFN(CFArray)
+PA_CF_CAST_DEFN(CFBag)
+PA_CF_CAST_DEFN(CFBoolean)
+PA_CF_CAST_DEFN(CFData)
+PA_CF_CAST_DEFN(CFDate)
+PA_CF_CAST_DEFN(CFDictionary)
+PA_CF_CAST_DEFN(CFNull)
+PA_CF_CAST_DEFN(CFNumber)
+PA_CF_CAST_DEFN(CFSet)
+PA_CF_CAST_DEFN(CFString)
+PA_CF_CAST_DEFN(CFURL)
+PA_CF_CAST_DEFN(CFUUID)
+
+#undef PA_CF_CAST_DEFN
+
+}  // namespace partition_alloc::internal::base::apple
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.cc
new file mode 100644
index 0000000..21d6aa2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.cc
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h"
+
+#include <iomanip>
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h"
+
+namespace {
+
+std::string FormatMachErrorNumber(mach_error_t mach_err) {
+  // For the os/kern subsystem, give the error number in decimal as in
+  // <mach/kern_return.h>. Otherwise, give it in hexadecimal to make it easier
+  // to visualize the various bits. See <mach/error.h>.
+  if (mach_err >= 0 && mach_err < KERN_RETURN_MAX) {
+    return partition_alloc::internal::base::TruncatingStringPrintf(" (%d)",
+                                                                   mach_err);
+  }
+  return partition_alloc::internal::base::TruncatingStringPrintf(" (0x%08x)",
+                                                                 mach_err);
+}
+
+}  // namespace
+
+namespace partition_alloc::internal::logging {
+
+MachLogMessage::MachLogMessage(const char* file_path,
+                               int line,
+                               LogSeverity severity,
+                               mach_error_t mach_err)
+    : LogMessage(file_path, line, severity), mach_err_(mach_err) {}
+
+MachLogMessage::~MachLogMessage() {
+  stream() << ": " << mach_error_string(mach_err_)
+           << FormatMachErrorNumber(mach_err_).c_str();
+}
+
+}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h
new file mode 100644
index 0000000..2f3b504
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h
@@ -0,0 +1,99 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_MACH_LOGGING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_MACH_LOGGING_H_
+
+#include <mach/mach.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "build/build_config.h"
+
+// Use the PA_MACH_LOG family of macros along with a mach_error_t
+// (kern_return_t) containing a Mach error. The error value will be decoded so
+// that logged messages explain the error.
+//
+// Examples:
+//
+//   kern_return_t kr = mach_timebase_info(&info);
+//   if (kr != KERN_SUCCESS) {
+//     PA_MACH_LOG(ERROR, kr) << "mach_timebase_info";
+//   }
+//
+//   kr = vm_deallocate(task, address, size);
+//   PA_MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
+
+namespace partition_alloc::internal::logging {
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) MachLogMessage
+    : public partition_alloc::internal::logging::LogMessage {
+ public:
+  MachLogMessage(const char* file_path,
+                 int line,
+                 LogSeverity severity,
+                 mach_error_t mach_err);
+
+  MachLogMessage(const MachLogMessage&) = delete;
+  MachLogMessage& operator=(const MachLogMessage&) = delete;
+
+  ~MachLogMessage() override;
+
+ private:
+  mach_error_t mach_err_;
+};
+
+}  // namespace partition_alloc::internal::logging
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#define PA_MACH_DVLOG_IS_ON(verbose_level) PA_VLOG_IS_ON(verbose_level)
+#else
+#define PA_MACH_DVLOG_IS_ON(verbose_level) 0
+#endif
+
+#define PA_MACH_LOG_STREAM(severity, mach_err) \
+  PA_COMPACT_GOOGLE_LOG_EX_##severity(MachLogMessage, mach_err).stream()
+#define PA_MACH_VLOG_STREAM(verbose_level, mach_err)    \
+  ::partition_alloc::internal::logging::MachLogMessage( \
+      __FILE__, __LINE__, -verbose_level, mach_err)     \
+      .stream()
+
+#define PA_MACH_LOG(severity, mach_err) \
+  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), PA_LOG_IS_ON(severity))
+#define PA_MACH_LOG_IF(severity, condition, mach_err)    \
+  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), \
+                 PA_LOG_IS_ON(severity) && (condition))
+
+#define PA_MACH_VLOG(verbose_level, mach_err)                  \
+  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
+                 PA_VLOG_IS_ON(verbose_level))
+#define PA_MACH_VLOG_IF(verbose_level, condition, mach_err)    \
+  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
+                 PA_VLOG_IS_ON(verbose_level) && (condition))
+
+#define PA_MACH_CHECK(condition, mach_err)                          \
+  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(FATAL, mach_err), !(condition)) \
+      << "Check failed: " #condition << ". "
+
+#define PA_MACH_DLOG(severity, mach_err)                 \
+  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), \
+                 PA_DLOG_IS_ON(severity))
+#define PA_MACH_DLOG_IF(severity, condition, mach_err)   \
+  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(severity, mach_err), \
+                 PA_DLOG_IS_ON(severity) && (condition))
+
+#define PA_MACH_DVLOG(verbose_level, mach_err)                 \
+  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
+                 PA_MACH_DVLOG_IS_ON(verbose_level))
+#define PA_MACH_DVLOG_IF(verbose_level, condition, mach_err)   \
+  PA_LAZY_STREAM(PA_MACH_VLOG_STREAM(verbose_level, mach_err), \
+                 PA_MACH_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define PA_MACH_DCHECK(condition, mach_err)                  \
+  PA_LAZY_STREAM(PA_MACH_LOG_STREAM(FATAL, mach_err),        \
+                 BUILDFLAG(PA_DCHECK_IS_ON) && !(condition)) \
+      << "Check failed: " #condition << ". "
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_MACH_LOGGING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_cftyperef.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_cftyperef.h
new file mode 100644
index 0000000..147386e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_cftyperef.h
@@ -0,0 +1,48 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_SCOPED_CFTYPEREF_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_SCOPED_CFTYPEREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_typeref.h"
+
+namespace partition_alloc::internal::base::apple {
+
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef.  Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
+//
+// By default, ScopedCFTypeRef<> takes ownership of an object (in the
+// constructor or in reset()) by taking over the caller's existing ownership
+// claim.  The caller must own the object it gives to ScopedCFTypeRef<>, and
+// relinquishes an ownership claim to that object.  ScopedCFTypeRef<> does not
+// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
+// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
+// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
+// ownership is not changed.
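+//
+// A minimal usage sketch (illustrative only; CFStringCreateCopy is simply an
+// example of a CoreFoundation "Create" function returning a +1 reference, and
+// |source| is assumed to be a CFStringRef the caller owns):
+//
+//   // Takes over the caller's +1 reference (default ASSUME policy):
+//   ScopedCFTypeRef<CFStringRef> owned(CFStringCreateCopy(nullptr, source));
+//
+//   // Adds its own reference; the caller's ownership of |source| is kept:
+//   ScopedCFTypeRef<CFStringRef> shared(source, base::scoped_policy::RETAIN);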
+
+namespace internal {
+
+template <typename CFT>
+struct ScopedCFTypeRefTraits {
+  static CFT InvalidValue() { return nullptr; }
+  static CFT Retain(CFT object) {
+    CFRetain(object);
+    return object;
+  }
+  static void Release(CFT object) { CFRelease(object); }
+};
+
+}  // namespace internal
+
+template <typename CFT>
+using ScopedCFTypeRef =
+    ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
+
+}  // namespace partition_alloc::internal::base::apple
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_SCOPED_CFTYPEREF_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_typeref.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_typeref.h
new file mode 100644
index 0000000..0df3e7e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/scoped_typeref.h
@@ -0,0 +1,151 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_SCOPED_TYPEREF_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_SCOPED_TYPEREF_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_policy.h"
+
+namespace partition_alloc::internal::base::apple {
+
+// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
+// of a reference to any type that is maintained by Retain and Release methods.
+//
+// The Traits structure must provide the Retain and Release methods for type T.
+// A default ScopedTypeRefTraits is declared but not defined; it must be
+// specialized for each type that uses this interface. For example, an
+// appropriate definition of ScopedTypeRefTraits for CGLContextObj would be:
+//
+//   template<>
+//   struct ScopedTypeRefTraits<CGLContextObj> {
+//     static CGLContextObj InvalidValue() { return nullptr; }
+//     static CGLContextObj Retain(CGLContextObj object) {
+//       CGLContextRetain(object);
+//       return object;
+//     }
+//     static void Release(CGLContextObj object) { CGLContextRelease(object); }
+//   };
+//
+// For the many types that have pass-by-pointer create functions, the function
+// InitializeInto() is provided to allow direct initialization and assumption
+// of ownership of the object. For example, continuing to use the above
+// CGLContextObj specialization:
+//
+//   base::apple::ScopedTypeRef<CGLContextObj> context;
+//   CGLCreateContext(pixel_format, share_group, context.InitializeInto());
+//
+// For initialization with an existing object, the caller may specify whether
+// the ScopedTypeRef<> being initialized is assuming the caller's existing
+// ownership of the object (and should not call Retain in initialization) or if
+// it should not assume this ownership and must create its own (by calling
+// Retain in initialization). This behavior is based on the |policy| parameter,
+// with |ASSUME| for the former and |RETAIN| for the latter. The default policy
+// is to |ASSUME|.
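+//
+// For example (an illustrative sketch; |context| is assumed to be a
+// CGLContextObj reference the caller currently owns, and the two
+// constructions below show the alternative policies rather than one flow):
+//
+//   // Takes over the caller's existing ownership claim (no Retain):
+//   ScopedTypeRef<CGLContextObj> assumed(context);
+//
+//   // Creates its own claim with Retain; the caller keeps its claim:
+//   ScopedTypeRef<CGLContextObj> retained(context,
+//                                         base::scoped_policy::RETAIN);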
+
+template <typename T>
+struct ScopedTypeRefTraits;
+
+template <typename T, typename Traits = ScopedTypeRefTraits<T>>
+class ScopedTypeRef {
+ public:
+  using element_type = T;
+
+  explicit constexpr ScopedTypeRef(
+      element_type object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : object_(object) {
+    if (object_ && policy == base::scoped_policy::RETAIN) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  ScopedTypeRef(const ScopedTypeRef<T, Traits>& that) : object_(that.object_) {
+    if (object_) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  // This allows passing an object to a function that takes its superclass.
+  template <typename R, typename RTraits>
+  explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
+      : object_(that_as_subclass.get()) {
+    if (object_) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
+    that.object_ = Traits::InvalidValue();
+  }
+
+  ~ScopedTypeRef() {
+    if (object_) {
+      Traits::Release(object_);
+    }
+  }
+
+  ScopedTypeRef& operator=(const ScopedTypeRef<T, Traits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+    return *this;
+  }
+
+  // This is to be used only to take ownership of objects that are created
+  // by pass-by-pointer create functions. To enforce this, require that the
+  // object be reset to NULL before this may be used.
+  [[nodiscard]] element_type* InitializeInto() {
+    PA_BASE_DCHECK(!object_);
+    return &object_;
+  }
+
+  void reset(const ScopedTypeRef<T, Traits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+  }
+
+  void reset(element_type object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    if (object && policy == base::scoped_policy::RETAIN) {
+      object = Traits::Retain(object);
+    }
+    if (object_) {
+      Traits::Release(object_);
+    }
+    object_ = object;
+  }
+
+  bool operator==(const ScopedTypeRef& that) const {
+    return object_ == that.object_;
+  }
+
+  bool operator!=(const ScopedTypeRef& that) const {
+    return object_ != that.object_;
+  }
+
+  operator element_type() const { return object_; }
+
+  element_type get() const { return object_; }
+
+  void swap(ScopedTypeRef& that) {
+    element_type temp = that.object_;
+    that.object_ = object_;
+    object_ = temp;
+  }
+
+  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
+  // a wrapper for Release().  To force a ScopedTypeRef<> object to call
+  // Release(), use ScopedTypeRef<>::reset().
+  [[nodiscard]] element_type release() {
+    element_type temp = object_;
+    object_ = Traits::InvalidValue();
+    return temp;
+  }
+
+ private:
+  element_type object_;
+};
+
+}  // namespace partition_alloc::internal::base::apple
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_APPLE_SCOPED_TYPEREF_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/atomic_ref_count.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/atomic_ref_count.h
new file mode 100644
index 0000000..5ca4c11
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/atomic_ref_count.h
@@ -0,0 +1,69 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a low level implementation of atomic semantics for reference
+// counting.  Please use base/memory/ref_counted.h directly instead.
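+//
+// Illustrative usage only (base/memory/ref_counted.h wraps this pattern; the
+// sequence below is hypothetical):
+//
+//   AtomicRefCount count(1);   // one owner to start with
+//   count.Increment();         // AddRef: a second owner joins
+//   count.Decrement();         // Release: count drops back to 1
+//   if (!count.Decrement()) {  // Release: returns false once count hits zero
+//     // Destroy the shared object here.
+//   }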
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
+
+#include <atomic>
+
+namespace partition_alloc::internal::base {
+
+class AtomicRefCount {
+ public:
+  constexpr AtomicRefCount() : ref_count_(0) {}
+  explicit constexpr AtomicRefCount(int initial_value)
+      : ref_count_(initial_value) {}
+
+  // Increment a reference count.
+  // Returns the previous value of the count.
+  int Increment() { return Increment(1); }
+
+  // Increment a reference count by "increment", which must exceed 0.
+  // Returns the previous value of the count.
+  int Increment(int increment) {
+    return ref_count_.fetch_add(increment, std::memory_order_relaxed);
+  }
+
+  // Decrement a reference count, and return whether the result is non-zero.
+  // Insert barriers to ensure that state written before the reference count
+  // became zero will be visible to a thread that has just made the count zero.
+  bool Decrement() {
+    // TODO(jbroman): Technically this doesn't need to be an acquire operation
+    // unless the result is 1 (i.e., the ref count did indeed reach zero).
+    // However, there are toolchain issues that make that not work as well at
+    // present (notably TSAN doesn't like it).
+    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
+  }
+
+  // Return whether the reference count is one.  If the reference count is used
+  // in the conventional way, a reference count of 1 implies that the current
+  // thread owns the reference and no other thread shares it.  This call
+  // performs the test for a reference count of one, and performs the memory
+  // barrier needed for the owning thread to act on the object, knowing that it
+  // has exclusive access to the object.
+  bool IsOne() const { return ref_count_.load(std::memory_order_acquire) == 1; }
+
+  // Return whether the reference count is zero.  With conventional object
+  // reference counting, the object is destroyed when the count reaches zero,
+  // so callers should never observe a count of zero.  Hence this is generally
+  // used for a debug check.
+  bool IsZero() const {
+    return ref_count_.load(std::memory_order_acquire) == 0;
+  }
+
+  // Returns the current reference count (with no barriers). This is subtle, and
+  // should be used only for debugging.
+  int SubtleRefCountForDebug() const {
+    return ref_count_.load(std::memory_order_relaxed);
+  }
+
+ private:
+  std::atomic_int ref_count_;
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/augmentations/compiler_specific.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/augmentations/compiler_specific.h
new file mode 100644
index 0000000..29674f3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/augmentations/compiler_specific.h
@@ -0,0 +1,22 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
+
+// Extensions for PA's copy of `//base/compiler_specific.h`.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+
+// PA_ATTRIBUTE_RETURNS_NONNULL
+//
+// Tells the compiler that a function never returns a null pointer.
+// Sourced from Abseil's `attributes.h`.
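+//
+// Example (illustrative declaration only; AllocPagesOrDie is a hypothetical
+// name):
+//   PA_ATTRIBUTE_RETURNS_NONNULL void* AllocPagesOrDie(size_t length);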
+#if PA_HAS_ATTRIBUTE(returns_nonnull)
+#define PA_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
+#else
+#define PA_ATTRIBUTE_RETURNS_NONNULL
+#endif
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bit_cast.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bit_cast.h
new file mode 100644
index 0000000..7a0d0b2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bit_cast.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_BIT_CAST_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_BIT_CAST_H_
+
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+
+#if !PA_HAS_BUILTIN(__builtin_bit_cast)
+#include <string.h>  // memcpy
+#endif
+
+namespace partition_alloc::internal::base {
+
+// This is C++20's std::bit_cast<>().
+// It morally does what `*reinterpret_cast<Dest*>(&source)` does, but the
+// cast/deref pair is undefined behavior, while bit_cast<>() isn't.
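+//
+// For example (illustrative):
+//
+//   float f = 1.0f;
+//   uint32_t bits = bit_cast<uint32_t>(f);  // bits == 0x3f800000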
+template <class Dest, class Source>
+#if PA_HAS_BUILTIN(__builtin_bit_cast)
+constexpr
+#else
+inline
+#endif
+    Dest
+    bit_cast(const Source& source) {
+#if PA_HAS_BUILTIN(__builtin_bit_cast)
+  // TODO(thakis): Keep only this codepath once nacl is gone or updated.
+  return __builtin_bit_cast(Dest, source);
+#else
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "bit_cast requires source and destination to be the same size");
+  static_assert(std::is_trivially_copyable_v<Dest>,
+                "bit_cast requires the destination type to be copyable");
+  static_assert(std::is_trivially_copyable_v<Source>,
+                "bit_cast requires the source type to be copyable");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+#endif
+}
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_BIT_CAST_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h
new file mode 100644
index 0000000..2af5acd
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h
@@ -0,0 +1,154 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines some bit utilities.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_BITS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_BITS_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base::bits {
+
+// Returns true iff |value| is a power of 2.
+template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
+constexpr bool IsPowerOfTwo(T value) {
+  // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
+  //
+  // Only positive integers with a single bit set are powers of two. If only one
+  // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set
+  // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence
+  // |x & (x-1)| is 0 iff x is a power of two.
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+// Round down |size| to a multiple of alignment, which must be a power of two.
+inline constexpr size_t AlignDown(size_t size, size_t alignment) {
+  PA_BASE_DCHECK(IsPowerOfTwo(alignment));
+  return size & ~(alignment - 1);
+}
+
+// Move |ptr| back to the previous multiple of alignment, which must be a power
+// of two. Defined for types where sizeof(T) is one byte.
+template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
+inline T* AlignDown(T* ptr, size_t alignment) {
+  return reinterpret_cast<T*>(
+      AlignDown(reinterpret_cast<size_t>(ptr), alignment));
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
+inline constexpr size_t AlignUp(size_t size, size_t alignment) {
+  PA_BASE_DCHECK(IsPowerOfTwo(alignment));
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// Advance |ptr| to the next multiple of alignment, which must be a power of
+// two. Defined for types where sizeof(T) is one byte.
+template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
+inline T* AlignUp(T* ptr, size_t alignment) {
+  return reinterpret_cast<T*>(
+      AlignUp(reinterpret_cast<size_t>(ptr), alignment));
+}
+
+// CountLeadingZeroBits(value) returns the number of consecutive zero bits
+// starting from the most significant bit of |value| if |value| is non-zero,
+// otherwise it returns {sizeof(T) * 8}.
+// Example: 00100010 -> 2
+//
+// CountTrailingZeroBits(value) returns the number of consecutive zero bits
+// starting from the least significant bit of |value| if |value| is non-zero,
+// otherwise it returns {sizeof(T) * 8}.
+// Example: 00100010 -> 1
+//
+// C does not have an operator to do this, but fortunately the various
+// compilers have built-ins that map to fast underlying processor instructions.
+// __builtin_clz has undefined behaviour for an input of 0, even though there's
+// clearly a return value that makes sense, and even though some processor clz
+// instructions have defined behaviour for 0. We could drop to raw __asm__ to
+// do better, but we'll avoid doing that unless we see proof that we need to.
+template <typename T, int bits = sizeof(T) * 8>
+PA_ALWAYS_INLINE constexpr
+    typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
+    CountLeadingZeroBits(T value) {
+  static_assert(bits > 0, "invalid instantiation");
+#if defined(COMPILER_MSVC) && !defined(__clang__)
+  // We would prefer to use the _BitScanReverse(64) intrinsics, but they
+  // aren't constexpr and thus unusable here.
+  if (PA_LIKELY(value)) {
+    int leading_zeros = 0;
+    constexpr T kMostSignificantBitMask = 1ull << (bits - 1);
+    for (; !(value & kMostSignificantBitMask); value <<= 1, ++leading_zeros) {
+    }
+    return leading_zeros;
+  }
+  return bits;
+#else
+  return PA_LIKELY(value)
+             ? bits == 64
+                   ? __builtin_clzll(static_cast<uint64_t>(value))
+                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
+             : bits;
+#endif  // defined(COMPILER_MSVC) && !defined(__clang__)
+}
+
+template <typename T, int bits = sizeof(T) * 8>
+PA_ALWAYS_INLINE constexpr
+    typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
+    CountTrailingZeroBits(T value) {
+#if defined(COMPILER_MSVC) && !defined(__clang__)
+  // We would prefer to use the _BitScanForward(64) intrinsics, but they
+  // aren't constexpr and thus unusable here.
+  if (PA_LIKELY(value)) {
+    int trailing_zeros = 0;
+    constexpr T kLeastSignificantBitMask = 1ull;
+    for (; !(value & kLeastSignificantBitMask); value >>= 1, ++trailing_zeros) {
+    }
+    return trailing_zeros;
+  }
+  return bits;
+
+#else
+  return PA_LIKELY(value) ? bits == 64
+                                ? __builtin_ctzll(static_cast<uint64_t>(value))
+                                : __builtin_ctz(static_cast<uint32_t>(value))
+                          : bits;
+#endif  // defined(COMPILER_MSVC) && !defined(__clang__)
+}
+
+// Returns the integer i such that 2^i <= n < 2^(i+1).
+//
+// There is a common `BitLength` function, which returns the number of bits
+// required to represent a value. Rather than implement that function,
+// use `Log2Floor` and add 1 to the result.
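+//
+// For example (illustrative): Log2Floor(9) == 3 because 2^3 <= 9 < 2^4, so a
+// BitLength-style result for 9 would be Log2Floor(9) + 1 == 4.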
+constexpr int Log2Floor(uint32_t n) {
+  return 31 - CountLeadingZeroBits(n);
+}
+
+// Returns the integer i such that 2^(i-1) < n <= 2^i.
+constexpr int Log2Ceiling(uint32_t n) {
+  // When n == 0, we want the function to return -1.
+  // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
+  // why the statement below starts with (n ? 32 : -1).
+  return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
+}
+
+// Returns a value of type T with a single bit set in the left-most position.
+// Can be used instead of manually shifting a 1 to the left.
+template <typename T>
+constexpr T LeftmostBit() {
+  static_assert(std::is_integral_v<T>,
+                "This function can only be used with integral types.");
+  T one(1u);
+  return one << (8 * sizeof(T) - 1);
+}
+
+}  // namespace partition_alloc::internal::base::bits
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_BITS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits_pa_unittest.cc
new file mode 100644
index 0000000..3f595bb
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits_pa_unittest.cc
@@ -0,0 +1,275 @@
+// Copyright 2009 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the unit tests for the bit utilities.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+
+#include <cstddef>
+#include <limits>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base::bits {
+
+TEST(BitsTestPA, Log2Floor) {
+  EXPECT_EQ(-1, Log2Floor(0));
+  EXPECT_EQ(0, Log2Floor(1));
+  EXPECT_EQ(1, Log2Floor(2));
+  EXPECT_EQ(1, Log2Floor(3));
+  EXPECT_EQ(2, Log2Floor(4));
+  for (int i = 3; i < 31; ++i) {
+    unsigned int value = 1U << i;
+    EXPECT_EQ(i, Log2Floor(value));
+    EXPECT_EQ(i, Log2Floor(value + 1));
+    EXPECT_EQ(i, Log2Floor(value + 2));
+    EXPECT_EQ(i - 1, Log2Floor(value - 1));
+    EXPECT_EQ(i - 1, Log2Floor(value - 2));
+  }
+  EXPECT_EQ(31, Log2Floor(0xffffffffU));
+}
+
+TEST(BitsTestPA, Log2Ceiling) {
+  EXPECT_EQ(-1, Log2Ceiling(0));
+  EXPECT_EQ(0, Log2Ceiling(1));
+  EXPECT_EQ(1, Log2Ceiling(2));
+  EXPECT_EQ(2, Log2Ceiling(3));
+  EXPECT_EQ(2, Log2Ceiling(4));
+  for (int i = 3; i < 31; ++i) {
+    unsigned int value = 1U << i;
+    EXPECT_EQ(i, Log2Ceiling(value));
+    EXPECT_EQ(i + 1, Log2Ceiling(value + 1));
+    EXPECT_EQ(i + 1, Log2Ceiling(value + 2));
+    EXPECT_EQ(i, Log2Ceiling(value - 1));
+    EXPECT_EQ(i, Log2Ceiling(value - 2));
+  }
+  EXPECT_EQ(32, Log2Ceiling(0xffffffffU));
+}
+
+TEST(BitsTestPA, AlignUp) {
+  static constexpr size_t kSizeTMax = std::numeric_limits<size_t>::max();
+  EXPECT_EQ(0ul, AlignUp(0, 4));
+  EXPECT_EQ(4ul, AlignUp(1, 4));
+  EXPECT_EQ(4096ul, AlignUp(1, 4096));
+  EXPECT_EQ(4096ul, AlignUp(4096, 4096));
+  EXPECT_EQ(4096ul, AlignUp(4095, 4096));
+  EXPECT_EQ(8192ul, AlignUp(4097, 4096));
+  EXPECT_EQ(kSizeTMax - 31, AlignUp(kSizeTMax - 62, 32));
+  EXPECT_EQ(kSizeTMax / 2 + 1, AlignUp(1, kSizeTMax / 2 + 1));
+}
+
+TEST(BitsTestPA, AlignUpPointer) {
+  static constexpr uintptr_t kUintPtrTMax =
+      std::numeric_limits<uintptr_t>::max();
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
+            AlignUp(reinterpret_cast<uint8_t*>(0), 4));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(4),
+            AlignUp(reinterpret_cast<uint8_t*>(1), 4));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
+            AlignUp(reinterpret_cast<uint8_t*>(1), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
+            AlignUp(reinterpret_cast<uint8_t*>(4096), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
+            AlignUp(reinterpret_cast<uint8_t*>(4095), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(8192),
+            AlignUp(reinterpret_cast<uint8_t*>(4097), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax - 31),
+            AlignUp(reinterpret_cast<uint8_t*>(kUintPtrTMax - 62), 32));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax / 2 + 1),
+            AlignUp(reinterpret_cast<uint8_t*>(1), kUintPtrTMax / 2 + 1));
+}
+
+TEST(BitsTestPA, AlignDown) {
+  static constexpr size_t kSizeTMax = std::numeric_limits<size_t>::max();
+  EXPECT_EQ(0ul, AlignDown(0, 4));
+  EXPECT_EQ(0ul, AlignDown(1, 4));
+  EXPECT_EQ(0ul, AlignDown(1, 4096));
+  EXPECT_EQ(4096ul, AlignDown(4096, 4096));
+  EXPECT_EQ(0ul, AlignDown(4095, 4096));
+  EXPECT_EQ(4096ul, AlignDown(4097, 4096));
+  EXPECT_EQ(kSizeTMax - 63, AlignDown(kSizeTMax - 62, 32));
+  EXPECT_EQ(kSizeTMax - 31, AlignDown(kSizeTMax, 32));
+  EXPECT_EQ(0ul, AlignDown(1, kSizeTMax / 2 + 1));
+}
+
+TEST(BitsTestPA, AlignDownPointer) {
+  static constexpr uintptr_t kUintPtrTMax =
+      std::numeric_limits<uintptr_t>::max();
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
+            AlignDown(reinterpret_cast<uint8_t*>(0), 4));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
+            AlignDown(reinterpret_cast<uint8_t*>(1), 4));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
+            AlignDown(reinterpret_cast<uint8_t*>(1), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
+            AlignDown(reinterpret_cast<uint8_t*>(4096), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
+            AlignDown(reinterpret_cast<uint8_t*>(4095), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(4096),
+            AlignDown(reinterpret_cast<uint8_t*>(4097), 4096));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax - 63),
+            AlignDown(reinterpret_cast<uint8_t*>(kUintPtrTMax - 62), 32));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(kUintPtrTMax - 31),
+            AlignDown(reinterpret_cast<uint8_t*>(kUintPtrTMax), 32));
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(0),
+            AlignDown(reinterpret_cast<uint8_t*>(1), kUintPtrTMax / 2 + 1));
+}
+
+TEST(BitsTestPA, CountLeadingZeroBits8) {
+  EXPECT_EQ(8, CountLeadingZeroBits(uint8_t{0}));
+  EXPECT_EQ(7, CountLeadingZeroBits(uint8_t{1}));
+  for (int shift = 0; shift <= 7; ++shift) {
+    EXPECT_EQ(7 - shift,
+              CountLeadingZeroBits(static_cast<uint8_t>(1 << shift)));
+  }
+  EXPECT_EQ(4, CountLeadingZeroBits(uint8_t{0x0f}));
+}
+
+TEST(BitsTestPA, CountLeadingZeroBits16) {
+  EXPECT_EQ(16, CountLeadingZeroBits(uint16_t{0}));
+  EXPECT_EQ(15, CountLeadingZeroBits(uint16_t{1}));
+  for (int shift = 0; shift <= 15; ++shift) {
+    EXPECT_EQ(15 - shift,
+              CountLeadingZeroBits(static_cast<uint16_t>(1 << shift)));
+  }
+  EXPECT_EQ(4, CountLeadingZeroBits(uint16_t{0x0f0f}));
+}
+
+TEST(BitsTestPA, CountLeadingZeroBits32) {
+  EXPECT_EQ(32, CountLeadingZeroBits(uint32_t{0}));
+  EXPECT_EQ(31, CountLeadingZeroBits(uint32_t{1}));
+  for (int shift = 0; shift <= 31; ++shift) {
+    EXPECT_EQ(31 - shift, CountLeadingZeroBits(uint32_t{1} << shift));
+  }
+  EXPECT_EQ(4, CountLeadingZeroBits(uint32_t{0x0f0f0f0f}));
+}
+
+TEST(BitsTestPA, CountTrailingZeroBits8) {
+  EXPECT_EQ(8, CountTrailingZeroBits(uint8_t{0}));
+  EXPECT_EQ(7, CountTrailingZeroBits(uint8_t{128}));
+  for (int shift = 0; shift <= 7; ++shift) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint8_t>(1 << shift)));
+  }
+  EXPECT_EQ(4, CountTrailingZeroBits(uint8_t{0xf0}));
+}
+
+TEST(BitsTestPA, CountTrailingZeroBits16) {
+  EXPECT_EQ(16, CountTrailingZeroBits(uint16_t{0}));
+  EXPECT_EQ(15, CountTrailingZeroBits(uint16_t{32768}));
+  for (int shift = 0; shift <= 15; ++shift) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint16_t>(1 << shift)));
+  }
+  EXPECT_EQ(4, CountTrailingZeroBits(uint16_t{0xf0f0}));
+}
+
+TEST(BitsTestPA, CountTrailingZeroBits32) {
+  EXPECT_EQ(32, CountTrailingZeroBits(uint32_t{0}));
+  EXPECT_EQ(31, CountTrailingZeroBits(uint32_t{1} << 31));
+  for (int shift = 0; shift <= 31; ++shift) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(uint32_t{1} << shift));
+  }
+  EXPECT_EQ(4, CountTrailingZeroBits(uint32_t{0xf0f0f0f0}));
+}
+
+TEST(BitsTestPA, CountLeadingZeroBits64) {
+  EXPECT_EQ(64, CountLeadingZeroBits(uint64_t{0}));
+  EXPECT_EQ(63, CountLeadingZeroBits(uint64_t{1}));
+  for (int shift = 0; shift <= 63; ++shift) {
+    EXPECT_EQ(63 - shift, CountLeadingZeroBits(uint64_t{1} << shift));
+  }
+  EXPECT_EQ(4, CountLeadingZeroBits(uint64_t{0x0f0f0f0f0f0f0f0f}));
+}
+
+TEST(BitsTestPA, CountTrailingZeroBits64) {
+  EXPECT_EQ(64, CountTrailingZeroBits(uint64_t{0}));
+  EXPECT_EQ(63, CountTrailingZeroBits(uint64_t{1} << 63));
+  for (int shift = 0; shift <= 31; ++shift) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(uint64_t{1} << shift));
+  }
+  EXPECT_EQ(4, CountTrailingZeroBits(uint64_t{0xf0f0f0f0f0f0f0f0}));
+}
+
+TEST(BitsTestPA, CountLeadingZeroBitsSizeT) {
+#if defined(ARCH_CPU_64_BITS)
+  EXPECT_EQ(64, CountLeadingZeroBits(size_t{0}));
+  EXPECT_EQ(63, CountLeadingZeroBits(size_t{1}));
+  EXPECT_EQ(32, CountLeadingZeroBits(size_t{1} << 31));
+  EXPECT_EQ(1, CountLeadingZeroBits(size_t{1} << 62));
+  EXPECT_EQ(0, CountLeadingZeroBits(size_t{1} << 63));
+#else
+  EXPECT_EQ(32, CountLeadingZeroBits(size_t{0}));
+  EXPECT_EQ(31, CountLeadingZeroBits(size_t{1}));
+  EXPECT_EQ(1, CountLeadingZeroBits(size_t{1} << 30));
+  EXPECT_EQ(0, CountLeadingZeroBits(size_t{1} << 31));
+#endif  // ARCH_CPU_64_BITS
+}
+
+TEST(BitsTestPA, CountTrailingZeroBitsSizeT) {
+#if defined(ARCH_CPU_64_BITS)
+  EXPECT_EQ(64, CountTrailingZeroBits(size_t{0}));
+  EXPECT_EQ(63, CountTrailingZeroBits(size_t{1} << 63));
+  EXPECT_EQ(31, CountTrailingZeroBits(size_t{1} << 31));
+  EXPECT_EQ(1, CountTrailingZeroBits(size_t{2}));
+  EXPECT_EQ(0, CountTrailingZeroBits(size_t{1}));
+#else
+  EXPECT_EQ(32, CountTrailingZeroBits(size_t{0}));
+  EXPECT_EQ(31, CountTrailingZeroBits(size_t{1} << 31));
+  EXPECT_EQ(1, CountTrailingZeroBits(size_t{2}));
+  EXPECT_EQ(0, CountTrailingZeroBits(size_t{1}));
+#endif  // ARCH_CPU_64_BITS
+}
+
+TEST(BitsTestPA, PowerOfTwo) {
+  EXPECT_FALSE(IsPowerOfTwo(-1));
+  EXPECT_FALSE(IsPowerOfTwo(0));
+  EXPECT_TRUE(IsPowerOfTwo(1));
+  EXPECT_TRUE(IsPowerOfTwo(2));
+  // Unsigned 64 bit cases.
+  for (uint32_t i = 2; i < 64; i++) {
+    const uint64_t val = uint64_t{1} << i;
+    EXPECT_FALSE(IsPowerOfTwo(val - 1));
+    EXPECT_TRUE(IsPowerOfTwo(val));
+    EXPECT_FALSE(IsPowerOfTwo(val + 1));
+  }
+  // Signed 64 bit cases.
+  for (uint32_t i = 2; i < 63; i++) {
+    const int64_t val = int64_t{1} << i;
+    EXPECT_FALSE(IsPowerOfTwo(val - 1));
+    EXPECT_TRUE(IsPowerOfTwo(val));
+    EXPECT_FALSE(IsPowerOfTwo(val + 1));
+  }
+  // Signed integers with only the last bit set are negative, not powers of two.
+  EXPECT_FALSE(IsPowerOfTwo(int64_t{1} << 63));
+}
+
+TEST(BitsTestPA, LeftMostBit) {
+  // Construction of a signed type from an unsigned one of the same width
+  // preserves all bits. Explicitly confirming this behavior here to illustrate
+  // correctness of reusing unsigned literals to test behavior of signed types.
+  // Using signed literals does not work with EXPECT_EQ.
+  static_assert(
+      static_cast<int64_t>(0xFFFFFFFFFFFFFFFFu) == 0xFFFFFFFFFFFFFFFFl,
+      "Comparing signed with unsigned literals compares bits.");
+  static_assert((0xFFFFFFFFFFFFFFFFu ^ 0xFFFFFFFFFFFFFFFFl) == 0,
+                "Signed and unsigned literals have the same bits set");
+
+  uint64_t unsigned_long_long_value = 0x8000000000000000u;
+  EXPECT_EQ(LeftmostBit<uint64_t>(), unsigned_long_long_value);
+  EXPECT_EQ(LeftmostBit<int64_t>(), int64_t(unsigned_long_long_value));
+
+  uint32_t unsigned_long_value = 0x80000000u;
+  EXPECT_EQ(LeftmostBit<uint32_t>(), unsigned_long_value);
+  EXPECT_EQ(LeftmostBit<int32_t>(), int32_t(unsigned_long_value));
+
+  uint16_t unsigned_short_value = 0x8000u;
+  EXPECT_EQ(LeftmostBit<uint16_t>(), unsigned_short_value);
+  EXPECT_EQ(LeftmostBit<int16_t>(), int16_t(unsigned_short_value));
+
+  uint8_t unsigned_byte_value = 0x80u;
+  EXPECT_EQ(LeftmostBit<uint8_t>(), unsigned_byte_value);
+  EXPECT_EQ(LeftmostBit<int8_t>(), int8_t(unsigned_byte_value));
+}
+
+}  // namespace partition_alloc::internal::base::bits
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.cc
new file mode 100644
index 0000000..bc67e1e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.cc
@@ -0,0 +1,89 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+
+namespace partition_alloc::internal::logging {
+
+// TODO(1151236): Make CheckError not allocate memory, so that CHECK() can be
+// used inside PartitionAllocator when PartitionAllocator-Everywhere is
+// enabled. (LogMessage also needs to be modified.)
+
+CheckError::CheckError(const char* file,
+                       int line,
+                       LogSeverity severity,
+                       const char* condition)
+    : log_message_(file, line, severity) {
+  log_message_.stream() << "Check failed: " << condition << ". ";
+}
+
+CheckError::CheckError(const char* file, int line, LogSeverity severity)
+    : log_message_(file, line, severity) {}
+
+CheckError::CheckError(const char* file,
+                       int line,
+                       LogSeverity severity,
+                       const char* condition,
+                       SystemErrorCode err_code)
+    : errno_log_message_(file, line, severity, err_code), has_errno(true) {
+  errno_log_message_.stream() << "Check failed: " << condition << ". ";
+}
+
+check_error::Check::Check(const char* file, int line, const char* condition)
+    : CheckError(file, line, LOGGING_FATAL, condition) {}
+
+check_error::DCheck::DCheck(const char* file, int line, const char* condition)
+    : CheckError(file, line, LOGGING_DCHECK, condition) {}
+
+check_error::PCheck::PCheck(const char* file, int line, const char* condition)
+    : CheckError(file,
+                 line,
+                 LOGGING_FATAL,
+                 condition,
+                 logging::GetLastSystemErrorCode()) {}
+
+check_error::PCheck::PCheck(const char* file, int line)
+    : PCheck(file, line, "") {}
+
+check_error::DPCheck::DPCheck(const char* file, int line, const char* condition)
+    : CheckError(file,
+                 line,
+                 LOGGING_DCHECK,
+                 condition,
+                 logging::GetLastSystemErrorCode()) {}
+
+check_error::NotImplemented::NotImplemented(const char* file,
+                                            int line,
+                                            const char* function)
+    : CheckError(file, line, LOGGING_ERROR) {
+  stream() << "Not implemented reached in " << function;
+}
+
+base::strings::CStringBuilder& CheckError::stream() {
+  return !has_errno ? log_message_.stream() : errno_log_message_.stream();
+}
+
+CheckError::~CheckError() {
+  // Note: This function ends up in crash stack traces. If its full name
+  // changes, the crash server's magic signature logic needs to be updated.
+  // See cl/306632920.
+  if (!has_errno) {
+    log_message_.~LogMessage();
+  } else {
+#if BUILDFLAG(IS_WIN)
+    errno_log_message_.~Win32ErrorLogMessage();
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+    errno_log_message_.~ErrnoLogMessage();
+#endif  // BUILDFLAG(IS_WIN)
+  }
+}
+
+void RawCheckFailure(const char* message) {
+  RawLog(LOGGING_FATAL, message);
+  PA_IMMEDIATE_CRASH();
+}
+
+}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h
new file mode 100644
index 0000000..ff8efa4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h
@@ -0,0 +1,223 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CHECK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CHECK_H_
+
+#include <iosfwd>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h"
+
+#define PA_STRINGIFY_IMPL(s) #s
+#define PA_STRINGIFY(s) PA_STRINGIFY_IMPL(s)
+
+// This header defines the PA_BASE_CHECK, PA_BASE_DCHECK, and PA_BASE_DPCHECK
+// macros, PartitionAlloc's counterparts of CHECK, DCHECK, and DPCHECK.
+//
+// PA_BASE_CHECK dies with a fatal error if its condition is not true. It is
+// not controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode.
+//
+// PA_BASE_DCHECK, the "debug mode" check, is enabled depending on NDEBUG and
+// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE.
+//
+// PA_BASE_(D)PCHECK is like PA_BASE_(D)CHECK, but includes the system error
+// code (c.f. perror(3)).
+//
+// Additional information can be streamed to these macros and will be included
+// in the log output if the condition doesn't hold (you may need to include
+// <ostream>):
+//
+//   PA_BASE_CHECK(condition) << "Additional info.";
+//
+// The condition is evaluated exactly once. Even in build modes where e.g.
+// DCHECK is disabled, the condition and any stream arguments are still
+// referenced to avoid warnings about unused variables and functions.
+//
+// For the (D)CHECK_EQ, etc. macros, see base/check_op.h. However, that header
+// is *significantly* larger than check.h, so try to avoid including it in
+// header files.
+
+namespace partition_alloc::internal::logging {
+
+// Class used to explicitly ignore an ostream, and optionally a boolean value.
+class VoidifyStream {
+ public:
+  VoidifyStream() = default;
+  explicit VoidifyStream(bool ignored) {}
+
+  // This operator has lower precedence than << but higher than ?:
+  void operator&(base::strings::CStringBuilder&) {}
+};
+
+// Helper macro which avoids evaluating the arguments to a stream if the
+// condition is false.
+#define PA_LAZY_CHECK_STREAM(stream, condition) \
+  !(condition)                                  \
+      ? (void)0                                 \
+      : ::partition_alloc::internal::logging::VoidifyStream() & (stream)
+
+// Macro which uses but does not evaluate expr and any stream parameters.
+#define PA_EAT_CHECK_STREAM_PARAMS(expr)                             \
+  true ? (void)0                                                     \
+       : ::partition_alloc::internal::logging::VoidifyStream(expr) & \
+             (*::partition_alloc::internal::logging::g_swallow_stream)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+extern base::strings::CStringBuilder* g_swallow_stream;
+
+class LogMessage;
+
+// Class used for raising a check error upon destruction.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) CheckError {
+ public:
+  // Stream for adding optional details to the error message.
+  base::strings::CStringBuilder& stream();
+  PA_NOMERGE ~CheckError();
+
+ protected:
+  CheckError(const char* file,
+             int line,
+             LogSeverity severity,
+             const char* condition);
+  CheckError(const char* file, int line, LogSeverity severity);
+  CheckError(const char* file,
+             int line,
+             LogSeverity severity,
+             const char* condition,
+             SystemErrorCode err_code);
+
+  union {
+    LogMessage log_message_;
+#if BUILDFLAG(IS_WIN)
+    Win32ErrorLogMessage errno_log_message_;
+#else
+    ErrnoLogMessage errno_log_message_;
+#endif
+  };
+
+  // |has_errno| describes which union member is used, |log_message_| or
+  // |errno_log_message_|. If |has_errno| is true, CheckError initializes
+  // |errno_log_message_| at its constructor and destroys at its destructor.
+  // (This also means the CheckError is an instance of the parent class of
+  // PCheck or DPCheck.)
+  // If false, CheckError initializes and destroys |log_message_|.
+  const bool has_errno = false;
+};
+
+namespace check_error {
+
+// Class used for raising a check error upon destruction.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) Check : public CheckError {
+ public:
+  Check(const char* file, int line, const char* condition);
+};
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) DCheck : public CheckError {
+ public:
+  DCheck(const char* file, int line, const char* condition);
+};
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) PCheck : public CheckError {
+ public:
+  PCheck(const char* file, int line, const char* condition);
+  PCheck(const char* file, int line);
+};
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) DPCheck : public CheckError {
+ public:
+  DPCheck(const char* file, int line, const char* condition);
+};
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NotImplemented
+    : public CheckError {
+ public:
+  NotImplemented(const char* file, int line, const char* function);
+};
+
+}  // namespace check_error
+
+#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
+#error "Debug builds are not expected to be optimized as official builds."
+#endif  // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
+
+#if defined(OFFICIAL_BUILD) && !BUILDFLAG(PA_DCHECK_IS_ON)
+
+// Discard log strings to reduce code bloat.
+//
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations.
+#define PA_BASE_CHECK(condition)                   \
+  PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
+                            : PA_EAT_CHECK_STREAM_PARAMS()
+
+#define PA_BASE_CHECK_WILL_STREAM() false
+
+#define PA_BASE_PCHECK(condition)                                         \
+  PA_LAZY_CHECK_STREAM(                                                   \
+      ::partition_alloc::internal::logging::check_error::PCheck(__FILE__, \
+                                                                __LINE__) \
+          .stream(),                                                      \
+      PA_UNLIKELY(!(condition)))
+
+#else
+
+#define PA_BASE_CHECK(condition)                                \
+  PA_LAZY_CHECK_STREAM(                                         \
+      ::partition_alloc::internal::logging::check_error::Check( \
+          __FILE__, __LINE__, #condition)                       \
+          .stream(),                                            \
+      !PA_ANALYZER_ASSUME_TRUE(condition))
+
+#define PA_BASE_CHECK_WILL_STREAM() true
+
+#define PA_BASE_PCHECK(condition)                                \
+  PA_LAZY_CHECK_STREAM(                                          \
+      ::partition_alloc::internal::logging::check_error::PCheck( \
+          __FILE__, __LINE__, #condition)                        \
+          .stream(),                                             \
+      !PA_ANALYZER_ASSUME_TRUE(condition))
+
+#endif
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_BASE_DCHECK(condition)                                \
+  PA_LAZY_CHECK_STREAM(                                          \
+      ::partition_alloc::internal::logging::check_error::DCheck( \
+          __FILE__, __LINE__, #condition)                        \
+          .stream(),                                             \
+      !PA_ANALYZER_ASSUME_TRUE(condition))
+
+#define PA_BASE_DPCHECK(condition)                                \
+  PA_LAZY_CHECK_STREAM(                                           \
+      ::partition_alloc::internal::logging::check_error::DPCheck( \
+          __FILE__, __LINE__, #condition)                         \
+          .stream(),                                              \
+      !PA_ANALYZER_ASSUME_TRUE(condition))
+
+#else
+
+#define PA_BASE_DCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
+#define PA_BASE_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
+
+#endif
+
+// Async signal safe checking mechanism.
+[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) void RawCheckFailure(
+    const char* message);
+#define PA_RAW_CHECK(condition)                              \
+  do {                                                       \
+    if (!(condition))                                        \
+      ::partition_alloc::internal::logging::RawCheckFailure( \
+          "Check failed: " #condition "\n");                 \
+  } while (0)
+
+}  // namespace partition_alloc::internal::logging
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CHECK_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h
new file mode 100644
index 0000000..30be67c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h
@@ -0,0 +1,241 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
+
+#include "build/build_config.h"
+
+// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
+#if defined(__has_attribute)
+#define PA_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define PA_HAS_ATTRIBUTE(x) 0
+#endif
+
+// A wrapper around `__has_builtin`, similar to HAS_CPP_ATTRIBUTE.
+#if defined(__has_builtin)
+#define PA_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#define PA_HAS_BUILTIN(x) 0
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+//   PA_NOINLINE void DoStuff() { ... }
+#if defined(__clang__) && PA_HAS_ATTRIBUTE(noinline)
+#define PA_NOINLINE [[clang::noinline]]
+#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(noinline)
+#define PA_NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define PA_NOINLINE __declspec(noinline)
+#else
+#define PA_NOINLINE
+#endif
+
+#if defined(__clang__) && defined(NDEBUG) && PA_HAS_ATTRIBUTE(always_inline)
+#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
+#elif defined(COMPILER_GCC) && defined(NDEBUG) && \
+    PA_HAS_ATTRIBUTE(always_inline)
+#define PA_ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif defined(COMPILER_MSVC) && defined(NDEBUG)
+#define PA_ALWAYS_INLINE __forceinline
+#else
+#define PA_ALWAYS_INLINE inline
+#endif
+
+// Annotate a function indicating it should never be tail called. Useful to make
+// sure callers of the annotated function are never omitted from call-stacks.
+// To provide the complementary behavior (prevent the annotated function from
+// being omitted) look at PA_NOINLINE. Also note that this doesn't prevent code
+// folding of multiple identical caller functions into a single signature. To
+// prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
+// Use like:
+//   void PA_NOT_TAIL_CALLED FooBar();
+#if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called)
+#define PA_NOT_TAIL_CALLED [[clang::not_tail_called]]
+#else
+#define PA_NOT_TAIL_CALLED
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+//   class PA_ALIGNAS(16) MyClass { ... }
+//   PA_ALIGNAS(16) int array[4];
+//
+// In most places you can use the C++11 keyword "alignas", which is preferred.
+//
+// Historically, compilers had trouble mixing __attribute__((...)) syntax with
+// alignas(...) syntax. However, at least Clang is very accepting nowadays. It
+// may be that this macro can be removed entirely.
+#if defined(__clang__)
+#define PA_ALIGNAS(byte_alignment) alignas(byte_alignment)
+#elif defined(COMPILER_MSVC)
+#define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(aligned)
+#define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Tells the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+// For member functions, the implicit this parameter counts as index 1.
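+//
+// Example (illustrative declaration only; LogFormatted is a hypothetical
+// name):
+//   PA_PRINTF_FORMAT(1, 2) void LogFormatted(const char* format, ...);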
+#if (defined(COMPILER_GCC) || defined(__clang__)) && PA_HAS_ATTRIBUTE(format)
+#define PA_PRINTF_FORMAT(format_param, dots_param) \
+  __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PA_PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// Sanitizers annotations.
+#if PA_HAS_ATTRIBUTE(no_sanitize)
+#define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#if !defined(PA_NO_SANITIZE)
+#define PA_NO_SANITIZE(what)
+#endif
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
+#else  // MEMORY_SANITIZER
+#define PA_MSAN_UNPOISON(p, size)
+#endif  // MEMORY_SANITIZER
+
+// Macro for hinting that an expression is likely to be false.
+#if !defined(PA_UNLIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define PA_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define PA_UNLIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(PA_UNLIKELY)
+
+#if !defined(PA_LIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define PA_LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define PA_LIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(PA_LIKELY)
+
+#if !defined(PA_CPU_ARM_NEON)
+#if defined(__arm__)
+#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
+    !defined(__VFP_FP__) && !defined(_WIN32_WCE) && !defined(ANDROID)
+#error Chromium does not support middle endian architecture
+#endif
+#if defined(__ARM_NEON__)
+#define PA_CPU_ARM_NEON 1
+#endif
+#endif  // defined(__arm__)
+#endif  // !defined(PA_CPU_ARM_NEON)
+
+#if !defined(PA_HAVE_MIPS_MSA_INTRINSICS)
+#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
+#define PA_HAVE_MIPS_MSA_INTRINSICS 1
+#endif
+#endif
+
+// The PA_ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
+// to Clang which control what code paths are statically analyzed,
+// and is meant to be used in conjunction with assert & assert-like functions.
+// The expression is passed straight through if analysis isn't enabled.
+//
+// PA_ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current
+// codepath and any other branching codepaths that might follow.
+#if defined(__clang_analyzer__)
+
+namespace partition_alloc::internal {
+
+inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
+  return false;
+}
+
+inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+  // PartitionAllocAnalyzerNoReturn() is invoked and analysis is terminated if
+  // |arg| is false.
+  return arg || AnalyzerNoReturn();
+}
+
+}  // namespace partition_alloc::internal
+
+#define PA_ANALYZER_ASSUME_TRUE(arg) \
+  ::partition_alloc::internal::AnalyzerAssumeTrue(!!(arg))
+#define PA_ANALYZER_SKIP_THIS_PATH() \
+  static_cast<void>(::partition_alloc::internal::AnalyzerNoReturn())
+
+#else  // !defined(__clang_analyzer__)
+
+#define PA_ANALYZER_ASSUME_TRUE(arg) (arg)
+#define PA_ANALYZER_SKIP_THIS_PATH()
+
+#endif  // defined(__clang_analyzer__)
+
+// Use the nomerge attribute to prevent the optimizer from merging multiple
+// identical calls into one.
+#if defined(__clang__) && PA_HAS_ATTRIBUTE(nomerge)
+#define PA_NOMERGE [[clang::nomerge]]
+#else
+#define PA_NOMERGE
+#endif
+
+// Marks a type as being eligible for the "trivial" ABI despite having a
+// non-trivial destructor or copy/move constructor. Such types can be relocated
+// after construction by simply copying their memory, which makes them eligible
+// to be passed in registers. The canonical example is std::unique_ptr.
+//
+// Use with caution; this has some subtle effects on constructor/destructor
+// ordering and will be very incorrect if the type relies on its address
+// remaining constant. When used as a function argument (by value), the value
+// may be constructed in the caller's stack frame, passed in a register, and
+// then used and destructed in the callee's stack frame. A similar thing can
+// occur when values are returned.
+//
+// TRIVIAL_ABI is not needed for types which have a trivial destructor and
+// copy/move constructors, such as base::TimeTicks and other POD.
+//
+// It is also not likely to be effective on types too large to be passed in one
+// or two registers on typical target ABIs.
+//
+// See also:
+//   https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+//   https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html
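+//
+// Example (illustrative; ScopedHandle is a hypothetical type):
+//
+//   struct PA_TRIVIAL_ABI ScopedHandle {
+//     ~ScopedHandle();  // Non-trivial, yet the type can still be passed in
+//     int fd;           // registers thanks to the trivial_abi attribute.
+//   };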
+#if defined(__clang__) && PA_HAS_ATTRIBUTE(trivial_abi)
+#define PA_TRIVIAL_ABI [[clang::trivial_abi]]
+#else
+#define PA_TRIVIAL_ABI
+#endif
+
+// Requires constant initialization. See constinit in C++20. Allows relying on
+// a variable being initialized before execution starts, without requiring a
+// global constructor.
+#if PA_HAS_ATTRIBUTE(require_constant_initialization)
+#define PA_CONSTINIT __attribute__((require_constant_initialization))
+#endif
+#if !defined(PA_CONSTINIT)
+#define PA_CONSTINIT
+#endif
+
+#if defined(__clang__)
+#define PA_GSL_POINTER [[gsl::Pointer]]
+#else
+#define PA_GSL_POINTER
+#endif
+
+// Constexpr destructors were introduced in C++20. PartitionAlloc's minimum
+// supported C++ version is C++17.
+#if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
+#define PA_CONSTEXPR_DTOR constexpr
+#else
+#define PA_CONSTEXPR_DTOR
+#endif
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
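As a side note, here is a minimal, self-contained sketch of how an analyzer-assume macro like PA_ANALYZER_ASSUME_TRUE above is typically consumed by an assert-style macro: under the Clang static analyzer the failing branch is treated as non-returning, and in normal builds the expression passes through unchanged. The MY_* and MyAnalyzerNoReturn names are hypothetical, not part of PartitionAlloc.

#include <cstdio>
#include <cstdlib>

#if defined(__clang_analyzer__)
inline constexpr bool MyAnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
  return false;
}
#define MY_ASSUME_TRUE(arg) ((arg) || MyAnalyzerNoReturn())
#else
#define MY_ASSUME_TRUE(arg) (arg)
#endif

// A DCHECK-style macro: if |cond| is false the process aborts, and the
// analyzer prunes every path on which |cond| could be false.
#define MY_DCHECK(cond)                                  \
  do {                                                   \
    if (!MY_ASSUME_TRUE(cond)) {                         \
      std::fprintf(stderr, "check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (0)

int main() {
  int* value = new int(42);
  MY_DCHECK(value != nullptr);  // Paths with value == nullptr are not analyzed.
  std::printf("%d\n", *value);
  delete value;
  return 0;
}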
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h
new file mode 100644
index 0000000..0bcbea2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h
@@ -0,0 +1,80 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
+
+// Used to annotate symbols which are exported by the component named
+// |component|. Note that this only does the right thing if the corresponding
+// component target's sources are compiled with |IS_$component_IMPL| defined
+// as 1. For example:
+//
+//   class PA_COMPONENT_EXPORT(FOO) Bar {};
+//
+// If IS_FOO_IMPL=1 at compile time, then Bar will be annotated using the
+// PA_COMPONENT_EXPORT_ANNOTATION macro defined below. Otherwise it will be
+// annotated using the PA_COMPONENT_IMPORT_ANNOTATION macro.
+#define PA_COMPONENT_EXPORT(component)                            \
+  PA_COMPONENT_MACRO_CONDITIONAL_(IS_##component##_IMPL,          \
+                                  PA_COMPONENT_EXPORT_ANNOTATION, \
+                                  PA_COMPONENT_IMPORT_ANNOTATION)
+
+// Indicates whether the current compilation unit is being compiled as part of
+// the implementation of the component named |component|. Expands to |1| if
+// |IS_$component_IMPL| is defined as |1|; expands to |0| otherwise.
+//
+// Note in particular that if |IS_$component_IMPL| is not defined at all, it is
+// still fine to test PA_INSIDE_COMPONENT_IMPL(component), which expands to |0|
+// as expected.
+#define PA_INSIDE_COMPONENT_IMPL(component) \
+  PA_COMPONENT_MACRO_CONDITIONAL_(IS_##component##_IMPL, 1, 0)
+
+// Compiler-specific macros to annotate for export or import of a symbol. No-op
+// in non-component builds. These should not see much if any direct use.
+// Instead use the PA_COMPONENT_EXPORT macro defined above.
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+#define PA_COMPONENT_EXPORT_ANNOTATION __declspec(dllexport)
+#define PA_COMPONENT_IMPORT_ANNOTATION __declspec(dllimport)
+#else  // defined(WIN32)
+#define PA_COMPONENT_EXPORT_ANNOTATION __attribute__((visibility("default")))
+#define PA_COMPONENT_IMPORT_ANNOTATION
+#endif  // defined(WIN32)
+#else   // defined(COMPONENT_BUILD)
+#define PA_COMPONENT_EXPORT_ANNOTATION
+#define PA_COMPONENT_IMPORT_ANNOTATION
+#endif  // defined(COMPONENT_BUILD)
+
+// Below this point are several internal utility macros used for the
+// implementation of the above macros. Not intended for external use.
+
+// Helper for conditional expansion to one of two token strings. If |condition|
+// expands to |1| then this macro expands to |consequent|; otherwise it expands
+// to |alternate|.
+#define PA_COMPONENT_MACRO_CONDITIONAL_(condition, consequent, alternate) \
+  PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_(                              \
+      PA_COMPONENT_MACRO_CONDITIONAL_COMMA_(condition), consequent, alternate)
+
+// MSVC workaround for __VA_ARGS__ expanding into one expression.
+#define PA_MSVC_EXPAND_ARG(arg) arg
+
+// Expands to a comma (,) iff its first argument expands to |1|. Used in
+// conjunction with |PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_()|, as the
+// presence or absence of an extra comma can be used to conditionally shift
+// subsequent argument positions and thus influence which argument is selected.
+#define PA_COMPONENT_MACRO_CONDITIONAL_COMMA_(...) \
+  PA_COMPONENT_MACRO_CONDITIONAL_COMMA_IMPL_(__VA_ARGS__, )
+#define PA_COMPONENT_MACRO_CONDITIONAL_COMMA_IMPL_(x, ...) \
+  PA_COMPONENT_MACRO_CONDITIONAL_COMMA_##x##_
+#define PA_COMPONENT_MACRO_CONDITIONAL_COMMA_1_ ,
+
+// Helper which simply selects its third argument. Used in conjunction with
+// |PA_COMPONENT_MACRO_CONDITIONAL_COMMA_()| above to implement conditional
+// macro expansion.
+#define PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_(...) \
+  PA_MSVC_EXPAND_ARG(                                  \
+      PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_IMPL_(__VA_ARGS__))
+#define PA_COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_IMPL_(a, b, c, ...) c
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_COMPONENT_EXPORT_H_
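For readers unfamiliar with this preprocessor pattern, below is a small standalone sketch of the conditional-comma selection trick the macros above rely on. The MY_* names are hypothetical and the sketch is GCC/Clang-oriented (it omits the MSVC expansion workaround): when the condition expands to 1, an extra comma shifts the argument list so the third-argument selector picks the "consequent" token; otherwise it picks the "alternate".

#include <cstdio>

#define MY_SELECT_THIRD_IMPL_(a, b, c, ...) c
#define MY_SELECT_THIRD_(...) MY_SELECT_THIRD_IMPL_(__VA_ARGS__)
#define MY_COMMA_IF_1_1_ ,
#define MY_COMMA_IF_1_IMPL_(x, ...) MY_COMMA_IF_1_##x##_
#define MY_COMMA_IF_1_(...) MY_COMMA_IF_1_IMPL_(__VA_ARGS__, )
#define MY_CONDITIONAL_(condition, consequent, alternate) \
  MY_SELECT_THIRD_(MY_COMMA_IF_1_(condition), consequent, alternate)

#define IS_FOO_IMPL 1

int main() {
  // IS_FOO_IMPL is defined as 1, so the comma trick selects "export".
  std::puts(MY_CONDITIONAL_(IS_FOO_IMPL, "export", "import"));
  // IS_BAR_IMPL is undefined, so no comma is produced and "import" is chosen.
  std::puts(MY_CONDITIONAL_(IS_BAR_IMPL, "export", "import"));
  return 0;
}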
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export_pa_unittest.cc
new file mode 100644
index 0000000..bb4fd2b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export_pa_unittest.cc
@@ -0,0 +1,82 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base {
+namespace {
+
+using ComponentExportTestPA = testing::Test;
+
+#define IS_TEST_COMPONENT_A_IMPL 1
+#define IS_TEST_COMPONENT_B_IMPL
+#define IS_TEST_COMPONENT_C_IMPL 0
+#define IS_TEST_COMPONENT_D_IMPL 2
+#define IS_TEST_COMPONENT_E_IMPL xyz
+
+TEST(ComponentExportTestPA, ImportExport) {
+  // Defined as 1. Treat as export.
+  EXPECT_EQ(1, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_A));
+
+  // Defined, but empty. Treat as import.
+  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_B));
+
+  // Defined, but 0. Treat as import.
+  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_C));
+
+  // Defined, but some other arbitrary thing that isn't 1. Treat as import.
+  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_D));
+  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_E));
+
+  // Undefined. Treat as import.
+  EXPECT_EQ(0, PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_F));
+
+  // And just for good measure, ensure that the macros evaluate properly in the
+  // context of preprocessor #if blocks.
+#if PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_A)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_B)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_C)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_D)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_E)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !PA_INSIDE_COMPONENT_IMPL(TEST_COMPONENT_F)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+}
+
+#undef IS_TEST_COMPONENT_A_IMPL
+#undef IS_TEST_COMPONENT_B_IMPL
+#undef IS_TEST_COMPONENT_C_IMPL
+#undef IS_TEST_COMPONENT_D_IMPL
+#undef IS_TEST_COMPONENT_E_IMPL
+
+}  // namespace
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.cc
new file mode 100644
index 0000000..0000d86
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.cc
@@ -0,0 +1,203 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+
+#include <inttypes.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+#include <sstream>
+#include <utility>
+
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && \
+    (BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS))
+#include <asm/hwcap.h>
+#include <sys/auxv.h>
+
+// Temporary definitions until a new hwcap.h is pulled in everywhere.
+// https://crbug.com/1265965
+#ifndef HWCAP2_MTE
+#define HWCAP2_MTE (1 << 18)
+#define HWCAP2_BTI (1 << 17)
+#endif
+#endif
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(COMPILER_MSVC)
+#include <immintrin.h>  // For _xgetbv()
+#include <intrin.h>
+#endif
+#endif
+
+namespace partition_alloc::internal::base {
+
+CPU::CPU() {
+  Initialize();
+}
+CPU::CPU(CPU&&) = default;
+
+namespace {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if !defined(COMPILER_MSVC)
+
+#if defined(__pic__) && defined(__i386__)
+
+void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile(
+      "mov %%ebx, %%edi\n"
+      "cpuid\n"
+      "xchg %%edi, %%ebx\n"
+      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
+        "=d"(cpu_info[3])
+      : "a"(info_type), "c"(0));
+}
+
+#else
+
+void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile("cpuid\n"
+                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
+                     "=d"(cpu_info[3])
+                   : "a"(info_type), "c"(0));
+}
+
+#endif
+#endif  // !defined(COMPILER_MSVC)
+
+// xgetbv returns the value of an Intel Extended Control Register (XCR).
+// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
+uint64_t xgetbv(uint32_t xcr) {
+#if defined(COMPILER_MSVC)
+  return _xgetbv(xcr);
+#else
+  uint32_t eax, edx;
+
+  __asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
+  return (static_cast<uint64_t>(edx) << 32) | eax;
+#endif  // defined(COMPILER_MSVC)
+}
+
+#endif  // ARCH_CPU_X86_FAMILY
+
+}  // namespace
+
+void CPU::Initialize() {
+#if defined(ARCH_CPU_X86_FAMILY)
+  int cpu_info[4] = {-1};
+
+  // __cpuid with an InfoType argument of 0 returns the number of
+  // valid Ids in CPUInfo[0] and the CPU identification string in
+  // the other three array elements. The CPU identification string is
+  // not in linear order. The code below arranges the information
+  // in a human readable form. The human readable order is CPUInfo[1] |
+  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+  // before using memcpy() to copy these three array elements to |cpu_string|.
+  __cpuid(cpu_info, 0);
+  int num_ids = cpu_info[0];
+  std::swap(cpu_info[2], cpu_info[3]);
+
+  // Interpret CPU feature information.
+  if (num_ids > 0) {
+    int cpu_info7[4] = {0};
+    __cpuid(cpu_info, 1);
+    if (num_ids >= 7) {
+      __cpuid(cpu_info7, 7);
+    }
+    signature_ = cpu_info[0];
+    stepping_ = cpu_info[0] & 0xf;
+    type_ = (cpu_info[0] >> 12) & 0x3;
+    has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+    has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+    has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+    has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+    has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
+
+    // "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1."
+    // See https://lwn.net/Articles/301888/
+    // This is checking for any hypervisor. Hypervisors may choose not to
+    // announce themselves. Hypervisors trap CPUID and sometimes return
+    // different results to underlying hardware.
+    is_running_in_vm_ = (cpu_info[2] & 0x80000000) != 0;
+
+    // AVX instructions will generate an illegal instruction exception unless
+    //   a) they are supported by the CPU,
+    //   b) XSAVE is supported by the CPU and
+    //   c) XSAVE is enabled by the kernel.
+    // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
+    //
+    // In addition, we have observed some crashes with the xgetbv instruction
+    // even after following Intel's example code. (See crbug.com/375968.)
+    // Because of that, we also test the XSAVE bit because its description in
+    // the CPUID documentation suggests that it signals xgetbv support.
+    has_avx_ = (cpu_info[2] & 0x10000000) != 0 &&
+               (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
+               (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
+               (xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
+    has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
+    has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
+    has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
+
+    has_pku_ = (cpu_info7[2] & 0x00000010) != 0;
+  }
+
+  // Query the maximum supported extended CPUID parameter.
+  __cpuid(cpu_info, 0x80000000);
+  const int max_parameter = cpu_info[0];
+
+  static constexpr int kParameterContainingNonStopTimeStampCounter = 0x80000007;
+  if (max_parameter >= kParameterContainingNonStopTimeStampCounter) {
+    __cpuid(cpu_info, kParameterContainingNonStopTimeStampCounter);
+    has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
+  }
+
+  if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) {
+    int cpu_info_hv[4] = {};
+    __cpuid(cpu_info_hv, 0x40000000);
+    if (cpu_info_hv[1] == 0x7263694D &&  // Micr
+        cpu_info_hv[2] == 0x666F736F &&  // osof
+        cpu_info_hv[3] == 0x76482074) {  // t Hv
+      // If CPUID says we have a variant TSC and a hypervisor has identified
+      // itself and the hypervisor says it is Microsoft Hyper-V, then treat
+      // TSC as invariant.
+      //
+      // Microsoft Hyper-V hypervisor reports variant TSC as there are some
+      // scenarios (eg. VM live migration) where the TSC is variant, but for
+      // our purposes we can treat it as invariant.
+      has_non_stop_time_stamp_counter_ = true;
+    }
+  }
+#elif defined(ARCH_CPU_ARM_FAMILY)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+#if defined(ARCH_CPU_ARM64)
+  // Check for Armv8.5-A BTI/MTE support, exposed via HWCAP2
+  unsigned long hwcap2 = getauxval(AT_HWCAP2);
+  has_mte_ = hwcap2 & HWCAP2_MTE;
+  has_bti_ = hwcap2 & HWCAP2_BTI;
+#endif
+
+#elif BUILDFLAG(IS_WIN)
+  // Windows makes high-resolution thread timing information available in
+  // user-space.
+  has_non_stop_time_stamp_counter_ = true;
+#endif
+#endif
+}
+
+const CPU& CPU::GetInstanceNoAllocation() {
+  static const CPU cpu;
+  return cpu;
+}
+
+}  // namespace partition_alloc::internal::base
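As an aside, a minimal standalone sketch of the same kind of feature query, using the GCC/Clang <cpuid.h> helper instead of the hand-written inline assembly above. It is x86-only, and the bit masks mirror the ones checked in CPU::Initialize(); it is not part of PartitionAlloc.

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  // Leaf 1 carries the basic feature flags in ECX/EDX.
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    std::puts("CPUID leaf 1 not supported");
    return 1;
  }
  const bool has_sse2 = (edx & (1u << 26)) != 0;  // 0x04000000, as above.
  const bool has_sse3 = (ecx & (1u << 0)) != 0;   // 0x00000001, as above.
  std::printf("sse2=%d sse3=%d\n", has_sse2, has_sse3);
  return 0;
}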
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h
new file mode 100644
index 0000000..ec3590c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h
@@ -0,0 +1,109 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CPU_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CPU_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+// Query information about the processor.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) CPU final {
+ public:
+  CPU();
+  CPU(CPU&&);
+  CPU(const CPU&) = delete;
+
+  // Get a preallocated instance of CPU.
+  // This can be used in very early application startup. The instance of CPU is
+  // created without querying the brand string, so constructing it does not
+  // allocate.
+  static const CPU& GetInstanceNoAllocation();
+
+  enum IntelMicroArchitecture {
+    PENTIUM = 0,
+    SSE = 1,
+    SSE2 = 2,
+    SSE3 = 3,
+    SSSE3 = 4,
+    SSE41 = 5,
+    SSE42 = 6,
+    AVX = 7,
+    AVX2 = 8,
+    FMA3 = 9,
+    MAX_INTEL_MICRO_ARCHITECTURE = 10
+  };
+
+  // Accessors for CPU information.
+  int signature() const { return signature_; }
+  int stepping() const { return stepping_; }
+  int type() const { return type_; }
+  bool has_mmx() const { return has_mmx_; }
+  bool has_sse() const { return has_sse_; }
+  bool has_sse2() const { return has_sse2_; }
+  bool has_sse3() const { return has_sse3_; }
+  bool has_ssse3() const { return has_ssse3_; }
+  bool has_sse41() const { return has_sse41_; }
+  bool has_sse42() const { return has_sse42_; }
+  bool has_popcnt() const { return has_popcnt_; }
+  bool has_avx() const { return has_avx_; }
+  bool has_fma3() const { return has_fma3_; }
+  bool has_avx2() const { return has_avx2_; }
+  bool has_aesni() const { return has_aesni_; }
+  bool has_non_stop_time_stamp_counter() const {
+    return has_non_stop_time_stamp_counter_;
+  }
+  bool is_running_in_vm() const { return is_running_in_vm_; }
+
+  // Armv8.5-A extensions for control flow and memory safety.
+#if defined(ARCH_CPU_ARM_FAMILY)
+  bool has_mte() const { return has_mte_; }
+  bool has_bti() const { return has_bti_; }
+#else
+  constexpr bool has_mte() const { return false; }
+  constexpr bool has_bti() const { return false; }
+#endif
+
+#if defined(ARCH_CPU_X86_FAMILY)
+  // Memory protection key support for user-mode pages
+  bool has_pku() const { return has_pku_; }
+#else
+  constexpr bool has_pku() const { return false; }
+#endif
+
+ private:
+  // Query the processor for CPUID information.
+  void Initialize();
+
+  int signature_ = 0;  // raw form of type, family, model, and stepping
+  int type_ = 0;       // processor type
+  int stepping_ = 0;   // processor revision number
+  bool has_mmx_ = false;
+  bool has_sse_ = false;
+  bool has_sse2_ = false;
+  bool has_sse3_ = false;
+  bool has_ssse3_ = false;
+  bool has_sse41_ = false;
+  bool has_sse42_ = false;
+  bool has_popcnt_ = false;
+  bool has_avx_ = false;
+  bool has_fma3_ = false;
+  bool has_avx2_ = false;
+  bool has_aesni_ = false;
+#if defined(ARCH_CPU_ARM_FAMILY)
+  bool has_mte_ = false;  // Armv8.5-A MTE (Memory Tagging Extension)
+  bool has_bti_ = false;  // Armv8.5-A BTI (Branch Target Identification)
+#endif
+#if defined(ARCH_CPU_X86_FAMILY)
+  bool has_pku_ = false;
+#endif
+  bool has_non_stop_time_stamp_counter_ = false;
+  bool is_running_in_vm_ = false;
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CPU_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu_pa_unittest.cc
new file mode 100644
index 0000000..ea168af
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu_pa_unittest.cc
@@ -0,0 +1,163 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc {
+
+// Tests whether we can run the extended instructions reported by the CPU
+// information. This test actually executes some extended instructions (such as
+// MMX, SSE, etc.) supported by the CPU and verifies that they run without
+// raising "undefined instruction" exceptions. That is, the test succeeds if it
+// finishes without a crash.
+TEST(CPUPA, RunExtendedInstructions) {
+  // Retrieve the CPU information.
+  internal::base::CPU cpu;
+#if defined(ARCH_CPU_X86_FAMILY)
+
+  ASSERT_TRUE(cpu.has_mmx());
+  ASSERT_TRUE(cpu.has_sse());
+  ASSERT_TRUE(cpu.has_sse2());
+  ASSERT_TRUE(cpu.has_sse3());
+
+// GCC and clang instruction test.
+#if defined(COMPILER_GCC)
+  // Execute an MMX instruction.
+  __asm__ __volatile__("emms\n" : : : "mm0");
+
+  // Execute an SSE instruction.
+  __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");
+
+  // Execute an SSE 2 instruction.
+  __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");
+
+  // Execute an SSE 3 instruction.
+  __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");
+
+  if (cpu.has_ssse3()) {
+    // Execute a Supplemental SSE 3 instruction.
+    __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_sse41()) {
+    // Execute an SSE 4.1 instruction.
+    __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_sse42()) {
+    // Execute an SSE 4.2 instruction.
+    __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
+  }
+
+  if (cpu.has_popcnt()) {
+    // Execute a POPCNT instruction.
+    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
+  }
+
+  if (cpu.has_avx()) {
+    // Execute an AVX instruction.
+    __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
+  }
+
+  if (cpu.has_fma3()) {
+    // Execute a FMA3 instruction.
+    __asm__ __volatile__("vfmadd132ps %%xmm0, %%xmm0, %%xmm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_avx2()) {
+    // Execute an AVX 2 instruction.
+    __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_pku()) {
+    // rdpkru
+    uint32_t pkru;
+    __asm__ __volatile__(".byte 0x0f,0x01,0xee\n"
+                         : "=a"(pkru)
+                         : "c"(0), "d"(0));
+  }
+// Visual C 32 bit and ClangCL 32/64 bit test.
+#elif defined(COMPILER_MSVC) &&   \
+    (defined(ARCH_CPU_32_BITS) || \
+     (defined(ARCH_CPU_64_BITS) && defined(__clang__)))
+
+  // Execute an MMX instruction.
+  __asm emms;
+
+  // Execute an SSE instruction.
+  __asm xorps xmm0, xmm0;
+
+  // Execute an SSE 2 instruction.
+  __asm psrldq xmm0, 0;
+
+  // Execute an SSE 3 instruction.
+  __asm addsubpd xmm0, xmm0;
+
+  if (cpu.has_ssse3()) {
+    // Execute a Supplemental SSE 3 instruction.
+    __asm psignb xmm0, xmm0;
+  }
+
+  if (cpu.has_sse41()) {
+    // Execute an SSE 4.1 instruction.
+    __asm pmuldq xmm0, xmm0;
+  }
+
+  if (cpu.has_sse42()) {
+    // Execute an SSE 4.2 instruction.
+    __asm crc32 eax, eax;
+  }
+
+  if (cpu.has_popcnt()) {
+    // Execute a POPCNT instruction.
+    __asm popcnt eax, eax;
+  }
+
+  if (cpu.has_avx()) {
+    // Execute an AVX instruction.
+    __asm vzeroupper;
+  }
+
+  if (cpu.has_fma3()) {
+    // Execute a FMA3 instruction.
+    __asm vfmadd132ps xmm0, xmm0, xmm0;
+  }
+
+  if (cpu.has_avx2()) {
+    // Execute an AVX 2 instruction.
+    __asm vpunpcklbw ymm0, ymm0, ymm0
+  }
+#endif  // defined(COMPILER_GCC)
+#endif  // defined(ARCH_CPU_X86_FAMILY)
+
+#if defined(ARCH_CPU_ARM64)
+  // Check that the CPU is correctly reporting support for the Armv8.5-A memory
+  // tagging extension. The new MTE instructions aren't encoded in NOP space
+  // like BTI/Pointer Authentication and will crash older cores with a SIGILL if
+  // used incorrectly. This test demonstrates how it should be done and that
+  // this approach works.
+  if (cpu.has_mte()) {
+#if !defined(__ARM_FEATURE_MEMORY_TAGGING)
+    // In this section, we're running on an MTE-compatible core, but we're
+    // building this file without MTE support. Fail this test to indicate that
+    // there's a problem with the base/ build configuration.
+    GTEST_FAIL()
+        << "MTE support detected (but base/ built without MTE support)";
+#else
+    char ptr[32];
+    uint64_t val;
+    // Execute a trivial MTE instruction. Normally, MTE should be used via the
+    // intrinsics documented at
+    // https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics,
+    // this test uses the irg (Insert Random Tag) instruction directly to make
+    // sure that it's not optimized out by the compiler.
+    __asm__ __volatile__("irg %0, %1" : "=r"(val) : "r"(ptr));
+#endif  // __ARM_FEATURE_MEMORY_TAGGING
+  }
+#endif  // ARCH_CPU_ARM64
+}
+
+}  // namespace partition_alloc
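A related aside: on GCC and Clang, x86 feature checks like the ones exercised above can also be made with the __builtin_cpu_supports() builtin, without parsing CPUID manually. A minimal sketch (not part of PartitionAlloc, x86-only):

#include <cstdio>

int main() {
  // The builtin consults the CPU feature bits at runtime.
  if (__builtin_cpu_supports("avx2")) {
    std::puts("AVX2 available: safe to dispatch to an AVX2 code path");
  } else {
    std::puts("AVX2 not available: use the scalar/SSE2 fallback");
  }
  return 0;
}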
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h
new file mode 100644
index 0000000..b15e4d3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h
@@ -0,0 +1,34 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
+
+#include <type_traits>
+
+namespace partition_alloc::internal::base {
+
+// std::is_constant_evaluated was introduced in C++20. PartitionAlloc's minimum
+// supported C++ version is C++17.
+#if defined(__cpp_lib_is_constant_evaluated) && \
+    __cpp_lib_is_constant_evaluated >= 201811L
+
+using std::is_constant_evaluated;
+
+#else
+
+// Implementation of C++20's std::is_constant_evaluated.
+//
+// References:
+// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
+// - https://wg21.link/meta.const.eval
+constexpr bool is_constant_evaluated() noexcept {
+  return __builtin_is_constant_evaluated();
+}
+
+#endif
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
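As a usage sketch, this is the typical pattern the helper above enables: a portable loop for constant evaluation and a builtin-based path at runtime. It assumes a C++20 toolchain and uses std::is_constant_evaluated directly (the shim above would be a drop-in replacement); PopCount is an illustrative name and __builtin_popcount is a GCC/Clang builtin.

#include <cstdio>
#include <type_traits>

constexpr int PopCount(unsigned v) {
  if (std::is_constant_evaluated()) {
    // Portable loop, usable by the constant evaluator.
    int n = 0;
    while (v) {
      n += static_cast<int>(v & 1u);
      v >>= 1;
    }
    return n;
  }
  // Runtime path may use a compiler builtin.
  return __builtin_popcount(v);
}

static_assert(PopCount(0xF0u) == 4, "evaluated at compile time");

int main() {
  unsigned x = 0b1011u;
  std::printf("popcount(%u) = %d\n", x, PopCount(x));  // runtime path
  return 0;
}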
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.cc
new file mode 100644
index 0000000..12ac706
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.cc
@@ -0,0 +1,15 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+
+namespace partition_alloc::internal::base::debug {
+
+// This file/function should be excluded from LTO/LTCG to ensure that the
+// compiler can't see this function's implementation when compiling calls to it.
+PA_NOINLINE void Alias(const void* var) {}
+
+}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h
new file mode 100644
index 0000000..2289c39
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h
@@ -0,0 +1,92 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base::debug {
+
+// Make the optimizer think that |var| is aliased. This can be used to inhibit
+// three different kinds of optimizations:
+//
+// Case #1: Prevent a local variable from being optimized out if it would not
+// otherwise be live at the point of a potential crash. This can only be done
+// with local variables, not globals, object members, or function return values
+// - these must be copied to locals if you want to ensure they are recorded in
+// crash dumps. Function arguments are fine to use since the
+// base::debug::Alias() call on them will make sure they are copied to the stack
+// even if they were passed in a register. Note that if the local variable is a
+// pointer then its value will be retained but the memory that it points to will
+// probably not be saved in the crash dump - by default only stack memory is
+// saved. Therefore the aliasing technique is usually only worthwhile with
+// non-pointer variables. If you have a pointer to an object and you want to
+// retain the object's state you need to copy the object or its fields to local
+// variables.
+//
+// Example usage:
+//   int last_error = err_;
+//   base::debug::Alias(&last_error);
+//   char name_copy[16];
+//   strncpy(name_copy, p->name, sizeof(name_copy)-1);
+//   name_copy[sizeof(name_copy)-1] = '\0';
+//   base::debug::Alias(name_copy);
+//   CHECK(false);
+//
+// Case #2: Prevent a tail call into a function. This is useful to make sure the
+// function containing the call to base::debug::Alias() will be present in the
+// call stack. In this case there is no memory that needs to be on
+// the stack so we can use nullptr. The call to base::debug::Alias() needs to
+// happen after the call that is suspected to be tail called. Note: This
+// technique will prevent tail calls at the specific call site only. To prevent
+// them for all invocations of a function look at PA_NOT_TAIL_CALLED.
+//
+// Example usage:
+//   PA_NOINLINE void Foo(){
+//     ... code ...
+//
+//     Bar();
+//     base::debug::Alias(nullptr);
+//   }
+//
+// Case #3: Prevent code folding of a non-unique function. Code folding can
+// cause the same address to be assigned to different functions if they are
+// identical. If finding the precise signature of a function in the call-stack
+// is important and it's suspected the function is identical to other functions
+// it can be made unique using PA_NO_CODE_FOLDING which is a wrapper around
+// base::debug::Alias();
+//
+// Example usage:
+//   PA_NOINLINE void Foo(){
+//     PA_NO_CODE_FOLDING();
+//     Bar();
+//   }
+//
+// Finally please note that these effects compound. This means that saving a
+// stack variable (case #1) using base::debug::Alias() will also inhibit
+// tail calls for calls in earlier lines and prevent code folding.
+
+void PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) Alias(const void* var);
+
+}  // namespace partition_alloc::internal::base::debug
+
+// Code folding is a linker optimization whereby the linker identifies functions
+// that are bit-identical and overlays them. This saves space but it leads to
+// confusing call stacks because multiple symbols are at the same address and
+// it is unpredictable which one will be displayed. Disabling of code folding is
+// particularly useful when function names are used as signatures in crashes.
+// This macro doesn't guarantee that code folding will be prevented but it
+// greatly reduces the odds and always prevents it within one source file.
+// If using in a function that terminates the process it is safest to put the
+// PA_NO_CODE_FOLDING macro at the top of the function.
+// Use like:
+//   void FooBarFailure(size_t size) { PA_NO_CODE_FOLDING(); OOM_CRASH(size); }
+#define PA_NO_CODE_FOLDING()        \
+  const int line_number = __LINE__; \
+  ::partition_alloc::internal::base::debug::Alias(&line_number)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
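To illustrate case #1 outside of PartitionAlloc, here is a self-contained sketch of the same idea with a hypothetical KeepAlive() helper: an opaque, never-inlined call forces locals to be materialized on the stack. In real code the helper lives in a separate translation unit excluded from LTO (as alias.cc does above) so the optimizer cannot see that its body is empty; defining it in the same file, as done here for brevity, weakens that guarantee.

#include <cstdio>
#include <cstring>

// Deliberately empty; being opaque to the optimizer is the whole point.
__attribute__((noinline)) void KeepAlive(const void* var) {
  (void)var;
}

void FailWithContext(const char* name, int error_code) {
  char name_copy[16];
  std::strncpy(name_copy, name, sizeof(name_copy) - 1);
  name_copy[sizeof(name_copy) - 1] = '\0';
  KeepAlive(name_copy);    // Keeps the copied name on the stack.
  KeepAlive(&error_code);  // Keeps the error code in the dump.
  std::fprintf(stderr, "failure in %s (%d)\n", name_copy, error_code);
}

int main() {
  FailWithContext("resource_loader", -7);
  return 0;
}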
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.cc
new file mode 100644
index 0000000..e789c54
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.cc
@@ -0,0 +1,252 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+#include <stdint.h>
+
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(__GLIBC__)
+extern "C" void* __libc_stack_end;
+#endif
+
+namespace partition_alloc::internal::base::debug {
+namespace {
+
+#if BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+// GCC and LLVM generate slightly different frames on ARM, see
+// https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+// x86-compatible frame, while GCC needs adjustment.
+constexpr size_t kStackFrameAdjustment = sizeof(uintptr_t);
+#else
+constexpr size_t kStackFrameAdjustment = 0;
+#endif
+
+// On Arm-v8.3+ systems with pointer authentication codes (PAC), signature bits
+// are set in the top bits of the pointer, which confuses test assertions.
+// Because the signature size can vary based on the system configuration, use
+// the xpaclri instruction to remove the signature.
+static uintptr_t StripPointerAuthenticationBits(uintptr_t ptr) {
+#if defined(ARCH_CPU_ARM64)
+  // A single Chromium binary currently spans all Arm systems (including those
+  // with and without pointer authentication). xpaclri is used here because it's
+  // in the HINT space and treated as a no-op on older Arm cores (unlike the
+  // more generic xpaci which has a new encoding). The downside is that ptr has
+  // to be moved to x30 to use this instruction. TODO([email protected]):
+  // replace with an intrinsic once that is available.
+  register uintptr_t x30 __asm("x30") = ptr;
+  asm("xpaclri" : "+r"(x30));
+  return x30;
+#else
+  // No-op on other platforms.
+  return ptr;
+#endif
+}
+
+uintptr_t GetNextStackFrame(uintptr_t fp) {
+  const uintptr_t* fp_addr = reinterpret_cast<const uintptr_t*>(fp);
+  PA_MSAN_UNPOISON(fp_addr, sizeof(uintptr_t));
+  return fp_addr[0] - kStackFrameAdjustment;
+}
+
+uintptr_t GetStackFramePC(uintptr_t fp) {
+  const uintptr_t* fp_addr = reinterpret_cast<const uintptr_t*>(fp);
+  PA_MSAN_UNPOISON(&fp_addr[1], sizeof(uintptr_t));
+  return StripPointerAuthenticationBits(fp_addr[1]);
+}
+
+bool IsStackFrameValid(uintptr_t fp, uintptr_t prev_fp, uintptr_t stack_end) {
+  // With the stack growing downwards, an older stack frame must be
+  // at a greater address than the current one.
+  if (fp <= prev_fp) {
+    return false;
+  }
+
+  // Assume huge stack frames are bogus.
+  if (fp - prev_fp > 100000) {
+    return false;
+  }
+
+  // Check alignment.
+  if (fp & (sizeof(uintptr_t) - 1)) {
+    return false;
+  }
+
+  if (stack_end) {
+    // Both fp[0] and fp[1] must be within the stack.
+    if (fp > stack_end - 2 * sizeof(uintptr_t)) {
+      return false;
+    }
+
+    // Additional check to filter out false positives.
+    if (GetStackFramePC(fp) < 32768) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// ScanStackForNextFrame() scans the stack for a valid frame to allow unwinding
+// past system libraries. Only supported on Linux where system libraries are
+// usually in the middle of the trace:
+//
+//   TraceStackFramePointers
+//   <more frames from Chrome>
+//   base::WorkSourceDispatch   <-- unwinding stops (next frame is invalid),
+//   g_main_context_dispatch        ScanStackForNextFrame() is called
+//   <more frames from glib>
+//   g_main_context_iteration
+//   base::MessagePumpGlib::Run <-- ScanStackForNextFrame() finds valid frame,
+//   base::RunLoop::Run             unwinding resumes
+//   <more frames from Chrome>
+//   __libc_start_main
+//
+// ScanStackForNextFrame() returns 0 if it couldn't find a valid frame
+// (or if stack scanning is not supported on the current platform).
+uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
+  // Enough to resume almost all prematurely terminated traces.
+  constexpr size_t kMaxStackScanArea = 8192;
+
+  if (!stack_end) {
+    // Too dangerous to scan without knowing where the stack ends.
+    return 0;
+  }
+
+  fp += sizeof(uintptr_t);  // current frame is known to be invalid
+  uintptr_t last_fp_to_scan =
+      std::min(fp + kMaxStackScanArea, stack_end) - sizeof(uintptr_t);
+  for (; fp <= last_fp_to_scan; fp += sizeof(uintptr_t)) {
+    uintptr_t next_fp = GetNextStackFrame(fp);
+    if (IsStackFrameValid(next_fp, fp, stack_end)) {
+      // Check two frames deep. Since stack frame is just a pointer to
+      // a higher address on the stack, it's relatively easy to find
+      // something that looks like one. However two linked frames are
+      // far less likely to be bogus.
+      uintptr_t next2_fp = GetNextStackFrame(next_fp);
+      if (IsStackFrameValid(next2_fp, next_fp, stack_end)) {
+        return fp;
+      }
+    }
+  }
+
+  return 0;
+}
+
+#endif  // BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+}  // namespace
+
+#if BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+// We force this function to be inlined into its callers (e.g.
+// TraceStackFramePointers()) in all build modes so we don't have to worry about
+// conditionally skipping a frame based on potential inlining or tail calls.
+__attribute__((always_inline)) size_t TraceStackFramePointersInternal(
+    uintptr_t fp,
+    uintptr_t stack_end,
+    size_t max_depth,
+    size_t skip_initial,
+    bool enable_scanning,
+    const void** out_trace) {
+  size_t depth = 0;
+  while (depth < max_depth) {
+    uintptr_t pc = GetStackFramePC(fp);
+    if (skip_initial != 0) {
+      skip_initial--;
+    } else {
+      out_trace[depth++] = reinterpret_cast<const void*>(pc);
+    }
+
+    uintptr_t next_fp = GetNextStackFrame(fp);
+    if (IsStackFrameValid(next_fp, fp, stack_end)) {
+      fp = next_fp;
+      continue;
+    }
+
+    if (!enable_scanning) {
+      break;
+    }
+
+    next_fp = ScanStackForNextFrame(fp, stack_end);
+    if (next_fp) {
+      fp = next_fp;
+    } else {
+      break;
+    }
+  }
+
+  return depth;
+}
+
+PA_NOINLINE size_t TraceStackFramePointers(const void** out_trace,
+                                           size_t max_depth,
+                                           size_t skip_initial,
+                                           bool enable_scanning) {
+  return TraceStackFramePointersInternal(
+      reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) -
+          kStackFrameAdjustment,
+      GetStackEnd(), max_depth, skip_initial, enable_scanning, out_trace);
+}
+
+#endif  // BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+#if BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+uintptr_t GetStackEnd() {
+#if BUILDFLAG(IS_ANDROID)
+  // Bionic reads proc/maps on every call to pthread_getattr_np() when called
+  // from the main thread. So we need to cache end of stack in that case to get
+  // acceptable performance.
+  // For all other threads pthread_getattr_np() is fast enough as it just reads
+  // values from its pthread_t argument.
+  static uintptr_t main_stack_end = 0;
+
+  bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+  if (is_main_thread && main_stack_end) {
+    return main_stack_end;
+  }
+
+  uintptr_t stack_begin = 0;
+  size_t stack_size = 0;
+  pthread_attr_t attributes;
+  int error = pthread_getattr_np(pthread_self(), &attributes);
+  if (!error) {
+    error = pthread_attr_getstack(
+        &attributes, reinterpret_cast<void**>(&stack_begin), &stack_size);
+    pthread_attr_destroy(&attributes);
+  }
+  PA_BASE_DCHECK(!error);
+
+  uintptr_t stack_end = stack_begin + stack_size;
+  if (is_main_thread) {
+    main_stack_end = stack_end;
+  }
+  return stack_end;  // 0 in case of error
+#elif BUILDFLAG(IS_APPLE)
+  // No easy way to get end of the stack for non-main threads,
+  // see crbug.com/617730.
+  return reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(pthread_self()));
+#else
+
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(__GLIBC__)
+  if (GetCurrentProcId() == PlatformThread::CurrentId()) {
+    // For the main thread we have a shortcut.
+    return reinterpret_cast<uintptr_t>(__libc_stack_end);
+  }
+#endif
+
+  // Don't know how to get end of the stack.
+  return 0;
+#endif
+}
+#endif  // BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h
new file mode 100644
index 0000000..740529a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h
@@ -0,0 +1,74 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_STACK_TRACE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_STACK_TRACE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base::debug {
+
+// Returns end of the stack, or 0 if we couldn't get it.
+#if BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+uintptr_t GetStackEnd();
+#endif
+
+// Record a stack trace with up to |count| frames into |trace|. Returns the
+// number of frames read.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+size_t CollectStackTrace(const void** trace, size_t count);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void PrintStackTrace(const void** trace, size_t count);
+
+#if BUILDFLAG(IS_POSIX)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void OutputStackTrace(unsigned index,
+                      uintptr_t address,
+                      uintptr_t base_address,
+                      const char* module_name,
+                      uintptr_t offset);
+#endif
+
+#if BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+// For stack scanning to be efficient it's very important for the thread to
+// be started by Chrome. In that case we naturally terminate unwinding once
+// we reach the origin of the stack (i.e. GetStackEnd()). If the thread is
+// not started by Chrome (e.g. Android's main thread), then we end up always
+// scanning area at the origin of the stack, wasting time and not finding any
+// frames (since Android libraries don't have frame pointers). Scanning is not
+// enabled on other posix platforms due to legacy reasons.
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+constexpr bool kEnableScanningByDefault = true;
+#else
+constexpr bool kEnableScanningByDefault = false;
+#endif
+
+// Traces the stack by using frame pointers. This function is faster but less
+// reliable than StackTrace. It should work for debug and profiling builds,
+// but not for release builds (although there are some exceptions).
+//
+// Writes at most |max_depth| frames (instruction pointers) into |out_trace|
+// after skipping |skip_initial| frames. Note that the function itself is not
+// added to the trace so |skip_initial| should be 0 in most cases.
+// Returns number of frames written. |enable_scanning| enables scanning on
+// platforms that do not enable scanning by default.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+size_t TraceStackFramePointers(const void** out_trace,
+                               size_t max_depth,
+                               size_t skip_initial,
+                               bool enable_scanning = kEnableScanningByDefault);
+
+#endif  // BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+
+}  // namespace partition_alloc::internal::base::debug
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_DEBUG_STACK_TRACE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_android.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_android.cc
new file mode 100644
index 0000000..bb945a3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_android.cc
@@ -0,0 +1,75 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+
+#include <string.h>
+#include <unistd.h>
+#include <unwind.h>
+
+namespace partition_alloc::internal::base::debug {
+
+namespace {
+
+struct StackCrawlState {
+  StackCrawlState(uintptr_t* frames, size_t max_depth)
+      : frames(frames),
+        frame_count(0),
+        max_depth(max_depth),
+        have_skipped_self(false) {}
+
+  uintptr_t* frames;
+  size_t frame_count;
+  size_t max_depth;
+  bool have_skipped_self;
+};
+
+_Unwind_Reason_Code TraceStackFrame(_Unwind_Context* context, void* arg) {
+  StackCrawlState* state = static_cast<StackCrawlState*>(arg);
+  uintptr_t ip = _Unwind_GetIP(context);
+
+  // The first stack frame is this function itself.  Skip it.
+  if (ip != 0 && !state->have_skipped_self) {
+    state->have_skipped_self = true;
+    return _URC_NO_REASON;
+  }
+
+  state->frames[state->frame_count++] = ip;
+  if (state->frame_count >= state->max_depth) {
+    return _URC_END_OF_STACK;
+  }
+  return _URC_NO_REASON;
+}
+
+}  // namespace
+
+size_t CollectStackTrace(const void** trace, size_t count) {
+  StackCrawlState state(reinterpret_cast<uintptr_t*>(trace), count);
+  _Unwind_Backtrace(&TraceStackFrame, &state);
+  return state.frame_count;
+}
+
+void OutputStackTrace(unsigned index,
+                      uintptr_t address,
+                      uintptr_t base_address,
+                      const char* module_name,
+                      uintptr_t offset) {
+  size_t module_name_len = strlen(module_name);
+
+  char buffer[256];
+  if (module_name_len > 4 &&
+      !strcmp(module_name + module_name_len - 4, ".apk")) {
+    strings::SafeSPrintf(buffer, "#%02d pc 0x%0x %s (offset 0x%0x)\n", index,
+                         address - base_address, module_name, offset);
+  } else {
+    strings::SafeSPrintf(buffer, "#%02d pc 0x%0x %s\n", index,
+                         address - base_address, module_name);
+  }
+  PA_RAW_LOG(INFO, buffer);
+}
+
+}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_linux.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_linux.cc
new file mode 100644
index 0000000..6c61163
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_linux.cc
@@ -0,0 +1,27 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base::debug {
+
+size_t CollectStackTrace(const void** trace, size_t count) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if BUILDFLAG(PA_CAN_UNWIND_WITH_FRAME_POINTERS)
+  // On Linux and Android, the backtrace API internally invokes malloc(), so it
+  // cannot be used while inside a memory allocation. Instead, try tracing
+  // using frame pointers.
+  return base::debug::TraceStackFramePointers(trace, count, 0);
+#else
+  // Not able to obtain stack traces.
+  return 0;
+#endif
+}
+
+}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_mac.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_mac.cc
new file mode 100644
index 0000000..be4678d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_mac.cc
@@ -0,0 +1,38 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "build/build_config.h"
+
+// Surprisingly, uClibc defines __GLIBC__ in some build configs, but
+// execinfo.h and backtrace(3) are really only present in glibc and in macOS
+// libc.
+#if BUILDFLAG(IS_APPLE) || \
+    (defined(__GLIBC__) && !defined(__UCLIBC__) && !defined(__AIX))
+#define HAVE_BACKTRACE
+#include <execinfo.h>
+#endif
+
+namespace partition_alloc::internal::base::debug {
+
+size_t CollectStackTrace(const void** trace, size_t count) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if BUILDFLAG(IS_APPLE) && defined(HAVE_BACKTRACE)
+  // On Apple platforms there is no /proc, so use the backtrace API.
+  // Though the backtrace API man page does not list any possible negative
+  // return values, we take no chances.
+  return base::saturated_cast<size_t>(
+      backtrace(const_cast<void**>(trace), base::saturated_cast<int>(count)));
+#else
+  // Not able to obtain stack traces.
+  return 0;
+#endif
+}
+
+}  // namespace partition_alloc::internal::base::debug
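For comparison, a minimal standalone sketch of the glibc/macOS backtrace(3) API that CollectStackTrace() falls back to above. backtrace_symbols_fd() writes directly to a file descriptor, avoiding malloc in the printing step, though backtrace() itself may allocate on first use, so treat this as a debugging aid rather than an async-signal-safe routine. DumpStackToStderr is an illustrative name, not part of PartitionAlloc.

#include <execinfo.h>
#include <unistd.h>

void DumpStackToStderr() {
  void* frames[64];
  // Capture up to 64 return addresses from the current call stack.
  int count = backtrace(frames, 64);
  // Symbolize (where possible) and write one frame per line to stderr.
  backtrace_symbols_fd(frames, count, STDERR_FILENO);
}

int main() {
  DumpStackToStderr();
  return 0;
}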
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc
new file mode 100644
index 0000000..caa5791
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc
@@ -0,0 +1,413 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+#if !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_APPLE)
+#include <link.h>  // For ElfW() macro.
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+#define HAVE_DLADDR
+#include <dlfcn.h>
+#endif
+
+namespace partition_alloc::internal::base::debug {
+
+namespace {
+
+#if !BUILDFLAG(IS_APPLE)
+
+constexpr size_t kBufferSize = 4096u;
+
+enum {
+  kMapReadable = 1u,
+  kMapWritable = 2u,
+  kMapExecutable = 4u,
+  kMapPrivate = 8u,
+};
+
+bool ParseAddress(const char** ptr,
+                  const char* end,
+                  uintptr_t* address_return) {
+  const char* start = *ptr;
+
+  // Each address byte prints as two hex characters.
+  const char* max_address = start + sizeof(void*) * 2;
+  uintptr_t value = 0;
+
+  const char* p = start;
+  for (; p < end && p < max_address; ++p) {
+    if ('0' <= *p && *p <= '9') {
+      value = (value << 4) | (unsigned char)(*p - '0');
+    } else if ('a' <= *p && *p <= 'f') {
+      value = (value << 4) | (unsigned char)(*p - 'a' + 10);
+    } else {
+      break;
+    }
+  }
+  if (p == start) {
+    return false;
+  }
+  *ptr = p;
+  if (address_return) {
+    *address_return = value;
+  }
+  return true;
+}
+
+bool ParseInteger(const char** ptr, const char* end) {
+  const char* start = *ptr;
+
+  const char* p = start;
+  for (; p < end && '0' <= *p && *p <= '9'; ++p)
+    ;
+  *ptr = p;
+  return p > start;
+}
+
+bool ParsePermissions(const char** ptr,
+                      const char* end,
+                      unsigned* permission_return) {
+  unsigned permission = 0u;
+  const char* p = *ptr;
+  if (p < end && (*p == 'r' || *p == '-')) {
+    permission |= (*p == 'r') ? kMapReadable : 0u;
+    ++p;
+  } else {
+    return false;
+  }
+  if (p < end && (*p == 'w' || *p == '-')) {
+    permission |= (*p == 'w') ? kMapWritable : 0u;
+    ++p;
+  } else {
+    return false;
+  }
+  if (p < end && (*p == 'x' || *p == '-')) {
+    permission |= (*p == 'x') ? kMapExecutable : 0u;
+    ++p;
+  } else {
+    return false;
+  }
+  if (p < end && (*p == 'p' || *p == '-' || *p == 's')) {
+    permission |= (*p == 'p') ? kMapPrivate : 0u;
+    ++p;
+  } else {
+    return false;
+  }
+  *ptr = p;
+  if (permission_return) {
+    *permission_return = permission;
+  }
+  return true;
+}
+
+bool ParseMapsLine(const char* line_start,
+                   const char* line_end,
+                   uintptr_t* start_address_return,
+                   uintptr_t* end_address_return,
+                   unsigned* permission_return,
+                   uintptr_t* offset_return,
+                   const char** module_name) {
+  const char* ptr = line_start;
+  if (!ParseAddress(&ptr, line_end, start_address_return)) {
+    return false;
+  }
+  // Delimiter
+  if (ptr >= line_end || *ptr != '-') {
+    return false;
+  }
+  ++ptr;
+  if (!ParseAddress(&ptr, line_end, end_address_return)) {
+    return false;
+  }
+
+  // Delimiter
+  if (ptr >= line_end || *ptr != ' ') {
+    return false;
+  }
+  ++ptr;
+
+  // Parse permissions.
+  if (!ParsePermissions(&ptr, line_end, permission_return)) {
+    return false;
+  }
+
+  // Delimiter
+  if (ptr >= line_end || *ptr != ' ') {
+    return false;
+  }
+  ++ptr;
+
+  // Parse the offset if present.
+  if (ParseAddress(&ptr, line_end, offset_return)) {
+    if (ptr >= line_end || *ptr != ' ') {
+      return false;
+    }
+    ++ptr;
+
+    // skip dev
+    if (!ParseAddress(&ptr, line_end, nullptr)) {
+      return false;
+    }
+    if (ptr >= line_end || *ptr != ':') {
+      return false;
+    }
+    ++ptr;
+    if (!ParseAddress(&ptr, line_end, nullptr)) {
+      return false;
+    }
+
+    // Delimiter
+    if (ptr >= line_end || *ptr != ' ') {
+      return false;
+    }
+    ++ptr;
+
+    // skip inode
+    if (!ParseInteger(&ptr, line_end)) {
+      return false;
+    }
+  } else {
+    if (offset_return) {
+      *offset_return = 0u;
+    }
+  }
+  if (ptr >= line_end || *ptr != ' ') {
+    return false;
+  }
+  for (; ptr < line_end && *ptr == ' '; ++ptr)
+    ;
+  if (ptr <= line_end && module_name) {
+    *module_name = ptr;
+  }
+  return true;
+}
+
+#if !BUILDFLAG(IS_ANDROID)
+
+ssize_t ReadFromOffset(const int fd,
+                       void* buf,
+                       const size_t count,
+                       const size_t offset) {
+  char* buf0 = reinterpret_cast<char*>(buf);
+  size_t num_bytes = 0;
+  while (num_bytes < count) {
+    ssize_t len;
+    len = PA_HANDLE_EINTR(pread(fd, buf0 + num_bytes, count - num_bytes,
+                                static_cast<off_t>(offset + num_bytes)));
+    if (len < 0) {  // There was an error other than EINTR.
+      return -1;
+    }
+    if (len == 0) {  // Reached EOF.
+      break;
+    }
+    num_bytes += static_cast<size_t>(len);
+  }
+  return static_cast<ssize_t>(num_bytes);
+}
+
+void UpdateBaseAddress(unsigned permissions,
+                       uintptr_t start_address,
+                       uintptr_t* base_address) {
+  // Determine the base address by reading ELF headers in process memory.
+  // Skip non-readable maps.
+  if (!(permissions & kMapReadable)) {
+    return;
+  }
+
+  int mem_fd = PA_HANDLE_EINTR(open("/proc/self/mem", O_RDONLY));
+  if (mem_fd == -1) {
+    PA_RAW_LOG(ERROR, "Failed to open /proc/self/mem\n");
+    return;
+  }
+
+  ElfW(Ehdr) ehdr;
+  ssize_t len =
+      ReadFromOffset(mem_fd, &ehdr, sizeof(ElfW(Ehdr)), start_address);
+  if (len == sizeof(ElfW(Ehdr))) {
+    if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
+      switch (ehdr.e_type) {
+        case ET_EXEC:
+          *base_address = 0;
+          break;
+        case ET_DYN:
+          // Find the segment containing file offset 0. This will correspond
+          // to the ELF header that we just read. Normally this will have
+          // virtual address 0, but this is not guaranteed. We must subtract
+          // the virtual address from the address where the ELF header was
+          // mapped to get the base address.
+          //
+          // If we fail to find a segment for file offset 0, use the address
+          // of the ELF header as the base address.
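+          //
+          // Illustrative (made-up) numbers: if the ELF header is mapped at
+          // 0x7f0000001000 and the PT_LOAD segment with p_offset == 0 has
+          // p_vaddr == 0x1000, the base address is 0x7f0000000000.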
+          *base_address = start_address;
+          for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
+            ElfW(Phdr) phdr;
+            len =
+                ReadFromOffset(mem_fd, &phdr, sizeof(ElfW(Phdr)),
+                               start_address + ehdr.e_phoff + i * sizeof(phdr));
+            if (len == sizeof(ElfW(Phdr)) && phdr.p_type == PT_LOAD &&
+                phdr.p_offset == 0) {
+              *base_address = start_address - phdr.p_vaddr;
+              break;
+            }
+          }
+          break;
+        default:
+          // ET_REL or ET_CORE. These aren't directly executable, so they don't
+          // affect the base address.
+          break;
+      }
+    }
+  }
+  close(mem_fd);
+}
+
+#endif  // !BUILDFLAG(IS_ANDROID)
+
+void PrintStackTraceInternal(const void** trace, size_t count) {
+  int fd = PA_HANDLE_EINTR(open("/proc/self/maps", O_RDONLY));
+  if (fd == -1) {
+    PA_RAW_LOG(ERROR, "Failed to open /proc/self/maps\n");
+    return;
+  }
+
+  char buffer[kBufferSize];
+  char* dest = buffer;
+  char* buffer_end = buffer + kBufferSize;
+#if !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_APPLE)
+  uintptr_t base_address = 0u;
+#endif
+
+  while (dest < buffer_end) {
+    ssize_t bytes_read = PA_HANDLE_EINTR(read(fd, dest, buffer_end - dest));
+    if (bytes_read == 0) {
+      break;
+    }
+    if (bytes_read < 0) {
+      PA_RAW_LOG(ERROR, "Failed to read /proc/self/maps\n");
+      break;
+    }
+
+    char* read_end = dest + bytes_read;
+    char* parsed = buffer;
+    char* line_start = buffer;
+    // Rather than buffering all memory regions and matching the stack frames
+    // afterwards, parse /proc/self/maps one line at a time and print any
+    // frames that fall inside the region just parsed. As a consequence, the
+    // output frames may appear in a different order than the input trace.
+    for (char* line_end = buffer; line_end < read_end; ++line_end) {
+      if (*line_end == '\n') {
+        parsed = line_end + 1;
+        *line_end = '\0';
+        uintptr_t start_address = 0u;
+        uintptr_t end_address = 0u;
+        uintptr_t offset = 0u;
+        unsigned permissions = 0u;
+        const char* module_name = nullptr;
+        bool ok =
+            ParseMapsLine(line_start, line_end, &start_address, &end_address,
+                          &permissions, &offset, &module_name);
+        if (ok) {
+#if !BUILDFLAG(IS_ANDROID)
+          UpdateBaseAddress(permissions, start_address, &base_address);
+#endif
+          if (module_name && *module_name != '\0') {
+            for (size_t i = 0; i < count; i++) {
+#if BUILDFLAG(IS_ANDROID)
+              // Subtract one because a return address may fall into the next
+              // function when the call site is in a noreturn function.
+              uintptr_t address = reinterpret_cast<uintptr_t>(trace[i]) - 1;
+              uintptr_t base_address = start_address;
+#else
+              uintptr_t address = reinterpret_cast<uintptr_t>(trace[i]);
+#endif
+              if (start_address <= address && address < end_address) {
+                OutputStackTrace(i, address, base_address, module_name, offset);
+              }
+            }
+          }
+        } else {
+          PA_RAW_LOG(ERROR, "Parse failed.\n");
+        }
+        line_start = parsed;
+      }
+    }
+    if (parsed == buffer) {
+      // /proc/self/maps contains a line longer than kBufferSize.
+      PA_RAW_LOG(ERROR, "/proc/self/maps contains a line that is too long.\n");
+      break;
+    }
+    if (parsed < read_end) {
+      size_t left_chars = read_end - parsed;
+      memmove(buffer, parsed, left_chars);
+      dest = buffer + left_chars;
+    } else {
+      dest = buffer;
+    }
+  }
+  close(fd);
+}
+#endif  // !BUILDFLAG(IS_APPLE)
+
+#if BUILDFLAG(IS_APPLE)
+// Since /proc/self/maps is not available, use dladdr() to obtain module
+// names and offsets inside the modules from the given addresses.
+void PrintStackTraceInternal(const void* const* trace, size_t size) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+  Dl_info dl_info;
+  for (size_t i = 0; i < size; ++i) {
+    const bool dl_info_found = dladdr(trace[i], &dl_info) != 0;
+    if (dl_info_found) {
+      const char* last_sep = strrchr(dl_info.dli_fname, '/');
+      const char* basename = last_sep ? last_sep + 1 : dl_info.dli_fname;
+
+      // Use atos with --offset to obtain symbols from the printed addresses,
+      // e.g.
+      //  #01 0x0000000106225d6c  (base_unittests+0x0000000001999d6c)
+      //  bash-3.2$ atos -o out/default/base_unittests --offset
+      //   0x0000000001999d6c
+      //  partition_alloc::internal::PartitionAllocTest_Basic_Test::TestBody()
+      //  (in base_unittests) + 156
+      OutputStackTrace(i, reinterpret_cast<uintptr_t>(trace[i]),
+                       reinterpret_cast<uintptr_t>(dl_info.dli_fbase), basename,
+                       0u);
+    } else {
+      OutputStackTrace(i, reinterpret_cast<uintptr_t>(trace[i]), 0u, "???", 0u);
+    }
+  }
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+}  // namespace
+
+void PrintStackTrace(const void** trace, size_t count) {
+  PrintStackTraceInternal(trace, count);
+}
+
+// stack_trace_android.cc defines its own OutputStackTrace.
+#if !BUILDFLAG(IS_ANDROID)
+void OutputStackTrace(unsigned index,
+                      uintptr_t address,
+                      uintptr_t base_address,
+                      const char* module_name,
+                      uintptr_t) {
+  char buffer[256];
+  strings::SafeSPrintf(buffer, "#%02d 0x%0x  (%s+0x%0x)\n", index, address,
+                       module_name, address - base_address);
+  PA_RAW_LOG(INFO, buffer);
+}
+#endif  // !BUILDFLAG(IS_ANDROID)
+
+}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_win.cc
new file mode 100644
index 0000000..dd347d0
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_win.cc
@@ -0,0 +1,104 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+
+#include <windows.h>
+
+#include <psapi.h>
+
+namespace partition_alloc::internal::base::debug {
+
+namespace {
+
+void PrintStackTraceInternal(const void** trace, size_t count) {
+  HANDLE process_handle = OpenProcess(
+      PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, GetCurrentProcId());
+  if (!process_handle) {
+    return;
+  }
+
+  bool is_output_trace[count];
+  for (size_t i = 0; i < count; ++i) {
+    is_output_trace[i] = false;
+  }
+  DWORD bytes_required = 0;
+  if (EnumProcessModules(process_handle, nullptr, 0, &bytes_required)) {
+    HMODULE* module_array = nullptr;
+    LPBYTE module_array_bytes = nullptr;
+
+    if (bytes_required) {
+      module_array_bytes = (LPBYTE)LocalAlloc(LPTR, bytes_required);
+      if (module_array_bytes) {
+        unsigned int module_count = bytes_required / sizeof(HMODULE);
+        module_array = reinterpret_cast<HMODULE*>(module_array_bytes);
+
+        if (EnumProcessModules(process_handle, module_array, bytes_required,
+                               &bytes_required)) {
+          for (unsigned i = 0; i < module_count; ++i) {
+            MODULEINFO info;
+            if (GetModuleInformation(process_handle, module_array[i], &info,
+                                     sizeof(info))) {
+              char module_name[MAX_PATH + 1];
+              bool module_name_checked = false;
+              for (unsigned j = 0; j < count; j++) {
+                uintptr_t base_of_dll =
+                    reinterpret_cast<uintptr_t>(info.lpBaseOfDll);
+                uintptr_t address = reinterpret_cast<uintptr_t>(trace[j]);
+                if (base_of_dll <= address &&
+                    address < base_of_dll + info.SizeOfImage) {
+                  if (!module_name_checked) {
+                    size_t module_name_length = GetModuleFileNameExA(
+                        process_handle, module_array[i], module_name,
+                        sizeof(module_name) - 1);
+                    module_name[module_name_length] = '\0';
+                    module_name_checked = true;
+                  }
+                  // llvm-symbolizer needs --relative-address to symbolize the
+                  // "address - base_of_dll".
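+                  // An illustrative (made-up) invocation would be:
+                  //   llvm-symbolizer --relative-address --obj=<module> <offset>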
+                  char buffer[256];
+                  strings::SafeSPrintf(buffer, "#%d 0x%x (%s+0x%x)\n", j,
+                                       address, module_name,
+                                       address - base_of_dll);
+                  PA_RAW_LOG(INFO, buffer);
+                  is_output_trace[j] = true;
+                }
+              }
+            }
+          }
+        }
+        LocalFree(module_array_bytes);
+      }
+    }
+  }
+
+  for (size_t i = 0; i < count; ++i) {
+    if (is_output_trace[i]) {
+      continue;
+    }
+    char buffer[256];
+    strings::SafeSPrintf(buffer, "#%d 0x%x <unknown>\n", i,
+                         reinterpret_cast<uintptr_t>(trace[i]));
+    PA_RAW_LOG(INFO, buffer);
+  }
+
+  CloseHandle(process_handle);
+}
+
+}  // namespace
+
+PA_NOINLINE size_t CollectStackTrace(const void** trace, size_t count) {
+  // When walking our own stack, use CaptureStackBackTrace().
+  return CaptureStackBackTrace(0, count, const_cast<void**>(trace), NULL);
+}
+
+void PrintStackTrace(const void** trace, size_t count) {
+  PrintStackTraceInternal(trace, count);
+}
+
+}  // namespace partition_alloc::internal::base::debug
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/export_template.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/export_template.h
new file mode 100644
index 0000000..23b7cd8
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/export_template.h
@@ -0,0 +1,188 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_EXPORT_TEMPLATE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_EXPORT_TEMPLATE_H_
+
+// Synopsis
+//
+// This header provides macros for using
+// PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) macros with explicit template
+// instantiation declarations and definitions. Generally, the
+// PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) macros are used at declarations,
+// and GCC requires them to be used at explicit instantiation declarations, but
+// MSVC requires __declspec(dllexport) to be used at the explicit instantiation
+// definitions instead.
+
+// Usage
+//
+// In a header file, write:
+//
+//   extern template class
+//   PA_EXPORT_TEMPLATE_DECLARE(PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE))
+//   foo<bar>;
+//
+// In a source file, write:
+//
+//   template class
+//   PA_EXPORT_TEMPLATE_DEFINE(PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE))
+//   foo<bar>;
+
+// Implementation notes
+//
+// On Windows, when building the component itself (that is, when
+// PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) expands to __declspec(dllexport)),
+// we want the two lines to expand to:
+//
+//     extern template class foo<bar>;
+//     template class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) foo<bar>;
+//
+// In all other cases (non-Windows, and Windows when
+// PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) expands to
+// __declspec(dllimport)), we want:
+//
+//     extern template class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) foo<bar>;
+//     template class foo<bar>;
+//
+// The implementation of this header uses some subtle macro semantics to
+// detect what the provided PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) value was
+// defined as and then to dispatch to appropriate macro definitions.
+// Unfortunately, MSVC's C preprocessor is rather non-compliant and requires
+// special care to make it work.
+//
+// Issue 1.
+//
+//   #define F(x)
+//   F()
+//
+// MSVC emits warning C4003 ("not enough actual parameters for macro
+// 'F'"), even though it's a valid macro invocation.  This affects the
+// macros below that take just an "export" parameter, because export
+// may be empty.
+//
+// As a workaround, we can add a dummy parameter and arguments:
+//
+//   #define F(x,_)
+//   F(,)
+//
+// Issue 2.
+//
+//   #define F(x) G##x
+//   #define Gj() ok
+//   F(j())
+//
+// The correct replacement for "F(j())" is "ok", but MSVC replaces it
+// with "Gj()".  As a workaround, we can pass the result to an
+// identity macro to force MSVC to look for replacements again.  (This
+// is why PA_EXPORT_TEMPLATE_STYLE_3 exists.)
+
+#define PA_EXPORT_TEMPLATE_DECLARE(export)                               \
+  PA_EXPORT_TEMPLATE_INVOKE(DECLARE, PA_EXPORT_TEMPLATE_STYLE(export, ), \
+                            export)  // NOLINT
+#define PA_EXPORT_TEMPLATE_DEFINE(export)                               \
+  PA_EXPORT_TEMPLATE_INVOKE(DEFINE, PA_EXPORT_TEMPLATE_STYLE(export, ), \
+                            export)  // NOLINT
+
+// INVOKE is an internal helper macro to perform parameter replacements
+// and token pasting to chain invoke another macro.  E.g.,
+//     PA_EXPORT_TEMPLATE_INVOKE(DECLARE, DEFAULT, PA_EXPORT)
+// will expand to call
+//     PA_EXPORT_TEMPLATE_DECLARE_DEFAULT(PA_EXPORT, )
+// (but with PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) expanded too).
+#define PA_EXPORT_TEMPLATE_INVOKE(which, style, export) \
+  PA_EXPORT_TEMPLATE_INVOKE_2(which, style, export)
+#define PA_EXPORT_TEMPLATE_INVOKE_2(which, style, export) \
+  PA_EXPORT_TEMPLATE_##which##_##style(export, )
+
+// Default style is to apply the PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) macro
+// at declaration sites.
+#define PA_EXPORT_TEMPLATE_DECLARE_DEFAULT(export, _) export
+#define PA_EXPORT_TEMPLATE_DEFINE_DEFAULT(export, _)
+
+// The "MSVC hack" style is used when PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+// is defined as __declspec(dllexport), which MSVC requires to be used at
+// definition sites instead.
+#define PA_EXPORT_TEMPLATE_DECLARE_EXPORT_DLLEXPORT(export, _)
+#define PA_EXPORT_TEMPLATE_DEFINE_EXPORT_DLLEXPORT(export, _) export
+
+// PA_EXPORT_TEMPLATE_STYLE is an internal helper macro that identifies which
+// export style needs to be used for the provided
+// PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) macro definition.
+// "", "__attribute__(...)", and "__declspec(dllimport)" are mapped
+// to "DEFAULT"; while "__declspec(dllexport)" is mapped to "MSVC_HACK".
+//
+// It's implemented with token pasting to transform the __attribute__ and
+// __declspec annotations into macro invocations.  E.g., if
+// PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) is defined as
+// "__declspec(dllimport)", it undergoes the following sequence of macro
+// substitutions:
+//     PA_EXPORT_TEMPLATE_STYLE(PA_EXPORT,)
+//     PA_EXPORT_TEMPLATE_STYLE_2(__declspec(dllimport),)
+//     PA_EXPORT_TEMPLATE_STYLE_3(
+//         PA_EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport))
+//     PA_EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport)
+//     PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport
+//     DEFAULT
+#define PA_EXPORT_TEMPLATE_STYLE(export, _) PA_EXPORT_TEMPLATE_STYLE_2(export, )
+#define PA_EXPORT_TEMPLATE_STYLE_2(export, _) \
+  PA_EXPORT_TEMPLATE_STYLE_3(                 \
+      PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA##export)
+#define PA_EXPORT_TEMPLATE_STYLE_3(style) style
+
+// Internal helper macros for PA_EXPORT_TEMPLATE_STYLE.
+//
+// XXX: C++ reserves all identifiers containing "__" for the implementation,
+// but "__attribute__" and "__declspec" already contain "__" and the token-paste
+// operator can only add characters; not remove them.  To minimize the risk of
+// conflict with implementations, we include "foj3FJo5StF0OvIzl7oMxA" (a random
+// 128-bit string, encoded in Base64) in the macro name.
+#define PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA DEFAULT
+#define PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__attribute__( \
+    ...)                                                                    \
+  DEFAULT
+#define PA_EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__declspec(arg) \
+  PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_##arg
+
+// Internal helper macros for PA_EXPORT_TEMPLATE_STYLE.
+#define PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllexport EXPORT_DLLEXPORT
+#define PA_EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport DEFAULT
+
+// Sanity checks.
+//
+// PA_EXPORT_TEMPLATE_TEST uses the same macro invocation pattern as
+// PA_EXPORT_TEMPLATE_DECLARE and PA_EXPORT_TEMPLATE_DEFINE do to check that
+// they're working correctly. When they're working correctly, the sequence of
+// macro replacements should go something like:
+//
+//     PA_EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+//
+//     static_assert(PA_EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+//         PA_EXPORT_TEMPLATE_STYLE(__declspec(dllimport), ),
+//         __declspec(dllimport)), "__declspec(dllimport)");
+//
+//     static_assert(PA_EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+//         DEFAULT, __declspec(dllimport)), "__declspec(dllimport)");
+//
+//     static_assert(PA_EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(
+//         __declspec(dllimport)), "__declspec(dllimport)");
+//
+//     static_assert(true, "__declspec(dllimport)");
+//
+// When they're not working correctly, a syntax error should occur instead.
+#define PA_EXPORT_TEMPLATE_TEST(want, export)                                 \
+  static_assert(PA_EXPORT_TEMPLATE_INVOKE(                                    \
+                    TEST_##want, PA_EXPORT_TEMPLATE_STYLE(export, ), export), \
+                #export)  // NOLINT
+#define PA_EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(...) true
+#define PA_EXPORT_TEMPLATE_TEST_EXPORT_DLLEXPORT_EXPORT_DLLEXPORT(...) true
+
+PA_EXPORT_TEMPLATE_TEST(DEFAULT, );  // NOLINT
+PA_EXPORT_TEMPLATE_TEST(DEFAULT, __attribute__((visibility("default"))));
+PA_EXPORT_TEMPLATE_TEST(EXPORT_DLLEXPORT, __declspec(dllexport));
+PA_EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+
+#undef PA_EXPORT_TEMPLATE_TEST
+#undef PA_EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT
+#undef PA_EXPORT_TEMPLATE_TEST_EXPORT_DLLEXPORT_EXPORT_DLLEXPORT
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_EXPORT_TEMPLATE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.cc
new file mode 100644
index 0000000..3700c7f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.cc
@@ -0,0 +1,156 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
+
+#include <string.h>
+#include <algorithm>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#elif BUILDFLAG(IS_APPLE)
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+namespace partition_alloc::internal::base {
+
+using StringType = FilePath::StringType;
+const FilePath::CharType kStringTerminator = PA_FILE_PATH_LITERAL('\0');
+
+// If this FilePath contains a drive letter specification, returns the
+// position of the last character of the drive letter specification,
+// otherwise returns npos.  This can only be true on Windows, when a pathname
+// begins with a letter followed by a colon.  On other platforms, this always
+// returns npos.
+StringType::size_type FindDriveLetter(const StringType& path) {
+#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
+  // This is dependent on an ASCII-based character set, but that's a
+  // reasonable assumption.  iswalpha can be too inclusive here.
+  if (path.length() >= 2 && path[1] == L':' &&
+      ((path[0] >= L'A' && path[0] <= L'Z') ||
+       (path[0] >= L'a' && path[0] <= L'z'))) {
+    return 1;
+  }
+#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
+  return StringType::npos;
+}
+
+bool IsPathAbsolute(const StringType& path) {
+#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
+  StringType::size_type letter = FindDriveLetter(path);
+  if (letter != StringType::npos) {
+    // Look for a separator right after the drive specification.
+    return path.length() > letter + 1 &&
+           FilePath::IsSeparator(path[letter + 1]);
+  }
+  // Look for a pair of leading separators.
+  return path.length() > 1 && FilePath::IsSeparator(path[0]) &&
+         FilePath::IsSeparator(path[1]);
+#else   // PA_FILE_PATH_USES_DRIVE_LETTERS
+  // Look for a separator in the first position.
+  return path.length() > 0 && FilePath::IsSeparator(path[0]);
+#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
+}
+
+FilePath::FilePath() = default;
+
+FilePath::FilePath(const FilePath& that) = default;
+FilePath::FilePath(FilePath&& that) noexcept = default;
+
+FilePath::FilePath(const StringType& path) : path_(path) {
+  StringType::size_type nul_pos = path_.find(kStringTerminator);
+  if (nul_pos != StringType::npos) {
+    path_.erase(nul_pos, StringType::npos);
+  }
+}
+
+FilePath::~FilePath() = default;
+
+FilePath& FilePath::operator=(const FilePath& that) = default;
+
+FilePath& FilePath::operator=(FilePath&& that) noexcept = default;
+
+// static
+bool FilePath::IsSeparator(CharType character) {
+  for (size_t i = 0; i < kSeparatorsLength - 1; ++i) {
+    if (character == kSeparators[i]) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+FilePath FilePath::Append(const StringType& component) const {
+  StringType appended = component;
+  StringType without_nuls;
+
+  StringType::size_type nul_pos = component.find(kStringTerminator);
+  if (nul_pos != StringType::npos) {
+    without_nuls = component.substr(0, nul_pos);
+    appended = without_nuls;
+  }
+
+  PA_BASE_DCHECK(!IsPathAbsolute(appended));
+
+  if (path_.compare(kCurrentDirectory) == 0 && !appended.empty()) {
+    // Append normally doesn't do any normalization, but as a special case,
+    // when appending to kCurrentDirectory, just return a new path for the
+    // component argument.  Appending component to kCurrentDirectory would
+    // serve no purpose other than needlessly lengthening the path, and
+    // it's likely in practice to wind up with FilePath objects containing
+    // only kCurrentDirectory when calling DirName on a single relative path
+    // component.
+    return FilePath(appended);
+  }
+
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // Don't append a separator if the path is empty (indicating the current
+  // directory) or if the path component is empty (indicating nothing to
+  // append).
+  if (!appended.empty() && !new_path.path_.empty()) {
+    // Don't append a separator if the path still ends with a trailing
+    // separator after stripping (indicating the root directory).
+    if (!IsSeparator(new_path.path_.back())) {
+      // Don't append a separator if the path is just a drive letter.
+      if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
+        new_path.path_.append(1, kSeparators[0]);
+      }
+    }
+  }
+
+  new_path.path_.append(appended);
+  return new_path;
+}
+
+FilePath FilePath::Append(const FilePath& component) const {
+  return Append(component.value());
+}
+
+void FilePath::StripTrailingSeparatorsInternal() {
+  // If there is no drive letter, start will be 1, which will prevent stripping
+  // the leading separator if there is only one separator.  If there is a drive
+  // letter, start will be set appropriately to prevent stripping the first
+  // separator following the drive letter, if a separator immediately follows
+  // the drive letter.
+  StringType::size_type start = FindDriveLetter(path_) + 2;
+
+  StringType::size_type last_stripped = StringType::npos;
+  for (StringType::size_type pos = path_.length();
+       pos > start && IsSeparator(path_[pos - 1]); --pos) {
+    // If the string only has two separators and they're at the beginning,
+    // don't strip them, unless the string began with more than two separators.
+    if (pos != start + 1 || last_stripped == start + 2 ||
+        !IsSeparator(path_[start - 1])) {
+      path_.resize(pos - 1);
+      last_stripped = pos;
+    }
+  }
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h
new file mode 100644
index 0000000..609d49c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h
@@ -0,0 +1,231 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FilePath is a container for pathnames stored in a platform's native string
+// type, providing containers for manipulation in accordance with the
+// platform's conventions for pathnames.  It supports the following path
+// types:
+//
+//                   POSIX            Windows
+//                   ---------------  ----------------------------------
+// Fundamental type  char[]           wchar_t[]
+// Encoding          unspecified*     UTF-16
+// Separator         /                \, tolerant of /
+// Drive letters     no               case-insensitive A-Z followed by :
+// Alternate root    // (surprise!)   \\ (2 Separators), for UNC paths
+//
+// * The encoding need not be specified on POSIX systems, although some
+//   POSIX-compliant systems do specify an encoding.  Mac OS X uses UTF-8.
+//   Chrome OS also uses UTF-8.
+//   Linux does not specify an encoding, but in practice, the locale's
+//   character set may be used.
+//
+// For more arcane bits of path trivia, see below.
+//
+// FilePath objects are intended to be used anywhere paths are.  An
+// application may pass FilePath objects around internally, masking the
+// underlying differences between systems, only differing in implementation
+// where interfacing directly with the system.  For example, a single
+// OpenFile(const FilePath &) function may be made available, allowing all
+// callers to operate without regard to the underlying implementation.  On
+// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
+// wrap _wfopen_s, perhaps both by calling file_path.value().c_str().  This
+// allows each platform to pass pathnames around without requiring conversions
+// between encodings, which has an impact on performance, but more importantly,
+// has an impact on correctness on platforms that do not have well-defined
+// encodings for pathnames.
+//
+// Several methods are available to perform common operations on a FilePath
+// object, such as determining the parent directory (DirName), isolating the
+// final path component (BaseName), and appending a relative pathname string
+// to an existing FilePath object (Append).  These methods are highly
+// recommended over attempting to split and concatenate strings directly.
+// These methods are based purely on string manipulation and knowledge of
+// platform-specific pathname conventions, and do not consult the filesystem
+// at all, making them safe to use without fear of blocking on I/O operations.
+// These methods do not function as mutators but instead return distinct
+// instances of FilePath objects, and are therefore safe to use on const
+// objects.  The objects themselves are safe to share between threads.
+//
+// To aid in initialization of FilePath objects from string literals, a
+// PA_FILE_PATH_LITERAL macro is provided, which accounts for the difference
+// between char[]-based pathnames on POSIX systems and wchar_t[]-based
+// pathnames on Windows.
+//
+// As a precaution against premature truncation, paths can't contain NULs.
+//
+// Because a FilePath object should not be instantiated at the global scope,
+// use a FilePath::CharType[] instead and initialize it with
+// PA_FILE_PATH_LITERAL.  At runtime, a FilePath object can be created from the
+// character array.  Example:
+//
+// | const FilePath::CharType kLogFileName[] =
+// |     PA_FILE_PATH_LITERAL("log.txt");
+// |
+// | void Function() {
+// |   FilePath log_file_path(kLogFileName);
+// |   [...]
+// | }
+//
+// WARNING: FilePaths should ALWAYS be displayed with LTR directionality, even
+// when the UI language is RTL. This means you always need to pass filepaths
+// through base::i18n::WrapPathWithLTRFormatting() before displaying it in the
+// RTL UI.
+//
+// This is a very common source of bugs, please try to keep this in mind.
+//
+// ARCANE BITS OF PATH TRIVIA
+//
+//  - A double leading slash is actually part of the POSIX standard.  Systems
+//    are allowed to treat // as an alternate root, as Windows does for UNC
+//    (network share) paths.  Most POSIX systems don't do anything special
+//    with two leading slashes, but FilePath handles this case properly
+//    in case it ever comes across such a system.  FilePath needs this support
+//    for Windows UNC paths, anyway.
+//    References:
+//    The Open Group Base Specifications Issue 7, sections 3.267 ("Pathname")
+//    and 4.12 ("Pathname Resolution"), available at:
+//    http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_267
+//    http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
+//
+//  - Windows treats c:\\ the same way it treats \\.  This was intended to
+//    allow older applications that require drive letters to support UNC paths
+//    like \\server\share\path, by permitting c:\\server\share\path as an
+//    equivalent.  Since the OS treats these paths specially, FilePath needs
+//    to do the same.  Since Windows can use either / or \ as the separator,
+//    FilePath treats c://, c:\\, //, and \\ all equivalently.
+//    Reference:
+//    The Old New Thing, "Why is a drive letter permitted in front of UNC
+//    paths (sometimes)?", available at:
+//    http://blogs.msdn.com/oldnewthing/archive/2005/11/22/495740.aspx
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
+
+#include <cstddef>
+#include <iosfwd>
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+// Windows-style drive letter support and pathname separator characters can be
+// enabled and disabled independently, to aid testing.  These #defines are
+// here so that the same setting can be used in both the implementation and
+// in the unit test.
+#if BUILDFLAG(IS_WIN)
+#define PA_FILE_PATH_USES_DRIVE_LETTERS
+#define PA_FILE_PATH_USES_WIN_SEPARATORS
+#endif  // BUILDFLAG(IS_WIN)
+
+// Macros for string literal initialization of FilePath::CharType[].
+#if BUILDFLAG(IS_WIN)
+#define PA_FILE_PATH_LITERAL(x) L##x
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#define PA_FILE_PATH_LITERAL(x) x
+#endif  // BUILDFLAG(IS_WIN)
+
+namespace partition_alloc::internal::base {
+
+// An abstraction to isolate users from the differences between native
+// pathnames on different platforms.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) FilePath {
+ public:
+#if BUILDFLAG(IS_WIN)
+  // On Windows, for Unicode-aware applications, native pathnames are wchar_t
+  // arrays encoded in UTF-16.
+  typedef std::wstring StringType;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  // On most platforms, native pathnames are char arrays, and the encoding
+  // may or may not be specified.  On Mac OS X, native pathnames are encoded
+  // in UTF-8.
+  typedef std::string StringType;
+#endif  // BUILDFLAG(IS_WIN)
+
+  typedef StringType::value_type CharType;
+
+  // Null-terminated array of separators used to separate components in paths.
+  // Each character in this array is a valid separator, but kSeparators[0] is
+  // treated as the canonical separator and is used when composing pathnames.
+  static constexpr CharType kSeparators[] =
+#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
+      PA_FILE_PATH_LITERAL("\\/");
+#else   // PA_FILE_PATH_USES_WIN_SEPARATORS
+      PA_FILE_PATH_LITERAL("/");
+#endif  // PA_FILE_PATH_USES_WIN_SEPARATORS
+
+  // std::size(kSeparators), i.e., the number of separators in kSeparators plus
+  // one (the null terminator at the end of kSeparators).
+  static constexpr size_t kSeparatorsLength = std::size(kSeparators);
+
+  // The special path component meaning "this directory."
+  static constexpr CharType kCurrentDirectory[] = PA_FILE_PATH_LITERAL(".");
+
+  // The special path component meaning "the parent directory."
+  static constexpr CharType kParentDirectory[] = PA_FILE_PATH_LITERAL("..");
+
+  // The character used to identify a file extension.
+  static constexpr CharType kExtensionSeparator = PA_FILE_PATH_LITERAL('.');
+
+  FilePath();
+  FilePath(const FilePath& that);
+  explicit FilePath(const StringType& that);
+  ~FilePath();
+  FilePath& operator=(const FilePath& that);
+
+  // Constructs FilePath with the contents of |that|, which is left in a valid
+  // but unspecified state.
+  FilePath(FilePath&& that) noexcept;
+  // Replaces the contents with those of |that|, which is left in a valid but
+  // unspecified state.
+  FilePath& operator=(FilePath&& that) noexcept;
+
+  // Required for some STL containers and operations
+  bool operator<(const FilePath& that) const { return path_ < that.path_; }
+
+  const StringType& value() const { return path_; }
+
+  [[nodiscard]] bool empty() const { return path_.empty(); }
+
+  void clear() { path_.clear(); }
+
+  // Returns true if |character| is in kSeparators.
+  static bool IsSeparator(CharType character);
+
+  // Returns a FilePath by appending a separator and the supplied path
+  // component to this object's path.  Append takes care to avoid adding
+  // excessive separators if this object's path already ends with a separator.
+  // If this object's path is kCurrentDirectory, a new FilePath corresponding
+  // only to |component| is returned.  |component| must be a relative path;
+  // it is an error to pass an absolute path.
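+  //
+  // For illustration (POSIX separator assumed):
+  //   FilePath(PA_FILE_PATH_LITERAL("/usr")).Append(PA_FILE_PATH_LITERAL("local"))
+  //   yields a FilePath whose value() is "/usr/local".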
+  [[nodiscard]] FilePath Append(const FilePath& component) const;
+  [[nodiscard]] FilePath Append(const StringType& component) const;
+
+ private:
+  // Remove trailing separators from this object.  If the path is absolute, it
+  // will never be stripped any more than to refer to the absolute root
+  // directory, so "////" will become "/", not "".  A leading pair of
+  // separators is never stripped, to support alternate roots.  This is used to
+  // support UNC paths on Windows.
+  void StripTrailingSeparatorsInternal();
+
+  StringType path_;
+};
+
+}  // namespace partition_alloc::internal::base
+
+namespace std {
+
+template <>
+struct hash<::partition_alloc::internal::base::FilePath> {
+  typedef ::partition_alloc::internal::base::FilePath argument_type;
+  typedef std::size_t result_type;
+  result_type operator()(argument_type const& f) const {
+    return hash<::partition_alloc::internal::base::FilePath::StringType>()(
+        f.value());
+  }
+};
+
+}  // namespace std
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path_pa_unittest.cc
new file mode 100644
index 0000000..33cde7b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path_pa_unittest.cc
@@ -0,0 +1,154 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
+
+#include <stddef.h>
+
+#include <sstream>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// This macro helps avoid wrapped lines in the test structs.
+#define FPL(x) PA_FILE_PATH_LITERAL(x)
+
+// This macro constructs strings which can contain NULs.
+#define FPS(x) FilePath::StringType(FPL(x), std::size(FPL(x)) - 1)
+
+namespace partition_alloc::internal::base {
+
+struct UnaryTestData {
+  FilePath::StringType input;
+  FilePath::StringType expected;
+};
+
+struct UnaryBooleanTestData {
+  FilePath::StringType input;
+  bool expected;
+};
+
+struct BinaryTestData {
+  FilePath::StringType inputs[2];
+  FilePath::StringType expected;
+};
+
+struct BinaryBooleanTestData {
+  FilePath::StringType inputs[2];
+  bool expected;
+};
+
+struct BinaryIntTestData {
+  FilePath::StringType inputs[2];
+  int expected;
+};
+
+TEST(PartitionAllocBaseFilePathTest, Append) {
+  const struct BinaryTestData cases[] = {
+    {{FPL(""), FPL("cc")}, FPL("cc")},
+    {{FPL("."), FPL("ff")}, FPL("ff")},
+    {{FPL("."), FPL("")}, FPL(".")},
+    {{FPL("/"), FPL("cc")}, FPL("/cc")},
+    {{FPL("/aa"), FPL("")}, FPL("/aa")},
+    {{FPL("/aa/"), FPL("")}, FPL("/aa")},
+    {{FPL("//aa"), FPL("")}, FPL("//aa")},
+    {{FPL("//aa/"), FPL("")}, FPL("//aa")},
+    {{FPL("//"), FPL("aa")}, FPL("//aa")},
+#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
+    {{FPL("c:"), FPL("a")}, FPL("c:a")},
+    {{FPL("c:"), FPL("")}, FPL("c:")},
+    {{FPL("c:/"), FPL("a")}, FPL("c:/a")},
+    {{FPL("c://"), FPL("a")}, FPL("c://a")},
+    {{FPL("c:///"), FPL("a")}, FPL("c:/a")},
+#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
+#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
+    // Append introduces the default separator character, so these test cases
+    // need to be defined with different expected results on platforms that use
+    // different default separator characters.
+    {{FPL("\\"), FPL("cc")}, FPL("\\cc")},
+    {{FPL("\\aa"), FPL("")}, FPL("\\aa")},
+    {{FPL("\\aa\\"), FPL("")}, FPL("\\aa")},
+    {{FPL("\\\\aa"), FPL("")}, FPL("\\\\aa")},
+    {{FPL("\\\\aa\\"), FPL("")}, FPL("\\\\aa")},
+    {{FPL("\\\\"), FPL("aa")}, FPL("\\\\aa")},
+    {{FPL("/aa/bb"), FPL("cc")}, FPL("/aa/bb\\cc")},
+    {{FPL("/aa/bb/"), FPL("cc")}, FPL("/aa/bb\\cc")},
+    {{FPL("aa/bb/"), FPL("cc")}, FPL("aa/bb\\cc")},
+    {{FPL("aa/bb"), FPL("cc")}, FPL("aa/bb\\cc")},
+    {{FPL("a/b"), FPL("c")}, FPL("a/b\\c")},
+    {{FPL("a/b/"), FPL("c")}, FPL("a/b\\c")},
+    {{FPL("//aa"), FPL("bb")}, FPL("//aa\\bb")},
+    {{FPL("//aa/"), FPL("bb")}, FPL("//aa\\bb")},
+    {{FPL("\\aa\\bb"), FPL("cc")}, FPL("\\aa\\bb\\cc")},
+    {{FPL("\\aa\\bb\\"), FPL("cc")}, FPL("\\aa\\bb\\cc")},
+    {{FPL("aa\\bb\\"), FPL("cc")}, FPL("aa\\bb\\cc")},
+    {{FPL("aa\\bb"), FPL("cc")}, FPL("aa\\bb\\cc")},
+    {{FPL("a\\b"), FPL("c")}, FPL("a\\b\\c")},
+    {{FPL("a\\b\\"), FPL("c")}, FPL("a\\b\\c")},
+    {{FPL("\\\\aa"), FPL("bb")}, FPL("\\\\aa\\bb")},
+    {{FPL("\\\\aa\\"), FPL("bb")}, FPL("\\\\aa\\bb")},
+#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
+    {{FPL("c:\\"), FPL("a")}, FPL("c:\\a")},
+    {{FPL("c:\\\\"), FPL("a")}, FPL("c:\\\\a")},
+    {{FPL("c:\\\\\\"), FPL("a")}, FPL("c:\\a")},
+    {{FPL("c:\\"), FPL("")}, FPL("c:\\")},
+    {{FPL("c:\\a"), FPL("b")}, FPL("c:\\a\\b")},
+    {{FPL("c:\\a\\"), FPL("b")}, FPL("c:\\a\\b")},
+#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
+#else   // PA_FILE_PATH_USES_WIN_SEPARATORS
+    {{FPL("/aa/bb"), FPL("cc")}, FPL("/aa/bb/cc")},
+    {{FPL("/aa/bb/"), FPL("cc")}, FPL("/aa/bb/cc")},
+    {{FPL("aa/bb/"), FPL("cc")}, FPL("aa/bb/cc")},
+    {{FPL("aa/bb"), FPL("cc")}, FPL("aa/bb/cc")},
+    {{FPL("a/b"), FPL("c")}, FPL("a/b/c")},
+    {{FPL("a/b/"), FPL("c")}, FPL("a/b/c")},
+    {{FPL("//aa"), FPL("bb")}, FPL("//aa/bb")},
+    {{FPL("//aa/"), FPL("bb")}, FPL("//aa/bb")},
+#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
+    {{FPL("c:/"), FPL("a")}, FPL("c:/a")},
+    {{FPL("c:/"), FPL("")}, FPL("c:/")},
+    {{FPL("c:/a"), FPL("b")}, FPL("c:/a/b")},
+    {{FPL("c:/a/"), FPL("b")}, FPL("c:/a/b")},
+#endif  // PA_FILE_PATH_USES_DRIVE_LETTERS
+#endif  // PA_FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < std::size(cases); ++i) {
+    FilePath root(cases[i].inputs[0]);
+    FilePath::StringType leaf(cases[i].inputs[1]);
+    FilePath observed_str = root.Append(leaf);
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_str.value())
+        << "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+    FilePath observed_path = root.Append(FilePath(leaf));
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_path.value())
+        << "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+  }
+}
+
+TEST(PartitionAllocBaseFilePathTest, ConstructWithNUL) {
+  // Assert FPS() works.
+  ASSERT_EQ(3U, FPS("a\0b").length());
+
+  // Test constructor strips '\0'
+  FilePath path(FPS("a\0b"));
+  EXPECT_EQ(1U, path.value().length());
+  EXPECT_EQ(FPL("a"), path.value());
+}
+
+TEST(PartitionAllocBaseFilePathTest, AppendWithNUL) {
+  // Assert FPS() works.
+  ASSERT_EQ(3U, FPS("b\0b").length());
+
+  // Test Append() strips '\0'
+  FilePath path(FPL("a"));
+  path = path.Append(FPS("b\0b"));
+  EXPECT_EQ(3U, path.value().length());
+#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
+  EXPECT_EQ(FPL("a\\b"), path.value());
+#else
+  EXPECT_EQ(FPL("a/b"), path.value());
+#endif
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util.h
new file mode 100644
index 0000000..cd80918
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util.h
@@ -0,0 +1,37 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions for dealing with the local
+// filesystem.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+namespace partition_alloc::internal::base {
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+
+// Read exactly |bytes| bytes from file descriptor |fd|, storing the result
+// in |buffer|. This function is protected against EINTR and partial reads.
+// Returns true iff |bytes| bytes have been successfully read from |fd|.
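+//
+// Illustrative usage (hypothetical caller):
+//   char header[16];
+//   if (ReadFromFD(fd, header, sizeof(header))) {
+//     // All 16 bytes were read; a short read or error returns false.
+//   }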
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+bool ReadFromFD(int fd, char* buffer, size_t bytes);
+
+#endif  // BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util_posix.cc
new file mode 100644
index 0000000..ed3754c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util_posix.cc
@@ -0,0 +1,24 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
+
+namespace partition_alloc::internal::base {
+
+bool ReadFromFD(int fd, char* buffer, size_t bytes) {
+  size_t total_read = 0;
+  while (total_read < bytes) {
+    ssize_t bytes_read =
+        PA_HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
+    if (bytes_read <= 0) {
+      break;
+    }
+    total_read += bytes_read;
+  }
+  return total_read == bytes;
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.cc
new file mode 100644
index 0000000..1074ab1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.cc
@@ -0,0 +1,26 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.h"
+
+#include <zircon/status.h>
+
+#include <iomanip>
+
+namespace partition_alloc::internal::logging {
+
+ZxLogMessage::ZxLogMessage(const char* file_path,
+                           int line,
+                           LogSeverity severity,
+                           zx_status_t zx_err)
+    : LogMessage(file_path, line, severity), zx_err_(zx_err) {}
+
+ZxLogMessage::~ZxLogMessage() {
+  // zx_status_t error values are negative, so log the numeric version as
+  // decimal rather than hex. This is also useful to match zircon/errors.h for
+  // grepping.
+  stream() << ": " << zx_status_get_string(zx_err_) << " (" << zx_err_ << ")";
+}
+
+}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.h
new file mode 100644
index 0000000..0f17854
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
+
+#include <lib/fit/function.h>
+#include <zircon/types.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "build/build_config.h"
+
+// Use the PA_ZX_LOG family of macros along with a zx_status_t containing a
+// Zircon error. The error value will be decoded so that logged messages explain
+// the error.
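+//
+// Illustrative usage (SomeZirconCall is a hypothetical status-returning call):
+//   zx_status_t status = SomeZirconCall();
+//   PA_ZX_CHECK(status == ZX_OK, status) << "SomeZirconCall failed";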
+
+namespace partition_alloc::internal::logging {
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ZxLogMessage
+    : public logging::LogMessage {
+ public:
+  ZxLogMessage(const char* file_path,
+               int line,
+               LogSeverity severity,
+               zx_status_t zx_err);
+
+  ZxLogMessage(const ZxLogMessage&) = delete;
+  ZxLogMessage& operator=(const ZxLogMessage&) = delete;
+
+  ~ZxLogMessage() override;
+
+ private:
+  zx_status_t zx_err_;
+};
+
+}  // namespace partition_alloc::internal::logging
+
+#define PA_ZX_LOG_STREAM(severity, zx_err) \
+  PA_COMPACT_GOOGLE_LOG_EX_##severity(ZxLogMessage, zx_err).stream()
+
+#define PA_ZX_LOG(severity, zx_err) \
+  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), PA_LOG_IS_ON(severity))
+#define PA_ZX_LOG_IF(severity, condition, zx_err)    \
+  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), \
+                 PA_LOG_IS_ON(severity) && (condition))
+
+#define PA_ZX_CHECK(condition, zx_err)                          \
+  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(FATAL, zx_err), !(condition)) \
+      << "Check failed: " #condition << ". "
+
+#define PA_ZX_DLOG(severity, zx_err) \
+  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), PA_DLOG_IS_ON(severity))
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#define PA_ZX_DLOG_IF(severity, condition, zx_err)   \
+  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(severity, zx_err), \
+                 PA_DLOG_IS_ON(severity) && (condition))
+#else  // BUILDFLAG(PA_DCHECK_IS_ON)
+#define PA_ZX_DLOG_IF(severity, condition, zx_err) PA_EAT_STREAM_PARAMETERS
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_ZX_DCHECK(condition, zx_err)                      \
+  PA_LAZY_STREAM(PA_ZX_LOG_STREAM(DCHECK, zx_err),           \
+                 BUILDFLAG(PA_DCHECK_IS_ON) && !(condition)) \
+      << "Check failed: " #condition << ". "
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_FUCHSIA_FUCHSIA_LOGGING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc
new file mode 100644
index 0000000..f020680
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging_pa_unittest.cc
@@ -0,0 +1,52 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/fuchsia/fuchsia_logging.h"
+
+#include <fuchsia/logger/cpp/fidl.h>
+#include <lib/fidl/cpp/binding.h>
+#include <lib/sys/cpp/component_context.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base {
+
+namespace {
+
+class MockLogSource {
+ public:
+  MOCK_METHOD0(Log, const char*());
+};
+
+}  // namespace
+
+// Verifies the Fuchsia-specific PA_ZX_*() logging macros.
+TEST(FuchsiaLoggingTestPA, FuchsiaLogging) {
+  MockLogSource mock_log_source;
+  constexpr int kTimes =
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+      2;
+#else
+      1;
+#endif
+  EXPECT_CALL(mock_log_source, Log())
+      .Times(kTimes)
+      .WillRepeatedly(testing::Return("log message"));
+
+  logging::SetMinLogLevel(logging::LOGGING_INFO);
+
+  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
+  EXPECT_EQ(BUILDFLAG(PA_DCHECK_IS_ON), PA_DLOG_IS_ON(INFO));
+
+  PA_ZX_LOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
+  PA_ZX_DLOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
+
+  PA_ZX_CHECK(true, ZX_ERR_INTERNAL);
+  PA_ZX_DCHECK(true, ZX_ERR_INTERNAL);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/gtest_prod_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/gtest_prod_util.h
new file mode 100644
index 0000000..7347d26
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/gtest_prod_util.h
@@ -0,0 +1,66 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
+
+#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
+
+// This is a wrapper for gtest's FRIEND_TEST macro that friends the
+// test with all possible prefixes. This is very helpful when changing the test
+// prefix, because the friend declarations don't need to be updated.
+//
+// Example usage:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   PA_FRIEND_TEST_ALL_PREFIXES(MyClassTest, MyMethod);
+// };
+#define PA_FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
+  FRIEND_TEST(test_case_name, test_name);                      \
+  FRIEND_TEST(test_case_name, DISABLED_##test_name);           \
+  FRIEND_TEST(test_case_name, FLAKY_##test_name)
+
+// C++ compilers will refuse to compile the following code:
+//
+// namespace foo {
+// class MyClass {
+//  private:
+//   PA_FRIEND_TEST_ALL_PREFIXES(MyClassTest, TestMethod);
+//   bool private_var;
+// };
+// }  // namespace foo
+//
+// class MyClassTest::TestMethod() {
+//   foo::MyClass foo_class;
+//   foo_class.private_var = true;
+// }
+//
+// Unless you forward declare MyClassTest::TestMethod outside of namespace foo.
+// Use PA_FORWARD_DECLARE_TEST to do so for all possible prefixes.
+//
+// Example usage:
+//
+// PA_FORWARD_DECLARE_TEST(MyClassTest, TestMethod);
+//
+// namespace foo {
+// class MyClass {
+//  private:
+//   PA_FRIEND_TEST_ALL_PREFIXES(::MyClassTest, TestMethod);  // NOTE use of ::
+//   bool private_var;
+// };
+// }  // namespace foo
+//
+// class MyClassTest::TestMethod() {
+//   foo::MyClass foo_class;
+//   foo_class.private_var = true;
+// }
+
+#define PA_FORWARD_DECLARE_TEST(test_case_name, test_name) \
+  class test_case_name##_##test_name##_Test;               \
+  class test_case_name##_##DISABLED_##test_name##_Test;    \
+  class test_case_name##_##FLAKY_##test_name##_Test
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h
new file mode 100644
index 0000000..1f873d6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h
@@ -0,0 +1,162 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
+
+#include "build/build_config.h"
+
+// Crashes in the fastest possible way with no attempt at logging.
+// There are several constraints; see http://crbug.com/664209 for more context.
+//
+// - PA_TRAP_SEQUENCE_() must be fatal. It should not be possible to ignore the
+//   resulting exception or simply hit 'continue' to skip over it in a debugger.
+// - Different instances of PA_TRAP_SEQUENCE_() must not be folded together, to
+//   ensure crash reports are debuggable. Unlike __builtin_trap(), asm volatile
+//   blocks will not be folded together.
+//   Note: PA_TRAP_SEQUENCE_() previously required an instruction with a unique
+//   nonce since unlike clang, GCC folds together identical asm volatile
+//   blocks.
+// - PA_TRAP_SEQUENCE_() must produce a signal that is distinct from an invalid
+//   memory access.
+// - PA_TRAP_SEQUENCE_() must be treated as a set of noreturn instructions.
+//   __builtin_unreachable() is used to provide that hint here. clang also uses
+//   this as a heuristic to pack the instructions in the function epilogue to
+//   improve code density.
+//
+// Additional properties that are nice to have:
+// - PA_TRAP_SEQUENCE_() should be as compact as possible.
+// - The first instruction of PA_TRAP_SEQUENCE_() should not change, to avoid
+//   shifting crash reporting clusters. As a consequence of this, explicit
+//   assembly is preferred over intrinsics.
+//   Note: this last bullet point may no longer be true, and may be removed in
+//   the future.
+
+// Note: PA_TRAP_SEQUENCE_() is currently split into two macro helpers due to
+// fact that clang emits an actual instruction for __builtin_unreachable() on
+// certain platforms (see https://crbug.com/958675). In addition, the
+// int3/bkpt/brk will be removed in followups, so splitting it up like this now
+// makes it easy to land the followups.
+
+#if defined(COMPILER_GCC)
+
+#if defined(ARCH_CPU_X86_FAMILY)
+
+// TODO(https://crbug.com/958675): In theory, it should be possible to use just
+// int3. However, there are a number of crashes with SIGILL as the exception
+// code, so it seems likely that there's a signal handler that allows execution
+// to continue after SIGTRAP.
+#define PA_TRAP_SEQUENCE1_() asm volatile("int3")
+
+#if BUILDFLAG(IS_APPLE)
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see PA_IMMEDIATE_CRASH below) and already emits a ud2 on Mac.
+#define PA_TRAP_SEQUENCE2_() asm volatile("")
+#else
+#define PA_TRAP_SEQUENCE2_() asm volatile("ud2")
+#endif  // BUILDFLAG(IS_APPLE)
+
+#elif defined(ARCH_CPU_ARMEL)
+
+// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
+// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
+// cause a SIGTRAP from userspace without using a syscall (which would be a
+// problem for sandboxing).
+// TODO(https://crbug.com/958675): Remove bkpt from this sequence.
+#define PA_TRAP_SEQUENCE1_() asm volatile("bkpt #0")
+#define PA_TRAP_SEQUENCE2_() asm volatile("udf #0")
+
+#elif defined(ARCH_CPU_ARM64)
+
+// This will always generate a SIGTRAP on arm64.
+// TODO(https://crbug.com/958675): Remove brk from this sequence.
+#define PA_TRAP_SEQUENCE1_() asm volatile("brk #0")
+#define PA_TRAP_SEQUENCE2_() asm volatile("hlt #0")
+
+#else
+
+// Crash report accuracy will not be guaranteed on other architectures, but at
+// least this will crash as expected.
+#define PA_TRAP_SEQUENCE1_() __builtin_trap()
+#define PA_TRAP_SEQUENCE2_() asm volatile("")
+
+#endif  // ARCH_CPU_*
+
+#elif defined(COMPILER_MSVC)
+
+#if !defined(__clang__)
+
+// MSVC x64 doesn't support inline asm, so use the MSVC intrinsic.
+#define PA_TRAP_SEQUENCE1_() __debugbreak()
+#define PA_TRAP_SEQUENCE2_()
+
+#elif defined(ARCH_CPU_ARM64)
+
+// Windows ARM64 uses "BRK #F000" as its breakpoint instruction, and
+// __debugbreak() generates that in both VC++ and clang.
+#define PA_TRAP_SEQUENCE1_() __debugbreak()
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see PA_IMMEDIATE_CRASH below) and already emits a ud2 on Win64,
+// https://crbug.com/958373
+#define PA_TRAP_SEQUENCE2_() __asm volatile("")
+
+#else
+
+#define PA_TRAP_SEQUENCE1_() asm volatile("int3")
+#define PA_TRAP_SEQUENCE2_() asm volatile("ud2")
+
+#endif  // __clang__
+
+#else
+
+#error No supported trap sequence!
+
+#endif  // COMPILER_GCC
+
+#define PA_TRAP_SEQUENCE_() \
+  do {                      \
+    PA_TRAP_SEQUENCE1_();   \
+    PA_TRAP_SEQUENCE2_();   \
+  } while (false)
+
+// CHECK() and the trap sequence can be invoked from a constexpr function.
+// This could make compilation fail on GCC, as it forbids directly using inline
+// asm inside a constexpr function. However, it allows calling a lambda
+// expression including the same asm.
+// The side effect is that the top of the stacktrace will not point to the
+// calling function, but to this anonymous lambda. This is still useful as the
+// full name of the lambda will typically include the name of the function that
+// calls CHECK() and the debugger will still break at the right line of code.
+#if !defined(COMPILER_GCC) || defined(__clang__)
+
+#define PA_WRAPPED_TRAP_SEQUENCE_() PA_TRAP_SEQUENCE_()
+
+#else
+
+#define PA_WRAPPED_TRAP_SEQUENCE_() \
+  do {                              \
+    [] { PA_TRAP_SEQUENCE_(); }();  \
+  } while (false)
+
+#endif  // !defined(COMPILER_GCC) || defined(__clang__)
+
+#if defined(__clang__) || defined(COMPILER_GCC)
+
+// __builtin_unreachable() hints to the compiler that this is noreturn and can
+// be packed in the function epilogue.
+#define PA_IMMEDIATE_CRASH()     \
+  ({                             \
+    PA_WRAPPED_TRAP_SEQUENCE_(); \
+    __builtin_unreachable();     \
+  })
+
+#else
+
+// This supports non-Chromium users of logging.h, like pdfium, building with
+// MSVC. On MSVC there is no __builtin_unreachable().
+#define PA_IMMEDIATE_CRASH() PA_WRAPPED_TRAP_SEQUENCE_()
+
+#endif  // defined(__clang__) || defined(COMPILER_GCC)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_IMMEDIATE_CRASH_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.h
new file mode 100644
index 0000000..bdc7b31
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.h
@@ -0,0 +1,36 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
+
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base::ios {
+
+// Returns whether the operating system is iOS 12 or later.
+// TODO(crbug.com/1129482): Remove once minimum supported version is at least 12
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) bool IsRunningOnIOS12OrLater();
+
+// Returns whether the operating system is iOS 13 or later.
+// TODO(crbug.com/1129483): Remove once minimum supported version is at least 13
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) bool IsRunningOnIOS13OrLater();
+
+// Returns whether the operating system is iOS 14 or later.
+// TODO(crbug.com/1129484): Remove once minimum supported version is at least 14
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) bool IsRunningOnIOS14OrLater();
+
+// Returns whether the operating system is iOS 15 or later.
+// TODO(crbug.com/1227419): Remove once minimum supported version is at least 15
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) bool IsRunningOnIOS15OrLater();
+
+// Returns whether the operating system is at the given version or later.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix);
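+//
+// Example usage (illustrative only; the version numbers are placeholders):
+//
+//   if (partition_alloc::internal::base::ios::IsRunningOnOrLater(16, 4, 0)) {
+//     // Use an API that is only available on this iOS version or later.
+//   }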
+
+}  // namespace partition_alloc::internal::base::ios
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.mm
new file mode 100644
index 0000000..1772b46
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.mm
@@ -0,0 +1,58 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.h"
+
+#include <array>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info.h"
+
+namespace partition_alloc::internal::base::ios {
+
+bool IsRunningOnIOS12OrLater() {
+  static const bool is_running_on_or_later = IsRunningOnOrLater(12, 0, 0);
+  return is_running_on_or_later;
+}
+
+bool IsRunningOnIOS13OrLater() {
+  static const bool is_running_on_or_later = IsRunningOnOrLater(13, 0, 0);
+  return is_running_on_or_later;
+}
+
+bool IsRunningOnIOS14OrLater() {
+  static const bool is_running_on_or_later = IsRunningOnOrLater(14, 0, 0);
+  return is_running_on_or_later;
+}
+
+bool IsRunningOnIOS15OrLater() {
+  static const bool is_running_on_or_later = IsRunningOnOrLater(15, 0, 0);
+  return is_running_on_or_later;
+}
+
+bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
+  static const class OSVersion {
+   public:
+    OSVersion() {
+      SysInfo::OperatingSystemVersionNumbers(
+          &current_version_[0], &current_version_[1], &current_version_[2]);
+    }
+
+    bool IsRunningOnOrLater(int32_t version[3]) const {
+      for (size_t i = 0; i < std::size(current_version_); ++i) {
+        if (current_version_[i] != version[i]) {
+          return current_version_[i] > version[i];
+        }
+      }
+      return true;
+    }
+
+   private:
+    int32_t current_version_[3];
+  } kOSVersion;
+
+  int32_t version[3] = {major, minor, bug_fix};
+  return kOSVersion.IsRunningOnOrLater(version);
+}
+
+}  // namespace partition_alloc::internal::base::ios
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.cc
new file mode 100644
index 0000000..eee763a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.cc
@@ -0,0 +1,229 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.h"
+
+// TODO(1151236): After finishing copying //base files to PA library, remove
+// defined(BASE_CHECK_H_) from here.
+#if defined(                                                                                 \
+    BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CHECK_H_) || \
+    defined(BASE_CHECK_H_) ||                                                                \
+    defined(                                                                                 \
+        BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CHECK_H_)
+#error "log_message.h should not include check.h"
+#endif
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+
+#include <io.h>
+#include <windows.h>
+
+#endif
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.h"
+#endif
+
+namespace partition_alloc::internal::logging {
+
+namespace {
+
+const char* const log_severity_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};
+static_assert(LOGGING_NUM_SEVERITIES == std::size(log_severity_names),
+              "Incorrect number of log_severity_names");
+
+const char* log_severity_name(int severity) {
+  if (severity >= 0 && severity < LOGGING_NUM_SEVERITIES) {
+    return log_severity_names[severity];
+  }
+  return "UNKNOWN";
+}
+
+// A log message handler that gets notified of every log message we process.
+LogMessageHandlerFunction g_log_message_handler = nullptr;
+
+}  // namespace
+
+#if BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
+// In DCHECK-enabled Chrome builds, allow the meaning of LOGGING_DCHECK to be
+// determined at run-time. We default it to INFO, to avoid it triggering
+// crashes before the run-time has explicitly chosen the behaviour.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+logging::LogSeverity LOGGING_DCHECK = LOGGING_INFO;
+#endif  // BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
+
+// This is never instantiated; it's just used for EAT_STREAM_PARAMETERS to have
+// an object of the correct type on the LHS of the unused part of the ternary
+// operator.
+base::strings::CStringBuilder* g_swallow_stream;
+
+void SetLogMessageHandler(LogMessageHandlerFunction handler) {
+  g_log_message_handler = handler;
+}
+
+LogMessageHandlerFunction GetLogMessageHandler() {
+  return g_log_message_handler;
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+}
+
+LogMessage::LogMessage(const char* file, int line, const char* condition)
+    : severity_(LOGGING_FATAL), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << condition << ". ";
+}
+
+LogMessage::~LogMessage() {
+  stream_ << '\n';
+  const char* str_newline = stream_.c_str();
+
+  // Give any log message handler first dibs on the message.
+  if (g_log_message_handler &&
+      g_log_message_handler(severity_, file_, line_, message_start_,
+                            str_newline)) {
+    // The handler took care of it, no further processing.
+    return;
+  }
+
+  // Always use RawLog() if g_log_message_handler doesn't filter messages.
+  RawLog(severity_, str_newline);
+
+  // TODO(1293552): Enable a stack trace on a fatal on fuchsia.
+#if !defined(OFFICIAL_BUILD) && (BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_WIN)) && \
+    !defined(__UCLIBC__) && !BUILDFLAG(IS_AIX)
+  // TODO(1293552): Show a stack trace on a fatal, unless a debugger is
+  // attached.
+  if (severity_ == LOGGING_FATAL) {
+    constexpr size_t kMaxTracesOfLoggingFatal = 32u;
+    const void* traces[kMaxTracesOfLoggingFatal];
+    size_t num_traces =
+        base::debug::CollectStackTrace(traces, kMaxTracesOfLoggingFatal);
+    base::debug::PrintStackTrace(traces, num_traces);
+  }
+#endif
+
+  if (severity_ == LOGGING_FATAL) {
+    PA_IMMEDIATE_CRASH();
+  }
+}
+
+// Writes the common header info to the stream.
+void LogMessage::Init(const char* file, int line) {
+  const char* last_slash_pos = base::strings::FindLastOf(file, "\\/");
+  const char* filename = last_slash_pos ? last_slash_pos + 1 : file;
+
+  {
+    // TODO(darin): It might be nice if the columns were fixed width.
+    stream_ << '[';
+    // TODO(1151236): show process id, thread id, timestamp and so on
+    // if needed.
+    if (severity_ >= 0) {
+      stream_ << log_severity_name(severity_);
+    } else {
+      stream_ << "VERBOSE" << -severity_;
+    }
+    stream_ << ":" << filename << "(" << line << ")] ";
+  }
+  message_start_ = strlen(stream_.c_str());
+}
+
+#if BUILDFLAG(IS_WIN)
+// This has already been defined in the header, but defining it again as DWORD
+// ensures that the type used in the header is equivalent to DWORD. If not,
+// the redefinition is a compile error.
+typedef DWORD SystemErrorCode;
+#endif
+
+SystemErrorCode GetLastSystemErrorCode() {
+#if BUILDFLAG(IS_WIN)
+  return ::GetLastError();
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  return errno;
+#endif
+}
+
+void SystemErrorCodeToStream(base::strings::CStringBuilder& os,
+                             SystemErrorCode error_code) {
+  char buffer[256];
+#if BUILDFLAG(IS_WIN)
+  const int kErrorMessageBufferSize = 256;
+  char msgbuf[kErrorMessageBufferSize];
+  DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+  DWORD len = FormatMessageA(flags, nullptr, error_code, 0, msgbuf,
+                             std::size(msgbuf), nullptr);
+  if (len) {
+    // Messages returned by the system end with line breaks.
+    const char* whitespace_pos = base::strings::FindLastNotOf(msgbuf, "\n\r ");
+    if (whitespace_pos) {
+      size_t whitespace_index = whitespace_pos - msgbuf + 1;
+      msgbuf[whitespace_index] = '\0';
+    }
+    base::strings::SafeSPrintf(buffer, "%s (0x%x)", msgbuf, error_code);
+    os << buffer;
+    return;
+  }
+  base::strings::SafeSPrintf(buffer,
+                             "Error (0x%x) while retrieving error. (0x%x)",
+                             GetLastError(), error_code);
+  os << buffer;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  base::safe_strerror_r(error_code, buffer, sizeof(buffer));
+  os << buffer << " (" << error_code << ")";
+#endif  // BUILDFLAG(IS_WIN)
+}
+
+#if BUILDFLAG(IS_WIN)
+Win32ErrorLogMessage::Win32ErrorLogMessage(const char* file,
+                                           int line,
+                                           LogSeverity severity,
+                                           SystemErrorCode err)
+    : LogMessage(file, line, severity), err_(err) {}
+
+Win32ErrorLogMessage::~Win32ErrorLogMessage() {
+  stream() << ": ";
+  SystemErrorCodeToStream(stream(), err_);
+  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
+  // field) and use Alias in hopes that it makes it into crash dumps.
+  DWORD last_error = err_;
+  base::debug::Alias(&last_error);
+}
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+ErrnoLogMessage::ErrnoLogMessage(const char* file,
+                                 int line,
+                                 LogSeverity severity,
+                                 SystemErrorCode err)
+    : LogMessage(file, line, severity), err_(err) {}
+
+ErrnoLogMessage::~ErrnoLogMessage() {
+  stream() << ": ";
+  SystemErrorCodeToStream(stream(), err_);
+  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
+  // field) and use Alias in hopes that it makes it into crash dumps.
+  int last_error = err_;
+  base::debug::Alias(&last_error);
+}
+#endif  // BUILDFLAG(IS_WIN)
+
+}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.h
new file mode 100644
index 0000000..3be4c56
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.h
@@ -0,0 +1,154 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_LOG_MESSAGE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_LOG_MESSAGE_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::logging {
+
+// Sets the Log Message Handler that gets passed every log message before
+// it's sent to other log destinations (if any).
+// Returns true to signal that it handled the message and the message
+// should not be sent to other log destinations.
+typedef bool (*LogMessageHandlerFunction)(int severity,
+                                          const char* file,
+                                          int line,
+                                          size_t message_start,
+                                          const char* str);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void SetLogMessageHandler(LogMessageHandlerFunction handler);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+LogMessageHandlerFunction GetLogMessageHandler();
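+//
+// Example handler (an illustrative sketch; |MyLogHandler| is a placeholder):
+//
+//   bool MyLogHandler(int severity, const char* file, int line,
+//                     size_t message_start, const char* str) {
+//     // |str| holds the fully formatted message; returning true suppresses
+//     // the default RawLog() output to stderr.
+//     return true;
+//   }
+//   ...
+//   SetLogMessageHandler(&MyLogHandler);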
+
+using LogSeverity = int;
+constexpr LogSeverity LOGGING_VERBOSE = -1;  // This is level 1 verbosity
+// Note: the log severities are used to index into the array of names,
+// see log_severity_names.
+constexpr LogSeverity LOGGING_INFO = 0;
+constexpr LogSeverity LOGGING_WARNING = 1;
+constexpr LogSeverity LOGGING_ERROR = 2;
+constexpr LogSeverity LOGGING_FATAL = 3;
+constexpr LogSeverity LOGGING_NUM_SEVERITIES = 4;
+
+// LOGGING_DFATAL is LOGGING_FATAL in DCHECK-enabled builds, ERROR in normal
+// mode.
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+constexpr LogSeverity LOGGING_DFATAL = LOGGING_FATAL;
+#else
+constexpr LogSeverity LOGGING_DFATAL = LOGGING_ERROR;
+#endif
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+extern base::strings::CStringBuilder* g_swallow_stream;
+
+// This class more or less represents a particular log message.  You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though.  You should use the PA_LOG() macro (and variants thereof)
+// defined in logging.h.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) LogMessage {
+ public:
+  // Used for PA_LOG(severity).
+  LogMessage(const char* file, int line, LogSeverity severity);
+
+  // Used for CHECK().  Implied severity = LOGGING_FATAL.
+  LogMessage(const char* file, int line, const char* condition);
+  LogMessage(const LogMessage&) = delete;
+  LogMessage& operator=(const LogMessage&) = delete;
+  virtual ~LogMessage();
+
+  base::strings::CStringBuilder& stream() { return stream_; }
+
+  LogSeverity severity() { return severity_; }
+  const char* c_str() { return stream_.c_str(); }
+
+ private:
+  void Init(const char* file, int line);
+
+  const LogSeverity severity_;
+  base::strings::CStringBuilder stream_;
+  size_t message_start_;  // Offset of the start of the message (past prefix
+                          // info).
+  // The file and line information passed in to the constructor.
+  const char* const file_;
+  const int line_;
+
+  // This is useful because the LogMessage class uses a lot of Win32 calls
+  // that will clobber the value of GLE; without it, the code that called the
+  // log function would see a lost thread error value when the log call
+  // returns.
+  base::ScopedClearLastError last_error_;
+};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros.  This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LogMessageVoidify {
+ public:
+  LogMessageVoidify() = default;
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(base::strings::CStringBuilder&) {}
+};
+
+#if BUILDFLAG(IS_WIN)
+typedef unsigned long SystemErrorCode;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+typedef int SystemErrorCode;
+#endif
+
+// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
+// pull in windows.h just for GetLastError() and DWORD.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+SystemErrorCode GetLastSystemErrorCode();
+
+#if BUILDFLAG(IS_WIN)
+// Appends a formatted system message of the GetLastError() type.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) Win32ErrorLogMessage
+    : public LogMessage {
+ public:
+  Win32ErrorLogMessage(const char* file,
+                       int line,
+                       LogSeverity severity,
+                       SystemErrorCode err);
+  Win32ErrorLogMessage(const Win32ErrorLogMessage&) = delete;
+  Win32ErrorLogMessage& operator=(const Win32ErrorLogMessage&) = delete;
+  // Appends the error message before destructing the encapsulated class.
+  ~Win32ErrorLogMessage() override;
+
+ private:
+  SystemErrorCode err_;
+};
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+// Appends a formatted system message of the errno type
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ErrnoLogMessage
+    : public LogMessage {
+ public:
+  ErrnoLogMessage(const char* file,
+                  int line,
+                  LogSeverity severity,
+                  SystemErrorCode err);
+  ErrnoLogMessage(const ErrnoLogMessage&) = delete;
+  ErrnoLogMessage& operator=(const ErrnoLogMessage&) = delete;
+  // Appends the error message before destructing the encapsulated class.
+  ~ErrnoLogMessage() override;
+
+ private:
+  SystemErrorCode err_;
+};
+#endif  // BUILDFLAG(IS_WIN)
+
+}  // namespace partition_alloc::internal::logging
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_LOG_MESSAGE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.cc
new file mode 100644
index 0000000..275d54d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.cc
@@ -0,0 +1,123 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+
+// TODO(1151236): After finishing copying //base files to PA library, remove
+// defined(BASE_CHECK_H_) from here.
+#if defined(                                                                                 \
+    BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_CHECK_H_) || \
+    defined(BASE_CHECK_H_) ||                                                                \
+    defined(                                                                                 \
+        BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CHECK_H_)
+#error "logging.h should not include check.h"
+#endif
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "build/build_config.h"
+
+#include <algorithm>
+
+#if BUILDFLAG(IS_WIN)
+
+#include <io.h>
+#include <windows.h>
+// Windows warns on using write().  It prefers _write().
+#define write(fd, buf, count) _write(fd, buf, static_cast<unsigned int>(count))
+// Windows doesn't define STDERR_FILENO.  Define it here.
+#define STDERR_FILENO 2
+
+#endif
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
+
+namespace partition_alloc::internal::logging {
+
+namespace {
+
+int g_min_log_level = 0;
+
+#if !BUILDFLAG(IS_WIN)
+void WriteToStderr(const char* data, size_t length) {
+  size_t bytes_written = 0;
+  int rv;
+  while (bytes_written < length) {
+    rv = PA_HANDLE_EINTR(
+        write(STDERR_FILENO, data + bytes_written, length - bytes_written));
+    if (rv < 0) {
+      // Give up, nothing we can do now.
+      break;
+    }
+    bytes_written += rv;
+  }
+}
+#else   // !BUILDFLAG(IS_WIN)
+void WriteToStderr(const char* data, size_t length) {
+  HANDLE handle = ::GetStdHandle(STD_ERROR_HANDLE);
+  const char* ptr = data;
+  const char* ptr_end = data + length;
+  while (ptr < ptr_end) {
+    DWORD bytes_written = 0;
+    if (!::WriteFile(handle, ptr, ptr_end - ptr, &bytes_written, nullptr) ||
+        bytes_written == 0) {
+      // Give up, nothing we can do now.
+      break;
+    }
+    ptr += bytes_written;
+  }
+}
+#endif  // !BUILDFLAG(IS_WIN)
+
+}  // namespace
+
+void SetMinLogLevel(int level) {
+  g_min_log_level = std::min(LOGGING_FATAL, level);
+}
+
+int GetMinLogLevel() {
+  return g_min_log_level;
+}
+
+bool ShouldCreateLogMessage(int severity) {
+  if (severity < g_min_log_level) {
+    return false;
+  }
+
+  // Return true here unless we know ~LogMessage won't do anything.
+  return true;
+}
+
+int GetVlogVerbosity() {
+  return std::max(-1, LOGGING_INFO - GetMinLogLevel());
+}
+
+void RawLog(int level, const char* message) {
+  if (level >= g_min_log_level && message) {
+#if !BUILDFLAG(IS_WIN)
+    const size_t message_len = strlen(message);
+#else   // !BUILDFLAG(IS_WIN)
+    const size_t message_len = ::lstrlenA(message);
+#endif  // !BUILDFLAG(IS_WIN)
+    WriteToStderr(message, message_len);
+
+    if (message_len > 0 && message[message_len - 1] != '\n') {
+      WriteToStderr("\n", 1);
+    }
+  }
+}
+
+// This was defined at the beginning of this file.
+#undef write
+
+}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h
new file mode 100644
index 0000000..fc641da
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h
@@ -0,0 +1,378 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_LOGGING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_LOGGING_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/log_message.h"
+#include "build/build_config.h"
+
+// TODO(1151236): Update the description below, because logging for the PA
+// standalone library was minimized.
+//
+// Optional message capabilities
+// -----------------------------
+// Assertion failed messages and fatal errors are displayed in a dialog box
+// before the application exits. However, running this UI creates a message
+// loop, which causes application messages to be processed and potentially
+// dispatched to existing application windows. Since the application is in a
+// bad state when this assertion dialog is displayed, these messages may not
+// get processed and hang the dialog, or the application might go crazy.
+//
+// Therefore, it can be beneficial to display the error dialog in a separate
+// process from the main application. When the logging system needs to display
+// a fatal error dialog box, it will look for a program called
+// "DebugMessage.exe" in the same directory as the application executable. It
+// will run this application with the message as the command line, and will
+// not include the name of the application as is traditional for easier
+// parsing.
+//
+// The code for DebugMessage.exe is only one line. In WinMain, do:
+//   MessageBox(NULL, GetCommandLineW(), L"Fatal Error", 0);
+//
+// If DebugMessage.exe is not found, the logging code will use a normal
+// MessageBox, potentially causing the problems discussed above.
+
+// Instructions
+// ------------
+//
+// Make a bunch of macros for logging.  The way to log things is to stream
+// things to PA_LOG(<a particular severity level>).  E.g.,
+//
+//   PA_LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can also do conditional logging:
+//
+//   PA_LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// The CHECK(condition) macro is active in both debug and release builds and
+// effectively performs a PA_LOG(FATAL) which terminates the process and
+// generates a crashdump unless a debugger is attached.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+//   PA_DLOG(INFO) << "Found cookies";
+//
+//   PA_DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles.  PA_LOG_IF and development flags also work well together
+// because the code can be compiled away sometimes.
+//
+// We also have
+//
+//   PA_LOG_ASSERT(assertion);
+//   PA_DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for PA_{,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros.  They look like
+//
+//   PA_VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+//   PA_VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+//
+// There's also PA_VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+//
+//   if (PA_VLOG_IS_ON(2)) {
+//     // do some logging preparation and logging
+//     // that can't be accomplished with just PA_VLOG(2) << ...;
+//   }
+//
+// There is also a PA_VLOG_IF "verbose level" condition macro for sample
+// cases, when some extra computation and preparation for logs is not
+// needed.
+//
+//   PA_VLOG_IF(1, (size > 1024))
+//      << "I'm printed when size is more than 1024 and when you run the "
+//         "program with --v=1 or more";
+//
+// We also override the standard 'assert' to use 'PA_DLOG_ASSERT'.
+//
+// Lastly, there is:
+//
+//   PA_PLOG(ERROR) << "Couldn't do foo";
+//   PA_DPLOG(ERROR) << "Couldn't do foo";
+//   PA_PLOG_IF(ERROR, cond) << "Couldn't do foo";
+//   PA_DPLOG_IF(ERROR, cond) << "Couldn't do foo";
+//   PA_PCHECK(condition) << "Couldn't do foo";
+//   PA_DPCHECK(condition) << "Couldn't do foo";
+//
+// which append the last system error to the message in string form (taken from
+// GetLastError() on Windows and errno on POSIX).
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// There is the special severity of DFATAL, which logs FATAL in DCHECK-enabled
+// builds, ERROR in normal mode.
+//
+// Output is formatted as per the following example:
+// [VERBOSE1:drm_device_handle.cc(90)] Succeeded
+// authenticating /dev/dri/card0 in 0 ms with 1 attempt(s)
+//
+// The colon separated fields inside the brackets are as follows:
+// 1. The log level
+// 2. The filename and line number where the log was instantiated
+//
+// Additional logging-related information can be found here:
+// https://chromium.googlesource.com/chromium/src/+/main/docs/linux/debugging.md#Logging
+
+namespace partition_alloc::internal::logging {
+
+// Sets the log level. Anything at or above this level will be written to the
+// log file/displayed to the user (if applicable). Anything below this level
+// will be silently ignored. The log level defaults to 0 (everything is logged
+// up to level INFO) if this function is not called.
+// Note that log messages for VLOG(x) are logged at level -x, so setting
+// the min log level to negative values enables verbose logging.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) void SetMinLogLevel(int level);
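+//
+// For example (illustrative): SetMinLogLevel(LOGGING_WARNING) silences
+// PA_LOG(INFO), while SetMinLogLevel(-2) keeps PA_LOG(INFO) and additionally
+// enables PA_VLOG(1) and PA_VLOG(2).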
+
+// Gets the current log level.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) int GetMinLogLevel();
+
+// Used by PA_LOG_IS_ON to lazy-evaluate stream arguments.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+bool ShouldCreateLogMessage(int severity);
+
+// Gets the PA_VLOG default verbosity level.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) int GetVlogVerbosity();
+
+// A few definitions of macros that don't generate much code. These are used
+// by PA_LOG() and LOG_IF, etc. Since these are used all over our code, it's
+// better to have compact code for these operations.
+#define PA_COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...)                         \
+  ::partition_alloc::internal::logging::ClassName(                            \
+      __FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_INFO, \
+      ##__VA_ARGS__)
+#define PA_COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
+  ::partition_alloc::internal::logging::ClassName(       \
+      __FILE__, __LINE__,                                \
+      ::partition_alloc::internal::logging::LOGGING_WARNING, ##__VA_ARGS__)
+#define PA_COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...)                         \
+  ::partition_alloc::internal::logging::ClassName(                             \
+      __FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_ERROR, \
+      ##__VA_ARGS__)
+#define PA_COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...)                         \
+  ::partition_alloc::internal::logging::ClassName(                             \
+      __FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_FATAL, \
+      ##__VA_ARGS__)
+#define PA_COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
+  ::partition_alloc::internal::logging::ClassName(      \
+      __FILE__, __LINE__,                               \
+      ::partition_alloc::internal::logging::LOGGING_DFATAL, ##__VA_ARGS__)
+#define PA_COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+  ::partition_alloc::internal::logging::ClassName(      \
+      __FILE__, __LINE__,                               \
+      ::partition_alloc::internal::logging::LOGGING_DCHECK, ##__VA_ARGS__)
+
+#define PA_COMPACT_GOOGLE_LOG_INFO PA_COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
+#define PA_COMPACT_GOOGLE_LOG_WARNING \
+  PA_COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
+#define PA_COMPACT_GOOGLE_LOG_ERROR PA_COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
+#define PA_COMPACT_GOOGLE_LOG_FATAL PA_COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
+#define PA_COMPACT_GOOGLE_LOG_DFATAL PA_COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
+#define PA_COMPACT_GOOGLE_LOG_DCHECK PA_COMPACT_GOOGLE_LOG_EX_DCHECK(LogMessage)
+
+#if BUILDFLAG(IS_WIN)
+// wingdi.h defines ERROR to be 0. When we call PA_LOG(ERROR), it gets
+// substituted with 0, and it expands to PA_COMPACT_GOOGLE_LOG_0. To allow us
+// to keep using this syntax, we define this macro to do the same thing
+// as PA_COMPACT_GOOGLE_LOG_ERROR, and also define ERROR the same way that
+// the Windows SDK does for consistency.
+#define PA_ERROR 0
+#define PA_COMPACT_GOOGLE_LOG_EX_0(ClassName, ...) \
+  PA_COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ##__VA_ARGS__)
+#define PA_COMPACT_GOOGLE_LOG_0 PA_COMPACT_GOOGLE_LOG_ERROR
+// Needed for LOG_IS_ON(ERROR).
+constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
+#endif
+
+// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
+// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
+// always fire if they fail.
+#define PA_LOG_IS_ON(severity)                                   \
+  (::partition_alloc::internal::logging::ShouldCreateLogMessage( \
+      ::partition_alloc::internal::logging::LOGGING_##severity))
+
+// We don't do any caching tricks with VLOG_IS_ON() like the
+// google-glog version since it increases binary size.  This means
+// that using the v-logging functions in conjunction with --vmodule
+// may be slow.
+#define PA_VLOG_IS_ON(verboselevel) \
+  ((verboselevel) <= ::partition_alloc::internal::logging::GetVlogVerbosity())
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold. Condition is evaluated once and only once.
+#define PA_LAZY_STREAM(stream, condition) \
+  !(condition)                            \
+      ? (void)0                           \
+      : ::partition_alloc::internal::logging::LogMessageVoidify() & (stream)
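+//
+// For example (illustrative; ExpensiveSummary() is a placeholder), in
+//
+//   PA_LOG_IF(ERROR, failed) << ExpensiveSummary();
+//
+// ExpensiveSummary() is only evaluated when PA_LOG_IS_ON(ERROR) and |failed|
+// both hold; otherwise the streamed arguments are never touched.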
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// PA_LOG(INFO) becomes the token PA_COMPACT_GOOGLE_LOG_INFO.  There's a
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int)) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&)): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define PA_LOG_STREAM(severity) PA_COMPACT_GOOGLE_LOG_##severity.stream()
+
+#define PA_LOG(severity) \
+  PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_LOG_IS_ON(severity))
+#define PA_LOG_IF(severity, condition) \
+  PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_LOG_IS_ON(severity) && (condition))
+
+// The VLOG macros log with negative verbosities.
+#define PA_VLOG_STREAM(verbose_level)                                  \
+  ::partition_alloc::internal::logging::LogMessage(__FILE__, __LINE__, \
+                                                   -(verbose_level))   \
+      .stream()
+
+#define PA_VLOG(verbose_level) \
+  PA_LAZY_STREAM(PA_VLOG_STREAM(verbose_level), PA_VLOG_IS_ON(verbose_level))
+
+#define PA_VLOG_IF(verbose_level, condition)    \
+  PA_LAZY_STREAM(PA_VLOG_STREAM(verbose_level), \
+                 PA_VLOG_IS_ON(verbose_level) && (condition))
+
+#if BUILDFLAG(IS_WIN)
+#define PA_VPLOG_STREAM(verbose_level)                                \
+  ::partition_alloc::internal::logging::Win32ErrorLogMessage(         \
+      __FILE__, __LINE__, -(verbose_level),                           \
+      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
+      .stream()
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#define PA_VPLOG_STREAM(verbose_level)                                \
+  ::partition_alloc::internal::logging::ErrnoLogMessage(              \
+      __FILE__, __LINE__, -(verbose_level),                           \
+      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
+      .stream()
+#endif
+
+#define PA_VPLOG(verbose_level) \
+  PA_LAZY_STREAM(PA_VPLOG_STREAM(verbose_level), PA_VLOG_IS_ON(verbose_level))
+
+#define PA_VPLOG_IF(verbose_level, condition)    \
+  PA_LAZY_STREAM(PA_VPLOG_STREAM(verbose_level), \
+                 PA_VLOG_IS_ON(verbose_level) && (condition))
+
+// TODO(akalin): Add more VLOG variants, e.g. VPLOG.
+
+#define PA_LOG_ASSERT(condition)                          \
+  PA_LOG_IF(FATAL, !(PA_ANALYZER_ASSUME_TRUE(condition))) \
+      << "Assert failed: " #condition ". "
+
+#if BUILDFLAG(IS_WIN)
+#define PA_PLOG_STREAM(severity)                                      \
+  PA_COMPACT_GOOGLE_LOG_EX_##severity(                                \
+      Win32ErrorLogMessage,                                           \
+      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
+      .stream()
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#define PA_PLOG_STREAM(severity)                                      \
+  PA_COMPACT_GOOGLE_LOG_EX_##severity(                                \
+      ErrnoLogMessage,                                                \
+      ::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
+      .stream()
+#endif
+
+#define PA_PLOG(severity) \
+  PA_LAZY_STREAM(PA_PLOG_STREAM(severity), PA_LOG_IS_ON(severity))
+
+#define PA_PLOG_IF(severity, condition)    \
+  PA_LAZY_STREAM(PA_PLOG_STREAM(severity), \
+                 PA_LOG_IS_ON(severity) && (condition))
+
+// Note that g_swallow_stream is used instead of an arbitrary PA_LOG() stream to
+// avoid the creation of an object with a non-trivial destructor (LogMessage).
+// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
+// pointless instructions to be emitted even at full optimization level, even
+// though the : arm of the ternary operator is clearly never executed. Using a
+// simpler object to be &'d with Voidify() avoids these extra instructions.
+// Using a simpler POD object with a templated operator<< also works to avoid
+// these instructions. However, this causes warnings on statically defined
+// implementations of operator<<(std::ostream, ...) in some .cc files, because
+// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
+// ostream* also is not suitable, because some compilers warn of undefined
+// behavior.
+#define PA_EAT_STREAM_PARAMETERS                                     \
+  true ? (void)0                                                     \
+       : ::partition_alloc::internal::logging::LogMessageVoidify() & \
+             (*::partition_alloc::internal::logging::g_swallow_stream)
+
+// Definitions for DLOG et al.
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_DLOG_IS_ON(severity) PA_LOG_IS_ON(severity)
+#define PA_DLOG_IF(severity, condition) PA_LOG_IF(severity, condition)
+#define PA_DLOG_ASSERT(condition) PA_LOG_ASSERT(condition)
+#define PA_DPLOG_IF(severity, condition) PA_PLOG_IF(severity, condition)
+#define PA_DVLOG_IF(verboselevel, condition) PA_VLOG_IF(verboselevel, condition)
+#define PA_DVPLOG_IF(verboselevel, condition) \
+  PA_VPLOG_IF(verboselevel, condition)
+
+#else  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+// If !BUILDFLAG(PA_DCHECK_IS_ON), we want to avoid emitting any references to
+// |condition| (which may reference a variable defined only if
+// BUILDFLAG(PA_DCHECK_IS_ON)). Contrast this with DCHECK et al., which has
+// different behavior.
+
+#define PA_DLOG_IS_ON(severity) false
+#define PA_DLOG_IF(severity, condition) PA_EAT_STREAM_PARAMETERS
+#define PA_DLOG_ASSERT(condition) PA_EAT_STREAM_PARAMETERS
+#define PA_DPLOG_IF(severity, condition) PA_EAT_STREAM_PARAMETERS
+#define PA_DVLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
+#define PA_DVPLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_DLOG(severity) \
+  PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_DLOG_IS_ON(severity))
+
+#define PA_DPLOG(severity) \
+  PA_LAZY_STREAM(PA_PLOG_STREAM(severity), PA_DLOG_IS_ON(severity))
+
+#define PA_DVLOG(verboselevel) PA_DVLOG_IF(verboselevel, true)
+
+#define PA_DVPLOG(verboselevel) PA_DVPLOG_IF(verboselevel, true)
+
+// Definitions for DCHECK et al.
+
+#if BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) extern LogSeverity LOGGING_DCHECK;
+#else
+constexpr LogSeverity LOGGING_DCHECK = LOGGING_FATAL;
+#endif  // BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
+
+// Redefine the standard assert to use our nice log files
+#undef assert
+#define assert(x) PA_DLOG_ASSERT(x)
+
+// Async signal safe logging mechanism.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void RawLog(int level, const char* message);
+
+#define PA_RAW_LOG(level, message)              \
+  ::partition_alloc::internal::logging::RawLog( \
+      ::partition_alloc::internal::logging::LOGGING_##level, message)
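+
+// Example usage (illustrative): PA_RAW_LOG(ERROR, "allocation failed\n");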
+
+}  // namespace partition_alloc::internal::logging
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_LOGGING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging_pa_unittest.cc
new file mode 100644
index 0000000..5fa8147
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging_pa_unittest.cc
@@ -0,0 +1,148 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "build/build_config.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::logging {
+
+namespace {
+
+using ::testing::_;
+using ::testing::Return;
+
+class MockLogSource {
+ public:
+  MOCK_METHOD0(Log, const char*());
+};
+
+TEST(PALoggingTest, BasicLogging) {
+  MockLogSource mock_log_source;
+  constexpr int kTimes =
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+      16;
+#else
+      8;
+#endif
+  EXPECT_CALL(mock_log_source, Log())
+      .Times(kTimes)
+      .WillRepeatedly(Return("log message"));
+
+  SetMinLogLevel(LOGGING_INFO);
+
+  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
+  EXPECT_EQ(BUILDFLAG(PA_DCHECK_IS_ON), PA_DLOG_IS_ON(INFO));
+  EXPECT_TRUE(PA_VLOG_IS_ON(0));
+
+  PA_LOG(INFO) << mock_log_source.Log();
+  PA_LOG_IF(INFO, true) << mock_log_source.Log();
+  PA_PLOG(INFO) << mock_log_source.Log();
+  PA_PLOG_IF(INFO, true) << mock_log_source.Log();
+  PA_VLOG(0) << mock_log_source.Log();
+  PA_VLOG_IF(0, true) << mock_log_source.Log();
+  PA_VPLOG(0) << mock_log_source.Log();
+  PA_VPLOG_IF(0, true) << mock_log_source.Log();
+
+  PA_DLOG(INFO) << mock_log_source.Log();
+  PA_DLOG_IF(INFO, true) << mock_log_source.Log();
+  PA_DPLOG(INFO) << mock_log_source.Log();
+  PA_DPLOG_IF(INFO, true) << mock_log_source.Log();
+  PA_DVLOG(0) << mock_log_source.Log();
+  PA_DVLOG_IF(0, true) << mock_log_source.Log();
+  PA_DVPLOG(0) << mock_log_source.Log();
+  PA_DVPLOG_IF(0, true) << mock_log_source.Log();
+}
+
+TEST(PALoggingTest, LogIsOn) {
+  SetMinLogLevel(LOGGING_INFO);
+  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
+  EXPECT_TRUE(PA_LOG_IS_ON(WARNING));
+  EXPECT_TRUE(PA_LOG_IS_ON(ERROR));
+  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
+  EXPECT_TRUE(PA_LOG_IS_ON(DFATAL));
+
+  SetMinLogLevel(LOGGING_WARNING);
+  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
+  EXPECT_TRUE(PA_LOG_IS_ON(WARNING));
+  EXPECT_TRUE(PA_LOG_IS_ON(ERROR));
+  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
+  EXPECT_TRUE(PA_LOG_IS_ON(DFATAL));
+
+  SetMinLogLevel(LOGGING_ERROR);
+  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
+  EXPECT_FALSE(PA_LOG_IS_ON(WARNING));
+  EXPECT_TRUE(PA_LOG_IS_ON(ERROR));
+  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
+  EXPECT_TRUE(PA_LOG_IS_ON(DFATAL));
+
+  SetMinLogLevel(LOGGING_FATAL + 1);
+  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
+  EXPECT_FALSE(PA_LOG_IS_ON(WARNING));
+  EXPECT_FALSE(PA_LOG_IS_ON(ERROR));
+  // PA_LOG_IS_ON(FATAL) should always be true.
+  EXPECT_TRUE(PA_LOG_IS_ON(FATAL));
+  // If BUILDFLAG(PA_DCHECK_IS_ON) then DFATAL is FATAL.
+  EXPECT_EQ(BUILDFLAG(PA_DCHECK_IS_ON), PA_LOG_IS_ON(DFATAL));
+}
+
+TEST(PALoggingTest, LoggingIsLazyBySeverity) {
+  MockLogSource mock_log_source;
+  EXPECT_CALL(mock_log_source, Log()).Times(0);
+
+  SetMinLogLevel(LOGGING_WARNING);
+
+  EXPECT_FALSE(PA_LOG_IS_ON(INFO));
+  EXPECT_FALSE(PA_DLOG_IS_ON(INFO));
+  EXPECT_FALSE(PA_VLOG_IS_ON(1));
+
+  PA_LOG(INFO) << mock_log_source.Log();
+  PA_LOG_IF(INFO, false) << mock_log_source.Log();
+  PA_PLOG(INFO) << mock_log_source.Log();
+  PA_PLOG_IF(INFO, false) << mock_log_source.Log();
+  PA_VLOG(1) << mock_log_source.Log();
+  PA_VLOG_IF(1, true) << mock_log_source.Log();
+  PA_VPLOG(1) << mock_log_source.Log();
+  PA_VPLOG_IF(1, true) << mock_log_source.Log();
+
+  PA_DLOG(INFO) << mock_log_source.Log();
+  PA_DLOG_IF(INFO, true) << mock_log_source.Log();
+  PA_DPLOG(INFO) << mock_log_source.Log();
+  PA_DPLOG_IF(INFO, true) << mock_log_source.Log();
+  PA_DVLOG(1) << mock_log_source.Log();
+  PA_DVLOG_IF(1, true) << mock_log_source.Log();
+  PA_DVPLOG(1) << mock_log_source.Log();
+  PA_DVPLOG_IF(1, true) << mock_log_source.Log();
+}
+
+// Always logs to stderr (via RawLog) if no message handler is assigned.
+TEST(PALoggingTest, LogIsAlwaysToStdErr) {
+  MockLogSource mock_log_source_stderr;
+  SetMinLogLevel(LOGGING_INFO);
+  EXPECT_TRUE(PA_LOG_IS_ON(INFO));
+  EXPECT_CALL(mock_log_source_stderr, Log()).Times(1).WillOnce(Return("foo"));
+  PA_LOG(INFO) << mock_log_source_stderr.Log();
+}
+
+TEST(PALoggingTest, DebugLoggingReleaseBehavior) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  int debug_only_variable = 1;
+#endif
+  // These should avoid emitting references to |debug_only_variable|
+  // in release mode.
+  PA_DLOG_IF(INFO, debug_only_variable) << "test";
+  PA_DLOG_ASSERT(debug_only_variable) << "test";
+  PA_DPLOG_IF(INFO, debug_only_variable) << "test";
+  PA_DVLOG_IF(1, debug_only_variable) << "test";
+}
+
+}  // namespace
+
+}  // namespace partition_alloc::internal::logging
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h
new file mode 100644
index 0000000..ed79036
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h
@@ -0,0 +1,25 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_
+
+#include <AvailabilityMacros.h>
+#import <CoreGraphics/CoreGraphics.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base::mac {
+
+// MacOSMajorVersion() returns the major version number (e.g. macOS 12.6.5
+// returns 12) of the macOS currently running. Use for runtime OS version
+// checking. Prefer to use @available in Objective-C files. Note that this does
+// not include any Rapid Security Response (RSR) suffixes (the "(a)" at the end
+// of version numbers).
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+__attribute__((const)) int MacOSMajorVersion();
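+//
+// Example usage (illustrative):
+//
+//   if (partition_alloc::internal::base::mac::MacOSMajorVersion() >= 13) {
+//     // Take a code path that requires macOS 13 or later.
+//   }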
+
+}  // namespace partition_alloc::internal::base::mac
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.mm
new file mode 100644
index 0000000..94b08d1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.mm
@@ -0,0 +1,98 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"
+
+#include <stddef.h>
+#include <string.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+
+// This is a simplified version of base::mac. Because
+// "base/strings/string_split.h" is unavailable, it only provides access to the
+// macOS major version number, derived via direct string work on the Darwin
+// version string.
+
+namespace partition_alloc::internal::base::mac {
+
+namespace {
+
+// Returns the running system's Darwin major version. Don't call this, it's an
+// implementation detail and its result is meant to be cached by
+// MacOSMajorVersion().
+int DarwinMajorVersion() {
+  // base::OperatingSystemVersionNumbers() at one time called Gestalt(), which
+  // was observed to be able to spawn threads (see https://crbug.com/53200).
+  // Nowadays that function calls -[NSProcessInfo operatingSystemVersion], whose
+  // current implementation does things like hit the file system, which is
+  // possibly a blocking operation. Either way, it's overkill for what needs to
+  // be done here.
+  //
+  // uname, on the other hand, is implemented as a simple series of sysctl
+  // system calls to obtain the relevant data from the kernel. The data is
+  // compiled right into the kernel, so no threads or blocking or other
+  // funny business is necessary.
+
+  struct utsname uname_info;
+  if (uname(&uname_info) != 0) {
+    PA_DPLOG(ERROR) << "uname";
+    return 0;
+  }
+
+  if (strcmp(uname_info.sysname, "Darwin") != 0) {
+    PA_DLOG(ERROR) << "unexpected uname sysname " << uname_info.sysname;
+    return 0;
+  }
+
+  const char* dot = strchr(uname_info.release, '.');
+  if (!dot || uname_info.release == dot ||
+      // The Darwin version should be 1 or 2 digits; it is unlikely to be
+      // more than 4 digits.
+      dot - uname_info.release > 4) {
+    PA_DLOG(ERROR) << "could not parse uname release " << uname_info.release;
+    return 0;
+  }
+
+  int darwin_major_version = 0;
+  constexpr int base = 10;
+  for (const char* p = uname_info.release; p < dot; ++p) {
+    if (!('0' <= *p && *p < '0' + base)) {
+      PA_DLOG(ERROR) << "could not parse uname release " << uname_info.release;
+      return 0;
+    }
+
+    // Since we checked above that there are at most 4 digits, this
+    // accumulation cannot overflow.
+    darwin_major_version *= base;
+    darwin_major_version += *p - '0';
+  }
+
+  return darwin_major_version;
+}
+
+}  // namespace
+
+int MacOSMajorVersion() {
+  static int macos_major_version = [] {
+    int darwin_major_version = DarwinMajorVersion();
+
+    // Darwin major versions 6 through 19 corresponded to macOS versions 10.2
+    // through 10.15.
+    PA_BASE_CHECK(darwin_major_version >= 6);
+    if (darwin_major_version <= 19) {
+      return 10;
+    }
+
+    // Darwin major version 20 corresponds to macOS version 11.0. Assume a
+    // correspondence between Darwin's major version numbers and macOS major
+    // version numbers.
+    return darwin_major_version - 9;
+  }();
+  return macos_major_version;
+}
+
+}  // namespace partition_alloc::internal::base::mac
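A minimal caller-side sketch (not part of this change; the threshold 13 and the
function name are illustrative only) of how the helper above is typically used
from C++ code:

  #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"

  bool SupportsSomeMacFeature() {
    // MacOSMajorVersion() caches its result after the first call, so repeated
    // checks are cheap. Objective-C files should prefer @available instead.
    return partition_alloc::internal::base::mac::MacOSMajorVersion() >= 13;
  }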
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h
new file mode 100644
index 0000000..ffbb707
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h
@@ -0,0 +1,22 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base {
+
+// Returns the number of bytes in a memory page. Do not use this to compute
+// the number of pages in a block of memory for calling mincore(). On some
+// platforms, e.g. iOS, mincore() uses a different page size from what is
+// returned by GetPageSize().
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) size_t GetPageSize();
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_PAGE_SIZE_H_
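A small usage sketch (not part of this change; the helper name is made up)
showing the common pattern of rounding a request up to a whole number of pages
with GetPageSize():

  #include <stddef.h>

  #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h"

  size_t RoundUpToPageMultiple(size_t len) {
    // The page size is a power of two on the supported platforms, so the usual
    // mask trick works; overflow of |len + page - 1| is ignored for brevity.
    const size_t page = partition_alloc::internal::base::GetPageSize();
    return (len + page - 1) & ~(page - 1);
  }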
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size_posix.cc
new file mode 100644
index 0000000..b11857d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size_posix.cc
@@ -0,0 +1,24 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h"
+
+#include <unistd.h>
+
+namespace partition_alloc::internal::base {
+
+size_t GetPageSize() {
+  static const size_t pagesize = []() -> size_t {
+  // For more information see getpagesize(2). Portable applications should use
+  // sysconf(_SC_PAGESIZE) rather than getpagesize() if it's available.
+#if defined(_SC_PAGESIZE)
+    return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+#else
+    return getpagesize();
+#endif
+  }();
+  return pagesize;
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size_win.cc
new file mode 100644
index 0000000..6f41c08
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size_win.cc
@@ -0,0 +1,15 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h"
+
+namespace partition_alloc::internal::base {
+
+size_t GetPageSize() {
+  // System pagesize. This value remains constant on x86/64 architectures.
+  constexpr int PAGESIZE_KB = 4;
+  return PAGESIZE_KB * 1024;
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.cc
new file mode 100644
index 0000000..06594af
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.cc
@@ -0,0 +1,48 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.h"
+
+#include <limits>
+#include <ostream>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+
+namespace partition_alloc::internal::base::subtle {
+
+bool RefCountedThreadSafeBase::HasOneRef() const {
+  return ref_count_.IsOne();
+}
+
+bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
+  return !ref_count_.IsZero();
+}
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
+  PA_BASE_DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
+                              "calling Release()";
+}
+#endif
+
+// For security and correctness, we check the arithmetic on ref counts.
+//
+// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
+// these functions out-of-line. However, compilers are wily. Further testing may
+// show that `PA_NOINLINE` helps or hurts.
+//
+#if !defined(ARCH_CPU_X86_FAMILY)
+bool RefCountedThreadSafeBase::Release() const {
+  return ReleaseImpl();
+}
+void RefCountedThreadSafeBase::AddRef() const {
+  AddRefImpl();
+}
+void RefCountedThreadSafeBase::AddRefWithCheck() const {
+  AddRefWithCheckImpl();
+}
+#endif
+
+}  // namespace partition_alloc::internal::base::subtle
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.h
new file mode 100644
index 0000000..5bcd35c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.h
@@ -0,0 +1,187 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/atomic_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_refptr.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+namespace subtle {
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) RefCountedThreadSafeBase {
+ public:
+  RefCountedThreadSafeBase(const RefCountedThreadSafeBase&) = delete;
+  RefCountedThreadSafeBase& operator=(const RefCountedThreadSafeBase&) = delete;
+
+  bool HasOneRef() const;
+  bool HasAtLeastOneRef() const;
+
+ protected:
+  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
+      : ref_count_(1) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    needs_adopt_ref_ = true;
+#endif
+  }
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  ~RefCountedThreadSafeBase();
+#else
+  ~RefCountedThreadSafeBase() = default;
+#endif
+
+// Release and AddRef are suitable for inlining on X86 because they generate
+// very small code sequences. On other platforms (ARM), it causes a size
+// regression and is probably not worth it.
+#if defined(ARCH_CPU_X86_FAMILY)
+  // Returns true if the object should self-delete.
+  bool Release() const { return ReleaseImpl(); }
+  void AddRef() const { AddRefImpl(); }
+  void AddRefWithCheck() const { AddRefWithCheckImpl(); }
+#else
+  // Returns true if the object should self-delete.
+  bool Release() const;
+  void AddRef() const;
+  void AddRefWithCheck() const;
+#endif
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> AdoptRef(U*);
+
+  void Adopted() const {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    PA_BASE_DCHECK(needs_adopt_ref_);
+    needs_adopt_ref_ = false;
+#endif
+  }
+
+  PA_ALWAYS_INLINE void AddRefImpl() const {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    PA_BASE_DCHECK(!in_dtor_);
+    // This RefCounted object is created with a non-zero reference count.
+    // The first reference to such an object has to be made by AdoptRef or
+    // MakeRefCounted.
+    PA_BASE_DCHECK(!needs_adopt_ref_);
+#endif
+    ref_count_.Increment();
+  }
+
+  PA_ALWAYS_INLINE void AddRefWithCheckImpl() const {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    PA_BASE_DCHECK(!in_dtor_);
+    // This RefCounted object is created with a non-zero reference count.
+    // The first reference to such an object has to be made by AdoptRef or
+    // MakeRefCounted.
+    PA_BASE_DCHECK(!needs_adopt_ref_);
+#endif
+    PA_BASE_CHECK(ref_count_.Increment() > 0);
+  }
+
+  PA_ALWAYS_INLINE bool ReleaseImpl() const {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    PA_BASE_DCHECK(!in_dtor_);
+    PA_BASE_DCHECK(!ref_count_.IsZero());
+#endif
+    if (!ref_count_.Decrement()) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+      in_dtor_ = true;
+#endif
+      return true;
+    }
+    return false;
+  }
+
+  mutable AtomicRefCount ref_count_{0};
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  mutable bool needs_adopt_ref_ = false;
+  mutable bool in_dtor_ = false;
+#endif
+};
+
+}  // namespace subtle
+
+// Forward declaration.
+template <class T, typename Traits>
+class RefCountedThreadSafe;
+
+// Default traits for RefCountedThreadSafe<T>.  Deletes the object when its ref
+// count reaches 0.  Overload to delete it on a different thread etc.
+template <typename T>
+struct DefaultRefCountedThreadSafeTraits {
+  static void Destruct(const T* x) {
+    // Delete through RefCountedThreadSafe so that child classes only need to
+    // befriend RefCountedThreadSafe instead of this struct, which is an
+    // implementation detail.
+    RefCountedThreadSafe<T, DefaultRefCountedThreadSafeTraits>::DeleteInternal(
+        x);
+  }
+};
+
+//
+// A thread-safe variant of RefCounted<T>
+//
+//   class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
+//    ...
+//   };
+//
+// If you're using the default trait, then you should add compile time
+// asserts that no one else is deleting your object.  i.e.
+//    private:
+//     friend class base::RefCountedThreadSafe<MyFoo>;
+//     ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
+template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T>>
+class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
+ public:
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
+
+  explicit RefCountedThreadSafe()
+      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+
+  RefCountedThreadSafe(const RefCountedThreadSafe&) = delete;
+  RefCountedThreadSafe& operator=(const RefCountedThreadSafe&) = delete;
+
+  void AddRef() const { AddRefImpl(T::kRefCountPreference); }
+
+  void Release() const {
+    if (subtle::RefCountedThreadSafeBase::Release()) {
+      PA_ANALYZER_SKIP_THIS_PATH();
+      Traits::Destruct(static_cast<const T*>(this));
+    }
+  }
+
+ protected:
+  ~RefCountedThreadSafe() = default;
+
+ private:
+  friend struct DefaultRefCountedThreadSafeTraits<T>;
+  template <typename U>
+  static void DeleteInternal(const U* x) {
+    delete x;
+  }
+
+  void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
+    subtle::RefCountedThreadSafeBase::AddRef();
+  }
+
+  void AddRefImpl(subtle::StartRefCountFromOneTag) const {
+    subtle::RefCountedThreadSafeBase::AddRefWithCheck();
+  }
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
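A short usage sketch (the Config class is hypothetical, not part of this
change) of the RefCountedThreadSafe/MakeRefCounted pattern described in the
comments above:

  namespace partition_alloc::internal::base {

  class Config : public RefCountedThreadSafe<Config> {
   public:
    Config() = default;

   private:
    // Keep the destructor non-public so that only RefCountedThreadSafe (via
    // the default traits) can delete the object.
    friend class RefCountedThreadSafe<Config>;
    ~Config() = default;
  };

  scoped_refptr<Config> MakeSharedConfig() {
    // MakeRefCounted() news the object and hands it to a scoped_refptr, which
    // drops the reference (and deletes the object) when the last copy dies.
    return MakeRefCounted<Config>();
  }

  }  // namespace partition_alloc::internal::base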
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_policy.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_policy.h
new file mode 100644
index 0000000..106cb59
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_policy.h
@@ -0,0 +1,23 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
+
+namespace partition_alloc::internal::base::scoped_policy {
+
+// Defines the ownership policy for a scoped object.
+enum OwnershipPolicy {
+  // The scoped object takes ownership of an object by taking over an existing
+  // ownership claim.
+  ASSUME,
+
+  // The scoped object will retain the object and any initial ownership is
+  // not changed.
+  RETAIN
+};
+
+}  // namespace partition_alloc::internal::base::scoped_policy
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_POLICY_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_refptr.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_refptr.h
new file mode 100644
index 0000000..f438df4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_refptr.h
@@ -0,0 +1,373 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+
+namespace partition_alloc::internal {
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCountedThreadSafe;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+// scoped_refptr<T> is typically used with one of several RefCounted<T> base
+// classes or with custom AddRef and Release methods. These overloads dispatch
+// on which was used.
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+    const T*,
+    const RefCountedThreadSafe<U, V>*) {
+  return !std::is_same_v<std::decay_t<decltype(T::kRefCountPreference)>,
+                         std::decay_t<decltype(U::kRefCountPreference)>>;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+  return false;
+}
+
+template <typename T, typename U, typename V>
+constexpr void AssertRefCountBaseMatches(const T*,
+                                         const RefCountedThreadSafe<U, V>*) {
+  static_assert(
+      std::is_base_of_v<U, T>,
+      "T implements RefCountedThreadSafe<U>, but U is not a base of T.");
+}
+
+constexpr void AssertRefCountBaseMatches(...) {}
+
+}  // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+  using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+  static_assert(std::is_same_v<subtle::StartRefCountFromOneTag, Tag>,
+                "Use AdoptRef only if the reference count starts from one.");
+
+  PA_BASE_DCHECK(obj);
+  PA_BASE_DCHECK(obj->HasOneRef());
+  obj->Adopted();
+  return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+  return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+  return AdoptRef(obj);
+}
+
+}  // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+  T* obj = new T(std::forward<Args>(args)...);
+  return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+  return scoped_refptr<T>(t);
+}
+
+}  // namespace base
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//    private:
+//     friend class RefCounted<MyFoo>;  // Allow destruction by RefCounted<>.
+//     ~MyFoo();                        // Destructor must be private/protected.
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     ...
+//     foo.reset();  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references nullptr.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+// Also see Chromium's ownership and calling conventions:
+// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+// Specifically:
+//   If the function (at least sometimes) takes a ref on a refcounted object,
+//   declare the param as scoped_refptr<T>. The caller can decide whether it
+//   wishes to transfer ownership (by calling std::move(t) when passing t) or
+//   retain its ref (by simply passing t directly).
+//   In other words, use scoped_refptr like you would a std::unique_ptr except
+//   in the odd case where it's required to hold on to a ref while handing one
+//   to another component (if a component merely needs to use t on the stack
+//   without keeping a ref: pass t as a raw T*).
+template <class T>
+class PA_TRIVIAL_ABI scoped_refptr {
+ public:
+  typedef T element_type;
+
+  constexpr scoped_refptr() = default;
+
+  // Allow implicit construction from nullptr.
+  constexpr scoped_refptr(std::nullptr_t) {}
+
+  // Constructs from a raw pointer. Note that this constructor allows implicit
+  // conversion from T* to scoped_refptr<T> which is strongly discouraged. If
+  // you are creating a new ref-counted object please use
+  // base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
+  // should move or copy construct from an existing scoped_refptr<T> to the
+  // ref-counted object.
+  scoped_refptr(T* p) : ptr_(p) {
+    if (ptr_) {
+      AddRef(ptr_);
+    }
+  }
+
+  // Copy constructor. This is required in addition to the copy conversion
+  // constructor below.
+  scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
+
+  // Copy conversion constructor.
+  template <
+      typename U,
+      typename = typename std::enable_if<std::is_convertible_v<U*, T*>>::type>
+  scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
+
+  // Move constructor. This is required in addition to the move conversion
+  // constructor below.
+  scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
+
+  // Move conversion constructor.
+  template <
+      typename U,
+      typename = typename std::enable_if<std::is_convertible_v<U*, T*>>::type>
+  scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
+    r.ptr_ = nullptr;
+  }
+
+  ~scoped_refptr() {
+    static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+                      static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+                  "It's unsafe to override the ref count preference."
+                  " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+                  " from subclasses.");
+    if (ptr_) {
+      Release(ptr_);
+    }
+  }
+
+  T* get() const { return ptr_; }
+
+  T& operator*() const {
+    PA_BASE_DCHECK(ptr_);
+    return *ptr_;
+  }
+
+  T* operator->() const {
+    PA_BASE_DCHECK(ptr_);
+    return ptr_;
+  }
+
+  scoped_refptr& operator=(std::nullptr_t) {
+    reset();
+    return *this;
+  }
+
+  scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
+
+  // Unified assignment operator.
+  scoped_refptr& operator=(scoped_refptr r) noexcept {
+    swap(r);
+    return *this;
+  }
+
+  // Sets managed object to null and releases reference to the previous managed
+  // object, if it existed.
+  void reset() { scoped_refptr().swap(*this); }
+
+  // Returns the owned pointer (if any), releasing ownership to the caller. The
+  // caller is responsible for managing the lifetime of the reference.
+  [[nodiscard]] T* release();
+
+  void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
+
+  explicit operator bool() const { return ptr_ != nullptr; }
+
+  template <typename U>
+  bool operator==(const scoped_refptr<U>& rhs) const {
+    return ptr_ == rhs.get();
+  }
+
+  template <typename U>
+  bool operator!=(const scoped_refptr<U>& rhs) const {
+    return !operator==(rhs);
+  }
+
+  template <typename U>
+  bool operator<(const scoped_refptr<U>& rhs) const {
+    return ptr_ < rhs.get();
+  }
+
+ protected:
+  T* ptr_ = nullptr;
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+  // Friend required for move constructors that set r.ptr_ to null.
+  template <typename U>
+  friend class scoped_refptr;
+
+  // Non-inline helpers to allow:
+  //     class Opaque;
+  //     extern template class scoped_refptr<Opaque>;
+  // Otherwise the compiler will complain that Opaque is an incomplete type.
+  static void AddRef(T* ptr);
+  static void Release(T* ptr);
+};
+
+template <typename T>
+T* scoped_refptr<T>::release() {
+  T* ptr = ptr_;
+  ptr_ = nullptr;
+  return ptr;
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+  base::subtle::AssertRefCountBaseMatches(ptr, ptr);
+  ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+  base::subtle::AssertRefCountBaseMatches(ptr, ptr);
+  ptr->Release();
+}
+
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+  return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+  return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+  return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+  return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+  return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
+  lhs.swap(rhs);
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.cc
new file mode 100644
index 0000000..461e8cf
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.cc
@@ -0,0 +1,15 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h"
+
+namespace partition_alloc::internal::base {
+
+NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+                                NativeLibraryLoadError* error) {
+  return LoadNativeLibraryWithOptions(library_path, NativeLibraryOptions(),
+                                      error);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h
new file mode 100644
index 0000000..aa95ecb
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h
@@ -0,0 +1,97 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
+
+// This file defines a cross-platform "NativeLibrary" type which represents
+// a loadable module.
+
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#elif BUILDFLAG(IS_APPLE)
+#import <CoreFoundation/CoreFoundation.h>
+#endif  // OS_*
+
+namespace partition_alloc::internal::base {
+
+#if BUILDFLAG(IS_WIN)
+using NativeLibrary = HMODULE;
+#elif BUILDFLAG(IS_APPLE)
+enum NativeLibraryType { BUNDLE, DYNAMIC_LIB };
+enum NativeLibraryObjCStatus {
+  OBJC_UNKNOWN,
+  OBJC_PRESENT,
+  OBJC_NOT_PRESENT,
+};
+struct NativeLibraryStruct {
+  NativeLibraryType type;
+  CFBundleRefNum bundle_resource_ref;
+  NativeLibraryObjCStatus objc_status;
+  union {
+    CFBundleRef bundle;
+    void* dylib;
+  };
+};
+using NativeLibrary = NativeLibraryStruct*;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+using NativeLibrary = void*;
+#endif  // OS_*
+
+struct PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NativeLibraryLoadError {
+#if BUILDFLAG(IS_WIN)
+  NativeLibraryLoadError() : code(0) {}
+#endif  // BUILDFLAG(IS_WIN)
+
+  // Returns a string representation of the load error.
+  std::string ToString() const;
+
+#if BUILDFLAG(IS_WIN)
+  DWORD code;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  std::string message;
+#endif  // BUILDFLAG(IS_WIN)
+};
+
+struct PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) NativeLibraryOptions {
+  NativeLibraryOptions() = default;
+  NativeLibraryOptions(const NativeLibraryOptions& options) = default;
+
+  // If |true|, a loaded library is required to prefer local symbol resolution
+  // before considering global symbols. Note that this is already the default
+  // behavior on most systems. Setting this to |false| does not guarantee the
+  // inverse, i.e., it does not force a preference for global symbols over local
+  // ones.
+  bool prefer_own_symbols = false;
+};
+
+// Loads a native library from disk.  Release it with UnloadNativeLibrary when
+// you're done.  Returns NULL on failure.
+// If |error| is not NULL, it may be filled in on load error.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+                                NativeLibraryLoadError* error);
+
+// Loads a native library from disk.  Release it with UnloadNativeLibrary when
+// you're done.  Returns NULL on failure.
+// If |error| is not NULL, it may be filled in on load error.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error);
+
+// Gets a function pointer from a native library.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          const std::string& name);
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_pa_unittest.cc
new file mode 100644
index 0000000..fd15483
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_pa_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base {
+
+const FilePath::CharType kDummyLibraryPath[] =
+    PA_FILE_PATH_LITERAL("dummy_library");
+
+TEST(PartitionAllocBaseNativeLibraryTest, LoadFailure) {
+  NativeLibraryLoadError error;
+  EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), &error));
+  EXPECT_FALSE(error.ToString().empty());
+}
+
+// |error| is optional and can be null.
+TEST(PartitionAllocBaseNativeLibraryTest, LoadFailureWithNullError) {
+  EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), nullptr));
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_posix.cc
new file mode 100644
index 0000000..f410115
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library_posix.cc
@@ -0,0 +1,58 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h"
+
+#include <dlfcn.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+std::string NativeLibraryLoadError::ToString() const {
+  return message;
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error) {
+  // TODO(1151236): ScopedBlockingCall is temporarily disabled here. Once
+  // partition_alloc's ScopedBlockingCall() observes the same
+  // blocking_observer_ as base's ScopedBlockingCall(), we will copy the
+  // ScopedBlockingCall code and re-enable this.
+
+  // dlopen() opens the file off disk.
+  // ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+
+  // We deliberately do not use RTLD_DEEPBIND by default.  For the history why,
+  // please refer to the bug tracker.  Some useful bug reports to read include:
+  // http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
+  // and http://crbug.com/40794.
+  int flags = RTLD_LAZY;
+#if BUILDFLAG(IS_ANDROID) || !defined(RTLD_DEEPBIND)
+  // Certain platforms don't define RTLD_DEEPBIND. Android dlopen() requires
+  // further investigation, as it might vary across versions. Crash here to
+  // warn developers that they're trying to rely on uncertain behavior.
+  PA_BASE_CHECK(!options.prefer_own_symbols);
+#else
+  if (options.prefer_own_symbols) {
+    flags |= RTLD_DEEPBIND;
+  }
+#endif
+  void* dl = dlopen(library_path.value().c_str(), flags);
+  if (!dl && error) {
+    error->message = dlerror();
+  }
+
+  return dl;
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          const std::string& name) {
+  return dlsym(library, name.c_str());
+}
+
+}  // namespace partition_alloc::internal::base
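An end-to-end sketch (not part of this change; the library and symbol names
are made up) of the loader API as implemented above for POSIX:

  #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
  #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h"

  namespace pa_base = partition_alloc::internal::base;

  void* LookUpExampleSymbol() {
    pa_base::NativeLibraryLoadError error;
    pa_base::NativeLibrary lib = pa_base::LoadNativeLibrary(
        pa_base::FilePath(PA_FILE_PATH_LITERAL("libexample.so")), &error);
    if (!lib) {
      // On POSIX, error.ToString() carries the dlerror() message.
      return nullptr;
    }
    // dlsym() under the hood; returns null if the symbol is absent.
    return pa_base::GetFunctionPointerFromNativeLibrary(lib, "example_entry");
  }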
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h
new file mode 100644
index 0000000..e6811c4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h
@@ -0,0 +1,132 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace partition_alloc::internal::base {
+
+// Helper type to create a function-local static variable of type `T` when `T`
+// has a non-trivial destructor. Storing a `T` in a `base::NoDestructor<T>` will
+// prevent `~T()` from running, even when the variable goes out of scope.
+//
+// Useful when a variable has static storage duration but its type has a
+// non-trivial destructor. Chromium bans global constructors and destructors:
+// using a function-local static variable prevents the former, while using
+// `base::NoDestructor<T>` prevents the latter.
+//
+// ## Caveats
+//
+// - Must only be used as a function-local static variable. Declaring a global
+//   variable of type `base::NoDestructor<T>` will still generate a global
+//   constructor; declaring a local or member variable will lead to memory leaks
+//   or other surprising and undesirable behaviour.
+//
+// - If the data is rarely used, consider creating it on demand rather than
+//   caching it for the lifetime of the program. Though `base::NoDestructor<T>`
+//   does not heap allocate, the compiler still reserves space in bss for
+//   storing `T`, which costs memory at runtime.
+//
+// - If `T` is trivially destructible, do not use `base::NoDestructor<T>`:
+//
+//     const uint64_t GetUnstableSessionSeed() {
+//       // No need to use `base::NoDestructor<T>` as `uint64_t` is trivially
+//       // destructible and does not require a global destructor.
+//       static const uint64_t kSessionSeed = base::RandUint64();
+//       return kSessionSeed;
+//     }
+//
+// ## Example Usage
+//
+// const std::string& GetDefaultText() {
+//   // Required since `static const std::string` requires a global destructor.
+//   static const base::NoDestructor<std::string> s("Hello world!");
+//   return *s;
+// }
+//
+// More complex initialization using a lambda:
+//
+// const std::string& GetRandomNonce() {
+//   // `nonce` is initialized with random data the first time this function is
+//   // called, but its value is fixed thereafter.
+//   static const base::NoDestructor<std::string> nonce([] {
+//     std::string s(16, '\0');
+//     crypto::RandBytes(s.data(), s.size());
+//     return s;
+//   }());
+//   return *nonce;
+// }
+//
+// ## Thread safety
+//
+// Initialisation of function-local static variables is thread-safe since C++11.
+// The standard guarantees that:
+//
+// - function-local static variables will be initialised the first time
+//   execution passes through the declaration.
+//
+// - if another thread's execution concurrently passes through the declaration
+//   in the middle of initialisation, that thread will wait for the in-progress
+//   initialisation to complete.
+template <typename T>
+class NoDestructor {
+ public:
+  static_assert(
+      !std::is_trivially_destructible_v<T>,
+      "T is trivially destructible; please use a function-local static "
+      "of type T directly instead");
+
+  // Not constexpr; just write static constexpr T x = ...; if the value should
+  // be a constexpr.
+  template <typename... Args>
+  explicit NoDestructor(Args&&... args) {
+    new (storage_) T(std::forward<Args>(args)...);
+  }
+
+  // Allows copy and move construction of the contained type, to allow
+  // construction from an initializer list, e.g. for std::vector.
+  explicit NoDestructor(const T& x) { new (storage_) T(x); }
+  explicit NoDestructor(T&& x) { new (storage_) T(std::move(x)); }
+
+  NoDestructor(const NoDestructor&) = delete;
+  NoDestructor& operator=(const NoDestructor&) = delete;
+
+  ~NoDestructor() = default;
+
+  const T& operator*() const { return *get(); }
+  T& operator*() { return *get(); }
+
+  const T* operator->() const { return get(); }
+  T* operator->() { return get(); }
+
+  const T* get() const { return reinterpret_cast<const T*>(storage_); }
+  T* get() { return reinterpret_cast<T*>(storage_); }
+
+ private:
+  alignas(T) char storage_[sizeof(T)];
+
+#if defined(LEAK_SANITIZER)
+  // TODO(https://crbug.com/812277): This is a hack to work around the fact
+  // that LSan doesn't seem to treat NoDestructor as a root for reachability
+  // analysis. This means that code like this:
+  //   static base::NoDestructor<std::vector<int>> v({1, 2, 3});
+  // is considered a leak. Using the standard leak sanitizer annotations to
+  // suppress leaks doesn't work: std::vector is implicitly constructed before
+  // calling the base::NoDestructor constructor.
+  //
+  // Unfortunately, I haven't been able to demonstrate this issue in simpler
+  // reproductions: until that's resolved, hold an explicit pointer to the
+  // placement-new'd object in leak sanitizer mode to help LSan realize that
+  // objects allocated by the contained type are still reachable.
+  T* storage_ptr_ = reinterpret_cast<T*>(storage_);
+#endif  // defined(LEAK_SANITIZER)
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h
new file mode 100644
index 0000000..7bb1cdd
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h
@@ -0,0 +1,23 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NOTREACHED_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NOTREACHED_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+// PA_NOTREACHED() annotates paths that are supposed to be unreachable. They
+// crash if they are ever hit.
+#if PA_BASE_CHECK_WILL_STREAM()
+// PartitionAlloc uses async-signal-safe RawCheckFailure() for error reporting.
+// Async-signal-safe functions are guaranteed to not allocate as otherwise they
+// could operate with inconsistent allocator state.
+#define PA_NOTREACHED()                                  \
+  ::partition_alloc::internal::logging::RawCheckFailure( \
+      __FILE__ "(" PA_STRINGIFY(__LINE__) ") PA_NOTREACHED() hit.")
+#else
+#define PA_NOTREACHED() PA_IMMEDIATE_CRASH()
+#endif  // PA_BASE_CHECK_WILL_STREAM()
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NOTREACHED_H_
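For illustration only (the enum and function are hypothetical), PA_NOTREACHED()
marks paths the author believes impossible, e.g. after an exhaustive switch:

  #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"

  enum class Stage { kInit, kRun };

  void Advance(Stage stage) {
    switch (stage) {
      case Stage::kInit:
        // ... initialization work ...
        return;
      case Stage::kRun:
        // ... steady-state work ...
        return;
    }
    // Only reachable with a corrupted enum value; crashes via RawCheckFailure()
    // or PA_IMMEDIATE_CRASH() depending on PA_BASE_CHECK_WILL_STREAM().
    PA_NOTREACHED();
  }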
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h
new file mode 100644
index 0000000..3d1c765
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h
@@ -0,0 +1,375 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math_impl.h"
+
+namespace partition_alloc::internal::base {
+namespace internal {
+
+template <typename T>
+class CheckedNumeric {
+  static_assert(std::is_arithmetic_v<T>,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+  template <typename Src>
+  friend class CheckedNumeric;
+
+  using type = T;
+
+  constexpr CheckedNumeric() = default;
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.state_.value(), rhs.IsValid()) {}
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to CheckedNumerics to make them easier to use.
+  template <typename Src>
+  constexpr CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
+      : state_(value) {
+    static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+  }
+
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  constexpr CheckedNumeric(
+      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : state_(static_cast<Src>(value)) {}
+
+  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter.
+  template <typename Dst = T>
+  constexpr bool IsValid() const {
+    return state_.is_valid() &&
+           IsValueInRangeForNumericType<Dst>(state_.value());
+  }
+
+  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+  // and is within the range supported by the destination type. Returns true if
+  // successful and false otherwise.
+  template <typename Dst>
+#if defined(__clang__) || defined(__GNUC__)
+  __attribute__((warn_unused_result))
+#elif defined(_MSC_VER)
+  _Check_return_
+#endif
+  constexpr bool
+  AssignIfValid(Dst* result) const {
+    return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? ((*result = static_cast<Dst>(state_.value())), true)
+               : false;
+  }
+
+  // ValueOrDie() - The primary accessor for the underlying value. If the
+  // current state is not valid it will CHECK and crash.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter, which will trigger a CHECK if the value is not in bounds for
+  // the destination.
+  // The CHECK behavior can be overridden by supplying a handler as a
+  // template parameter, for test code, etc. However, the handler cannot access
+  // the underlying value, and it is not available through other means.
+  template <typename Dst = T, class CheckHandler = CheckOnFailure>
+  constexpr StrictNumeric<Dst> ValueOrDie() const {
+    return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : CheckHandler::template HandleFailure<Dst>();
+  }
+
+  // ValueOrDefault(T default_value) - A convenience method that returns the
+  // current value if the state is valid, and the supplied default_value for
+  // any other state.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter. WARNING: This function may fail to compile or CHECK at runtime
+  // if the supplied default_value is not within range of the destination type.
+  template <typename Dst = T, typename Src>
+  constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
+    return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : checked_cast<Dst>(default_value);
+  }
+
+  // Returns a checked numeric of the specified type, cast from the current
+  // CheckedNumeric. If the current state is invalid or the destination cannot
+  // represent the result then the returned CheckedNumeric will be invalid.
+  template <typename Dst>
+  constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+    return *this;
+  }
+
+  // This friend method is available solely for providing more detailed logging
+  // in the tests. Do not implement it in production code, because the
+  // underlying values may change at any time.
+  template <typename U>
+  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src>
+  constexpr CheckedNumeric& operator+=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator-=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator*=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator/=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator%=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator<<=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator>>=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator&=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator|=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator^=(const Src rhs);
+
+  constexpr CheckedNumeric operator-() const {
+    // Use an optimized code path for a known run-time variable.
+    if (!PA_IsConstantEvaluated() && std::is_signed_v<T> &&
+        std::is_floating_point_v<T>) {
+      return FastRuntimeNegate();
+    }
+    // The negation of two's complement int min is int min.
+    const bool is_valid =
+        IsValid() &&
+        (!std::is_signed_v<T> || std::is_floating_point_v<T> ||
+         NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
+    return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
+  }
+
+  constexpr CheckedNumeric operator~() const {
+    return CheckedNumeric<decltype(InvertWrapper(T()))>(
+        InvertWrapper(state_.value()), IsValid());
+  }
+
+  constexpr CheckedNumeric Abs() const {
+    return !IsValueNegative(state_.value()) ? *this : -*this;
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
+      const U rhs) const {
+    return CheckMax(*this, rhs);
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
+      const U rhs) const {
+    return CheckMin(*this, rhs);
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
+  UnsignedAbs() const {
+    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        SafeUnsignedAbs(state_.value()), state_.is_valid());
+  }
+
+  constexpr CheckedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator++(int) {
+    CheckedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  constexpr CheckedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator--(int) {
+    // TODO(pkasting): Consider std::exchange() once it's constexpr in C++20.
+    const CheckedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These perform the actual math operations on the CheckedNumerics.
+  // Binary arithmetic operations.
+  template <template <typename, typename, typename> class M,
+            typename L,
+            typename R>
+  static constexpr CheckedNumeric MathOp(const L lhs, const R rhs) {
+    using Math = typename MathWrapper<M, L, R>::math;
+    T result = 0;
+    const bool is_valid =
+        Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+        Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+    return CheckedNumeric<T>(result, is_valid);
+  }
+
+  // Assignment arithmetic operations.
+  template <template <typename, typename, typename> class M, typename R>
+  constexpr CheckedNumeric& MathOp(const R rhs) {
+    using Math = typename MathWrapper<M, T, R>::math;
+    T result = 0;  // Using T as the destination saves a range check.
+    const bool is_valid =
+        state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+        Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+    *this = CheckedNumeric<T>(result, is_valid);
+    return *this;
+  }
+
+ private:
+  CheckedNumericState<T> state_;
+
+  CheckedNumeric FastRuntimeNegate() const {
+    T result;
+    const bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
+    return CheckedNumeric<T>(result, IsValid() && success);
+  }
+
+  template <typename Src>
+  constexpr CheckedNumeric(Src value, bool is_valid)
+      : state_(value, is_valid) {}
+
+  // These wrappers allow us to handle state the same way for both
+  // CheckedNumeric and POD arithmetic types.
+  template <typename Src>
+  struct Wrapper {
+    static constexpr bool is_valid(Src) { return true; }
+    static constexpr Src value(Src value) { return value; }
+  };
+
+  template <typename Src>
+  struct Wrapper<CheckedNumeric<Src>> {
+    static constexpr bool is_valid(const CheckedNumeric<Src> v) {
+      return v.IsValid();
+    }
+    static constexpr Src value(const CheckedNumeric<Src> v) {
+      return v.state_.value();
+    }
+  };
+
+  template <typename Src>
+  struct Wrapper<StrictNumeric<Src>> {
+    static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+    static constexpr Src value(const StrictNumeric<Src> v) {
+      return static_cast<Src>(v);
+    }
+  };
+};
+
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
+  return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(
+    const CheckedNumeric<Src> value) {
+  return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(
+    const CheckedNumeric<Src> value,
+    const Default default_value) {
+  return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// Convenience wrapper to return a new CheckedNumeric from the provided
+// arithmetic or CheckedNumericType.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
+    const T value) {
+  return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(
+    const L lhs,
+    const R rhs) {
+  using Math = typename MathWrapper<M, L, R>::math;
+  return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+                                                                        rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+constexpr auto CheckMathOp(const L lhs, const R rhs, const Args... args) {
+  return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
+}
+
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
+PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
+PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
+
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L* operator+(L* lhs, const StrictNumeric<R> rhs) {
+  const uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
+                                    CheckMul(sizeof(L), static_cast<R>(rhs)))
+                               .template ValueOrDie<uintptr_t>();
+  return reinterpret_cast<L*>(result);
+}
+
+template <typename L, typename R>
+L* operator-(L* lhs, const StrictNumeric<R> rhs) {
+  const uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
+                                    CheckMul(sizeof(L), static_cast<R>(rhs)))
+                               .template ValueOrDie<uintptr_t>();
+  return reinterpret_cast<L*>(result);
+}
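+
+// Example (illustrative): offsetting a raw pointer by a StrictNumeric element
+// count. The overloads above convert the count to a byte offset with checked
+// math, so an offset that would wrap the address space CHECKs instead.
+//
+//   int buffer[16];
+//   int* end = buffer + StrictNumeric<size_t>(16u);       // OK.
+//   int* bad = buffer + StrictNumeric<size_t>(SIZE_MAX);  // CHECKs.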
+
+}  // namespace internal
+
+using internal::CheckAdd;
+using internal::CheckAnd;
+using internal::CheckDiv;
+using internal::CheckedNumeric;
+using internal::CheckLsh;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckMod;
+using internal::CheckMul;
+using internal::CheckOr;
+using internal::CheckRsh;
+using internal::CheckSub;
+using internal::CheckXor;
+using internal::IsValidForType;
+using internal::MakeCheckedNum;
+using internal::ValueOrDefaultForType;
+using internal::ValueOrDieForType;
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math_impl.h
new file mode 100644
index 0000000..ea32015
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math_impl.h
@@ -0,0 +1,610 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_shared_impl.h"
+
+namespace partition_alloc::internal::base::internal {
+
+template <typename T>
+constexpr bool CheckedAddImpl(T x, T y, T* result) {
+  static_assert(std::is_integral_v<T>, "Type must be integral");
+  // Since the value of x+y is undefined if we have a signed type, we compute
+  // it using the unsigned type of the same size.
+  using UnsignedDst = typename std::make_unsigned<T>::type;
+  using SignedDst = typename std::make_signed<T>::type;
+  const UnsignedDst ux = static_cast<UnsignedDst>(x);
+  const UnsignedDst uy = static_cast<UnsignedDst>(y);
+  const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+  // Addition is valid if the sign of (x + y) is equal to either that of x or
+  // that of y.
+  if (std::is_signed_v<T>
+          ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
+          : uresult < uy) {  // Unsigned is either valid or underflow.
+    return false;
+  }
+  *result = static_cast<T>(uresult);
+  return true;
+}
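+
+// Worked example (8-bit, illustrative): x = 127, y = 1 gives ux = 0x7f,
+// uy = 0x01, uresult = 0x80. (uresult ^ ux) & (uresult ^ uy) = 0xff & 0x81 =
+// 0x81, which is negative as a SignedDst, so the wrapped sum is rejected and
+// CheckedAddImpl() returns false.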
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp {};
+
+template <typename T, typename U>
+struct CheckedAddOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    if constexpr (CheckedAddFastOp<T, U>::is_supported) {
+      return CheckedAddFastOp<T, U>::Do(x, y, result);
+    }
+
+    // Double the underlying type up to a full machine word.
+    using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    using Promotion =
+        typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+                                   IntegerBitsPlusSign<intptr_t>::value),
+                                  typename BigEnoughPromotion<T, U>::type,
+                                  FastPromotion>::type;
+    // Fail if either operand is out of range for the promoted type.
+    // TODO(jschuh): This could be made to work for a broader range of values.
+    if (PA_BASE_NUMERICS_UNLIKELY(
+            !IsValueInRangeForNumericType<Promotion>(x) ||
+            !IsValueInRangeForNumericType<Promotion>(y))) {
+      return false;
+    }
+
+    Promotion presult = {};
+    bool is_valid = true;
+    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+      presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+    } else {
+      is_valid = CheckedAddImpl(static_cast<Promotion>(x),
+                                static_cast<Promotion>(y), &presult);
+    }
+    if (!is_valid || !IsValueInRangeForNumericType<V>(presult)) {
+      return false;
+    }
+    *result = static_cast<V>(presult);
+    return true;
+  }
+};
+
+template <typename T>
+constexpr bool CheckedSubImpl(T x, T y, T* result) {
+  static_assert(std::is_integral_v<T>, "Type must be integral");
+  // Since the value of x - y is undefined if we have a signed type, we
+  // compute it using the unsigned type of the same size.
+  using UnsignedDst = typename std::make_unsigned<T>::type;
+  using SignedDst = typename std::make_signed<T>::type;
+  const UnsignedDst ux = static_cast<UnsignedDst>(x);
+  const UnsignedDst uy = static_cast<UnsignedDst>(y);
+  const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+  // Subtraction is valid if either x and y have same sign, or (x-y) and x have
+  // the same sign.
+  if (std::is_signed_v<T>
+          ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0
+          : x < y) {
+    return false;
+  }
+  *result = static_cast<T>(uresult);
+  return true;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp {};
+
+template <typename T, typename U>
+struct CheckedSubOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    if constexpr (CheckedSubFastOp<T, U>::is_supported) {
+      return CheckedSubFastOp<T, U>::Do(x, y, result);
+    }
+
+    // Double the underlying type up to a full machine word.
+    using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    using Promotion =
+        typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+                                   IntegerBitsPlusSign<intptr_t>::value),
+                                  typename BigEnoughPromotion<T, U>::type,
+                                  FastPromotion>::type;
+    // Fail if either operand is out of range for the promoted type.
+    // TODO(jschuh): This could be made to work for a broader range of values.
+    if (PA_BASE_NUMERICS_UNLIKELY(
+            !IsValueInRangeForNumericType<Promotion>(x) ||
+            !IsValueInRangeForNumericType<Promotion>(y))) {
+      return false;
+    }
+
+    Promotion presult = {};
+    bool is_valid = true;
+    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+      presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+    } else {
+      is_valid = CheckedSubImpl(static_cast<Promotion>(x),
+                                static_cast<Promotion>(y), &presult);
+    }
+    if (!is_valid || !IsValueInRangeForNumericType<V>(presult)) {
+      return false;
+    }
+    *result = static_cast<V>(presult);
+    return true;
+  }
+};
+
+template <typename T>
+constexpr bool CheckedMulImpl(T x, T y, T* result) {
+  static_assert(std::is_integral_v<T>, "Type must be integral");
+  // Since the value of x*y is potentially undefined if we have a signed type,
+  // we compute it using the unsigned type of the same size.
+  using UnsignedDst = typename std::make_unsigned<T>::type;
+  using SignedDst = typename std::make_signed<T>::type;
+  const UnsignedDst ux = SafeUnsignedAbs(x);
+  const UnsignedDst uy = SafeUnsignedAbs(y);
+  const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+  const bool is_negative =
+      std::is_signed_v<T> && static_cast<SignedDst>(x ^ y) < 0;
+  // We have a fast out for unsigned identity or zero on the second operand.
+  // After that it's an unsigned overflow check on the absolute value, with
+  // a +1 bound for a negative result.
+  if (uy > UnsignedDst(!std::is_signed_v<T> || is_negative) &&
+      ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy) {
+    return false;
+  }
+  *result = static_cast<T>(is_negative ? 0 - uresult : uresult);
+  return true;
+}
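+
+// Worked example (8-bit, illustrative): x = -16, y = 9 gives ux = 16, uy = 9,
+// is_negative = true, and a bound of (127 + 1) / 9 = 14. Since 16 > 14, the
+// product -144 is unrepresentable and CheckedMulImpl() returns false. With
+// y = 8 the bound is 16, the check passes, and the result is exactly -128.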
+
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp {};
+
+template <typename T, typename U>
+struct CheckedMulOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    if constexpr (CheckedMulFastOp<T, U>::is_supported) {
+      return CheckedMulFastOp<T, U>::Do(x, y, result);
+    }
+
+    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    // Verify the destination type can hold the result (always true for 0).
+    if (PA_BASE_NUMERICS_UNLIKELY(
+            (!IsValueInRangeForNumericType<Promotion>(x) ||
+             !IsValueInRangeForNumericType<Promotion>(y)) &&
+            x && y)) {
+      return false;
+    }
+
+    Promotion presult = {};
+    bool is_valid = true;
+    if (CheckedMulFastOp<Promotion, Promotion>::is_supported) {
+      // The fast op may be available with the promoted type.
+      is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(
+          static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+    } else if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+      presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+    } else {
+      is_valid = CheckedMulImpl(static_cast<Promotion>(x),
+                                static_cast<Promotion>(y), &presult);
+    }
+    if (!is_valid || !IsValueInRangeForNumericType<V>(presult)) {
+      return false;
+    }
+    *result = static_cast<V>(presult);
+    return true;
+  }
+};
+
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp {};
+
+template <typename T, typename U>
+struct CheckedDivOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    if (PA_BASE_NUMERICS_UNLIKELY(!y)) {
+      return false;
+    }
+
+    // The overflow check can be compiled away if we don't have the exact
+    // combination of types needed to trigger this case.
+    using Promotion = typename BigEnoughPromotion<T, U>::type;
+    if (PA_BASE_NUMERICS_UNLIKELY(
+            (std::is_signed_v<T> && std::is_signed_v<U> &&
+             IsTypeInRangeForNumericType<T, Promotion>::value &&
+             static_cast<Promotion>(x) ==
+                 std::numeric_limits<Promotion>::lowest() &&
+             y == static_cast<U>(-1)))) {
+      return false;
+    }
+
+    // This branch always compiles away if the above branch wasn't removed.
+    if (PA_BASE_NUMERICS_UNLIKELY(
+            (!IsValueInRangeForNumericType<Promotion>(x) ||
+             !IsValueInRangeForNumericType<Promotion>(y)) &&
+            x)) {
+      return false;
+    }
+
+    const Promotion presult = Promotion(x) / Promotion(y);
+    if (!IsValueInRangeForNumericType<V>(presult)) {
+      return false;
+    }
+    *result = static_cast<V>(presult);
+    return true;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp {};
+
+template <typename T, typename U>
+struct CheckedModOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    if (PA_BASE_NUMERICS_UNLIKELY(!y)) {
+      return false;
+    }
+
+    using Promotion = typename BigEnoughPromotion<T, U>::type;
+    if (PA_BASE_NUMERICS_UNLIKELY(
+            (std::is_signed_v<T> && std::is_signed_v<U> &&
+             IsTypeInRangeForNumericType<T, Promotion>::value &&
+             static_cast<Promotion>(x) ==
+                 std::numeric_limits<Promotion>::lowest() &&
+             y == static_cast<U>(-1)))) {
+      *result = 0;
+      return true;
+    }
+
+    const Promotion presult =
+        static_cast<Promotion>(x) % static_cast<Promotion>(y);
+    if (!IsValueInRangeForNumericType<V>(presult)) {
+      return false;
+    }
+    *result = static_cast<Promotion>(presult);
+    return true;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp {};
+
+// Left shift. Shifts by less than 0 or by at least the number of bits in the
+// promoted type are undefined, as are shifts of negative values. Otherwise
+// the shift is defined whenever the result fits.
+template <typename T, typename U>
+struct CheckedLshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = T;
+  template <typename V>
+  static constexpr bool Do(T x, U shift, V* result) {
+    // Disallow negative numbers and verify the shift is in bounds.
+    if (PA_BASE_NUMERICS_LIKELY(
+            !IsValueNegative(x) &&
+            as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits))) {
+      // Shift as unsigned to avoid undefined behavior.
+      *result = static_cast<V>(as_unsigned(x) << shift);
+      // If the shift can be reversed, we know it was valid.
+      return *result >> shift == x;
+    }
+
+    // Handle the legal corner-case of a full-width signed shift of zero.
+    if (!std::is_signed_v<T> || x ||
+        as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits)) {
+      return false;
+    }
+    *result = 0;
+    return true;
+  }
+};
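+
+// Example (illustrative): for uint8_t, CheckLsh(uint8_t{0x20}, 2) yields a
+// valid 0x80 because 0x80 >> 2 round-trips back to 0x20, while
+// CheckLsh(uint8_t{0x40}, 2) truncates to 0x00, fails the round-trip check,
+// and produces an invalid CheckedNumeric.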
+
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp {};
+
+// Right shift. Shifts by less than 0 or by at least the number of bits in the
+// promoted type are undefined. Otherwise the shift is always defined, but a
+// right shift of a negative value is implementation-dependent.
+template <typename T, typename U>
+struct CheckedRshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = T;
+  template <typename V>
+  static constexpr bool Do(T x, U shift, V* result) {
+    // Use sign conversion to push negative values out of range.
+    if (PA_BASE_NUMERICS_UNLIKELY(as_unsigned(shift) >=
+                                  IntegerBitsPlusSign<T>::value)) {
+      return false;
+    }
+
+    const T tmp = x >> shift;
+    if (!IsValueInRangeForNumericType<V>(tmp)) {
+      return false;
+    }
+    *result = static_cast<V>(tmp);
+    return true;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    const result_type tmp =
+        static_cast<result_type>(x) & static_cast<result_type>(y);
+    if (!IsValueInRangeForNumericType<V>(tmp)) {
+      return false;
+    }
+    *result = static_cast<V>(tmp);
+    return true;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedOrOp<T,
+                   U,
+                   typename std::enable_if<std::is_integral_v<T> &&
+                                           std::is_integral_v<U>>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    const result_type tmp =
+        static_cast<result_type>(x) | static_cast<result_type>(y);
+    if (!IsValueInRangeForNumericType<V>(tmp)) {
+      return false;
+    }
+    *result = static_cast<V>(tmp);
+    return true;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedXorOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    const result_type tmp =
+        static_cast<result_type>(x) ^ static_cast<result_type>(y);
+    if (!IsValueInRangeForNumericType<V>(tmp)) {
+      return false;
+    }
+    *result = static_cast<V>(tmp);
+    return true;
+  }
+};
+
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp {};
+
+template <typename T, typename U>
+struct CheckedMaxOp<T,
+                    U,
+                    typename std::enable_if<std::is_arithmetic_v<T> &&
+                                            std::is_arithmetic_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    const result_type tmp = IsGreater<T, U>::Test(x, y)
+                                ? static_cast<result_type>(x)
+                                : static_cast<result_type>(y);
+    if (!IsValueInRangeForNumericType<V>(tmp)) {
+      return false;
+    }
+    *result = static_cast<V>(tmp);
+    return true;
+  }
+};
+
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp {};
+
+template <typename T, typename U>
+struct CheckedMinOp<T,
+                    U,
+                    typename std::enable_if<std::is_arithmetic_v<T> &&
+                                            std::is_arithmetic_v<U>>::type> {
+  using result_type = typename LowestValuePromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    const result_type tmp = IsLess<T, U>::Test(x, y)
+                                ? static_cast<result_type>(x)
+                                : static_cast<result_type>(y);
+    if (!IsValueInRangeForNumericType<V>(tmp)) {
+      return false;
+    }
+    *result = static_cast<V>(tmp);
+    return true;
+  }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define PA_BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                      \
+  template <typename T, typename U>                                 \
+  struct Checked##NAME##Op<                                         \
+      T, U,                                                         \
+      typename std::enable_if<std::is_floating_point_v<T> ||        \
+                              std::is_floating_point_v<U>>::type> { \
+    using result_type = typename MaxExponentPromotion<T, U>::type;  \
+    template <typename V>                                           \
+    static constexpr bool Do(T x, U y, V* result) {                 \
+      using Promotion = typename MaxExponentPromotion<T, U>::type;  \
+      const Promotion presult = x OP y;                             \
+      if (!IsValueInRangeForNumericType<V>(presult))                \
+        return false;                                               \
+      *result = static_cast<V>(presult);                            \
+      return true;                                                  \
+    }                                                               \
+  };
+
+PA_BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+PA_BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+PA_BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+PA_BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef PA_BASE_FLOAT_ARITHMETIC_OPS
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation {
+  NUMERIC_INTEGER,
+  NUMERIC_FLOATING,
+  NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation {
+  static const NumericRepresentation value =
+      std::is_integral_v<NumericType>
+          ? NUMERIC_INTEGER
+          : (std::is_floating_point_v<NumericType> ? NUMERIC_FLOATING
+                                                   : NUMERIC_UNKNOWN);
+};
+
+template <typename T,
+          NumericRepresentation type = GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ public:
+  template <typename Src = int>
+  constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
+      : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+        value_(WellDefinedConversionOrZero(value, is_valid_)) {
+    static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
+  }
+
+  template <typename Src>
+  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : CheckedNumericState(rhs.value(), rhs.is_valid()) {}
+
+  constexpr bool is_valid() const { return is_valid_; }
+
+  constexpr T value() const { return value_; }
+
+ private:
+  // Ensures that a type conversion does not trigger undefined behavior.
+  template <typename Src>
+  static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid) {
+    using SrcType = typename internal::UnderlyingType<Src>::type;
+    return (std::is_integral_v<SrcType> || is_valid) ? static_cast<T>(value)
+                                                     : 0;
+  }
+
+  // is_valid_ precedes value_ because member initializers in the constructors
+  // are evaluated in field order, and is_valid_ must be read when initializing
+  // value_.
+  bool is_valid_;
+  T value_;
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ public:
+  template <typename Src = double>
+  constexpr explicit CheckedNumericState(Src value = 0.0, bool is_valid = true)
+      : value_(WellDefinedConversionOrNaN(
+            value,
+            is_valid && IsValueInRangeForNumericType<T>(value))) {}
+
+  template <typename Src>
+  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : CheckedNumericState(rhs.value(), rhs.is_valid()) {}
+
+  constexpr bool is_valid() const {
+    // Written this way because std::isfinite is not reliably constexpr.
+    return PA_IsConstantEvaluated()
+               ? value_ <= std::numeric_limits<T>::max() &&
+                     value_ >= std::numeric_limits<T>::lowest()
+               : std::isfinite(value_);
+  }
+
+  constexpr T value() const { return value_; }
+
+ private:
+  // Ensures that a type conversion does not trigger undefined behavior.
+  template <typename Src>
+  static constexpr T WellDefinedConversionOrNaN(Src value, bool is_valid) {
+    using SrcType = typename internal::UnderlyingType<Src>::type;
+    return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
+                NUMERIC_RANGE_CONTAINED ||
+            is_valid)
+               ? static_cast<T>(value)
+               : std::numeric_limits<T>::quiet_NaN();
+  }
+
+  T value_;
+};
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math.h
new file mode 100644
index 0000000..ec382b2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math.h
@@ -0,0 +1,254 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math_impl.h"
+
+namespace partition_alloc::internal::base {
+namespace internal {
+
+template <typename T>
+class ClampedNumeric {
+  static_assert(std::is_arithmetic_v<T>,
+                "ClampedNumeric<T>: T must be a numeric type.");
+
+ public:
+  using type = T;
+
+  constexpr ClampedNumeric() : value_(0) {}
+
+  // Converting constructor from ClampedNumerics of other underlying types.
+  template <typename Src>
+  constexpr ClampedNumeric(const ClampedNumeric<Src>& rhs)
+      : value_(saturated_cast<T>(rhs.value_)) {}
+
+  template <typename Src>
+  friend class ClampedNumeric;
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to ClampedNumerics to make them easier to use.
+  template <typename Src>
+  constexpr ClampedNumeric(Src value)  // NOLINT(runtime/explicit)
+      : value_(saturated_cast<T>(value)) {
+    static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+  }
+
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  constexpr ClampedNumeric(
+      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : value_(saturated_cast<T>(static_cast<Src>(value))) {}
+
+  // Returns a ClampedNumeric of the specified type, cast from the current
+  // ClampedNumeric, and saturated to the destination type.
+  template <typename Dst>
+  constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+    return *this;
+  }
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src>
+  constexpr ClampedNumeric& operator+=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator-=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator*=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator/=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator%=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator<<=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator>>=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator&=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator|=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator^=(const Src rhs);
+
+  constexpr ClampedNumeric operator-() const {
+    // The negation of two's complement int min is int min, so that's the
+    // only overflow case where we will saturate.
+    return ClampedNumeric<T>(SaturatedNegWrapper(value_));
+  }
+
+  constexpr ClampedNumeric operator~() const {
+    return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
+  }
+
+  constexpr ClampedNumeric Abs() const {
+    // The negation of two's complement int min is int min, so that's the
+    // only overflow case where we will saturate.
+    return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
+  }
+
+  template <typename U>
+  constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(
+      const U rhs) const {
+    using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
+    return ClampedNumeric<result_type>(
+        ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+  }
+
+  template <typename U>
+  constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(
+      const U rhs) const {
+    using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
+    return ClampedNumeric<result_type>(
+        ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>
+  UnsignedAbs() const {
+    return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        SafeUnsignedAbs(value_));
+  }
+
+  constexpr ClampedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  constexpr ClampedNumeric operator++(int) {
+    ClampedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  constexpr ClampedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  constexpr ClampedNumeric operator--(int) {
+    ClampedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These perform the actual math operations on the ClampedNumerics.
+  // Binary arithmetic operations.
+  template <template <typename, typename, typename> class M,
+            typename L,
+            typename R>
+  static constexpr ClampedNumeric MathOp(const L lhs, const R rhs) {
+    using Math = typename MathWrapper<M, L, R>::math;
+    return ClampedNumeric<T>(
+        Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
+  }
+
+  // Assignment arithmetic operations.
+  template <template <typename, typename, typename> class M, typename R>
+  constexpr ClampedNumeric& MathOp(const R rhs) {
+    using Math = typename MathWrapper<M, T, R>::math;
+    *this =
+        ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
+    return *this;
+  }
+
+  template <typename Dst>
+  constexpr operator Dst() const {
+    return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(
+        value_);
+  }
+
+  // This method extracts the raw integer value without saturating it to the
+  // destination type as the conversion operator does. This is useful when
+  // e.g. assigning to an auto type or passing as a deduced template parameter.
+  constexpr T RawValue() const { return value_; }
+
+ private:
+  T value_;
+
+  // These wrappers allow us to handle state the same way for both
+  // ClampedNumeric and POD arithmetic types.
+  template <typename Src>
+  struct Wrapper {
+    static constexpr typename UnderlyingType<Src>::type value(Src value) {
+      return value;
+    }
+  };
+};
+
+// Convenience wrapper to return a new ClampedNumeric from the provided
+// arithmetic or ClampedNumericType.
+template <typename T>
+constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(
+    const T value) {
+  return value;
+}
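+
+// Example (illustrative sketch): clamped math never produces an invalid
+// state; out-of-range results saturate instead. The values are hypothetical.
+//
+//   ClampedNumeric<uint8_t> level = 250;
+//   level += 10;                       // Saturates to 255.
+//   int delta = ClampSub(INT_MIN, 1);  // Saturates to INT_MIN.
+//   uint8_t raw = level.RawValue();    // 255, extracted without conversion.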
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(
+    const L lhs,
+    const R rhs) {
+  using Math = typename MathWrapper<M, L, R>::math;
+  return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+                                                                        rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+constexpr auto ClampMathOp(const L lhs, const R rhs, const Args... args) {
+  return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
+}
+
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
+PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
+PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
+PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)
+
+}  // namespace internal
+
+using internal::ClampAdd;
+using internal::ClampAnd;
+using internal::ClampDiv;
+using internal::ClampedNumeric;
+using internal::ClampLsh;
+using internal::ClampMax;
+using internal::ClampMin;
+using internal::ClampMod;
+using internal::ClampMul;
+using internal::ClampOr;
+using internal::ClampRsh;
+using internal::ClampSub;
+using internal::ClampXor;
+using internal::MakeClampedNum;
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math_impl.h
new file mode 100644
index 0000000..ef96b9e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math_impl.h
@@ -0,0 +1,339 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_shared_impl.h"
+
+namespace partition_alloc::internal::base::internal {
+
+template <typename T,
+          typename std::enable_if<std::is_integral_v<T> &&
+                                  std::is_signed_v<T>>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+  return PA_IsConstantEvaluated() || !ClampedNegFastOp<T>::is_supported
+             ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
+                    ? NegateWrapper(value)
+                    : std::numeric_limits<T>::max())
+             : ClampedNegFastOp<T>::Do(value);
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral_v<T> &&
+                                  !std::is_signed_v<T>>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+  return T(0);
+}
+
+template <typename T,
+          typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+  return -value;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
+constexpr T SaturatedAbsWrapper(T value) {
+  // The calculation below is a static identity for unsigned types, but for
+  // signed integer types it provides a non-branching, saturated absolute value.
+  // This works because SafeUnsignedAbs() returns an unsigned type, which can
+  // represent the absolute value of all negative numbers of an equal-width
+  // integer type. The call to IsValueNegative() then detects overflow in the
+  // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
+  // a signed integer value. If it is the overflow case, we end up subtracting
+  // one from the unsigned result, thus saturating to numeric_limits<T>::max().
+  return static_cast<T>(
+      SafeUnsignedAbs(value) -
+      IsValueNegative<T>(static_cast<T>(SafeUnsignedAbs(value))));
+}
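+
+// Worked example (8-bit, illustrative): for value = int8_t{-128},
+// SafeUnsignedAbs() yields 128, whose bit pattern reads back as a negative
+// int8_t, so IsValueNegative() contributes 1 and the result saturates to
+// 128 - 1 = 127 rather than overflowing back to -128.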
+
+template <typename T,
+          typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
+constexpr T SaturatedAbsWrapper(T value) {
+  return value < 0 ? -value : value;
+}
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAddOp {};
+
+template <typename T, typename U>
+struct ClampedAddOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    if (!PA_IsConstantEvaluated() && ClampedAddFastOp<T, U>::is_supported) {
+      return ClampedAddFastOp<T, U>::template Do<V>(x, y);
+    }
+
+    static_assert(std::is_same_v<V, result_type> ||
+                      IsTypeInRangeForNumericType<U, V>::value,
+                  "The saturation result cannot be determined from the "
+                  "provided types.");
+    const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
+    V result = {};
+    return PA_BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result)))
+               ? result
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedSubOp {};
+
+template <typename T, typename U>
+struct ClampedSubOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    if (!PA_IsConstantEvaluated() && ClampedSubFastOp<T, U>::is_supported) {
+      return ClampedSubFastOp<T, U>::template Do<V>(x, y);
+    }
+
+    static_assert(std::is_same_v<V, result_type> ||
+                      IsTypeInRangeForNumericType<U, V>::value,
+                  "The saturation result cannot be determined from the "
+                  "provided types.");
+    const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
+    V result = {};
+    return PA_BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result)))
+               ? result
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMulOp {};
+
+template <typename T, typename U>
+struct ClampedMulOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    if (!PA_IsConstantEvaluated() && ClampedMulFastOp<T, U>::is_supported) {
+      return ClampedMulFastOp<T, U>::template Do<V>(x, y);
+    }
+
+    V result = {};
+    const V saturated =
+        CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+    return PA_BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result)))
+               ? result
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedDivOp {};
+
+template <typename T, typename U>
+struct ClampedDivOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    V result = {};
+    if (PA_BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result)))) {
+      return result;
+    }
+    // Saturation goes to max, min, or NaN (if x is zero).
+    return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
+             : SaturationDefaultLimits<V>::NaN();
+  }
+};
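+
+// Example (illustrative): ClampDiv(1, 0) saturates to the maximum,
+// ClampDiv(-1, 0) to the minimum, and ClampDiv(0, 0) to
+// SaturationDefaultLimits<V>::NaN(), which is 0 for integral results.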
+
+template <typename T, typename U, class Enable = void>
+struct ClampedModOp {};
+
+template <typename T, typename U>
+struct ClampedModOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    V result = {};
+    return PA_BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result)))
+               ? result
+               : x;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedLshOp {};
+
+// Left shift. A non-zero value that overflows saturates in the direction of
+// its sign; a zero shifted by any amount is always zero.
+template <typename T, typename U>
+struct ClampedLshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = T;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U shift) {
+    static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
+    if (PA_BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
+      // Shift as unsigned to avoid undefined behavior.
+      V result = static_cast<V>(as_unsigned(x) << shift);
+      // If the shift can be reversed, we know it was valid.
+      if (PA_BASE_NUMERICS_LIKELY(result >> shift == x)) {
+        return result;
+      }
+    }
+    return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
+  }
+};
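+
+// Example (illustrative): ClampLsh(uint8_t{0x20}, 2u) is 0x80, while
+// ClampLsh(uint8_t{0x40}, 2u) no longer round-trips and saturates to 0xff.
+// A negative value that overflows saturates to the type's minimum instead.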
+
+template <typename T, typename U, class Enable = void>
+struct ClampedRshOp {};
+
+// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
+template <typename T, typename U>
+struct ClampedRshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = T;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U shift) {
+    static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
+    // Signed right shift is odd, because it saturates to -1 or 0.
+    const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
+    return PA_BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
+               ? saturated_cast<V>(x >> shift)
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAndOp {};
+
+template <typename T, typename U>
+struct ClampedAndOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr V Do(T x, U y) {
+    return static_cast<result_type>(x) & static_cast<result_type>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedOrOp {};
+
+// For simplicity we promote to unsigned integers.
+template <typename T, typename U>
+struct ClampedOrOp<T,
+                   U,
+                   typename std::enable_if<std::is_integral_v<T> &&
+                                           std::is_integral_v<U>>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr V Do(T x, U y) {
+    return static_cast<result_type>(x) | static_cast<result_type>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedXorOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct ClampedXorOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral_v<T> &&
+                                            std::is_integral_v<U>>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr V Do(T x, U y) {
+    return static_cast<result_type>(x) ^ static_cast<result_type>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMaxOp {};
+
+template <typename T, typename U>
+struct ClampedMaxOp<T,
+                    U,
+                    typename std::enable_if<std::is_arithmetic_v<T> &&
+                                            std::is_arithmetic_v<U>>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x)
+                                       : saturated_cast<V>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMinOp {};
+
+template <typename T, typename U>
+struct ClampedMinOp<T,
+                    U,
+                    typename std::enable_if<std::is_arithmetic_v<T> &&
+                                            std::is_arithmetic_v<U>>::type> {
+  using result_type = typename LowestValuePromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x)
+                                    : saturated_cast<V>(y);
+  }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define PA_BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                      \
+  template <typename T, typename U>                                 \
+  struct Clamped##NAME##Op<                                         \
+      T, U,                                                         \
+      typename std::enable_if<std::is_floating_point_v<T> ||        \
+                              std::is_floating_point_v<U>>::type> { \
+    using result_type = typename MaxExponentPromotion<T, U>::type;  \
+    template <typename V = result_type>                             \
+    static constexpr V Do(T x, U y) {                               \
+      return saturated_cast<V>(x OP y);                             \
+    }                                                               \
+  };
+
+PA_BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+PA_BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+PA_BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+PA_BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef PA_BASE_FLOAT_ARITHMETIC_OPS
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h
new file mode 100644
index 0000000..ae069f5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h
@@ -0,0 +1,379 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <cmath>
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_impl.h"
+
+#if defined(__ARMEL__) && !defined(__native_client__)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_arm_impl.h"
+#define PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+#define PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+#if !PA_BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+#include <ostream>
+#endif
+
+namespace partition_alloc::internal::base {
+namespace internal {
+
+#if !PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+  static constexpr bool is_supported = false;
+  static constexpr Dst Do(Src) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<Dst>();
+  }
+};
+#endif  // PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following special case a few specific integer conversions where we can
+// eke out better performance than range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp {
+  static constexpr bool is_supported = false;
+  static constexpr bool Do(Src value) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+    Dst,
+    Src,
+    typename std::enable_if<
+        std::is_integral_v<Dst> && std::is_integral_v<Src> &&
+        std::is_signed_v<Dst> && std::is_signed_v<Src> &&
+        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+  static constexpr bool is_supported = true;
+
+  static constexpr bool Do(Src value) {
+    // Just downcast to the smaller type, sign extend it back to the original
+    // type, and then see if it matches the original value.
+    return value == static_cast<Dst>(value);
+  }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+    Dst,
+    Src,
+    typename std::enable_if<
+        std::is_integral_v<Dst> && std::is_integral_v<Src> &&
+        !std::is_signed_v<Dst> && std::is_signed_v<Src> &&
+        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+  static constexpr bool is_supported = true;
+
+  static constexpr bool Do(Src value) {
+    // We cast a signed as unsigned to overflow negative values to the top,
+    // then compare against whichever maximum is smaller, as our upper bound.
+    return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+  }
+};
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+constexpr bool IsValueInRangeForNumericType(Src value) {
+  using SrcType = typename internal::UnderlyingType<Src>::type;
+  return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+             ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
+                   static_cast<SrcType>(value))
+             : internal::DstRangeRelationToSrcRange<Dst>(
+                   static_cast<SrcType>(value))
+                   .IsValid();
+}
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst,
+          class CheckHandler = internal::CheckOnFailure,
+          typename Src>
+constexpr Dst checked_cast(Src value) {
+  // If the conversion can be proven to fail when evaluated as a constexpr,
+  // this produces a compile-time error; otherwise it will CHECK at runtime.
+  using SrcType = typename internal::UnderlyingType<Src>::type;
+  return PA_BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+             ? static_cast<Dst>(static_cast<SrcType>(value))
+             : CheckHandler::template HandleFailure<Dst>();
+}
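+
+// Example (illustrative sketch): ToIndex is a hypothetical helper converting
+// an untrusted 64-bit offset.
+//
+//   size_t ToIndex(int64_t offset) {
+//     // Crashes if offset is negative or too large for size_t.
+//     return checked_cast<size_t>(offset);
+//   }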
+
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T> {
+  static constexpr T NaN() {
+    return std::numeric_limits<T>::has_quiet_NaN
+               ? std::numeric_limits<T>::quiet_NaN()
+               : T();
+  }
+  using std::numeric_limits<T>::max;
+  static constexpr T Overflow() {
+    return std::numeric_limits<T>::has_infinity
+               ? std::numeric_limits<T>::infinity()
+               : std::numeric_limits<T>::max();
+  }
+  using std::numeric_limits<T>::lowest;
+  static constexpr T Underflow() {
+    return std::numeric_limits<T>::has_infinity
+               ? std::numeric_limits<T>::infinity() * -1
+               : std::numeric_limits<T>::lowest();
+  }
+};
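+
+// Example (illustrative sketch): a custom handler that pins out-of-range and
+// NaN conversions to zero instead of the numeric limits. SaturateToZero is a
+// hypothetical name; the inherited max()/lowest() satisfy the remaining
+// requirements.
+//
+//   template <typename T>
+//   struct SaturateToZero : public std::numeric_limits<T> {
+//     static constexpr T NaN() { return T(); }
+//     static constexpr T Overflow() { return T(); }
+//     static constexpr T Underflow() { return T(); }
+//   };
+//   // saturated_cast<int, SaturateToZero>(5e9) == 0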
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+  // For some reason clang generates much better code when the branch is
+  // structured exactly this way, rather than a sequence of checks.
+  return !constraint.IsOverflowFlagSet()
+             ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+                                                 : S<Dst>::Underflow())
+             // Skip this check for integral Src, which cannot be NaN.
+             : (std::is_integral_v<Src> || !constraint.IsUnderflowFlagSet()
+                    ? S<Dst>::Overflow()
+                    : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp {
+  static constexpr bool is_supported = false;
+  static constexpr Dst Do(Src value) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<Dst>();
+  }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<Dst,
+                      Src,
+                      typename std::enable_if<
+                          std::is_integral_v<Src> && std::is_integral_v<Dst> &&
+                          SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+  static constexpr bool is_supported = true;
+  static constexpr Dst Do(Src value) {
+    return SaturateFastAsmOp<Dst, Src>::Do(value);
+  }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<Dst,
+                      Src,
+                      typename std::enable_if<
+                          std::is_integral_v<Src> && std::is_integral_v<Dst> &&
+                          !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+  static constexpr bool is_supported = true;
+  static constexpr Dst Do(Src value) {
+    // The exact order of the following is structured to hit the correct
+    // optimization heuristics across compilers. Do not change without
+    // checking the emitted code.
+    const Dst saturated = CommonMaxOrMin<Dst, Src>(
+        IsMaxInRangeForNumericType<Dst, Src>() ||
+        (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+    return PA_BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+               ? static_cast<Dst>(value)
+               : saturated;
+  }
+};
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
+template <typename Dst,
+          template <typename> class SaturationHandler = SaturationDefaultLimits,
+          typename Src>
+constexpr Dst saturated_cast(Src value) {
+  using SrcType = typename UnderlyingType<Src>::type;
+  return !PA_IsConstantEvaluated() &&
+                 SaturateFastOp<Dst, SrcType>::is_supported &&
+                 std::is_same_v<SaturationHandler<Dst>,
+                                SaturationDefaultLimits<Dst>>
+             ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+             : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+                   static_cast<SrcType>(value),
+                   DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+                       static_cast<SrcType>(value)));
+}
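+
+// [Editor's illustration, not upstream code] saturated_cast clamps instead of
+// wrapping, and the results are compile-time constants:
+static_assert(saturated_cast<uint8_t>(300) == 255, "");
+static_assert(saturated_cast<uint8_t>(-1) == 0, "");
+static_assert(saturated_cast<int8_t>(1000) == 127, "");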
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
+template <typename Dst, typename Src>
+constexpr Dst strict_cast(Src value) {
+  using SrcType = typename UnderlyingType<Src>::type;
+  static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+  static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
+
+  // If you got here from a compiler error, it's because you tried to assign
+  // from a source type to a destination type that has insufficient range.
+  // The solution may be to change the destination type you're assigning to,
+  // and use one large enough to represent the source.
+  // Alternatively, you may be better served with the checked_cast<> or
+  // saturated_cast<> template functions for your particular use case.
+  static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+                    NUMERIC_RANGE_CONTAINED,
+                "The source type is out of range for the destination type. "
+                "Please see strict_cast<> comments for more information.");
+
+  return static_cast<Dst>(static_cast<SrcType>(value));
+}
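+
+// [Editor's illustration, not upstream code] Widening conversions compile,
+// while a narrowing one such as strict_cast<int>(size_t{0}) is rejected by the
+// static_assert above:
+static_assert(strict_cast<long long>(42) == 42, "");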
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+  static constexpr bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+    Dst,
+    Src,
+    typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+                            ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+  static constexpr bool value =
+      StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+      NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric {
+ public:
+  using type = T;
+
+  constexpr StrictNumeric() : value_(0) {}
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
+      : value_(strict_cast<T>(rhs.value_)) {}
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to StrictNumerics to make them easier to use.
+  template <typename Src>
+  constexpr StrictNumeric(Src value)  // NOLINT(runtime/explicit)
+      : value_(strict_cast<T>(value)) {}
+
+  // If you got here from a compiler error, it's because you tried to assign
+  // from a source type to a destination type that has insufficient range.
+  // The solution may be to change the destination type you're assigning to,
+  // and use one large enough to represent the source.
+  // If you're assigning from a CheckedNumeric<> class, you may be able to use
+  // the AssignIfValid() member function, specify a narrower destination type to
+  // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
+  // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+  // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+  // to explicitly cast the result to the destination type.
+  // If none of that works, you may be better served with the checked_cast<> or
+  // saturated_cast<> template functions for your particular use case.
+  template <typename Dst,
+            typename std::enable_if<
+                IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
+  constexpr operator Dst() const {
+    return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+  }
+
+ private:
+  const T value_;
+};
+
+// Convenience wrapper returns a StrictNumeric from the provided arithmetic
+// type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+    const T value) {
+  return value;
+}
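+
+// [Editor's illustration, not upstream code] A StrictNumeric<int> converts
+// implicitly only to types that can hold every int value:
+static_assert(static_cast<long long>(MakeStrictNum(7)) == 7, "");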
+
+#define PA_BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)           \
+  template <typename L, typename R,                                     \
+            typename std::enable_if<                                    \
+                internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
+  constexpr bool operator OP(const L lhs, const R rhs) {                \
+    return SafeCompare<NAME, typename UnderlyingType<L>::type,          \
+                       typename UnderlyingType<R>::type>(lhs, rhs);     \
+  }
+
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
+PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
+
+}  // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueInRangeForNumericType;
+using internal::IsValueNegative;
+using internal::MakeStrictNum;
+using internal::SafeUnsignedAbs;
+using internal::saturated_cast;
+using internal::strict_cast;
+using internal::StrictNumeric;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+// floating -> integral conversions that saturate and thus can actually return
+// an integral type.  In most cases, these should be preferred over the std::
+// versions.
+template <typename Dst = int,
+          typename Src,
+          typename = std::enable_if_t<std::is_integral_v<Dst> &&
+                                      std::is_floating_point_v<Src>>>
+Dst ClampFloor(Src value) {
+  return saturated_cast<Dst>(std::floor(value));
+}
+template <typename Dst = int,
+          typename Src,
+          typename = std::enable_if_t<std::is_integral_v<Dst> &&
+                                      std::is_floating_point_v<Src>>>
+Dst ClampCeil(Src value) {
+  return saturated_cast<Dst>(std::ceil(value));
+}
+template <typename Dst = int,
+          typename Src,
+          typename = std::enable_if_t<std::is_integral_v<Dst> &&
+                                      std::is_floating_point_v<Src>>>
+Dst ClampRound(Src value) {
+  const Src rounded =
+      (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
+  return saturated_cast<Dst>(rounded);
+}
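+
+// [Editor's usage sketch, not upstream code; these calls are runtime-only
+// because std::floor/std::ceil are not constexpr here]:
+//   int x = ClampRound(1e10);               // Saturates to INT_MAX.
+//   int64_t y = ClampFloor<int64_t>(-2.7);  // Yields -3, saturating if needed.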
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_arm_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_arm_impl.h
new file mode 100644
index 0000000..25c24c9
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_arm_impl.h
@@ -0,0 +1,49 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_impl.h"
+
+namespace partition_alloc::internal::base::internal {
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+  static constexpr bool is_supported =
+      kEnableAsmCode && std::is_signed_v<Src> && std::is_integral_v<Dst> &&
+      std::is_integral_v<Src> &&
+      IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+      IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+      !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+  __attribute__((always_inline)) static Dst Do(Src value) {
+    int32_t src = value;
+    typename std::conditional<std::is_signed_v<Dst>, int32_t, uint32_t>::type
+        result;
+    if (std::is_signed_v<Dst>) {
+      asm("ssat %[dst], %[shift], %[src]"
+          : [dst] "=r"(result)
+          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+                                            ? IntegerBitsPlusSign<Dst>::value
+                                            : 32));
+    } else {
+      asm("usat %[dst], %[shift], %[src]"
+          : [dst] "=r"(result)
+          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+                                            ? IntegerBitsPlusSign<Dst>::value
+                                            : 31));
+    }
+    return static_cast<Dst>(result);
+  }
+};
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000..7171a1a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions_impl.h
@@ -0,0 +1,842 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PA_BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+#define PA_BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define PA_BASE_NUMERICS_LIKELY(x) (x)
+#define PA_BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
+
+namespace partition_alloc::internal::base::internal {
+
+// The std library doesn't provide a binary max_exponent for integers, however
+// we can compute an analog using std::numeric_limits<>::digits.
+template <typename NumericType>
+struct MaxExponent {
+  static const int value = std::is_floating_point_v<NumericType>
+                               ? std::numeric_limits<NumericType>::max_exponent
+                               : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+  static const int value =
+      std::numeric_limits<NumericType>::digits + std::is_signed_v<NumericType>;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit {
+  static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without throwing compiler
+// warnings on: unsigned(value) < 0.
+template <typename T,
+          typename std::enable_if<std::is_signed_v<T>>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+  static_assert(std::is_arithmetic_v<T>, "Argument must be numeric.");
+  return value < 0;
+}
+
+template <typename T,
+          typename std::enable_if<!std::is_signed_v<T>>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+  static_assert(std::is_arithmetic_v<T>, "Argument must be numeric.");
+  return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+    T x,
+    bool is_negative) {
+  static_assert(std::is_integral_v<T>, "Type must be integral");
+  using SignedT = typename std::make_signed<T>::type;
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  return static_cast<SignedT>((static_cast<UnsignedT>(x) ^
+                               static_cast<UnsignedT>(-SignedT(is_negative))) +
+                              is_negative);
+}
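+
+// [Editor's illustration, not upstream code] The negation is branch-free and
+// goes through unsigned arithmetic, so it is well defined even in constexpr:
+static_assert(ConditionalNegate(5, true) == -5, "");
+static_assert(ConditionalNegate(5, false) == 5, "");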
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+  static_assert(std::is_integral_v<T>, "Type must be integral");
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  return IsValueNegative(value)
+             ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
+             : static_cast<UnsignedT>(value);
+}
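+
+// [Editor's illustration, not upstream code] Unlike std::abs, this is well
+// defined at the most negative value (assuming 32-bit int):
+static_assert(SafeUnsignedAbs(std::numeric_limits<int>::min()) == 2147483648u,
+              "");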
+
+// TODO(jschuh): Switch to std::is_constant_evaluated() once C++20 is supported.
+// Alternately, the usage could be restructured for "consteval if" in C++23.
+#define PA_IsConstantEvaluated() (__builtin_is_constant_evaluated())
+
+// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
+// some accelerated runtime paths to release builds until this can be forced
+// with consteval support in C++20 or C++23.
+#if defined(NDEBUG)
+constexpr bool kEnableAsmCode = true;
+#else
+constexpr bool kEnableAsmCode = false;
+#endif
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure {
+  template <typename T>
+  static T HandleFailure() {
+#if defined(_MSC_VER)
+    __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+    __builtin_trap();
+#else
+    ((void)(*(volatile char*)0 = 0));
+#endif
+    return T();
+  }
+};
+
+enum IntegerRepresentation {
+  INTEGER_REPRESENTATION_UNSIGNED,
+  INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+  NUMERIC_RANGE_NOT_CONTAINED,
+  NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <typename Dst,
+          typename Src,
+          IntegerRepresentation DstSign = std::is_signed_v<Dst>
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          IntegerRepresentation SrcSign = std::is_signed_v<Src>
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED>
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value >= MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_SIGNED,
+                                        INTEGER_REPRESENTATION_UNSIGNED> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value > MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_UNSIGNED,
+                                        INTEGER_REPRESENTATION_SIGNED> {
+  static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
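+
+// [Editor's illustration, not upstream code] of the static classification:
+static_assert(StaticDstRangeRelationToSrcRange<int64_t, int32_t>::value ==
+                  NUMERIC_RANGE_CONTAINED,
+              "");
+static_assert(StaticDstRangeRelationToSrcRange<uint32_t, int32_t>::value ==
+                  NUMERIC_RANGE_NOT_CONTAINED,
+              "");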
+
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck {
+ public:
+  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+  constexpr RangeCheck() : is_underflow_(false), is_overflow_(false) {}
+  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+  constexpr bool operator==(const RangeCheck rhs) const {
+    return is_underflow_ == rhs.is_underflow_ &&
+           is_overflow_ == rhs.is_overflow_;
+  }
+  constexpr bool operator!=(const RangeCheck rhs) const {
+    return !(*this == rhs);
+  }
+
+ private:
+  // Do not change the order of these member variables. The integral conversion
+  // optimization depends on this exact order.
+  const bool is_underflow_;
+  const bool is_overflow_;
+};
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+//   1. Integral maximum is always one less than a power of two, so it must be
+//      truncated to fit the mantissa of the floating point. The direction of
+//      rounding is implementation defined, but by default it's always IEEE
+//      floats, which round to nearest and thus result in a value of larger
+//      magnitude than the integral value.
+//      Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+//                                   // is 4294967295u.
+//   2. If the floating point value is equal to the promoted integral maximum
+//      value, a range check will erroneously pass.
+//      Example: (4294967296f <= 4294967295u) // This is true due to a precision
+//                                            // loss in rounding up to float.
+//   3. When the floating point value is then converted to an integral, the
+//      resulting value is out of range for the target integral type and
+//      thus is implementation defined.
+//      Example: unsigned u = (float)UINT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange {
+  using SrcLimits = std::numeric_limits<Src>;
+  using DstLimits = typename std::numeric_limits<Dst>;
+
+  // Computes the mask required to make an accurate comparison between types.
+  static const int kShift =
+      (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+       SrcLimits::digits < DstLimits::digits)
+          ? (DstLimits::digits - SrcLimits::digits)
+          : 0;
+  // Masks out the integer bits that are beyond the precision of the
+  // intermediate type used for comparison.
+  template <typename T,
+            typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
+  static constexpr T Adjust(T value) {
+    static_assert(std::is_same_v<T, Dst>, "");
+    static_assert(kShift < DstLimits::digits, "");
+    using UnsignedDst = typename std::make_unsigned_t<T>;
+    return static_cast<T>(ConditionalNegate(
+        SafeUnsignedAbs(value) & ~((UnsignedDst{1} << kShift) - UnsignedDst{1}),
+        IsValueNegative(value)));
+  }
+
+  template <
+      typename T,
+      typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
+  static constexpr T Adjust(T value) {
+    static_assert(std::is_same_v<T, Dst>, "");
+    static_assert(kShift == 0, "");
+    return value;
+  }
+
+  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
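+
+// [Editor's illustration, not upstream code; assumes IEEE-754 float] The
+// largest uint32_t exactly representable as float is 2^32 - 256, so the
+// comparison bound is truncated accordingly:
+static_assert(NarrowingRange<uint32_t, float, std::numeric_limits>::max() ==
+                  4294967040u,
+              "");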
+
+template <typename Dst,
+          typename Src,
+          template <typename>
+          class Bounds,
+          IntegerRepresentation DstSign = std::is_signed_v<Dst>
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          IntegerRepresentation SrcSign = std::is_signed_v<Src>
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          NumericRangeRepresentation DstRange =
+              StaticDstRangeRelationToSrcRange<Dst, Src>::value>
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split them into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst,
+          typename Src,
+          template <typename>
+          class Bounds,
+          IntegerRepresentation DstSign,
+          IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      DstSign,
+                                      SrcSign,
+                                      NUMERIC_RANGE_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using SrcLimits = std::numeric_limits<Src>;
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(
+        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+            static_cast<Dst>(value) >= DstLimits::lowest(),
+        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+            static_cast<Dst>(value) <= DstLimits::max());
+  }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
+  }
+};
+
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(
+        DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+        value <= DstLimits::max());
+  }
+};
+
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    using Promotion = decltype(Src() + Dst());
+    return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+                          static_cast<Promotion>(value) >=
+                              static_cast<Promotion>(DstLimits::lowest()),
+                      static_cast<Promotion>(value) <=
+                          static_cast<Promotion>(DstLimits::max()));
+  }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using SrcLimits = std::numeric_limits<Src>;
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    using Promotion = decltype(Src() + Dst());
+    bool ge_zero = false;
+    // Converting floating-point to integer discards the fractional part, so
+    // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
+    if (std::is_floating_point_v<Src>) {
+      ge_zero = value > Src(-1);
+    } else {
+      ge_zero = value >= Src(0);
+    }
+    return RangeCheck(
+        ge_zero && (DstLimits::lowest() == 0 ||
+                    static_cast<Dst>(value) >= DstLimits::lowest()),
+        static_cast<Promotion>(SrcLimits::max()) <=
+                static_cast<Promotion>(DstLimits::max()) ||
+            static_cast<Promotion>(value) <=
+                static_cast<Promotion>(DstLimits::max()));
+  }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
+template <typename Dst, typename Src>
+struct IsTypeInRangeForNumericType {
+  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+                            NUMERIC_RANGE_CONTAINED;
+};
+
+template <typename Dst,
+          template <typename> class Bounds = std::numeric_limits,
+          typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+  static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
+  static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
+  static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
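+
+// [Editor's illustration, not upstream code] of the runtime classification:
+static_assert(DstRangeRelationToSrcRange<uint8_t>(200).IsValid(), "");
+static_assert(DstRangeRelationToSrcRange<uint8_t>(256).IsOverflow(), "");
+static_assert(DstRangeRelationToSrcRange<uint8_t>(-1).IsUnderflow(), "");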
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define PA_INTEGER_FOR_DIGITS_AND_SIGN(I)                       \
+  template <>                                                   \
+  struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+                                 std::is_signed_v<I>> {         \
+    using type = I;                                             \
+  }
+
+PA_INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+PA_INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef PA_INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+              "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed_v<Integer>>
+struct TwiceWiderInteger {
+  using type =
+      typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+                                       IsSigned>::type;
+};
+
+enum ArithmeticPromotionCategory {
+  LEFT_PROMOTION,  // Use the type of the left-hand argument.
+  RIGHT_PROMOTION  // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+          typename Rhs,
+          ArithmeticPromotionCategory Promotion =
+              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs,
+          typename Rhs,
+          ArithmeticPromotionCategory Promotion =
+              std::is_signed_v<Lhs>
+                  ? (std::is_signed_v<Rhs>
+                         ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+                                ? LEFT_PROMOTION
+                                : RIGHT_PROMOTION)
+                         : LEFT_PROMOTION)
+                  : (std::is_signed_v<Rhs>
+                         ? RIGHT_PROMOTION
+                         : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+                                ? LEFT_PROMOTION
+                                : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <
+    typename Lhs,
+    typename Rhs = Lhs,
+    bool is_intmax_type =
+        std::is_integral_v<typename MaxExponentPromotion<Lhs, Rhs>::type> &&
+        IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+                value == IntegerBitsPlusSign<intmax_t>::value,
+    bool is_max_exponent = StaticDstRangeRelationToSrcRange<
+                               typename MaxExponentPromotion<Lhs, Rhs>::type,
+                               Lhs>::value == NUMERIC_RANGE_CONTAINED &&
+                           StaticDstRangeRelationToSrcRange<
+                               typename MaxExponentPromotion<Lhs, Rhs>::type,
+                               Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+  using type =
+      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                 std::is_signed_v<Lhs> ||
+                                     std::is_signed_v<Rhs>>::type;
+  static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = false;
+};
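+
+// [Editor's illustration, not upstream code] Mixing int32_t and uint32_t
+// promotes to the next wider signed type, which holds every value of both:
+static_assert(
+    std::is_same_v<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>, "");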
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+  static const bool value =
+      !std::is_floating_point_v<T> && !std::is_floating_point_v<Lhs> &&
+      !std::is_floating_point_v<Rhs> &&
+      std::is_signed_v<T> >= std::is_signed_v<Lhs> &&
+      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+      std::is_signed_v<T> >= std::is_signed_v<Rhs> &&
+      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+          typename Rhs,
+          bool is_promotion_possible = IsIntegerArithmeticSafe<
+              typename std::conditional<std::is_signed_v<Lhs> ||
+                                            std::is_signed_v<Rhs>,
+                                        intmax_t,
+                                        uintmax_t>::type,
+              typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+  using type =
+      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                 std::is_signed_v<Lhs> ||
+                                     std::is_signed_v<Rhs>>::type;
+  static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+  static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+  using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum_v<T>>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+  using type = typename std::underlying_type<T>::type;
+  static const bool value = std::is_arithmetic_v<type>;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+  using type = T;
+  static const bool value = std::is_arithmetic_v<type>;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType {
+  using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+  static const bool is_numeric = std::is_arithmetic_v<type>;
+  static const bool is_checked = false;
+  static const bool is_clamped = false;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = true;
+  static const bool is_clamped = false;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = false;
+  static const bool is_clamped = true;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = false;
+  static const bool is_clamped = false;
+  static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsClampedOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+      !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_signed<
+    typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value) {
+  static_assert(std::is_integral_v<decltype(as_signed(value))>,
+                "Argument must be a signed or unsigned integer type.");
+  return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<
+    typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value) {
+  static_assert(std::is_integral_v<decltype(as_unsigned(value))>,
+                "Argument must be a signed or unsigned integer type.");
+  return static_cast<decltype(as_unsigned(value))>(value);
+}
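+
+// [Editor's illustration, not upstream code; assumes 32-bit int]:
+static_assert(as_unsigned(-1) == 4294967295u, "");
+static_assert(as_signed(3u) == 3, "");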
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+                          const R rhs,
+                          const RangeCheck l_range,
+                          const RangeCheck r_range) {
+  return l_range.IsUnderflow() || r_range.IsOverflow() ||
+         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <
+                                    static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                      DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+                                 const R rhs,
+                                 const RangeCheck l_range,
+                                 const RangeCheck r_range) {
+  return l_range.IsUnderflow() || r_range.IsOverflow() ||
+         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <=
+                                    static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                             DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+                             const R rhs,
+                             const RangeCheck l_range,
+                             const RangeCheck r_range) {
+  return l_range.IsOverflow() || r_range.IsUnderflow() ||
+         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >
+                                    static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                         DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+                                    const R rhs,
+                                    const RangeCheck l_range,
+                                    const RangeCheck r_range) {
+  return l_range.IsOverflow() || r_range.IsUnderflow() ||
+         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >=
+                                    static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                                DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return DstRangeRelationToSrcRange<R>(lhs) ==
+               DstRangeRelationToSrcRange<L>(rhs) &&
+           static_cast<decltype(lhs + rhs)>(lhs) ==
+               static_cast<decltype(lhs + rhs)>(rhs);
+  }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return DstRangeRelationToSrcRange<R>(lhs) !=
+               DstRangeRelationToSrcRange<L>(rhs) ||
+           static_cast<decltype(lhs + rhs)>(lhs) !=
+               static_cast<decltype(lhs + rhs)>(rhs);
+  }
+};
+
+// SafeCompare performs the comparison on a common promoted type when one
+// exists; otherwise it defers to the range-checked comparison templates above.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
+                "Types must be numeric.");
+  using Promotion = BigEnoughPromotion<L, R>;
+  using BigType = typename Promotion::type;
+  return Promotion::is_contained
+             // Force to a larger type for speed if both are contained.
+             ? C<BigType, BigType>::Test(
+                   static_cast<BigType>(static_cast<L>(lhs)),
+                   static_cast<BigType>(static_cast<R>(rhs)))
+             // Let the template functions figure it out for mixed types.
+             : C<L, R>::Test(lhs, rhs);
+}
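+
+// [Editor's illustration, not upstream code; assumes 32-bit int] Unlike the
+// raw built-in comparison, the safe comparison gets mixed-sign cases right:
+static_assert(SafeCompare<IsLess>(-1, 1u), "");  // Raw C++: -1 < 1u is false.
+static_assert(!SafeCompare<IsEqual>(-1, 4294967295u), "");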
+
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType() {
+  return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+                                          std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr bool IsMinInRangeForNumericType() {
+  return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+                                       std::numeric_limits<Src>::lowest());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMax() {
+  return !IsMaxInRangeForNumericType<Dst, Src>()
+             ? Dst(std::numeric_limits<Dst>::max())
+             : Dst(std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMin() {
+  return !IsMinInRangeForNumericType<Dst, Src>()
+             ? Dst(std::numeric_limits<Dst>::lowest())
+             : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that returns the max or min for a supplied type.
+// If the argument is false, the returned value is the maximum. If true, the
+// returned value is the minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min) {
+  return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
+}
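+
+// [Editor's illustration, not upstream code] The common bound is whichever
+// limit both types can represent:
+static_assert(CommonMax<int8_t, int>() == 127, "");
+static_assert(CommonMax<int, int8_t>() == 127, "");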
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math.h
new file mode 100644
index 0000000..00a20aa
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math.h
@@ -0,0 +1,12 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_arm_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_arm_impl.h
new file mode 100644
index 0000000..ea1e053
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_arm_impl.h
@@ -0,0 +1,126 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+
+namespace partition_alloc::internal::base::internal {
+
+template <typename T, typename U>
+struct CheckedMulFastAsmOp {
+  static const bool is_supported =
+      kEnableAsmCode && FastIntegerArithmeticPromotion<T, U>::is_contained;
+
+  // The following is not an assembler routine and is thus constexpr safe; it
+  // just emits much more efficient code than the Clang and GCC builtins for
+  // performing overflow-checked multiplication when a twice wider type is
+  // available. The below compiles down to 2-3 instructions, depending on the
+  // width of the types in use.
+  // As an example, an int32_t multiply compiles to:
+  //    smull   r0, r1, r0, r1
+  //    cmp     r1, r1, asr #31
+  // And an int16_t multiply compiles to:
+  //    smulbb  r1, r1, r0
+  //    asr     r2, r1, #16
+  //    cmp     r2, r1, asr #15
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    Promotion presult;
+
+    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+    if (!IsValueInRangeForNumericType<V>(presult)) {
+      return false;
+    }
+    *result = static_cast<V>(presult);
+    return true;
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp {
+  static const bool is_supported =
+      kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
+      IsTypeInRangeForNumericType<
+          int32_t,
+          typename BigEnoughPromotion<T, U>::type>::value;
+
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    // This will get promoted to an int, so let the compiler do whatever is
+    // clever and rely on the saturated cast to bounds check.
+    if (IsIntegerArithmeticSafe<int, T, U>::value) {
+      return saturated_cast<V>(x + y);
+    }
+
+    int32_t result;
+    int32_t x_i32 = checked_cast<int32_t>(x);
+    int32_t y_i32 = checked_cast<int32_t>(y);
+
+    asm("qadd %[result], %[first], %[second]"
+        : [result] "=r"(result)
+        : [first] "r"(x_i32), [second] "r"(y_i32));
+    return saturated_cast<V>(result);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp {
+  static const bool is_supported =
+      kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
+      IsTypeInRangeForNumericType<
+          int32_t,
+          typename BigEnoughPromotion<T, U>::type>::value;
+
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    // This will get promoted to an int, so let the compiler do whatever is
+    // clever and rely on the saturated cast to bounds check.
+    if (IsIntegerArithmeticSafe<int, T, U>::value) {
+      return saturated_cast<V>(x - y);
+    }
+
+    int32_t result;
+    int32_t x_i32 = checked_cast<int32_t>(x);
+    int32_t y_i32 = checked_cast<int32_t>(y);
+
+    asm("qsub %[result], %[first], %[second]"
+        : [result] "=r"(result)
+        : [first] "r"(x_i32), [second] "r"(y_i32));
+    return saturated_cast<V>(result);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp {
+  static const bool is_supported =
+      kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;
+
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
+    // it's fewer instructions than promoting and then saturating.
+    if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
+        !IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
+      V result;
+      return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
+                 ? result
+                 : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+    }
+
+    assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
+    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    return saturated_cast<V>(static_cast<Promotion>(x) *
+                             static_cast<Promotion>(y));
+  }
+};
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h
new file mode 100644
index 0000000..49fd9be
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h
@@ -0,0 +1,155 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_arm_impl.h"
+#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (1)
+#else
+#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (0)
+#endif
+
+namespace partition_alloc::internal::base::internal {
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !PA_BASE_HAS_ASSEMBLER_SAFE_MATH
+template <typename T, typename U>
+struct CheckedMulFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+#endif  // PA_BASE_HAS_ASSEMBLER_SAFE_MATH
+#undef PA_BASE_HAS_ASSEMBLER_SAFE_MATH
+
+template <typename T, typename U>
+struct CheckedAddFastOp {
+  static const bool is_supported = true;
+  template <typename V>
+  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
+    return !__builtin_add_overflow(x, y, result);
+  }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp {
+  static const bool is_supported = true;
+  template <typename V>
+  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
+    return !__builtin_sub_overflow(x, y, result);
+  }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp {
+#if defined(__clang__)
+  // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
+  // support full-width, mixed-sign multiply builtins.
+  // https://crbug.com/613003
+  // We can support intptr_t, uintptr_t, or a smaller common type.
+  static const bool is_supported =
+      (IsTypeInRangeForNumericType<intptr_t, T>::value &&
+       IsTypeInRangeForNumericType<intptr_t, U>::value) ||
+      (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
+       IsTypeInRangeForNumericType<uintptr_t, U>::value);
+#else
+  static const bool is_supported = true;
+#endif
+  template <typename V>
+  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
+    return CheckedMulFastAsmOp<T, U>::is_supported
+               ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
+               : !__builtin_mul_overflow(x, y, result);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp {
+  static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp {
+  static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp {
+  static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
+  }
+};
+
+template <typename T>
+struct ClampedNegFastOp {
+  static const bool is_supported = std::is_signed_v<T>;
+  __attribute__((always_inline)) static T Do(T value) {
+    // Use this when there is no assembler path available.
+    if (!ClampedSubFastAsmOp<T, T>::is_supported) {
+      T result;
+      return !__builtin_sub_overflow(T(0), value, &result)
+                 ? result
+                 : std::numeric_limits<T>::max();
+    }
+
+    // Fallback to the normal subtraction path.
+    return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
+  }
+};
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
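The Checked*FastOp structs above are thin wrappers over the Clang/GCC overflow builtins, returning true on success. A small usage sketch under that same convention (hypothetical caller; assumes a Clang or GCC toolchain):

#include <cstdint>
#include <cstdio>

// Mirrors the CheckedAddFastOp contract: true means no overflow occurred and
// *result holds the sum.
bool CheckedAdd(int32_t x, int32_t y, int32_t* result) {
  return !__builtin_add_overflow(x, y, result);
}

int main() {
  int32_t sum = 0;
  if (!CheckedAdd(INT32_MAX, 1, &sum)) {
    std::puts("overflow detected");  // Taken: INT32_MAX + 1 does not fit.
  }
  return 0;
}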
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_shared_impl.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_shared_impl.h
new file mode 100644
index 0000000..0d4846a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_shared_impl.h
@@ -0,0 +1,213 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_ASMJS)
+// Optimized safe math instructions are incompatible with asmjs.
+#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+// Where available use builtin math overflow support on Clang and GCC.
+#elif !defined(__native_client__) &&                       \
+    ((defined(__clang__) &&                                \
+      ((__clang_major__ > 3) ||                            \
+       (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
+     (defined(__GNUC__) && __GNUC__ >= 5))
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h"
+#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (1)
+#else
+#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+#endif
+
+namespace partition_alloc::internal::base::internal {
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !PA_BASE_HAS_OPTIMIZED_SAFE_MATH
+template <typename T, typename U>
+struct CheckedAddFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T>
+struct ClampedNegFastOp {
+  static const bool is_supported = false;
+  static constexpr T Do(T) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<T>();
+  }
+};
+#endif  // PA_BASE_HAS_OPTIMIZED_SAFE_MATH
+#undef PA_BASE_HAS_OPTIMIZED_SAFE_MATH
+
+// This is used for UnsignedAbs, where we need to support floating-point
+// template instantiations even though we don't actually support the operations.
+// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
+// so the float versions will not compile.
+template <typename Numeric,
+          bool IsInteger = std::is_integral_v<Numeric>,
+          bool IsFloat = std::is_floating_point_v<Numeric>>
+struct UnsignedOrFloatForSize;
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, true, false> {
+  using type = typename std::make_unsigned<Numeric>::type;
+};
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, false, true> {
+  using type = Numeric;
+};
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals versus
+// floating points. These don't perform any overflow checking. Rather, they
+// exhibit well-defined overflow semantics and rely on the caller to detect
+// if an overflow occurred.
+
+template <typename T,
+          typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  // This will compile to a NEG on Intel, and is normal negation on ARM.
+  return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
+}
+
+template <typename T,
+          typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+  return -value;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
+constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
+  return ~value;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+  return static_cast<T>(SafeUnsignedAbs(value));
+}
+
+template <typename T,
+          typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+  return value < 0 ? -value : value;
+}
+
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+struct MathWrapper {
+  using math = M<typename UnderlyingType<L>::type,
+                 typename UnderlyingType<R>::type,
+                 void>;
+  using type = typename math::result_type;
+};
+
+// The following macros are just boilerplate for the standard arithmetic
+// operator overloads and variadic function templates. A macro isn't the nicest
+// solution, but it beats rewriting these over and over again.
+#define PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)    \
+  template <typename L, typename R, typename... Args>                   \
+  constexpr auto CL_ABBR##OP_NAME(const L lhs, const R rhs,             \
+                                  const Args... args) {                 \
+    return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, \
+                                                              args...); \
+  }
+
+#define PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP,  \
+                                             CMP_OP)                       \
+  /* Binary arithmetic operator for all CLASS##Numeric operations. */      \
+  template <typename L, typename R,                                        \
+            typename std::enable_if<Is##CLASS##Op<L, R>::value>::type* =   \
+                nullptr>                                                   \
+  constexpr CLASS##Numeric<                                                \
+      typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type>                \
+  operator OP(const L lhs, const R rhs) {                                  \
+    return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs,  \
+                                                                     rhs); \
+  }                                                                        \
+  /* Assignment arithmetic operator implementation from CLASS##Numeric. */ \
+  template <typename L>                                                    \
+  template <typename R>                                                    \
+  constexpr CLASS##Numeric<L>& CLASS##Numeric<L>::operator CMP_OP(         \
+      const R rhs) {                                                       \
+    return MathOp<CLASS##OP_NAME##Op>(rhs);                                \
+  }                                                                        \
+  /* Variadic arithmetic functions that return CLASS##Numeric. */          \
+  PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
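NegateWrapper above sidesteps signed-overflow UB by negating through the unsigned type; the checked/clamped layers then decide whether the wrapped result is representable. A standalone sketch of the same trick (illustrative helper; assumes two's-complement narrowing, which is guaranteed from C++20):

#include <cstdint>
#include <type_traits>

template <typename T>
constexpr T NegateNoUB(T value) {
  static_assert(std::is_integral_v<T>);
  using U = std::make_unsigned_t<T>;
  // 0 - value computed in the unsigned domain wraps instead of overflowing,
  // so NegateNoUB(INT32_MIN) yields INT32_MIN rather than undefined behavior.
  return static_cast<T>(U(0) - static_cast<U>(value));
}

static_assert(NegateNoUB(5) == -5);
static_assert(NegateNoUB(INT32_MIN) == INT32_MIN);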
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h
new file mode 100644
index 0000000..42e0760
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h
@@ -0,0 +1,58 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
+// To prevent long-lasting loops (which would likely be a bug, such as a signal
+// that should be masked) from going unnoticed, there is a limit after which
+// the caller will nonetheless see an EINTR in Debug builds.
+//
+// On Windows and Fuchsia, this wrapper macro does nothing because there are no
+// signals.
+//
+// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
+// value of close is significant. See http://crbug.com/269623.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_POSIX)
+
+#include <errno.h>
+
+#if defined(NDEBUG)
+
+#define PA_HANDLE_EINTR(x)                                  \
+  ({                                                        \
+    decltype(x) eintr_wrapper_result;                       \
+    do {                                                    \
+      eintr_wrapper_result = (x);                           \
+    } while (eintr_wrapper_result == -1 && errno == EINTR); \
+    eintr_wrapper_result;                                   \
+  })
+
+#else
+
+#define PA_HANDLE_EINTR(x)                                   \
+  ({                                                         \
+    int eintr_wrapper_counter = 0;                           \
+    decltype(x) eintr_wrapper_result;                        \
+    do {                                                     \
+      eintr_wrapper_result = (x);                            \
+    } while (eintr_wrapper_result == -1 && errno == EINTR && \
+             eintr_wrapper_counter++ < 100);                 \
+    eintr_wrapper_result;                                    \
+  })
+
+#endif  // NDEBUG
+
+#else  // !BUILDFLAG(IS_POSIX)
+
+#define PA_HANDLE_EINTR(x) (x)
+
+#endif  // !BUILDFLAG(IS_POSIX)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
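A brief usage sketch of the macro (hypothetical helper, not part of the patch): the statement expression re-issues the call while it keeps failing with EINTR, and in Debug builds gives up after 100 retries.

#include <unistd.h>

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"

// Reads up to |len| bytes from |fd|, retrying if a signal interrupts read().
ssize_t ReadRetryingOnEintr(int fd, void* buf, size_t len) {
  return PA_HANDLE_EINTR(read(fd, buf, len));
}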
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.cc
new file mode 100644
index 0000000..b779ff1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.cc
@@ -0,0 +1,113 @@
+// Copyright 2006-2009 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+#if defined(__GLIBC__)
+#define USE_HISTORICAL_STRERROR_R 1
+// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
+// is defined, but the symbol is renamed to __gnu_strerror_r which only exists
+// on those later versions. For parity, add the same condition as bionic.
+#elif defined(__BIONIC__) && defined(_GNU_SOURCE) && __ANDROID_API__ >= 23
+#define USE_HISTORICAL_STRERROR_R 1
+#else
+#define USE_HISTORICAL_STRERROR_R 0
+#endif
+
+#if USE_HISTORICAL_STRERROR_R
+// glibc has two strerror_r functions: a historical GNU-specific one that
+// returns type char *, and a POSIX.1-2001 compliant one available since 2.3.4
+// that returns int. This wraps the GNU-specific one.
+[[maybe_unused]] static void wrap_posix_strerror_r(
+    char* (*strerror_r_ptr)(int, char*, size_t),
+    int err,
+    char* buf,
+    size_t len) {
+  // GNU version.
+  char* rc = (*strerror_r_ptr)(err, buf, len);
+  if (rc != buf) {
+    // glibc did not use buf and returned a static string instead. Copy it
+    // into buf.
+    buf[0] = '\0';
+    strncat(buf, rc, len - 1);
+  }
+  // The GNU version never fails. Unknown errors get an "unknown error" message.
+  // The result is always null terminated.
+}
+#endif  // USE_HISTORICAL_STRERROR_R
+
+// Wrapper for strerror_r functions that implement the POSIX interface. POSIX
+// does not define the behaviour for some of the edge cases, so we wrap it to
+// guarantee that they are handled. This is compiled on all POSIX platforms, but
+// it will only be used on Linux if the POSIX strerror_r implementation is
+// being used (see below).
+[[maybe_unused]] static void wrap_posix_strerror_r(
+    int (*strerror_r_ptr)(int, char*, size_t),
+    int err,
+    char* buf,
+    size_t len) {
+  int old_errno = errno;
+  // Have to cast since otherwise we get an error if this is the GNU version
+  // (but in such a scenario this function is never called). Sadly we can't use
+  // C++-style casts because the appropriate one is reinterpret_cast but it's
+  // considered illegal to reinterpret_cast a type to itself, so we get an
+  // error in the opposite case.
+  int result = (*strerror_r_ptr)(err, buf, len);
+  if (result == 0) {
+    // POSIX is vague about whether the string will be terminated, although
+    // it indirectly implies that typically ERANGE will be returned, instead
+    // of truncating the string. We play it safe by always terminating the
+    // string explicitly.
+    buf[len - 1] = '\0';
+  } else {
+    // Error. POSIX is vague about whether the return value is itself a system
+    // error code or something else. On Linux currently it is -1 and errno is
+    // set. On BSD-derived systems it is a system error and errno is unchanged.
+    // We try and detect which case it is so as to put as much useful info as
+    // we can into our message.
+    int strerror_error;  // The error encountered in strerror
+    int new_errno = errno;
+    if (new_errno != old_errno) {
+      // errno was changed, so probably the return value is just -1 or something
+      // else that doesn't provide any info, and errno is the error.
+      strerror_error = new_errno;
+    } else {
+      // Either the error from strerror_r was the same as the previous value, or
+      // errno wasn't used. Assume the latter.
+      strerror_error = result;
+    }
+    // snprintf truncates and always null-terminates.
+    snprintf(buf, len, "Error %d while retrieving error %d", strerror_error,
+             err);
+  }
+  errno = old_errno;
+}
+
+void safe_strerror_r(int err, char* buf, size_t len) {
+  if (buf == nullptr || len <= 0) {
+    return;
+  }
+  // If using glibc (i.e., Linux), the compiler will automatically select the
+  // appropriate overloaded function based on the function type of strerror_r.
+  // The other one will be elided from the translation unit since both are
+  // static.
+  wrap_posix_strerror_r(&strerror_r, err, buf, len);
+}
+
+std::string safe_strerror(int err) {
+  const int buffer_size = 256;
+  char buf[buffer_size];
+  safe_strerror_r(err, buf, sizeof(buf));
+  return std::string(buf);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.h
new file mode 100644
index 0000000..9af43a8
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.h
@@ -0,0 +1,45 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base {
+
+// BEFORE using anything from this file, first look at PLOG and friends in
+// logging.h and use them instead if applicable.
+//
+// This file declares safe, portable alternatives to the POSIX strerror()
+// function. strerror() is inherently unsafe in multi-threaded apps and should
+// never be used. Doing so can cause crashes. Additionally, the thread-safe
+// alternative strerror_r varies in semantics across platforms. Use these
+// functions instead.
+
+// Thread-safe strerror function with dependable semantics that never fails.
+// It will write the string form of error "err" to buffer buf of length len.
+// If there is an error calling the OS's strerror_r() function then a message to
+// that effect will be printed into buf, truncating if necessary. The final
+// result is always null-terminated. The value of errno is never changed.
+//
+// Use this instead of strerror_r().
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void safe_strerror_r(int err, char* buf, size_t len);
+
+// Calls safe_strerror_r with a buffer of suitable size and returns the result
+// in a C++ string.
+//
+// Use this instead of strerror(). Note though that safe_strerror_r will be
+// more robust in the case of heap corruption errors, since it doesn't need to
+// allocate a string.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) std::string safe_strerror(int err);
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
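A short usage sketch (hypothetical helper; the failed open(2) call is only an example of where errno comes from):

#include <fcntl.h>
#include <unistd.h>

#include <cerrno>
#include <string>

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/safe_strerror.h"

// Returns a human-readable reason when open(2) fails, without the
// thread-safety pitfalls of plain strerror().
std::string DescribeOpenFailure(const char* path) {
  int fd = open(path, O_RDONLY);
  if (fd >= 0) {
    close(fd);
    return "open succeeded";
  }
  return partition_alloc::internal::base::safe_strerror(errno);
}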
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h
new file mode 100644
index 0000000..f42fb15
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h
@@ -0,0 +1,46 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_PROCESS_PROCESS_HANDLE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_PROCESS_PROCESS_HANDLE_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+#endif
+
+#if BUILDFLAG(IS_FUCHSIA)
+#include <zircon/types.h>
+#endif
+
+namespace partition_alloc::internal::base {
+
+// ProcessHandle is a platform specific type which represents the underlying OS
+// handle to a process.
+// ProcessId is a number which identifies the process in the OS.
+#if BUILDFLAG(IS_WIN)
+typedef DWORD ProcessId;
+const ProcessId kNullProcessId = 0;
+#elif BUILDFLAG(IS_FUCHSIA)
+typedef zx_koid_t ProcessId;
+const ProcessId kNullProcessId = ZX_KOID_INVALID;
+#elif BUILDFLAG(IS_POSIX)
+// On POSIX, our ProcessHandle will just be the PID.
+typedef pid_t ProcessId;
+const ProcessId kNullProcessId = 0;
+#endif  // BUILDFLAG(IS_WIN)
+
+// Returns the id of the current process.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes (use GetUniqueIdForProcess if uniqueness is required).
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ProcessId GetCurrentProcId();
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_PROCESS_PROCESS_HANDLE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle_posix.cc
new file mode 100644
index 0000000..f807f93
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle_posix.cc
@@ -0,0 +1,15 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h"
+
+#include <unistd.h>
+
+namespace partition_alloc::internal::base {
+
+ProcessId GetCurrentProcId() {
+  return getpid();
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle_win.cc
new file mode 100644
index 0000000..1eed450
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle_win.cc
@@ -0,0 +1,15 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/process/process_handle.h"
+
+#include <windows.h>
+
+namespace partition_alloc::internal::base {
+
+ProcessId GetCurrentProcId() {
+  return ::GetCurrentProcessId();
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.cc
new file mode 100644
index 0000000..00f1721
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.cc
@@ -0,0 +1,74 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+namespace partition_alloc::internal::base {
+
+uint64_t RandUint64() {
+  uint64_t number;
+  RandBytes(&number, sizeof(number));
+  return number;
+}
+
+uint64_t RandGenerator(uint64_t range) {
+  PA_BASE_DCHECK(range > 0u);
+  // We must discard random results above this number, as they would
+  // make the random generator non-uniform (consider e.g. if
+  // MAX_UINT64 was 7 and |range| was 5, then a result of 1 would be twice
+  // as likely as a result of 3 or 4).
+  uint64_t max_acceptable_value =
+      (std::numeric_limits<uint64_t>::max() / range) * range - 1;
+
+  uint64_t value;
+  do {
+    value = base::RandUint64();
+  } while (value > max_acceptable_value);
+
+  return value % range;
+}
+
+InsecureRandomGenerator::InsecureRandomGenerator() {
+  a_ = base::RandUint64();
+  b_ = base::RandUint64();
+}
+
+void InsecureRandomGenerator::ReseedForTesting(uint64_t seed) {
+  a_ = seed;
+  b_ = seed;
+}
+
+uint64_t InsecureRandomGenerator::RandUint64() {
+  // Using XorShift128+, which is simple and widely used. See
+  // https://en.wikipedia.org/wiki/Xorshift#xorshift+ for details.
+  uint64_t t = a_;
+  const uint64_t s = b_;
+
+  a_ = s;
+  t ^= t << 23;
+  t ^= t >> 17;
+  t ^= s ^ (s >> 26);
+  b_ = t;
+
+  return t + s;
+}
+
+uint32_t InsecureRandomGenerator::RandUint32() {
+  // The generator usually returns a uint64_t; truncate it.
+  //
+  // It is noted in this paper (https://arxiv.org/abs/1810.05313) that the
+  // lowest 32 bits fail some statistical tests from the Big Crush
+  // suite. Use the higher ones instead.
+  return this->RandUint64() >> 32;
+}
+
+}  // namespace partition_alloc::internal::base
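RandGenerator above avoids modulo bias through rejection sampling: draws above the largest whole multiple of |range| are discarded before the final modulo, so every residue is equally likely. The same shape in isolation (SomeUniformU64 is a stand-in for base::RandUint64(), purely for illustration):

#include <cstdint>
#include <limits>
#include <random>

// Hypothetical uniform 64-bit source standing in for base::RandUint64().
uint64_t SomeUniformU64() {
  static std::mt19937_64 rng{std::random_device{}()};
  return rng();
}

uint64_t UnbiasedBelow(uint64_t range) {
  // Largest value still inside a complete block of |range| consecutive values;
  // accepting anything above it would over-weight the small residues.
  const uint64_t max_acceptable =
      (std::numeric_limits<uint64_t>::max() / range) * range - 1;
  uint64_t value;
  do {
    value = SomeUniformU64();
  } while (value > max_acceptable);
  return value % range;
}

With small numbers: for an 8-bit source (0..255) and range 10, max_acceptable would be (255 / 10) * 10 - 1 = 249, so draws of 250..255 are re-drawn instead of mapping onto residues 0..5 a second time.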
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h
new file mode 100644
index 0000000..937c637
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h
@@ -0,0 +1,103 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_RAND_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_RAND_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/gtest_prod_util.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+class RandomGenerator;
+
+namespace internal {
+template <typename QuarantineEntry, size_t CountCapacity>
+class LightweightQuarantineList;
+}
+}  // namespace partition_alloc
+
+namespace partition_alloc::internal::base {
+
+// Returns a random number in range [0, UINT64_MAX]. Thread-safe.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) uint64_t RandUint64();
+
+// Returns a random number in range [0, range).  Thread-safe.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+uint64_t RandGenerator(uint64_t range);
+
+// Fills |output_length| bytes of |output| with random data. Thread-safe.
+//
+// Although implementations are required to use a cryptographically secure
+// random number source, code outside of base/ that relies on this should use
+// crypto::RandBytes instead to ensure the requirement is easily discoverable.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void RandBytes(void* output, size_t output_length);
+
+// Fast, insecure pseudo-random number generator.
+//
+// WARNING: This is not the generator you are looking for. This has significant
+// caveats:
+//   - It is non-cryptographic, so it is easy to misuse.
+//   - It is neither fork() nor clone()-safe.
+//   - Synchronization is up to the client.
+//
+// Always prefer base::Rand*() above, unless you have a use case where its
+// overhead is too high, or system calls are disallowed.
+//
+// Performance: As of 2021, rough overhead on Linux on a desktop machine of
+// base::RandUint64() is ~800ns per call (it performs a system call). On Windows
+// it is lower. On the same machine, this generator's cost is ~2ns per call,
+// regardless of platform.
+//
+// This is different from |Rand*()| above as it is guaranteed to never make a
+// system call to generate a new number, except to seed it.  This should *never*
+// be used for cryptographic applications, and is not thread-safe.
+//
+// It is seeded using base::RandUint64() in the constructor, meaning that it
+// doesn't need to be seeded. It can be re-seeded though, with
+// ReseedForTesting(). Its period is long enough that it should not need to be
+// re-seeded during use.
+//
+// Uses the XorShift128+ generator under the hood.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) InsecureRandomGenerator {
+ public:
+  // Never use outside testing, not enough entropy.
+  void ReseedForTesting(uint64_t seed);
+
+  uint32_t RandUint32();
+  uint64_t RandUint64();
+
+ private:
+  InsecureRandomGenerator();
+  // State.
+  uint64_t a_ = 0, b_ = 0;
+
+  // Before adding a new friend class, make sure that the overhead of
+  // base::Rand*() is too high, using something more representative than a
+  // microbenchmark.
+  //
+  // PartitionAlloc allocations should not take more than 40-50ns per
+  // malloc()/free() pair, otherwise high-level benchmarks regress. It also
+  // does not need a secure PRNG, as the randomness is used for ASLR and for
+  // zeroing some allocations at free() time.
+  friend class ::partition_alloc::RandomGenerator;
+  template <typename QuarantineEntry, size_t CountCapacity>
+  friend class ::partition_alloc::internal::LightweightQuarantineList;
+
+  PA_FRIEND_TEST_ALL_PREFIXES(
+      PartitionAllocBaseRandUtilTest,
+      InsecureRandomGeneratorProducesBothValuesOfAllBits);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocBaseRandUtilTest,
+                              InsecureRandomGeneratorChiSquared);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocBaseRandUtilTest,
+                              InsecureRandomGeneratorRandDouble);
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_RAND_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_fuchsia.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_fuchsia.cc
new file mode 100644
index 0000000..b19fed7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_fuchsia.cc
@@ -0,0 +1,15 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+
+#include <zircon/syscalls.h>
+
+namespace partition_alloc::internal::base {
+
+void RandBytes(void* output, size_t output_length) {
+  zx_cprng_draw(output, output_length);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_pa_unittest.cc
new file mode 100644
index 0000000..f04c083
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_pa_unittest.cc
@@ -0,0 +1,242 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base {
+
+TEST(PartitionAllocBaseRandUtilTest, RandBytes) {
+  const size_t buffer_size = 50;
+  char buffer[buffer_size];
+  memset(buffer, 0, buffer_size);
+  base::RandBytes(buffer, buffer_size);
+  std::sort(buffer, buffer + buffer_size);
+  // Probability of occurrence of less than 25 unique bytes in 50 random bytes
+  // is below 10^-25.
+  EXPECT_GT(std::unique(buffer, buffer + buffer_size) - buffer, 25);
+}
+
+// Verify that calling base::RandBytes with an empty buffer doesn't fail.
+TEST(PartitionAllocBaseRandUtilTest, RandBytes0) {
+  base::RandBytes(nullptr, 0);
+}
+
+// Make sure that it is still appropriate to use RandGenerator in conjunction
+// with std::random_shuffle().
+TEST(PartitionAllocBaseRandUtilTest, RandGeneratorForRandomShuffle) {
+  EXPECT_EQ(base::RandGenerator(1), 0U);
+  EXPECT_LE(std::numeric_limits<ptrdiff_t>::max(),
+            std::numeric_limits<int64_t>::max());
+}
+
+TEST(PartitionAllocBaseRandUtilTest, RandGeneratorIsUniform) {
+  // Verify that RandGenerator has a uniform distribution. This is a
+  // regression test that consistently failed when RandGenerator was
+  // implemented this way:
+  //
+  //   return base::RandUint64() % max;
+  //
+  // A degenerate case for such an implementation is e.g. a top of
+  // range that is 2/3rds of the way to MAX_UINT64, in which case the
+  // bottom half of the range would be twice as likely to occur as the
+  // top half. A bit of calculus care of jar@ shows that the largest
+  // measurable delta is when the top of the range is 3/4ths of the
+  // way, so that's what we use in the test.
+  constexpr uint64_t kTopOfRange =
+      (std::numeric_limits<uint64_t>::max() / 4ULL) * 3ULL;
+  constexpr double kExpectedAverage = static_cast<double>(kTopOfRange / 2);
+  constexpr double kAllowedVariance = kExpectedAverage / 50.0;  // +/- 2%
+  constexpr int kMinAttempts = 1000;
+  constexpr int kMaxAttempts = 1000000;
+
+  double cumulative_average = 0.0;
+  int count = 0;
+  while (count < kMaxAttempts) {
+    uint64_t value = base::RandGenerator(kTopOfRange);
+    cumulative_average = (count * cumulative_average + value) / (count + 1);
+
+    // Don't quit too quickly for things to start converging, or we may have
+    // a false positive.
+    if (count > kMinAttempts &&
+        kExpectedAverage - kAllowedVariance < cumulative_average &&
+        cumulative_average < kExpectedAverage + kAllowedVariance) {
+      break;
+    }
+
+    ++count;
+  }
+
+  ASSERT_LT(count, kMaxAttempts) << "Expected average was " << kExpectedAverage
+                                 << ", average ended at " << cumulative_average;
+}
+
+TEST(PartitionAllocBaseRandUtilTest, RandUint64ProducesBothValuesOfAllBits) {
+  // This tests to see that our underlying random generator is good
+  // enough, for some value of good enough.
+  uint64_t kAllZeros = 0ULL;
+  uint64_t kAllOnes = ~kAllZeros;
+  uint64_t found_ones = kAllZeros;
+  uint64_t found_zeros = kAllOnes;
+
+  for (size_t i = 0; i < 1000; ++i) {
+    uint64_t value = base::RandUint64();
+    found_ones |= value;
+    found_zeros &= value;
+
+    if (found_zeros == kAllZeros && found_ones == kAllOnes) {
+      return;
+    }
+  }
+
+  FAIL() << "Didn't achieve all bit values in maximum number of tries.";
+}
+
+// Benchmark test for RandBytes().  Disabled since it's intentionally slow and
+// does not test anything that isn't already tested by the existing RandBytes()
+// tests.
+TEST(PartitionAllocBaseRandUtilTest, DISABLED_RandBytesPerf) {
+  // Benchmark the performance of |kTestIterations| of RandBytes() using a
+  // buffer size of |kTestBufferSize|.
+  const int kTestIterations = 10;
+  const size_t kTestBufferSize = 1 * 1024 * 1024;
+
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
+  const TimeTicks now = TimeTicks::Now();
+  for (int i = 0; i < kTestIterations; ++i) {
+    base::RandBytes(buffer.get(), kTestBufferSize);
+  }
+  const TimeTicks end = TimeTicks::Now();
+
+  PA_LOG(INFO) << "RandBytes(" << kTestBufferSize
+               << ") took: " << (end - now).InMicroseconds() << "µs";
+}
+
+TEST(PartitionAllocBaseRandUtilTest,
+     InsecureRandomGeneratorProducesBothValuesOfAllBits) {
+  // This tests to see that our underlying random generator is good
+  // enough, for some value of good enough.
+  uint64_t kAllZeros = 0ULL;
+  uint64_t kAllOnes = ~kAllZeros;
+  uint64_t found_ones = kAllZeros;
+  uint64_t found_zeros = kAllOnes;
+
+  InsecureRandomGenerator generator;
+
+  for (size_t i = 0; i < 1000; ++i) {
+    uint64_t value = generator.RandUint64();
+    found_ones |= value;
+    found_zeros &= value;
+
+    if (found_zeros == kAllZeros && found_ones == kAllOnes) {
+      return;
+    }
+  }
+
+  FAIL() << "Didn't achieve all bit values in maximum number of tries.";
+}
+
+namespace {
+
+constexpr double kXp1Percent = -2.33;
+constexpr double kXp99Percent = 2.33;
+
+double ChiSquaredCriticalValue(double nu, double x_p) {
+  // From "The Art Of Computer Programming" (TAOCP), Volume 2, Section 3.3.1,
+  // Table 1. This is the asymptotic value for nu > 30, up to O(1 / sqrt(nu)).
+  return nu + sqrt(2. * nu) * x_p + 2. / 3. * (x_p * x_p) - 2. / 3.;
+}
+
+int ExtractBits(uint64_t value, int from_bit, int num_bits) {
+  return (value >> from_bit) & ((1 << num_bits) - 1);
+}
+
+// Performs a Chi-Squared test on a subset of |num_bits| extracted starting from
+// |from_bit| in the generated value.
+//
+// See TAOCP, Volume 2, Section 3.3.1, and
+// https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test for details.
+//
+// This is only one of the many, many random number generator tests we could
+// do, but they are cumbersome, as they are typically very slow and expected to
+// fail from time to time, due to their probabilistic nature.
+//
+// The generator we use has however been vetted with the BigCrush test suite
+// (TestU01), so this should suffice as a smoke test that our implementation is
+// not obviously wrong.
+bool ChiSquaredTest(InsecureRandomGenerator& gen,
+                    size_t n,
+                    int from_bit,
+                    int num_bits) {
+  const int range = 1 << num_bits;
+  PA_BASE_CHECK(static_cast<int>(n % range) == 0)
+      << "Makes computations simpler";
+  std::vector<size_t> samples(range, 0);
+
+  // Count how many samples of each value are found. All buckets should be
+  // almost equal if the generator is suitably uniformly random.
+  for (size_t i = 0; i < n; i++) {
+    int sample = ExtractBits(gen.RandUint64(), from_bit, num_bits);
+    samples[sample] += 1;
+  }
+
+  // Compute the Chi-Squared statistic, which is:
+  // \Sum_{k=0}^{range-1} \frac{(count - expected)^2}{expected}
+  double chi_squared = 0.;
+  double expected_count = n / range;
+  for (size_t sample_count : samples) {
+    double deviation = sample_count - expected_count;
+    chi_squared += (deviation * deviation) / expected_count;
+  }
+
+  // The generator should produce numbers that are not too far off (chi_squared
+  // below the upper quantile), but also not suspiciously close to the ideal
+  // distribution (chi_squared above the lower quantile).
+  //
+  // See The Art Of Computer Programming, Volume 2, Section 3.3.1 for details.
+  return chi_squared > ChiSquaredCriticalValue(range - 1, kXp1Percent) &&
+         chi_squared < ChiSquaredCriticalValue(range - 1, kXp99Percent);
+}
+
+}  // namespace
+
+TEST(PartitionAllocBaseRandUtilTest, InsecureRandomGeneratorChiSquared) {
+  constexpr int kIterations = 50;
+
+  // Specifically test the low bits, which are usually weaker in random number
+  // generators. We don't use them for the 32 bit number generation, but let's
+  // make sure they are still suitable.
+  for (int start_bit : {1, 2, 3, 8, 12, 20, 32, 48, 54}) {
+    int pass_count = 0;
+    for (int i = 0; i < kIterations; i++) {
+      size_t samples = 1 << 16;
+      InsecureRandomGenerator gen;
+      // Fix the seed to make the test non-flaky.
+      gen.ReseedForTesting(kIterations + 1);
+      bool pass = ChiSquaredTest(gen, samples, start_bit, 8);
+      pass_count += pass;
+    }
+
+    // We exclude 1% on each side, so we expect 98% of tests to pass, meaning 98
+    // * kIterations / 100. However this is asymptotic, so add a bit of leeway.
+    int expected_pass_count = (kIterations * 98) / 100;
+    EXPECT_GE(pass_count, expected_pass_count - ((kIterations * 2) / 100))
+        << "For start_bit = " << start_bit;
+  }
+}
+
+}  // namespace partition_alloc::internal::base
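For intuition on the acceptance band used above: with num_bits = 8 there are 256 buckets, so nu = 255, and the two quantile constants give critical values of roughly 205.3 and 310.6; a run passes when its chi-squared statistic over 2^16 samples falls between them. A tiny sketch reproducing that arithmetic (standalone, not part of the test):

#include <cmath>
#include <cstdio>

// Same asymptotic approximation as ChiSquaredCriticalValue above.
double CriticalValue(double nu, double x_p) {
  return nu + std::sqrt(2. * nu) * x_p + 2. / 3. * (x_p * x_p) - 2. / 3.;
}

int main() {
  // Prints approximately 205.3 and 310.6 for the 1% and 99% quantiles.
  std::printf("%.1f %.1f\n", CriticalValue(255, -2.33), CriticalValue(255, 2.33));
  return 0;
}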
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_posix.cc
new file mode 100644
index 0000000..20d4c66
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_posix.cc
@@ -0,0 +1,120 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <sstream>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_MAC)
+// TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK.
+// (See below.)
+#include <sys/random.h>
+#endif
+
+namespace {
+
+#if BUILDFLAG(IS_AIX)
+// AIX has no 64-bit support for O_CLOEXEC.
+static constexpr int kOpenFlags = O_RDONLY;
+#else
+static constexpr int kOpenFlags = O_RDONLY | O_CLOEXEC;
+#endif
+
+// We keep the file descriptor for /dev/urandom around so we don't need to
+// reopen it (which is expensive), and since we may not even be able to reopen
+// it if we are later put in a sandbox. This class wraps the file descriptor so
+// we can use a static-local variable to handle opening it on the first access.
+class URandomFd {
+ public:
+  URandomFd() : fd_(PA_HANDLE_EINTR(open("/dev/urandom", kOpenFlags))) {
+    PA_BASE_CHECK(fd_ >= 0) << "Cannot open /dev/urandom";
+  }
+
+  ~URandomFd() { close(fd_); }
+
+  int fd() const { return fd_; }
+
+ private:
+  const int fd_;
+};
+
+int GetUrandomFD() {
+  static partition_alloc::internal::base::NoDestructor<URandomFd> urandom_fd;
+  return urandom_fd->fd();
+}
+
+}  // namespace
+
+namespace partition_alloc::internal::base {
+
+// NOTE: In an ideal future, all implementations of this function will just
+// wrap BoringSSL's `RAND_bytes`. TODO(crbug.com/995996): Figure out the
+// build/test/performance issues with dcheng's CL
+// (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land
+// it or some form of it.
+void RandBytes(void* output, size_t output_length) {
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+  // Use `syscall(__NR_getrandom...` to avoid a dependency on
+  // `third_party/linux_syscall_support.h`.
+  //
+  // Here in PartitionAlloc, we don't need to look before we leap
+  // because we know that both Linux and CrOS only support kernels
+  // that do have this syscall defined. This diverges from upstream
+  // `//base` behavior both here and below.
+  const ssize_t r =
+      PA_HANDLE_EINTR(syscall(__NR_getrandom, output, output_length, 0));
+
+  // Return success only on total success. In case errno == ENOSYS (or any other
+  // error), we'll fall through to reading from urandom below.
+  if (output_length == static_cast<size_t>(r)) {
+    PA_MSAN_UNPOISON(output, output_length);
+    return;
+  }
+#elif BUILDFLAG(IS_MAC)
+  // TODO(crbug.com/995996): Enable this on iOS too, when sys/random.h arrives
+  // in its SDK.
+  if (getentropy(output, output_length) == 0) {
+    return;
+  }
+#endif
+  // If getrandom(2) above returned with an error and the /dev/urandom fallback
+  // took place on Linux/ChromeOS bots, they would fail with a CHECK in
+  // nacl_helper. The latter assumes that the number of open file descriptors
+  // must be constant. The nacl_helper knows about the FD from
+  // //base/rand_utils, but is not aware of the urandom_fd from this file (see
+  // CheckForExpectedNumberOfOpenFds).
+  //
+  // *  On `linux_chromium_asan_rel_ng` in
+  //    `ContentBrowserTest.RendererCrashCallStack`:
+  //    ```
+  //    [FATAL:rand_util_posix.cc(45)] Check failed: fd_ >= 0. Cannot open
+  //    /dev/urandom
+  //    ```
+  // *  On `linux-lacros-rel` in
+  //    `NaClBrowserTestGLibc.CrashInCallback`:
+  //    ```
+  //    2023-07-03T11:31:13.115755Z FATAL nacl_helper:
+  //    [nacl_sandbox_linux.cc(178)] Check failed: expected_num_fds ==
+  //    sandbox::ProcUtil::CountOpenFds(proc_fd_.get()) (6 vs. 7)
+  //    ```
+  const int urandom_fd = GetUrandomFD();
+  const bool success =
+      ReadFromFD(urandom_fd, static_cast<char*>(output), output_length);
+  PA_BASE_CHECK(success);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_win.cc
new file mode 100644
index 0000000..91271c5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_win.cc
@@ -0,0 +1,43 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <windows.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+// Prototype for ProcessPrng.
+// See: https://learn.microsoft.com/en-us/windows/win32/seccng/processprng
+extern "C" {
+BOOL WINAPI ProcessPrng(PBYTE pbData, SIZE_T cbData);
+}
+
+namespace partition_alloc::internal::base {
+
+void RandBytes(void* output, size_t output_length) {
+  // Import bcryptprimitives directly rather than cryptbase to avoid opening a
+  // handle to \\Device\KsecDD in the renderer.
+  // Note: we cannot use a magic static here as PA runs too early in process
+  // startup, but this should be safe as the process will be single-threaded
+  // when this first runs.
+  static decltype(&ProcessPrng) process_prng_fn = nullptr;
+  if (!process_prng_fn) {
+    HMODULE hmod = LoadLibraryW(L"bcryptprimitives.dll");
+    PA_BASE_CHECK(hmod);
+    process_prng_fn = reinterpret_cast<decltype(&ProcessPrng)>(
+        GetProcAddress(hmod, "ProcessPrng"));
+    PA_BASE_CHECK(process_prng_fn);
+  }
+  BOOL success = process_prng_fn(static_cast<BYTE*>(output), output_length);
+  // ProcessPrng is documented to always return TRUE.
+  PA_BASE_CHECK(success);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h
new file mode 100644
index 0000000..7c1cef4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h
@@ -0,0 +1,56 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
+
+#include <errno.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+// ScopedClearLastError stores and resets the value of thread local error codes
+// (errno, GetLastError()), and restores them in the destructor. This is useful
+// to avoid side effects on these values in instrumentation functions that
+// interact with the OS.
+
+// Common implementation of ScopedClearLastError for all platforms. Use
+// ScopedClearLastError instead.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ScopedClearLastErrorBase {
+ public:
+  ScopedClearLastErrorBase() : last_errno_(errno) { errno = 0; }
+  ScopedClearLastErrorBase(const ScopedClearLastErrorBase&) = delete;
+  ScopedClearLastErrorBase& operator=(const ScopedClearLastErrorBase&) = delete;
+  ~ScopedClearLastErrorBase() { errno = last_errno_; }
+
+ private:
+  const int last_errno_;
+};
+
+#if BUILDFLAG(IS_WIN)
+
+// Windows specific implementation of ScopedClearLastError.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ScopedClearLastError
+    : public ScopedClearLastErrorBase {
+ public:
+  ScopedClearLastError();
+  ScopedClearLastError(const ScopedClearLastError&) = delete;
+  ScopedClearLastError& operator=(const ScopedClearLastError&) = delete;
+  ~ScopedClearLastError();
+
+ private:
+  const unsigned long last_system_error_;
+};
+
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+
+using ScopedClearLastError = ScopedClearLastErrorBase;
+
+#endif  // BUILDFLAG(IS_WIN)
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
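Usage sketch (hypothetical instrumentation hook): errno reads as 0 inside the scope, and whatever the scoped code does to it is undone when the object is destroyed, so the caller's errno (and, on Windows, GetLastError()) is preserved.

#include <cerrno>

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h"

void InstrumentationHook() {
  partition_alloc::internal::base::ScopedClearLastError scoped_error;
  // errno is 0 here; the value set below is discarded when |scoped_error| goes
  // out of scope, restoring whatever the caller had.
  errno = EINVAL;
}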
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc
new file mode 100644
index 0000000..2bbfee2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_pa_unittest.cc
@@ -0,0 +1,57 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#endif  // BUILDFLAG(IS_WIN)
+
+namespace partition_alloc::internal::base {
+
+TEST(PAScopedClearLastError, TestNoError) {
+  errno = 1;
+  {
+    ScopedClearLastError clear_error;
+    EXPECT_EQ(0, errno);
+  }
+  EXPECT_EQ(1, errno);
+}
+
+TEST(PAScopedClearLastError, TestError) {
+  errno = 1;
+  {
+    ScopedClearLastError clear_error;
+    errno = 2;
+  }
+  EXPECT_EQ(1, errno);
+}
+
+#if BUILDFLAG(IS_WIN)
+
+TEST(PAScopedClearLastError, TestNoErrorWin) {
+  ::SetLastError(1);
+  {
+    ScopedClearLastError clear_error;
+    EXPECT_EQ(logging::SystemErrorCode(0), ::GetLastError());
+  }
+  EXPECT_EQ(logging::SystemErrorCode(1), ::GetLastError());
+}
+
+TEST(PAScopedClearLastError, TestErrorWin) {
+  ::SetLastError(1);
+  {
+    ScopedClearLastError clear_error;
+    ::SetLastError(2);
+  }
+  EXPECT_EQ(logging::SystemErrorCode(1), ::GetLastError());
+}
+
+#endif  // BUILDFLAG(IS_WIN)
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_win.cc
new file mode 100644
index 0000000..a4c9549
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error_win.cc
@@ -0,0 +1,20 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h"
+
+#include <windows.h>
+
+namespace partition_alloc::internal::base {
+
+ScopedClearLastError::ScopedClearLastError()
+    : ScopedClearLastErrorBase(), last_system_error_(GetLastError()) {
+  SetLastError(0);
+}
+
+ScopedClearLastError::~ScopedClearLastError() {
+  SetLastError(last_system_error_);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.cc
new file mode 100644
index 0000000..290b77b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.cc
@@ -0,0 +1,214 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(IS_WIN)
+#include <unistd.h>
+#endif
+
+#include <cmath>
+#include <cstring>
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#define PA_RAW_DCHECK PA_RAW_CHECK
+#else
+#define PA_RAW_DCHECK(x) \
+  do {                   \
+    if (x) {             \
+    }                    \
+  } while (0)
+#endif
+
+namespace partition_alloc::internal::base::strings {
+
+namespace {
+
+constexpr size_t kNumDigits10 = 5u;
+
+constexpr uint64_t Pow10(unsigned exp) {
+  uint64_t ret = 1;
+  for (unsigned i = 0; i < exp; ++i) {
+    ret *= 10U;
+  }
+  return ret;
+}
+
+constexpr uint64_t Log10(uint64_t value) {
+  uint64_t ret = 0;
+  while (value != 0u) {
+    value = value / 10u;
+    ++ret;
+  }
+  return ret;
+}
+
+constexpr uint64_t GetDigits10(unsigned num_digits10) {
+  return Pow10(num_digits10);
+}
+
+}  // namespace
+
+template <typename T>
+void CStringBuilder::PutInteger(T value) {
+  // We need an array of chars whose size is:
+  // - floor(log10(max value)) + 1 chars for the given value,
+  // - 1 char for '-' (if negative), and
+  // - 1 char for '\0'.
+  char buffer[Log10(std::numeric_limits<T>::max()) + 3];
+  ssize_t n = base::strings::SafeSPrintf(buffer, "%d", value);
+  PA_RAW_DCHECK(n >= 0);
+  PA_RAW_DCHECK(static_cast<size_t>(n) < sizeof(buffer));
+  PutText(buffer, n);
+}
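+
+// Worked sizing example (editorial note, not part of the upstream source):
+// for T = int32_t, std::numeric_limits<T>::max() == 2147483647, so Log10()
+// returns 10 (its digit count) and the buffer is 13 bytes. That comfortably
+// holds the worst case "-2147483648" (11 chars) plus the terminating '\0'.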
+
+CStringBuilder& CStringBuilder::operator<<(char ch) {
+  PutText(&ch, 1);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(const char* text) {
+  PutText(text);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(float value) {
+  PutFloatingPoint(value, kNumDigits10);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(double value) {
+  PutFloatingPoint(value, kNumDigits10);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(int value) {
+  PutInteger(value);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(unsigned int value) {
+  PutInteger(value);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(long value) {
+  PutInteger(value);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(unsigned long value) {
+  PutInteger(value);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(long long value) {
+  PutInteger(value);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(unsigned long long value) {
+  PutInteger(value);
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(const void* value) {
+  if (!value) {
+    PutText("(nil)");
+  } else {
+    // We need an array of chars whose size is:
+    // - 2 chars per byte (00-FF), i.e. sizeof(const void*) * 2 chars in total,
+    // - 2 chars for "0x", and
+    // - 1 char for '\0'.
+    char buffer[sizeof(const void*) * 2 + 2 + 1];
+    ssize_t n = base::strings::SafeSPrintf(buffer, "%p", value);
+    PA_RAW_DCHECK(n > 0);
+    PA_RAW_DCHECK(static_cast<size_t>(n) < sizeof(buffer));
+    PutText(buffer, n);
+  }
+  return *this;
+}
+
+CStringBuilder& CStringBuilder::operator<<(std::nullptr_t) {
+  PutText("nullptr");
+  return *this;
+}
+
+const char* CStringBuilder::c_str() {
+  PA_RAW_DCHECK(buffer_ <= ptr_ && ptr_ < buffer_ + kBufferSize);
+  *ptr_ = '\0';
+  return buffer_;
+}
+
+void CStringBuilder::PutFloatingPoint(double value, unsigned num_digits10) {
+  switch (std::fpclassify(value)) {
+    case FP_INFINITE:
+      PutText(value < 0 ? "-inf" : "inf");
+      break;
+    case FP_NAN:
+      PutText("NaN");
+      break;
+    case FP_ZERO:
+      PutText("0");
+      break;
+    case FP_SUBNORMAL:
+      // Denormalized values are not supported.
+      PutNormalFloatingPoint(value > 0 ? std::numeric_limits<double>::min()
+                                       : -std::numeric_limits<double>::min(),
+                             num_digits10);
+      break;
+    case FP_NORMAL:
+    default:
+      PutNormalFloatingPoint(value, num_digits10);
+      break;
+  }
+}
+
+void CStringBuilder::PutNormalFloatingPoint(double value,
+                                            unsigned num_digits10) {
+  if (value < 0) {
+    PutText("-", 1);
+    value = -value;
+  }
+
+  int exponent = floor(log10(value));
+  double significand = value / pow(10, exponent);
+
+  char buffer[64];
+  ssize_t n = base::strings::SafeSPrintf(
+      buffer, "%d", lrint(significand * GetDigits10(num_digits10)));
+  PA_RAW_DCHECK(n > 0);
+  PA_RAW_DCHECK(static_cast<size_t>(n) < sizeof(buffer));
+  PutText(buffer, 1);
+  if (n > 1) {
+    PutText(".", 1);
+    PutText(buffer + 1, n - 1);
+  }
+  if (exponent != 0) {
+    n = base::strings::SafeSPrintf(buffer, "e%s%d", exponent > 0 ? "+" : "",
+                                   exponent);
+    PA_RAW_DCHECK(n > 0);
+    PA_RAW_DCHECK(static_cast<size_t>(n) < sizeof(buffer));
+    PutText(buffer, n);
+  }
+}
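+
+// Worked example (editorial note, not part of the upstream source): for the
+// value 0.0000725692 with num_digits10 == 5, exponent == floor(log10(v)) == -5
+// and the significand is about 7.25692. lrint(7.25692 * 10^5) == 725692, so
+// the digits "725692" are emitted as "7", ".", "25692", followed by "e-5",
+// producing "7.25692e-5" (see CStringBuilderTestPA.FloatingPoint).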
+
+void CStringBuilder::PutText(const char* text) {
+  PutText(text, strlen(text));
+}
+
+void CStringBuilder::PutText(const char* text, size_t length) {
+  PA_RAW_DCHECK(buffer_ <= ptr_ && ptr_ < buffer_ + kBufferSize);
+  while (ptr_ < buffer_ + kBufferSize - 1 && length > 0 && *text != '\0') {
+    *ptr_++ = *text++;
+    --length;
+  }
+}
+
+}  // namespace partition_alloc::internal::base::strings
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h
new file mode 100644
index 0000000..45cdbe4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h
@@ -0,0 +1,62 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_CSTRING_BUILDER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_CSTRING_BUILDER_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+#include <cstddef>
+
+#if !BUILDFLAG(IS_WIN)
+#include <unistd.h>
+#endif
+
+namespace partition_alloc::internal::base::strings {
+
+// Similar to std::ostringstream, but produces a C string, i.e. a
+// nul-terminated char array, instead of std::string. Because it is used
+// inside the memory allocator itself, this class must not allocate any
+// memory with malloc, aligned_malloc, calloc, and so on.
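+//
+// A minimal usage sketch (editorial example mirroring the unit tests; the
+// output sink below is hypothetical):
+//   void* ptr = ...;  // some address worth reporting
+//   CStringBuilder builder;
+//   builder << "allocation of " << 42 << " bytes at " << ptr << " failed\n";
+//   WriteToStderr(builder.c_str());  // the sink must not allocate either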
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) CStringBuilder {
+ public:
+  // If kBufferSize is too large, PA_LOG() and PA_BASE_*CHECK() consume much
+  // more stack space, which can overflow small stacks.
+  // ThreadTest.StartWithOptions_StackSize checks that threads can run with a
+  // specified stack size; with kBufferSize == 1024u that test fails on
+  // 32-bit bots.
+  static constexpr size_t kBufferSize = 256u;
+
+  CStringBuilder() : ptr_(buffer_) {}
+
+  CStringBuilder& operator<<(char ch);
+  CStringBuilder& operator<<(const char* text);
+  CStringBuilder& operator<<(float value);
+  CStringBuilder& operator<<(double value);
+  CStringBuilder& operator<<(int value);
+  CStringBuilder& operator<<(unsigned int value);
+  CStringBuilder& operator<<(long value);
+  CStringBuilder& operator<<(unsigned long value);
+  CStringBuilder& operator<<(long long value);
+  CStringBuilder& operator<<(unsigned long long value);
+  CStringBuilder& operator<<(const void* value);
+  CStringBuilder& operator<<(std::nullptr_t);
+  const char* c_str();
+
+ private:
+  template <typename T>
+  void PutInteger(T value);
+  void PutFloatingPoint(double value, unsigned num_digits10);
+  void PutNormalFloatingPoint(double value, unsigned num_digits10);
+  void PutText(const char* text);
+  void PutText(const char* text, size_t length);
+
+  char buffer_[kBufferSize];
+  char* ptr_;
+};
+
+}  // namespace partition_alloc::internal::base::strings
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_CSTRING_BUILDER_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder_pa_unittest.cc
new file mode 100644
index 0000000..865fbe7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder_pa_unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/cstring_builder.h"
+
+#include <cmath>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base::strings {
+
+namespace {
+
+template <typename T>
+void FillBuffer(CStringBuilder& builder, T value, unsigned count) {
+  for (unsigned i = 0; i < count; ++i) {
+    builder << value;
+  }
+}
+
+}  // namespace
+
+TEST(CStringBuilderTestPA, String) {
+  CStringBuilder builder;
+  const char buffer[] = "Buffer\n";
+  builder << "Hello, World" << '\n' << buffer << '\n';
+  EXPECT_EQ("Hello, World\nBuffer\n\n", std::string(builder.c_str()));
+}
+
+TEST(CStringBuilderTestPA, Char) {
+  CStringBuilder builder;
+  builder << 'c' << 'h' << 'a' << ' ' << 'r' << '\\' << '\n';
+  EXPECT_EQ("cha r\\\n", std::string(builder.c_str()));
+  builder << '\0' << '\n';
+  EXPECT_EQ("cha r\\\n\n", std::string(builder.c_str()));
+}
+
+TEST(CStringBuilderTestPA, Integer) {
+  CStringBuilder builder1;
+  builder1 << std::numeric_limits<uint64_t>::max();
+  EXPECT_EQ("18446744073709551615", std::string(builder1.c_str()));
+
+  builder1 << " " << std::numeric_limits<int64_t>::min();
+  EXPECT_EQ("18446744073709551615 -9223372036854775808",
+            std::string(builder1.c_str()));
+
+  CStringBuilder builder2;
+  builder2 << std::numeric_limits<int64_t>::max();
+  EXPECT_EQ("9223372036854775807", std::string(builder2.c_str()));
+
+  CStringBuilder builder3;
+  builder3 << std::numeric_limits<int64_t>::min();
+  EXPECT_EQ("-9223372036854775808", std::string(builder3.c_str()));
+}
+
+TEST(CStringBuilderTestPA, FloatingPoint) {
+  CStringBuilder builder1;
+  builder1 << 3.1415926;
+  EXPECT_EQ("3.14159", std::string(builder1.c_str()));
+
+  CStringBuilder builder2;
+  builder2 << 0.0000725692;
+  EXPECT_EQ("7.25692e-5", std::string(builder2.c_str()));
+
+  // Zero
+  CStringBuilder builder3;
+  builder3 << 0.0;
+  EXPECT_EQ("0", std::string(builder3.c_str()));
+
+  // min()
+  CStringBuilder builder4;
+  builder4 << std::numeric_limits<double>::min();
+  EXPECT_EQ("2.22507e-308", std::string(builder4.c_str()));
+
+  // Subnormal value
+  CStringBuilder builder5;
+  builder5 << std::numeric_limits<double>::denorm_min();
+  // denorm_min() < min()
+  EXPECT_EQ("2.22507e-308", std::string(builder5.c_str()));
+
+  // Positive Infinity
+  CStringBuilder builder6;
+  builder6 << std::numeric_limits<double>::infinity();
+  EXPECT_EQ("inf", std::string(builder6.c_str()));
+
+  // Negative Infinity
+  CStringBuilder builder7;
+  builder7 << -std::numeric_limits<double>::infinity();
+  EXPECT_EQ("-inf", std::string(builder7.c_str()));
+
+  // max()
+  CStringBuilder builder8;
+  builder8 << std::numeric_limits<double>::max();
+  EXPECT_EQ("1.79769e+308", std::string(builder8.c_str()));
+
+  // NaN
+  CStringBuilder builder9;
+  builder9 << nan("");
+  EXPECT_EQ("NaN", std::string(builder9.c_str()));
+}
+
+TEST(CStringBuilderTestPA, FillBuffer) {
+  CStringBuilder builder1;
+  FillBuffer(builder1, ' ', CStringBuilder::kBufferSize * 2);
+  EXPECT_EQ(CStringBuilder::kBufferSize - 1, strlen(builder1.c_str()));
+
+  CStringBuilder builder2;
+  FillBuffer(builder2, 3.141592653, CStringBuilder::kBufferSize * 2);
+  EXPECT_EQ(CStringBuilder::kBufferSize - 1, strlen(builder2.c_str()));
+
+  CStringBuilder builder3;
+  FillBuffer(builder3, 3.14159f, CStringBuilder::kBufferSize * 2);
+  EXPECT_EQ(CStringBuilder::kBufferSize - 1, strlen(builder3.c_str()));
+
+  CStringBuilder builder4;
+  FillBuffer(builder4, 65535u, CStringBuilder::kBufferSize * 2);
+  EXPECT_EQ(CStringBuilder::kBufferSize - 1, strlen(builder4.c_str()));
+
+  CStringBuilder builder5;
+  FillBuffer(builder5, "Dummy Text", CStringBuilder::kBufferSize * 2);
+  EXPECT_EQ(CStringBuilder::kBufferSize - 1, strlen(builder5.c_str()));
+}
+
+TEST(CStringBuilderTestPA, Pointer) {
+  CStringBuilder builder1;
+  char* str = reinterpret_cast<char*>(0x80000000u);
+  void* ptr = str;
+  builder1 << ptr;
+  EXPECT_EQ("0x80000000", std::string(builder1.c_str()));
+
+  CStringBuilder builder2;
+  builder2 << reinterpret_cast<void*>(0xdeadbeafu);
+  EXPECT_EQ("0xDEADBEAF", std::string(builder2.c_str()));
+
+  // nullptr
+  CStringBuilder builder3;
+  builder3 << nullptr;
+  EXPECT_EQ("nullptr", std::string(builder3.c_str()));
+
+  CStringBuilder builder4;
+  builder4 << reinterpret_cast<unsigned*>(0);
+  EXPECT_EQ("(nil)", std::string(builder4.c_str()));
+}
+
+}  // namespace partition_alloc::internal::base::strings
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.cc
new file mode 100644
index 0000000..777089c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.cc
@@ -0,0 +1,712 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "build/build_config.h"
+
+#if !defined(NDEBUG)
+// In debug builds, we use RAW_CHECK() to print useful error messages, if
+// SafeSPrintf() is called with broken arguments.
+// As our contract promises that SafeSPrintf() can be called from any
+// restricted run-time context, it is not actually safe to call logging
+// functions from it; and we only ever do so for debug builds and hope for the
+// best. We should _never_ call any logging function other than RAW_CHECK(),
+// and we should _never_ include any logging code that is active in production
+// builds. Most notably, we should not include these logging functions in
+// unofficial release builds, even though those builds would otherwise have
+// DCHECKS() enabled.
+// In other words: please do not remove the #ifdef around this #include.
+// Instead, in production builds we opt for returning a degraded result
+// whenever an error is encountered.
+// E.g. The broken function call
+//        SafeSPrintf("errno = %d (%x)", errno, strerror(errno))
+//      will print something like
+//        errno = 13, (%x)
+//      instead of
+//        errno = 13 (Access denied)
+//      In most of the anticipated use cases, that's probably the preferred
+//      behavior.
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#define DEBUG_CHECK PA_RAW_CHECK
+#else
+#define DEBUG_CHECK(x) \
+  do {                 \
+    if (x) {           \
+    }                  \
+  } while (0)
+#endif
+
+namespace partition_alloc::internal::base::strings {
+
+// The code in this file is extremely careful to be async-signal-safe.
+//
+// Most obviously, we avoid calling any code that could dynamically allocate
+// memory. Doing so would almost certainly result in bugs and dead-locks.
+// We also avoid calling any other STL functions that could have unintended
+// side-effects involving memory allocation or access to other shared
+// resources.
+//
+// But on top of that, we also avoid calling other library functions, as many
+// of them have the side-effect of calling getenv() (in order to deal with
+// localization) or accessing errno. The latter sounds benign, but there are
+// several execution contexts where it isn't even possible to safely read let
+// alone write errno.
+//
+// The stated design goal of the SafeSPrintf() function is that it can be
+// called from any context that can safely call C or C++ code (i.e. anything
+// that doesn't require assembly code).
+//
+// For a brief overview of some but not all of the issues with async-signal-
+// safety, refer to:
+// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+
+namespace {
+const size_t kSSizeMaxConst = ((size_t)(ssize_t)-1) >> 1;
+
+const char kUpCaseHexDigits[] = "0123456789ABCDEF";
+const char kDownCaseHexDigits[] = "0123456789abcdef";
+}  // namespace
+
+#if defined(NDEBUG)
+// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
+// but C++ doesn't allow us to do that for constants. Instead, we have to
+// use careful casting and shifting. We later use a static_assert to
+// verify that this worked correctly.
+namespace {
+const size_t kSSizeMax = kSSizeMaxConst;
+}
+#else   // defined(NDEBUG)
+// For efficiency, we really need kSSizeMax to be a constant. But for unit
+// tests, it should be adjustable. This allows us to verify edge cases without
+// having to fill the entire available address space. As a compromise, we make
+// kSSizeMax adjustable in debug builds, and then only compile that particular
+// part of the unit test in debug builds.
+namespace {
+static size_t kSSizeMax = kSSizeMaxConst;
+}
+
+namespace internal {
+void SetSafeSPrintfSSizeMaxForTest(size_t max) {
+  kSSizeMax = max;
+}
+
+size_t GetSafeSPrintfSSizeMaxForTest() {
+  return kSSizeMax;
+}
+}  // namespace internal
+#endif  // defined(NDEBUG)
+
+namespace {
+class Buffer {
+ public:
+  // |buffer| is caller-allocated storage that SafeSPrintf() writes to. It
+  // has |size| bytes of writable storage. It is the caller's responsibility
+  // to ensure that the buffer is at least one byte in size, so that it fits
+  // the trailing NUL that will be added by the destructor. The buffer also
+  // must be smaller or equal to kSSizeMax in size.
+  Buffer(char* buffer, size_t size)
+      : buffer_(buffer),
+        size_(size - 1),  // Account for trailing NUL byte
+        count_(0) {
+// MSVS 2013's standard library doesn't mark max() as constexpr yet. cl.exe
+// supports static_cast but doesn't really implement constexpr yet, so it
+// doesn't complain; clang does.
+#if __cplusplus >= 201103 && !(defined(__clang__) && BUILDFLAG(IS_WIN))
+    static_assert(kSSizeMaxConst ==
+                      static_cast<size_t>(std::numeric_limits<ssize_t>::max()),
+                  "kSSizeMaxConst should be the max value of an ssize_t");
+#endif
+    DEBUG_CHECK(size > 0);
+    DEBUG_CHECK(size <= kSSizeMax);
+  }
+
+  Buffer(const Buffer&) = delete;
+  Buffer& operator=(const Buffer&) = delete;
+
+  ~Buffer() {
+    // The code calling the constructor guaranteed that there was enough space
+    // to store a trailing NUL -- and in debug builds, we are actually
+    // verifying this with DEBUG_CHECK()s in the constructor. So, we can
+    // always unconditionally write the NUL byte in the destructor.  We do not
+    // need to adjust the count_, as SafeSPrintf() copies snprintf() in not
+    // including the NUL byte in its return code.
+    *GetInsertionPoint() = '\000';
+  }
+
+  // Returns true, iff the buffer is filled all the way to |kSSizeMax-1|. The
+  // caller can now stop adding more data, as GetCount() has reached its
+  // maximum possible value.
+  inline bool OutOfAddressableSpace() const {
+    return count_ == static_cast<size_t>(kSSizeMax - 1);
+  }
+
+  // Returns the number of bytes that would have been emitted to |buffer_|
+  // if it was sized sufficiently large. This number can be larger than
+  // |size_|, if the caller provided an insufficiently large output buffer.
+  // But it will never be bigger than |kSSizeMax-1|.
+  inline ssize_t GetCount() const {
+    DEBUG_CHECK(count_ < kSSizeMax);
+    return static_cast<ssize_t>(count_);
+  }
+
+  // Emits one |ch| character into the |buffer_| and updates the |count_| of
+  // characters that are currently supposed to be in the buffer.
+  // Returns "false", iff the buffer was already full.
+  // N.B. |count_| increases even if no characters have been written. This is
+  // needed so that GetCount() can return the number of bytes that should
+  // have been allocated for the |buffer_|.
+  inline bool Out(char ch) {
+    if (size_ >= 1 && count_ < size_) {
+      buffer_[count_] = ch;
+      return IncrementCountByOne();
+    }
+    // |count_| still needs to be updated, even if the buffer has been
+    // filled completely. This allows SafeSPrintf() to return the number of
+    // bytes that should have been emitted.
+    IncrementCountByOne();
+    return false;
+  }
+
+  // Inserts |padding|-|len| bytes worth of padding into the |buffer_|.
+  // |count_| will also be incremented by the number of bytes that were meant
+  // to be emitted. The |pad| character is typically either a ' ' space
+  // or a '0' zero, but other non-NUL values are legal.
+  // Returns "false", iff the |buffer_| filled up (i.e. |count_|
+  // overflowed |size_|) at any time during padding.
+  inline bool Pad(char pad, size_t padding, size_t len) {
+    DEBUG_CHECK(pad);
+    DEBUG_CHECK(padding <= kSSizeMax);
+    for (; padding > len; --padding) {
+      if (!Out(pad)) {
+        if (--padding) {
+          IncrementCount(padding - len);
+        }
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // POSIX doesn't define any async-signal-safe function for converting
+  // an integer to ASCII. Define our own version.
+  //
+  // This also gives us the ability to make the function a little more
+  // powerful and have it deal with |padding|, with truncation, and with
+  // predicting the length of the untruncated output.
+  //
+  // IToASCII() converts an integer |i| to ASCII.
+  //
+  // Unlike similar functions in the standard C library, it never appends a
+  // NUL character. This is left for the caller to do.
+  //
+  // While the function signature takes a signed int64_t, the code decides at
+  // run-time whether to treat the argument as signed (int64_t) or as unsigned
+  // (uint64_t) based on the value of |sign|.
+  //
+  // It supports |base|s 2 through 16. Only a |base| of 10 is allowed to have
+  // a |sign|. Otherwise, |i| is treated as unsigned.
+  //
+  // For bases larger than 10, |upcase| decides whether lower-case or upper-
+  // case letters should be used to designate digits greater than 10.
+  //
+  // Padding can be done with either '0' zeros or ' ' spaces. Padding has to
+  // be positive and will always be applied to the left of the output.
+  //
+  // Prepends a |prefix| to the number (e.g. "0x"). This prefix goes to
+  // the left of |padding|, if |pad| is '0'; and to the right of |padding|
+  // if |pad| is ' '.
+  //
+  // Returns "false", if the |buffer_| overflowed at any time.
+  bool IToASCII(bool sign,
+                bool upcase,
+                int64_t i,
+                size_t base,
+                char pad,
+                size_t padding,
+                const char* prefix);
+
+ private:
+  // Increments |count_| by |inc| unless this would cause |count_| to
+  // overflow |kSSizeMax-1|. Returns "false", iff an overflow was detected;
+  // it then clamps |count_| to |kSSizeMax-1|.
+  inline bool IncrementCount(size_t inc) {
+    // "inc" is either 1 or a "padding" value. Padding is clamped at
+    // run-time to at most kSSizeMax-1. So, we know that "inc" is always in
+    // the range 1..kSSizeMax-1.
+    // This allows us to compute "kSSizeMax - 1 - inc" without incurring any
+    // integer overflows.
+    DEBUG_CHECK(inc <= kSSizeMax - 1);
+    if (count_ > kSSizeMax - 1 - inc) {
+      count_ = kSSizeMax - 1;
+      return false;
+    }
+    count_ += inc;
+    return true;
+  }
+
+  // Convenience method for the common case of incrementing |count_| by one.
+  inline bool IncrementCountByOne() { return IncrementCount(1); }
+
+  // Return the current insertion point into the buffer. This is typically
+  // at |buffer_| + |count_|, but could be before that if truncation
+  // happened. It always points to one byte past the last byte that was
+  // successfully placed into the |buffer_|.
+  inline char* GetInsertionPoint() const {
+    size_t idx = count_;
+    if (idx > size_) {
+      idx = size_;
+    }
+    return buffer_ + idx;
+  }
+
+  // User-provided buffer that will receive the fully formatted output string.
+  char* buffer_;
+
+  // Number of bytes that are available in the buffer excluding the trailing
+  // NUL byte that will be added by the destructor.
+  const size_t size_;
+
+  // Number of bytes that would have been emitted to the buffer, if the buffer
+  // was sufficiently big. This number always excludes the trailing NUL byte
+  // and it is guaranteed to never grow bigger than kSSizeMax-1.
+  size_t count_;
+};
+
+bool Buffer::IToASCII(bool sign,
+                      bool upcase,
+                      int64_t i,
+                      size_t base,
+                      char pad,
+                      size_t padding,
+                      const char* prefix) {
+  // Sanity check for parameters. None of these should ever fail, but see
+  // above for the rationale why we can't call CHECK().
+  DEBUG_CHECK(base >= 2);
+  DEBUG_CHECK(base <= 16);
+  DEBUG_CHECK(!sign || base == 10);
+  DEBUG_CHECK(pad == '0' || pad == ' ');
+  DEBUG_CHECK(padding <= kSSizeMax);
+  DEBUG_CHECK(!(sign && prefix && *prefix));
+
+  // Handle negative numbers, if the caller indicated that |i| should be
+  // treated as a signed number; otherwise treat |i| as unsigned (even if the
+  // MSB is set!)
+  // Details are tricky, because of limited data-types, but equivalent pseudo-
+  // code would look like:
+  //   if (sign && i < 0)
+  //     prefix = "-";
+  //   num = abs(i);
+  size_t minint = 0;
+  uint64_t num;
+  if (sign && i < 0) {
+    prefix = "-";
+
+    // Turn our number positive.
+    if (i == std::numeric_limits<int64_t>::min()) {
+      // The most negative integer needs special treatment.
+      minint = 1;
+      num = static_cast<uint64_t>(-(i + 1));
+    } else {
+      // "Normal" negative numbers are easy.
+      num = static_cast<uint64_t>(-i);
+    }
+  } else {
+    num = static_cast<uint64_t>(i);
+  }
+
+  // If padding with '0' zero, emit the prefix or '-' character now. Otherwise,
+  // make the prefix accessible in reverse order, so that we can later output
+  // it right between padding and the number.
+  // We cannot choose the easier approach of just reversing the number, as that
+  // fails in situations where we need to truncate numbers that have padding
+  // and/or prefixes.
+  const char* reverse_prefix = nullptr;
+  if (prefix && *prefix) {
+    if (pad == '0') {
+      while (*prefix) {
+        if (padding) {
+          --padding;
+        }
+        Out(*prefix++);
+      }
+      prefix = nullptr;
+    } else {
+      for (reverse_prefix = prefix; *reverse_prefix; ++reverse_prefix) {
+      }
+    }
+  } else {
+    prefix = nullptr;
+  }
+  const size_t prefix_length = static_cast<size_t>(reverse_prefix - prefix);
+
+  // Loop until we have converted the entire number. Output at least one
+  // character (i.e. '0').
+  size_t start = count_;
+  size_t discarded = 0;
+  bool started = false;
+  do {
+    // Make sure there is still enough space left in our output buffer.
+    if (count_ >= size_) {
+      if (start < size_) {
+        // It is rare that we need to output a partial number. But if asked
+        // to do so, we will still make sure we output the correct number of
+        // leading digits.
+        // Since we are generating the digits in reverse order, we actually
+        // have to discard digits in the order that we have already emitted
+        // them. This is essentially equivalent to:
+        //   memmove(buffer_ + start, buffer_ + start + 1, size_ - start - 1)
+        for (char *move = buffer_ + start, *end = buffer_ + size_ - 1;
+             move < end; ++move) {
+          *move = move[1];
+        }
+        ++discarded;
+        --count_;
+      } else if (count_ - size_ > 1) {
+        // Need to increment either |count_| or |discarded| to make progress.
+        // The latter is more efficient, as it eventually triggers fast
+        // handling of padding. But we have to ensure we don't accidentally
+        // change the overall state (i.e. switch the state-machine from
+        // discarding to non-discarding). |count_| needs to always stay
+        // bigger than |size_|.
+        --count_;
+        ++discarded;
+      }
+    }
+
+    // Output the next digit and (if necessary) compensate for the most
+    // negative integer needing special treatment. This works because,
+    // no matter the bit width of the integer, the most negative integer
+    // always ends in 2, 4, 6, or 8 in decimal.
+    if (!num && started) {
+      if (reverse_prefix > prefix) {
+        Out(*--reverse_prefix);
+      } else {
+        Out(pad);
+      }
+    } else {
+      started = true;
+      Out((upcase ? kUpCaseHexDigits
+                  : kDownCaseHexDigits)[num % base + minint]);
+    }
+
+    minint = 0;
+    num /= base;
+
+    // Add padding, if requested.
+    if (padding > 0) {
+      --padding;
+
+      // Performance optimization for when we are asked to output excessive
+      // padding, but our output buffer is limited in size.  Even if we output
+      // a 64bit number in binary, we would never write more than 64 plus
+      // prefix non-padding characters. So, once this limit has been passed,
+      // any further state change can be computed arithmetically; we know that
+      // by this time, our entire final output consists of padding characters
+      // that have all already been output.
+      if (discarded > 8 * sizeof(num) + prefix_length) {
+        IncrementCount(padding);
+        padding = 0;
+      }
+    }
+  } while (num || padding || (reverse_prefix > prefix));
+
+  if (start < size_) {
+    // Conversion to ASCII actually resulted in the digits being in reverse
+    // order. We can't easily generate them in forward order, as we can't tell
+    // the number of characters needed until we are done converting.
+    // So, now, we reverse the string (except for the possible '-' sign).
+    char* front = buffer_ + start;
+    char* back = GetInsertionPoint();
+    while (--back > front) {
+      char ch = *back;
+      *back = *front;
+      *front++ = ch;
+    }
+  }
+  IncrementCount(discarded);
+  return !discarded;
+}
+
+}  // anonymous namespace
+
+namespace internal {
+
+ssize_t SafeSNPrintf(char* buf,
+                     size_t sz,
+                     const char* fmt,
+                     const Arg* args,
+                     const size_t max_args) {
+  // Make sure that at least one NUL byte can be written, and that the buffer
+  // never overflows kSSizeMax. Not only does that use up most or all of the
+  // address space, it also would result in a return code that cannot be
+  // represented.
+  if (static_cast<ssize_t>(sz) < 1) {
+    return -1;
+  }
+  sz = std::min(sz, kSSizeMax);
+
+  // Iterate over format string and interpret '%' arguments as they are
+  // encountered.
+  Buffer buffer(buf, sz);
+  size_t padding;
+  char pad;
+  for (unsigned int cur_arg = 0; *fmt && !buffer.OutOfAddressableSpace();) {
+    if (*fmt++ == '%') {
+      padding = 0;
+      pad = ' ';
+      char ch = *fmt++;
+    format_character_found:
+      switch (ch) {
+        case '0':
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9':
+          // Found a width parameter. Convert to an integer value and store in
+          // "padding". If the leading digit is a zero, change the padding
+          // character from a space ' ' to a zero '0'.
+          pad = ch == '0' ? '0' : ' ';
+          for (;;) {
+            const size_t digit = static_cast<size_t>(ch - '0');
+            // The maximum allowed padding fills all the available address
+            // space and leaves just enough space to insert the trailing NUL.
+            const size_t max_padding = kSSizeMax - 1;
+            if (padding > max_padding / 10 ||
+                10 * padding > max_padding - digit) {
+              DEBUG_CHECK(padding <= max_padding / 10 &&
+                          10 * padding <= max_padding - digit);
+              // Integer overflow detected. Skip the rest of the width until
+              // we find the format character, then do the normal error
+              // handling.
+            padding_overflow:
+              padding = max_padding;
+              while ((ch = *fmt++) >= '0' && ch <= '9') {
+              }
+              if (cur_arg < max_args) {
+                ++cur_arg;
+              }
+              goto fail_to_expand;
+            }
+            padding = 10 * padding + digit;
+            if (padding > max_padding) {
+              // This doesn't happen for "sane" values of kSSizeMax. But once
+              // kSSizeMax gets smaller than about 10, our earlier range checks
+              // are incomplete. Unittests do trigger this artificial corner
+              // case.
+              DEBUG_CHECK(padding <= max_padding);
+              goto padding_overflow;
+            }
+            ch = *fmt++;
+            if (ch < '0' || ch > '9') {
+              // Reached the end of the width parameter. This is where the
+              // format character is found.
+              goto format_character_found;
+            }
+          }
+        case 'c': {  // Output an ASCII character.
+          // Check that there are arguments left to be inserted.
+          if (cur_arg >= max_args) {
+            DEBUG_CHECK(cur_arg < max_args);
+            goto fail_to_expand;
+          }
+
+          // Check that the argument has the expected type.
+          const Arg& arg = args[cur_arg++];
+          if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+            DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+            goto fail_to_expand;
+          }
+
+          // Apply padding, if needed.
+          buffer.Pad(' ', padding, 1);
+
+          // Convert the argument to an ASCII character and output it.
+          char as_char = static_cast<char>(arg.integer.i);
+          if (!as_char) {
+            goto end_of_output_buffer;
+          }
+          buffer.Out(as_char);
+          break;
+        }
+        case 'd':  // Output a possibly signed decimal value.
+        case 'o':  // Output an unsigned octal value.
+        case 'x':  // Output an unsigned hexadecimal value.
+        case 'X':
+        case 'p': {  // Output a pointer value.
+          // Check that there are arguments left to be inserted.
+          if (cur_arg >= max_args) {
+            DEBUG_CHECK(cur_arg < max_args);
+            goto fail_to_expand;
+          }
+
+          const Arg& arg = args[cur_arg++];
+          int64_t i;
+          const char* prefix = nullptr;
+          if (ch != 'p') {
+            // Check that the argument has the expected type.
+            if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+              DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+              goto fail_to_expand;
+            }
+            i = arg.integer.i;
+
+            if (ch != 'd') {
+              // The Arg() constructor automatically performed sign expansion on
+              // signed parameters. This is great when outputting a %d decimal
+              // number, but can result in unexpected leading 0xFF bytes when
+              // outputting a %x hexadecimal number. Mask bits, if necessary.
+              // We have to do this here, instead of in the Arg() constructor,
+              // as the Arg() constructor cannot tell whether we will output a
+              // %d or a %x. Only the latter should experience masking.
+              if (arg.integer.width < sizeof(int64_t)) {
+                i &= (1LL << (8 * arg.integer.width)) - 1;
+              }
+            }
+          } else {
+            // Pointer values require an actual pointer or a string.
+            if (arg.type == Arg::POINTER) {
+              i = static_cast<int64_t>(reinterpret_cast<uintptr_t>(arg.ptr));
+            } else if (arg.type == Arg::STRING) {
+              i = static_cast<int64_t>(reinterpret_cast<uintptr_t>(arg.str));
+            } else if (arg.type == Arg::INT &&
+                       arg.integer.width == sizeof(NULL) &&
+                       arg.integer.i == 0) {  // Allow C++'s version of NULL
+              i = 0;
+            } else {
+              DEBUG_CHECK(arg.type == Arg::POINTER || arg.type == Arg::STRING);
+              goto fail_to_expand;
+            }
+
+            // Pointers always include the "0x" prefix.
+            prefix = "0x";
+          }
+
+          // Use IToASCII() to convert to ASCII representation. For decimal
+          // numbers, optionally print a sign. For hexadecimal numbers,
+          // distinguish between upper and lower case. %p addresses are always
+          // printed as upcase. Supports base 8, 10, and 16. Prints padding
+          // and/or prefixes, if so requested.
+          buffer.IToASCII(ch == 'd' && arg.type == Arg::INT, ch != 'x', i,
+                          ch == 'o'   ? 8
+                          : ch == 'd' ? 10
+                                      : 16,
+                          pad, padding, prefix);
+          break;
+        }
+        case 's': {
+          // Check that there are arguments left to be inserted.
+          if (cur_arg >= max_args) {
+            DEBUG_CHECK(cur_arg < max_args);
+            goto fail_to_expand;
+          }
+
+          // Check that the argument has the expected type.
+          const Arg& arg = args[cur_arg++];
+          const char* s;
+          if (arg.type == Arg::STRING) {
+            s = arg.str ? arg.str : "<NULL>";
+          } else if (arg.type == Arg::INT &&
+                     arg.integer.width == sizeof(NULL) &&
+                     arg.integer.i == 0) {  // Allow C++'s version of NULL
+            s = "<NULL>";
+          } else {
+            DEBUG_CHECK(arg.type == Arg::STRING);
+            goto fail_to_expand;
+          }
+
+          // Apply padding, if needed. This requires us to first check the
+          // length of the string that we are outputting.
+          if (padding) {
+            size_t len = 0;
+            for (const char* src = s; *src++;) {
+              ++len;
+            }
+            buffer.Pad(' ', padding, len);
+          }
+
+          // Printing a string involves nothing more than copying it into the
+          // output buffer and making sure we don't output more bytes than
+          // available space; Out() takes care of doing that.
+          for (const char* src = s; *src;) {
+            buffer.Out(*src++);
+          }
+          break;
+        }
+        case '%':
+          // Quoted percent '%' character.
+          goto copy_verbatim;
+        fail_to_expand:
+          // C++ gives us tools to do type checking -- something that snprintf()
+          // could never really do. So, whenever we see arguments that don't
+          // match up with the format string, we refuse to output them. But
+          // since we have to be extremely conservative about being async-
+          // signal-safe, we are limited in the type of error handling that we
+          // can do in production builds (in debug builds we can use
+          // DEBUG_CHECK() and hope for the best). So, all we do is pass the
+          // format string unchanged. That should eventually get the user's
+          // attention; and in the meantime, it hopefully doesn't lose too much
+          // data.
+        default:
+          // Unknown or unsupported format character. Just copy verbatim to
+          // output.
+          buffer.Out('%');
+          DEBUG_CHECK(ch);
+          if (!ch) {
+            goto end_of_format_string;
+          }
+          buffer.Out(ch);
+          break;
+      }
+    } else {
+    copy_verbatim:
+      buffer.Out(fmt[-1]);
+    }
+  }
+end_of_format_string:
+end_of_output_buffer:
+  return buffer.GetCount();
+}
+
+}  // namespace internal
+
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt) {
+  // Make sure that at least one NUL byte can be written, and that the buffer
+  // never overflows kSSizeMax. Not only does that use up most or all of the
+  // address space, it also would result in a return code that cannot be
+  // represented.
+  if (static_cast<ssize_t>(sz) < 1) {
+    return -1;
+  }
+  sz = std::min(sz, kSSizeMax);
+
+  Buffer buffer(buf, sz);
+
+  // In the slow-path, we deal with errors by copying the contents of
+  // "fmt" unexpanded. This means, if there are no arguments passed, the
+  // SafeSPrintf() function always degenerates to a version of strncpy() that
+  // de-duplicates '%' characters.
+  const char* src = fmt;
+  for (; *src; ++src) {
+    buffer.Out(*src);
+    DEBUG_CHECK(src[0] != '%' || src[1] == '%');
+    if (src[0] == '%' && src[1] == '%') {
+      ++src;
+    }
+  }
+  return buffer.GetCount();
+}
+
+}  // namespace partition_alloc::internal::base::strings
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h
new file mode 100644
index 0000000..723a02a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h
@@ -0,0 +1,267 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_SAFE_SPRINTF_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_SAFE_SPRINTF_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+// For ssize_t
+#include <unistd.h>
+#endif
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base::strings {
+
+#if defined(COMPILER_MSVC)
+// Define ssize_t inside of our namespace.
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef long ssize_t;
+#endif
+#endif
+
+// SafeSPrintf() is a type-safe and completely self-contained version of
+// snprintf().
+//
+// SafeSNPrintf() is an alternative function signature that can be used when
+// not dealing with fixed-size buffers. When possible, SafeSPrintf() should
+// always be used instead of SafeSNPrintf().
+//
+// These functions allow for formatting complicated messages from contexts that
+// require strict async-signal-safety. In fact, it is safe to call them from
+// any low-level execution context, as they are guaranteed to make no library
+// or system calls. They deliberately never touch "errno", either.
+//
+// The only exception to this rule is that in debug builds the code calls
+// RAW_CHECK() to help diagnose problems when the format string does not
+// match the rest of the arguments. In release builds, no CHECK()s are used,
+// and SafeSPrintf() instead returns an output string that expands only
+// those arguments that match their format characters. Mismatched arguments
+// are ignored.
+//
+// The code currently only supports a subset of format characters:
+//   %c, %o, %d, %x, %X, %p, and %s.
+//
+// SafeSPrintf() aims to be as liberal as reasonably possible. Integer-like
+// values of arbitrary width can be passed to all of the format characters
+// that expect integers. Thus, it is explicitly legal to pass an "int" to
+// "%c", and output will automatically look at the LSB only. It is also
+// explicitly legal to pass either signed or unsigned values, and the format
+// characters will automatically interpret the arguments accordingly.
+//
+// It is still not legal to mix-and-match integer-like values with pointer
+// values. For instance, you cannot pass a pointer to %x, nor can you pass an
+// integer to %p.
+//
+// The one exception is "0" zero being accepted by "%p". This works around
+// the problem of C++ defining NULL as an integer-like value.
+//
+// All format characters take an optional width parameter. This must be a
+// positive integer. For %d, %o, %x, %X and %p, if the width starts with
+// a leading '0', padding is done with '0' instead of ' ' characters.
+//
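+// For instance (an illustrative example, not from the upstream comment):
+//   char buf[16];
+//   SafeSPrintf(buf, "%08x", 0x1234);  // buf == "00001234"
+//   SafeSPrintf(buf, "%5d", 42);       // buf == "   42"
+//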
+// There are a few features of snprintf()-style format strings that
+// SafeSPrintf() does not support at this time.
+//
+// If an actual user showed up, there is no particularly strong reason they
+// couldn't be added. But that assumes that the trade-offs between complexity
+// and utility are favorable.
+//
+// For example, adding support for negative padding widths and for %n would
+// likely be viewed positively. They are clearly useful, low-risk, easy to
+// test, don't jeopardize the async-signal-safety of the code, and overall
+// have little impact on other parts of the SafeSPrintf() function.
+//
+// On the other hand, support for alternate forms, positional arguments,
+// grouping, wide characters, localization, or floating point numbers is
+// unlikely to ever be added.
+//
+// SafeSPrintf() and SafeSNPrintf() mimic the behavior of snprintf() and they
+// return the number of bytes needed to store the untruncated output. This
+// does *not* include the terminating NUL byte.
+//
+// They return -1, iff a fatal error happened. This typically can only happen,
+// if the buffer size is a) negative, or b) zero (i.e. not even the NUL byte
+// can be written). The return value can never be larger than SSIZE_MAX-1.
+// This ensures that the caller can always add one to the signed return code
+// in order to determine the amount of storage that needs to be allocated.
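+//
+// For instance (an illustrative example, not from the upstream comment):
+//   char buf[4];
+//   ssize_t n = SafeSPrintf(buf, "%d", 123456);
+//   // n == 6 (bytes needed excluding the NUL); buf holds the truncated "123".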
+//
+// While the code supports type checking and while it is generally very careful
+// to avoid printing incorrect values, it tends to be conservative in printing
+// as much as possible, even when given incorrect parameters. Typically, in
+// case of an error, the format string will not be expanded. (i.e. something
+// like SafeSPrintf(buf, "%p %d", 1, 2) results in "%p 2"). See above for
+// the use of RAW_CHECK() in debug builds, though.
+//
+// Basic example:
+//   char buf[20];
+//   base::strings::SafeSPrintf(buf, "The answer: %2d", 42);
+//
+// Example with dynamically sized buffer (async-signal-safe). This code won't
+// work on Visual Studio, as it requires dynamically allocating arrays on the
+// stack. Consider picking a smaller value for |kMaxSize| if stack size is
+// limited and known. On the other hand, if the parameters to SafeSNPrintf()
+// are trusted and not controllable by the user, you can consider eliminating
+// the check for |kMaxSize| altogether. Using SSIZE_MAX as the upper bound is
+// essentially a no-op and merely illustrates how to implement one:
+//   const size_t kInitialSize = 128;
+//   const size_t kMaxSize = std::numeric_limits<ssize_t>::max();
+//   size_t size = kInitialSize;
+//   for (;;) {
+//     char buf[size];
+//     size = SafeSNPrintf(buf, size, "Error message \"%s\"\n", err) + 1;
+//     if (sizeof(buf) < kMaxSize && size > kMaxSize) {
+//       size = kMaxSize;
+//       continue;
+//     } else if (size > sizeof(buf))
+//       continue;
+//     write(2, buf, size-1);
+//     break;
+//   }
+
+namespace internal {
+// Helpers that use C++ overloading, templates, and specializations to deduce
+// and record type information from function arguments. This allows us to
+// later write a type-safe version of snprintf().
+
+struct Arg {
+  enum Type { INT, UINT, STRING, POINTER };
+
+  // Any integer-like value.
+  Arg(signed char c) : type(INT) {
+    integer.i = c;
+    integer.width = sizeof(char);
+  }
+  Arg(unsigned char c) : type(UINT) {
+    integer.i = c;
+    integer.width = sizeof(char);
+  }
+  Arg(signed short j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(short);
+  }
+  Arg(unsigned short j) : type(UINT) {
+    integer.i = j;
+    integer.width = sizeof(short);
+  }
+  Arg(signed int j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(int);
+  }
+  Arg(unsigned int j) : type(UINT) {
+    integer.i = j;
+    integer.width = sizeof(int);
+  }
+  Arg(signed long j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(long);
+  }
+  Arg(unsigned long j) : type(UINT) {
+    integer.i = static_cast<int64_t>(j);
+    integer.width = sizeof(long);
+  }
+  Arg(signed long long j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(long long);
+  }
+  Arg(unsigned long long j) : type(UINT) {
+    integer.i = static_cast<int64_t>(j);
+    integer.width = sizeof(long long);
+  }
+
+  // nullptr_t would be ambiguous between char* and const char*; to get
+  // consistent behavior with NULL, which prints with all three of %d, %p, and
+  // %s, treat it as an integer zero internally.
+  //
+  // Warning: don't just do Arg(NULL) here because in some libcs, NULL is an
+  // alias for nullptr!
+  Arg(nullptr_t p) : type(INT) {
+    integer.i = 0;
+    // Internally, SafeSPrintf expects to represent nulls as integers whose
+    // width is equal to sizeof(NULL), which is not necessarily equal to
+    // sizeof(nullptr_t) - e.g., on Windows, NULL is defined to 0 (with size 4)
+    // while nullptr_t has size 8.
+    integer.width = sizeof(NULL);
+  }
+
+  // A C-style text string.
+  Arg(const char* s) : str(s), type(STRING) {}
+  Arg(char* s) : str(s), type(STRING) {}
+
+  // Any pointer value that can be cast to a "void*".
+  template <class T>
+  Arg(T* p) : ptr((void*)p), type(POINTER) {}
+
+  union {
+    // An integer-like value.
+    struct {
+      int64_t i;
+      unsigned char width;
+    } integer;
+
+    // A C-style text string.
+    const char* str;
+
+    // A pointer to an arbitrary object.
+    const void* ptr;
+  };
+  const enum Type type;
+};
+
+// This is the internal function that performs the actual formatting of
+// an snprintf()-style format string.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+ssize_t SafeSNPrintf(char* buf,
+                     size_t sz,
+                     const char* fmt,
+                     const Arg* args,
+                     size_t max_args);
+
+#if !defined(NDEBUG)
+// In debug builds, allow unit tests to artificially lower the kSSizeMax
+// constant that is used as a hard upper-bound for all buffers. In normal
+// use, this constant should always be std::numeric_limits<ssize_t>::max().
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+void SetSafeSPrintfSSizeMaxForTest(size_t max);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+size_t GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+}  // namespace internal
+
+template <typename... Args>
+ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args... args) {
+  // Use Arg() object to record type information and then copy arguments to an
+  // array to make it easier to iterate over them.
+  const internal::Arg arg_array[] = {args...};
+  return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
+}
+
+template <size_t N, typename... Args>
+ssize_t SafeSPrintf(char (&buf)[N], const char* fmt, Args... args) {
+  // Use Arg() object to record type information and then copy arguments to an
+  // array to make it easier to iterate over them.
+  const internal::Arg arg_array[] = {args...};
+  return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
+}
+
+// Fast-path when we don't actually need to substitute any arguments.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt);
+template <size_t N>
+inline ssize_t SafeSPrintf(char (&buf)[N], const char* fmt) {
+  return SafeSNPrintf(buf, N, fmt);
+}
+
+}  // namespace partition_alloc::internal::base::strings
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_SAFE_SPRINTF_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc
new file mode 100644
index 0000000..afef569
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf_pa_unittest.cc
@@ -0,0 +1,780 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/safe_sprintf.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests on Android are currently very flaky. No need to add more flaky
+// tests, as they just make it hard to spot real problems.
+// TODO(markus): See if the restrictions on Android can eventually be lifted.
+#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
+#define ALLOW_DEATH_TEST
+#endif
+
+namespace partition_alloc::internal::base::strings {
+
+TEST(SafeSPrintfTestPA, Empty) {
+  char buf[2] = {'X', 'X'};
+
+  // Negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // A larger buffer should leave the trailing bytes unchanged.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(0, SafeSPrintf(buf, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+}
+
+TEST(SafeSPrintfTestPA, NoArguments) {
+  // Output a text message that doesn't require any substitutions. This is
+  // roughly equivalent to calling strncpy(), except that it always adds a
+  // trailing NUL and always deduplicates '%' characters.
+  static const char text[] = "hello world";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSNPrintf(buf, 1, text));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf + 1, ref + 1, sizeof(buf) - 1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSNPrintf(buf, 2, text));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf + 2, ref + 2, sizeof(buf) - 2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
+            SafeSNPrintf(buf, sizeof(buf), text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSPrintf(buf, text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
+#endif
+}
+
+TEST(SafeSPrintfTestPA, OneArgument) {
+  // Test basic single-argument single-character substitution.
+  const char text[] = "hello world";
+  const char fmt[] = "hello%cworld";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(buf));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
+            SafeSNPrintf(buf, 1, fmt, ' '));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf + 1, ref + 1, sizeof(buf) - 1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
+            SafeSNPrintf(buf, 2, fmt, ' '));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf + 2, ref + 2, sizeof(buf) - 2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1,
+            SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text)) - 1, SafeSPrintf(buf, fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTestPA, MissingArg) {
+#if defined(NDEBUG)
+  char buf[20];
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
+  EXPECT_EQ("A%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  char buf[20];
+  EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
+#endif
+}
+
+TEST(SafeSPrintfTestPA, ASANFriendlyBufferTest) {
+  // Print into a buffer that is sized exactly to the output. ASAN can verify
+  // that nobody attempts to write past the end of the buffer.
+  // There is a more complicated test in PrintLongString() that covers a lot
+  // more edge cases, but it is also harder to debug in case of a failure.
+  const char kTestString[] = "This is a test";
+  std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+}
+
+TEST(SafeSPrintfTestPA, NArgs) {
+  // Pre-C++11 compilers have a different code path that can only print
+  // up to ten distinct arguments.
+  // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
+  // have typos in the copy-n-pasted code that is needed to deal with various
+  // numbers of arguments.
+  char buf[12];
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9,
+            SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8,
+                            9, 10));
+
+  // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+  EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8,
+            SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7,
+                            8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6,
+                             7, 8, 9, 10));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+  EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7,
+                            8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+  EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5,
+                             6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+}
+
+TEST(SafeSPrintfTestPA, DataTypes) {
+  char buf[40];
+
+  // Bytes
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
+  EXPECT_EQ("255", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
+  EXPECT_EQ("-128", std::string(buf));
+
+  // Half-words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
+  EXPECT_EQ("65535", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
+  EXPECT_EQ("-32768", std::string(buf));
+
+  // Words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
+  EXPECT_EQ("4294967295", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+  EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647 - 1));
+  EXPECT_EQ("-2147483648", std::string(buf));
+
+  // Quads
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
+  EXPECT_EQ("18446744073709551615", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL - 1));
+  EXPECT_EQ("-9223372036854775808", std::string(buf));
+
+  // Strings (both const and mutable).
+  EXPECT_EQ(4, SafeSPrintf(buf, "test"));
+  EXPECT_EQ("test", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, buf));
+  EXPECT_EQ("test", std::string(buf));
+
+  // Pointer
+  char addr[20];
+  snprintf(addr, sizeof(addr), "0x%llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  SafeSPrintf(buf, "%p", (const char*)buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  snprintf(addr, sizeof(addr), "0x%llX",
+           (unsigned long long)(uintptr_t)snprintf);
+  SafeSPrintf(buf, "%p", snprintf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+
+  // Padding for pointers is a little more complicated because of the "0x"
+  // prefix. Padding with '0' zeros is relatively straightforward, but
+  // padding with ' ' spaces requires more effort.
+  snprintf(addr, sizeof(addr), "0x%017llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%019p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  snprintf(addr, sizeof(addr), "0x%llX", (unsigned long long)(uintptr_t)buf);
+  memset(addr, ' ',
+         (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1, addr,
+                        strlen(addr) + 1) -
+             addr);
+  SafeSPrintf(buf, "%19p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+}
+
+namespace {
+void PrintLongString(char* buf, size_t sz) {
+  // Output a reasonably complex expression into a limited-size buffer.
+  // At least one byte is available for writing the NUL character.
+  PA_BASE_CHECK(sz > static_cast<size_t>(0));
+
+  // Allocate slightly more space, so that we can verify that SafeSPrintf()
+  // never writes past the end of the buffer.
+  std::unique_ptr<char[]> tmp(new char[sz + 2]);
+  memset(tmp.get(), 'X', sz + 2);
+
+  // Use SafeSPrintf() to output a complex list of arguments:
+  // - test padding and truncating %c single characters.
+  // - test truncating %s simple strings.
+  // - test mismatching arguments and truncating (for %d != %s).
+  // - test zero-padding and truncating %x hexadecimal numbers.
+  // - test outputting and truncating %d MININT.
+  // - test outputting and truncating %p arbitrary pointer values.
+  // - test outputting, padding and truncating NULL-pointer %s strings.
+  char* out = tmp.get();
+  size_t out_sz = sz;
+  size_t len;
+  for (std::unique_ptr<char[]> perfect_buf;;) {
+    size_t needed =
+        SafeSNPrintf(out, out_sz,
+#if defined(NDEBUG)
+                     "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+#else
+                     "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+#endif
+                     0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+                     PrintLongString, static_cast<char*>(nullptr)) +
+        1;
+
+    // Various sanity checks:
+    // The number of characters needed to print the full string should always
+    // be greater than or equal to the number of bytes actually output.
+    len = strlen(tmp.get());
+    PA_BASE_CHECK(needed >= len + 1);
+
+    // The number of characters output should always fit into the buffer that
+    // was passed into SafeSPrintf().
+    PA_BASE_CHECK(len < out_sz);
+
+    // The output is always terminated with a NUL byte (actually, this test is
+    // always going to pass, as strlen() already verified this)
+    EXPECT_FALSE(tmp[len]);
+
+    // ASAN can check that we are not overwriting buffers, iff we make sure the
+    // buffer is exactly the size that we are expecting to be written. After
+    // running SafeSNPrintf() the first time, it is possible to compute the
+    // correct buffer size for this test. So, allocate a second buffer and run
+    // the exact same SafeSNPrintf() command again.
+    if (!perfect_buf.get()) {
+      out_sz = std::min(needed, sz);
+      out = new char[out_sz];
+      perfect_buf.reset(out);
+    } else {
+      break;
+    }
+  }
+
+  // All trailing bytes are unchanged.
+  for (size_t i = len + 1; i < sz + 2; ++i) {
+    EXPECT_EQ('X', tmp[i]);
+  }
+
+  // The text that was generated by SafeSPrintf() should always match the
+  // equivalent text generated by snprintf(). Please note that the format
+  // string for snprintf() is not identical, as it does not have the
+  // benefit of getting type information from the C++ compiler.
+  //
+  // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
+  //       Visual Studio doesn't support this function, and the work-arounds
+  //       are all really awkward.
+  char ref[256];
+  PA_BASE_CHECK(sz <= sizeof(ref));
+  snprintf(ref, sizeof(ref), "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
+           static_cast<long long>(std::numeric_limits<intptr_t>::min()),
+           static_cast<unsigned long long>(
+               reinterpret_cast<uintptr_t>(PrintLongString)));
+  ref[sz - 1] = '\000';
+
+#if defined(NDEBUG)
+  const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
+#else
+  const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+  // Compare the output from SafeSPrintf() to the one from snprintf().
+  EXPECT_EQ(std::string(ref).substr(0, kSSizeMax - 1), std::string(tmp.get()));
+
+  // We allocated a slightly larger buffer, so that we could perform some
+  // extra sanity checks. Now that the tests have all passed, we copy the
+  // data to the output buffer that the caller provided.
+  memcpy(buf, tmp.get(), len + 1);
+}
+
+#if !defined(NDEBUG)
+class ScopedSafeSPrintfSSizeMaxSetter {
+ public:
+  ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
+    old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
+    internal::SetSafeSPrintfSSizeMaxForTest(sz);
+  }
+
+  ScopedSafeSPrintfSSizeMaxSetter(const ScopedSafeSPrintfSSizeMaxSetter&) =
+      delete;
+  ScopedSafeSPrintfSSizeMaxSetter& operator=(
+      const ScopedSafeSPrintfSSizeMaxSetter&) = delete;
+
+  ~ScopedSafeSPrintfSSizeMaxSetter() {
+    internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
+  }
+
+ private:
+  size_t old_ssize_max_;
+};
+#endif
+
+}  // anonymous namespace
+
+TEST(SafeSPrintfTestPA, Truncation) {
+  // We use PrintLongString() to print a complex long string and then
+  // truncate to all possible lengths. This ends up exercising a lot of
+  // different code paths in SafeSPrintf() and IToASCII(), as truncation can
+  // happen in a lot of different states.
+  char ref[256];
+  PrintLongString(ref, sizeof(ref));
+  for (size_t i = strlen(ref) + 1; i; --i) {
+    char buf[sizeof(ref)];
+    PrintLongString(buf, i);
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // When compiling in debug mode, we have the ability to fake a small
+  // upper limit for the maximum value that can be stored in an ssize_t.
+  // SafeSPrintf() uses this upper limit to determine how many bytes it will
+  // write to the buffer, even if the caller claimed a bigger buffer size.
+  // Repeat the truncation test and verify that this other code path in
+  // SafeSPrintf() works correctly, too.
+#if !defined(NDEBUG)
+  for (size_t i = strlen(ref) + 1; i > 1; --i) {
+    ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
+    char buf[sizeof(ref)];
+    PrintLongString(buf, sizeof(buf));
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // kSSizeMax is also used to constrain the maximum amount of padding, before
+  // SafeSPrintf() detects an error in the format string.
+  ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
+  char buf[256];
+  EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
+  EXPECT_EQ(std::string(99, ' '), std::string(buf));
+  *buf = '\000';
+#if defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
+#endif
+  EXPECT_EQ(0, *buf);
+#endif
+}
+
+TEST(SafeSPrintfTestPA, Padding) {
+  char buf[40], fmt[40];
+
+  // Chars %c
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
+  EXPECT_EQ("%-2c", std::string(buf));
+  SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSPrintf(buf, fmt, 'A'));
+  SafeSPrintf(fmt, "%%%dc",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
+  EXPECT_EQ("%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
+#endif
+
+  // Octal %o
+  EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
+  EXPECT_EQ(" 37777777777", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
+  EXPECT_EQ("037777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
+  EXPECT_EQ(" 1777777777777777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
+  EXPECT_EQ("01777777777777777777777", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
+  EXPECT_EQ("%-2o", std::string(buf));
+  SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%do",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%o", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Decimals %d
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
+  EXPECT_EQ(" -1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
+  EXPECT_EQ("-01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
+  EXPECT_EQ("-111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
+  EXPECT_EQ("%-2d", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%d", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Hex %X
+  EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
+  EXPECT_EQ(" FFFFFFFF", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
+  EXPECT_EQ("0FFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
+  EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
+  EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
+  EXPECT_EQ("%-2X", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%X", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Pointer %p
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
+  EXPECT_EQ("0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
+  EXPECT_EQ(" 0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
+  EXPECT_EQ("0x01", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
+  EXPECT_EQ("0x111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
+  EXPECT_EQ("%-2p", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("0x0", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%p", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // String
+  EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
+  EXPECT_EQ("AAA", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
+  EXPECT_EQ("%-2s", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max() - 1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
+  EXPECT_EQ("%s", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
+#endif
+}
+
+TEST(SafeSPrintfTestPA, EmbeddedNul) {
+  char buf[] = {'X', 'X', 'X', 'X'};
+  EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
+  EXPECT_EQ(' ', buf[0]);
+  EXPECT_EQ(' ', buf[1]);
+  EXPECT_EQ(0, buf[2]);
+  EXPECT_EQ('X', buf[3]);
+
+  // Check handling of a NUL format character. N.B. this takes two different
+  // code paths depending on whether we are actually passing arguments. If
+  // we don't have any arguments, we are running in the fast-path code that
+  // looks (almost) like a strncpy().
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ("%%", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+  EXPECT_EQ("%%", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTestPA, EmitNULL) {
+  char buf[40];
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion-null"
+#endif
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
+  EXPECT_EQ("0", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
+  EXPECT_EQ("0x0", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
+  EXPECT_EQ("<NULL>", std::string(buf));
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+TEST(SafeSPrintfTestPA, EmitNullptr) {
+  char buf[40];
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", nullptr));
+  EXPECT_EQ("0", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", nullptr));
+  EXPECT_EQ("0x0", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%s", nullptr));
+  EXPECT_EQ("<NULL>", std::string(buf));
+}
+
+TEST(SafeSPrintfTestPA, PointerSize) {
+  // The internal data representation is a 64-bit value, independent of the
+  // native word size. We want to perform sign-extension for signed integers,
+  // but we want to avoid doing so for pointer types. This could be a
+  // problem on systems where pointers are only 32 bits wide. This test
+  // verifies that there is no such problem.
+  char* str = reinterpret_cast<char*>(0x80000000u);
+  void* ptr = str;
+  char buf[40];
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
+  EXPECT_EQ("0x80000000", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
+  EXPECT_EQ("0x80000000", std::string(buf));
+}
+
+}  // namespace partition_alloc::internal::base::strings
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.cc
new file mode 100644
index 0000000..a4b4350
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.cc
@@ -0,0 +1,35 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.h"
+
+#include <cstring>
+
+namespace partition_alloc::internal::base::strings {
+
+const char* FindLastOf(const char* text, const char* characters) {
+  size_t length = strlen(text);
+  const char* ptr = text + length - 1;
+  while (ptr >= text) {
+    if (strchr(characters, *ptr)) {
+      return ptr;
+    }
+    --ptr;
+  }
+  return nullptr;
+}
+
+const char* FindLastNotOf(const char* text, const char* characters) {
+  size_t length = strlen(text);
+  const char* ptr = text + length - 1;
+  while (ptr >= text) {
+    if (!strchr(characters, *ptr)) {
+      return ptr;
+    }
+    --ptr;
+  }
+  return nullptr;
+}
+
+}  // namespace partition_alloc::internal::base::strings
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.h
new file mode 100644
index 0000000..597f74c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.h
@@ -0,0 +1,19 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_STRING_UTIL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_STRING_UTIL_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base::strings {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+const char* FindLastOf(const char* text, const char* characters);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+const char* FindLastNotOf(const char* text, const char* characters);
+
+}  // namespace partition_alloc::internal::base::strings
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_STRING_UTIL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util_pa_unittest.cc
new file mode 100644
index 0000000..d864335
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util_pa_unittest.cc
@@ -0,0 +1,38 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/string_util.h"
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base::strings {
+
+TEST(PartitionAllocStringUtilTest, FindLastOf) {
+  EXPECT_EQ('c', *FindLastOf("abcdefg", "abc"));
+  EXPECT_EQ('b', *FindLastOf("abcdefg", "abC"));
+  EXPECT_EQ('g', *FindLastOf("abcdefg", "g"));
+
+  EXPECT_EQ("abbbb", std::string(FindLastOf("aaabbbb", "a")));
+  EXPECT_EQ("b", std::string(FindLastOf("aaabbbb", "ab")));
+
+  EXPECT_EQ(nullptr, FindLastOf("abcdefg", "\0"));
+  EXPECT_EQ(nullptr, FindLastOf("abcdefg", "hijk"));
+  EXPECT_EQ(nullptr, FindLastOf("abcdefg", ""));
+}
+
+TEST(PartitionAllocStringUtilTest, FindLastNotOf) {
+  EXPECT_EQ('g', *FindLastNotOf("abcdefg", "abc"));
+  EXPECT_EQ('g', *FindLastNotOf("abcdefg", "abC"));
+  EXPECT_EQ('f', *FindLastNotOf("abcdefg", "g"));
+
+  EXPECT_EQ("b", std::string(FindLastNotOf("aaabbbb", "a")));
+  EXPECT_EQ(nullptr, FindLastNotOf("aaabbbb", "ab"));
+
+  EXPECT_EQ('g', *FindLastNotOf("abcdefg", "\0"));
+  EXPECT_EQ('g', *FindLastNotOf("abcdefg", "hijk"));
+  EXPECT_EQ('g', *FindLastNotOf("abcdefg", ""));
+}
+
+}  // namespace partition_alloc::internal::base::strings
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.cc
new file mode 100644
index 0000000..3d29018
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.cc
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/scoped_clear_last_error.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+namespace partition_alloc::internal::base {
+
+std::string PA_PRINTF_FORMAT(1, 2)
+    TruncatingStringPrintf(const char* format, ...) {
+  base::ScopedClearLastError last_error;
+  char stack_buf[kMaxLengthOfTruncatingStringPrintfResult + 1];
+  va_list arguments;
+  va_start(arguments, format);
+#if BUILDFLAG(IS_WIN)
+  int result = vsnprintf_s(stack_buf, std::size(stack_buf), _TRUNCATE, format,
+                           arguments);
+#else
+  int result = vsnprintf(stack_buf, std::size(stack_buf), format, arguments);
+#endif
+  va_end(arguments);
+#if BUILDFLAG(IS_WIN)
+  // vsnprintf_s() returns a negative value both when an output error is
+  // encountered and when the data is larger than the buffer. To tell the two
+  // cases apart, check errno: EINVAL or ERANGE indicates a real output error;
+  // otherwise the output was merely truncated.
+  if (result < 0 && (errno == EINVAL || errno == ERANGE)) {
+    return std::string();
+  }
+#else
+  // If an output error is encountered, a negative value is returned.
+  // In that case, return an empty string.
+  if (result < 0) {
+    return std::string();
+  }
+#endif
+  // If result is equal to or larger than std::size(stack_buf), the output was
+  // truncated. (Unlike this function, ::base::StringPrintf doesn't truncate.)
+  return std::string(stack_buf);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h
new file mode 100644
index 0000000..67733c6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_STRINGPRINTF_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_STRINGPRINTF_H_
+
+#include <stdarg.h>  // va_list
+
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+// Since only SystemErrorCodeToString and partition_alloc_perftests use
+// StringPrintf, it does not need to support arbitrarily long results.
+// Instead, define a maximum result length and truncate longer results.
+static constexpr size_t kMaxLengthOfTruncatingStringPrintfResult = 255U;
+
+// Return a C++ string given printf-like input.
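+// Results longer than kMaxLengthOfTruncatingStringPrintfResult characters are
+// truncated.
+//
+// Example (illustrative sketch):
+//
+//   std::string s = TruncatingStringPrintf("%d bytes", 42);  // "42 bytes"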
+[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) std::string
+    TruncatingStringPrintf(const char* format, ...) PA_PRINTF_FORMAT(1, 2);
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_STRINGS_STRINGPRINTF_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf_pa_unittest.cc
new file mode 100644
index 0000000..d3f673d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf_pa_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base {
+
+TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfEmpty) {
+  EXPECT_EQ("", TruncatingStringPrintf("%s", ""));
+}
+
+TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfMisc) {
+  EXPECT_EQ("123hello w",
+            TruncatingStringPrintf("%3d%2s %1c", 123, "hello", 'w'));
+}
+
+// Test that TruncatingStringPrintf truncates an overly long result.
+// The original ::base::StringPrintf does not truncate; instead, it allocates
+// memory and returns the entire result.
+TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfTruncatesResult) {
+  std::vector<char> buffer;
+  buffer.resize(kMaxLengthOfTruncatingStringPrintfResult + 1);
+  std::fill(buffer.begin(), buffer.end(), 'a');
+  buffer.push_back('\0');
+  std::string result = TruncatingStringPrintf("%s", buffer.data());
+  EXPECT_EQ(kMaxLengthOfTruncatingStringPrintfResult, result.length());
+  EXPECT_EQ(std::string::npos, result.find_first_not_of('a'));
+}
+
+// Test that TruncatingStringPrintf does not change errno.
+TEST(PartitionAllocStringPrintfTest, TruncatingStringPrintfErrno) {
+  errno = 1;
+  EXPECT_EQ("", TruncatingStringPrintf("%s", ""));
+  EXPECT_EQ(1, errno);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info.h
new file mode 100644
index 0000000..d9e12ff
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info.h
@@ -0,0 +1,29 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal::base {
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) SysInfo {
+ public:
+  // Retrieves detailed numeric values for the OS version.
+  // DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
+  // for OS version-specific feature checks and workarounds. If you must use an
+  // OS version check instead of a feature check, use
+  // base::mac::MacOSMajorVersion() from base/mac/mac_util.h, or
+  // base::win::GetVersion() from base/win/windows_version.h.
+  static void OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version);
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info_ios.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info_ios.mm
new file mode 100644
index 0000000..324af9d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info_ios.mm
@@ -0,0 +1,24 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+
+namespace partition_alloc::internal::base {
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  NSOperatingSystemVersion version =
+      [[NSProcessInfo processInfo] operatingSystemVersion];
+  *major_version = saturated_cast<int32_t>(version.majorVersion);
+  *minor_version = saturated_cast<int32_t>(version.minorVersion);
+  *bugfix_version = saturated_cast<int32_t>(version.patchVersion);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info_mac.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info_mac.mm
new file mode 100644
index 0000000..b420af9
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info_mac.mm
@@ -0,0 +1,24 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/system/sys_info.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+
+namespace partition_alloc::internal::base {
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  NSOperatingSystemVersion version =
+      NSProcessInfo.processInfo.operatingSystemVersion;
+  *major_version = saturated_cast<int32_t>(version.majorVersion);
+  *minor_version = saturated_cast<int32_t>(version.minorVersion);
+  *bugfix_version = saturated_cast<int32_t>(version.patchVersion);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h
new file mode 100644
index 0000000..61f1348
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h
@@ -0,0 +1,264 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// Note that no analysis is done inside constructors and destructors,
+// regardless of what attributes are used. See
+// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-checking-inside-constructors-and-destructors
+// for details.
+//
+// Note that the annotations we use are described as deprecated in the Clang
+// documentation, linked below. E.g. we use PA_EXCLUSIVE_LOCKS_REQUIRED where
+// the Clang docs use REQUIRES.
+//
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//
+// We use the deprecated Clang annotations to match Abseil (relevant header
+// linked below) and its ecosystem of libraries. We will follow Abseil with
+// respect to upgrading to more modern annotations.
+//
+// https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "build/build_config.h"
+
+#if defined(__clang__)
+#define PA_THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define PA_THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+// PA_GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. PA_GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Example:
+//
+//   Mutex mu;
+//   int p1 PA_GUARDED_BY(mu);
+#define PA_GUARDED_BY(x) PA_THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PA_PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+//   Mutex mu;
+//   int *p1 PA_PT_GUARDED_BY(mu);
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+//     // `q`, guarded by `mu1`, points to a shared memory location that is
+//     // guarded by `mu2`:
+//     int *q PA_GUARDED_BY(mu1) PA_PT_GUARDED_BY(mu2);
+#define PA_PT_GUARDED_BY(x) PA_THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// PA_ACQUIRED_AFTER() / PA_ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both PA_ACQUIRED_AFTER
+// and PA_ACQUIRED_BEFORE.)
+//
+// Example:
+//
+//   Mutex m1;
+//   Mutex m2 PA_ACQUIRED_AFTER(m1);
+#define PA_ACQUIRED_AFTER(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define PA_ACQUIRED_BEFORE(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// PA_EXCLUSIVE_LOCKS_REQUIRED() / PA_SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// Example:
+//
+//   Mutex mu1, mu2;
+//   int a PA_GUARDED_BY(mu1);
+//   int b PA_GUARDED_BY(mu2);
+//
+//   void foo() PA_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... };
+#define PA_EXCLUSIVE_LOCKS_REQUIRED(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define PA_SHARED_LOCKS_REQUIRED(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// PA_LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
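+//
+// Example (illustrative sketch; `Mutex`, `mu`, and `Flush()` are placeholder
+// names):
+//
+//   Mutex mu;
+//
+//   void Flush() PA_LOCKS_EXCLUDED(mu) {
+//     mu.Acquire();
+//     // ... work that needs the lock ...
+//     mu.Release();
+//   }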
+#define PA_LOCKS_EXCLUDED(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// PA_LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it.  For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with PA_LOCK_RETURNED.
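+//
+// Example (illustrative sketch; `Widget` and `mu_` are placeholder names):
+//
+//   class Widget {
+//    public:
+//     Mutex* mutex() PA_LOCK_RETURNED(mu_) { return &mu_; }
+//
+//    private:
+//     Mutex mu_;
+//   };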
+#define PA_LOCK_RETURNED(x) PA_THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// PA_LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define PA_LOCKABLE PA_THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// PA_SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `PA_*_LOCK_FUNCTION()` to specify the mutex that
+// is acquired, and the destructor should use `PA_UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
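+//
+// Example (illustrative sketch mirroring the AutoLock class in the
+// accompanying unit test):
+//
+//   class PA_SCOPED_LOCKABLE AutoLock {
+//    public:
+//     explicit AutoLock(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock);
+//     ~AutoLock() PA_UNLOCK_FUNCTION();
+//   };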
+#define PA_SCOPED_LOCKABLE PA_THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// PA_EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define PA_EXCLUSIVE_LOCK_FUNCTION(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// PA_SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define PA_SHARED_LOCK_FUNCTION(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// PA_UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
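+//
+// Example (illustrative sketch; `Mutex` is a placeholder name):
+//
+//   class PA_LOCKABLE Mutex {
+//    public:
+//     void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION();
+//     void Release() PA_UNLOCK_FUNCTION();
+//   };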
+#define PA_UNLOCK_FUNCTION(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// PA_EXCLUSIVE_TRYLOCK_FUNCTION() / PA_SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
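+//
+// Example (illustrative sketch; `Mutex` and `TryAcquire()` are placeholder
+// names):
+//
+//   class PA_LOCKABLE Mutex {
+//    public:
+//     bool TryAcquire() PA_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+//   };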
+#define PA_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define PA_SHARED_TRYLOCK_FUNCTION(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// PA_ASSERT_EXCLUSIVE_LOCK() / PA_ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define PA_ASSERT_EXCLUSIVE_LOCK(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define PA_ASSERT_SHARED_LOCK(...) \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// PA_NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#define PA_NO_THREAD_SAFETY_ANALYSIS \
+  PA_THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// PA_TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes.  These
+// annotations will be ignored by the analysis.
+#define PA_TS_UNCHECKED(x) ""
+
+// PA_TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to PA_TS_UNCHECKED.
+#define PA_TS_FIXME(x) ""
+
+// Like PA_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function.  However, this attribute is used to mark functions
+// that are incorrect and need to be fixed.  It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define PA_NO_THREAD_SAFETY_ANALYSIS_FIXME PA_NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to PA_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a
+// PA_GUARDED_BY annotation that needs to be fixed because it is producing a
+// thread safety warning.  It disables the PA_GUARDED_BY.
+#define PA_GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation.  This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
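+//
+// Example (illustrative sketch; `counter_` is a placeholder member that is
+// guarded by some lock):
+//
+//   int racy_snapshot = PA_TS_UNCHECKED_READ(counter_);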
+#define PA_TS_UNCHECKED_READ(x) \
+  partition_alloc::internal::thread_safety_analysis::ts_unchecked_read(x)
+
+namespace partition_alloc::internal::thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) PA_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) PA_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace partition_alloc::internal::thread_safety_analysis
+
+// The above is imported as-is from abseil-cpp. The following Chromium-specific
+// synonyms are added for Chromium concepts (SequenceChecker/ThreadChecker).
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+// Equivalent to PA_GUARDED_BY for SequenceChecker/ThreadChecker.
+#define PA_GUARDED_BY_CONTEXT(name) PA_GUARDED_BY(name)
+
+// Equivalent to PA_EXCLUSIVE_LOCKS_REQUIRED for SequenceChecker/ThreadChecker.
+#define PA_VALID_CONTEXT_REQUIRED(name) PA_EXCLUSIVE_LOCKS_REQUIRED(name)
+
+#else  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_GUARDED_BY_CONTEXT(name)
+#define PA_VALID_CONTEXT_REQUIRED(name)
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREAD_ANNOTATIONS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.cc
new file mode 100644
index 0000000..5384e47
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class PA_LOCKABLE Lock {
+ public:
+  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {}
+  void Release() PA_UNLOCK_FUNCTION() {}
+};
+
+class PA_SCOPED_LOCKABLE AutoLock {
+ public:
+  AutoLock(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {
+    lock.Acquire();
+  }
+  ~AutoLock() PA_UNLOCK_FUNCTION() { lock_.Release(); }
+
+ private:
+  Lock& lock_;
+};
+
+class ThreadSafe {
+ public:
+  void ExplicitIncrement();
+  void ImplicitIncrement();
+
+ private:
+  Lock lock_;
+  int counter_ PA_GUARDED_BY(lock_) = 0;
+};
+
+void ThreadSafe::ExplicitIncrement() {
+  lock_.Acquire();
+  ++counter_;
+  lock_.Release();
+}
+
+void ThreadSafe::ImplicitIncrement() {
+  AutoLock auto_lock(lock_);
+  counter_++;
+}
+
+TEST(PartitionAllocThreadAnnotationsTest, ExplicitIncrement) {
+  ThreadSafe thread_safe;
+  thread_safe.ExplicitIncrement();
+}
+TEST(PartitionAllocThreadAnnotationsTest, ImplicitIncrement) {
+  ThreadSafe thread_safe;
+  thread_safe.ImplicitIncrement();
+}
+
+}  // anonymous namespace
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.nc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.nc
new file mode 100644
index 0000000..f056a47
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations_pa_unittest.nc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// https://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+
+namespace {
+
+class PA_LOCKABLE Lock {
+ public:
+  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {}
+  void Release() PA_UNLOCK_FUNCTION() {}
+};
+
+class PA_SCOPED_LOCKABLE AutoLock {
+ public:
+  AutoLock(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {
+    lock.Acquire();
+  }
+  ~AutoLock() PA_UNLOCK_FUNCTION() { lock_.Release(); }
+
+ private:
+  Lock& lock_;
+};
+class ThreadSafe {
+ public:
+  void BuggyIncrement();
+ private:
+  Lock lock_;
+  int counter_ PA_GUARDED_BY(lock_);
+};
+
+#if defined(NCTEST_LOCK_WITHOUT_UNLOCK)  // [r"fatal error: mutex 'lock_' is still held at the end of function"]
+
+void ThreadSafe::BuggyIncrement() {
+  lock_.Acquire();
+  ++counter_;
+  // Forgot to release the lock.
+}
+
+#elif defined(NCTEST_ACCESS_WITHOUT_LOCK)  // [r"fatal error: writing variable 'counter_' requires holding mutex 'lock_' exclusively"]
+
+void ThreadSafe::BuggyIncrement() {
+  // Member access without holding the lock guarding it.
+  ++counter_;
+}
+
+#elif defined(NCTEST_ACCESS_WITHOUT_SCOPED_LOCK)  // [r"fatal error: writing variable 'counter_' requires holding mutex 'lock_' exclusively"]
+
+void ThreadSafe::BuggyIncrement() {
+  {
+    AutoLock auto_lock(lock_);
+    // The AutoLock will go out of scope before the guarded member access.
+  }
+  ++counter_;
+}
+
+#elif defined(NCTEST_GUARDED_BY_WRONG_TYPE)  // [r"fatal error: 'guarded_by' attribute requires arguments whose type is annotated"]
+
+int not_lockable;
+int global_counter PA_GUARDED_BY(not_lockable);
+
+// Defined to avoid link error.
+void ThreadSafe::BuggyIncrement() { }
+
+#endif
+
+}  // anonymous namespace
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.cc
new file mode 100644
index 0000000..8f18536
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.cc
@@ -0,0 +1,30 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+
+namespace partition_alloc::internal::base {
+
+namespace {
+
+// SetThreadNameHook is invoked by EnablePCScan(). EnablePCScan() will be
+// invoked soon after running RunBrowser, RunZygote, and RunContentProcess.
+// So g_set_thread_name_proc can be non-atomic.
+SetThreadNameProc g_set_thread_name_proc = nullptr;
+
+}  // namespace
+
+void PlatformThread::SetThreadNameHook(SetThreadNameProc hook) {
+  g_set_thread_name_proc = hook;
+}
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+  if (!g_set_thread_name_proc) {
+    return;
+  }
+  g_set_thread_name_proc(name);
+}
+
+}  // namespace partition_alloc::internal::base
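As a usage sketch (not part of this patch), an embedder could route PartitionAlloc's internal thread naming through its own facility as below; MyNameThread() and the installation function are hypothetical names.

#include <string>

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"

void MyNameThread(const std::string& name) {
  // Forward to the embedder's own thread-naming mechanism here.
}

void InstallPartitionAllocThreadNameHook() {
  using partition_alloc::internal::base::PlatformThread;
  PlatformThread::SetThreadNameHook(&MyNameThread);
  // From now on, calls to PlatformThread::SetName() inside PartitionAlloc are
  // forwarded to MyNameThread(); without a hook installed, SetName() is a
  // no-op.
}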
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h
new file mode 100644
index 0000000..043c4b3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h
@@ -0,0 +1,110 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should *NOT* be using this class directly.  PlatformThread is
+// the low-level platform-specific abstraction to the OS's threading interface.
+// You should instead be using a message-loop driven Thread, see thread.h.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_ref.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+#elif BUILDFLAG(IS_FUCHSIA)
+#include <zircon/types.h>
+#elif BUILDFLAG(IS_APPLE)
+#include <mach/mach_types.h>
+#elif BUILDFLAG(IS_POSIX)
+#include <pthread.h>
+#include <unistd.h>
+#endif
+
+namespace partition_alloc::internal::base {
+
+// Used for logging. Always an integer value.
+#if BUILDFLAG(IS_WIN)
+typedef DWORD PlatformThreadId;
+#elif BUILDFLAG(IS_FUCHSIA)
+typedef zx_handle_t PlatformThreadId;
+#elif BUILDFLAG(IS_APPLE)
+typedef mach_port_t PlatformThreadId;
+#elif BUILDFLAG(IS_POSIX)
+typedef pid_t PlatformThreadId;
+#endif
+
+// Used to operate on threads.
+class PlatformThreadHandle {
+ public:
+#if BUILDFLAG(IS_WIN)
+  typedef void* Handle;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  typedef pthread_t Handle;
+#endif
+
+  constexpr PlatformThreadHandle() : handle_(0) {}
+
+  explicit constexpr PlatformThreadHandle(Handle handle) : handle_(handle) {}
+
+  bool is_equal(const PlatformThreadHandle& other) const {
+    return handle_ == other.handle_;
+  }
+
+  bool is_null() const { return !handle_; }
+
+  Handle platform_handle() const { return handle_; }
+
+ private:
+  Handle handle_;
+};
+
+const PlatformThreadId kInvalidThreadId(0);
+
+typedef void (*SetThreadNameProc)(const std::string&);
+
+// A namespace for low-level thread functions.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) PlatformThread {
+ public:
+  PlatformThread() = delete;
+  PlatformThread(const PlatformThread&) = delete;
+  PlatformThread& operator=(const PlatformThread&) = delete;
+
+  // Gets the current thread id, which may be useful for logging purposes.
+  static PlatformThreadId CurrentId();
+
+  // Gets the current thread reference, which can be used to check if
+  // we're on the right thread quickly.
+  static PlatformThreadRef CurrentRef();
+
+  // Get the handle representing the current thread. On Windows, this is a
+  // pseudo handle constant which will always represent the thread using it and
+  // hence should not be shared with other threads nor be used to differentiate
+  // the current thread from another.
+  static PlatformThreadHandle CurrentHandle();
+
+  // Sleeps for the specified duration (real-time; ignores time overrides).
+  // Note: The sleep duration may be in base::Time or base::TimeTicks, depending
+  // on platform. If you're looking to use this in unit tests testing delayed
+  // tasks, this will be unreliable - instead, use
+  // base::test::TaskEnvironment with MOCK_TIME mode.
+  static void Sleep(TimeDelta duration);
+
+  // Sets the thread name visible to debuggers/tools. This will try to
+  // initialize the context for the current thread unless it's a WorkerThread.
+  static void SetName(const std::string& name);
+
+  static void SetThreadNameHook(SetThreadNameProc hook);
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_H_
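A short usage sketch (illustrative only, not part of the patch) of the static helpers declared above:

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"

void ExampleUsage() {
  using partition_alloc::internal::base::Milliseconds;
  using partition_alloc::internal::base::PlatformThread;
  using partition_alloc::internal::base::PlatformThreadRef;

  // Identify the current thread, e.g. for logging.
  [[maybe_unused]] auto tid = PlatformThread::CurrentId();

  // Cache a ref now; compare against CurrentRef() later to verify that code
  // still runs on the same thread.
  PlatformThreadRef creation_ref = PlatformThread::CurrentRef();
  [[maybe_unused]] bool same_thread =
      creation_ref == PlatformThread::CurrentRef();

  // Block the calling thread for roughly 10 ms of real time (mock time is
  // ignored; see the comment on Sleep() above).
  PlatformThread::Sleep(Milliseconds(10));
}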
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_android_for_testing.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_android_for_testing.cc
new file mode 100644
index 0000000..7a846b1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_android_for_testing.cc
@@ -0,0 +1,30 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+
+#include <pthread.h>
+#include <stddef.h>
+
+namespace partition_alloc::internal::base {
+
+void InitThreading() {}
+
+void TerminateOnThread() {
+  // partition alloc tests don't use AttachCurrentThread(), because
+  // the tests don't set / get any thread priority. So no need to do
+  // "base::android::DetachFromVM();" here.
+}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if !defined(ADDRESS_SANITIZER)
+  return 0;
+#else
+  // AddressSanitizer bloats the stack approximately 2x. Default stack size of
+  // 1Mb is not enough for some tests (see http://crbug.com/263749 for example).
+  return 2 * (1 << 20);  // 2Mb
+#endif
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_apple_for_testing.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_apple_for_testing.mm
new file mode 100644
index 0000000..77374bc
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_apple_for_testing.mm
@@ -0,0 +1,91 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+
+#import <Foundation/Foundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/thread_policy.h>
+#include <mach/thread_switch.h>
+#include <stddef.h>
+#include <sys/resource.h>
+
+#include <algorithm>
+#include <atomic>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+// If Foundation is to be used on more than one thread, it must know that the
+// application is multithreaded.  Since it's possible to enter Foundation code
+// from threads created by pthread_create, Foundation won't necessarily
+// be aware that the application is multithreaded.  Spawning an NSThread is
+// enough to get Foundation to set up for multithreaded operation, so this is
+// done if necessary before pthread_create spawns any threads.
+//
+// https://developer.apple.com/documentation/foundation/nsthread/1410702-ismultithreaded
+void InitThreading() {
+  static BOOL multithreaded = [NSThread isMultiThreaded];
+  if (!multithreaded) {
+    // +[NSObject class] is idempotent.
+    [NSThread detachNewThreadSelector:@selector(class)
+                             toTarget:[NSObject class]
+                           withObject:nil];
+    multithreaded = YES;
+
+    PA_BASE_DCHECK([NSThread isMultiThreaded]);
+  }
+}
+
+// static
+void PlatformThreadForTesting::YieldCurrentThread() {
+  // Don't use sched_yield(), as it can lead to 10ms delays.
+  //
+  // This only depresses the thread priority for 1ms, which is more in line
+  // with what calling code likely wants. See this bug in webkit for context:
+  // https://bugs.webkit.org/show_bug.cgi?id=204871
+  mach_msg_timeout_t timeout_ms = 1;
+  thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, timeout_ms);
+}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if BUILDFLAG(IS_IOS)
+  return 0;
+#else
+  // The macOS default for a pthread stack size is 512kB.
+  // Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
+  // DEFAULT_STACK_SIZE for this purpose.
+  //
+  // 512kB isn't quite generous enough for some deeply recursive threads that
+  // otherwise request the default stack size by specifying 0. Here, adopt
+  // glibc's behavior as on Linux, which is to use the current stack size
+  // limit (ulimit -s) as the default stack size. See
+  // glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
+  // avoid setting the limit below the macOS default or the minimum usable
+  // stack size, these values are also considered. If any of these values
+  // can't be determined, or if stack size is unlimited (ulimit -s unlimited),
+  // stack_size is left at 0 to get the system default.
+  //
+  // macOS normally only applies ulimit -s to the main thread stack. On
+  // contemporary macOS and Linux systems alike, this value is generally 8MB
+  // or in that neighborhood.
+  size_t default_stack_size = 0;
+  struct rlimit stack_rlimit;
+  if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
+      getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
+      stack_rlimit.rlim_cur != RLIM_INFINITY) {
+    default_stack_size = std::max(
+        std::max(default_stack_size, static_cast<size_t>(PTHREAD_STACK_MIN)),
+        static_cast<size_t>(stack_rlimit.rlim_cur));
+  }
+  return default_stack_size;
+#endif
+}
+
+void TerminateOnThread() {}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h
new file mode 100644
index 0000000..b1fd67d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h
@@ -0,0 +1,73 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should *NOT* be using this class directly.  PlatformThread is
+// the low-level platform-specific abstraction to the OS's threading interface.
+// You should instead be using a message-loop driven Thread, see thread.h.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_FOR_TESTING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_FOR_TESTING_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+// A namespace for low-level thread functions.
+class PlatformThreadForTesting : public PlatformThread {
+ public:
+  // Implement this interface to run code on a background thread.  Your
+  // ThreadMain method will be called on the newly created thread.
+  class Delegate {
+   public:
+    virtual void ThreadMain() = 0;
+
+   protected:
+    virtual ~Delegate() = default;
+  };
+
+  PlatformThreadForTesting() = delete;
+  PlatformThreadForTesting(const PlatformThreadForTesting&) = delete;
+  PlatformThreadForTesting& operator=(const PlatformThreadForTesting&) = delete;
+
+  // Yield the current thread so another thread can be scheduled.
+  //
+  // Note: this is likely not the right call to make in most situations. If this
+  // is part of a spin loop, consider base::Lock, which likely has better tail
+  // latency. Yielding the thread has different effects depending on the
+  // platform, system load, etc., and can result in yielding the CPU for less
+  // than 1us, or many tens of ms.
+  static void YieldCurrentThread();
+
+  // Creates a new thread.  The `stack_size` parameter can be 0 to indicate
+  // that the default stack size should be used.  Upon success,
+  // `*thread_handle` will be assigned a handle to the newly created thread,
+  // and `delegate`'s ThreadMain method will be executed on the newly created
+  // thread.
+  // NOTE: When you are done with the thread handle, you must call Join to
+  // release system resources associated with the thread.  You must ensure that
+  // the Delegate object outlives the thread.
+  static bool Create(size_t stack_size,
+                     Delegate* delegate,
+                     PlatformThreadHandle* thread_handle);
+
+  // Joins with a thread created via the Create function.  This function blocks
+  // the caller until the designated thread exits.  This will invalidate
+  // `thread_handle`.
+  static void Join(PlatformThreadHandle thread_handle);
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  // Returns the default thread stack size set by chrome. If we do not
+  // explicitly set the default size, this returns 0.
+  static size_t GetDefaultThreadStackSize();
+#endif
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_FOR_TESTING_H_
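A usage sketch (illustrative, not part of the patch) of the Delegate/Create/Join contract above; BusyWork is a hypothetical delegate.

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"

namespace {

using partition_alloc::internal::base::PlatformThreadForTesting;
using partition_alloc::internal::base::PlatformThreadHandle;

class BusyWork : public PlatformThreadForTesting::Delegate {
 public:
  void ThreadMain() override {
    // Work to run on the new thread, e.g. allocate and free to exercise
    // per-thread caches.
  }
};

void RunOnBackgroundThread() {
  BusyWork delegate;  // Must outlive the thread (see NOTE above).
  PlatformThreadHandle handle;
  if (PlatformThreadForTesting::Create(/*stack_size=*/0, &delegate, &handle)) {
    // Join releases the system resources associated with |handle|.
    PlatformThreadForTesting::Join(handle);
  }
}

}  // namespace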
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc
new file mode 100644
index 0000000..8454d37
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+
+#include <pthread.h>
+#include <stddef.h>
+
+namespace partition_alloc::internal::base {
+
+void InitThreading() {}
+
+void TerminateOnThread() {}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+  return 0;
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h
new file mode 100644
index 0000000..3c53be4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h
@@ -0,0 +1,24 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base::internal {
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+// Current thread id is cached in thread local storage for performance reasons.
+// In some rare cases it's important to invalidate that cache explicitly (e.g.
+// after going through clone() syscall which does not call pthread_atfork()
+// handlers).
+// This can only be called when the process is single-threaded.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) void InvalidateTidCache();
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+}  // namespace partition_alloc::internal::base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
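A sketch (illustrative only, Linux/ChromeOS, not part of the patch) of the situation this function exists for: a child created by a raw clone() skips pthread_atfork handlers, so it must invalidate the cached tid itself while it is still single-threaded. The clone arguments shown are the fork()-equivalent form and are an assumption of this sketch.

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"

pid_t ForkViaRawClone() {
  // Behaves like fork(); pthread_atfork handlers are not run for the child.
  pid_t child = static_cast<pid_t>(syscall(SYS_clone, SIGCHLD, nullptr));
  if (child == 0) {
    // In the child, which is single-threaded at this point, as required.
    partition_alloc::internal::base::internal::InvalidateTidCache();
  }
  // The parent's cached tid is still valid; only the child's tid changed.
  return child;
}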
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_linux_for_testing.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_linux_for_testing.cc
new file mode 100644
index 0000000..0f0e4b4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_linux_for_testing.cc
@@ -0,0 +1,26 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+
+#include <pthread.h>
+#include <stddef.h>
+
+namespace partition_alloc::internal::base {
+
+void InitThreading() {}
+
+void TerminateOnThread() {}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if !defined(THREAD_SANITIZER)
+  return 0;
+#else   // defined(THREAD_SANITIZER)
+  // ThreadSanitizer bloats the stack heavily. Evidence has been that the
+  // default stack size isn't enough for some browser tests.
+  return 2 * (1 << 23);  // 2 times 8192K (the default stack size on Linux).
+#endif  // defined(THREAD_SANITIZER)
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc
new file mode 100644
index 0000000..eaa6d4a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc
@@ -0,0 +1,147 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#include <sys/syscall.h>
+#include <atomic>
+#endif
+
+#if BUILDFLAG(IS_FUCHSIA)
+#include <zircon/process.h>
+#endif
+
+namespace partition_alloc::internal::base {
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+namespace {
+
+// Store the thread ids in local storage since calling the SWI can be
+// expensive and PlatformThread::CurrentId is used liberally.
+thread_local pid_t g_thread_id = -1;
+
+// A boolean value that indicates that the value stored in |g_thread_id| on the
+// main thread is invalid, because it hasn't been updated since the process
+// forked.
+//
+// This used to work by setting |g_thread_id| to -1 in a pthread_atfork handler.
+// However, when a multithreaded process forks, it is only allowed to call
+// async-signal-safe functions until it calls an exec() syscall. Unfortunately,
+// accessing TLS may allocate (see crbug.com/1275748), which is not
+// async-signal-safe and therefore causes deadlocks, corruption, and crashes.
+//
+// It's Atomic to placate TSAN.
+std::atomic<bool> g_main_thread_tid_cache_valid = false;
+
+// Tracks whether the current thread is the main thread, and therefore whether
+// |g_main_thread_tid_cache_valid| is relevant for the current thread. This is
+// also updated by PlatformThread::CurrentId().
+thread_local bool g_is_main_thread = true;
+
+class InitAtFork {
+ public:
+  InitAtFork() {
+    pthread_atfork(nullptr, nullptr, internal::InvalidateTidCache);
+  }
+};
+
+}  // namespace
+
+namespace internal {
+
+void InvalidateTidCache() {
+  g_main_thread_tid_cache_valid.store(false, std::memory_order_relaxed);
+}
+
+}  // namespace internal
+
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+  // Pthreads doesn't have the concept of a thread ID, so we have to reach down
+  // into the kernel.
+#if BUILDFLAG(IS_APPLE)
+  return pthread_mach_thread_np(pthread_self());
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+  static InitAtFork init_at_fork;
+  if (g_thread_id == -1 ||
+      (g_is_main_thread &&
+       !g_main_thread_tid_cache_valid.load(std::memory_order_relaxed))) {
+    // Update the cached tid.
+    g_thread_id = syscall(__NR_gettid);
+    // If this is the main thread, we can mark the tid_cache as valid.
+    // Otherwise, stop the current thread from always entering this slow path.
+    if (g_thread_id == getpid()) {
+      g_main_thread_tid_cache_valid.store(true, std::memory_order_relaxed);
+    } else {
+      g_is_main_thread = false;
+    }
+  } else {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    if (g_thread_id != syscall(__NR_gettid)) {
+      PA_RAW_LOG(
+          FATAL,
+          "Thread id stored in TLS is different from thread id returned by "
+          "the system. It is likely that the process was forked without going "
+          "through fork().");
+    }
+#endif
+  }
+  return g_thread_id;
+#elif BUILDFLAG(IS_ANDROID)
+  // Note: do not cache the return value inside a thread_local variable on
+  // Android (as above). The reasons are:
+  // - thread_local is slow on Android (goes through emutls)
+  // - gettid() is fast, since its return value is cached in pthread (in the
+  //   thread control block of pthread). See gettid.c in bionic.
+  return gettid();
+#elif BUILDFLAG(IS_FUCHSIA)
+  return zx_thread_self();
+#elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX)
+  return pthread_self();
+#elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX)
+  return pthread_self();
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX)
+  return reinterpret_cast<int64_t>(pthread_self());
+#endif
+}
+
+// static
+PlatformThreadRef PlatformThread::CurrentRef() {
+  return PlatformThreadRef(pthread_self());
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+  struct timespec sleep_time, remaining;
+
+  // Break the duration into seconds and nanoseconds.
+  // NOTE: TimeDelta's microseconds are int64s while timespec's
+  // nanoseconds are longs, so this unpacking must prevent overflow.
+  sleep_time.tv_sec = duration.InSeconds();
+  duration -= Seconds(sleep_time.tv_sec);
+  sleep_time.tv_nsec = duration.InMicroseconds() * 1000;  // nanoseconds
+
+  while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR) {
+    sleep_time = remaining;
+  }
+}
+
+}  // namespace partition_alloc::internal::base
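The retry loop in Sleep() above follows the standard nanosleep()/EINTR idiom; in isolation it looks like this (illustrative sketch):

#include <errno.h>
#include <time.h>

void SleepForFullDuration(timespec requested) {
  timespec remaining;
  // nanosleep() may be interrupted by a signal before the full duration has
  // elapsed; it then reports the unslept remainder, which is simply
  // resubmitted until the wait completes.
  while (nanosleep(&requested, &remaining) == -1 && errno == EINTR) {
    requested = remaining;
  }
}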
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix_for_testing.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix_for_testing.cc
new file mode 100644
index 0000000..250de61
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix_for_testing.cc
@@ -0,0 +1,149 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <memory>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_FUCHSIA)
+#include <zircon/process.h>
+#else
+#include <sys/resource.h>
+#endif
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+#endif
+
+namespace partition_alloc::internal::base {
+
+void InitThreading();
+void TerminateOnThread();
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
+
+namespace {
+
+struct ThreadParams {
+  PlatformThreadForTesting::Delegate* delegate = nullptr;
+};
+
+void* ThreadFunc(void* params) {
+  PlatformThreadForTesting::Delegate* delegate = nullptr;
+
+  {
+    std::unique_ptr<ThreadParams> thread_params(
+        static_cast<ThreadParams*>(params));
+
+    delegate = thread_params->delegate;
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+    PCScan::NotifyThreadCreated(GetStackPointer());
+#endif
+  }
+
+  delegate->ThreadMain();
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+  PCScan::NotifyThreadDestroyed();
+#endif
+
+  TerminateOnThread();
+  return nullptr;
+}
+
+bool CreateThread(size_t stack_size,
+                  PlatformThreadForTesting::Delegate* delegate,
+                  PlatformThreadHandle* thread_handle) {
+  PA_BASE_DCHECK(thread_handle);
+  base::InitThreading();
+
+  pthread_attr_t attributes;
+  pthread_attr_init(&attributes);
+
+  // Get a better default if available.
+  if (stack_size == 0) {
+    stack_size = base::GetDefaultThreadStackSize(attributes);
+  }
+
+  if (stack_size > 0) {
+    pthread_attr_setstacksize(&attributes, stack_size);
+  }
+
+  std::unique_ptr<ThreadParams> params(new ThreadParams);
+  params->delegate = delegate;
+
+  pthread_t handle;
+  int err = pthread_create(&handle, &attributes, ThreadFunc, params.get());
+  bool success = !err;
+  if (success) {
+    // ThreadParams should be deleted on the created thread after use.
+    std::ignore = params.release();
+  } else {
+    // Value of |handle| is undefined if pthread_create fails.
+    handle = 0;
+    errno = err;
+    PA_PLOG(ERROR) << "pthread_create";
+  }
+  *thread_handle = PlatformThreadHandle(handle);
+
+  pthread_attr_destroy(&attributes);
+
+  return success;
+}
+
+}  // namespace
+
+#if !BUILDFLAG(IS_APPLE)
+// static
+void PlatformThreadForTesting::YieldCurrentThread() {
+  sched_yield();
+}
+#endif  // !BUILDFLAG(IS_APPLE)
+
+// static
+bool PlatformThreadForTesting::Create(size_t stack_size,
+                                      Delegate* delegate,
+                                      PlatformThreadHandle* thread_handle) {
+  return CreateThread(stack_size, delegate, thread_handle);
+}
+
+// static
+void PlatformThreadForTesting::Join(PlatformThreadHandle thread_handle) {
+  // Joining another thread may block the current thread for a long time, since
+  // the thread referred to by |thread_handle| may still be running long-lived /
+  // blocking tasks.
+
+  // Remove ScopedBlockingCallWithBaseSyncPrimitives, because only partition
+  // alloc tests use PlatformThread::Join. So there is no special requirement
+  // to monitor blocking calls
+  // (by using ThreadGroupImpl::WorkerThreadDelegateImpl).
+  //
+  // base::internal::ScopedBlockingCallWithBaseSyncPrimitives
+  //   scoped_blocking_call(base::BlockingType::MAY_BLOCK);
+  PA_BASE_CHECK(0 == pthread_join(thread_handle.platform_handle(), nullptr));
+}
+
+// static
+size_t PlatformThreadForTesting::GetDefaultThreadStackSize() {
+  pthread_attr_t attributes;
+  pthread_attr_init(&attributes);
+  return base::GetDefaultThreadStackSize(attributes);
+}
+
+}  // namespace partition_alloc::internal::base
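The heap-allocated ThreadParams handoff in CreateThread() above is a common pattern; reduced to its core it looks like this (illustrative sketch with a hypothetical Params type):

#include <pthread.h>

#include <memory>
#include <tuple>

struct Params {
  int payload = 0;
};

void* Run(void* raw) {
  // The new thread takes ownership and frees the params when done with them.
  std::unique_ptr<Params> params(static_cast<Params*>(raw));
  // ... use params->payload ...
  return nullptr;
}

bool Start(pthread_t* handle) {
  auto params = std::make_unique<Params>();
  if (pthread_create(handle, nullptr, &Run, params.get()) != 0) {
    return false;  // Creation failed: |params| is still owned and freed here.
  }
  std::ignore = params.release();  // Ownership passed to the new thread.
  return true;
}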
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_ref.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_ref.h
new file mode 100644
index 0000000..47d40a8
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_ref.h
@@ -0,0 +1,56 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: *DO NOT* use this class directly. base::PlatformThreadRef is a
+// low-level platform-specific abstraction to the OS's threading interface.
+// Instead, consider using a message-loop driven base::Thread, see
+// base/threading/thread.h.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_REF_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_REF_H_
+
+#include <iosfwd>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+namespace partition_alloc::internal::base {
+
+// Used for thread checking and debugging.
+// Meant to be as fast as possible.
+// These are produced by PlatformThread::CurrentRef(), and used to later
+// check if we are on the same thread or not by using ==. These are safe
+// to copy between threads, but can't be copied to another process as they
+// have no meaning there. Also, the internal identifier can be re-used
+// after a thread dies, so a PlatformThreadRef cannot be reliably used
+// to distinguish a new thread from an old, dead thread.
+class PlatformThreadRef {
+ public:
+#if BUILDFLAG(IS_WIN)
+  using RefType = DWORD;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  using RefType = pthread_t;
+#endif
+
+  constexpr PlatformThreadRef() = default;
+  explicit constexpr PlatformThreadRef(RefType id) : id_(id) {}
+
+  bool operator==(PlatformThreadRef other) const { return id_ == other.id_; }
+  bool operator!=(PlatformThreadRef other) const { return id_ != other.id_; }
+
+  bool is_null() const { return id_ == 0; }
+
+ private:
+  RefType id_ = 0;
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_THREADING_PLATFORM_THREAD_REF_H_
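A usage sketch (illustrative, not part of the patch): cache a ref at construction time and compare later, which is the core of a CalledOnValidThread()-style check. This assumes platform_thread.h is included for CurrentRef().

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"

class BoundToCreationThread {
 public:
  BoundToCreationThread()
      : owning_thread_(
            partition_alloc::internal::base::PlatformThread::CurrentRef()) {}

  bool CalledOnValidThread() const {
    return owning_thread_ ==
           partition_alloc::internal::base::PlatformThread::CurrentRef();
  }

 private:
  const partition_alloc::internal::base::PlatformThreadRef owning_thread_;
};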
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win.cc
new file mode 100644
index 0000000..c2e9028
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win.cc
@@ -0,0 +1,43 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+
+#include <windows.h>
+
+namespace partition_alloc::internal::base {
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+  return ::GetCurrentThreadId();
+}
+
+// static
+PlatformThreadRef PlatformThread::CurrentRef() {
+  return PlatformThreadRef(::GetCurrentThreadId());
+}
+
+// static
+PlatformThreadHandle PlatformThread::CurrentHandle() {
+  return PlatformThreadHandle(::GetCurrentThread());
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+  // When measured with a high resolution clock, Sleep() sometimes returns much
+  // too early. We may need to call it repeatedly to get the desired duration.
+  // PlatformThread::Sleep doesn't support mock-time, so this always uses
+  // real-time.
+  const TimeTicks end = subtle::TimeTicksNowIgnoringOverride() + duration;
+  for (TimeTicks now = subtle::TimeTicksNowIgnoringOverride(); now < end;
+       now = subtle::TimeTicksNowIgnoringOverride()) {
+    ::Sleep(static_cast<DWORD>((end - now).InMillisecondsRoundedUp()));
+  }
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win_for_testing.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win_for_testing.cc
new file mode 100644
index 0000000..466ff0e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_win_for_testing.cc
@@ -0,0 +1,199 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "build/build_config.h"
+
+#include <windows.h>
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+#endif
+
+namespace partition_alloc::internal::base {
+
+namespace {
+
+// base/win/scoped_handle.h is more than we need just to run partition_alloc
+// tests, so a minimal RAII wrapper is defined here instead.
+class ScopedHandle {
+ public:
+  ScopedHandle() : handle_(INVALID_HANDLE_VALUE) {}
+
+  ~ScopedHandle() {
+    if (handle_ != INVALID_HANDLE_VALUE) {
+      CloseHandle(handle_);
+    }
+    handle_ = INVALID_HANDLE_VALUE;
+  }
+
+  void Set(HANDLE handle) {
+    if (handle != handle_) {
+      if (handle_ != INVALID_HANDLE_VALUE) {
+        CloseHandle(handle_);
+      }
+      handle_ = handle;
+    }
+  }
+
+ private:
+  HANDLE handle_;
+};
+
+struct ThreadParams {
+  PlatformThreadForTesting::Delegate* delegate = nullptr;
+};
+
+DWORD __stdcall ThreadFunc(void* params) {
+  ThreadParams* thread_params = static_cast<ThreadParams*>(params);
+  PlatformThreadForTesting::Delegate* delegate = thread_params->delegate;
+
+  // Retrieve a copy of the thread handle to use as the key in the
+  // thread name mapping.
+  PlatformThreadHandle::Handle platform_handle;
+  BOOL did_dup = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+                                 GetCurrentProcess(), &platform_handle, 0,
+                                 FALSE, DUPLICATE_SAME_ACCESS);
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+  PCScan::NotifyThreadCreated(GetStackPointer());
+#endif
+
+  ScopedHandle scoped_platform_handle;
+  if (did_dup) {
+    scoped_platform_handle.Set(platform_handle);
+  }
+
+  delete thread_params;
+  delegate->ThreadMain();
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+  PCScan::NotifyThreadDestroyed();
+#endif
+  return 0;
+}
+
+// CreateThreadInternal() matches PlatformThread::CreateWithPriority(), except
+// that |out_thread_handle| may be nullptr, in which case a non-joinable thread
+// is created.
+bool CreateThreadInternal(size_t stack_size,
+                          PlatformThreadForTesting::Delegate* delegate,
+                          PlatformThreadHandle* out_thread_handle) {
+  unsigned int flags = 0;
+  if (stack_size > 0) {
+    flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+#if defined(ARCH_CPU_32_BITS)
+  } else {
+    // The process stack size is increased to give space to |RendererMain| in
+    // |chrome/BUILD.gn|, but keep the default stack size of other threads at
+    // 1 MB to limit address space pressure.
+    flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+    static BOOL is_wow64 = -1;
+    if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) {
+      is_wow64 = FALSE;
+    }
+    // When is_wow64 is set that means we are running on 64-bit Windows and we
+    // get 4 GiB of address space. In that situation we can afford to use 1 MiB
+    // of address space for stacks. When running on 32-bit Windows we only get
+    // 2 GiB of address space so we need to conserve. Typically stack usage on
+    // these threads is only about 100 KiB.
+    if (is_wow64) {
+      stack_size = 1024 * 1024;
+    } else {
+      stack_size = 512 * 1024;
+    }
+#endif
+  }
+
+  ThreadParams* params = new ThreadParams;
+  params->delegate = delegate;
+
+  // Using CreateThread here vs _beginthreadex makes thread creation a bit
+  // faster and doesn't require the loader lock to be available.  Our code will
+  // have to work running on CreateThread() threads anyway, since we run code on
+  // the Windows thread pool, etc.  For some background on the difference:
+  // http://www.microsoft.com/msj/1099/win32/win321099.aspx
+  void* thread_handle =
+      ::CreateThread(nullptr, stack_size, ThreadFunc, params, flags, nullptr);
+
+  if (!thread_handle) {
+    DWORD last_error = ::GetLastError();
+
+    switch (last_error) {
+      case ERROR_NOT_ENOUGH_MEMORY:
+      case ERROR_OUTOFMEMORY:
+      case ERROR_COMMITMENT_LIMIT:
+        TerminateBecauseOutOfMemory(stack_size);
+        break;
+
+      default:
+        break;
+    }
+
+    delete params;
+    return false;
+  }
+
+  if (out_thread_handle) {
+    *out_thread_handle = PlatformThreadHandle(thread_handle);
+  } else {
+    CloseHandle(thread_handle);
+  }
+  return true;
+}
+
+}  // namespace
+
+// static
+void PlatformThreadForTesting::YieldCurrentThread() {
+  ::Sleep(0);
+}
+
+// static
+void PlatformThreadForTesting::Join(PlatformThreadHandle thread_handle) {
+  PA_BASE_DCHECK(thread_handle.platform_handle());
+
+  const DWORD thread_id = ::GetThreadId(thread_handle.platform_handle());
+  DWORD last_error = 0;
+  if (!thread_id) {
+    last_error = ::GetLastError();
+  }
+
+  // Record information about the exiting thread in case joining hangs.
+  base::debug::Alias(&thread_id);
+  base::debug::Alias(&last_error);
+
+  // Remove ScopedBlockingCallWithBaseSyncPrimitives, because only partition
+  // alloc tests use PlatformThread::Join. So there is no special requirement
+  // to monitor blocking calls
+  // (by using ThreadGroupImpl::WorkerThreadDelegateImpl).
+  //
+  // base::internal::ScopedBlockingCallWithBaseSyncPrimitives
+  //   scoped_blocking_call(base::BlockingType::MAY_BLOCK);
+
+  // Wait for the thread to exit.  It should already have terminated but make
+  // sure this assumption is valid.
+  PA_BASE_CHECK(WAIT_OBJECT_0 ==
+                WaitForSingleObject(thread_handle.platform_handle(), INFINITE));
+  CloseHandle(thread_handle.platform_handle());
+}
+
+// static
+bool PlatformThreadForTesting::Create(size_t stack_size,
+                                      Delegate* delegate,
+                                      PlatformThreadHandle* thread_handle) {
+  PA_BASE_DCHECK(thread_handle);
+  return CreateThreadInternal(stack_size, delegate, thread_handle);
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.cc
new file mode 100644
index 0000000..184c66b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.cc
@@ -0,0 +1,280 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+#include <atomic>
+#include <cmath>
+#include <limits>
+#include <ostream>
+#include <tuple>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+
+namespace partition_alloc::internal::base {
+
+namespace internal {
+
+std::atomic<TimeNowFunction> g_time_now_function{
+    &subtle::TimeNowIgnoringOverride};
+
+std::atomic<TimeNowFunction> g_time_now_from_system_time_function{
+    &subtle::TimeNowFromSystemTimeIgnoringOverride};
+
+std::atomic<TimeTicksNowFunction> g_time_ticks_now_function{
+    &subtle::TimeTicksNowIgnoringOverride};
+
+std::atomic<ThreadTicksNowFunction> g_thread_ticks_now_function{
+    &subtle::ThreadTicksNowIgnoringOverride};
+
+}  // namespace internal
+
+// TimeDelta ------------------------------------------------------------------
+
+int TimeDelta::InDays() const {
+  if (!is_inf()) {
+    return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+  }
+  return (delta_ < 0) ? std::numeric_limits<int>::min()
+                      : std::numeric_limits<int>::max();
+}
+
+int TimeDelta::InDaysFloored() const {
+  if (!is_inf()) {
+    const int result = delta_ / Time::kMicrosecondsPerDay;
+    // Convert |result| from truncating to flooring.
+    return (result * Time::kMicrosecondsPerDay > delta_) ? (result - 1)
+                                                         : result;
+  }
+  return (delta_ < 0) ? std::numeric_limits<int>::min()
+                      : std::numeric_limits<int>::max();
+}
+
+double TimeDelta::InMillisecondsF() const {
+  if (!is_inf()) {
+    return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+  }
+  return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
+                      : std::numeric_limits<double>::infinity();
+}
+
+int64_t TimeDelta::InMilliseconds() const {
+  if (!is_inf()) {
+    return delta_ / Time::kMicrosecondsPerMillisecond;
+  }
+  return (delta_ < 0) ? std::numeric_limits<int64_t>::min()
+                      : std::numeric_limits<int64_t>::max();
+}
+
+int64_t TimeDelta::InMillisecondsRoundedUp() const {
+  if (!is_inf()) {
+    const int64_t result = delta_ / Time::kMicrosecondsPerMillisecond;
+    // Convert |result| from truncating to ceiling.
+    return (delta_ > result * Time::kMicrosecondsPerMillisecond) ? (result + 1)
+                                                                 : result;
+  }
+  return delta_;
+}
+
+double TimeDelta::InMicrosecondsF() const {
+  if (!is_inf()) {
+    return static_cast<double>(delta_);
+  }
+  return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
+                      : std::numeric_limits<double>::infinity();
+}
+
+TimeDelta TimeDelta::CeilToMultiple(TimeDelta interval) const {
+  if (is_inf() || interval.is_zero()) {
+    return *this;
+  }
+  const TimeDelta remainder = *this % interval;
+  if (delta_ < 0) {
+    return *this - remainder;
+  }
+  return remainder.is_zero() ? *this
+                             : (*this - remainder + interval.magnitude());
+}
+
+TimeDelta TimeDelta::FloorToMultiple(TimeDelta interval) const {
+  if (is_inf() || interval.is_zero()) {
+    return *this;
+  }
+  const TimeDelta remainder = *this % interval;
+  if (delta_ < 0) {
+    return remainder.is_zero() ? *this
+                               : (*this - remainder - interval.magnitude());
+  }
+  return *this - remainder;
+}
+
+TimeDelta TimeDelta::RoundToMultiple(TimeDelta interval) const {
+  if (is_inf() || interval.is_zero()) {
+    return *this;
+  }
+  if (interval.is_inf()) {
+    return TimeDelta();
+  }
+  const TimeDelta half = interval.magnitude() / 2;
+  return (delta_ < 0) ? (*this - half).CeilToMultiple(interval)
+                      : (*this + half).FloorToMultiple(interval);
+}
+
+// Time -----------------------------------------------------------------------
+
+// static
+Time Time::Now() {
+  return internal::g_time_now_function.load(std::memory_order_relaxed)();
+}
+
+// static
+Time Time::NowFromSystemTime() {
+  // Use g_time_now_from_system_time_function because it returns the system
+  // time.
+  return internal::g_time_now_from_system_time_function.load(
+      std::memory_order_relaxed)();
+}
+
+time_t Time::ToTimeT() const {
+  if (is_null()) {
+    return 0;  // Preserve 0 so we can tell it doesn't exist.
+  }
+  if (!is_inf() && ((std::numeric_limits<int64_t>::max() -
+                     kTimeTToMicrosecondsOffset) > us_)) {
+    return (*this - UnixEpoch()).InSeconds();
+  }
+  return (us_ < 0) ? std::numeric_limits<time_t>::min()
+                   : std::numeric_limits<time_t>::max();
+}
+
+// static
+Time Time::FromSecondsSinceUnixEpoch(double dt) {
+  // Preserve 0 so we can tell it doesn't exist.
+  return (dt == 0 || std::isnan(dt)) ? Time() : (UnixEpoch() + Seconds(dt));
+}
+
+double Time::InSecondsFSinceUnixEpoch() const {
+  if (is_null()) {
+    return 0;  // Preserve 0 so we can tell it doesn't exist.
+  }
+  if (!is_inf()) {
+    return (*this - UnixEpoch()).InSecondsF();
+  }
+  return (us_ < 0) ? -std::numeric_limits<double>::infinity()
+                   : std::numeric_limits<double>::infinity();
+}
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+// static
+Time Time::FromTimeSpec(const timespec& ts) {
+  return FromSecondsSinceUnixEpoch(ts.tv_sec + static_cast<double>(ts.tv_nsec) /
+                                                   kNanosecondsPerSecond);
+}
+#endif
+
+// static
+Time Time::FromMillisecondsSinceUnixEpoch(double ms_since_epoch) {
+  // The epoch is a valid time, so this constructor doesn't interpret 0 as the
+  // null time.
+  return UnixEpoch() + Milliseconds(ms_since_epoch);
+}
+
+double Time::InMillisecondsFSinceUnixEpoch() const {
+  // Preserve 0 so the invalid result doesn't depend on the platform.
+  return is_null() ? 0 : InMillisecondsFSinceUnixEpochIgnoringNull();
+}
+
+double Time::InMillisecondsFSinceUnixEpochIgnoringNull() const {
+  // Preserve max and min without offset to prevent over/underflow.
+  if (!is_inf()) {
+    return (*this - UnixEpoch()).InMillisecondsF();
+  }
+  return (us_ < 0) ? -std::numeric_limits<double>::infinity()
+                   : std::numeric_limits<double>::infinity();
+}
+
+// static
+Time Time::FromMillisecondsSinceUnixEpoch(int64_t ms_since_epoch) {
+  return UnixEpoch() + Milliseconds(ms_since_epoch);
+}
+
+int64_t Time::InMillisecondsSinceUnixEpoch() const {
+  // Preserve 0 so the invalid result doesn't depend on the platform.
+  if (is_null()) {
+    return 0;
+  }
+  if (!is_inf()) {
+    return (*this - UnixEpoch()).InMilliseconds();
+  }
+  return (us_ < 0) ? std::numeric_limits<int64_t>::min()
+                   : std::numeric_limits<int64_t>::max();
+}
+
+// static
+bool Time::FromMillisecondsSinceUnixEpoch(int64_t unix_milliseconds,
+                                          Time* time) {
+  // Adjust the provided time from milliseconds since the Unix epoch (1970) to
+  // microseconds since the Windows epoch (1601), avoiding overflows.
+  CheckedNumeric<int64_t> checked_microseconds_win_epoch = unix_milliseconds;
+  checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
+  checked_microseconds_win_epoch += kTimeTToMicrosecondsOffset;
+  *time = Time(checked_microseconds_win_epoch.ValueOrDefault(0));
+  return checked_microseconds_win_epoch.IsValid();
+}
+
+int64_t Time::ToRoundedDownMillisecondsSinceUnixEpoch() const {
+  constexpr int64_t kEpochOffsetMillis =
+      kTimeTToMicrosecondsOffset / kMicrosecondsPerMillisecond;
+  static_assert(kTimeTToMicrosecondsOffset % kMicrosecondsPerMillisecond == 0,
+                "assumption: no epoch offset sub-milliseconds");
+
+  // Compute the milliseconds since UNIX epoch without the possibility of
+  // under/overflow. Round the result towards -infinity.
+  //
+  // If |us_| is negative and includes fractions of a millisecond, subtract one
+  // more to effect the round towards -infinity. C-style integer truncation
+  // takes care of all other cases.
+  const int64_t millis = us_ / kMicrosecondsPerMillisecond;
+  const int64_t submillis = us_ % kMicrosecondsPerMillisecond;
+  return millis - kEpochOffsetMillis - (submillis < 0);
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// static
+TimeTicks TimeTicks::Now() {
+  return internal::g_time_ticks_now_function.load(std::memory_order_relaxed)();
+}
+
+// static
+TimeTicks TimeTicks::UnixEpoch() {
+  static const TimeTicks epoch([]() {
+    return subtle::TimeTicksNowIgnoringOverride() -
+           (subtle::TimeNowIgnoringOverride() - Time::UnixEpoch());
+  }());
+  return epoch;
+}
+
+TimeTicks TimeTicks::SnappedToNextTick(TimeTicks tick_phase,
+                                       TimeDelta tick_interval) const {
+  // |interval_offset| is the offset from |this| to the next multiple of
+  // |tick_interval| after |tick_phase|, possibly negative if in the past.
+  TimeDelta interval_offset = (tick_phase - *this) % tick_interval;
+  // If |this| is exactly on the interval (i.e. offset==0), don't adjust.
+  // Otherwise, if |tick_phase| was in the past, adjust forward to the next
+  // tick after |this|.
+  if (!interval_offset.is_zero() && tick_phase < *this) {
+    interval_offset += tick_interval;
+  }
+  return *this + interval_offset;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+// static
+ThreadTicks ThreadTicks::Now() {
+  return internal::g_thread_ticks_now_function.load(
+      std::memory_order_relaxed)();
+}
+
+}  // namespace partition_alloc::internal::base
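Worked examples (illustrative, not part of the patch) of the rounding semantics implemented above; each value follows directly from the definitions in this file.

#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"

void TimeDeltaRoundingExamples() {
  using partition_alloc::internal::base::Milliseconds;
  // Positive values: floor truncates, ceil bumps up to the next multiple,
  // round picks the nearest multiple.
  PA_BASE_DCHECK(Milliseconds(7).FloorToMultiple(Milliseconds(5)) ==
                 Milliseconds(5));
  PA_BASE_DCHECK(Milliseconds(7).CeilToMultiple(Milliseconds(5)) ==
                 Milliseconds(10));
  PA_BASE_DCHECK(Milliseconds(7).RoundToMultiple(Milliseconds(5)) ==
                 Milliseconds(5));
  // Negative values floor toward -infinity, not toward zero.
  PA_BASE_DCHECK(Milliseconds(-7).FloorToMultiple(Milliseconds(5)) ==
                 Milliseconds(-10));
  // InDaysFloored() converts C-style truncation into flooring: -1 ms is day -1.
  PA_BASE_DCHECK(Milliseconds(-1).InDaysFloored() == -1);
}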
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h
new file mode 100644
index 0000000..52550a0
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h
@@ -0,0 +1,1022 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// `Time` represents an absolute point in coordinated universal time (UTC),
+// internally represented as microseconds (s/1,000,000) since the Windows epoch
+// (1601-01-01 00:00:00 UTC). System-dependent clock interface routines are
+// defined in time_PLATFORM.cc. Note that values for `Time` may skew and jump
+// around as the operating system makes adjustments to synchronize (e.g., with
+// NTP servers). Thus, client code that uses the `Time` class must account for
+// this.
+//
+// `TimeDelta` represents a duration of time, internally represented in
+// microseconds.
+//
+// `TimeTicks` and `ThreadTicks` represent an abstract time that is most of the
+// time incrementing, for use in measuring time durations. Internally, they are
+// represented in microseconds. They cannot be converted to a human-readable
+// time, but are guaranteed not to decrease (unlike the `Time` class). Note
+// that `TimeTicks` may "stand still" (e.g., if the computer is suspended), and
+// `ThreadTicks` will "stand still" whenever the thread has been de-scheduled
+// by the operating system.
+//
+// All time classes are copyable, assignable, and occupy 64 bits per instance.
+// Prefer to pass them by value, e.g.:
+//
+//   void MyFunction(TimeDelta arg);
+//
+// All time classes support `operator<<` with logging streams, e.g. `LOG(INFO)`.
+// For human-readable formatting, use //base/i18n/time_formatting.h.
+//
+// Example use cases for different time classes:
+//
+//   Time:        Interpreting the wall-clock time provided by a remote system.
+//                Detecting whether cached resources have expired. Providing the
+//                user with a display of the current date and time. Determining
+//                the amount of time between events across re-boots of the
+//                machine.
+//
+//   TimeTicks:   Tracking the amount of time a task runs. Executing delayed
+//                tasks at the right time. Computing presentation timestamps.
+//                Synchronizing audio and video using TimeTicks as a common
+//                reference clock (lip-sync). Measuring network round-trip
+//                latency.
+//
+//   ThreadTicks: Benchmarking how long the current thread has been doing actual
+//                work.
+//
+// Serialization:
+//
+// Use the helpers in //base/json/values_util.h when serializing `Time`
+// or `TimeDelta` to/from `base::Value`.
+//
+// Otherwise:
+//
+// - Time: use `FromDeltaSinceWindowsEpoch()`/`ToDeltaSinceWindowsEpoch()`.
+// - TimeDelta: use `base::Microseconds()`/`InMicroseconds()`.
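+//
+// For example (illustrative):
+//
+//   // Serializing:
+//   int64_t raw = interval.InMicroseconds();
+//   // Deserializing:
+//   base::TimeDelta interval = base::Microseconds(raw);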
+//
+// `TimeTicks` and `ThreadTicks` do not have a stable origin; serialization for
+// the purpose of persistence is not supported.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TIME_TIME_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TIME_TIME_H_
+
+#include <stdint.h>
+#include <time.h>
+
+#include <iosfwd>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/chromeos_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/clamped_math.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#endif  // BUILDFLAG(IS_APPLE)
+
+#if BUILDFLAG(IS_FUCHSIA)
+#include <zircon/types.h>
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+#include <CoreFoundation/CoreFoundation.h>
+#include <mach/mach_time.h>
+// Avoid Mac system header macro leak.
+#undef TYPE_BOOL
+#endif
+
+#if BUILDFLAG(IS_ANDROID)
+#include <jni.h>
+#endif
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+
+namespace ABI {
+namespace Windows {
+namespace Foundation {
+struct DateTime;
+}  // namespace Foundation
+}  // namespace Windows
+}  // namespace ABI
+#endif
+
+namespace partition_alloc::internal::base {
+
+class TimeDelta;
+
+template <typename T>
+constexpr TimeDelta Microseconds(T n);
+
+#if BUILDFLAG(IS_WIN)
+class PlatformThreadHandle;
+#endif
+
+// TimeDelta ------------------------------------------------------------------
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) TimeDelta {
+ public:
+  constexpr TimeDelta() = default;
+
+#if BUILDFLAG(IS_WIN)
+  static TimeDelta FromQPCValue(LONGLONG qpc_value);
+  // TODO(crbug.com/989694): Avoid base::TimeDelta factory functions
+  // based on absolute time
+  static TimeDelta FromFileTime(FILETIME ft);
+  static TimeDelta FromWinrtDateTime(ABI::Windows::Foundation::DateTime dt);
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  static TimeDelta FromTimeSpec(const timespec& ts);
+#endif
+#if BUILDFLAG(IS_FUCHSIA)
+  static TimeDelta FromZxDuration(zx_duration_t nanos);
+#endif
+#if BUILDFLAG(IS_APPLE)
+  static TimeDelta FromMachTime(uint64_t mach_time);
+#endif  // BUILDFLAG(IS_APPLE)
+
+  // Converts an integer value representing TimeDelta to a class. This is used
+  // when deserializing a |TimeDelta| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. http://crbug.com/634507
+  static constexpr TimeDelta FromInternalValue(int64_t delta) {
+    return TimeDelta(delta);
+  }
+
+  // Returns the maximum time delta, which should be greater than any reasonable
+  // time delta we might compare it to. If converted to double with ToDouble()
+  // it becomes an IEEE double infinity. Use FiniteMax() if you want a very
+  // large number that doesn't do this. TimeDelta math saturates at the end
+  // points so adding to TimeDelta::Max() leaves the value unchanged.
+  // Subtracting should also leave the value unchanged, but currently does not
+  // (TODO(https://crbug.com/869387)).
+  static constexpr TimeDelta Max();
+
+  // Returns the minimum time delta, which should be less than any
+  // reasonable time delta we might compare it to. For more details see the
+  // comments for Max().
+  static constexpr TimeDelta Min();
+
+  // Returns the maximum time delta which is not equivalent to infinity. Only
+  // subtracting a finite time delta from this time delta has a defined result.
+  static constexpr TimeDelta FiniteMax();
+
+  // Returns the minimum time delta which is not equivalent to -infinity. Only
+  // adding a finite time delta to this time delta has a defined result.
+  static constexpr TimeDelta FiniteMin();
+
+  // Returns the internal numeric value of the TimeDelta object. Please don't
+  // use this and do arithmetic on it, as it is more error prone than using the
+  // provided operators.
+  // For serializing, use FromInternalValue to reconstitute.
+  //
+  // DEPRECATED - Do not use in new code. http://crbug.com/634507
+  constexpr int64_t ToInternalValue() const { return delta_; }
+
+  // Returns the magnitude (absolute value) of this TimeDelta.
+  constexpr TimeDelta magnitude() const { return TimeDelta(delta_.Abs()); }
+
+  // Returns true if the time delta is zero, positive, or negative,
+  // respectively.
+  constexpr bool is_zero() const { return delta_ == 0; }
+  constexpr bool is_positive() const { return delta_ > 0; }
+  constexpr bool is_negative() const { return delta_ < 0; }
+
+  // Returns true if the time delta is the maximum/minimum time delta.
+  constexpr bool is_max() const { return *this == Max(); }
+  constexpr bool is_min() const { return *this == Min(); }
+  constexpr bool is_inf() const { return is_min() || is_max(); }
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  struct timespec ToTimeSpec() const;
+#endif
+#if BUILDFLAG(IS_FUCHSIA)
+  zx_duration_t ToZxDuration() const;
+#endif
+#if BUILDFLAG(IS_WIN)
+  ABI::Windows::Foundation::DateTime ToWinrtDateTime() const;
+#endif
+
+  // Returns the frequency in Hertz (cycles per second) that has a period of
+  // *this.
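+  // For example (illustrative), Milliseconds(20).ToHz() == 50.0 and
+  // Seconds(2).ToHz() == 0.5.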
+  constexpr double ToHz() const;
+
+  // Returns the time delta in some unit. Minimum argument values return as
+  // -inf for doubles and min type values otherwise. Maximum ones are treated as
+  // +inf for doubles and max type values otherwise. Their results will produce
+  // an is_min() or is_max() TimeDelta. The InXYZF versions return a floating
+  // point value. The InXYZ versions return a truncated value (aka rounded
+  // towards zero, std::trunc() behavior). The InXYZFloored() versions round to
+  // lesser integers (std::floor() behavior). The InXYZRoundedUp() versions round
+  // up to greater integers (std::ceil() behavior). WARNING: Floating point
+  // arithmetic is such that XXX(t.InXXXF()) may not precisely equal |t|.
+  // Hence, floating point values should not be used for storage.
+  int InDays() const;
+  int InDaysFloored() const;
+  constexpr int InHours() const;
+  constexpr int InMinutes() const;
+  constexpr double InSecondsF() const;
+  constexpr int64_t InSeconds() const;
+  double InMillisecondsF() const;
+  int64_t InMilliseconds() const;
+  int64_t InMillisecondsRoundedUp() const;
+  constexpr int64_t InMicroseconds() const { return delta_; }
+  double InMicrosecondsF() const;
+  constexpr int64_t InNanoseconds() const;
+
+  // Computations with other deltas.
+  constexpr TimeDelta operator+(TimeDelta other) const;
+  constexpr TimeDelta operator-(TimeDelta other) const;
+
+  constexpr TimeDelta& operator+=(TimeDelta other) {
+    return *this = (*this + other);
+  }
+  constexpr TimeDelta& operator-=(TimeDelta other) {
+    return *this = (*this - other);
+  }
+  constexpr TimeDelta operator-() const {
+    if (!is_inf()) {
+      return TimeDelta(-delta_);
+    }
+    return (delta_ < 0) ? Max() : Min();
+  }
+
+  // Computations with numeric types.
+  template <typename T>
+  constexpr TimeDelta operator*(T a) const {
+    return TimeDelta(int64_t{delta_ * a});
+  }
+  template <typename T>
+  constexpr TimeDelta operator/(T a) const {
+    return TimeDelta(int64_t{delta_ / a});
+  }
+  template <typename T>
+  constexpr TimeDelta& operator*=(T a) {
+    return *this = (*this * a);
+  }
+  template <typename T>
+  constexpr TimeDelta& operator/=(T a) {
+    return *this = (*this / a);
+  }
+
+  // This does floating-point division. For an integer result, either call
+  // IntDiv(), or (possibly clearer) use this operator with
+  // base::Clamp{Ceil,Floor,Round}() or base::saturated_cast() (for truncation).
+  // Note that converting to double here drops precision to 53 bits.
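+  //
+  // For example (illustrative), Seconds(10) / Seconds(4) == 2.5, while
+  // Seconds(10).IntDiv(Seconds(4)) == 2.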
+  constexpr double operator/(TimeDelta a) const {
+    // 0/0 and inf/inf (any combination of positive and negative) are invalid
+    // (they are almost certainly not intentional, and result in NaN, which
+    // turns into 0 if clamped to an integer; this makes introducing subtle bugs
+    // too easy).
+    PA_BASE_CHECK(!is_zero() || !a.is_zero());
+    PA_BASE_CHECK(!is_inf() || !a.is_inf());
+
+    return ToDouble() / a.ToDouble();
+  }
+  constexpr int64_t IntDiv(TimeDelta a) const {
+    if (!is_inf() && !a.is_zero()) {
+      return int64_t{delta_ / a.delta_};
+    }
+
+    // For consistency, use the same edge case CHECKs and behavior as the code
+    // above.
+    PA_BASE_CHECK(!is_zero() || !a.is_zero());
+    PA_BASE_CHECK(!is_inf() || !a.is_inf());
+    return ((delta_ < 0) == (a.delta_ < 0))
+               ? std::numeric_limits<int64_t>::max()
+               : std::numeric_limits<int64_t>::min();
+  }
+
+  constexpr TimeDelta operator%(TimeDelta a) const {
+    return TimeDelta(
+        (is_inf() || a.is_zero() || a.is_inf()) ? delta_ : (delta_ % a.delta_));
+  }
+  constexpr TimeDelta& operator%=(TimeDelta other) {
+    return *this = (*this % other);
+  }
+
+  // Comparison operators.
+  constexpr bool operator==(TimeDelta other) const {
+    return delta_ == other.delta_;
+  }
+  constexpr bool operator!=(TimeDelta other) const {
+    return delta_ != other.delta_;
+  }
+  constexpr bool operator<(TimeDelta other) const {
+    return delta_ < other.delta_;
+  }
+  constexpr bool operator<=(TimeDelta other) const {
+    return delta_ <= other.delta_;
+  }
+  constexpr bool operator>(TimeDelta other) const {
+    return delta_ > other.delta_;
+  }
+  constexpr bool operator>=(TimeDelta other) const {
+    return delta_ >= other.delta_;
+  }
+
+  // Returns this delta, ceiled/floored/rounded-away-from-zero to the nearest
+  // multiple of |interval|.
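+  //
+  // For example (illustrative):
+  //   Milliseconds(7).CeilToMultiple(Milliseconds(5));   // == Milliseconds(10)
+  //   Milliseconds(7).FloorToMultiple(Milliseconds(5));  // == Milliseconds(5)
+  //   Milliseconds(7).RoundToMultiple(Milliseconds(5));  // == Milliseconds(5)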
+  TimeDelta CeilToMultiple(TimeDelta interval) const;
+  TimeDelta FloorToMultiple(TimeDelta interval) const;
+  TimeDelta RoundToMultiple(TimeDelta interval) const;
+
+ private:
+  // Constructs a delta given the duration in microseconds. This is private
+  // to avoid confusion by callers with an integer constructor. Use
+  // base::Seconds, base::Milliseconds, etc. instead.
+  constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
+  constexpr explicit TimeDelta(ClampedNumeric<int64_t> delta_us)
+      : delta_(delta_us) {}
+
+  // Returns a double representation of this TimeDelta's tick count.  In
+  // particular, Max()/Min() are converted to +/-infinity.
+  constexpr double ToDouble() const {
+    if (!is_inf()) {
+      return static_cast<double>(delta_);
+    }
+    return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
+                        : std::numeric_limits<double>::infinity();
+  }
+
+  // Delta in microseconds.
+  ClampedNumeric<int64_t> delta_ = 0;
+};
+
+constexpr TimeDelta TimeDelta::operator+(TimeDelta other) const {
+  if (!other.is_inf()) {
+    return TimeDelta(delta_ + other.delta_);
+  }
+
+  // Additions involving two infinities are only valid if signs match.
+  PA_BASE_CHECK(!is_inf() || (delta_ == other.delta_));
+  return other;
+}
+
+constexpr TimeDelta TimeDelta::operator-(TimeDelta other) const {
+  if (!other.is_inf()) {
+    return TimeDelta(delta_ - other.delta_);
+  }
+
+  // Subtractions involving two infinities are only valid if signs differ.
+  PA_BASE_CHECK(int64_t{delta_} != int64_t{other.delta_});
+  return (other.delta_ < 0) ? Max() : Min();
+}
+
+template <typename T>
+constexpr TimeDelta operator*(T a, TimeDelta td) {
+  return td * a;
+}
+
+// TimeBase ---------------------------------------------------------------------
+
+// Do not reference the time_internal::TimeBase template class directly.  Please
+// use one of the time subclasses instead, and only reference the public
+// TimeBase members via those classes.
+namespace time_internal {
+
+// Provides value storage and comparison/math operations common to all time
+// classes. Each subclass provides for strong type-checking to ensure
+// semantically meaningful comparison/math of time values from the same clock
+// source or timeline.
+template <class TimeClass>
+class TimeBase {
+ public:
+  static constexpr int64_t kHoursPerDay = 24;
+  static constexpr int64_t kSecondsPerMinute = 60;
+  static constexpr int64_t kMinutesPerHour = 60;
+  static constexpr int64_t kSecondsPerHour =
+      kSecondsPerMinute * kMinutesPerHour;
+  static constexpr int64_t kMillisecondsPerSecond = 1000;
+  static constexpr int64_t kMillisecondsPerDay =
+      kMillisecondsPerSecond * kSecondsPerHour * kHoursPerDay;
+  static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
+  static constexpr int64_t kMicrosecondsPerSecond =
+      kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
+  static constexpr int64_t kMicrosecondsPerMinute =
+      kMicrosecondsPerSecond * kSecondsPerMinute;
+  static constexpr int64_t kMicrosecondsPerHour =
+      kMicrosecondsPerMinute * kMinutesPerHour;
+  static constexpr int64_t kMicrosecondsPerDay =
+      kMicrosecondsPerHour * kHoursPerDay;
+  static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+  static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
+  static constexpr int64_t kNanosecondsPerSecond =
+      kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+
+  // Returns true if this object has not been initialized.
+  //
+  // Warning: Be careful when writing code that performs math on time values,
+  // since it's possible to produce a valid "zero" result that should not be
+  // interpreted as a "null" value.
+  constexpr bool is_null() const { return us_ == 0; }
+
+  // Returns true if this object represents the maximum/minimum time.
+  constexpr bool is_max() const { return *this == Max(); }
+  constexpr bool is_min() const { return *this == Min(); }
+  constexpr bool is_inf() const { return is_min() || is_max(); }
+
+  // Returns the maximum/minimum times, which should be greater/less than
+  // any reasonable time with which we might compare it.
+  static constexpr TimeClass Max() {
+    return TimeClass(std::numeric_limits<int64_t>::max());
+  }
+
+  static constexpr TimeClass Min() {
+    return TimeClass(std::numeric_limits<int64_t>::min());
+  }
+
+  // For legacy serialization only. When serializing to `base::Value`, prefer
+  // the helpers from //base/json/values_util.h instead. Otherwise, use
+  // `Time::ToDeltaSinceWindowsEpoch()` for `Time` and
+  // `TimeDelta::InMicroseconds()` for `TimeDelta`. See http://crbug.com/634507.
+  constexpr int64_t ToInternalValue() const { return us_; }
+
+  // The amount of time since the origin (or "zero") point. This is a syntactic
+  // convenience to aid in code readability, mainly for debugging/testing use
+  // cases.
+  //
+  // Warning: While the Time subclass has a fixed origin point, the origin for
+  // the other subclasses can vary each time the application is restarted.
+  constexpr TimeDelta since_origin() const;
+
+  // Compute the difference between two times.
+  constexpr TimeDelta operator-(const TimeBase<TimeClass>& other) const;
+
+  // Return a new time modified by some delta.
+  constexpr TimeClass operator+(TimeDelta delta) const;
+  constexpr TimeClass operator-(TimeDelta delta) const;
+
+  // Modify by some time delta.
+  constexpr TimeClass& operator+=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this + delta));
+  }
+  constexpr TimeClass& operator-=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this - delta));
+  }
+
+  // Comparison operators
+  constexpr bool operator==(const TimeBase<TimeClass>& other) const {
+    return us_ == other.us_;
+  }
+  constexpr bool operator!=(const TimeBase<TimeClass>& other) const {
+    return us_ != other.us_;
+  }
+  constexpr bool operator<(const TimeBase<TimeClass>& other) const {
+    return us_ < other.us_;
+  }
+  constexpr bool operator<=(const TimeBase<TimeClass>& other) const {
+    return us_ <= other.us_;
+  }
+  constexpr bool operator>(const TimeBase<TimeClass>& other) const {
+    return us_ > other.us_;
+  }
+  constexpr bool operator>=(const TimeBase<TimeClass>& other) const {
+    return us_ >= other.us_;
+  }
+
+ protected:
+  constexpr explicit TimeBase(int64_t us) : us_(us) {}
+
+  // Time value in a microsecond timebase.
+  int64_t us_;
+};
+
+#if BUILDFLAG(IS_WIN)
+#if defined(ARCH_CPU_ARM64)
+// TSCTicksPerSecond is not supported on Windows on Arm systems because the
+// cycle-counting methods use the actual CPU cycle count, and not a consistent
+// incrementing counter.
+#else
+// Returns true if the CPU supports a constant-rate TSC.
+[[nodiscard]] PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC_BASE) bool HasConstantRateTSC();
+
+// Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
+// been measured yet. Needs to be guarded with a call to HasConstantRateTSC().
+[[nodiscard]] PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC_BASE) double TSCTicksPerSecond();
+#endif
+#endif  // BUILDFLAG(IS_WIN)
+
+}  // namespace time_internal
+
+template <class TimeClass>
+inline constexpr TimeClass operator+(TimeDelta delta, TimeClass t) {
+  return t + delta;
+}
+
+// Time -----------------------------------------------------------------------
+
+// Represents a wall clock time in UTC. Values are not guaranteed to be
+// monotonically non-decreasing and are subject to large amounts of skew.
+// Time is stored internally as microseconds since the Windows epoch (1601).
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) Time
+    : public time_internal::TimeBase<Time> {
+ public:
+  // Offset of UNIX epoch (1970-01-01 00:00:00 UTC) from Windows FILETIME epoch
+  // (1601-01-01 00:00:00 UTC), in microseconds. This value is derived from the
+  // following: ((1970-1601)*365+89)*24*60*60*1000*1000, where 89 is the number
+  // of leap year days between 1601 and 1970: (1970-1601)/4 excluding 1700,
+  // 1800, and 1900.
+  static constexpr int64_t kTimeTToMicrosecondsOffset =
+      INT64_C(11644473600000000);
+
+#if BUILDFLAG(IS_WIN)
+  // To avoid overflow in QPC-to-microseconds calculations (we multiply by
+  // kMicrosecondsPerSecond), the QPC value should not exceed
+  // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide first, then
+  // multiply.
+  static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
+#endif
+
+  // Contains the NULL time. Use Time::Now() to get the current time.
+  constexpr Time() : TimeBase(0) {}
+
+  // Returns the time for the epoch on Unix-like systems (Jan 1, 1970).
+  static constexpr Time UnixEpoch() { return Time(kTimeTToMicrosecondsOffset); }
+
+  // Returns the current time. Watch out, the system might adjust its clock
+  // in which case time will actually go backwards. We don't guarantee that
+  // times are increasing, or that two calls to Now() won't be the same.
+  static Time Now();
+
+  // Returns the current time. Same as Now() except that this function always
+  // uses system time so that there are no discrepancies between the returned
+  // time and system time even on virtual environments including our test bot.
+  // For timing sensitive unittests, this function should be used.
+  static Time NowFromSystemTime();
+
+  // Converts to/from TimeDeltas relative to the Windows epoch (1601-01-01
+  // 00:00:00 UTC).
+  //
+  // For serialization, when handling `base::Value`, prefer the helpers in
+  // //base/json/values_util.h instead. Otherwise, use these methods for
+  // opaque serialization and deserialization, e.g.
+  //
+  //   // Serialization:
+  //   base::Time last_updated = ...;
+  //   SaveToDatabase(last_updated.ToDeltaSinceWindowsEpoch().InMicroseconds());
+  //
+  //   // Deserialization:
+  //   base::Time last_updated = base::Time::FromDeltaSinceWindowsEpoch(
+  //       base::Microseconds(LoadFromDatabase()));
+  //
+  // Do not use `FromInternalValue()` or `ToInternalValue()` for this purpose.
+  static constexpr Time FromDeltaSinceWindowsEpoch(TimeDelta delta) {
+    return Time(delta.InMicroseconds());
+  }
+
+  constexpr TimeDelta ToDeltaSinceWindowsEpoch() const {
+    return Microseconds(us_);
+  }
+
+  // Converts to/from time_t in UTC and a Time class.
+  static constexpr Time FromTimeT(time_t tt);
+  time_t ToTimeT() const;
+
+  // Converts time to/from a double which is the number of seconds since the
+  // epoch (Jan 1, 1970). WebKit uses this format to represent time. Because
+  // WebKit initializes the double time value to 0 to indicate "not
+  // initialized", we map it to an empty Time object that also means "not
+  // initialized".
+  static Time FromSecondsSinceUnixEpoch(double dt);
+  double InSecondsFSinceUnixEpoch() const;
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  // Converts the timespec structure to time. MacOS X 10.8.3 (and tentatively,
+  // earlier versions) will have the |ts|'s tv_nsec component zeroed out,
+  // having a 1 second resolution, which agrees with
+  // https://developer.apple.com/legacy/library/#technotes/tn/tn1150.html#HFSPlusDates.
+  static Time FromTimeSpec(const timespec& ts);
+#endif
+
+  // Converts to/from the Javascript convention for times, a number of
+  // milliseconds since the epoch:
+  // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date/getTime.
+  //
+  // Don't use InMillisecondsFSinceUnixEpoch() in new code, since it contains a
+  // subtle hack (only exactly 1601-01-01 00:00 UTC is represented as 1970-01-01
+  // 00:00 UTC), and that is not appropriate for general use. Try to use
+  // InMillisecondsFSinceUnixEpochIgnoringNull() unless you have a very good
+  // reason to use InMillisecondsFSinceUnixEpoch().
+  static Time FromMillisecondsSinceUnixEpoch(double ms_since_epoch);
+  double InMillisecondsFSinceUnixEpoch() const;
+  double InMillisecondsFSinceUnixEpochIgnoringNull() const;
+
+  // Converts to/from Java convention for times, a number of milliseconds since
+  // the epoch. Because the Java format has less resolution, converting to Java
+  // time is a lossy operation.
+  static Time FromMillisecondsSinceUnixEpoch(int64_t ms_since_epoch);
+  int64_t InMillisecondsSinceUnixEpoch() const;
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  static Time FromTimeVal(struct timeval t);
+  struct timeval ToTimeVal() const;
+#endif
+
+#if BUILDFLAG(IS_FUCHSIA)
+  static Time FromZxTime(zx_time_t time);
+  zx_time_t ToZxTime() const;
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+  static Time FromCFAbsoluteTime(CFAbsoluteTime t);
+  CFAbsoluteTime ToCFAbsoluteTime() const;
+#if defined(__OBJC__)
+  static Time FromNSDate(NSDate* date);
+  NSDate* ToNSDate() const;
+#endif
+#endif
+
+#if BUILDFLAG(IS_WIN)
+  static Time FromFileTime(FILETIME ft);
+  FILETIME ToFileTime() const;
+#endif  // BUILDFLAG(IS_WIN)
+
+  // For legacy deserialization only. Converts an integer value representing
+  // Time to a class. This may be used when deserializing a |Time| structure,
+  // using a value known to be compatible. It is not provided as a constructor
+  // because the integer type may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. When deserializing from `base::Value`,
+  // prefer the helpers from //base/json/values_util.h instead.
+  // Otherwise, use `Time::FromDeltaSinceWindowsEpoch()` for `Time` and
+  // `base::Microseconds()` for `TimeDelta`. http://crbug.com/634507
+  static constexpr Time FromInternalValue(int64_t us) { return Time(us); }
+
+ private:
+  friend class time_internal::TimeBase<Time>;
+
+  constexpr explicit Time(int64_t microseconds_since_win_epoch)
+      : TimeBase(microseconds_since_win_epoch) {}
+
+  // Converts the provided time in milliseconds since the Unix epoch (1970) to a
+  // Time object, avoiding overflows.
+  [[nodiscard]] static bool FromMillisecondsSinceUnixEpoch(
+      int64_t unix_milliseconds,
+      Time* time);
+
+  // Returns the milliseconds since the Unix epoch (1970), rounding the
+  // microseconds towards -infinity.
+  int64_t ToRoundedDownMillisecondsSinceUnixEpoch() const;
+};
+
+// Factory methods that return a TimeDelta of the given unit.
+// WARNING: Floating point arithmetic is such that XXX(t.InXXXF()) may not
+// precisely equal |t|. Hence, floating point values should not be used for
+// storage.
+
+template <typename T>
+constexpr TimeDelta Days(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
+                                      Time::kMicrosecondsPerDay);
+}
+template <typename T>
+constexpr TimeDelta Hours(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
+                                      Time::kMicrosecondsPerHour);
+}
+template <typename T>
+constexpr TimeDelta Minutes(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
+                                      Time::kMicrosecondsPerMinute);
+}
+template <typename T>
+constexpr TimeDelta Seconds(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
+                                      Time::kMicrosecondsPerSecond);
+}
+template <typename T>
+constexpr TimeDelta Milliseconds(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n) *
+                                      Time::kMicrosecondsPerMillisecond);
+}
+template <typename T>
+constexpr TimeDelta Microseconds(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n));
+}
+template <typename T>
+constexpr TimeDelta Nanoseconds(T n) {
+  return TimeDelta::FromInternalValue(MakeClampedNum(n) /
+                                      Time::kNanosecondsPerMicrosecond);
+}
+template <typename T>
+constexpr TimeDelta Hertz(T n) {
+  return n ? TimeDelta::FromInternalValue(Time::kMicrosecondsPerSecond /
+                                          MakeClampedNum(n))
+           : TimeDelta::Max();
+}
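+
+// For example (illustrative):
+//
+//   constexpr TimeDelta kTimeout = Seconds(30);
+//   constexpr TimeDelta kPollInterval = Milliseconds(250);
+//   constexpr TimeDelta kRetryDelay = Minutes(5) + Seconds(30);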
+
+// TimeDelta functions that must appear below the declarations of Time/TimeDelta
+
+constexpr double TimeDelta::ToHz() const {
+  return Seconds(1) / *this;
+}
+
+constexpr int TimeDelta::InHours() const {
+  // saturated_cast<> is necessary since very large (but still less than
+  // min/max) deltas would result in overflow.
+  return saturated_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+constexpr int TimeDelta::InMinutes() const {
+  // saturated_cast<> is necessary since very large (but still less than
+  // min/max) deltas would result in overflow.
+  return saturated_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+constexpr double TimeDelta::InSecondsF() const {
+  if (!is_inf()) {
+    return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+  }
+  return (delta_ < 0) ? -std::numeric_limits<double>::infinity()
+                      : std::numeric_limits<double>::infinity();
+}
+
+constexpr int64_t TimeDelta::InSeconds() const {
+  return is_inf() ? delta_ : (delta_ / Time::kMicrosecondsPerSecond);
+}
+
+constexpr int64_t TimeDelta::InNanoseconds() const {
+  return base::ClampMul(delta_, Time::kNanosecondsPerMicrosecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::Max() {
+  return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+// static
+constexpr TimeDelta TimeDelta::Min() {
+  return TimeDelta(std::numeric_limits<int64_t>::min());
+}
+
+// static
+constexpr TimeDelta TimeDelta::FiniteMax() {
+  return TimeDelta(std::numeric_limits<int64_t>::max() - 1);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FiniteMin() {
+  return TimeDelta(std::numeric_limits<int64_t>::min() + 1);
+}
+
+// TimeBase functions that must appear below the declarations of Time/TimeDelta
+namespace time_internal {
+
+template <class TimeClass>
+constexpr TimeDelta TimeBase<TimeClass>::since_origin() const {
+  return Microseconds(us_);
+}
+
+template <class TimeClass>
+constexpr TimeDelta TimeBase<TimeClass>::operator-(
+    const TimeBase<TimeClass>& other) const {
+  return Microseconds(us_ - other.us_);
+}
+
+template <class TimeClass>
+constexpr TimeClass TimeBase<TimeClass>::operator+(TimeDelta delta) const {
+  return TimeClass((Microseconds(us_) + delta).InMicroseconds());
+}
+
+template <class TimeClass>
+constexpr TimeClass TimeBase<TimeClass>::operator-(TimeDelta delta) const {
+  return TimeClass((Microseconds(us_) - delta).InMicroseconds());
+}
+
+}  // namespace time_internal
+
+// Time functions that must appear below the declarations of Time/TimeDelta
+
+// static
+constexpr Time Time::FromTimeT(time_t tt) {
+  if (tt == 0) {
+    return Time();  // Preserve 0 so we can tell it doesn't exist.
+  }
+  return (tt == std::numeric_limits<time_t>::max())
+             ? Max()
+             : (UnixEpoch() + Seconds(tt));
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// Represents monotonically non-decreasing clock time.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) TimeTicks
+    : public time_internal::TimeBase<TimeTicks> {
+ public:
+  // The underlying clock used to generate new TimeTicks.
+  enum class Clock {
+    FUCHSIA_ZX_CLOCK_MONOTONIC,
+    LINUX_CLOCK_MONOTONIC,
+    IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
+    MAC_MACH_ABSOLUTE_TIME,
+    WIN_QPC,
+    WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
+  };
+
+  constexpr TimeTicks() : TimeBase(0) {}
+
+  // Platform-dependent tick count representing "right now." When
+  // IsHighResolution() returns false, the resolution of the clock could be
+  // as coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
+  // microsecond.
+  static TimeTicks Now();
+
+  // Returns true if the high resolution clock is working on this system and
+  // Now() will return high resolution values. Note that, on systems where the
+  // high resolution clock works but is deemed inefficient, the low resolution
+  // clock will be used instead.
+  [[nodiscard]] static bool IsHighResolution();
+
+  // Returns true if TimeTicks is consistent across processes, meaning that
+  // timestamps taken on different processes can be safely compared with one
+  // another. (Note that, even on platforms where this returns true, time values
+  // from different threads that are within one tick of each other must be
+  // considered to have an ambiguous ordering.)
+  [[nodiscard]] static bool IsConsistentAcrossProcesses();
+
+#if BUILDFLAG(IS_FUCHSIA)
+  // Converts between TimeTicks and a ZX_CLOCK_MONOTONIC zx_time_t value.
+  static TimeTicks FromZxTime(zx_time_t nanos_since_boot);
+  zx_time_t ToZxTime() const;
+#endif
+
+#if BUILDFLAG(IS_WIN)
+  // Translates an absolute QPC timestamp into a TimeTicks value. The returned
+  // value has the same origin as Now(). Do NOT attempt to use this if
+  // IsHighResolution() returns false.
+  static TimeTicks FromQPCValue(LONGLONG qpc_value);
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+  static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
+
+  // Sets the current Mach timebase to `timebase`. Returns the old timebase.
+  static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
+      mach_timebase_info_data_t timebase);
+
+#endif  // BUILDFLAG(IS_APPLE)
+
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
+  // Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
+  // Note: this conversion may be non-monotonic in relation to previously
+  // obtained TimeTicks::Now() values because of the truncation (to
+  // milliseconds) performed by uptimeMillis().
+  static TimeTicks FromUptimeMillis(int64_t uptime_millis_value);
+
+#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
+
+#if BUILDFLAG(IS_ANDROID)
+  // Converts to TimeTicks the value obtained from System.nanoTime(). This
+  // conversion will be monotonic in relation to previously obtained
+  // TimeTicks::Now() values as the clocks are based on the same posix monotonic
+  // clock, with nanoTime() potentially providing higher resolution.
+  static TimeTicks FromJavaNanoTime(int64_t nano_time_value);
+
+  // Truncates the TimeTicks value to the precision of SystemClock#uptimeMillis.
+  // Note that the clocks already share the same monotonic clock source.
+  jlong ToUptimeMillis() const;
+
+  // Returns the TimeTicks value as microseconds in the timebase of
+  // SystemClock#uptimeMillis.
+  // Note that the clocks already share the same monotonic clock source.
+  //
+  // System.nanoTime() may be used to get sub-millisecond precision in Java code
+  // and may be compared against this value as the two share the same clock
+  // source (though be sure to convert nanos to micros).
+  jlong ToUptimeMicros() const;
+
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  // Get an estimate of the TimeTick value at the time of the UnixEpoch. Because
+  // Time and TimeTicks respond differently to user-set time and NTP
+  // adjustments, this number is only an estimate. Nevertheless, this can be
+  // useful when you need to relate the value of TimeTicks to a real time and
+  // date. Note: Upon first invocation, this function takes a snapshot of the
+  // realtime clock to establish a reference point.  This function will return
+  // the same value for the duration of the application, but will be different
+  // in future application runs.
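+  //
+  // For example (illustrative), a TimeTicks value |t| can be mapped to an
+  // approximate wall-clock time via:
+  //
+  //   Time approx = Time::UnixEpoch() + (t - TimeTicks::UnixEpoch());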
+  static TimeTicks UnixEpoch();
+
+  // Returns |this| snapped to the next tick, given a |tick_phase| and
+  // repeating |tick_interval| in both directions. |this| may be before,
+  // after, or equal to the |tick_phase|.
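+  //
+  // For example (illustrative), with tick_phase == TimeTicks() and
+  // tick_interval == Milliseconds(10), a TimeTicks at +7ms snaps to +10ms,
+  // while one exactly at +20ms is returned unchanged.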
+  TimeTicks SnappedToNextTick(TimeTicks tick_phase,
+                              TimeDelta tick_interval) const;
+
+  // Returns an enum indicating the underlying clock being used to generate
+  // TimeTicks timestamps. This function should only be used for debugging and
+  // logging purposes.
+  static Clock GetClock();
+
+  // Converts an integer value representing TimeTicks to a class. This may be
+  // used when deserializing a |TimeTicks| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. For deserializing TimeTicks values,
+  // prefer TimeTicks + TimeDelta(); however, be aware that the origin is not
+  // fixed and may vary. Serializing for persistence is strongly discouraged.
+  // http://crbug.com/634507
+  static constexpr TimeTicks FromInternalValue(int64_t us) {
+    return TimeTicks(us);
+  }
+
+ protected:
+#if BUILDFLAG(IS_WIN)
+  typedef DWORD (*TickFunctionType)(void);
+  static TickFunctionType SetMockTickFunction(TickFunctionType ticker);
+#endif
+
+ private:
+  friend class time_internal::TimeBase<TimeTicks>;
+
+  // Please use Now() to create a new object. This is for internal use
+  // and testing.
+  constexpr explicit TimeTicks(int64_t us) : TimeBase(us) {}
+};
+
+// ThreadTicks ----------------------------------------------------------------
+
+// Represents a clock, specific to a particular thread, that runs only while the
+// thread is running.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ThreadTicks
+    : public time_internal::TimeBase<ThreadTicks> {
+ public:
+  constexpr ThreadTicks() : TimeBase(0) {}
+
+  // Returns true if ThreadTicks::Now() is supported on this system.
+  [[nodiscard]] static bool IsSupported() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+    BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+    return true;
+#elif BUILDFLAG(IS_WIN)
+    return IsSupportedWin();
+#else
+    return false;
+#endif
+  }
+
+  // Waits until the initialization is completed. Needs to be guarded with a
+  // call to IsSupported().
+  static void WaitUntilInitialized() {
+#if BUILDFLAG(IS_WIN)
+    WaitUntilInitializedWin();
+#endif
+  }
+
+  // Returns thread-specific CPU-time on systems that support this feature.
+  // Needs to be guarded with a call to IsSupported(). Use this timer
+  // to (approximately) measure how much time the calling thread spent doing
+  // actual work vs. being de-scheduled. May return bogus results if the thread
+  // migrates to another CPU between two calls. Returns an empty ThreadTicks
+  // object until the initialization is completed. If a clock reading is
+  // absolutely needed, call WaitUntilInitialized() before this method.
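+  //
+  // For example (illustrative):
+  //
+  //   if (ThreadTicks::IsSupported()) {
+  //     ThreadTicks::WaitUntilInitialized();
+  //     const ThreadTicks start = ThreadTicks::Now();
+  //     DoExpensiveWork();  // hypothetical workload
+  //     const TimeDelta cpu_time = ThreadTicks::Now() - start;
+  //   }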
+  static ThreadTicks Now();
+
+#if BUILDFLAG(IS_WIN)
+  // Similar to Now() above except this returns thread-specific CPU time for an
+  // arbitrary thread. All comments for the Now() method above apply to this
+  // method as well.
+  static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
+#endif
+
+  // Converts an integer value representing ThreadTicks to a class. This may be
+  // used when deserializing a |ThreadTicks| structure, using a value known to
+  // be compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. For deserializing ThreadTicks values,
+  // prefer ThreadTicks + TimeDelta(); however, be aware that the origin is not
+  // fixed and may vary. Serializing for persistence is strongly
+  // discouraged. http://crbug.com/634507
+  static constexpr ThreadTicks FromInternalValue(int64_t us) {
+    return ThreadTicks(us);
+  }
+
+ private:
+  friend class time_internal::TimeBase<ThreadTicks>;
+
+  // Please use Now() or GetForThread() to create a new object. This is for
+  // internal use and testing.
+  constexpr explicit ThreadTicks(int64_t us) : TimeBase(us) {}
+
+#if BUILDFLAG(IS_WIN)
+  [[nodiscard]] static bool IsSupportedWin();
+  static void WaitUntilInitializedWin();
+#endif
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TIME_TIME_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_android.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_android.cc
new file mode 100644
index 0000000..52845a5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_android.cc
@@ -0,0 +1,65 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+namespace partition_alloc::internal::base {
+
+// static
+TimeTicks TimeTicks::FromUptimeMillis(int64_t uptime_millis_value) {
+  // The implementation of the SystemClock.uptimeMillis() in AOSP uses the same
+  // clock as base::TimeTicks::Now(): clock_gettime(CLOCK_MONOTONIC), see in
+  // platform/system/core:
+  // 1. libutils/SystemClock.cpp
+  // 2. libutils/Timers.cpp
+  //
+  // We are not aware of any motivations for Android OEMs to modify the AOSP
+  // implementation of either uptimeMillis() or clock_gettime(CLOCK_MONOTONIC),
+  // so we assume that there are no such customizations.
+  //
+  // Under these assumptions the conversion is as safe as copying the value of
+  // base::TimeTicks::Now() with a loss of sub-millisecond precision.
+  return TimeTicks(uptime_millis_value * Time::kMicrosecondsPerMillisecond);
+}
+
+// This file is included on chromeos_ash because it needs to interpret
+// UptimeMillis values from the Android container.
+#if BUILDFLAG(IS_ANDROID)
+
+// static
+TimeTicks TimeTicks::FromJavaNanoTime(int64_t nano_time_value) {
+  // The implementation of the System.nanoTime() in AOSP uses the same
+  // clock as UptimeMillis() and base::TimeTicks::Now():
+  // clock_gettime(CLOCK_MONOTONIC), see ojluni/src/main/native/System.c in
+  // AOSP.
+  //
+  // From Android documentation on android.os.SystemClock:
+  //   [uptimeMillis()] is the basis for most interval timing such as
+  //   Thread.sleep(millis), Object.wait(millis), and System.nanoTime().
+  //
+  // We are not aware of any motivations for Android OEMs to modify the AOSP
+  // implementation of either uptimeMillis(), nanoTime(), or
+  // clock_gettime(CLOCK_MONOTONIC), so we assume that there are no such
+  // customizations.
+  //
+  // Under these assumptions the conversion is as safe as copying the value of
+  // base::TimeTicks::Now() without the (theoretical) sub-microsecond
+  // resolution.
+  return TimeTicks(nano_time_value / Time::kNanosecondsPerMicrosecond);
+}
+
+jlong TimeTicks::ToUptimeMillis() const {
+  // See FromUptimeMillis. UptimeMillis and TimeTicks use the same clock source,
+  // and only differ in resolution.
+  return us_ / Time::kMicrosecondsPerMillisecond;
+}
+
+jlong TimeTicks::ToUptimeMicros() const {
+  // Same as ToUptimeMillis but maintains sub-millisecond precision.
+  return us_;
+}
+
+#endif  // BUILDFLAG(IS_ANDROID)
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_apple.mm b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_apple.mm
new file mode 100644
index 0000000..0e05d4b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_apple.mm
@@ -0,0 +1,226 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+#import <Foundation/Foundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+
+#if BUILDFLAG(IS_IOS)
+#include <errno.h>
+#endif
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+namespace {
+
+// Returns a pointer to the initialized Mach timebase info struct.
+mach_timebase_info_data_t* MachTimebaseInfo() {
+  static mach_timebase_info_data_t timebase_info = []() {
+    mach_timebase_info_data_t info;
+    kern_return_t kr = mach_timebase_info(&info);
+    PA_BASE_DCHECK(kr == KERN_SUCCESS) << "mach_timebase_info";
+    PA_BASE_DCHECK(info.numer);
+    PA_BASE_DCHECK(info.denom);
+    return info;
+  }();
+  return &timebase_info;
+}
+
+int64_t MachTimeToMicroseconds(uint64_t mach_time) {
+  // timebase_info gives us the conversion factor between absolute time tick
+  // units and nanoseconds.
+  mach_timebase_info_data_t* timebase_info = MachTimebaseInfo();
+
+  // Take the fast path when the conversion is 1:1. The result is guaranteed to
+  // fit into an int64_t because we're going from nanoseconds to microseconds.
+  if (timebase_info->numer == timebase_info->denom) {
+    return static_cast<int64_t>(mach_time / Time::kNanosecondsPerMicrosecond);
+  }
+
+  uint64_t microseconds = 0;
+  const uint64_t divisor =
+      timebase_info->denom * Time::kNanosecondsPerMicrosecond;
+
+  // Microseconds is mach_time * timebase.numer /
+  // (timebase.denom * kNanosecondsPerMicrosecond). Divide first to reduce
+  // the chance of overflow. Also stash the remainder right now, a likely
+  // byproduct of the division.
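+  //
+  // For example (illustrative): with a timebase of numer == 125 and denom == 3
+  // (a value commonly reported on Apple Silicon), 24,000,000 ticks divide to
+  // 24,000,000 / 3,000 == 8,000, and 8,000 * 125 == 1,000,000 microseconds,
+  // i.e. exactly one second.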
+  microseconds = mach_time / divisor;
+  const uint64_t mach_time_remainder = mach_time % divisor;
+
+  // Now multiply, keeping an eye out for overflow.
+  PA_BASE_CHECK(!__builtin_umulll_overflow(microseconds, timebase_info->numer,
+                                           &microseconds));
+
+  // By dividing first we lose precision. Regain it by adding back the
+  // microseconds from the remainder, with an eye out for overflow.
+  uint64_t least_significant_microseconds =
+      (mach_time_remainder * timebase_info->numer) / divisor;
+  PA_BASE_CHECK(!__builtin_uaddll_overflow(
+      microseconds, least_significant_microseconds, &microseconds));
+
+  // Don't bother with the rollover handling that the Windows version does.
+  // The returned time in microseconds is enough for 292,277 years (starting
+  // from 2^63 because the returned int64_t is signed,
+  // 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
+  return checked_cast<int64_t>(microseconds);
+}
+
+// Returns monotonically growing number of ticks in microseconds since some
+// unspecified starting point.
+int64_t ComputeCurrentTicks() {
+  // mach_absolute_time is it when it comes to ticks on the Mac.  Other calls
+  // with less precision (such as TickCount) just call through to
+  // mach_absolute_time.
+  return MachTimeToMicroseconds(mach_absolute_time());
+}
+
+int64_t ComputeThreadTicks() {
+  // The pthreads library keeps a cached reference to the thread port, which
+  // does not have to be released like mach_thread_self() does.
+  mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
+  if (thread_port == MACH_PORT_NULL) {
+    PA_DLOG(ERROR) << "Failed to get pthread_mach_thread_np()";
+    return 0;
+  }
+
+  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
+  thread_basic_info_data_t thread_info_data;
+
+  kern_return_t kr = thread_info(
+      thread_port, THREAD_BASIC_INFO,
+      reinterpret_cast<thread_info_t>(&thread_info_data), &thread_info_count);
+  PA_BASE_DCHECK(kr == KERN_SUCCESS) << "thread_info";
+
+  CheckedNumeric<int64_t> absolute_micros(thread_info_data.user_time.seconds +
+                                          thread_info_data.system_time.seconds);
+  absolute_micros *= Time::kMicrosecondsPerSecond;
+  absolute_micros += (thread_info_data.user_time.microseconds +
+                      thread_info_data.system_time.microseconds);
+  return absolute_micros.ValueOrDie();
+}
+
+}  // namespace
+
+// The Time routines in this file use Mach and CoreFoundation APIs, since the
+// POSIX definition of time_t in Mac OS X wraps around after 2038--and
+// there are already cookie expiration dates, etc., past that time out in
+// the field.  Using CFDate prevents that problem, and using mach_absolute_time
+// for TimeTicks gives us nice high-resolution interval timing.
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  return Time::FromCFAbsoluteTime(CFAbsoluteTimeGetCurrent());
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// static
+Time Time::FromCFAbsoluteTime(CFAbsoluteTime t) {
+  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+                "CFAbsoluteTime must have an infinity value");
+  if (t == 0) {
+    return Time();  // Consider 0 as a null Time.
+  }
+  return (t == std::numeric_limits<CFAbsoluteTime>::infinity())
+             ? Max()
+             : (UnixEpoch() +
+                Seconds(double{t + kCFAbsoluteTimeIntervalSince1970}));
+}
+
+CFAbsoluteTime Time::ToCFAbsoluteTime() const {
+  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+                "CFAbsoluteTime must have an infinity value");
+  if (is_null()) {
+    return 0;  // Consider 0 as a null Time.
+  }
+  return is_max() ? std::numeric_limits<CFAbsoluteTime>::infinity()
+                  : (CFAbsoluteTime{(*this - UnixEpoch()).InSecondsF()} -
+                     kCFAbsoluteTimeIntervalSince1970);
+}
+
+// static
+Time Time::FromNSDate(NSDate* date) {
+  PA_BASE_DCHECK(date);
+  return FromCFAbsoluteTime(date.timeIntervalSinceReferenceDate);
+}
+
+NSDate* Time::ToNSDate() const {
+  return [NSDate dateWithTimeIntervalSinceReferenceDate:ToCFAbsoluteTime()];
+}
+
+// TimeDelta ------------------------------------------------------------------
+
+// static
+TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
+  return Microseconds(MachTimeToMicroseconds(mach_time));
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return TimeTicks() + Microseconds(ComputeCurrentTicks());
+}
+}  // namespace subtle
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+// static
+TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
+  return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
+}
+
+// static
+mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
+    mach_timebase_info_data_t timebase) {
+  mach_timebase_info_data_t orig_timebase = *MachTimebaseInfo();
+
+  *MachTimebaseInfo() = timebase;
+
+  return orig_timebase;
+}
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::MAC_MACH_ABSOLUTE_TIME;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  return ThreadTicks() + Microseconds(ComputeThreadTicks());
+}
+}  // namespace subtle
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_conversion_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_conversion_posix.cc
new file mode 100644
index 0000000..0544c1d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_conversion_posix.cc
@@ -0,0 +1,69 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+namespace partition_alloc::internal::base {
+
+// static
+TimeDelta TimeDelta::FromTimeSpec(const timespec& ts) {
+  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+struct timespec TimeDelta::ToTimeSpec() const {
+  int64_t microseconds = InMicroseconds();
+  time_t seconds = 0;
+  if (microseconds >= Time::kMicrosecondsPerSecond) {
+    seconds = InSeconds();
+    microseconds -= seconds * Time::kMicrosecondsPerSecond;
+  }
+  struct timespec result = {
+      seconds,
+      static_cast<long>(microseconds * Time::kNanosecondsPerMicrosecond)};
+  return result;
+}
+
+// static
+Time Time::FromTimeVal(struct timeval t) {
+  PA_BASE_DCHECK(t.tv_usec < static_cast<int>(Time::kMicrosecondsPerSecond));
+  PA_BASE_DCHECK(t.tv_usec >= 0);
+  if (t.tv_usec == 0 && t.tv_sec == 0) {
+    return Time();
+  }
+  if (t.tv_usec == static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1 &&
+      t.tv_sec == std::numeric_limits<time_t>::max()) {
+    return Max();
+  }
+  return Time((static_cast<int64_t>(t.tv_sec) * Time::kMicrosecondsPerSecond) +
+              t.tv_usec + kTimeTToMicrosecondsOffset);
+}
+
+struct timeval Time::ToTimeVal() const {
+  struct timeval result;
+  if (is_null()) {
+    result.tv_sec = 0;
+    result.tv_usec = 0;
+    return result;
+  }
+  if (is_max()) {
+    result.tv_sec = std::numeric_limits<time_t>::max();
+    result.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
+    return result;
+  }
+  int64_t us = us_ - kTimeTToMicrosecondsOffset;
+  result.tv_sec = us / Time::kMicrosecondsPerSecond;
+  result.tv_usec = us % Time::kMicrosecondsPerSecond;
+  return result;
+}
+
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_fuchsia.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_fuchsia.cc
new file mode 100644
index 0000000..45f5127
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_fuchsia.cc
@@ -0,0 +1,97 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+#include <threads.h>
+#include <zircon/syscalls.h>
+#include <zircon/threads.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+
+namespace partition_alloc::internal::base {
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  timespec ts;
+  int status = timespec_get(&ts, TIME_UTC);
+  PA_BASE_CHECK(status != 0);
+  return Time::FromTimeSpec(ts);
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  const zx_time_t nanos_since_boot = zx_clock_get_monotonic();
+  PA_BASE_CHECK(0 != nanos_since_boot);
+  return TimeTicks::FromZxTime(nanos_since_boot);
+}
+}  // namespace subtle
+
+// static
+TimeDelta TimeDelta::FromZxDuration(zx_duration_t nanos) {
+  return Nanoseconds(nanos);
+}
+
+zx_duration_t TimeDelta::ToZxDuration() const {
+  return InNanoseconds();
+}
+
+// static
+Time Time::FromZxTime(zx_time_t nanos_since_unix_epoch) {
+  return UnixEpoch() + Nanoseconds(nanos_since_unix_epoch);
+}
+
+zx_time_t Time::ToZxTime() const {
+  return (*this - UnixEpoch()).InNanoseconds();
+}
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::FUCHSIA_ZX_CLOCK_MONOTONIC;
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+// static
+TimeTicks TimeTicks::FromZxTime(zx_time_t nanos_since_boot) {
+  return TimeTicks() + Nanoseconds(nanos_since_boot);
+}
+
+zx_time_t TimeTicks::ToZxTime() const {
+  return (*this - TimeTicks()).InNanoseconds();
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  zx_info_thread_stats_t info;
+  zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
+                                          ZX_INFO_THREAD_STATS, &info,
+                                          sizeof(info), nullptr, nullptr);
+  PA_BASE_CHECK(status == ZX_OK);
+  return ThreadTicks() + Nanoseconds(info.total_runtime);
+}
+}  // namespace subtle
+
+}  // namespace partition_alloc::internal::base
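On Fuchsia all of these conversions are direct nanosecond mappings onto Zircon's zx_time_t / zx_duration_t, with TimeTicks anchored at boot (the monotonic clock's zero point). A minimal sketch using only the conversions defined in this file (the values are made up):

    zx_duration_t raw = 1'500'000;                     // 1.5 ms in nanoseconds.
    TimeDelta delta = TimeDelta::FromZxDuration(raw);  // == Nanoseconds(raw)
    zx_duration_t round_trip = delta.ToZxDuration();   // == raw

    // TimeTicks counts since boot; one second after boot:
    TimeTicks t = TimeTicks::FromZxTime(1'000'000'000);
    zx_time_t nanos = t.ToZxTime();                    // == 1'000'000'000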
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_now_posix.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_now_posix.cc
new file mode 100644
index 0000000..47da2d5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_now_posix.cc
@@ -0,0 +1,122 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include "build/build_config.h"
+#if BUILDFLAG(IS_ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#endif
+#include <unistd.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+
+// Ensure the Fuchsia and Mac builds do not include this module. Instead, a
+// non-POSIX implementation is used for sampling the system clocks.
+#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE)
+#error "This implementation is for POSIX platforms other than Fuchsia or Mac."
+#endif
+
+namespace partition_alloc::internal::base {
+
+namespace {
+
+int64_t ConvertTimespecToMicros(const struct timespec& ts) {
+  // On 32-bit systems, the calculation cannot overflow int64_t.
+  // 2**32 * 1000000 + 2**64 / 1000 < 2**63
+  if (sizeof(ts.tv_sec) <= 4 && sizeof(ts.tv_nsec) <= 8) {
+    int64_t result = ts.tv_sec;
+    result *= Time::kMicrosecondsPerSecond;
+    result += (ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+    return result;
+  }
+  CheckedNumeric<int64_t> result(ts.tv_sec);
+  result *= Time::kMicrosecondsPerSecond;
+  result += (ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+  return result.ValueOrDie();
+}
+
+// Helper function to get results from clock_gettime() and convert to a
+// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC be
+// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
+// _POSIX_MONOTONIC_CLOCK to -1.
+#if (BUILDFLAG(IS_POSIX) && defined(_POSIX_MONOTONIC_CLOCK) && \
+     _POSIX_MONOTONIC_CLOCK >= 0) ||                           \
+    BUILDFLAG(IS_BSD) || BUILDFLAG(IS_ANDROID)
+int64_t ClockNow(clockid_t clk_id) {
+  struct timespec ts;
+  PA_BASE_CHECK(clock_gettime(clk_id, &ts) == 0);
+  return ConvertTimespecToMicros(ts);
+}
+#else  // _POSIX_MONOTONIC_CLOCK
+#error No usable tick clock function on this platform.
+#endif  // _POSIX_MONOTONIC_CLOCK
+
+}  // namespace
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  struct timeval tv;
+  struct timezone tz = {0, 0};  // UTC
+  PA_BASE_CHECK(gettimeofday(&tv, &tz) == 0);
+  // Combine seconds and microseconds in a 64-bit field containing microseconds
+  // since the epoch.  That's enough for nearly 600 centuries.  Adjust from
+  // Unix (1970) to Windows (1601) epoch.
+  return Time() +
+         Microseconds((tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec) +
+                      Time::kTimeTToMicrosecondsOffset);
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return TimeTicks() + Microseconds(ClockNow(CLOCK_MONOTONIC));
+}
+}  // namespace subtle
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::LINUX_CLOCK_MONOTONIC;
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+    BUILDFLAG(IS_ANDROID)
+  return ThreadTicks() + Microseconds(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#else
+  PA_NOTREACHED();
+#endif
+}
+}  // namespace subtle
+
+}  // namespace partition_alloc::internal::base
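TimeNowIgnoringOverride() above shifts the gettimeofday() result from the Unix epoch (1970-01-01) to the Windows epoch (1601-01-01) used by Time. A small sketch of that arithmetic, assuming the usual Chromium value of Time::kTimeTToMicrosecondsOffset:

    // 1601-01-01 to 1970-01-01 is 11,644,473,600 seconds.
    constexpr int64_t kUnixToWindowsEpochSeconds = 11'644'473'600;
    constexpr int64_t kExpectedOffsetMicros =
        kUnixToWindowsEpochSeconds * 1'000'000;  // 11,644,473,600,000,000 us
    // Time::kTimeTToMicrosecondsOffset is expected to equal this value, so a
    // timeval of {0, 0} (the Unix epoch) maps to that many microseconds past 1601.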
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.cc
new file mode 100644
index 0000000..f0a1d24
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.cc
@@ -0,0 +1,45 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+
+namespace partition_alloc::internal::base::subtle {
+
+// static
+bool ScopedTimeClockOverrides::overrides_active_ = false;
+
+ScopedTimeClockOverrides::ScopedTimeClockOverrides(
+    TimeNowFunction time_override,
+    TimeTicksNowFunction time_ticks_override,
+    ThreadTicksNowFunction thread_ticks_override) {
+  PA_BASE_DCHECK(!overrides_active_);
+  overrides_active_ = true;
+  if (time_override) {
+    internal::g_time_now_function.store(time_override,
+                                        std::memory_order_relaxed);
+    internal::g_time_now_from_system_time_function.store(
+        time_override, std::memory_order_relaxed);
+  }
+  if (time_ticks_override) {
+    internal::g_time_ticks_now_function.store(time_ticks_override,
+                                              std::memory_order_relaxed);
+  }
+  if (thread_ticks_override) {
+    internal::g_thread_ticks_now_function.store(thread_ticks_override,
+                                                std::memory_order_relaxed);
+  }
+}
+
+ScopedTimeClockOverrides::~ScopedTimeClockOverrides() {
+  internal::g_time_now_function.store(&TimeNowIgnoringOverride);
+  internal::g_time_now_from_system_time_function.store(
+      &TimeNowFromSystemTimeIgnoringOverride);
+  internal::g_time_ticks_now_function.store(&TimeTicksNowIgnoringOverride);
+  internal::g_thread_ticks_now_function.store(&ThreadTicksNowIgnoringOverride);
+  overrides_active_ = false;
+}
+
+}  // namespace partition_alloc::internal::base::subtle
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h
new file mode 100644
index 0000000..1763774
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h
@@ -0,0 +1,86 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TIME_TIME_OVERRIDE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TIME_TIME_OVERRIDE_H_
+
+#include <atomic>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+using TimeNowFunction = decltype(&Time::Now);
+using TimeTicksNowFunction = decltype(&TimeTicks::Now);
+using ThreadTicksNowFunction = decltype(&ThreadTicks::Now);
+
+// Time overrides should be used with extreme caution. Discuss with //base/time
+// OWNERS before adding a new one.
+namespace subtle {
+
+// Override the return value of Time::Now and Time::NowFromSystemTime /
+// TimeTicks::Now / ThreadTicks::Now to emulate time, e.g. for tests or to
+// modify progression of time. It is recommended that the override be set while
+// single-threaded and before the first call to Now() to avoid threading issues
+// and inconsistencies in returned values. Overriding time while other threads
+// are running is very subtle and should be reserved for developer only use
+// cases (e.g. virtual time in devtools) where any flakiness caused by a racy
+// time update isn't surprising. Instantiating a ScopedTimeClockOverrides while
+// other threads are running might break their expectation that TimeTicks and
+// ThreadTicks increase monotonically. Nested overrides are not allowed.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) ScopedTimeClockOverrides {
+ public:
+  // Pass |nullptr| for any override if it shouldn't be overridden.
+  ScopedTimeClockOverrides(TimeNowFunction time_override,
+                           TimeTicksNowFunction time_ticks_override,
+                           ThreadTicksNowFunction thread_ticks_override);
+
+  ScopedTimeClockOverrides(const ScopedTimeClockOverrides&) = delete;
+  ScopedTimeClockOverrides& operator=(const ScopedTimeClockOverrides&) = delete;
+
+  // Restores the platform default Now() functions.
+  ~ScopedTimeClockOverrides();
+
+  static bool overrides_active() { return overrides_active_; }
+
+ private:
+  static bool overrides_active_;
+};
+
+// These methods return the platform default Time::Now / TimeTicks::Now /
+// ThreadTicks::Now values even while an override is in place. These methods
+// should only be used in places where emulated time should be disregarded. For
+// example, they can be used to implement test timeouts for tests that may
+// override time.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE) Time TimeNowIgnoringOverride();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+Time TimeNowFromSystemTimeIgnoringOverride();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+TimeTicks TimeTicksNowIgnoringOverride();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC_BASE)
+ThreadTicks ThreadTicksNowIgnoringOverride();
+
+}  // namespace subtle
+
+namespace internal {
+
+// These function pointers are used by platform-independent implementations of
+// the Now() methods and ScopedTimeClockOverrides. They are set to point to the
+// respective NowIgnoringOverride functions by default, but can also be set by
+// platform-specific code to select a default implementation at runtime, thereby
+// avoiding the indirection via the NowIgnoringOverride functions. Note that the
+// pointers can be overridden and later reset to the NowIgnoringOverride
+// functions by ScopedTimeClockOverrides.
+extern std::atomic<TimeNowFunction> g_time_now_function;
+extern std::atomic<TimeNowFunction> g_time_now_from_system_time_function;
+extern std::atomic<TimeTicksNowFunction> g_time_ticks_now_function;
+extern std::atomic<ThreadTicksNowFunction> g_thread_ticks_now_function;
+
+}  // namespace internal
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TIME_TIME_OVERRIDE_H_
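A minimal usage sketch for ScopedTimeClockOverrides (not part of the patch; FakeTicks is a hypothetical test helper, and the partition_alloc::internal::base namespace is assumed to be in scope):

    TimeTicks FakeTicks() {
      return TimeTicks() + Seconds(42);
    }

    void SomeTest() {
      subtle::ScopedTimeClockOverrides overrides(
          /*time_override=*/nullptr,
          /*time_ticks_override=*/&FakeTicks,
          /*thread_ticks_override=*/nullptr);
      // While |overrides| is alive, TimeTicks::Now() returns the fake value.
      PA_BASE_CHECK(TimeTicks::Now() == TimeTicks() + Seconds(42));
    }  // The destructor restores the platform default Now() functions.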
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_win.cc
new file mode 100644
index 0000000..8c09a3a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_win.cc
@@ -0,0 +1,565 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Windows Timer Primer
+//
+// A good article:  http://www.ddj.com/windows/184416651
+// A good mozilla bug:  http://bugzilla.mozilla.org/show_bug.cgi?id=363258
+//
+// The default windows timer, GetSystemTimeAsFileTime is not very precise.
+// It is only good to ~15.5ms.
+//
+// QueryPerformanceCounter is the logical choice for a high-precision timer.
+// However, it is known to be buggy on some hardware.  Specifically, it can
+// sometimes "jump".  On laptops, QPC can also be very expensive to call.
+// It's 3-4x slower than timeGetTime() on desktops, but can be 10x slower
+// on laptops.  A unittest exists which will show the relative cost of various
+// timers on any system.
+//
+// The next logical choice is timeGetTime().  timeGetTime has a precision of
+// 1ms, but only if you call APIs (timeBeginPeriod()) which affect all other
+// applications on the system.  By default, precision is only 15.5ms.
+// Unfortunately, we don't want to call timeBeginPeriod because we don't
+// want to affect other applications.  Further, on mobile platforms, use of
+// faster multimedia timers can hurt battery life.  See the intel
+// article about this here:
+// http://softwarecommunity.intel.com/articles/eng/1086.htm
+//
+// To work around all this, we're going to generally use timeGetTime().  We
+// will only increase the system-wide timer if we're not running on battery
+// power.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+
+#include <windows.foundation.h>
+#include <windows.h>
+
+#include <mmsystem.h>
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bit_cast.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal::base {
+
+namespace {
+
+// From MSDN, FILETIME "Contains a 64-bit value representing the number of
+// 100-nanosecond intervals since January 1, 1601 (UTC)."
+int64_t FileTimeToMicroseconds(const FILETIME& ft) {
+  // Need to bit_cast to fix alignment, then divide by 10 to convert
+  // 100-nanoseconds to microseconds. This only works on little-endian
+  // machines.
+  return bit_cast<int64_t, FILETIME>(ft) / 10;
+}
+
+bool CanConvertToFileTime(int64_t us) {
+  return us >= 0 && us <= (std::numeric_limits<int64_t>::max() / 10);
+}
+
+FILETIME MicrosecondsToFileTime(int64_t us) {
+  PA_BASE_DCHECK(CanConvertToFileTime(us))
+      << "Out-of-range: Cannot convert " << us
+      << " microseconds to FILETIME units.";
+
+  // Multiply by 10 to convert microseconds to 100-nanoseconds. Bit_cast will
+  // handle alignment problems. This only works on little-endian machines.
+  return bit_cast<FILETIME, int64_t>(us * 10);
+}
+
+int64_t CurrentWallclockMicroseconds() {
+  FILETIME ft;
+  ::GetSystemTimeAsFileTime(&ft);
+  return FileTimeToMicroseconds(ft);
+}
+
+// Time between resampling the un-granular clock for this API.
+constexpr TimeDelta kMaxTimeToAvoidDrift = Seconds(60);
+
+int64_t g_initial_time = 0;
+TimeTicks g_initial_ticks;
+
+void InitializeClock() {
+  g_initial_ticks = subtle::TimeTicksNowIgnoringOverride();
+  g_initial_time = CurrentWallclockMicroseconds();
+}
+
+// Returns the current value of the performance counter.
+uint64_t QPCNowRaw() {
+  LARGE_INTEGER perf_counter_now = {};
+  // According to the MSDN documentation for QueryPerformanceCounter(), this
+  // will never fail on systems that run XP or later.
+  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
+  ::QueryPerformanceCounter(&perf_counter_now);
+  return perf_counter_now.QuadPart;
+}
+
+}  // namespace
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  if (g_initial_time == 0) {
+    InitializeClock();
+  }
+
+  // We implement time using the high-resolution timers so that we can get
+  // timeouts which are smaller than 10-15ms.  If we just used
+  // CurrentWallclockMicroseconds(), we'd have the less-granular timer.
+  //
+  // To make this work, we initialize the clock (g_initial_time) and the
+  // counter (g_initial_ticks).  To compute the current time, we can check
+  // the number of ticks that have elapsed, and compute the delta.
+  //
+  // To avoid any drift, we periodically resync the counters to the system
+  // clock.
+  while (true) {
+    TimeTicks ticks = TimeTicksNowIgnoringOverride();
+
+    // Calculate the time elapsed since we started our timer
+    TimeDelta elapsed = ticks - g_initial_ticks;
+
+    // Check if enough time has elapsed that we need to resync the clock.
+    if (elapsed > kMaxTimeToAvoidDrift) {
+      InitializeClock();
+      continue;
+    }
+
+    return Time() + elapsed + Microseconds(g_initial_time);
+  }
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Force resync.
+  InitializeClock();
+  return Time() + Microseconds(g_initial_time);
+}
+}  // namespace subtle
+
+// static
+Time Time::FromFileTime(FILETIME ft) {
+  if (bit_cast<int64_t, FILETIME>(ft) == 0) {
+    return Time();
+  }
+  if (ft.dwHighDateTime == std::numeric_limits<DWORD>::max() &&
+      ft.dwLowDateTime == std::numeric_limits<DWORD>::max()) {
+    return Max();
+  }
+  return Time(FileTimeToMicroseconds(ft));
+}
+
+FILETIME Time::ToFileTime() const {
+  if (is_null()) {
+    return bit_cast<FILETIME, int64_t>(0);
+  }
+  if (is_max()) {
+    FILETIME result;
+    result.dwHighDateTime = std::numeric_limits<DWORD>::max();
+    result.dwLowDateTime = std::numeric_limits<DWORD>::max();
+    return result;
+  }
+  return MicrosecondsToFileTime(us_);
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace {
+
+// We define a wrapper to adapt between the __stdcall and __cdecl call of the
+// mock function, and to avoid a static constructor.  Assigning an import to a
+// function pointer directly would require setup code to fetch from the IAT.
+DWORD timeGetTimeWrapper() {
+  return timeGetTime();
+}
+
+DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
+
+// A structure holding the most significant bits of "last seen" and a
+// "rollover" counter.
+union LastTimeAndRolloversState {
+  // The state as a single 32-bit opaque value.
+  std::atomic<int32_t> as_opaque_32{0};
+
+  // The state as usable values.
+  struct {
+    // The top 8-bits of the "last" time. This is enough to check for rollovers
+    // and the small bit-size means fewer CompareAndSwap operations to store
+    // changes in state, which in turn makes for fewer retries.
+    uint8_t last_8;
+    // A count of the number of detected rollovers. Using this as bits 47-32
+    // of the upper half of a 64-bit value results in a 48-bit tick counter.
+    // This extends the total rollover period from about 49 days to about 8800
+    // years while still allowing it to be stored with last_8 in a single
+    // 32-bit value.
+    uint16_t rollovers;
+  } as_values;
+};
+std::atomic<int32_t> g_last_time_and_rollovers = 0;
+static_assert(sizeof(LastTimeAndRolloversState) <=
+                  sizeof(g_last_time_and_rollovers),
+              "LastTimeAndRolloversState does not fit in a single atomic word");
+
+// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
+// because it returns the number of milliseconds since Windows has started,
+// which will roll over the 32-bit value every ~49 days.  We try to track
+// rollover ourselves, which works if TimeTicks::Now() is called at least every
+// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
+TimeTicks RolloverProtectedNow() {
+  LastTimeAndRolloversState state;
+  DWORD now;  // DWORD is always unsigned 32 bits.
+
+  while (true) {
+    // Fetch the "now" and "last" tick values, updating "last" with "now" and
+    // incrementing the "rollovers" counter if the tick-value has wrapped back
+    // around. Atomic operations ensure that both "last" and "rollovers" are
+    // always updated together.
+    int32_t original =
+        g_last_time_and_rollovers.load(std::memory_order_acquire);
+    state.as_opaque_32 = original;
+    now = g_tick_function();
+    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
+    if (now_8 < state.as_values.last_8) {
+      ++state.as_values.rollovers;
+    }
+    state.as_values.last_8 = now_8;
+
+    // If the state hasn't changed, exit the loop.
+    if (state.as_opaque_32 == original) {
+      break;
+    }
+
+    // Save the changed state. compare_exchange_strong() returns true only if
+    // the stored value still matched |original| and was replaced; in that case
+    // exit the loop.
+    if (g_last_time_and_rollovers.compare_exchange_strong(
+            original, state.as_opaque_32, std::memory_order_release)) {
+      break;
+    }
+
+    // Another thread has done something in between so retry from the top.
+  }
+
+  return TimeTicks() +
+         Milliseconds(now +
+                      (static_cast<uint64_t>(state.as_values.rollovers) << 32));
+}
+
+// Discussion of tick counter options on Windows:
+//
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, on older CPUs, two issues can affect its
+// reliability: First it is maintained per processor and not synchronized
+// between processors. Also, the counters will change frequency due to thermal
+// and power changes, and stop in some states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (<1 microsecond) time stamp. On most hardware running today, it
+// auto-detects and uses the constant-rate RDTSC counter to provide extremely
+// efficient and reliable time stamps.
+//
+// On older CPUs where RDTSC is unreliable, it falls back to using more
+// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
+// PM timer, and can involve system calls; and all this is up to the HAL (with
+// some help from ACPI). According to
+// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
+// worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent results on a multiprocessor computer, but for older CPUs it
+// can be unreliable due to bugs in the BIOS or HAL.
+//
+// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
+// milliseconds) time stamp but is comparatively less expensive to retrieve and
+// more reliable. Time::EnableHighResolutionTimer() and
+// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
+// this timer; and also other Windows applications can alter it, affecting this
+// one.
+
+TimeTicks InitialNowFunction();
+
+// See "threading notes" in InitializeNowFunctionPointer() for details on how
+// concurrent reads/writes to these globals have been made safe.
+std::atomic<TimeTicksNowFunction> g_time_ticks_now_ignoring_override_function{
+    &InitialNowFunction};
+int64_t g_qpc_ticks_per_second = 0;
+
+TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
+  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
+  // InitializeNowFunctionPointer(), has happened by this point.
+  std::atomic_thread_fence(std::memory_order_acquire);
+
+  PA_BASE_DCHECK(g_qpc_ticks_per_second > 0);
+
+  // If the QPC Value is below the overflow threshold, we proceed with
+  // simple multiply and divide.
+  if (qpc_value < Time::kQPCOverflowThreshold) {
+    return Microseconds(qpc_value * Time::kMicrosecondsPerSecond /
+                        g_qpc_ticks_per_second);
+  }
+  // Otherwise, calculate microseconds in a roundabout manner to avoid
+  // overflow and precision issues.
+  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
+  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
+  return Microseconds((whole_seconds * Time::kMicrosecondsPerSecond) +
+                      ((leftover_ticks * Time::kMicrosecondsPerSecond) /
+                       g_qpc_ticks_per_second));
+}
+
+TimeTicks QPCNow() {
+  return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw());
+}
+
+void InitializeNowFunctionPointer() {
+  LARGE_INTEGER ticks_per_sec = {};
+  if (!QueryPerformanceFrequency(&ticks_per_sec)) {
+    ticks_per_sec.QuadPart = 0;
+  }
+
+  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
+  // the low-resolution clock.
+  //
+  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
+  // will still use the low-resolution clock. A CPU lacking a non-stop time
+  // counter will cause Windows to provide an alternate QPC implementation that
+  // works, but is expensive to use.
+  //
+  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
+  // ~72% of users fall within this category.
+  CPU cpu;
+  const TimeTicksNowFunction now_function =
+      (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter())
+          ? &RolloverProtectedNow
+          : &QPCNow;
+
+  // Threading note 1: In an unlikely race condition, it's possible for two or
+  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
+  // not a problem since all threads end up writing out the same values
+  // to the global variables, and those variables, being atomic, are safe to
+  // read from other threads.
+  //
+  // Threading note 2: A release fence is placed here to ensure, from the
+  // perspective of other threads using the function pointers, that the
+  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
+  // are changed.
+  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
+  std::atomic_thread_fence(std::memory_order_release);
+  // Also set g_time_ticks_now_function to avoid the additional indirection via
+  // TimeTicksNowIgnoringOverride() for future calls to TimeTicks::Now(), only
+  // if it wasn't already overridden to a different value. memory_order_relaxed
+  // is sufficient since an explicit fence was inserted above.
+  base::TimeTicksNowFunction initial_time_ticks_now_function =
+      &subtle::TimeTicksNowIgnoringOverride;
+  internal::g_time_ticks_now_function.compare_exchange_strong(
+      initial_time_ticks_now_function, now_function, std::memory_order_relaxed);
+  g_time_ticks_now_ignoring_override_function.store(now_function,
+                                                    std::memory_order_relaxed);
+}
+
+TimeTicks InitialNowFunction() {
+  InitializeNowFunctionPointer();
+  return g_time_ticks_now_ignoring_override_function.load(
+      std::memory_order_relaxed)();
+}
+
+}  // namespace
+
+// static
+TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
+    TickFunctionType ticker) {
+  TickFunctionType old = g_tick_function;
+  g_tick_function = ticker;
+  g_last_time_and_rollovers.store(0, std::memory_order_relaxed);
+  return old;
+}
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return g_time_ticks_now_ignoring_override_function.load(
+      std::memory_order_relaxed)();
+}
+}  // namespace subtle
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::WIN_ROLLOVER_PROTECTED_TIME_GET_TIME;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  return ThreadTicks::GetForThread(PlatformThread::CurrentHandle());
+}
+}  // namespace subtle
+
+// static
+ThreadTicks ThreadTicks::GetForThread(
+    const PlatformThreadHandle& thread_handle) {
+  PA_BASE_DCHECK(IsSupported());
+
+#if defined(ARCH_CPU_ARM64)
+  // QueryThreadCycleTime versus TSCTicksPerSecond doesn't have much relation to
+  // actual elapsed time on Windows on Arm, because QueryThreadCycleTime is
+  // backed by the actual number of CPU cycles executed, rather than a
+  // constant-rate timer as on Intel. To work around this, use GetThreadTimes
+  // (which isn't as accurate but is meaningful as a measure of elapsed
+  // per-thread time).
+  FILETIME creation_time, exit_time, kernel_time, user_time;
+  ::GetThreadTimes(thread_handle.platform_handle(), &creation_time, &exit_time,
+                   &kernel_time, &user_time);
+
+  const int64_t us = FileTimeToMicroseconds(user_time);
+#else
+  // Get the number of TSC ticks used by the current thread.
+  ULONG64 thread_cycle_time = 0;
+  ::QueryThreadCycleTime(thread_handle.platform_handle(), &thread_cycle_time);
+
+  // Get the frequency of the TSC.
+  const double tsc_ticks_per_second = time_internal::TSCTicksPerSecond();
+  if (tsc_ticks_per_second == 0) {
+    return ThreadTicks();
+  }
+
+  // Return the CPU time of the current thread.
+  const double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
+  const int64_t us =
+      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond);
+#endif
+
+  return ThreadTicks(us);
+}
+
+// static
+bool ThreadTicks::IsSupportedWin() {
+#if defined(ARCH_CPU_ARM64)
+  // The Arm implementation does not use QueryThreadCycleTime and therefore does
+  // not care about the time stamp counter.
+  return true;
+#else
+  return time_internal::HasConstantRateTSC();
+#endif
+}
+
+// static
+void ThreadTicks::WaitUntilInitializedWin() {
+#if !defined(ARCH_CPU_ARM64)
+  while (time_internal::TSCTicksPerSecond() == 0) {
+    ::Sleep(10);
+  }
+#endif
+}
+
+// static
+TimeTicks TimeTicks::FromQPCValue(LONGLONG qpc_value) {
+  return TimeTicks() + QPCValueToTimeDelta(qpc_value);
+}
+
+// TimeDelta ------------------------------------------------------------------
+
+// static
+TimeDelta TimeDelta::FromQPCValue(LONGLONG qpc_value) {
+  return QPCValueToTimeDelta(qpc_value);
+}
+
+// static
+TimeDelta TimeDelta::FromFileTime(FILETIME ft) {
+  return Microseconds(FileTimeToMicroseconds(ft));
+}
+
+// static
+TimeDelta TimeDelta::FromWinrtDateTime(ABI::Windows::Foundation::DateTime dt) {
+  // UniversalTime is 100 ns intervals since January 1, 1601 (UTC)
+  return Microseconds(dt.UniversalTime / 10);
+}
+
+ABI::Windows::Foundation::DateTime TimeDelta::ToWinrtDateTime() const {
+  ABI::Windows::Foundation::DateTime date_time;
+  date_time.UniversalTime = InMicroseconds() * 10;
+  return date_time;
+}
+
+#if !defined(ARCH_CPU_ARM64)
+namespace time_internal {
+
+bool HasConstantRateTSC() {
+  static bool is_supported = CPU().has_non_stop_time_stamp_counter();
+  return is_supported;
+}
+
+double TSCTicksPerSecond() {
+  PA_BASE_DCHECK(HasConstantRateTSC());
+  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
+  // frequency, because there is no guarantee that the TSC frequency is equal to
+  // the performance counter frequency.
+  // The TSC frequency is cached in a static variable because it takes some time
+  // to compute it.
+  static double tsc_ticks_per_second = 0;
+  if (tsc_ticks_per_second != 0) {
+    return tsc_ticks_per_second;
+  }
+
+  // Increase the thread priority to reduce the chances of having a context
+  // switch during a reading of the TSC and the performance counter.
+  const int previous_priority = ::GetThreadPriority(::GetCurrentThread());
+  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+  // The first time that this function is called, make an initial reading of the
+  // TSC and the performance counter.
+
+  static const uint64_t tsc_initial = __rdtsc();
+  static const uint64_t perf_counter_initial = QPCNowRaw();
+
+  // Make another reading of the TSC and the performance counter every time
+  // that this function is called.
+  const uint64_t tsc_now = __rdtsc();
+  const uint64_t perf_counter_now = QPCNowRaw();
+
+  // Reset the thread priority.
+  ::SetThreadPriority(::GetCurrentThread(), previous_priority);
+
+  // Make sure that at least 50 ms elapsed between the 2 readings. The first
+  // time that this function is called, we don't expect this to be the case.
+  // Note: The longer the elapsed time between the 2 readings is, the more
+  //   accurate the computed TSC frequency will be. The 50 ms value was
+  //   chosen because local benchmarks show that it allows us to get a
+  //   stddev of less than 1 tick/us between multiple runs.
+  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
+  //   this will never fail on systems that run XP or later.
+  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
+  LARGE_INTEGER perf_counter_frequency = {};
+  ::QueryPerformanceFrequency(&perf_counter_frequency);
+  PA_BASE_DCHECK(perf_counter_now >= perf_counter_initial);
+  const uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
+  const double elapsed_time_seconds =
+      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
+
+  constexpr double kMinimumEvaluationPeriodSeconds = 0.05;
+  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds) {
+    return 0;
+  }
+
+  // Compute the frequency of the TSC.
+  PA_BASE_DCHECK(tsc_now >= tsc_initial);
+  const uint64_t tsc_ticks = tsc_now - tsc_initial;
+  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
+
+  return tsc_ticks_per_second;
+}
+
+}  // namespace time_internal
+#endif  // defined(ARCH_CPU_ARM64)
+
+}  // namespace partition_alloc::internal::base
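RolloverProtectedNow() above widens the 32-bit millisecond counter from timeGetTime() into a 48-bit value by splicing in the rollover count. A sketch of that composition with made-up numbers:

    DWORD now_ms = 5;         // timeGetTime() result, already wrapped twice.
    uint16_t rollovers = 2;   // Wrap-arounds detected via the top 8 bits.
    uint64_t total_ms = now_ms + (static_cast<uint64_t>(rollovers) << 32);
    // total_ms == 2 * 4'294'967'296 + 5 milliseconds (~99.4 days of uptime),
    // which is what gets wrapped in Milliseconds() to form the TimeTicks value.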
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/types/strong_alias.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/types/strong_alias.h
new file mode 100644
index 0000000..fe9bde9
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/types/strong_alias.h
@@ -0,0 +1,142 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
+
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+namespace partition_alloc::internal::base {
+
+// A type-safe alternative for a typedef or a 'using' directive.
+//
+// C++ currently does not support type-safe typedefs, despite multiple proposals
+// (ex. http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3515.pdf). The
+// next best thing is to try and emulate them in library code.
+//
+// The motivation is to disallow several classes of errors:
+//
+// using Orange = int;
+// using Apple = int;
+// Apple apple(2);
+// Orange orange = apple;  // Orange should not be able to become an Apple.
+// Orange x = orange + apple;  // Shouldn't add Oranges and Apples.
+// if (orange > apple);  // Shouldn't compare Apples to Oranges.
+// void foo(Orange);
+// void foo(Apple);  // Redefinition.
+// etc.
+//
+// StrongAlias may instead be used as follows:
+//
+// using Orange = StrongAlias<class OrangeTag, int>;
+// using Apple = StrongAlias<class AppleTag, int>;
+// using Banana = StrongAlias<class BananaTag, std::string>;
+// Apple apple(2);
+// Banana banana("Hello");
+// Orange orange = apple;  // Does not compile.
+// Orange other_orange = orange;  // Compiles, types match.
+// Orange x = orange + apple;  // Does not compile.
+// Orange y = Orange(orange.value() + apple.value());  // Compiles.
+// Orange z = Orange(banana->size() + *other_orange);  // Compiles.
+// if (orange > apple);  // Does not compile.
+// if (orange > other_orange);  // Compiles.
+// void foo(Orange);
+// void foo(Apple);  // Compiles into separate overload.
+//
+// StrongAlias is a zero-cost abstraction; it's compiled away.
+//
+// TagType is an empty tag class (also called "phantom type") that only serves
+// the type system to differentiate between different instantiations of the
+// template.
+// UnderlyingType may be almost any value type. Note that some methods of the
+// StrongAlias may be unavailable (ie. produce elaborate compilation errors when
+// used) if UnderlyingType doesn't support them.
+//
+// StrongAlias only directly exposes comparison operators (for convenient use in
+// ordered containers) and a Hasher struct (for unordered_map/set). It's
+// impossible, without reflection, to expose all methods of the UnderlyingType
+// in StrongAlias's interface. It's also potentially unwanted (ex. you don't
+// want to be able to add two StrongAliases that represent socket handles).
+// A getter and dereference operators are provided in case you need to access
+// the UnderlyingType.
+//
+// See also
+// - //styleguide/c++/blink-c++.md which provides recommendation and examples of
+//   using StrongAlias<Tag, bool> instead of a bare bool.
+// - IdType<...> which provides helpers for specializing StrongAlias to be
+//   used as an id.
+// - TokenType<...> which provides helpers for specializing StrongAlias to be
+//   used as a wrapper of base::UnguessableToken.
+template <typename TagType, typename UnderlyingType>
+class StrongAlias {
+ public:
+  constexpr StrongAlias() = default;
+  constexpr explicit StrongAlias(const UnderlyingType& v) : value_(v) {}
+  constexpr explicit StrongAlias(UnderlyingType&& v) noexcept
+      : value_(std::move(v)) {}
+
+  constexpr UnderlyingType* operator->() { return &value_; }
+  constexpr const UnderlyingType* operator->() const { return &value_; }
+
+  constexpr UnderlyingType& operator*() & { return value_; }
+  constexpr const UnderlyingType& operator*() const& { return value_; }
+  constexpr UnderlyingType&& operator*() && { return std::move(value_); }
+  constexpr const UnderlyingType&& operator*() const&& {
+    return std::move(value_);
+  }
+
+  constexpr UnderlyingType& value() & { return value_; }
+  constexpr const UnderlyingType& value() const& { return value_; }
+  constexpr UnderlyingType&& value() && { return std::move(value_); }
+  constexpr const UnderlyingType&& value() const&& { return std::move(value_); }
+
+  constexpr explicit operator const UnderlyingType&() const& { return value_; }
+
+  constexpr bool operator==(const StrongAlias& other) const {
+    return value_ == other.value_;
+  }
+  constexpr bool operator!=(const StrongAlias& other) const {
+    return value_ != other.value_;
+  }
+  constexpr bool operator<(const StrongAlias& other) const {
+    return value_ < other.value_;
+  }
+  constexpr bool operator<=(const StrongAlias& other) const {
+    return value_ <= other.value_;
+  }
+  constexpr bool operator>(const StrongAlias& other) const {
+    return value_ > other.value_;
+  }
+  constexpr bool operator>=(const StrongAlias& other) const {
+    return value_ >= other.value_;
+  }
+
+  // Hasher to use in std::unordered_map, std::unordered_set, etc.
+  //
+  // Example usage:
+  //     using MyType = base::StrongAlias<...>;
+  //     using MySet = std::unordered_set<MyType, typename MyType::Hasher>;
+  //
+  // https://google.github.io/styleguide/cppguide.html#std_hash asks to avoid
+  // defining specializations of `std::hash` - this is why the hasher needs to
+  // be explicitly specified and why the following code will *not* work:
+  //     using MyType = base::StrongAlias<...>;
+  //     using MySet = std::unordered_set<MyType>;  // This won't work.
+  struct Hasher {
+    using argument_type = StrongAlias;
+    using result_type = std::size_t;
+    result_type operator()(const argument_type& id) const {
+      return std::hash<UnderlyingType>()(id.value());
+    }
+  };
+
+ protected:
+  UnderlyingType value_;
+};
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types.h
new file mode 100644
index 0000000..0783dec
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types.h
@@ -0,0 +1,16 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
+
+// Forward declare Windows compatible handles.
+
+#define PA_WINDOWS_HANDLE_TYPE(name) \
+  struct name##__;                   \
+  typedef struct name##__* name;
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types_list.inc"
+#undef PA_WINDOWS_HANDLE_TYPE
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types_list.inc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types_list.inc
similarity index 100%
rename from base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types_list.inc
rename to base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types_list.inc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h
new file mode 100644
index 0000000..25fb5b3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h
@@ -0,0 +1,90 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains defines and typedefs that allow popular Windows types to
+// be used without the overhead of including windows.h.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_WIN_WINDOWS_TYPES_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_WIN_WINDOWS_TYPES_H_
+
+// Needed for function prototypes.
+#include <specstrings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// typedef and define the most commonly used Windows integer types.
+
+typedef unsigned long DWORD;
+typedef long LONG;
+typedef __int64 LONGLONG;
+typedef unsigned __int64 ULONGLONG;
+
+#define VOID void
+typedef char CHAR;
+typedef short SHORT;
+typedef long LONG;
+typedef int INT;
+typedef unsigned int UINT;
+typedef unsigned int* PUINT;
+typedef unsigned __int64 UINT64;
+typedef void* LPVOID;
+typedef void* PVOID;
+typedef void* HANDLE;
+typedef int BOOL;
+typedef unsigned char BYTE;
+typedef BYTE BOOLEAN;
+typedef DWORD ULONG;
+typedef unsigned short WORD;
+typedef WORD UWORD;
+typedef WORD ATOM;
+
+// Forward declare some Windows struct/typedef sets.
+
+typedef struct _RTL_SRWLOCK RTL_SRWLOCK;
+typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
+
+typedef struct _FILETIME FILETIME;
+
+struct PA_CHROME_SRWLOCK {
+  PVOID Ptr;
+};
+
+// The trailing white-spaces after this macro are required, for compatibility
+// with the definition in winnt.h.
+// clang-format off
+#define RTL_SRWLOCK_INIT {0}                            // NOLINT
+// clang-format on
+#define SRWLOCK_INIT RTL_SRWLOCK_INIT
+
+// Define some macros needed when prototyping Windows functions.
+
+#define DECLSPEC_IMPORT __declspec(dllimport)
+#define WINBASEAPI DECLSPEC_IMPORT
+#define WINAPI __stdcall
+
+// Needed for LockImpl.
+WINBASEAPI _Releases_exclusive_lock_(*SRWLock) VOID WINAPI
+    ReleaseSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
+WINBASEAPI BOOLEAN WINAPI TryAcquireSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
+
+// Needed for thread_local_storage.h
+WINBASEAPI LPVOID WINAPI TlsGetValue(_In_ DWORD dwTlsIndex);
+
+WINBASEAPI BOOL WINAPI TlsSetValue(_In_ DWORD dwTlsIndex,
+                                   _In_opt_ LPVOID lpTlsValue);
+
+WINBASEAPI _Check_return_ _Post_equals_last_error_ DWORD WINAPI
+    GetLastError(VOID);
+
+WINBASEAPI VOID WINAPI SetLastError(_In_ DWORD dwErrCode);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_BASE_WIN_WINDOWS_TYPES_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h
new file mode 100644
index 0000000..fae09d7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h
@@ -0,0 +1,165 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CHECK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CHECK_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "build/build_config.h"
+
+// When PartitionAlloc is used as the default allocator, we cannot use the
+// regular (D)CHECK() macros, as they allocate internally. When an assertion is
+// triggered, they format strings, leading to reentrancy in the code, which none
+// of PartitionAlloc is designed to support (and especially not for error
+// paths).
+//
+// As a consequence:
+// - When PartitionAlloc is not malloc(), use the regular macros
+// - Otherwise, crash immediately. This provides worse error messages though.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !PA_BASE_CHECK_WILL_STREAM()
+
+// For official builds, discard log strings to reduce binary bloat.
+// See base/check.h for implementation details.
+#define PA_CHECK(condition)                        \
+  PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
+                            : PA_EAT_CHECK_STREAM_PARAMS()
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#define PA_DCHECK(condition) PA_CHECK(condition)
+#else
+#define PA_DCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_PCHECK(condition)                                 \
+  if (!(condition)) {                                        \
+    int error = errno;                                       \
+    ::partition_alloc::internal::base::debug::Alias(&error); \
+    PA_IMMEDIATE_CRASH();                                    \
+  }
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#define PA_DPCHECK(condition) PA_PCHECK(condition)
+#else
+#define PA_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
+       // !PA_BASE_CHECK_WILL_STREAM()
+#define PA_CHECK(condition) PA_BASE_CHECK(condition)
+#define PA_DCHECK(condition) PA_BASE_DCHECK(condition)
+#define PA_PCHECK(condition) PA_BASE_PCHECK(condition)
+#define PA_DPCHECK(condition) PA_BASE_DPCHECK(condition)
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
+        // !PA_BASE_CHECK_WILL_STREAM()
+
+// Expensive dchecks that run within *Scan. These checks are only enabled in
+// debug builds with dchecks enabled.
+#if !defined(NDEBUG)
+#define PA_SCAN_DCHECK_IS_ON() BUILDFLAG(PA_DCHECK_IS_ON)
+#else
+#define PA_SCAN_DCHECK_IS_ON() 0
+#endif
+
+#if PA_SCAN_DCHECK_IS_ON()
+#define PA_SCAN_DCHECK(expr) PA_DCHECK(expr)
+#else
+#define PA_SCAN_DCHECK(expr) PA_EAT_CHECK_STREAM_PARAMS(!(expr))
+#endif
+
+#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
+
+// Use this macro to assert on things that are conditionally constexpr as
+// determined by PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR or
+// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR. Where fixed at compile time, this
+// is a static_assert. Where determined at run time, this is a PA_CHECK.
+// Therefore, this macro must only be used where both a static_assert and a
+// PA_CHECK would be viable, that is, within a function, and ideally a function
+// that executes only once, early in the program, such as during initialization.
+#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
+  static_assert(condition, message)
+
+#else
+
+#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
+  do {                                                \
+    PA_CHECK(condition) << (message);                 \
+  } while (false)
+
+#endif
+
+// alignas(16) DebugKv causes breakpad_unittests and sandbox_linux_unittests
+// failures on android-marshmallow-x86-rel because of SIGSEGV.
+#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_X86_FAMILY) && \
+    defined(ARCH_CPU_32_BITS)
+#define PA_DEBUGKV_ALIGN alignas(8)
+#else
+#define PA_DEBUGKV_ALIGN alignas(16)
+#endif
+
+namespace partition_alloc::internal {
+
+static constexpr size_t kDebugKeyMaxLength = 8ull;
+
+// Used for PA_DEBUG_DATA_ON_STACK, below.
+struct PA_DEBUGKV_ALIGN DebugKv {
+  // 16 bytes object aligned on 16 bytes, to make it easier to see in crash
+  // reports.
+  char k[kDebugKeyMaxLength] = {};  // Not necessarily 0-terminated.
+  uint64_t v = 0;
+
+  DebugKv(const char* key, uint64_t value) : v(value) {
+    // Fill with ' ', so that the stack dump is nicer to read.  Not using
+    // memset() on purpose, this header is included from *many* places.
+    for (size_t index = 0; index < sizeof k; index++) {
+      k[index] = ' ';
+    }
+
+    for (size_t index = 0; index < sizeof k; index++) {
+      k[index] = key[index];
+      if (key[index] == '\0') {
+        break;
+      }
+    }
+  }
+};
+
+}  // namespace partition_alloc::internal
+
+#define PA_CONCAT(x, y) x##y
+#define PA_CONCAT2(x, y) PA_CONCAT(x, y)
+#define PA_DEBUG_UNIQUE_NAME PA_CONCAT2(kv, __LINE__)
+
+// Puts a key-value pair on the stack for debugging. `base::debug::Alias()`
+// makes sure a local variable is saved on the stack, but the variables can be
+// hard to find in crash reports, particularly if the frame pointer is not
+// present / invalid.
+//
+// This puts a key right before the value on the stack. The key has to be a C
+// string, which gets truncated if it's longer than 8 characters.
+// Example use:
+// PA_DEBUG_DATA_ON_STACK("size", 0x42)
+//
+// Sample output in lldb:
+// (lldb) x 0x00007fffffffd0d0 0x00007fffffffd0f0
+// 0x7fffffffd0d0: 73 69 7a 65 00 00 00 00 42 00 00 00 00 00 00 00
+// size............
+//
+// With gdb, one can use:
+// x/8g <STACK_POINTER>
+// to see the data. With lldb, "x <STACK_POINTER> <FRAME_POINTER>" can be used.
+#define PA_DEBUG_DATA_ON_STACK(name, value)                               \
+  static_assert(sizeof name <=                                            \
+                ::partition_alloc::internal::kDebugKeyMaxLength + 1);     \
+  ::partition_alloc::internal::DebugKv PA_DEBUG_UNIQUE_NAME{name, value}; \
+  ::partition_alloc::internal::base::debug::Alias(&PA_DEBUG_UNIQUE_NAME);
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CHECK_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h
new file mode 100644
index 0000000..9abb934
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h
@@ -0,0 +1,338 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONFIG_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONFIG_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "build/build_config.h"
+
+// PA_CONFIG() uses a trick similar to BUILDFLAG() to allow the compiler to
+// catch typos or a missing #include.
+//
+// -----------------------------------------------------------------------------
+// Housekeeping Rules
+// -----------------------------------------------------------------------------
+// 1. Prefix all config macros in this file with PA_CONFIG_ and define them in
+//    a function-like manner, e.g. PA_CONFIG_MY_SETTING().
+// 2. Both positive and negative cases must be defined.
+// 3. Don't use PA_CONFIG_MY_SETTING() directly outside of this file, use
+//    PA_CONFIG(flag-without-PA_CONFIG_) instead, e.g. PA_CONFIG(MY_SETTING).
+// 4. Do not use PA_CONFIG() when defining config macros, or it will lead to
+//    recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly.
+// 5. Try to use constexpr instead of macros wherever possible.
+// TODO(bartekn): Convert macros to constexpr or BUILDFLAG as much as possible.
+#define PA_CONFIG(flag) (PA_CONFIG_##flag())
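+
+// A quick illustration of why this pattern catches typos:
+//   #if PA_CONFIG(MY_SETTING)   // expands to (PA_CONFIG_MY_SETTING())
+// If the flag name is misspelled, e.g. PA_CONFIG(MY_SETING), then
+// PA_CONFIG_MY_SETING is not a defined macro, the identifier evaluates to 0 in
+// the #if expression, and the trailing "()" turns the condition into a
+// preprocessor error instead of silently evaluating to false.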
+
+// Assert that the heuristic in partition_alloc.gni is accurate on supported
+// configurations.
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+static_assert(sizeof(void*) == 8, "");
+#else
+static_assert(sizeof(void*) != 8, "");
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS) && \
+    (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
+#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1
+#else
+#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0
+#endif
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
+// Allow PA to select an alternate pool size at run-time before initialization,
+// rather than using a single constexpr value.
+//
+// This is needed on iOS because iOS test processes can't handle large pools
+// (see crbug.com/1250788).
+//
+// This setting is specific to 64-bit, as 32-bit has a different implementation.
+#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1
+#else
+#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS) && \
+    (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
+#include <linux/version.h>
+// TODO(bikineev): Enable for ChromeOS.
+#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() \
+  (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
+#else
+#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS) &&
+        // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
+
+#if BUILDFLAG(USE_STARSCAN)
+// Use card table to avoid races for PCScan configuration without safepoints.
+// The card table provides the guarantee that for a marked card the underlying
+// super-page is fully initialized.
+#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 1
+#else
+// The card table is permanently disabled for 32-bit.
+#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+// Use batched freeing when sweeping pages. This builds up a freelist in the
+// scanner thread and appends to the slot-span's freelist only once.
+#define PA_CONFIG_STARSCAN_BATCHED_FREE() 1
+
+// TODO(bikineev): Temporarily disable inlining in *Scan to get clearer
+// stacktraces.
+#define PA_CONFIG_STARSCAN_NOINLINE_SCAN_FUNCTIONS() 1
+
+// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
+// cause significant jank.
+#define PA_CONFIG_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM() 0
+
+// Double free detection comes with expensive cmpxchg (with the loop around it).
+// We currently disable it to improve the runtime.
+#define PA_CONFIG_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED() 0
+
+// POSIX covers more than just Linux-based OSes (e.g. macOS), and we rely on
+// Linux-specific features such as futex(2), so list the platforms explicitly.
+#define PA_CONFIG_HAS_LINUX_KERNEL() \
+  (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID))
+
+// On some platforms, we implement locking by spinning in userspace, then going
+// into the kernel only if there is contention. This requires platform support,
+// namely:
+// - On Linux, futex(2)
+// - On Windows, a fast userspace "try" operation which is available with
+//   SRWLock
+// - On macOS, pthread_mutex_trylock() is fast by default starting with macOS
+//   10.14. Chromium targets an earlier version, so this cannot be determined
+//   at compile time, and we use something different instead.
+//   TODO(https://crbug.com/1459032): macOS 10.15 is now required; switch to
+//   better locking.
+// - Otherwise, on POSIX we assume that a fast userspace pthread_mutex_trylock()
+//   is available.
+//
+// Otherwise, a userspace spinlock implementation is used.
+#if PA_CONFIG(HAS_LINUX_KERNEL) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || \
+    BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#define PA_CONFIG_HAS_FAST_MUTEX() 1
+#else
+#define PA_CONFIG_HAS_FAST_MUTEX() 0
+#endif
+
+// If defined, enables zeroing memory on Free() with roughly 1% probability.
+// This applies only to normal buckets, as direct-map allocations are always
+// decommitted.
+// TODO(bartekn): Re-enable once PartitionAlloc-Everywhere evaluation is done.
+#define PA_CONFIG_ZERO_RANDOMLY_ON_FREE() 0
+
+// Need TLS support.
+#define PA_CONFIG_THREAD_CACHE_SUPPORTED() \
+  (BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_FUCHSIA))
+
+// Too expensive for official builds, as it adds cache misses to all
+// allocations. On the other hand, we want wide metrics coverage to get
+// realistic profiles.
+#define PA_CONFIG_THREAD_CACHE_ALLOC_STATS() \
+  (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !defined(OFFICIAL_BUILD))
+
+// Optional statistics collection. Lightweight, contrary to the ones above,
+// hence enabled by default.
+#define PA_CONFIG_THREAD_CACHE_ENABLE_STATISTICS() 1
+
+// Enable free list shadow entry to strengthen hardening as much as possible.
+// The shadow entry is an inversion (bitwise-NOT) of the encoded `next` pointer.
+//
+// Disabled when ref-count is placed in the previous slot, as it will overlap
+// with the shadow for the smallest slots.
+//
+// Disabled on Big Endian CPUs, because encoding is also a bitwise-NOT there,
+// making the shadow entry equal to the original, valid pointer to the next
+// slot. In case Use-after-Free happens, we'd rather not hand out a valid,
+// ready-to-use pointer.
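+//
+// Conceptually (a sketch only, not the actual freelist code), the shadow lets
+// the allocator validate an entry as:
+//
+//   bool ShadowIsConsistent(uintptr_t encoded_next, uintptr_t shadow) {
+//     return shadow == ~encoded_next;
+//   }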
+#define PA_CONFIG_HAS_FREELIST_SHADOW_ENTRY()    \
+  (!BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && \
+   defined(ARCH_CPU_LITTLE_ENDIAN))
+
+#define PA_CONFIG_HAS_MEMORY_TAGGING()              \
+  (defined(ARCH_CPU_ARM64) && defined(__clang__) && \
+   !defined(ADDRESS_SANITIZER) &&                   \
+   (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)))
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+static_assert(sizeof(void*) == 8);
+#endif
+
+// If memory tagging is enabled with BRP previous slot, the MTE tag and BRP ref
+// count will cause a race (crbug.com/1445816). To prevent this, the
+// ref_count_size is increased to the MTE granule size and the ref count is not
+// tagged.
+#if PA_CONFIG(HAS_MEMORY_TAGGING) &&            \
+    BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
+    BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+#define PA_CONFIG_INCREASE_REF_COUNT_SIZE_FOR_MTE() 1
+#else
+#define PA_CONFIG_INCREASE_REF_COUNT_SIZE_FOR_MTE() 0
+#endif
+
+// Specifies whether allocation extras need to be added.
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+#define PA_CONFIG_EXTRAS_REQUIRED() 1
+#else
+#define PA_CONFIG_EXTRAS_REQUIRED() 0
+#endif
+
+// Count the number of, and total wall clock time spent in, memory-related
+// system calls. This doesn't cover all system calls, in particular the ones
+// related to locking.
+//
+// Not enabled by default, as it has a runtime cost, and causes issues with some
+// builds (e.g. Windows).
+// However the total count is collected on all platforms.
+#define PA_CONFIG_COUNT_SYSCALL_TIME() 0
+
+// On Windows, |thread_local| variables cannot be marked "dllexport", see
+// compiler error C2492 at
+// https://docs.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2492?view=msvc-160.
+// Don't use it there.
+//
+// On macOS and iOS:
+// - With PartitionAlloc-Everywhere, thread_local allocates, reentering the
+//   allocator.
+// - Component builds triggered a clang bug: crbug.com/1243375
+//
+// On GNU/Linux and ChromeOS:
+// - `thread_local` allocates, reentering the allocator.
+//
+// Regardless, the "normal" TLS access is fast on x86_64 (see partition_tls.h),
+// so don't bother with thread_local anywhere.
+#define PA_CONFIG_THREAD_LOCAL_TLS()                                           \
+  (!(BUILDFLAG(IS_WIN) && defined(COMPONENT_BUILD)) && !BUILDFLAG(IS_APPLE) && \
+   !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS))
+
+// When PartitionAlloc is malloc(), detect malloc() becoming re-entrant by
+// calling malloc() again.
+//
+// Limitations:
+// - BUILDFLAG(PA_DCHECK_IS_ON) due to runtime cost
+// - thread_local TLS to simplify the implementation
+// - Not on Android due to bot failures
+#if BUILDFLAG(PA_DCHECK_IS_ON) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
+    PA_CONFIG(THREAD_LOCAL_TLS) && !BUILDFLAG(IS_ANDROID)
+#define PA_CONFIG_HAS_ALLOCATION_GUARD() 1
+#else
+#define PA_CONFIG_HAS_ALLOCATION_GUARD() 0
+#endif
+
+// On Android, we have to go through emutls, since this is always a shared
+// library, so don't bother.
+#if PA_CONFIG(THREAD_LOCAL_TLS) && !BUILDFLAG(IS_ANDROID)
+#define PA_CONFIG_THREAD_CACHE_FAST_TLS() 1
+#else
+#define PA_CONFIG_THREAD_CACHE_FAST_TLS() 0
+#endif
+
+// Lazy commit should only be enabled on Windows, because commit charge is
+// only meaningful and limited on Windows. It affects performance on other
+// platforms and is simply not needed there, since the OS supports overcommit.
+#if BUILDFLAG(IS_WIN)
+constexpr bool kUseLazyCommit = true;
+#else
+constexpr bool kUseLazyCommit = false;
+#endif
+
+// On these platforms, lock all the partitions before fork(), and unlock after.
+// This may be required on more platforms in the future.
+#define PA_CONFIG_HAS_ATFORK_HANDLER() \
+  (BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS))
+
+// PartitionAlloc uses PartitionRootEnumerator to acquire all
+// PartitionRoots at BeforeFork and to release at AfterFork.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_CONFIG(HAS_ATFORK_HANDLER)
+#define PA_CONFIG_USE_PARTITION_ROOT_ENUMERATOR() 1
+#else
+#define PA_CONFIG_USE_PARTITION_ROOT_ENUMERATOR() 0
+#endif
+
+// Due to potential conflict with the free list pointer in the "previous slot"
+// mode in the smallest bucket, we can't check both the cookie and the dangling
+// raw_ptr at the same time.
+#define PA_CONFIG_REF_COUNT_CHECK_COOKIE()         \
+  (!(BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) &&  \
+     BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)) && \
+   (BUILDFLAG(PA_DCHECK_IS_ON) ||                  \
+    BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)))
+
+// Use available space in the reference count to store the initially requested
+// size from the application. This is used for debugging.
+#if !PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
+    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+// Set to 1 when needed.
+#define PA_CONFIG_REF_COUNT_STORE_REQUESTED_SIZE() 0
+#else
+// You probably want it at 0, outside of local testing, or else
+// PartitionRefCount will grow past 8B.
+#define PA_CONFIG_REF_COUNT_STORE_REQUESTED_SIZE() 0
+#endif
+
+#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) && \
+    PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+#error "Cannot use a cookie *and* store the allocation size"
+#endif
+
+// Prefer smaller slot spans.
+//
+// Smaller slot spans may improve dirty memory fragmentation, but may also
+// increase address space usage.
+//
+// This is intended to roll out more broadly, but is only enabled on Linux for
+// now, to gather performance bot and real-world data ahead of an A/B
+// experiment.
+//
+// Also enabled on ARM64 macOS, as the 16 KiB pages on this platform lead to
+// larger slot spans.
+#define PA_CONFIG_PREFER_SMALLER_SLOT_SPANS() \
+  (BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64)))
+
+// Enable shadow metadata.
+//
+// With this flag, shadow pools are mapped, writable shadow metadata is placed
+// in them, and the real metadata is made read-only instead. This feature is
+// only enabled in 64-bit environments, because pools work differently with
+// 32-bit pointers (see glossary).
+#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
+    BUILDFLAG(HAS_64_BIT_POINTERS)
+#define PA_CONFIG_ENABLE_SHADOW_METADATA() 1
+#else
+#define PA_CONFIG_ENABLE_SHADOW_METADATA() 0
+#endif
+
+// According to crbug.com/1349955#c24, macOS 11 has a bug where it asserts that
+// malloc_size() of an allocation is equal to the requested size. This is
+// generally not true. The assert passed only because it happened to be true for
+// the sizes they requested. BRP changes that, hence it can't be deployed
+// without a workaround.
+//
+// The bug has been fixed in macOS 12. Here we can only check the platform, and
+// the version is checked dynamically later.
+#define PA_CONFIG_ENABLE_MAC11_MALLOC_SIZE_HACK() \
+  (BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && BUILDFLAG(IS_MAC))
+
+#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
+#error "Dynamically selected pool size is currently not supported"
+#endif
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+// TODO(1376980): Address MTE once it's enabled.
+#error "Compressed pointers don't support tag in the upper bits"
+#endif
+
+#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+
+// PA_CONFIG(IS_NONCLANG_MSVC): mimics the compound condition used by
+// Chromium's `//base/compiler_specific.h` to detect true (non-Clang)
+// MSVC.
+#if defined(COMPILER_MSVC) && !defined(__clang__)
+#define PA_CONFIG_IS_NONCLANG_MSVC() 1
+#else
+#define PA_CONFIG_IS_NONCLANG_MSVC() 0
+#endif
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONFIG_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h
new file mode 100644
index 0000000..57dc167
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h
@@ -0,0 +1,510 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/flags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
+#include <mach/vm_page_size.h>
+#endif
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#endif
+
+namespace partition_alloc {
+
+namespace internal {
+// Bit flag constants used as `flag` argument of PartitionRoot::Alloc<flags>,
+// AlignedAlloc, etc.
+enum class AllocFlags {
+  kNone = 0,
+  kReturnNull = 1 << 0,
+  kZeroFill = 1 << 1,
+  // Don't allow allocation override hooks. Override hooks are expected to
+  // check for the presence of this flag and return false if it is active.
+  kNoOverrideHooks = 1 << 2,
+  // Never let a memory tool like ASan (if active) perform the allocation.
+  kNoMemoryToolOverride = 1 << 3,
+  // Don't allow any hooks (override or observers).
+  kNoHooks = 1 << 4,  // Internal.
+  // If the allocation requires a "slow path" (such as allocating/committing a
+  // new slot span), return nullptr instead. Note this makes all large
+  // allocations return nullptr, such as direct-mapped ones, and even for
+  // smaller ones, a nullptr value is common.
+  kFastPathOrReturnNull = 1 << 5,  // Internal.
+  // An allocation override hook should tag the allocated memory for MTE.
+  kMemoryShouldBeTaggedForMte = 1 << 6,  // Internal.
+  kMaxValue = kMemoryShouldBeTaggedForMte,
+};
+PA_DEFINE_OPERATORS_FOR_FLAGS(AllocFlags);
+
+// Bit flag constants used as `flag` argument of PartitionRoot::Free<flags>.
+enum class FreeFlags {
+  kNone = 0,
+  // See AllocFlags::kNoMemoryToolOverride.
+  kNoMemoryToolOverride = 1 << 0,
+  // Don't allow any hooks (override or observers).
+  kNoHooks = 1 << 1,  // Internal.
+  // Quarantine for a while to ensure no UaF from on-stack pointers.
+  kSchedulerLoopQuarantine = 1 << 2,
+  kMaxValue = kSchedulerLoopQuarantine,
+};
+PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
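+
+// PA_DEFINE_OPERATORS_FOR_FLAGS() is assumed to provide the usual bitwise
+// operators, so callers can combine and test flags, e.g. (illustrative):
+//
+//   constexpr auto flags = AllocFlags::kReturnNull | AllocFlags::kZeroFill;
+//   const bool zero_fill =
+//       (flags & AllocFlags::kZeroFill) != AllocFlags::kNone;
+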
+}  // namespace internal
+
+using internal::AllocFlags;
+using internal::FreeFlags;
+
+namespace internal {
+
+// Size of a cache line. Not all CPUs in the world have a 64 bytes cache line
+// size, but as of 2021, most do. This is in particular the case for almost all
+// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
+// static alignment, we cannot query the CPU at runtime to determine the actual
+// alignment, so use 64 bytes everywhere. Since this is only used to avoid false
+// sharing, getting this wrong only results in lower performance, not incorrect
+// code.
+constexpr size_t kPartitionCachelineSize = 64;
+
+// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
+// It is typical for a `PartitionPage` to be based on multiple system pages.
+// Most references to "page" refer to `PartitionPage`s.
+//
+// *Super pages* are the underlying system allocations we make. Super pages
+// contain multiple partition pages and include space for a small amount of
+// metadata per partition page.
+//
+// Inside super pages, we store *slot spans*. A slot span is a contiguous range
+// of one or more `PartitionPage`s that stores allocations of the same size.
+// Slot span sizes are adjusted depending on the allocation size, to make sure
+// the packing does not lead to unused (wasted) space at the end of the last
+// system page of the span. For our current maximum slot span size of 64 KiB and
+// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
+// up against the end of a system page.
+
+#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64)
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageShift() {
+  return 16;  // 64 KiB
+}
+#elif defined(ARCH_CPU_PPC64)
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageShift() {
+  return 18;  // 256 KiB
+}
+#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
+    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageShift() {
+  return PageAllocationGranularityShift() + 2;
+}
+#else
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageShift() {
+  return 14;  // 16 KiB
+}
+#endif
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageSize() {
+  return 1 << PartitionPageShift();
+}
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageOffsetMask() {
+  return PartitionPageSize() - 1;
+}
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+PartitionPageBaseMask() {
+  return ~PartitionPageOffsetMask();
+}
+
+// Maximum number of PartitionPages per regular slot span. Above this limit, we
+// call it a single-slot span, as the span literally hosts only one slot, and
+// has a somewhat different implementation. At run-time, single-slot spans can
+// be differentiated with a call to CanStoreRawSize().
+// TODO: Should this be 1 on platforms with page size larger than 4kB, e.g.
+// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
+constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
+
+// To avoid fragmentation via never-used freelist entries, we hand out partition
+// freelist sections gradually, in units of the dominant system page size. What
+// we're actually doing is avoiding filling the full `PartitionPage` (16 KiB)
+// with freelist pointers right away. Writing freelist pointers will fault and
+// dirty a private page, which is very wasteful if we never actually store
+// objects there.
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+NumSystemPagesPerPartitionPage() {
+  return PartitionPageSize() >> SystemPageShift();
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+MaxSystemPagesPerRegularSlotSpan() {
+  return NumSystemPagesPerPartitionPage() *
+         kMaxPartitionPagesPerRegularSlotSpan;
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+MaxRegularSlotSpanSize() {
+  return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
+}
+
+// The maximum size that is used in an alternate bucket distribution. After this
+// threshold, we only have 1 slot per slot-span, so external fragmentation
+// doesn't matter. So, using the alternate bucket distribution after this
+// threshold has no benefit, and only increases internal fragmentation.
+//
+// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
+// this is not constexpr on all platforms, so on other platforms we hardcode it,
+// even though this may be too low, e.g. on systems with a page size >4KiB.
+constexpr size_t kHighThresholdForAlternateDistribution =
+#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
+    MaxRegularSlotSpanSize();
+#else
+    1 << 16;
+#endif
+
+// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
+// These chunks are called *super pages*. We do this so that we can store
+// metadata in the first few pages of each 2 MiB-aligned section. This makes
+// freeing memory very fast. 2 MiB size & alignment were chosen, because this
+// virtual address block represents a full but single page table allocation on
+// ARM, ia32 and x64, which may be slightly more performance- and
+// memory-efficient.
+// (Note, these super pages are backed by 4 KiB system pages and have nothing to
+// do with OS concept of "huge pages"/"large pages", even though the size
+// coincides.)
+//
+// The layout of the super page is as follows. The sizes below are the same for
+// 32- and 64-bit platforms.
+//
+//     +-----------------------+
+//     | Guard page (4 KiB)    |
+//     | Metadata page (4 KiB) |
+//     | Guard pages (8 KiB)   |
+//     | Free Slot Bitmap      |
+//     | *Scan State Bitmap    |
+//     | Slot span             |
+//     | Slot span             |
+//     | ...                   |
+//     | Slot span             |
+//     | Guard pages (16 KiB)  |
+//     +-----------------------+
+//
+// Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
+// Bitmap is inserted for partitions that may have quarantine enabled.
+//
+// If refcount_at_end_allocation is enabled, a RefcountBitmap (4 KiB) is
+// inserted after the Metadata page for BackupRefPtr. The guard pages after the
+// bitmap will be 4 KiB.
+//
+//...
+//     | Metadata page (4 KiB) |
+//     | RefcountBitmap (4 KiB)|
+//     | Guard pages (4 KiB)   |
+//...
+//
+// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
+// that slot spans of different sizes may co-exist within one super page. Even
+// slot spans of the same span size may host different slot sizes. However, all
+// slots within a single span have to be of the same size.
+//
+// The metadata page has the following format. Note that a `PartitionPage`
+// that is not at the head of a slot span is "unused" (for the most part; it
+// only stores the offset from the head page). In other words, the metadata for
+// the slot span is stored only in the first `PartitionPage` of the slot span.
+// Metadata accesses to other `PartitionPage`s are redirected to the first
+// `PartitionPage`.
+//
+//     +---------------------------------------------+
+//     | SuperPageExtentEntry (32 B)                 |
+//     | PartitionPage of slot span 1 (32 B, used)   |
+//     | PartitionPage of slot span 1 (32 B, unused) |
+//     | PartitionPage of slot span 1 (32 B, unused) |
+//     | PartitionPage of slot span 2 (32 B, used)   |
+//     | PartitionPage of slot span 3 (32 B, used)   |
+//     | ...                                         |
+//     | PartitionPage of slot span N (32 B, used)   |
+//     | PartitionPage of slot span N (32 B, unused) |
+//     | PartitionPage of slot span N (32 B, unused) |
+//     +---------------------------------------------+
+//
+// A direct-mapped page has an identical layout at the beginning, to make it
+// look like a super page:
+//
+//     +---------------------------------+
+//     | Guard page (4 KiB)              |
+//     | Metadata page (4 KiB)           |
+//     | Guard pages (8 KiB)             |
+//     | Direct mapped object            |
+//     | Guard page (4 KiB, 32-bit only) |
+//     +---------------------------------+
+//
+// A direct-mapped page's metadata page has the following layout (on 64-bit
+// architectures; on 32-bit ones the layout is identical, but some sizes differ
+// due to smaller pointers):
+//
+//     +----------------------------------+
+//     | SuperPageExtentEntry (32 B)      |
+//     | PartitionPage (32 B)             |
+//     | PartitionBucket (40 B)           |
+//     | PartitionDirectMapExtent (32 B)  |
+//     +----------------------------------+
+//
+// See |PartitionDirectMapMetadata| for details.
+
+constexpr size_t kGiB = 1024 * 1024 * 1024ull;
+constexpr size_t kSuperPageShift = 21;  // 2 MiB
+constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
+constexpr size_t kSuperPageAlignment = kSuperPageSize;
+constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
+constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
+
+// PartitionAlloc's address space is split into pools. See `glossary.md`.
+
+enum pool_handle : unsigned {
+  kNullPoolHandle = 0u,
+
+  kRegularPoolHandle,
+  kBRPPoolHandle,
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  kConfigurablePoolHandle,
+#endif
+
+// New pool_handles will be added here.
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // The thread isolated pool must come last since we write-protect its entry in
+  // the metadata tables, e.g. AddressPoolManager::aligned_pools_
+  kThreadIsolatedPoolHandle,
+#endif
+  kMaxPoolHandle
+};
+
+// kNullPoolHandle doesn't have metadata, hence - 1
+constexpr size_t kNumPools = kMaxPoolHandle - 1;
+
+// Maximum pool size. With the exception of the Configurable Pool, it is also
+// the actual size, unless PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) is set,
+// which allows choosing a different size at initialization time for certain
+// configurations.
+//
+// Special-case Android and iOS, which incur test failures with larger
+// pools. Regardless, allocating >8GiB with malloc() on these platforms is
+// unrealistic as of 2022.
+//
+// When pointer compression is enabled, we cannot use large pools (at most
+// 8 GiB for each of the glued pools).
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || \
+    BUILDFLAG(ENABLE_POINTER_COMPRESSION)
+constexpr size_t kPoolMaxSize = 8 * kGiB;
+#else
+constexpr size_t kPoolMaxSize = 16 * kGiB;
+#endif
+#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
+constexpr size_t kPoolMaxSize = 4 * kGiB;
+#endif
+constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+static_assert(kThreadIsolatedPoolHandle == kNumPools,
+              "The thread isolated pool must come last since we write-protect "
+              "its metadata.");
+#endif
+
+// Slots larger than this size will not receive MTE protection. Pages intended
+// for allocations larger than this constant should not be backed with PROT_MTE
+// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
+// of large areas which are less likely to benefit from MTE protection.
+constexpr size_t kMaxMemoryTaggingSize = 1024;
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+// Returns whether the tag of |object| overflowed, meaning the containing slot
+// needs to be moved to quarantine.
+PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
+  // The tag with which the slot is put to quarantine.
+  constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
+  static_assert((kOverflowTag & kPtrTagMask) != 0,
+                "Overflow tag must be in tag bits");
+  return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
+}
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
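+// A super page is subdivided into partition pages; e.g. with the common 16 KiB
+// partition pages (PartitionPageShift() == 14), a 2 MiB super page holds
+// 2 MiB / 16 KiB == 128 of them.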
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+NumPartitionPagesPerSuperPage() {
+  return kSuperPageSize >> PartitionPageShift();
+}
+
+PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
+  return kMaxSuperPagesInPool;
+}
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+// In 64-bit mode, the direct map allocation granularity is the super page
+// size, because this is the reservation granularity of the pools.
+PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
+  return kSuperPageSize;
+}
+
+PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
+  return kSuperPageShift;
+}
+#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
+// In 32-bit mode, address space is a scarce resource. Use the system
+// allocation granularity, which is the lowest possible address space
+// allocation unit. However, don't go below the partition page size, so that
+// pool bitmaps don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+DirectMapAllocationGranularity() {
+  return std::max(PageAllocationGranularity(), PartitionPageSize());
+}
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+DirectMapAllocationGranularityShift() {
+  return std::max(PageAllocationGranularityShift(), PartitionPageShift());
+}
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+DirectMapAllocationGranularityOffsetMask() {
+  return DirectMapAllocationGranularity() - 1;
+}
+
+// The "order" of an allocation is closely related to the power-of-1 size of the
+// allocation. More precisely, the order is the bit index of the
+// most-significant-bit in the allocation size, where the bit numbers starts at
+// index 1 for the least-significant-bit.
+//
+// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
+// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
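+//
+// For illustration (a sketch, not part of the allocator's code), the order of
+// a non-zero size could be computed as follows, assuming a 64-bit size_t:
+//
+//   size_t OrderOfSize(size_t size) {
+//     return 64 - static_cast<size_t>(__builtin_clzll(size));
+//   }
+//
+// e.g. OrderOfSize(24) == 5, since the most-significant bit of 24 (0b11000) is
+// bit 5 when counting from 1 at the least-significant bit.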
+
+// PartitionAlloc should return memory properly aligned for any type, to behave
+// properly as a generic allocator. This is not strictly required as long as
+// types are explicitly allocated with PartitionAlloc, but it is required to
+// use it as a malloc() implementation, and generally to match malloc()'s
+// behavior.
+//
+// In practice, this means 8 bytes alignment on 32 bit architectures, and 16
+// bytes on 64 bit ones.
+//
+// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
+constexpr size_t kMinBucketedOrder =
+    kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
+// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
+constexpr size_t kMaxBucketedOrder = 20;
+constexpr size_t kNumBucketedOrders =
+    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
+// 8 buckets per order (for the higher orders).
+// Note: this is not what is used by default, but the maximum number of buckets
+// per order. By default, only 4 are used.
+constexpr size_t kNumBucketsPerOrderBits = 3;
+constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
+constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
+constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
+constexpr size_t kMaxBucketSpacing =
+    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
+constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
+                                ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
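+// For example, with kAlignment == 16 (typical on 64-bit platforms):
+//   kMinBucketedOrder == 5, kSmallestBucket == 16,
+//   kNumBucketedOrders == 16, kNumBuckets == 128,
+//   kMaxBucketSpacing == 1 << 16, and
+//   kMaxBucketed == (1 << 19) + 7 * (1 << 16) == 983040 bytes (960 KiB).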
+// Limit when downsizing a direct mapping using `realloc`:
+constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
+// Intentionally set to less than 2 GiB to make sure that a 2 GiB allocation
+// fails. This is a security choice in Chrome, to help make size_t vs. int bugs
+// harder to exploit.
+
+// The definition of MaxDirectMapped depends only on constants that are
+// unconditionally constexpr. Therefore it is not necessary to use
+// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
+PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
+  // Subtract kSuperPageSize to account for the granularity inside
+  // PartitionRoot::GetDirectMapReservationSize.
+  return (1UL << 31) - kSuperPageSize;
+}
+
+// Max alignment supported by AlignedAlloc().
+// kSuperPageSize alignment can't be easily supported, because each super page
+// starts with guard pages & metadata.
+constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;
+
+constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
+
+// When a SlotSpan becomes empty, the allocator tries to avoid re-using it
+// immediately, to help with fragmentation. At this point, it becomes dirty
+// committed memory, which we want to minimize. This could be decommitted
+// immediately, but that would imply doing a lot of system calls. In particular,
+// for single-slot SlotSpans, a malloc() / free() loop would cause a *lot* of
+// system calls.
+//
+// As an intermediate step, empty SlotSpans are placed into a per-partition
+// global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
+// before getting decommitted. A new entry (i.e. a newly empty SlotSpan) taking
+// the place used by a previous one will lead the previous SlotSpan to be
+// decommitted immediately, provided that it is still empty.
+//
+// Setting this value higher means giving more time for reuse to happen, at the
+// cost of possibly increasing peak committed memory usage (and increasing the
+// size of PartitionRoot a bit, since the ring buffer is there). Note that the
+// ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
+// *not* removed from it when re-used. So the ring buffer really is a buffer of
+// *possibly* empty SlotSpans.
+//
+// In all cases, PartitionRoot::PurgeMemory() with the
+// PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
+// in the ring buffer, so with periodic purge enabled, this typically happens
+// every few seconds.
+constexpr size_t kEmptyCacheIndexBits = 7;
+// kMaxFreeableSpans is the buffer size, but is never used as an index value,
+// hence <= is appropriate.
+constexpr size_t kMaxFreeableSpans = 1 << kEmptyCacheIndexBits;
+constexpr size_t kDefaultEmptySlotSpanRingSize = 16;
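+// e.g. kEmptyCacheIndexBits == 7 gives a maximum ring buffer capacity of
+// 1 << 7 == 128 slot spans, of which only kDefaultEmptySlotSpanRingSize == 16
+// entries are used by default.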
+
+// If the total size in bytes of allocated but not committed pages exceeds this
+// value (probably it is an "out of virtual address space" crash), a special
+// crash stack trace is generated at
+// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
+// of virtual address space" from "out of physical memory" in crash reports.
+constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB
+
+// These byte values match tcmalloc.
+constexpr unsigned char kUninitializedByte = 0xAB;
+constexpr unsigned char kFreedByte = 0xCD;
+
+constexpr unsigned char kQuarantinedByte = 0xEF;
+
+// 1 is smaller than anything we can use, as it is not properly aligned. Not
+// using a large size, since PartitionBucket::slot_size is a uint32_t, and
+// static_cast<uint32_t>(-1) is too close to a "real" size.
+constexpr size_t kInvalidBucketSize = 1;
+
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+// Requested size that requires the hack.
+constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+}  // namespace internal
+
+// These constants are used outside PartitionAlloc itself, so we provide
+// non-internal aliases here.
+using ::partition_alloc::internal::kInvalidBucketSize;
+using ::partition_alloc::internal::kMaxSuperPagesInPool;
+using ::partition_alloc::internal::kMaxSupportedAlignment;
+using ::partition_alloc::internal::kNumBuckets;
+using ::partition_alloc::internal::kSuperPageSize;
+using ::partition_alloc::internal::MaxDirectMapped;
+using ::partition_alloc::internal::PartitionPageSize;
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h
new file mode 100644
index 0000000..f62db07
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h
@@ -0,0 +1,46 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_FOR_TESTING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_FOR_TESTING_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+
+namespace partition_alloc {
+namespace internal {
+
+constexpr bool AllowLeaks = true;
+constexpr bool DisallowLeaks = false;
+
+// A subclass of PartitionAllocator for testing. It frees all resources, i.e.
+// allocated memory, memory inside the freelist, and so on, when it is
+// destructed or when reset() is invoked manually.
+// If you need to check whether any memory was allocated but not yet freed,
+// use allow_leaks=false; reset() will then CHECK-fail if a leak is detected.
+// Otherwise (e.g. for intentional leaks), use allow_leaks=true.
+template <bool allow_leaks>
+struct PartitionAllocatorForTesting : public PartitionAllocator {
+  PartitionAllocatorForTesting() : PartitionAllocator() {}
+
+  explicit PartitionAllocatorForTesting(PartitionOptions opts)
+      : PartitionAllocator(opts) {}
+
+  ~PartitionAllocatorForTesting() { reset(); }
+
+  PA_ALWAYS_INLINE void reset() {
+    PartitionAllocator::root()->ResetForTesting(allow_leaks);
+  }
+};
+
+}  // namespace internal
+
+using PartitionAllocatorForTesting =
+    internal::PartitionAllocatorForTesting<internal::DisallowLeaks>;
+
+using PartitionAllocatorAllowLeaksForTesting =
+    internal::PartitionAllocatorForTesting<internal::AllowLeaks>;
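+
+// Example usage (illustrative; the exact PartitionRoot allocation API is
+// assumed here, not prescribed):
+//
+//   PartitionAllocatorForTesting allocator(PartitionOptions{});
+//   void* ptr = allocator.root()->Alloc(16);
+//   allocator.root()->Free(ptr);
+//   // Destruction (or an explicit reset()) CHECKs that nothing leaked.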
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_FOR_TESTING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h
new file mode 100644
index 0000000..5087809
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h
@@ -0,0 +1,92 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_FORWARD_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_FORWARD_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+
+namespace partition_alloc {
+
+namespace internal {
+
+// Alignment has two constraints:
+// - Alignment requirement for scalar types: alignof(std::max_align_t)
+// - Alignment requirement for operator new().
+//
+// The two are separate on 64-bit Windows, where the first one is 8 bytes and
+// the second one is 16. We could technically return something different for
+// malloc() and operator new(), but this would complicate things, and most of
+// our allocations are presumably coming from operator new() anyway.
+constexpr size_t kAlignment =
+    std::max(alignof(max_align_t),
+             static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
+static_assert(kAlignment <= 16,
+              "PartitionAlloc doesn't support a fundamental alignment larger "
+              "than 16 bytes.");
+
+struct SlotSpanMetadata;
+class PA_LOCKABLE Lock;
+
+// This type trait verifies a type can be used as a pointer offset.
+//
+// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values.
+// Smaller types are also allowed.
+template <typename Z>
+static constexpr bool is_offset_type =
+    std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
+
+}  // namespace internal
+
+class PartitionStatsDumper;
+
+struct PartitionRoot;
+
+namespace internal {
+// Declare PartitionRootLock() for thread analysis. Its implementation
+// is defined in partition_root.h.
+Lock& PartitionRootLock(PartitionRoot*);
+}  // namespace internal
+
+}  // namespace partition_alloc
+
+// From https://clang.llvm.org/docs/AttributeReference.html#malloc:
+//
+// The malloc attribute indicates that the function acts like a system memory
+// allocation function, returning a pointer to allocated storage disjoint from
+// the storage for any other object accessible to the caller.
+//
+// Note that it doesn't apply to realloc()-type functions, as they can return
+// the same pointer as the one passed as a parameter, as noted in e.g. stdlib.h
+// on Linux systems.
+#if PA_HAS_ATTRIBUTE(malloc)
+#define PA_MALLOC_FN __attribute__((malloc))
+#endif
+
+// Allows the compiler to assume that the return value is aligned on a
+// kAlignment boundary. This is useful for e.g. using aligned vector
+// instructions in the constructor for zeroing.
+#if PA_HAS_ATTRIBUTE(assume_aligned)
+#define PA_MALLOC_ALIGNED \
+  __attribute__((assume_aligned(::partition_alloc::internal::kAlignment)))
+#endif
+
+#if !defined(PA_MALLOC_FN)
+#define PA_MALLOC_FN
+#endif
+
+#if !defined(PA_MALLOC_ALIGNED)
+#define PA_MALLOC_ALIGNED
+#endif
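+
+// Illustrative use of the annotations above (hypothetical function, shown for
+// documentation purposes only):
+//
+//   PA_MALLOC_FN PA_MALLOC_ALIGNED void* MyAllocatingFunction(size_t size);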
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_FORWARD_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.cc
new file mode 100644
index 0000000..47be84c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.cc
@@ -0,0 +1,132 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
+
+#include <ostream>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace partition_alloc {
+
+namespace {
+
+internal::Lock g_hook_lock;
+
+internal::Lock& GetHooksLock() {
+  return g_hook_lock;
+}
+
+}  // namespace
+
+std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
+std::atomic<PartitionAllocHooks::AllocationObserverHook*>
+    PartitionAllocHooks::allocation_observer_hook_(nullptr);
+std::atomic<PartitionAllocHooks::FreeObserverHook*>
+    PartitionAllocHooks::free_observer_hook_(nullptr);
+std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
+    PartitionAllocHooks::allocation_override_hook_(nullptr);
+std::atomic<PartitionAllocHooks::FreeOverrideHook*>
+    PartitionAllocHooks::free_override_hook_(nullptr);
+std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
+    PartitionAllocHooks::realloc_override_hook_(nullptr);
+std::atomic<PartitionAllocHooks::QuarantineOverrideHook*>
+    PartitionAllocHooks::quarantine_override_hook_(nullptr);
+
+void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
+                                           FreeObserverHook* free_hook) {
+  internal::ScopedGuard guard(GetHooksLock());
+
+  // Chained hooks are not supported. Registering a non-null hook when a
+  // non-null hook is already registered indicates somebody is trying to
+  // overwrite a hook.
+  PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
+           (!alloc_hook && !free_hook))
+      << "Overwriting already set observer hooks";
+  allocation_observer_hook_ = alloc_hook;
+  free_observer_hook_ = free_hook;
+
+  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
+}
+
+void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
+                                           FreeOverrideHook* free_hook,
+                                           ReallocOverrideHook realloc_hook) {
+  internal::ScopedGuard guard(GetHooksLock());
+
+  PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
+            !realloc_override_hook_) ||
+           (!alloc_hook && !free_hook && !realloc_hook))
+      << "Overwriting already set override hooks";
+  allocation_override_hook_ = alloc_hook;
+  free_override_hook_ = free_hook;
+  realloc_override_hook_ = realloc_hook;
+
+  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
+}
+
+void PartitionAllocHooks::AllocationObserverHookIfEnabled(
+    const partition_alloc::AllocationNotificationData& notification_data) {
+  if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed)) {
+    hook(notification_data);
+  }
+}
+
+bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
+    void** out,
+    AllocFlags flags,
+    size_t size,
+    const char* type_name) {
+  if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) {
+    return hook(out, flags, size, type_name);
+  }
+  return false;
+}
+
+void PartitionAllocHooks::FreeObserverHookIfEnabled(
+    const FreeNotificationData& notification_data) {
+  if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed)) {
+    hook(notification_data);
+  }
+}
+
+bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
+  if (auto* hook = free_override_hook_.load(std::memory_order_relaxed)) {
+    return hook(address);
+  }
+  return false;
+}
+
+void PartitionAllocHooks::ReallocObserverHookIfEnabled(
+    const FreeNotificationData& free_notification_data,
+    const AllocationNotificationData& allocation_notification_data) {
+  // Report a reallocation as a free followed by an allocation.
+  AllocationObserverHook* allocation_hook =
+      allocation_observer_hook_.load(std::memory_order_relaxed);
+  FreeObserverHook* free_hook =
+      free_observer_hook_.load(std::memory_order_relaxed);
+  if (allocation_hook && free_hook) {
+    free_hook(free_notification_data);
+    allocation_hook(allocation_notification_data);
+  }
+}
+
+bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
+                                                       void* address) {
+  if (ReallocOverrideHook* hook =
+          realloc_override_hook_.load(std::memory_order_relaxed)) {
+    return hook(out, address);
+  }
+  return false;
+}
+
+// Do not unset the hook if there are remaining quarantined slots, so as not to
+// break checks on unquarantining.
+void PartitionAllocHooks::SetQuarantineOverrideHook(
+    QuarantineOverrideHook* hook) {
+  quarantine_override_hook_.store(hook, std::memory_order_release);
+}
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h
new file mode 100644
index 0000000..4e74b19
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h
@@ -0,0 +1,102 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_HOOKS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_HOOKS_H_
+
+#include <atomic>
+#include <cstddef>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+
+namespace partition_alloc {
+
+class AllocationNotificationData;
+class FreeNotificationData;
+
+// PartitionAlloc supports setting hooks to observe allocations/frees as they
+// occur as well as 'override' hooks that allow overriding those operations.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
+ public:
+  // Log allocation and free events.
+  typedef void AllocationObserverHook(
+      const AllocationNotificationData& notification_data);
+  typedef void FreeObserverHook(const FreeNotificationData& notification_data);
+
+  // If it returns true, the allocation has been overridden with the pointer in
+  // *out.
+  typedef bool AllocationOverrideHook(void** out,
+                                      AllocFlags flags,
+                                      size_t size,
+                                      const char* type_name);
+  // If it returns true, then the allocation was overridden and has been freed.
+  typedef bool FreeOverrideHook(void* address);
+  // If it returns true, the underlying allocation is overridden and *out holds
+  // the size of the underlying allocation.
+  typedef bool ReallocOverrideHook(size_t* out, void* address);
+
+  // Special hook type, independent of the rest. Triggered when `free()` detects
+  // outstanding references to the allocation.
+  // IMPORTANT: Make sure the hook always overwrites `[address, address + size)`
+  // with a bit pattern that cannot be interpreted as a valid memory address.
+  typedef void QuarantineOverrideHook(void* address, size_t size);
+
+  // To unhook, call Set*Hooks with nullptrs.
+  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
+                               FreeObserverHook* free_hook);
+  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
+                               FreeOverrideHook* free_hook,
+                               ReallocOverrideHook realloc_hook);
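+  //
+  // For illustration, a pair of no-op observer hooks could be installed (and
+  // later removed) like this:
+  //
+  //   void OnAlloc(const AllocationNotificationData&) {}
+  //   void OnFree(const FreeNotificationData&) {}
+  //   PartitionAllocHooks::SetObserverHooks(&OnAlloc, &OnFree);
+  //   ...
+  //   PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);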
+
+  // Helper method to check whether hooks are enabled. This is an optimization
+  // so that if a function needs to call observer and override hooks in two
+  // different places this value can be cached and only loaded once.
+  static bool AreHooksEnabled() {
+    return hooks_enabled_.load(std::memory_order_relaxed);
+  }
+
+  static void AllocationObserverHookIfEnabled(
+      const partition_alloc::AllocationNotificationData& notification_data);
+  static bool AllocationOverrideHookIfEnabled(void** out,
+                                              AllocFlags flags,
+                                              size_t size,
+                                              const char* type_name);
+
+  static void FreeObserverHookIfEnabled(
+      const FreeNotificationData& notification_data);
+  static bool FreeOverrideHookIfEnabled(void* address);
+
+  static void ReallocObserverHookIfEnabled(
+      const FreeNotificationData& free_notification_data,
+      const AllocationNotificationData& allocation_notification_data);
+  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
+
+  PA_ALWAYS_INLINE static QuarantineOverrideHook* GetQuarantineOverrideHook() {
+    return quarantine_override_hook_.load(std::memory_order_acquire);
+  }
+
+  static void SetQuarantineOverrideHook(QuarantineOverrideHook* hook);
+
+ private:
+  // Single bool used to indicate whether observer or override hooks are set,
+  // to reduce the number of loads required to check whether hooking is
+  // enabled.
+  static std::atomic<bool> hooks_enabled_;
+
+  // These hook pointers are written while holding the hooks lock (see the
+  // .cc file) and read without it.
+  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
+  static std::atomic<FreeObserverHook*> free_observer_hook_;
+
+  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
+  static std::atomic<FreeOverrideHook*> free_override_hook_;
+  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
+
+  static std::atomic<QuarantineOverrideHook*> quarantine_override_hook_;
+};
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ALLOC_HOOKS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_perftest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_perftest.cc
new file mode 100644
index 0000000..2c5505f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_perftest.cc
@@ -0,0 +1,520 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/extended_api.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/strings/stringprintf.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/timer/lap_timer.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_result_reporter.h"
+
+#if BUILDFLAG(IS_ANDROID) || defined(ARCH_CPU_32_BITS) || BUILDFLAG(IS_FUCHSIA)
+// Some tests allocate many GB of memory, which can cause issues on Android and
+// address-space exhaustion for any 32-bit process.
+#define MEMORY_CONSTRAINED
+#endif
+
+#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+#include "base/allocator/dispatcher/dispatcher.h"
+#include "base/debug/allocation_trace.h"
+#endif
+
+namespace partition_alloc::internal {
+
+namespace {
+
+// Change kTimeLimit to something higher if you need more time to capture a
+// trace.
+constexpr ::base::TimeDelta kTimeLimit = ::base::Seconds(2);
+constexpr int kWarmupRuns = 10000;
+constexpr int kTimeCheckInterval = 100000;
+constexpr size_t kAllocSize = 40;
+
+// Size constants are mostly arbitrary, but try to simulate something like CSS
+// parsing which consists of lots of relatively small objects.
+constexpr int kMultiBucketMinimumSize = 24;
+constexpr int kMultiBucketIncrement = 13;
+// Final size is 24 + (13 * 22) = 310 bytes.
+constexpr int kMultiBucketRounds = 22;
+
+constexpr char kMetricPrefixMemoryAllocation[] = "MemoryAllocation.";
+constexpr char kMetricThroughput[] = "throughput";
+constexpr char kMetricTimePerAllocation[] = "time_per_allocation";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixMemoryAllocation,
+                                         story_name);
+  reporter.RegisterImportantMetric(kMetricThroughput, "runs/s");
+  reporter.RegisterImportantMetric(kMetricTimePerAllocation, "ns");
+  return reporter;
+}
+
+enum class AllocatorType {
+  kSystem,
+  kPartitionAlloc,
+  kPartitionAllocWithThreadCache,
+#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+  kPartitionAllocWithAllocationStackTraceRecorder,
+#endif
+};
+
+class Allocator {
+ public:
+  Allocator() = default;
+  virtual ~Allocator() = default;
+  virtual void* Alloc(size_t size) = 0;
+  virtual void Free(void* data) = 0;
+};
+
+class SystemAllocator : public Allocator {
+ public:
+  SystemAllocator() = default;
+  ~SystemAllocator() override = default;
+  void* Alloc(size_t size) override { return malloc(size); }
+  void Free(void* data) override { free(data); }
+};
+
+class PartitionAllocator : public Allocator {
+ public:
+  PartitionAllocator() = default;
+  ~PartitionAllocator() override { alloc_.DestructForTesting(); }
+
+  void* Alloc(size_t size) override {
+    return alloc_.AllocInline<AllocFlags::kNoHooks>(size);
+  }
+  void Free(void* data) override {
+    // Even though it's easy to invoke the fast path with
+    // alloc_.Free<kNoHooks>(), we chose to use the slower path, because it's
+    // more common with PA-E.
+    PartitionRoot::FreeInlineInUnknownRoot<
+        partition_alloc::FreeFlags::kNoHooks>(data);
+  }
+
+ private:
+  PartitionRoot alloc_{PartitionOptions{}};
+};
+
+class PartitionAllocatorWithThreadCache : public Allocator {
+ public:
+  explicit PartitionAllocatorWithThreadCache(bool use_alternate_bucket_dist)
+      : scope_(allocator_.root()) {
+    ThreadCacheRegistry::Instance().PurgeAll();
+    if (!use_alternate_bucket_dist) {
+      allocator_.root()->SwitchToDenserBucketDistribution();
+    } else {
+      allocator_.root()->ResetBucketDistributionForTesting();
+    }
+  }
+  ~PartitionAllocatorWithThreadCache() override = default;
+
+  void* Alloc(size_t size) override {
+    return allocator_.root()->AllocInline<AllocFlags::kNoHooks>(size);
+  }
+  void Free(void* data) override {
+    // Even though it's easy to invoke the fast path with
+    // alloc_.Free<kNoHooks>(), we chose to use the slower path, because it's
+    // more common with PA-E.
+    PartitionRoot::FreeInlineInUnknownRoot<
+        partition_alloc::FreeFlags::kNoHooks>(data);
+  }
+
+ private:
+  static constexpr partition_alloc::PartitionOptions kOpts = {
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+    .thread_cache = PartitionOptions::kEnabled,
+#endif
+  };
+  PartitionAllocatorForTesting<internal::DisallowLeaks> allocator_{kOpts};
+  internal::ThreadCacheProcessScopeForTesting scope_;
+};
+
+#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+class PartitionAllocatorWithAllocationStackTraceRecorder : public Allocator {
+ public:
+  explicit PartitionAllocatorWithAllocationStackTraceRecorder(
+      bool register_hooks)
+      : register_hooks_(register_hooks) {
+    if (register_hooks_) {
+      dispatcher_.InitializeForTesting(&recorder_);
+    }
+  }
+
+  ~PartitionAllocatorWithAllocationStackTraceRecorder() override {
+    if (register_hooks_) {
+      dispatcher_.ResetForTesting();
+    }
+  }
+
+  void* Alloc(size_t size) override { return alloc_.AllocInline(size); }
+
+  void Free(void* data) override {
+    // Even though it's easy to invoke the fast path with
+    // alloc_.Free<kNoHooks>(), we chose to use the slower path, because it's
+    // more common with PA-E.
+    PartitionRoot::FreeInlineInUnknownRoot<
+        partition_alloc::FreeFlags::kNoHooks>(data);
+  }
+
+ private:
+  bool const register_hooks_;
+  PartitionRoot alloc_{PartitionOptions{}};
+  ::base::allocator::dispatcher::Dispatcher& dispatcher_ =
+      ::base::allocator::dispatcher::Dispatcher::GetInstance();
+  ::base::debug::tracer::AllocationTraceRecorder recorder_;
+};
+#endif  // BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+
+class TestLoopThread : public base::PlatformThreadForTesting::Delegate {
+ public:
+  TestLoopThread(float (*test_fn)(Allocator*), Allocator* allocator)
+      : test_fn_(test_fn), allocator_(allocator) {
+    PA_CHECK(base::PlatformThreadForTesting::Create(0, this, &thread_handle_));
+  }
+
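+  // Blocks until the worker thread finishes, then returns the rate it measured.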
+  float Run() {
+    base::PlatformThreadForTesting::Join(thread_handle_);
+    return laps_per_second_;
+  }
+
+  void ThreadMain() override { laps_per_second_ = test_fn_(allocator_); }
+
+  float (*test_fn_)(Allocator*) = nullptr;
+  Allocator* allocator_ = nullptr;
+  base::PlatformThreadHandle thread_handle_;
+  std::atomic<float> laps_per_second_;
+};
+
+void DisplayResults(const std::string& story_name,
+                    float iterations_per_second) {
+  auto reporter = SetUpReporter(story_name);
+  reporter.AddResult(kMetricThroughput, iterations_per_second);
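+  // Also report the inverse rate, i.e. nanoseconds per allocation.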
+  reporter.AddResult(kMetricTimePerAllocation,
+                     static_cast<size_t>(1e9 / iterations_per_second));
+}
+
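+// Intrusive singly-linked list node stored directly inside each allocation, so
+// the benchmark can chain live allocations and free them later without any
+// extra bookkeeping memory.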
+class MemoryAllocationPerfNode {
+ public:
+  MemoryAllocationPerfNode* GetNext() const { return next_; }
+  void SetNext(MemoryAllocationPerfNode* p) { next_ = p; }
+  static void FreeAll(MemoryAllocationPerfNode* first, Allocator* alloc) {
+    MemoryAllocationPerfNode* cur = first;
+    while (cur != nullptr) {
+      MemoryAllocationPerfNode* next = cur->GetNext();
+      alloc->Free(cur);
+      cur = next;
+    }
+  }
+
+ private:
+  MemoryAllocationPerfNode* next_ = nullptr;
+};
+
+#if !defined(MEMORY_CONSTRAINED)
+float SingleBucket(Allocator* allocator) {
+  auto* first =
+      reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(kAllocSize));
+  size_t allocated_memory = kAllocSize;
+
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  MemoryAllocationPerfNode* cur = first;
+  do {
+    auto* next = reinterpret_cast<MemoryAllocationPerfNode*>(
+        allocator->Alloc(kAllocSize));
+    PA_CHECK(next != nullptr);
+    cur->SetNext(next);
+    cur = next;
+    timer.NextLap();
+    allocated_memory += kAllocSize;
+    // With multiple threads, can get OOM otherwise.
+    if (allocated_memory > 200e6) {
+      cur->SetNext(nullptr);
+      MemoryAllocationPerfNode::FreeAll(first->GetNext(), allocator);
+      cur = first;
+      allocated_memory = kAllocSize;
+    }
+  } while (!timer.HasTimeLimitExpired());
+
+  // next_ = nullptr only works if the class constructor is called; it isn't
+  // here, since skipping construction lets us allocate arbitrary-length
+  // payloads.
+  cur->SetNext(nullptr);
+  MemoryAllocationPerfNode::FreeAll(first, allocator);
+
+  return timer.LapsPerSecond();
+}
+#endif  // !defined(MEMORY_CONSTRAINED)
+
+float SingleBucketWithFree(Allocator* allocator) {
+  // Allocate an initial element to make sure the bucket stays set up.
+  void* elem = allocator->Alloc(kAllocSize);
+
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  do {
+    void* cur = allocator->Alloc(kAllocSize);
+    PA_CHECK(cur != nullptr);
+    allocator->Free(cur);
+    timer.NextLap();
+  } while (!timer.HasTimeLimitExpired());
+
+  allocator->Free(elem);
+  return timer.LapsPerSecond();
+}
+
+#if !defined(MEMORY_CONSTRAINED)
+float MultiBucket(Allocator* allocator) {
+  auto* first =
+      reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(kAllocSize));
+  MemoryAllocationPerfNode* cur = first;
+  size_t allocated_memory = kAllocSize;
+
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  do {
+    for (int i = 0; i < kMultiBucketRounds; i++) {
+      size_t size = kMultiBucketMinimumSize + (i * kMultiBucketIncrement);
+      auto* next =
+          reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(size));
+      PA_CHECK(next != nullptr);
+      cur->SetNext(next);
+      cur = next;
+      allocated_memory += size;
+    }
+
+    // Can OOM with multiple threads.
+    if (allocated_memory > 100e6) {
+      cur->SetNext(nullptr);
+      MemoryAllocationPerfNode::FreeAll(first->GetNext(), allocator);
+      cur = first;
+      allocated_memory = kAllocSize;
+    }
+
+    timer.NextLap();
+  } while (!timer.HasTimeLimitExpired());
+
+  cur->SetNext(nullptr);
+  MemoryAllocationPerfNode::FreeAll(first, allocator);
+
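+  // Each lap performs kMultiBucketRounds allocations, so scale to report
+  // per-allocation throughput.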
+  return timer.LapsPerSecond() * kMultiBucketRounds;
+}
+#endif  // !defined(MEMORY_CONSTRAINED)
+
+float MultiBucketWithFree(Allocator* allocator) {
+  std::vector<void*> elems;
+  elems.reserve(kMultiBucketRounds);
+  // Do an initial round of allocation to make sure that the buckets stay in
+  // use (and aren't accidentally released back to the OS).
+  for (int i = 0; i < kMultiBucketRounds; i++) {
+    void* cur =
+        allocator->Alloc(kMultiBucketMinimumSize + (i * kMultiBucketIncrement));
+    PA_CHECK(cur != nullptr);
+    elems.push_back(cur);
+  }
+
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  do {
+    for (int i = 0; i < kMultiBucketRounds; i++) {
+      void* cur = allocator->Alloc(kMultiBucketMinimumSize +
+                                   (i * kMultiBucketIncrement));
+      PA_CHECK(cur != nullptr);
+      allocator->Free(cur);
+    }
+    timer.NextLap();
+  } while (!timer.HasTimeLimitExpired());
+
+  for (void* ptr : elems) {
+    allocator->Free(ptr);
+  }
+
+  return timer.LapsPerSecond() * kMultiBucketRounds;
+}
+
+float DirectMapped(Allocator* allocator) {
+  constexpr size_t kSize = 2 * 1000 * 1000;
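+  // ~2 MB is larger than the biggest bucketed size, so every allocation below
+  // takes the direct-map path.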
+
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  do {
+    void* cur = allocator->Alloc(kSize);
+    PA_CHECK(cur != nullptr);
+    allocator->Free(cur);
+    timer.NextLap();
+  } while (!timer.HasTimeLimitExpired());
+
+  return timer.LapsPerSecond();
+}
+
+std::unique_ptr<Allocator> CreateAllocator(AllocatorType type,
+                                           bool use_alternate_bucket_dist) {
+  switch (type) {
+    case AllocatorType::kSystem:
+      return std::make_unique<SystemAllocator>();
+    case AllocatorType::kPartitionAlloc:
+      return std::make_unique<PartitionAllocator>();
+    case AllocatorType::kPartitionAllocWithThreadCache:
+      return std::make_unique<PartitionAllocatorWithThreadCache>(
+          use_alternate_bucket_dist);
+#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+    case AllocatorType::kPartitionAllocWithAllocationStackTraceRecorder:
+      return std::make_unique<
+          PartitionAllocatorWithAllocationStackTraceRecorder>(true);
+#endif
+  }
+}
+
+void LogResults(int thread_count,
+                AllocatorType alloc_type,
+                uint64_t total_laps_per_second,
+                uint64_t min_laps_per_second) {
+  PA_LOG(INFO) << "RESULTSCSV: " << thread_count << ","
+               << static_cast<int>(alloc_type) << "," << total_laps_per_second
+               << "," << min_laps_per_second;
+}
+
+void RunTest(int thread_count,
+             bool use_alternate_bucket_dist,
+             AllocatorType alloc_type,
+             float (*test_fn)(Allocator*),
+             float (*noisy_neighbor_fn)(Allocator*),
+             const char* story_base_name) {
+  auto alloc = CreateAllocator(alloc_type, use_alternate_bucket_dist);
+
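+  // Optionally run a "noisy neighbor" workload on the same root, concurrently
+  // with the measured threads, to gauge interference.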
+  std::unique_ptr<TestLoopThread> noisy_neighbor_thread = nullptr;
+  if (noisy_neighbor_fn) {
+    noisy_neighbor_thread =
+        std::make_unique<TestLoopThread>(noisy_neighbor_fn, alloc.get());
+  }
+
+  std::vector<std::unique_ptr<TestLoopThread>> threads;
+  for (int i = 0; i < thread_count; ++i) {
+    threads.push_back(std::make_unique<TestLoopThread>(test_fn, alloc.get()));
+  }
+
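+  // Join each worker and aggregate its result; the float rates are truncated
+  // to integer laps per second here.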
+  uint64_t total_laps_per_second = 0;
+  uint64_t min_laps_per_second = std::numeric_limits<uint64_t>::max();
+  for (int i = 0; i < thread_count; ++i) {
+    uint64_t laps_per_second = threads[i]->Run();
+    min_laps_per_second = std::min(laps_per_second, min_laps_per_second);
+    total_laps_per_second += laps_per_second;
+  }
+
+  if (noisy_neighbor_thread) {
+    noisy_neighbor_thread->Run();
+  }
+
+  char const* alloc_type_str;
+  switch (alloc_type) {
+    case AllocatorType::kSystem:
+      alloc_type_str = "System";
+      break;
+    case AllocatorType::kPartitionAlloc:
+      alloc_type_str = "PartitionAlloc";
+      break;
+    case AllocatorType::kPartitionAllocWithThreadCache:
+      alloc_type_str = "PartitionAllocWithThreadCache";
+      break;
+#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+    case AllocatorType::kPartitionAllocWithAllocationStackTraceRecorder:
+      alloc_type_str = "PartitionAllocWithAllocationStackTraceRecorder";
+      break;
+#endif
+  }
+
+  std::string name = base::TruncatingStringPrintf(
+      "%s%s_%s_%d", kMetricPrefixMemoryAllocation, story_base_name,
+      alloc_type_str, thread_count);
+
+  DisplayResults(name + "_total", total_laps_per_second);
+  DisplayResults(name + "_worst", min_laps_per_second);
+  LogResults(thread_count, alloc_type, total_laps_per_second,
+             min_laps_per_second);
+}
+
+class PartitionAllocMemoryAllocationPerfTest
+    : public testing::TestWithParam<std::tuple<int, bool, AllocatorType>> {};
+
+// Only one partition with a thread cache: cannot use the thread cache when
+// PartitionAlloc is malloc().
+INSTANTIATE_TEST_SUITE_P(
+    ,
+    PartitionAllocMemoryAllocationPerfTest,
+    ::testing::Combine(
+        ::testing::Values(1, 2, 3, 4),
+        ::testing::Values(false, true),
+        ::testing::Values(
+            AllocatorType::kSystem,
+            AllocatorType::kPartitionAlloc,
+            AllocatorType::kPartitionAllocWithThreadCache
+#if BUILDFLAG(ENABLE_ALLOCATION_STACK_TRACE_RECORDER)
+            ,
+            AllocatorType::kPartitionAllocWithAllocationStackTraceRecorder
+#endif
+            )));
+
+// This test (and the other !MEMORY_CONSTRAINED tests below) allocates a large
+// amount of memory, which can cause issues on Android.
+#if !defined(MEMORY_CONSTRAINED)
+TEST_P(PartitionAllocMemoryAllocationPerfTest, SingleBucket) {
+  auto params = GetParam();
+  RunTest(std::get<int>(params), std::get<bool>(params),
+          std::get<AllocatorType>(params), SingleBucket, nullptr,
+          "SingleBucket");
+}
+#endif  // !defined(MEMORY_CONSTRAINED)
+
+TEST_P(PartitionAllocMemoryAllocationPerfTest, SingleBucketWithFree) {
+  auto params = GetParam();
+  RunTest(std::get<int>(params), std::get<bool>(params),
+          std::get<AllocatorType>(params), SingleBucketWithFree, nullptr,
+          "SingleBucketWithFree");
+}
+
+#if !defined(MEMORY_CONSTRAINED)
+TEST_P(PartitionAllocMemoryAllocationPerfTest, MultiBucket) {
+  auto params = GetParam();
+  RunTest(std::get<int>(params), std::get<bool>(params),
+          std::get<AllocatorType>(params), MultiBucket, nullptr, "MultiBucket");
+}
+#endif  // !defined(MEMORY_CONSTRAINED)
+
+TEST_P(PartitionAllocMemoryAllocationPerfTest, MultiBucketWithFree) {
+  auto params = GetParam();
+  RunTest(std::get<int>(params), std::get<bool>(params),
+          std::get<AllocatorType>(params), MultiBucketWithFree, nullptr,
+          "MultiBucketWithFree");
+}
+
+TEST_P(PartitionAllocMemoryAllocationPerfTest, DirectMapped) {
+  auto params = GetParam();
+  RunTest(std::get<int>(params), std::get<bool>(params),
+          std::get<AllocatorType>(params), DirectMapped, nullptr,
+          "DirectMapped");
+}
+
+#if !defined(MEMORY_CONSTRAINED)
+TEST_P(PartitionAllocMemoryAllocationPerfTest,
+       DISABLED_MultiBucketWithNoisyNeighbor) {
+  auto params = GetParam();
+  RunTest(std::get<int>(params), std::get<bool>(params),
+          std::get<AllocatorType>(params), MultiBucket, DirectMapped,
+          "MultiBucketWithNoisyNeighbor");
+}
+#endif  // !defined(MEMORY_CONSTRAINED)
+
+}  // namespace
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_unittest.cc
new file mode 100644
index 0000000..df74aff
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_unittest.cc
@@ -0,0 +1,5570 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <random>
+#include <set>
+#include <tuple>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/chromecast_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_cookie.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "base/system/sys_info.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(__ARM_FEATURE_MEMORY_TAGGING)
+#include <arm_acle.h>
+#endif
+
+#if BUILDFLAG(IS_POSIX)
+#if BUILDFLAG(IS_LINUX)
+// We need PKEY_DISABLE_WRITE in this file; glibc defines it in sys/mman.h but
+// it's actually Linux-specific and other Linux libcs define it in linux/mman.h.
+// We have to include both to be sure we get the definition.
+#include <linux/mman.h>
+#endif  // BUILDFLAG(IS_LINUX)
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#endif  // BUILDFLAG(IS_POSIX)
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
+#include <OpenCL/opencl.h>
+#endif
+
+#if BUILDFLAG(IS_MAC)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"
+#endif
+
+#if BUILDFLAG(ENABLE_PKEYS)
+#include <sys/syscall.h>
+#endif
+
+// In the MTE world, the upper bits of a pointer can be decorated with a tag,
+// thus allowing many versions of the same pointer to exist. These macros take
+// that into account when comparing.
+#define PA_EXPECT_PTR_EQ(ptr1, ptr2) \
+  { EXPECT_EQ(UntagPtr(ptr1), UntagPtr(ptr2)); }
+#define PA_EXPECT_PTR_NE(ptr1, ptr2) \
+  { EXPECT_NE(UntagPtr(ptr1), UntagPtr(ptr2)); }
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace {
+
+bool IsLargeMemoryDevice() {
+  // Treat any device with 4GiB or more of physical memory as a "large memory
+  // device". We check for slightly less than GiB so that devices with a small
+  // amount of memory not accessible to the OS still count as "large".
+  //
+  // Set to 4GiB, since we have 2GiB Android devices where tests flakily fail
+  // (e.g. Nexus 5X, crbug.com/1191195).
+  return base::SysInfo::AmountOfPhysicalMemory() >= 4000ULL * 1024 * 1024;
+}
+
+bool SetAddressSpaceLimit() {
+#if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
+  // 32 bits => address space is limited already.
+  return true;
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)
+  // macOS will accept, but not enforce, |RLIMIT_AS| changes. See
+  // https://crbug.com/435269 and rdar://17576114.
+  //
+  // Note: This number must not be less than 6 GB, because with
+  // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
+  // https://crbug.com/674665.
+  const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
+  struct rlimit limit;
+  if (getrlimit(RLIMIT_DATA, &limit) != 0) {
+    return false;
+  }
+  if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
+    limit.rlim_cur = kAddressSpaceLimit;
+    if (setrlimit(RLIMIT_DATA, &limit) != 0) {
+      return false;
+    }
+  }
+  return true;
+#else
+  return false;
+#endif
+}
+
+bool ClearAddressSpaceLimit() {
+#if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
+  return true;
+#elif BUILDFLAG(IS_POSIX)
+  struct rlimit limit;
+  if (getrlimit(RLIMIT_DATA, &limit) != 0) {
+    return false;
+  }
+  limit.rlim_cur = limit.rlim_max;
+  if (setrlimit(RLIMIT_DATA, &limit) != 0) {
+    return false;
+  }
+  return true;
+#else
+  return false;
+#endif
+}
+
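+// A mix of small, page-sized, and direct-mapped sizes used by the randomized
+// allocation helper below.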
+const size_t kTestSizes[] = {
+    1,
+    17,
+    100,
+    partition_alloc::internal::SystemPageSize(),
+    partition_alloc::internal::SystemPageSize() + 1,
+    partition_alloc::PartitionRoot::GetDirectMapSlotSize(100),
+    1 << 20,
+    1 << 21,
+};
+constexpr size_t kTestSizesCount = std::size(kTestSizes);
+
+template <
+    partition_alloc::AllocFlags alloc_flags,
+    partition_alloc::FreeFlags free_flags = partition_alloc::FreeFlags::kNone>
+void AllocateRandomly(partition_alloc::PartitionRoot* root, size_t count) {
+  std::vector<void*> allocations(count, nullptr);
+  for (size_t i = 0; i < count; ++i) {
+    const size_t size =
+        kTestSizes[partition_alloc::internal::base::RandGenerator(
+            kTestSizesCount)];
+    allocations[i] = root->Alloc<alloc_flags>(size);
+    EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
+  }
+
+  for (size_t i = 0; i < count; ++i) {
+    if (allocations[i]) {
+      root->Free(allocations[i]);
+    }
+  }
+}
+
+void HandleOOM(size_t unused_size) {
+  PA_LOG(FATAL) << "Out of memory";
+}
+
+int g_dangling_raw_ptr_detected_count = 0;
+int g_dangling_raw_ptr_released_count = 0;
+
+class CountDanglingRawPtr {
+ public:
+  CountDanglingRawPtr() {
+    g_dangling_raw_ptr_detected_count = 0;
+    g_dangling_raw_ptr_released_count = 0;
+    old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
+    old_released_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
+
+    partition_alloc::SetDanglingRawPtrDetectedFn(
+        CountDanglingRawPtr::DanglingRawPtrDetected);
+    partition_alloc::SetDanglingRawPtrReleasedFn(
+        CountDanglingRawPtr::DanglingRawPtrReleased);
+  }
+  ~CountDanglingRawPtr() {
+    partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
+    partition_alloc::SetDanglingRawPtrReleasedFn(old_released_fn_);
+  }
+
+ private:
+  static void DanglingRawPtrDetected(uintptr_t) {
+    g_dangling_raw_ptr_detected_count++;
+  }
+  static void DanglingRawPtrReleased(uintptr_t) {
+    g_dangling_raw_ptr_released_count++;
+  }
+
+  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
+  partition_alloc::DanglingRawPtrReleasedFn* old_released_fn_;
+};
+
+}  // namespace
+
+// Note: This test exercises interfaces inside the `partition_alloc`
+// namespace, but inspects objects inside `partition_alloc::internal`.
+// For ease of reading, the tests are placed into the latter namespace.
+namespace partition_alloc::internal {
+
+using BucketDistribution = PartitionRoot::BucketDistribution;
+using SlotSpan = SlotSpanMetadata;
+
+const size_t kTestAllocSize = 16;
+
+#if !BUILDFLAG(PA_DCHECK_IS_ON)
+const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
+const size_t kExtraAllocSizeWithoutRefCount = 0ull;
+#else
+const size_t kPointerOffset = kPartitionRefCountOffsetAdjustment;
+const size_t kExtraAllocSizeWithoutRefCount = kCookieSize;
+#endif
+
+const char* type_name = nullptr;
+
+void SetDistributionForPartitionRoot(PartitionRoot* root,
+                                     BucketDistribution distribution) {
+  switch (distribution) {
+    case BucketDistribution::kNeutral:
+      root->ResetBucketDistributionForTesting();
+      break;
+    case BucketDistribution::kDenser:
+      root->SwitchToDenserBucketDistribution();
+      break;
+  }
+}
+
+struct PartitionAllocTestParam {
+  BucketDistribution bucket_distribution;
+  bool use_pkey_pool;
+  size_t ref_count_size;
+};
+
+const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {
+  std::vector<size_t> ref_count_sizes = {0, 8, 16};
+  // sizeof(PartitionRefCount) == 8 under some configurations, so we can't force
+  // the size down to 4.
+#if !PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) && \
+    !PA_CONFIG(REF_COUNT_CHECK_COOKIE) &&         \
+    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+  ref_count_sizes.push_back(4);
+#endif
+  // Using MTE or the Mac13 workaround increases extras size without increasing
+  // sizeof(PartitionRefCount), so we don't have to exclude it here, as long as
+  // ExtraAllocSize() accounts for it.
+
+  std::vector<PartitionAllocTestParam> params;
+  for (size_t ref_count_size : ref_count_sizes) {
+    params.emplace_back(PartitionAllocTestParam{BucketDistribution::kNeutral,
+                                                false, ref_count_size});
+    params.emplace_back(PartitionAllocTestParam{BucketDistribution::kDenser,
+                                                false, ref_count_size});
+#if BUILDFLAG(ENABLE_PKEYS)
+    if (CPUHasPkeySupport()) {
+      params.emplace_back(PartitionAllocTestParam{BucketDistribution::kNeutral,
+                                                  true, ref_count_size});
+      params.emplace_back(PartitionAllocTestParam{BucketDistribution::kDenser,
+                                                  true, ref_count_size});
+    }
+#endif
+  }
+  return params;
+}
+
+class PartitionAllocTest
+    : public testing::TestWithParam<PartitionAllocTestParam> {
+ protected:
+  class ScopedPageAllocation {
+   public:
+    ScopedPageAllocation(PartitionAllocator& allocator,
+                         base::CheckedNumeric<size_t> npages)
+        : allocator_(allocator),
+          npages_(npages),
+          ptr_(static_cast<char*>(allocator_.root()->Alloc(
+              (npages * SystemPageSize() - ExtraAllocSize(allocator_))
+                  .ValueOrDie(),
+              type_name))) {}
+
+    ~ScopedPageAllocation() { allocator_.root()->Free(ptr_); }
+
+    void TouchAllPages() {
+      memset(ptr_, 'A',
+             ((npages_ * SystemPageSize()) - ExtraAllocSize(allocator_))
+                 .ValueOrDie());
+    }
+
+    void* PageAtIndex(size_t index) {
+      return ptr_ - kPointerOffset + (SystemPageSize() * index);
+    }
+
+   private:
+    PartitionAllocator& allocator_;
+    const base::CheckedNumeric<size_t> npages_;
+    char* ptr_;
+  };
+
+  PartitionAllocTest() = default;
+
+  ~PartitionAllocTest() override = default;
+
+  struct PartitionTestOptions {
+    bool use_memory_reclaimer = false;
+    bool uncap_empty_slot_span_memory = false;
+    bool set_bucket_distribution = false;
+  };
+
+  void InitializeTestRoot(PartitionRoot* root,
+                          PartitionOptions opts,
+                          PartitionTestOptions test_opts) {
+    root->Init(opts);
+    if (test_opts.use_memory_reclaimer) {
+      MemoryReclaimer::Instance()->RegisterPartition(root);
+    }
+    if (test_opts.uncap_empty_slot_span_memory) {
+      root->UncapEmptySlotSpanMemoryForTesting();
+    }
+    if (test_opts.set_bucket_distribution) {
+      SetDistributionForPartitionRoot(root, GetBucketDistribution());
+    }
+  }
+
+  std::unique_ptr<PartitionRoot> CreateCustomTestRoot(
+      PartitionOptions opts,
+      PartitionTestOptions test_opts) {
+    auto root = std::make_unique<PartitionRoot>();
+    InitializeTestRoot(root.get(), opts, test_opts);
+    return root;
+  }
+
+  void InitializeMainTestAllocators() {
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    PartitionOptions::EnableToggle enable_backup_ref_ptr =
+        PartitionOptions::kEnabled;
+#endif
+#if BUILDFLAG(ENABLE_PKEYS)
+    int pkey = PkeyAlloc(UseThreadIsolatedPool() ? 0 : PKEY_DISABLE_WRITE);
+    if (pkey != -1) {
+      pkey_ = pkey;
+    }
+    // We always want to have a pkey allocator initialized to make sure that the
+    // other pools still work. As part of the initializition, we tag some memory
+    // with the new pkey, effectively making it read-only. So there's some
+    // potential for breakage that this should catch.
+    InitializeTestRoot(pkey_allocator.root(),
+                       PartitionOptions{
+                           .aligned_alloc = PartitionOptions::kAllowed,
+                           .ref_count_size = GetParam().ref_count_size,
+                           .thread_isolation = ThreadIsolationOption(pkey_),
+                       },
+                       PartitionTestOptions{.use_memory_reclaimer = true});
+
+    ThreadIsolationOption thread_isolation_opt;
+    if (UseThreadIsolatedPool() && pkey_ != kInvalidPkey) {
+      thread_isolation_opt = ThreadIsolationOption(pkey_);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+      // BRP and thread isolated mode use different pools, so they can't be
+      // enabled at the same time.
+      enable_backup_ref_ptr = PartitionOptions::kDisabled;
+#endif
+    }
+#endif  // BUILDFLAG(ENABLE_PKEYS)
+    InitializeTestRoot(
+        allocator.root(),
+        PartitionOptions {
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+    BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+          // AlignedAlloc() can't be called when BRP is in the
+          // "before allocation" mode, because this mode adds extras before
+          // the allocation. Extras after the allocation are ok.
+          .aligned_alloc = PartitionOptions::kAllowed,
+#endif
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+          .backup_ref_ptr = enable_backup_ref_ptr,
+#endif
+          .ref_count_size = GetParam().ref_count_size,
+#if BUILDFLAG(ENABLE_PKEYS)
+          .thread_isolation = thread_isolation_opt,
+#endif
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+          .memory_tagging = {
+            .enabled =
+                partition_alloc::internal::base::CPU::GetInstanceNoAllocation()
+                        .has_mte()
+                    ? PartitionOptions::kEnabled
+                    : PartitionOptions::kDisabled,
+          }
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+        },
+        PartitionTestOptions{.use_memory_reclaimer = true,
+                             .uncap_empty_slot_span_memory = true,
+                             .set_bucket_distribution = true});
+
+    InitializeTestRoot(
+        aligned_allocator.root(),
+        PartitionOptions{
+            .aligned_alloc = PartitionOptions::kAllowed,
+            .ref_count_size = GetParam().ref_count_size,
+        },
+        PartitionTestOptions{.use_memory_reclaimer = true,
+                             .uncap_empty_slot_span_memory = true,
+                             .set_bucket_distribution = true});
+  }
+
+  size_t RealAllocSize() const {
+    return partition_alloc::internal::base::bits::AlignUp(
+        kTestAllocSize + ExtraAllocSize(allocator), kAlignment);
+  }
+
+  void SetUp() override {
+    PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
+        StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning);
+    PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(true);
+    PartitionRoot::SetSortActiveSlotSpansEnabled(true);
+    PartitionAllocGlobalInit(HandleOOM);
+    InitializeMainTestAllocators();
+
+    test_bucket_index_ = SizeToIndex(RealAllocSize());
+  }
+
+  size_t SizeToIndex(size_t size) {
+    const auto distribution_to_use = GetBucketDistribution();
+    return PartitionRoot::SizeToBucketIndex(size, distribution_to_use);
+  }
+
+  size_t SizeToBucketSize(size_t size) {
+    const auto index = SizeToIndex(size);
+    return allocator.root()->buckets[index].slot_size;
+  }
+
+  void TearDown() override {
+    allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+                                  PurgeFlags::kDiscardUnusedSystemPages);
+    PartitionAllocGlobalUninitForTesting();
+#if BUILDFLAG(ENABLE_PKEYS)
+    if (pkey_ != kInvalidPkey) {
+      PkeyFree(pkey_);
+    }
+#endif
+  }
+
+  static size_t ExtraAllocSize(const PartitionAllocator& allocator) {
+    size_t ref_count_size = 0;
+    // Duplicate the logic from PartitionRoot::Init().
+    if (allocator.root()->brp_enabled()) {
+      ref_count_size = GetParam().ref_count_size;
+      if (!ref_count_size) {
+        ref_count_size = kPartitionRefCountSizeAdjustment;
+      }
+      ref_count_size = AlignUpRefCountSizeForMac(ref_count_size);
+#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+      if (allocator.root()->IsMemoryTaggingEnabled()) {
+        ref_count_size = partition_alloc::internal::base::bits::AlignUp(
+            ref_count_size, kMemTagGranuleSize);
+      }
+#endif  // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+    }
+    return kExtraAllocSizeWithoutRefCount + ref_count_size;
+  }
+
+  size_t GetNumPagesPerSlotSpan(size_t size) {
+    size_t real_size = size + ExtraAllocSize(allocator);
+    size_t bucket_index = SizeToIndex(real_size);
+    PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
+    // TODO(tasak): make get_pages_per_slot_span() available to
+    // partition_alloc_unittest.cc. Is it acceptable to move the code from
+    // partition_bucket.cc to partition_bucket.h?
+    return (bucket->num_system_pages_per_slot_span +
+            (NumSystemPagesPerPartitionPage() - 1)) /
+           NumSystemPagesPerPartitionPage();
+  }
+
+  SlotSpan* GetFullSlotSpan(size_t size) {
+    size_t real_size = size + ExtraAllocSize(allocator);
+    size_t bucket_index = SizeToIndex(real_size);
+    PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
+    size_t num_slots =
+        (bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+        bucket->slot_size;
+    uintptr_t first = 0;
+    uintptr_t last = 0;
+    size_t i;
+    for (i = 0; i < num_slots; ++i) {
+      void* ptr = allocator.root()->Alloc(size, type_name);
+      EXPECT_TRUE(ptr);
+      if (!i) {
+        first = allocator.root()->ObjectToSlotStart(ptr);
+      } else if (i == num_slots - 1) {
+        last = allocator.root()->ObjectToSlotStart(ptr);
+      }
+    }
+    EXPECT_EQ(SlotSpan::FromSlotStart(first), SlotSpan::FromSlotStart(last));
+    if (bucket->num_system_pages_per_slot_span ==
+        NumSystemPagesPerPartitionPage()) {
+      EXPECT_EQ(first & PartitionPageBaseMask(),
+                last & PartitionPageBaseMask());
+    }
+    EXPECT_EQ(num_slots, bucket->active_slot_spans_head->num_allocated_slots);
+    EXPECT_EQ(nullptr, bucket->active_slot_spans_head->get_freelist_head());
+    EXPECT_TRUE(bucket->is_valid());
+    EXPECT_TRUE(bucket->active_slot_spans_head !=
+                SlotSpan::get_sentinel_slot_span());
+    EXPECT_TRUE(bucket->active_slot_spans_head->is_full());
+    return bucket->active_slot_spans_head;
+  }
+
+  void CycleFreeCache(size_t size) {
+    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+      void* ptr = allocator.root()->Alloc(size, type_name);
+      auto* slot_span =
+          SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+      auto* bucket = slot_span->bucket;
+      EXPECT_EQ(1u, bucket->active_slot_spans_head->num_allocated_slots);
+      allocator.root()->Free(ptr);
+      EXPECT_EQ(0u, bucket->active_slot_spans_head->num_allocated_slots);
+      EXPECT_TRUE(bucket->active_slot_spans_head->in_empty_cache() ||
+                  bucket->active_slot_spans_head ==
+                      SlotSpanMetadata::get_sentinel_slot_span());
+    }
+  }
+
+  enum ReturnNullTestMode {
+    kPartitionAlloc,
+    kPartitionRealloc,
+  };
+
+  void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
+    // TODO(crbug.com/678782): Where necessary and possible, disable the
+    // platform's OOM-killing behavior. OOM-killing makes this test flaky on
+    // low-memory devices.
+    if (!IsLargeMemoryDevice()) {
+      PA_LOG(WARNING)
+          << "Skipping test on this device because of crbug.com/678782";
+      PA_LOG(FATAL) << "Passed DoReturnNullTest";
+    }
+
+    ASSERT_TRUE(SetAddressSpaceLimit());
+
+    // Work out the number of allocations for 6 GB of memory.
+    const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
+
+    void** ptrs = static_cast<void**>(
+        allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
+    int i;
+
+    for (i = 0; i < num_allocations; ++i) {
+      switch (mode) {
+        case kPartitionAlloc: {
+          ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
+                                                                     type_name);
+          break;
+        }
+        case kPartitionRealloc: {
+          ptrs[i] =
+              allocator.root()->Alloc<AllocFlags::kReturnNull>(1, type_name);
+          ptrs[i] = allocator.root()->Realloc<AllocFlags::kReturnNull>(
+              ptrs[i], alloc_size, type_name);
+          break;
+        }
+      }
+
+      if (!i) {
+        EXPECT_TRUE(ptrs[0]);
+      }
+      if (!ptrs[i]) {
+        ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
+                                                                   type_name);
+        EXPECT_FALSE(ptrs[i]);
+        break;
+      }
+    }
+
+    // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
+    // we're not actually testing anything here.
+    EXPECT_LT(i, num_allocations);
+
+    // Free, reallocate and free again each block we allocated. We do this to
+    // check that freeing memory also works correctly after a failed allocation.
+    for (--i; i >= 0; --i) {
+      allocator.root()->Free(ptrs[i]);
+      ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
+                                                                 type_name);
+      EXPECT_TRUE(ptrs[i]);
+      allocator.root()->Free(ptrs[i]);
+    }
+
+    allocator.root()->Free(ptrs);
+
+    EXPECT_TRUE(ClearAddressSpaceLimit());
+    PA_LOG(FATAL) << "Passed DoReturnNullTest";
+  }
+
+  void RunRefCountReallocSubtest(size_t orig_size, size_t new_size);
+
+  PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t size) {
+    return allocator.root()->Alloc(size);
+  }
+
+  PA_NOINLINE void Free(void* ptr) { allocator.root()->Free(ptr); }
+
+  BucketDistribution GetBucketDistribution() const {
+    return GetParam().bucket_distribution;
+  }
+
+  bool UseThreadIsolatedPool() const { return GetParam().use_pkey_pool; }
+  bool UseBRPPool() const { return allocator.root()->brp_enabled(); }
+
+  partition_alloc::PartitionAllocatorForTesting allocator;
+  partition_alloc::PartitionAllocatorForTesting aligned_allocator;
+#if BUILDFLAG(ENABLE_PKEYS)
+  partition_alloc::PartitionAllocatorForTesting pkey_allocator;
+#endif
+  size_t test_bucket_index_;
+
+#if BUILDFLAG(ENABLE_PKEYS)
+  int pkey_ = kInvalidPkey;
+#endif
+};
+
+// Death tests misbehave on Android, http://crbug.com/643760.
+#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
+#define PA_HAS_DEATH_TESTS
+
+class PartitionAllocDeathTest : public PartitionAllocTest {};
+
+INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
+                         PartitionAllocDeathTest,
+                         testing::ValuesIn(GetPartitionAllocTestParams()));
+
+#endif
+
+namespace {
+
+void FreeFullSlotSpan(PartitionRoot* root, SlotSpan* slot_span) {
+  EXPECT_TRUE(slot_span->is_full());
+  size_t size = slot_span->bucket->slot_size;
+  size_t num_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      size;
+  EXPECT_EQ(num_slots, slot_span->num_allocated_slots);
+  uintptr_t address = SlotSpan::ToSlotSpanStart(slot_span);
+  size_t i;
+  for (i = 0; i < num_slots; ++i) {
+    root->Free(root->SlotStartToObject(address));
+    address += size;
+  }
+  EXPECT_TRUE(slot_span->is_empty());
+}
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+bool CheckPageInCore(void* ptr, bool in_core) {
+  unsigned char ret = 0;
+  EXPECT_EQ(0, mincore(ptr, SystemPageSize(), &ret));
+  return in_core == (ret & 1);
+}
+
+#define CHECK_PAGE_IN_CORE(ptr, in_core) \
+  EXPECT_TRUE(CheckPageInCore(ptr, in_core))
+#else
+#define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+class MockPartitionStatsDumper : public PartitionStatsDumper {
+ public:
+  MockPartitionStatsDumper() = default;
+
+  void PartitionDumpTotals(const char* partition_name,
+                           const PartitionMemoryStats* stats) override {
+    EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
+    EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
+    EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
+    EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
+    EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
+  }
+
+  void PartitionsDumpBucketStats(
+      [[maybe_unused]] const char* partition_name,
+      const PartitionBucketMemoryStats* stats) override {
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
+    bucket_stats.push_back(*stats);
+    total_resident_bytes += stats->resident_bytes;
+    total_active_bytes += stats->active_bytes;
+    total_decommittable_bytes += stats->decommittable_bytes;
+    total_discardable_bytes += stats->discardable_bytes;
+  }
+
+  bool IsMemoryAllocationRecorded() {
+    return total_resident_bytes != 0 && total_active_bytes != 0;
+  }
+
+  const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
+    for (auto& stat : bucket_stats) {
+      if (stat.bucket_slot_size == bucket_size) {
+        return &stat;
+      }
+    }
+    return nullptr;
+  }
+
+ private:
+  size_t total_resident_bytes = 0;
+  size_t total_active_bytes = 0;
+  size_t total_decommittable_bytes = 0;
+  size_t total_discardable_bytes = 0;
+
+  std::vector<PartitionBucketMemoryStats> bucket_stats;
+};
+
+}  // namespace
+
+INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
+                         PartitionAllocTest,
+                         testing::ValuesIn(GetPartitionAllocTestParams()));
+
+// Check that the most basic of allocate / free pairs work.
+TEST_P(PartitionAllocTest, Basic) {
+  PartitionRoot::Bucket* bucket =
+      &allocator.root()->buckets[test_bucket_index_];
+  auto* seed_slot_span = SlotSpan::get_sentinel_slot_span();
+
+  EXPECT_FALSE(bucket->empty_slot_spans_head);
+  EXPECT_FALSE(bucket->decommitted_slot_spans_head);
+  EXPECT_EQ(seed_slot_span, bucket->active_slot_spans_head);
+  EXPECT_EQ(nullptr, bucket->active_slot_spans_head->next_slot_span);
+
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  EXPECT_EQ(kPointerOffset, UntagPtr(ptr) & PartitionPageOffsetMask());
+  // Check that the offset appears to include a guard page.
+  EXPECT_EQ(PartitionPageSize() +
+                partition_alloc::internal::ReservedFreeSlotBitmapSize() +
+                kPointerOffset,
+            UntagPtr(ptr) & kSuperPageOffsetMask);
+
+  allocator.root()->Free(ptr);
+  // Expect that the last active slot span gets noticed as empty but doesn't get
+  // decommitted.
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_FALSE(bucket->decommitted_slot_spans_head);
+}
+
+// Test multiple allocations, and freelist handling.
+TEST_P(PartitionAllocTest, MultiAlloc) {
+  void* ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr1);
+  EXPECT_TRUE(ptr2);
+  ptrdiff_t diff = UntagPtr(ptr2) - UntagPtr(ptr1);
+  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
+
+  // Check that we re-use the just-freed slot.
+  allocator.root()->Free(ptr2);
+  ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr2);
+  diff = UntagPtr(ptr2) - UntagPtr(ptr1);
+  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
+  allocator.root()->Free(ptr1);
+  ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr1);
+  diff = UntagPtr(ptr2) - UntagPtr(ptr1);
+  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize()), diff);
+
+  void* ptr3 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr3);
+  diff = UntagPtr(ptr3) - UntagPtr(ptr1);
+  EXPECT_EQ(static_cast<ptrdiff_t>(RealAllocSize() * 2), diff);
+
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr2);
+  allocator.root()->Free(ptr3);
+}
+
+// Test a bucket with multiple slot spans.
+TEST_P(PartitionAllocTest, MultiSlotSpans) {
+  PartitionRoot::Bucket* bucket =
+      &allocator.root()->buckets[test_bucket_index_];
+
+  auto* slot_span = GetFullSlotSpan(kTestAllocSize);
+  FreeFullSlotSpan(allocator.root(), slot_span);
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
+  EXPECT_EQ(nullptr, slot_span->next_slot_span);
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+
+  slot_span = GetFullSlotSpan(kTestAllocSize);
+  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
+
+  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
+  EXPECT_EQ(nullptr, slot_span2->next_slot_span);
+  EXPECT_EQ(SlotSpan::ToSlotSpanStart(slot_span) & kSuperPageBaseMask,
+            SlotSpan::ToSlotSpanStart(slot_span2) & kSuperPageBaseMask);
+
+  // Fully free the non-current slot span. This will leave us with no current
+  // active slot span because one is empty and the other is full.
+  FreeFullSlotSpan(allocator.root(), slot_span);
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
+            bucket->active_slot_spans_head);
+
+  // Allocate a new slot span, it should pull from the freelist.
+  slot_span = GetFullSlotSpan(kTestAllocSize);
+  EXPECT_FALSE(bucket->empty_slot_spans_head);
+  EXPECT_EQ(slot_span, bucket->active_slot_spans_head);
+
+  FreeFullSlotSpan(allocator.root(), slot_span);
+  FreeFullSlotSpan(allocator.root(), slot_span2);
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
+  EXPECT_EQ(0u, slot_span2->num_unprovisioned_slots);
+  EXPECT_TRUE(slot_span2->in_empty_cache());
+}
+
+// Test some finer aspects of internal slot span transitions.
+TEST_P(PartitionAllocTest, SlotSpanTransitions) {
+  PartitionRoot::Bucket* bucket =
+      &allocator.root()->buckets[test_bucket_index_];
+
+  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
+  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
+  EXPECT_EQ(nullptr, slot_span1->next_slot_span);
+  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
+  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
+  EXPECT_EQ(nullptr, slot_span2->next_slot_span);
+
+  // Bounce slot_span1 back into the non-full list then fill it up again.
+  void* ptr = allocator.root()->SlotStartToObject(
+      SlotSpan::ToSlotSpanStart(slot_span1));
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
+  std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
+  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head->next_slot_span);
+
+  // Allocating another slot span at this point should cause us to scan over
+  // slot_span1 (which is both full and NOT our current slot span), and evict it
+  // from the freelist. Older code had a O(n^2) condition due to failure to do
+  // this.
+  auto* slot_span3 = GetFullSlotSpan(kTestAllocSize);
+  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
+  EXPECT_EQ(nullptr, slot_span3->next_slot_span);
+
+  // Work out a pointer into slot_span2 and free it.
+  ptr = allocator.root()->SlotStartToObject(
+      SlotSpan::ToSlotSpanStart(slot_span2));
+  allocator.root()->Free(ptr);
+  // Trying to allocate at this time should cause us to cycle around to
+  // slot_span2 and find the recently freed slot.
+  void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  PA_EXPECT_PTR_EQ(ptr, ptr2);
+  EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
+  EXPECT_EQ(slot_span3, slot_span2->next_slot_span);
+
+  // Work out a pointer into slot_span1 and free it. This should pull the slot
+  // span back into the list of available slot spans.
+  ptr = allocator.root()->SlotStartToObject(
+      SlotSpan::ToSlotSpanStart(slot_span1));
+  allocator.root()->Free(ptr);
+  // This allocation should be satisfied by slot_span1.
+  ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  PA_EXPECT_PTR_EQ(ptr, ptr2);
+  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
+  EXPECT_EQ(slot_span2, slot_span1->next_slot_span);
+
+  FreeFullSlotSpan(allocator.root(), slot_span3);
+  FreeFullSlotSpan(allocator.root(), slot_span2);
+  FreeFullSlotSpan(allocator.root(), slot_span1);
+
+  // Allocating whilst in this state exposed a bug, so keep the test.
+  ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  allocator.root()->Free(ptr);
+}
+
+// Test that ExtraAllocSize() is exactly what PA takes away from the slot for
+// extras.
+TEST_P(PartitionAllocTest, ExtraAllocSize) {
+  // There is a bucket with a slot size exactly that (asserted below).
+  size_t slot_size = 64;
+  size_t bucket_index =
+      allocator.root()->SizeToBucketIndex(slot_size, GetBucketDistribution());
+  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
+  ASSERT_EQ(bucket->slot_size, slot_size);
+
+  // The first allocation is expected to span exactly the capacity of the slot.
+  // The second one should overflow into a higher-size slot, and not fill its
+  // capacity.
+  size_t requested_size1 = slot_size - ExtraAllocSize(allocator);
+  size_t requested_size2 = requested_size1 + 1;
+  void* ptr1 = allocator.root()->Alloc(requested_size1);
+  void* ptr2 = allocator.root()->Alloc(requested_size2);
+  size_t capacity1 = allocator.root()->AllocationCapacityFromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr1));
+  size_t capacity2 = allocator.root()->AllocationCapacityFromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr2));
+  EXPECT_EQ(capacity1, requested_size1);
+  EXPECT_LT(capacity1, capacity2);
+  EXPECT_LT(requested_size2, capacity2);
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocTest, PreferSlotSpansWithProvisionedEntries) {
+  size_t size = SystemPageSize() - ExtraAllocSize(allocator);
+  size_t real_size = size + ExtraAllocSize(allocator);
+  size_t bucket_index =
+      allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
+  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
+  ASSERT_EQ(bucket->slot_size, real_size);
+  size_t slots_per_span = bucket->num_system_pages_per_slot_span;
+
+  // Make 10 full slot spans.
+  constexpr int kSpans = 10;
+  std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
+  for (int span_index = 0; span_index < kSpans; span_index++) {
+    for (size_t i = 0; i < slots_per_span; i++) {
+      allocated_memory_spans[span_index].push_back(
+          allocator.root()->Alloc(size));
+    }
+  }
+
+  // Reverse ordering, since a newly non-full span is placed at the head of the
+  // active list.
+  for (int span_index = kSpans - 1; span_index >= 0; span_index--) {
+    allocator.root()->Free(allocated_memory_spans[span_index].back());
+    allocated_memory_spans[span_index].pop_back();
+  }
+
+  // Since slot spans are large enough and we freed memory from the end, the
+  // slot spans become partially provisioned after PurgeMemory().
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+                                PurgeFlags::kDiscardUnusedSystemPages);
+  std::vector<SlotSpanMetadata*> active_slot_spans;
+  for (auto* span = bucket->active_slot_spans_head; span;
+       span = span->next_slot_span) {
+    active_slot_spans.push_back(span);
+    ASSERT_EQ(span->num_unprovisioned_slots, 1u);
+    // But no freelist entries.
+    ASSERT_FALSE(span->get_freelist_head());
+  }
+
+  // Free one entry in the middle span, creating a freelist entry.
+  constexpr size_t kSpanIndex = 5;
+  allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
+  allocated_memory_spans[kSpanIndex].pop_back();
+
+  ASSERT_TRUE(active_slot_spans[kSpanIndex]->get_freelist_head());
+  ASSERT_FALSE(bucket->active_slot_spans_head->get_freelist_head());
+
+  // It must come from the middle slot span even though the first one has
+  // unprovisioned space.
+  void* new_ptr = allocator.root()->Alloc(size);
+
+  // Comes from the middle slot span, since it has a freelist entry.
+  auto* new_active_slot_span = active_slot_spans[kSpanIndex];
+  ASSERT_FALSE(new_active_slot_span->get_freelist_head());
+
+  // The middle slot span was moved to the front.
+  active_slot_spans.erase(active_slot_spans.begin() + kSpanIndex);
+  active_slot_spans.insert(active_slot_spans.begin(), new_active_slot_span);
+
+  // Check slot span ordering.
+  int index = 0;
+  for (auto* span = bucket->active_slot_spans_head; span;
+       span = span->next_slot_span) {
+    EXPECT_EQ(span, active_slot_spans[index]);
+    index++;
+  }
+  EXPECT_EQ(index, kSpans);
+
+  allocator.root()->Free(new_ptr);
+  for (int span_index = 0; span_index < kSpans; span_index++) {
+    for (void* ptr : allocated_memory_spans[span_index]) {
+      allocator.root()->Free(ptr);
+    }
+  }
+}
+
+// Test some corner cases relating to slot span transitions in the internal
+// free slot span list metadata bucket.
+TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
+  PartitionRoot::Bucket* bucket =
+      &allocator.root()->buckets[test_bucket_index_];
+
+  size_t num_to_fill_free_list_slot_span =
+      PartitionPageSize() / (sizeof(SlotSpan) + ExtraAllocSize(allocator));
+  // The +1 is because we need to account for the fact that the current slot
+  // span never gets thrown on the freelist.
+  ++num_to_fill_free_list_slot_span;
+  auto slot_spans =
+      std::make_unique<SlotSpan*[]>(num_to_fill_free_list_slot_span);
+
+  size_t i;
+  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
+    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
+  }
+  EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
+            bucket->active_slot_spans_head);
+  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
+    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
+  }
+  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+
+  // Allocate / free in a different bucket size so we get control of a
+  // different free slot span list. We need two slot spans because one will be
+  // the last active slot span and not get freed.
+  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize * 2);
+  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize * 2);
+  FreeFullSlotSpan(allocator.root(), slot_span1);
+  FreeFullSlotSpan(allocator.root(), slot_span2);
+
+  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
+    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
+  }
+  EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
+            bucket->active_slot_spans_head);
+
+  for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
+    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
+  }
+  EXPECT_EQ(SlotSpan::get_sentinel_slot_span(), bucket->active_slot_spans_head);
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+}
+
+// Test a large series of allocations that cross more than one underlying
+// super page.
+TEST_P(PartitionAllocTest, MultiPageAllocs) {
+  size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
+  // 1 super page has 2 guard partition pages and a tag bitmap.
+  size_t num_slot_spans_needed =
+      (NumPartitionPagesPerSuperPage() - 2 -
+       partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
+      num_pages_per_slot_span;
+
+  // We need one more slot span in order to cross super page boundary.
+  ++num_slot_spans_needed;
+
+  EXPECT_GT(num_slot_spans_needed, 1u);
+  auto slot_spans = std::make_unique<SlotSpan*[]>(num_slot_spans_needed);
+  uintptr_t first_super_page_base = 0;
+  size_t i;
+  for (i = 0; i < num_slot_spans_needed; ++i) {
+    slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
+    uintptr_t slot_span_start = SlotSpan::ToSlotSpanStart(slot_spans[i]);
+    if (!i) {
+      first_super_page_base = slot_span_start & kSuperPageBaseMask;
+    }
+    if (i == num_slot_spans_needed - 1) {
+      uintptr_t second_super_page_base = slot_span_start & kSuperPageBaseMask;
+      uintptr_t second_super_page_offset =
+          slot_span_start & kSuperPageOffsetMask;
+      EXPECT_FALSE(second_super_page_base == first_super_page_base);
+      // Check that we allocated a guard page and the reserved tag bitmap for
+      // the second page.
+      EXPECT_EQ(PartitionPageSize() +
+                    partition_alloc::internal::ReservedFreeSlotBitmapSize(),
+                second_super_page_offset);
+    }
+  }
+  for (i = 0; i < num_slot_spans_needed; ++i) {
+    FreeFullSlotSpan(allocator.root(), slot_spans[i]);
+  }
+}
+
+// Test the generic allocation functions that can handle arbitrary sizes and
+// reallocing etc.
+TEST_P(PartitionAllocTest, Alloc) {
+  void* ptr = allocator.root()->Alloc(1, type_name);
+  EXPECT_TRUE(ptr);
+  allocator.root()->Free(ptr);
+  ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
+  EXPECT_TRUE(ptr);
+  allocator.root()->Free(ptr);
+
+  // For alloc(x + 1) and alloc(x + kSmallestBucket) to be served from the same
+  // bucket, we need
+  //   AlignUp(x + 1 + ExtraAllocSize(allocator), kAlignment) ==
+  //   AlignUp(x + kSmallestBucket + ExtraAllocSize(allocator), kAlignment)
+  // (AlignUp being partition_alloc::internal::base::bits::AlignUp), because
+  // slot sizes are multiples of kAlignment. That holds when
+  // (x + ExtraAllocSize(allocator)) is itself a multiple of kAlignment, i.e.
+  //   x = AlignUp(ExtraAllocSize(allocator), kAlignment) -
+  //       ExtraAllocSize(allocator).
+  size_t base_size = partition_alloc::internal::base::bits::AlignUp(
+                         ExtraAllocSize(allocator), kAlignment) -
+                     ExtraAllocSize(allocator);
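+  // Illustrative only, with assumed example values (none of these are
+  // guaranteed by the build): if kAlignment == 16, kSmallestBucket == 16 and
+  // ExtraAllocSize(allocator) == 20, then base_size = AlignUp(20, 16) - 20 =
+  // 12, and both 12 + 1 + 20 = 33 and 12 + 16 + 20 = 48 round up to the same
+  // 48-byte slot.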
+  ptr = allocator.root()->Alloc(base_size + 1, type_name);
+  EXPECT_TRUE(ptr);
+  void* orig_ptr = ptr;
+  char* char_ptr = static_cast<char*>(ptr);
+  *char_ptr = 'A';
+
+  // Change the size of the realloc, remaining inside the same bucket.
+  void* new_ptr = allocator.root()->Realloc(ptr, base_size + 2, type_name);
+  PA_EXPECT_PTR_EQ(ptr, new_ptr);
+  new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
+  PA_EXPECT_PTR_EQ(ptr, new_ptr);
+  new_ptr =
+      allocator.root()->Realloc(ptr, base_size + kSmallestBucket, type_name);
+  PA_EXPECT_PTR_EQ(ptr, new_ptr);
+
+  // Change the size of the realloc, switching buckets.
+  new_ptr = allocator.root()->Realloc(ptr, base_size + kSmallestBucket + 1,
+                                      type_name);
+  PA_EXPECT_PTR_NE(new_ptr, ptr);
+  // Check that the realloc copied correctly.
+  char* new_char_ptr = static_cast<char*>(new_ptr);
+  EXPECT_EQ(*new_char_ptr, 'A');
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+  // Subtle: this checks for an old bug where we copied too much from the
+  // source of the realloc. The bug shows up as trashing of the uninitialized
+  // byte just past the old size within the upsized allocation.
+  EXPECT_EQ(kUninitializedByte,
+            static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
+#endif
+  *new_char_ptr = 'B';
+  // The realloc moved. To check that the old allocation was freed, we can
+  // do an alloc of the old allocation size and check that the old allocation
+  // address is at the head of the freelist and reused.
+  void* reused_ptr = allocator.root()->Alloc(base_size + 1, type_name);
+  PA_EXPECT_PTR_EQ(reused_ptr, orig_ptr);
+  allocator.root()->Free(reused_ptr);
+
+  // Downsize the realloc.
+  ptr = new_ptr;
+  new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
+  PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
+  new_char_ptr = static_cast<char*>(new_ptr);
+  EXPECT_EQ(*new_char_ptr, 'B');
+  *new_char_ptr = 'C';
+
+  // Upsize the realloc to outside the partition.
+  ptr = new_ptr;
+  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
+  PA_EXPECT_PTR_NE(new_ptr, ptr);
+  new_char_ptr = static_cast<char*>(new_ptr);
+  EXPECT_EQ(*new_char_ptr, 'C');
+  *new_char_ptr = 'D';
+
+  // Upsize and downsize the realloc, remaining outside the partition.
+  ptr = new_ptr;
+  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
+  new_char_ptr = static_cast<char*>(new_ptr);
+  EXPECT_EQ(*new_char_ptr, 'D');
+  *new_char_ptr = 'E';
+  ptr = new_ptr;
+  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
+  new_char_ptr = static_cast<char*>(new_ptr);
+  EXPECT_EQ(*new_char_ptr, 'E');
+  *new_char_ptr = 'F';
+
+  // Downsize the realloc to inside the partition.
+  ptr = new_ptr;
+  new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
+  PA_EXPECT_PTR_NE(new_ptr, ptr);
+  PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
+  new_char_ptr = static_cast<char*>(new_ptr);
+  EXPECT_EQ(*new_char_ptr, 'F');
+
+  allocator.root()->Free(new_ptr);
+}
+
+// Test that the generic allocation functions can handle some specific sizes
+// of interest.
+TEST_P(PartitionAllocTest, AllocSizes) {
+  {
+    void* ptr = allocator.root()->Alloc(0, type_name);
+    EXPECT_TRUE(ptr);
+    allocator.root()->Free(ptr);
+  }
+
+  {
+    // PartitionPageSize() is interesting because it results in just one
+    // allocation per page, which tripped up some corner cases.
+    const size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
+    void* ptr = allocator.root()->Alloc(size, type_name);
+    EXPECT_TRUE(ptr);
+    void* ptr2 = allocator.root()->Alloc(size, type_name);
+    EXPECT_TRUE(ptr2);
+    allocator.root()->Free(ptr);
+    // Should be freeable at this point.
+    auto* slot_span =
+        SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+    EXPECT_TRUE(slot_span->in_empty_cache());
+    allocator.root()->Free(ptr2);
+  }
+
+  {
+    // Single-slot slot span size.
+    const size_t size =
+        PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan + 1;
+
+    void* ptr = allocator.root()->Alloc(size, type_name);
+    EXPECT_TRUE(ptr);
+    memset(ptr, 'A', size);
+    void* ptr2 = allocator.root()->Alloc(size, type_name);
+    EXPECT_TRUE(ptr2);
+    void* ptr3 = allocator.root()->Alloc(size, type_name);
+    EXPECT_TRUE(ptr3);
+    void* ptr4 = allocator.root()->Alloc(size, type_name);
+    EXPECT_TRUE(ptr4);
+
+    auto* slot_span = SlotSpanMetadata::FromSlotStart(
+        allocator.root()->ObjectToSlotStart(ptr));
+    auto* slot_span2 =
+        SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr3));
+    EXPECT_NE(slot_span, slot_span2);
+
+    allocator.root()->Free(ptr);
+    allocator.root()->Free(ptr3);
+    allocator.root()->Free(ptr2);
+    // Should be freeable at this point.
+    EXPECT_TRUE(slot_span->in_empty_cache());
+    EXPECT_EQ(0u, slot_span->num_allocated_slots);
+    EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
+    void* new_ptr_1 = allocator.root()->Alloc(size, type_name);
+    PA_EXPECT_PTR_EQ(ptr2, new_ptr_1);
+    void* new_ptr_2 = allocator.root()->Alloc(size, type_name);
+    PA_EXPECT_PTR_EQ(ptr3, new_ptr_2);
+
+    allocator.root()->Free(new_ptr_1);
+    allocator.root()->Free(new_ptr_2);
+    allocator.root()->Free(ptr4);
+
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+    // |SlotSpanMetadata::Free| must poison the slot's contents with
+    // |kFreedByte|.
+    EXPECT_EQ(kFreedByte,
+              *(static_cast<unsigned char*>(new_ptr_1) + (size - 1)));
+#endif
+  }
+
+  // Can we allocate a massive (128MB) size?
+  // Add +1, to test for cookie writing alignment issues.
+  // Test this only if the device has enough memory or it might fail due
+  // to OOM.
+  if (IsLargeMemoryDevice()) {
+    void* ptr = allocator.root()->Alloc(128 * 1024 * 1024 + 1, type_name);
+    allocator.root()->Free(ptr);
+  }
+
+  {
+    // Check a more reasonable, but still direct mapped, size.
+    // Chop a system page and a byte off to test for rounding errors.
+    size_t size = 20 * 1024 * 1024;
+    ASSERT_GT(size, kMaxBucketed);
+    size -= SystemPageSize();
+    size -= 1;
+    void* ptr = allocator.root()->Alloc(size, type_name);
+    char* char_ptr = static_cast<char*>(ptr);
+    *(char_ptr + (size - 1)) = 'A';
+    allocator.root()->Free(ptr);
+
+    // Can we free null?
+    allocator.root()->Free(nullptr);
+
+    // Do we correctly get a null for a failed allocation?
+    EXPECT_EQ(nullptr, allocator.root()->Alloc<AllocFlags::kReturnNull>(
+                           3u * 1024 * 1024 * 1024, type_name));
+  }
+}
+
+// Test that we can fetch the real allocated size after an allocation.
+TEST_P(PartitionAllocTest, AllocGetSizeAndStart) {
+  void* ptr;
+  size_t requested_size, actual_capacity, predicted_capacity;
+
+  // Allocate something small.
+  requested_size = 511 - ExtraAllocSize(allocator);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+  ptr = allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  actual_capacity =
+      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_LT(requested_size, actual_capacity);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (UseBRPPool()) {
+    uintptr_t address = UntagPtr(ptr);
+    for (size_t offset = 0; offset < requested_size; ++offset) {
+      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
+                slot_start);
+    }
+  }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  allocator.root()->Free(ptr);
+
+  // Allocate a size that should be a perfect match for a bucket, because it
+  // is an exact power of 2.
+  requested_size = (256 * 1024) - ExtraAllocSize(allocator);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+  ptr = allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  actual_capacity =
+      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_EQ(requested_size, actual_capacity);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (UseBRPPool()) {
+    uintptr_t address = UntagPtr(ptr);
+    for (size_t offset = 0; offset < requested_size; offset += 877) {
+      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
+                slot_start);
+    }
+  }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  allocator.root()->Free(ptr);
+
+  // Allocate a size that is a system page smaller than a bucket.
+  // AllocationCapacityFromSlotStart() should now report a larger capacity
+  // than we requested.
+  size_t num = 64;
+  while (num * SystemPageSize() >= 1024 * 1024) {
+    num /= 2;
+  }
+  requested_size =
+      num * SystemPageSize() - SystemPageSize() - ExtraAllocSize(allocator);
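+  // Worked example, assuming 4 KiB system pages (the real value is
+  // platform-dependent): 64 * 4 KiB = 256 KiB < 1 MiB, so num stays at 64 and
+  // we request 63 system pages minus the extras. On a 16 KiB-page system,
+  // 64 * 16 KiB = 1 MiB, so num halves to 32 and we request 31 pages minus
+  // the extras.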
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+  ptr = allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  actual_capacity =
+      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_EQ(requested_size + SystemPageSize(), actual_capacity);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (UseBRPPool()) {
+    uintptr_t address = UntagPtr(ptr);
+    for (size_t offset = 0; offset < requested_size; offset += 4999) {
+      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
+                slot_start);
+    }
+  }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  allocator.root()->Free(ptr);
+
+  // Allocate the maximum allowed bucketed size.
+  requested_size = kMaxBucketed - ExtraAllocSize(allocator);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+  ptr = allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  actual_capacity =
+      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_EQ(requested_size, actual_capacity);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (UseBRPPool()) {
+    uintptr_t address = UntagPtr(ptr);
+    for (size_t offset = 0; offset < requested_size; offset += 4999) {
+      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
+                slot_start);
+    }
+  }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+  // Check that we can write at the end of the reported size too.
+  char* char_ptr = static_cast<char*>(ptr);
+  *(char_ptr + (actual_capacity - 1)) = 'A';
+  allocator.root()->Free(ptr);
+
+  // Allocate something very large, and uneven.
+  if (IsLargeMemoryDevice()) {
+    requested_size = 128 * 1024 * 1024 - 33;
+    predicted_capacity =
+        allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+    ptr = allocator.root()->Alloc(requested_size, type_name);
+    EXPECT_TRUE(ptr);
+    slot_start = allocator.root()->ObjectToSlotStart(ptr);
+    actual_capacity =
+        allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+    EXPECT_EQ(predicted_capacity, actual_capacity);
+
+    EXPECT_LT(requested_size, actual_capacity);
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    if (UseBRPPool()) {
+      uintptr_t address = UntagPtr(ptr);
+      for (size_t offset = 0; offset < requested_size; offset += 16111) {
+        EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
+                  slot_start);
+      }
+    }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    allocator.root()->Free(ptr);
+  }
+
+  // An allocation too large even for direct map.
+  requested_size = MaxDirectMapped() + 1;
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+  EXPECT_EQ(requested_size, predicted_capacity);
+}
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+TEST_P(PartitionAllocTest, MTEProtectsFreedPtr) {
+  // This test checks that Arm's memory tagging extension (MTE) re-tags an
+  // allocation's memory when it is freed, which is what protects stale
+  // pointers to it.
+  base::CPU cpu;
+  if (!cpu.has_mte()) {
+    // This test won't pass without MTE support.
+    GTEST_SKIP();
+  }
+
+  // Create an arbitrarily-sized small allocation.
+  size_t alloc_size = 64 - ExtraAllocSize(allocator);
+  uint64_t* ptr1 =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  EXPECT_TRUE(ptr1);
+
+  // Invalidate the pointer by freeing it.
+  allocator.root()->Free(ptr1);
+
+  // When we immediately reallocate a pointer, we should see the same allocation
+  // slot but with a different tag (PA_EXPECT_PTR_EQ ignores the MTE tag).
+  uint64_t* ptr2 =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  PA_EXPECT_PTR_EQ(ptr1, ptr2);
+  // The different tag bits mean that ptr1 is not the same as ptr2.
+  EXPECT_NE(ptr1, ptr2);
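+  // Purely illustrative, with made-up addresses: MTE keeps the 4-bit logical
+  // tag in the pointer's top byte (bits 59:56), so ptr1 and ptr2 could look
+  // like 0x0100'00ab'cdef'0040 and 0x0200'00ab'cdef'0040 -- the untagged
+  // addresses match (PA_EXPECT_PTR_EQ passes) while the raw values differ
+  // (EXPECT_NE passes).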
+
+  // When we free again, we expect a new tag for that area that's different from
+  // ptr1 and ptr2.
+  allocator.root()->Free(ptr2);
+  uint64_t* ptr3 =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  PA_EXPECT_PTR_EQ(ptr2, ptr3);
+  EXPECT_NE(ptr1, ptr3);
+  EXPECT_NE(ptr2, ptr3);
+
+  // No further checks on ptr3; just free it to clean up and avoid DCHECKs.
+  allocator.root()->Free(ptr3);
+}
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+TEST_P(PartitionAllocTest, IsPtrWithinSameAlloc) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  const size_t kMinReasonableTestSize =
+      partition_alloc::internal::base::bits::AlignUp(
+          ExtraAllocSize(allocator) + 1, kAlignment);
+  ASSERT_GT(kMinReasonableTestSize, ExtraAllocSize(allocator));
+  const size_t kSizes[] = {kMinReasonableTestSize,
+                           256,
+                           SystemPageSize(),
+                           PartitionPageSize(),
+                           MaxRegularSlotSpanSize(),
+                           MaxRegularSlotSpanSize() + 1,
+                           MaxRegularSlotSpanSize() + SystemPageSize(),
+                           MaxRegularSlotSpanSize() + PartitionPageSize(),
+                           kMaxBucketed,
+                           kMaxBucketed + 1,
+                           kMaxBucketed + SystemPageSize(),
+                           kMaxBucketed + PartitionPageSize(),
+                           kSuperPageSize};
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  constexpr size_t kFarFarAwayDelta = 512 * kGiB;
+#else
+  constexpr size_t kFarFarAwayDelta = kGiB;
+#endif
+  for (size_t size : kSizes) {
+    size_t requested_size = size - ExtraAllocSize(allocator);
+    // For regular slot-span allocations, confirm the size fills the entire
+    // slot. Otherwise the test would be ineffective, as Partition Alloc has no
+    // ability to check against the actual allocated size.
+    // Single-slot slot-spans and direct map don't have that problem.
+    if (size <= MaxRegularSlotSpanSize()) {
+      ASSERT_EQ(requested_size,
+                allocator.root()->AllocationCapacityFromRequestedSize(
+                    requested_size));
+    }
+
+    constexpr size_t kNumRepeats = 3;
+    void* ptrs[kNumRepeats];
+    for (void*& ptr : ptrs) {
+      ptr = allocator.root()->Alloc(requested_size, type_name);
+      // Double check.
+      if (size <= MaxRegularSlotSpanSize()) {
+        uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+        EXPECT_EQ(
+            requested_size,
+            allocator.root()->AllocationCapacityFromSlotStart(slot_start));
+      }
+
+      uintptr_t address = UntagPtr(ptr);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kFarFarAwayDelta, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kSuperPageSize, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address - 1, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address, 0u),
+                PtrPosWithinAlloc::kInBounds);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size / 2, 0u),
+                PtrPosWithinAlloc::kInBounds);
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 1, 1u),
+                PtrPosWithinAlloc::kInBounds);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 1u),
+                PtrPosWithinAlloc::kAllocEnd);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 4, 4u),
+                PtrPosWithinAlloc::kInBounds);
+      for (size_t subtrahend = 0; subtrahend < 4; subtrahend++) {
+        EXPECT_EQ(IsPtrWithinSameAlloc(
+                      address, address + requested_size - subtrahend, 4u),
+                  PtrPosWithinAlloc::kAllocEnd);
+      }
+#else  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 0u),
+                PtrPosWithinAlloc::kInBounds);
+#endif
+      EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size + 1, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(
+                    address, address + requested_size + kSuperPageSize, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(
+                    address, address + requested_size + kFarFarAwayDelta, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(
+          IsPtrWithinSameAlloc(address + requested_size,
+                               address + requested_size + kFarFarAwayDelta, 0u),
+          PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(
+          IsPtrWithinSameAlloc(address + requested_size,
+                               address + requested_size + kSuperPageSize, 0u),
+          PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
+                                     address + requested_size + 1, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
+                                     address + requested_size - 1, 1u),
+                PtrPosWithinAlloc::kInBounds);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
+                                     address + requested_size, 1u),
+                PtrPosWithinAlloc::kAllocEnd);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
+                                     address + requested_size, 1u),
+                PtrPosWithinAlloc::kAllocEnd);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 4,
+                                     address + requested_size - 4, 4u),
+                PtrPosWithinAlloc::kInBounds);
+      for (size_t addend = 1; addend < 4; addend++) {
+        EXPECT_EQ(
+            IsPtrWithinSameAlloc(address + requested_size - 4,
+                                 address + requested_size - 4 + addend, 4u),
+            PtrPosWithinAlloc::kAllocEnd);
+      }
+#else  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
+                                     address + requested_size, 0u),
+                PtrPosWithinAlloc::kInBounds);
+#endif
+      EXPECT_EQ(IsPtrWithinSameAlloc(
+                    address + requested_size,
+                    address + requested_size - (requested_size / 2), 0u),
+                PtrPosWithinAlloc::kInBounds);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address, 0u),
+                PtrPosWithinAlloc::kInBounds);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address - 1, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
+                                     address - kSuperPageSize, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+      EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
+                                     address - kFarFarAwayDelta, 0u),
+                PtrPosWithinAlloc::kFarOOB);
+    }
+
+    for (void* ptr : ptrs) {
+      allocator.root()->Free(ptr);
+    }
+  }
+}
+
+TEST_P(PartitionAllocTest, GetSlotStartMultiplePages) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  auto* root = allocator.root();
+  // Find the smallest bucket with multiple PartitionPages. When searching for
+  // a bucket here, we need to check two conditions:
+  // (1) The bucket is used in our current bucket distribution.
+  // (2) The bucket is large enough that our requested size (see below) will be
+  // non-zero.
+  size_t real_size = 0;
+  for (const auto& bucket : root->buckets) {
+    if ((root->buckets + SizeToIndex(bucket.slot_size))->slot_size !=
+        bucket.slot_size) {
+      continue;
+    }
+    if (bucket.slot_size <= ExtraAllocSize(allocator)) {
+      continue;
+    }
+    if (bucket.num_system_pages_per_slot_span >
+        NumSystemPagesPerPartitionPage()) {
+      real_size = bucket.slot_size;
+      break;
+    }
+  }
+
+  // Make sure that we've managed to find an appropriate bucket.
+  ASSERT_GT(real_size, 0u);
+
+  const size_t requested_size = real_size - ExtraAllocSize(allocator);
+  // Double check the subtraction didn't underflow or yield zero.
+  EXPECT_GT(requested_size, 0u);
+  EXPECT_LE(requested_size, real_size);
+  const auto* bucket = allocator.root()->buckets + SizeToIndex(real_size);
+  EXPECT_EQ(bucket->slot_size, real_size);
+  // Make sure the test is testing multiple partition pages case.
+  EXPECT_GT(bucket->num_system_pages_per_slot_span,
+            PartitionPageSize() / SystemPageSize());
+  size_t num_slots =
+      (bucket->num_system_pages_per_slot_span * SystemPageSize()) / real_size;
+  std::vector<void*> ptrs;
+  for (size_t i = 0; i < num_slots; ++i) {
+    ptrs.push_back(allocator.root()->Alloc(requested_size, type_name));
+  }
+  for (void* ptr : ptrs) {
+    uintptr_t address = UntagPtr(ptr);
+    uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+    EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start),
+              requested_size);
+    for (size_t offset = 0; offset < requested_size; offset += 13) {
+      EXPECT_EQ(PartitionAllocGetSlotStartInBRPPool(address + offset),
+                slot_start);
+    }
+    allocator.root()->Free(ptr);
+  }
+}
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+// Test the realloc() contract.
+TEST_P(PartitionAllocTest, Realloc) {
+  // realloc(nullptr, size) should be equivalent to malloc().
+  void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
+  memset(ptr, 'A', kTestAllocSize);
+  auto* slot_span =
+      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  // realloc(ptr, 0) should be equivalent to free().
+  void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
+  EXPECT_EQ(nullptr, ptr2);
+  EXPECT_EQ(allocator.root()->ObjectToSlotStart(ptr),
+            UntagPtr(slot_span->get_freelist_head()));
+
+  // Test that growing an allocation with realloc() copies everything from the
+  // old allocation.
+  size_t size = SystemPageSize() - ExtraAllocSize(allocator);
+  // Confirm size fills the entire slot.
+  ASSERT_EQ(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
+  ptr = allocator.root()->Alloc(size, type_name);
+  memset(ptr, 'A', size);
+  ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
+  PA_EXPECT_PTR_NE(ptr, ptr2);
+  char* char_ptr2 = static_cast<char*>(ptr2);
+  EXPECT_EQ('A', char_ptr2[0]);
+  EXPECT_EQ('A', char_ptr2[size - 1]);
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
+#endif
+
+  // Test that shrinking an allocation with realloc() also copies everything
+  // from the old allocation. Use |size - 1| to test what happens to the extra
+  // space before the cookie.
+  ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
+  PA_EXPECT_PTR_NE(ptr2, ptr);
+  char* char_ptr = static_cast<char*>(ptr);
+  EXPECT_EQ('A', char_ptr[0]);
+  EXPECT_EQ('A', char_ptr[size - 2]);
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
+#endif
+
+  allocator.root()->Free(ptr);
+
+  // Single-slot slot spans...
+  // Test that growing an allocation with realloc() copies everything from the
+  // old allocation.
+  size = MaxRegularSlotSpanSize() + 1;
+  ASSERT_LE(2 * size, kMaxBucketed);  // should be in single-slot span range
+  // Confirm size doesn't fill the entire slot.
+  ASSERT_LT(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
+  ptr = allocator.root()->Alloc(size, type_name);
+  memset(ptr, 'A', size);
+  ptr2 = allocator.root()->Realloc(ptr, size * 2, type_name);
+  PA_EXPECT_PTR_NE(ptr, ptr2);
+  char_ptr2 = static_cast<char*>(ptr2);
+  EXPECT_EQ('A', char_ptr2[0]);
+  EXPECT_EQ('A', char_ptr2[size - 1]);
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
+#endif
+  allocator.root()->Free(ptr2);
+
+  // Test that shrinking an allocation with realloc() also copies everything
+  // from the old allocation.
+  size = 2 * (MaxRegularSlotSpanSize() + 1);
+  ASSERT_GT(size / 2, MaxRegularSlotSpanSize());  // in single-slot span range
+  ptr = allocator.root()->Alloc(size, type_name);
+  memset(ptr, 'A', size);
+  ptr2 = allocator.root()->Realloc(ptr, size / 2, type_name);
+  PA_EXPECT_PTR_NE(ptr, ptr2);
+  char_ptr2 = static_cast<char*>(ptr2);
+  EXPECT_EQ('A', char_ptr2[0]);
+  EXPECT_EQ('A', char_ptr2[size / 2 - 1]);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // For single-slot slot spans, the cookie is always placed immediately after
+  // the allocation.
+  EXPECT_EQ(kCookieValue[0], static_cast<unsigned char>(char_ptr2[size / 2]));
+#endif
+  allocator.root()->Free(ptr2);
+
+  // Test that shrinking a direct mapped allocation happens in-place.
+  // Pick a large size so that Realloc doesn't think it's worthwhile to
+  // downsize even if one less super page is used (due to high granularity on
+  // 64-bit systems).
+  size = 10 * kSuperPageSize + SystemPageSize() - 42;
+  ASSERT_GT(size - 32 * SystemPageSize(), kMaxBucketed);
+  ptr = allocator.root()->Alloc(size, type_name);
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  size_t actual_capacity =
+      allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+  ptr2 = allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
+  uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
+  EXPECT_EQ(slot_start, slot_start2);
+  EXPECT_EQ(actual_capacity - SystemPageSize(),
+            allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
+  void* ptr3 =
+      allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(), type_name);
+  uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
+  EXPECT_EQ(slot_start2, slot_start3);
+  EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
+            allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
+
+  // Test that a previously in-place shrunk direct mapped allocation can be
+  // expanded up again up to its original size.
+  ptr = allocator.root()->Realloc(ptr3, size, type_name);
+  slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  EXPECT_EQ(slot_start3, slot_start);
+  EXPECT_EQ(actual_capacity,
+            allocator.root()->AllocationCapacityFromSlotStart(slot_start));
+
+  // Test that the allocation can be expanded in place up to its capacity.
+  ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
+  slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
+  EXPECT_EQ(slot_start, slot_start2);
+  EXPECT_EQ(actual_capacity,
+            allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
+
+  // Test that a direct mapped allocation is relocated (not resized in place)
+  // when the new size is small enough.
+  ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
+  slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
+  EXPECT_NE(slot_start, slot_start3);
+
+  allocator.root()->Free(ptr3);
+}
+
+TEST_P(PartitionAllocTest, ReallocDirectMapAligned) {
+  size_t alignments[] = {
+      PartitionPageSize(),
+      2 * PartitionPageSize(),
+      kMaxSupportedAlignment / 2,
+      kMaxSupportedAlignment,
+  };
+
+  for (size_t alignment : alignments) {
+    // Test that shrinking a direct mapped allocation happens in-place.
+    // Pick a large size so that Realloc doesn't think it's worthwhile to
+    // downsize even if one less super page is used (due to high granularity on
+    // 64-bit systems), even if the alignment padding is taken out.
+    size_t size = 10 * kSuperPageSize + SystemPageSize() - 42;
+    ASSERT_GT(size, kMaxBucketed);
+    void* ptr =
+        allocator.root()->AllocInternalForTesting(size, alignment, type_name);
+    uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+    size_t actual_capacity =
+        allocator.root()->AllocationCapacityFromSlotStart(slot_start);
+    void* ptr2 =
+        allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
+    uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
+    EXPECT_EQ(slot_start, slot_start2);
+    EXPECT_EQ(actual_capacity - SystemPageSize(),
+              allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
+    void* ptr3 = allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(),
+                                           type_name);
+    uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
+    EXPECT_EQ(slot_start2, slot_start3);
+    EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
+              allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
+
+    // Test that a previously in-place shrunk direct mapped allocation can be
+    // expanded up again up to its original size.
+    ptr = allocator.root()->Realloc(ptr3, size, type_name);
+    slot_start = allocator.root()->ObjectToSlotStart(ptr);
+    EXPECT_EQ(slot_start3, slot_start);
+    EXPECT_EQ(actual_capacity,
+              allocator.root()->AllocationCapacityFromSlotStart(slot_start));
+
+    // Test that the allocation can be expanded in place up to its capacity.
+    ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
+    slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
+    EXPECT_EQ(slot_start, slot_start2);
+    EXPECT_EQ(actual_capacity,
+              allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
+
+    // Test that a direct mapped allocation is relocated (not resized in
+    // place) when the new size is small enough.
+    ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
+    slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
+    EXPECT_NE(slot_start2, slot_start3);
+
+    allocator.root()->Free(ptr3);
+  }
+}
+
+TEST_P(PartitionAllocTest, ReallocDirectMapAlignedRelocate) {
+  // Pick a size such that the alignment requirement makes it cross the super
+  // page boundary.
+  size_t size = 2 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
+  ASSERT_GT(size, kMaxBucketed);
+  void* ptr = allocator.root()->AllocInternalForTesting(
+      size, kMaxSupportedAlignment, type_name);
+  // Reallocating with the same size will actually relocate, because without a
+  // need for alignment we can downsize the reservation significantly.
+  void* ptr2 = allocator.root()->Realloc(ptr, size, type_name);
+  PA_EXPECT_PTR_NE(ptr, ptr2);
+  allocator.root()->Free(ptr2);
+
+  // Again pick a size such that the alignment requirement makes it cross the
+  // super page boundary, but this time make it so large that Realloc doesn't
+  // find it worth shrinking.
+  size = 10 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
+  ASSERT_GT(size, kMaxBucketed);
+  ptr = allocator.root()->AllocInternalForTesting(size, kMaxSupportedAlignment,
+                                                  type_name);
+  ptr2 = allocator.root()->Realloc(ptr, size, type_name);
+  EXPECT_EQ(ptr, ptr2);
+  allocator.root()->Free(ptr2);
+}
+
+// Tests the handing out of freelists for partial slot spans.
+TEST_P(PartitionAllocTest, PartialPageFreelists) {
+  size_t big_size = SystemPageSize() - ExtraAllocSize(allocator);
+  size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
+  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+
+  void* ptr = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr);
+
+  auto* slot_span =
+      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  size_t total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      (big_size + ExtraAllocSize(allocator));
+  EXPECT_EQ(4u, total_slots);
+  // The freelist should be empty: the slot size here is exactly one system
+  // page, so only a single slot was provisioned up front, and the allocation
+  // above just took it.
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
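+  // Note how the numbers line up: big_size + ExtraAllocSize(allocator) is one
+  // system page, so total_slots == 4 implies a 4-system-page slot span. Slots
+  // are provisioned one system page at a time, so exactly one slot existed,
+  // Alloc() took it, and the remaining 3 slots are still unprovisioned.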
+
+  void* ptr2 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr2);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(2u, slot_span->num_allocated_slots);
+  EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
+
+  void* ptr3 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr3);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(3u, slot_span->num_allocated_slots);
+  EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
+
+  void* ptr4 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr4);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(4u, slot_span->num_allocated_slots);
+  EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
+
+  void* ptr5 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr5);
+
+  auto* slot_span2 =
+      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr5));
+  EXPECT_EQ(1u, slot_span2->num_allocated_slots);
+
+  // Churn things a little whilst there's a partial slot span freelist.
+  allocator.root()->Free(ptr);
+  ptr = allocator.root()->Alloc(big_size, type_name);
+  void* ptr6 = allocator.root()->Alloc(big_size, type_name);
+
+  allocator.root()->Free(ptr);
+  allocator.root()->Free(ptr2);
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr4);
+  allocator.root()->Free(ptr5);
+  allocator.root()->Free(ptr6);
+  EXPECT_TRUE(slot_span->in_empty_cache());
+  EXPECT_TRUE(slot_span2->in_empty_cache());
+  EXPECT_TRUE(slot_span2->get_freelist_head());
+  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
+
+  // Size that's just above half a page.
+  size_t non_dividing_size =
+      SystemPageSize() / 2 + 1 - ExtraAllocSize(allocator);
+  bucket_index = SizeToIndex(non_dividing_size + ExtraAllocSize(allocator));
+  bucket = &allocator.root()->buckets[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+
+  ptr = allocator.root()->Alloc(non_dividing_size, type_name);
+  EXPECT_TRUE(ptr);
+
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      bucket->slot_size;
+
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
+
+  ptr2 = allocator.root()->Alloc(non_dividing_size, type_name);
+  EXPECT_TRUE(ptr2);
+  EXPECT_TRUE(slot_span->get_freelist_head());
+  EXPECT_EQ(2u, slot_span->num_allocated_slots);
+  // 2 slots got provisioned: the first one fills the rest of the first
+  // (already provisioned) page and exceeds it by just a tad, thus leading to
+  // provisioning a new page, and the second one fully fits within that new
+  // page.
+  EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
+
+  ptr3 = allocator.root()->Alloc(non_dividing_size, type_name);
+  EXPECT_TRUE(ptr3);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(3u, slot_span->num_allocated_slots);
+  EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
+
+  allocator.root()->Free(ptr);
+  allocator.root()->Free(ptr2);
+  allocator.root()->Free(ptr3);
+  EXPECT_TRUE(slot_span->in_empty_cache());
+  EXPECT_TRUE(slot_span2->get_freelist_head());
+  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
+
+  // And test a couple of sizes that do not cross SystemPageSize() with a
+  // single allocation.
+  size_t medium_size = (SystemPageSize() / 2) - ExtraAllocSize(allocator);
+  bucket_index = SizeToIndex(medium_size + ExtraAllocSize(allocator));
+  bucket = &allocator.root()->buckets[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+
+  ptr = allocator.root()->Alloc(medium_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      (medium_size + ExtraAllocSize(allocator));
+  size_t first_slot_span_slots =
+      SystemPageSize() / (medium_size + ExtraAllocSize(allocator));
+  EXPECT_EQ(2u, first_slot_span_slots);
+  EXPECT_EQ(total_slots - first_slot_span_slots,
+            slot_span->num_unprovisioned_slots);
+
+  allocator.root()->Free(ptr);
+
+  size_t small_size = (SystemPageSize() / 4) - ExtraAllocSize(allocator);
+  bucket_index = SizeToIndex(small_size + ExtraAllocSize(allocator));
+  bucket = &allocator.root()->buckets[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+
+  ptr = allocator.root()->Alloc(small_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      (small_size + ExtraAllocSize(allocator));
+  first_slot_span_slots =
+      SystemPageSize() / (small_size + ExtraAllocSize(allocator));
+  EXPECT_EQ(total_slots - first_slot_span_slots,
+            slot_span->num_unprovisioned_slots);
+
+  allocator.root()->Free(ptr);
+  EXPECT_TRUE(slot_span->get_freelist_head());
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+
+  ASSERT_LT(ExtraAllocSize(allocator), 64u);
+  size_t very_small_size = (ExtraAllocSize(allocator) <= 32)
+                               ? (32 - ExtraAllocSize(allocator))
+                               : (64 - ExtraAllocSize(allocator));
+  size_t very_small_adjusted_size =
+      allocator.root()->AdjustSize0IfNeeded(very_small_size);
+  bucket_index =
+      SizeToIndex(very_small_adjusted_size + ExtraAllocSize(allocator));
+  bucket = &allocator.root()->buckets[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+
+  ptr = allocator.root()->Alloc(very_small_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  size_t very_small_actual_size = allocator.root()->GetUsableSize(ptr);
+  total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      (very_small_actual_size + ExtraAllocSize(allocator));
+  first_slot_span_slots =
+      SystemPageSize() / (very_small_actual_size + ExtraAllocSize(allocator));
+  EXPECT_EQ(total_slots - first_slot_span_slots,
+            slot_span->num_unprovisioned_slots);
+
+  allocator.root()->Free(ptr);
+  EXPECT_TRUE(slot_span->get_freelist_head());
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+
+  // And try an allocation size (against the generic allocator) that is
+  // larger than a system page.
+  size_t page_and_a_half_size =
+      (SystemPageSize() + (SystemPageSize() / 2)) - ExtraAllocSize(allocator);
+  ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  // Only the first slot was provisioned, and that's the one that was just
+  // allocated, so the free list is empty.
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      (page_and_a_half_size + ExtraAllocSize(allocator));
+  EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
+  ptr2 = allocator.root()->Alloc(page_and_a_half_size, type_name);
+  EXPECT_TRUE(ptr2);
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(2u, slot_span->num_allocated_slots);
+  // As above, only one slot was provisioned.
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
+  allocator.root()->Free(ptr);
+  allocator.root()->Free(ptr2);
+
+  // And then make sure that exactly the page size only faults one page.
+  size_t page_size = SystemPageSize() - ExtraAllocSize(allocator);
+  ptr = allocator.root()->Alloc(page_size, type_name);
+  EXPECT_TRUE(ptr);
+  slot_span = SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  EXPECT_TRUE(slot_span->get_freelist_head());
+  total_slots =
+      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
+      (page_size + ExtraAllocSize(allocator));
+  EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
+  allocator.root()->Free(ptr);
+}
+
+// Test some of the fragmentation-resistant properties of the allocator.
+TEST_P(PartitionAllocTest, SlotSpanRefilling) {
+  PartitionRoot::Bucket* bucket =
+      &allocator.root()->buckets[test_bucket_index_];
+
+  // Grab two full slot spans and a non-full slot span.
+  auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
+  auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  EXPECT_NE(slot_span1, bucket->active_slot_spans_head);
+  EXPECT_NE(slot_span2, bucket->active_slot_spans_head);
+  auto* slot_span =
+      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+
+  // Work out a pointer into slot_span1 and free it; then do the same for
+  // slot_span2.
+  void* ptr2 = allocator.root()->SlotStartToObject(
+      SlotSpan::ToSlotSpanStart(slot_span1));
+  allocator.root()->Free(ptr2);
+  ptr2 = allocator.root()->SlotStartToObject(
+      SlotSpan::ToSlotSpanStart(slot_span2));
+  allocator.root()->Free(ptr2);
+
+  // If we perform two allocations from the same bucket now, we expect to
+  // refill both the nearly full slot spans.
+  std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
+  std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+
+  FreeFullSlotSpan(allocator.root(), slot_span2);
+  FreeFullSlotSpan(allocator.root(), slot_span1);
+  allocator.root()->Free(ptr);
+}
+
+// Basic tests to ensure that allocations work for partial page buckets.
+TEST_P(PartitionAllocTest, PartialPages) {
+  // Find a size that is backed by a partial partition page.
+  size_t size = sizeof(void*);
+  size_t bucket_index;
+
+  PartitionRoot::Bucket* bucket = nullptr;
+  constexpr size_t kMaxSize = 4000u;
+  while (size < kMaxSize) {
+    bucket_index = SizeToIndex(size + ExtraAllocSize(allocator));
+    bucket = &allocator.root()->buckets[bucket_index];
+    if (bucket->num_system_pages_per_slot_span %
+        NumSystemPagesPerPartitionPage()) {
+      break;
+    }
+    size += sizeof(void*);
+  }
+  EXPECT_LT(size, kMaxSize);
+
+  auto* slot_span1 = GetFullSlotSpan(size);
+  auto* slot_span2 = GetFullSlotSpan(size);
+  FreeFullSlotSpan(allocator.root(), slot_span2);
+  FreeFullSlotSpan(allocator.root(), slot_span1);
+}
+
+// Test correct handling if our mapping collides with another.
+TEST_P(PartitionAllocTest, MappingCollision) {
+  size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
+  // The -2 is because the first and last partition pages in a super page are
+  // guard pages. We also discount the partition pages used for the free-slot
+  // bitmap.
+  size_t num_slot_span_needed =
+      (NumPartitionPagesPerSuperPage() - 2 -
+       partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
+      num_pages_per_slot_span;
+  size_t num_partition_pages_needed =
+      num_slot_span_needed * num_pages_per_slot_span;
+
+  auto first_super_page_pages =
+      std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
+  auto second_super_page_pages =
+      std::make_unique<SlotSpan*[]>(num_partition_pages_needed);
+
+  size_t i;
+  for (i = 0; i < num_partition_pages_needed; ++i) {
+    first_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
+  }
+
+  uintptr_t slot_span_start =
+      SlotSpan::ToSlotSpanStart(first_super_page_pages[0]);
+  EXPECT_EQ(PartitionPageSize() +
+                partition_alloc::internal::ReservedFreeSlotBitmapSize(),
+            slot_span_start & kSuperPageOffsetMask);
+  uintptr_t super_page =
+      slot_span_start - PartitionPageSize() -
+      partition_alloc::internal::ReservedFreeSlotBitmapSize();
+  // Map a single system page either side of the mapping for our allocations,
+  // with the goal of tripping up alignment of the next mapping.
+  uintptr_t map1 =
+      AllocPages(super_page - PageAllocationGranularity(),
+                 PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kPartitionAlloc);
+  EXPECT_TRUE(map1);
+  uintptr_t map2 =
+      AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
+                 PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kInaccessible),
+                 PageTag::kPartitionAlloc);
+  EXPECT_TRUE(map2);
+
+  for (i = 0; i < num_partition_pages_needed; ++i) {
+    second_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
+  }
+
+  FreePages(map1, PageAllocationGranularity());
+  FreePages(map2, PageAllocationGranularity());
+
+  super_page = SlotSpan::ToSlotSpanStart(second_super_page_pages[0]);
+  EXPECT_EQ(PartitionPageSize() +
+                partition_alloc::internal::ReservedFreeSlotBitmapSize(),
+            super_page & kSuperPageOffsetMask);
+  super_page -= PartitionPageSize() +
+                partition_alloc::internal::ReservedFreeSlotBitmapSize();
+  // Map a single system page either side of the mapping for our allocations,
+  // with the goal of tripping up alignment of the next mapping.
+  map1 = AllocPages(super_page - PageAllocationGranularity(),
+                    PageAllocationGranularity(), PageAllocationGranularity(),
+                    PageAccessibilityConfiguration(
+                        PageAccessibilityConfiguration::kReadWriteTagged),
+                    PageTag::kPartitionAlloc);
+  EXPECT_TRUE(map1);
+  map2 = AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
+                    PageAllocationGranularity(),
+                    PageAccessibilityConfiguration(
+                        PageAccessibilityConfiguration::kReadWriteTagged),
+                    PageTag::kPartitionAlloc);
+  EXPECT_TRUE(map2);
+  EXPECT_TRUE(TrySetSystemPagesAccess(
+      map1, PageAllocationGranularity(),
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kInaccessible)));
+  EXPECT_TRUE(TrySetSystemPagesAccess(
+      map2, PageAllocationGranularity(),
+      PageAccessibilityConfiguration(
+          PageAccessibilityConfiguration::kInaccessible)));
+
+  auto* slot_span_in_third_super_page = GetFullSlotSpan(kTestAllocSize);
+  FreePages(map1, PageAllocationGranularity());
+  FreePages(map2, PageAllocationGranularity());
+
+  EXPECT_EQ(0u, SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
+                    PartitionPageOffsetMask());
+
+  // And make sure we really did get a page in a new superpage.
+  EXPECT_NE(
+      SlotSpan::ToSlotSpanStart(first_super_page_pages[0]) & kSuperPageBaseMask,
+      SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
+          kSuperPageBaseMask);
+  EXPECT_NE(SlotSpan::ToSlotSpanStart(second_super_page_pages[0]) &
+                kSuperPageBaseMask,
+            SlotSpan::ToSlotSpanStart(slot_span_in_third_super_page) &
+                kSuperPageBaseMask);
+
+  FreeFullSlotSpan(allocator.root(), slot_span_in_third_super_page);
+  for (i = 0; i < num_partition_pages_needed; ++i) {
+    FreeFullSlotSpan(allocator.root(), first_super_page_pages[i]);
+    FreeFullSlotSpan(allocator.root(), second_super_page_pages[i]);
+  }
+}
+
+// Tests that slot spans in the free slot span cache do get freed as
+// appropriate.
+TEST_P(PartitionAllocTest, FreeCache) {
+  EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
+
+  size_t big_size = 1000 - ExtraAllocSize(allocator);
+  size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
+  PartitionBucket* bucket = &allocator.root()->buckets[bucket_index];
+
+  void* ptr = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr);
+  auto* slot_span =
+      SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  // Lazy commit commits only needed pages.
+  size_t expected_committed_size =
+      kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
+  EXPECT_EQ(expected_committed_size,
+            allocator.root()->get_total_size_of_committed_pages());
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+  EXPECT_TRUE(slot_span->in_empty_cache());
+  EXPECT_TRUE(slot_span->get_freelist_head());
+
+  CycleFreeCache(kTestAllocSize);
+
+  // Flushing the cache should have really freed the unused slot spans.
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_FALSE(slot_span->in_empty_cache());
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+  size_t num_system_pages_per_slot_span = allocator.root()
+                                              ->buckets[test_bucket_index_]
+                                              .num_system_pages_per_slot_span;
+  size_t expected_size =
+      kUseLazyCommit ? SystemPageSize()
+                     : num_system_pages_per_slot_span * SystemPageSize();
+  EXPECT_EQ(expected_size,
+            allocator.root()->get_total_size_of_committed_pages());
+
+  // Check that an allocation works ok whilst in this state (a free'd slot span
+  // as the active slot spans head).
+  ptr = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_FALSE(bucket->empty_slot_spans_head);
+  allocator.root()->Free(ptr);
+
+  // Also check that a slot span that is bouncing immediately between empty and
+  // used does not get freed.
+  for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
+    ptr = allocator.root()->Alloc(big_size, type_name);
+    EXPECT_TRUE(slot_span->get_freelist_head());
+    allocator.root()->Free(ptr);
+    EXPECT_TRUE(slot_span->get_freelist_head());
+  }
+  EXPECT_EQ(expected_committed_size,
+            allocator.root()->get_total_size_of_committed_pages());
+}
+
+// Tests for a bug we had with losing references to free slot spans.
+TEST_P(PartitionAllocTest, LostFreeSlotSpansBug) {
+  size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
+
+  void* ptr = allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  void* ptr2 = allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr2);
+
+  SlotSpanMetadata* slot_span =
+      SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+  SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr2));
+  PartitionBucket* bucket = slot_span->bucket;
+
+  EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
+  EXPECT_EQ(1u, slot_span->num_allocated_slots);
+  EXPECT_EQ(1u, slot_span2->num_allocated_slots);
+  EXPECT_TRUE(slot_span->is_full());
+  EXPECT_TRUE(slot_span2->is_full());
+  // The first span was kicked out from the active list, but the second one
+  // wasn't.
+  EXPECT_TRUE(slot_span->marked_full);
+  EXPECT_FALSE(slot_span2->marked_full);
+
+  allocator.root()->Free(ptr);
+  allocator.root()->Free(ptr2);
+
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
+  EXPECT_EQ(0u, slot_span->num_allocated_slots);
+  EXPECT_EQ(0u, slot_span2->num_allocated_slots);
+  EXPECT_FALSE(slot_span->is_full());
+  EXPECT_FALSE(slot_span2->is_full());
+  EXPECT_FALSE(slot_span->marked_full);
+  EXPECT_FALSE(slot_span2->marked_full);
+  EXPECT_TRUE(slot_span->get_freelist_head());
+  EXPECT_TRUE(slot_span2->get_freelist_head());
+
+  CycleFreeCache(kTestAllocSize);
+
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  EXPECT_FALSE(slot_span2->get_freelist_head());
+
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
+  EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
+            bucket->active_slot_spans_head);
+
+  // At this moment, we have two decommitted slot spans, on the empty list.
+  ptr = allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  allocator.root()->Free(ptr);
+
+  EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
+            bucket->active_slot_spans_head);
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_TRUE(bucket->decommitted_slot_spans_head);
+
+  CycleFreeCache(kTestAllocSize);
+
+  // We're now set up to trigger a historical bug by scanning over the active
+  // slot spans list. The current code gets into a different state, but we'll
+  // keep the test around as an interesting corner case.
+  ptr = allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  allocator.root()->Free(ptr);
+
+  EXPECT_TRUE(bucket->is_valid());
+  EXPECT_TRUE(bucket->empty_slot_spans_head);
+  EXPECT_TRUE(bucket->decommitted_slot_spans_head);
+}
+
+#if defined(PA_HAS_DEATH_TESTS)
+
+// Unit tests that check that, when an allocation fails in "return null" mode,
+// repeating it doesn't crash and still returns null. The tests need to stress
+// memory subsystem limits to do so, hence they each try to allocate 6 GB of
+// memory, with a different per-allocation block size.
+//
+// On 64-bit systems we need to restrict the address space to force allocation
+// failure, so these tests run only on POSIX systems that provide setrlimit(),
+// and use it to limit address space to 6GB.
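+// As a rough, illustrative sketch only (not the actual DoReturnNullTest
+// implementation), capping the address space on POSIX looks something like:
+//
+//   struct rlimit limit;                 // from <sys/resource.h>
+//   getrlimit(RLIMIT_AS, &limit);
+//   limit.rlim_cur = 6ULL << 30;         // 6 GB of address space
+//   setrlimit(RLIMIT_AS, &limit);
+//
+// after which sufficiently large allocations fail and, in "return null" mode,
+// must yield nullptr rather than crashing.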
+//
+// Disable these tests on Android because, due to the allocation-heavy behavior,
+// they tend to get OOM-killed rather than pass.
+//
+// Disable these tests on Windows, since they run more slowly there and tend
+// to time out, causing flakes.
+#if !BUILDFLAG(IS_WIN) &&                                      \
+        (!defined(ARCH_CPU_64_BITS) ||                         \
+         (BUILDFLAG(IS_POSIX) &&                               \
+          !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)))) || \
+    BUILDFLAG(IS_FUCHSIA)
+#define MAYBE_RepeatedAllocReturnNullDirect RepeatedAllocReturnNullDirect
+#define MAYBE_RepeatedReallocReturnNullDirect RepeatedReallocReturnNullDirect
+#else
+#define MAYBE_RepeatedAllocReturnNullDirect \
+  DISABLED_RepeatedAllocReturnNullDirect
+#define MAYBE_RepeatedReallocReturnNullDirect \
+  DISABLED_RepeatedReallocReturnNullDirect
+#endif
+
+// The following four tests wrap the tested code in an EXPECT_DEATH statement,
+// because they are non-hermetic: they attempt to exhaust the allocatable
+// memory, which leaves the allocator in a bad global state.
+// Performing them as death tests causes them to be forked into their own
+// process, so they won't pollute other tests.
+//
+// These tests are *very* slow when BUILDFLAG(PA_DCHECK_IS_ON), because they
+// memset() many GiB of data (see crbug.com/1168168).
+// TODO(lizeb): make these tests faster.
+TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {
+  // A direct-mapped allocation size.
+  size_t direct_map_size = 32 * 1024 * 1024;
+  ASSERT_GT(direct_map_size, kMaxBucketed);
+  EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionAlloc),
+               "Passed DoReturnNullTest");
+}
+
+// Repeating above test with Realloc
+TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedReallocReturnNullDirect) {
+  size_t direct_map_size = 32 * 1024 * 1024;
+  ASSERT_GT(direct_map_size, kMaxBucketed);
+  EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionRealloc),
+               "Passed DoReturnNullTest");
+}
+
+// TODO(crbug.com/1348221) re-enable the tests below, once the allocator
+// actually returns nullptr for non direct-mapped allocations.
+// When doing so, they will need to be made MAYBE_ like those above.
+//
+// Tests "return null" with a 512 kB block size.
+TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedAllocReturnNull) {
+  // A single-slot but non-direct-mapped allocation size.
+  size_t single_slot_size = 512 * 1024;
+  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
+  ASSERT_LE(single_slot_size, kMaxBucketed);
+  EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionAlloc),
+               "Passed DoReturnNullTest");
+}
+
+// Repeating above test with Realloc.
+TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedReallocReturnNull) {
+  size_t single_slot_size = 512 * 1024;
+  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
+  ASSERT_LE(single_slot_size, kMaxBucketed);
+  EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionRealloc),
+               "Passed DoReturnNullTest");
+}
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+// Check that Arm's memory tagging extension (MTE) correctly protects freed
+// memory. Writes through a freed pointer should result in a crash.
+TEST_P(PartitionAllocDeathTest, MTEProtectsFreedPtr) {
+  base::CPU cpu;
+  if (!cpu.has_mte()) {
+    // This test won't pass on systems without MTE.
+    GTEST_SKIP();
+  }
+
+  constexpr uint64_t kCookie = 0x1234567890ABCDEF;
+  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
+
+  // Make an arbitrary-sized small allocation.
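+  // (ExtraAllocSize() is subtracted so that, with PartitionAlloc's per-slot
+  // extras added back, the request lands in the 64-byte bucket.)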
+  size_t alloc_size = 64 - ExtraAllocSize(allocator);
+  uint64_t* ptr =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  EXPECT_TRUE(ptr);
+
+  // Check that the allocation's writable.
+  *ptr = kCookie;
+
+  // Invalidate ptr by freeing it.
+  allocator.root()->Free(ptr);
+
+  // Writing to |ptr| after Free() should crash.
+  EXPECT_EXIT(
+      {
+        // Should be in synchronous MTE mode for running this test.
+        *ptr = kQuarantined;
+      },
+      testing::KilledBySignal(SIGSEGV), "");
+}
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+// Make sure that malloc(-1) dies.
+// In the past, we had an integer overflow that would alias malloc(-1) to
+// malloc(0), which is not good.
+TEST_P(PartitionAllocDeathTest, LargeAllocs) {
+  // Largest alloc.
+  EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
+  // And the smallest allocation we expect to die.
+  // TODO(bartekn): Separate into its own test, as it wouldn't run (same below).
+  EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
+}
+
+// These tests don't work deterministically when BRP is enabled on certain
+// architectures. On Free(), BRP's ref-count gets overwritten by an encoded
+// freelist pointer. On little-endian 64-bit architectures, this happens to
+// always be an even number, which triggers BRP's own CHECK (sic!). On other
+// architectures, it's likely to be an odd number >1, which fools BRP into
+// thinking the memory isn't freed and is still referenced, thus making it
+// quarantine the slot and return early, before
+// PA_CHECK(slot_start != freelist_head) is reached.
+// TODO(bartekn): Enable in the BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) case.
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+    (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
+
+// Check that our immediate double-free detection works.
+TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree) {
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  allocator.root()->Free(ptr);
+  EXPECT_DEATH(allocator.root()->Free(ptr), "");
+}
+
+// As above, but when this isn't the only slot in the span.
+TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree2ndSlot) {
+  void* ptr0 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr0);
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  allocator.root()->Free(ptr);
+  EXPECT_DEATH(allocator.root()->Free(ptr), "");
+  allocator.root()->Free(ptr0);
+}
+
+// Check that our double-free detection, based on |num_allocated_slots| not
+// going below 0, works.
+//
+// Unlike the ImmediateDoubleFree test, we can't have a 2ndSlot version: with
+// another slot still allocated in the span, |num_allocated_slots| never goes
+// below 0, so this protection wouldn't fire.
+TEST_P(PartitionAllocDeathTest, NumAllocatedSlotsDoubleFree) {
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr2);
+  allocator.root()->Free(ptr);
+  allocator.root()->Free(ptr2);
+  // This is not an immediate double-free so our immediate detection won't
+  // fire. However, it does take |num_allocated_slots| to -1, which is illegal
+  // and should be trapped.
+  EXPECT_DEATH(allocator.root()->Free(ptr), "");
+}
+
+#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+        // (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
+
+// Check that guard pages are present where expected.
+TEST_P(PartitionAllocDeathTest, DirectMapGuardPages) {
+  const size_t kSizes[] = {
+      kMaxBucketed + ExtraAllocSize(allocator) + 1,
+      kMaxBucketed + SystemPageSize(), kMaxBucketed + PartitionPageSize(),
+      partition_alloc::internal::base::bits::AlignUp(
+          kMaxBucketed + kSuperPageSize, kSuperPageSize) -
+          PartitionRoot::GetDirectMapMetadataAndGuardPagesSize()};
+  for (size_t size : kSizes) {
+    ASSERT_GT(size, kMaxBucketed);
+    size -= ExtraAllocSize(allocator);
+    EXPECT_GT(size, kMaxBucketed)
+        << "allocation not large enough for direct allocation";
+    void* ptr = allocator.root()->Alloc(size, type_name);
+
+    EXPECT_TRUE(ptr);
+    char* char_ptr = static_cast<char*>(ptr) - kPointerOffset;
+
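+    // |char_ptr| points at the slot start; both writes below should land in
+    // the guard pages surrounding the direct-mapped allocation.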
+    EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
+    EXPECT_DEATH(*(char_ptr + partition_alloc::internal::base::bits::AlignUp(
+                                  size, SystemPageSize())) = 'A',
+                 "");
+
+    allocator.root()->Free(ptr);
+  }
+}
+
+// These tests rely on precise layout. They handle cookie, not ref-count.
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
+    PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+
+TEST_P(PartitionAllocDeathTest, UseAfterFreeDetection) {
+  base::CPU cpu;
+  void* data = allocator.root()->Alloc(100);
+  allocator.root()->Free(data);
+
+  // Use after free: not crashing here, but the next allocation should crash,
+  // since we corrupted the freelist.
+  memset(data, 0x42, 100);
+  EXPECT_DEATH(allocator.root()->Alloc(100), "");
+}
+
+TEST_P(PartitionAllocDeathTest, FreelistCorruption) {
+  base::CPU cpu;
+  const size_t alloc_size = 2 * sizeof(void*);
+  void** fake_freelist_entry =
+      static_cast<void**>(allocator.root()->Alloc(alloc_size));
+  fake_freelist_entry[0] = nullptr;
+  fake_freelist_entry[1] = nullptr;
+
+  void** uaf_data = static_cast<void**>(allocator.root()->Alloc(alloc_size));
+  allocator.root()->Free(uaf_data);
+  // Try to confuse the allocator. This check is still easy to circumvent
+  // deliberately; one would "just" need to set uaf_data[1] to ~uaf_data[0].
+  void* previous_uaf_data = uaf_data[0];
+  uaf_data[0] = fake_freelist_entry;
+  EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
+
+  // Restore the freelist entry value, otherwise freelist corruption is detected
+  // in TearDown(), crashing this process.
+  uaf_data[0] = previous_uaf_data;
+
+  allocator.root()->Free(fake_freelist_entry);
+}
+
+// With BUILDFLAG(PA_DCHECK_IS_ON), cookie already handles off-by-one detection.
+#if !BUILDFLAG(PA_DCHECK_IS_ON)
+TEST_P(PartitionAllocDeathTest, OffByOneDetection) {
+  base::CPU cpu;
+  const size_t alloc_size = 2 * sizeof(void*);
+  char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size));
+  if (cpu.has_mte()) {
+    EXPECT_DEATH(array[alloc_size] = 'A', "");
+  } else {
+    char previous_value = array[alloc_size];
+    // volatile is required to prevent the compiler from getting too clever and
+    // eliding the out-of-bounds write. The root cause is that the PA_MALLOC_FN
+    // annotation tells the compiler (among other things) that the returned
+    // value cannot alias anything.
+    *const_cast<volatile char*>(&array[alloc_size]) = 'A';
+    // Crash at the next allocation. This assumes that we are touching a new,
+    // non-randomized slot span, where the next slot to be handed over to the
+    // application directly follows the current one.
+    EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
+
+    // Restore integrity, otherwise the process will crash in TearDown().
+    array[alloc_size] = previous_value;
+  }
+}
+
+TEST_P(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
+  base::CPU cpu;
+  const size_t alloc_size = 2 * sizeof(void*);
+  void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size));
+  char valid;
+  if (cpu.has_mte()) {
+    EXPECT_DEATH(array[2] = &valid, "");
+  } else {
+    void* previous_value = array[2];
+    // As above, needs volatile to convince the compiler to perform the write.
+    *const_cast<void* volatile*>(&array[2]) = &valid;
+    // Crash at the next allocation. This assumes that we are touching a new,
+    // non-randomized slot span, where the next slot to be handed over to the
+    // application directly follows the current one.
+    EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
+    array[2] = previous_value;
+  }
+}
+#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)
+
+#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
+        // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
+
+#endif  // defined(PA_HAS_DEATH_TESTS)
+
+// Tests that |DumpStats| runs without crashing and returns non-zero values
+// when memory is allocated.
+TEST_P(PartitionAllocTest, DumpMemoryStats) {
+  {
+    void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+    MockPartitionStatsDumper mock_stats_dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &mock_stats_dumper);
+    EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded());
+    allocator.root()->Free(ptr);
+  }
+
+  // This series of tests checks the active -> empty -> decommitted states.
+  {
+    {
+      void* ptr =
+          allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name);
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(2048u, stats->bucket_slot_size);
+      EXPECT_EQ(2048u, stats->active_bytes);
+      EXPECT_EQ(1u, stats->active_count);
+      EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(0u, stats->num_full_slot_spans);
+      EXPECT_EQ(1u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+      allocator.root()->Free(ptr);
+    }
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(2048u, stats->bucket_slot_size);
+      EXPECT_EQ(0u, stats->active_bytes);
+      EXPECT_EQ(0u, stats->active_count);
+      EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
+      EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(0u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(1u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+    }
+
+    // TODO(crbug.com/722911): Commenting this out causes this test to fail when
+    // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
+    // when run with the others (--gtest_filter=PartitionAllocTest.*).
+    CycleFreeCache(kTestAllocSize);
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(2048u, stats->bucket_slot_size);
+      EXPECT_EQ(0u, stats->active_bytes);
+      EXPECT_EQ(0u, stats->active_count);
+      EXPECT_EQ(0u, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(0u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
+    }
+  }
+
+  // This test checks for correct empty slot span list accounting.
+  {
+    size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
+    void* ptr1 = allocator.root()->Alloc(size, type_name);
+    void* ptr2 = allocator.root()->Alloc(size, type_name);
+    allocator.root()->Free(ptr1);
+    allocator.root()->Free(ptr2);
+
+    CycleFreeCache(kTestAllocSize);
+
+    ptr1 = allocator.root()->Alloc(size, type_name);
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(PartitionPageSize());
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(PartitionPageSize(), stats->bucket_slot_size);
+      EXPECT_EQ(PartitionPageSize(), stats->active_bytes);
+      EXPECT_EQ(1u, stats->active_count);
+      EXPECT_EQ(PartitionPageSize(), stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
+    }
+    allocator.root()->Free(ptr1);
+  }
+
+  // This test checks for correct direct mapped accounting.
+  {
+    size_t size_smaller = kMaxBucketed + 1;
+    size_t size_bigger = (kMaxBucketed * 2) + 1;
+    size_t real_size_smaller =
+        (size_smaller + SystemPageOffsetMask()) & SystemPageBaseMask();
+    size_t real_size_bigger =
+        (size_bigger + SystemPageOffsetMask()) & SystemPageBaseMask();
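+    // Direct-mapped requests are rounded up to a system-page boundary; the
+    // stats below report these rounded-up sizes as the bucket slot sizes.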
+    void* ptr = allocator.root()->Alloc(size_smaller, type_name);
+    void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(real_size_smaller);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_TRUE(stats->is_direct_map);
+      EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
+      EXPECT_EQ(real_size_smaller, stats->active_bytes);
+      EXPECT_EQ(1u, stats->active_count);
+      EXPECT_EQ(real_size_smaller, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+
+      stats = dumper.GetBucketStats(real_size_bigger);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_TRUE(stats->is_direct_map);
+      EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
+      EXPECT_EQ(real_size_bigger, stats->active_bytes);
+      EXPECT_EQ(1u, stats->active_count);
+      EXPECT_EQ(real_size_bigger, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+    }
+
+    allocator.root()->Free(ptr2);
+    allocator.root()->Free(ptr);
+
+    // Whilst we're here, allocate again and free with different ordering to
+    // give a workout to our linked list code.
+    ptr = allocator.root()->Alloc(size_smaller, type_name);
+    ptr2 = allocator.root()->Alloc(size_bigger, type_name);
+    allocator.root()->Free(ptr);
+    allocator.root()->Free(ptr2);
+  }
+
+  // This test checks large-but-not-quite-direct allocations.
+  {
+    const size_t requested_size = 16 * SystemPageSize();
+    void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      size_t slot_size = SizeToBucketSize(requested_size + 1);
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(slot_size);
+      ASSERT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_FALSE(stats->is_direct_map);
+      EXPECT_EQ(slot_size, stats->bucket_slot_size);
+      EXPECT_EQ(requested_size + 1 + ExtraAllocSize(allocator),
+                stats->active_bytes);
+      EXPECT_EQ(1u, stats->active_count);
+      EXPECT_EQ(slot_size, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
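+      // Only whole system pages in the unused tail of the slot are reported
+      // as discardable.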
+      EXPECT_EQ((slot_size - (requested_size + 1)) / SystemPageSize() *
+                    SystemPageSize(),
+                stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+    }
+
+    allocator.root()->Free(ptr);
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+      size_t slot_size = SizeToBucketSize(requested_size + 1);
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(slot_size);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_FALSE(stats->is_direct_map);
+      EXPECT_EQ(slot_size, stats->bucket_slot_size);
+      EXPECT_EQ(0u, stats->active_bytes);
+      EXPECT_EQ(0u, stats->active_count);
+      EXPECT_EQ(slot_size, stats->resident_bytes);
+      EXPECT_EQ(slot_size, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(1u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+    }
+
+    void* ptr2 = allocator.root()->Alloc(requested_size + SystemPageSize() + 1,
+                                         type_name);
+    EXPECT_EQ(ptr, ptr2);
+
+    {
+      MockPartitionStatsDumper dumper;
+      allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                  &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      size_t slot_size =
+          SizeToBucketSize(requested_size + SystemPageSize() + 1);
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(slot_size);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_FALSE(stats->is_direct_map);
+      EXPECT_EQ(slot_size, stats->bucket_slot_size);
+      EXPECT_EQ(
+          requested_size + SystemPageSize() + 1 + ExtraAllocSize(allocator),
+          stats->active_bytes);
+      EXPECT_EQ(1u, stats->active_count);
+      EXPECT_EQ(slot_size, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ((slot_size - (requested_size + SystemPageSize() + 1)) /
+                    SystemPageSize() * SystemPageSize(),
+                stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_slot_spans);
+      EXPECT_EQ(0u, stats->num_active_slot_spans);
+      EXPECT_EQ(0u, stats->num_empty_slot_spans);
+      EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
+    }
+
+    allocator.root()->Free(ptr2);
+  }
+}
+
+// Tests the API to purge freeable memory.
+TEST_P(PartitionAllocTest, Purge) {
+  char* ptr = static_cast<char*>(
+      allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name));
+  allocator.root()->Free(ptr);
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
+    EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
+  }
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+    EXPECT_EQ(0u, stats->resident_bytes);
+  }
+  // Calling purge again here is a good way of testing we didn't mess up the
+  // state of the free cache ring.
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+
+  // A single-slot but non-direct-mapped allocation size.
+  size_t single_slot_size = 512 * 1024;
+  ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
+  ASSERT_LE(single_slot_size, kMaxBucketed);
+  char* big_ptr =
+      static_cast<char*>(allocator.root()->Alloc(single_slot_size, type_name));
+  allocator.root()->Free(big_ptr);
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+
+  CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
+  CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
+}
+
+// Tests that we prefer to allocate into a non-empty partition page over an
+// empty one. This is an important aspect of minimizing memory usage for some
+// allocation sizes, particularly larger ones.
+TEST_P(PartitionAllocTest, PreferActiveOverEmpty) {
+  size_t size = (SystemPageSize() * 2) - ExtraAllocSize(allocator);
+  // Allocate 3 full slot spans worth of 8192-byte allocations.
+  // Each slot span for this size is 16384 bytes, or 1 partition page and 2
+  // slots.
+  void* ptr1 = allocator.root()->Alloc(size, type_name);
+  void* ptr2 = allocator.root()->Alloc(size, type_name);
+  void* ptr3 = allocator.root()->Alloc(size, type_name);
+  void* ptr4 = allocator.root()->Alloc(size, type_name);
+  void* ptr5 = allocator.root()->Alloc(size, type_name);
+  void* ptr6 = allocator.root()->Alloc(size, type_name);
+
+  SlotSpanMetadata* slot_span1 = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr1));
+  SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr3));
+  SlotSpanMetadata* slot_span3 = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr6));
+  EXPECT_NE(slot_span1, slot_span2);
+  EXPECT_NE(slot_span2, slot_span3);
+  PartitionBucket* bucket = slot_span1->bucket;
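+  // The newest slot span (holding ptr5 and ptr6) sits at the head of the
+  // active list.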
+  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
+
+  // Free up the 2nd slot in each slot span.
+  // This leaves the active list containing 3 slot spans, each with 1 used and 1
+  // free slot. The active slot span will be the one containing ptr1.
+  allocator.root()->Free(ptr6);
+  allocator.root()->Free(ptr4);
+  allocator.root()->Free(ptr2);
+  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
+
+  // Empty the middle slot span in the active list.
+  allocator.root()->Free(ptr3);
+  EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
+
+  // Empty the first slot span in the active list -- also the current slot span.
+  allocator.root()->Free(ptr1);
+
+  // A good choice here is to re-fill the third slot span since the first two
+  // are empty. We used to fail that.
+  void* ptr7 = allocator.root()->Alloc(size, type_name);
+  PA_EXPECT_PTR_EQ(ptr6, ptr7);
+  EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
+
+  allocator.root()->Free(ptr5);
+  allocator.root()->Free(ptr7);
+}
+
+// Tests the API to purge discardable memory.
+TEST_P(PartitionAllocTest, PurgeDiscardableSecondPage) {
+  // Free the second of two 4096 byte allocations and then purge.
+  void* ptr1 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name));
+  allocator.root()->Free(ptr2);
+  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr1));
+  EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
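+  // Two slots were provisioned for the two allocations; the purge below also
+  // unprovisions the slot freed above.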
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(SystemPageSize());
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
+    EXPECT_EQ(SystemPageSize(), stats->active_bytes);
+    EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
+  EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
+
+  allocator.root()->Free(ptr1);
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableFirstPage) {
+  // Free the first of two 4096 byte allocations and then purge.
+  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name));
+  void* ptr2 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  allocator.root()->Free(ptr1);
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(SystemPageSize());
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+#if BUILDFLAG(IS_WIN)
+    EXPECT_EQ(0u, stats->discardable_bytes);
+#else
+    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
+#endif
+    EXPECT_EQ(SystemPageSize(), stats->active_bytes);
+    EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
+
+  allocator.root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {
+  const size_t requested_size = 2.5 * SystemPageSize();
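+  // With 2.5-page slots, the four allocations below provision 10 system
+  // pages in total (checked against resident_bytes further down).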
+  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name));
+  void* ptr2 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  void* ptr3 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  void* ptr4 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
+  memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr2);
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(requested_size);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+#if BUILDFLAG(IS_WIN)
+    EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
+#else
+    EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
+#endif
+    EXPECT_EQ(requested_size * 2, stats->active_bytes);
+    EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  // Except for Windows, the first page is discardable because the freelist
+  // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
+  // checks for Linux and ChromeOS, not for Windows.
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
+
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr4);
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAllocOnSlotBoundary) {
+  const size_t requested_size = 2.5 * SystemPageSize();
+  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name));
+  void* ptr2 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  void* ptr3 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  void* ptr4 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
+  memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
+  allocator.root()->Free(ptr2);
+  allocator.root()->Free(ptr1);
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(requested_size);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+#if BUILDFLAG(IS_WIN)
+    EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
+#else
+    EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
+#endif
+    EXPECT_EQ(requested_size * 2, stats->active_bytes);
+    EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
+  // Except for Windows, the third page is discardable because the freelist
+  // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
+  // checks for Linux and ChromeOS, not for Windows.
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
+
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr4);
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableManyPages) {
+  // On systems with large pages, use fewer pages because:
+  // 1) There must be a bucket for kFirstAllocPages * SystemPageSize(), and
+  // 2) On low-end systems, using too many large pages can OOM during the test.
+  const bool kHasLargePages = SystemPageSize() > 4096;
+  const size_t kFirstAllocPages = kHasLargePages ? 32 : 64;
+  const size_t kSecondAllocPages = kHasLargePages ? 31 : 61;
+
+  // Detect case (1) from above.
+  PA_DCHECK(kFirstAllocPages * SystemPageSize() < (1UL << kMaxBucketedOrder));
+
+  const size_t kDeltaPages = kFirstAllocPages - kSecondAllocPages;
+
+  {
+    ScopedPageAllocation p(allocator, kFirstAllocPages);
+    p.TouchAllPages();
+  }
+
+  ScopedPageAllocation p(allocator, kSecondAllocPages);
+
+  MockPartitionStatsDumper dumper;
+  allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                              &dumper);
+  EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+  const PartitionBucketMemoryStats* stats =
+      dumper.GetBucketStats(kFirstAllocPages * SystemPageSize());
+  EXPECT_TRUE(stats);
+  EXPECT_TRUE(stats->is_valid);
+  EXPECT_EQ(0u, stats->decommittable_bytes);
+  EXPECT_EQ(kDeltaPages * SystemPageSize(), stats->discardable_bytes);
+  EXPECT_EQ(kSecondAllocPages * SystemPageSize(), stats->active_bytes);
+  EXPECT_EQ(kFirstAllocPages * SystemPageSize(), stats->resident_bytes);
+
+  for (size_t i = 0; i < kFirstAllocPages; i++) {
+    CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
+  }
+
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+
+  for (size_t i = 0; i < kSecondAllocPages; i++) {
+    CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
+  }
+  for (size_t i = kSecondAllocPages; i < kFirstAllocPages; i++) {
+    CHECK_PAGE_IN_CORE(p.PageAtIndex(i), false);
+  }
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableWithFreeListStraightening) {
+  // This sub-test tests truncation of the provisioned slots in a trickier
+  // case where the freelist is rewritten.
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name));
+  void* ptr2 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  void* ptr3 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  void* ptr4 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  ptr1[0] = 'A';
+  ptr1[SystemPageSize()] = 'A';
+  ptr1[SystemPageSize() * 2] = 'A';
+  ptr1[SystemPageSize() * 3] = 'A';
+  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr1));
+  allocator.root()->Free(ptr2);
+  allocator.root()->Free(ptr4);
+  allocator.root()->Free(ptr1);
+  EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
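+  // Freeing alone doesn't unprovision slots; that only happens when purging
+  // below.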
+
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(SystemPageSize());
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+#if BUILDFLAG(IS_WIN)
+    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
+#else
+    EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
+#endif
+    EXPECT_EQ(SystemPageSize(), stats->active_bytes);
+    EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
+
+  // Let's check we didn't brick the freelist.
+  void* ptr1b = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  PA_EXPECT_PTR_EQ(ptr1, ptr1b);
+  void* ptr2b = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  PA_EXPECT_PTR_EQ(ptr2, ptr2b);
+  EXPECT_FALSE(slot_span->get_freelist_head());  // ptr4 was unprovisioned
+  void* ptr4b = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  PA_EXPECT_PTR_EQ(ptr4, ptr4b);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+
+  // Free objects such that they're in this order on the list:
+  //   head -> ptr2 -> ptr3 -> ptr1
+  // However, ptr4 is still unfreed, preventing any unprovisioning.
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr2);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  // The test by default runs in
+  // StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning mode, so
+  // the freelist isn't modified and allocations happen in LIFO order.
+  ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr2, ptr2b);
+  void* ptr3b = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  PA_EXPECT_PTR_EQ(ptr3, ptr3b);
+  ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr1, ptr1b);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+
+  // Free objects such that they're in this order on the list:
+  //   head -> ptr2 -> ptr3 -> ptr1
+  // However, ptr4 is still unfreed, preventing any unprovisioning.
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr2);
+  PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
+      StraightenLargerSlotSpanFreeListsMode::kAlways);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  // In StraightenLargerSlotSpanFreeListsMode::kAlways mode, the freelist is
+  // ordered from left to right.
+  ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr1, ptr1b);
+  ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr2, ptr2b);
+  ptr3b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr3, ptr3b);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+
+  // Free objects such that they're in this order on the list:
+  //   head -> ptr2 -> ptr4 -> ptr1
+  // ptr3 is still unfreed, preventing unprovisioning of ptr1 and ptr2, but
+  // not ptr4.
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr4);
+  allocator.root()->Free(ptr2);
+  PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
+      StraightenLargerSlotSpanFreeListsMode::kNever);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  // In StraightenLargerSlotSpanFreeListsMode::kNever mode, unprovisioned
+  // entries will be removed from the freelist but the list won't be reordered.
+  ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr2, ptr2b);
+  ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr1, ptr1b);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+  ptr4b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
+                                  type_name);
+  PA_EXPECT_PTR_EQ(ptr4, ptr4b);
+  EXPECT_FALSE(slot_span->get_freelist_head());
+
+  // Clean up.
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr2);
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr4);
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {
+  // This sub-test is similar, but tests a double-truncation.
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name));
+  void* ptr2 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  void* ptr3 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  void* ptr4 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  ptr1[0] = 'A';
+  ptr1[SystemPageSize()] = 'A';
+  ptr1[SystemPageSize() * 2] = 'A';
+  ptr1[SystemPageSize() * 3] = 'A';
+  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr1));
+  allocator.root()->Free(ptr4);
+  allocator.root()->Free(ptr3);
+  EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
+
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(SystemPageSize());
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+    EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
+    EXPECT_EQ(2 * SystemPageSize(), stats->active_bytes);
+    EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
+
+  EXPECT_FALSE(slot_span->get_freelist_head());
+
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocTest, PurgeDiscardableSmallSlotsWithTruncate) {
+  size_t requested_size = 0.5 * SystemPageSize();
+  char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name));
+  void* ptr2 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  void* ptr3 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  void* ptr4 = allocator.root()->Alloc(
+      requested_size - ExtraAllocSize(allocator), type_name);
+  allocator.root()->Free(ptr3);
+  allocator.root()->Free(ptr4);
+  SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
+      allocator.root()->ObjectToSlotStart(ptr1));
+  EXPECT_EQ(4u, slot_span->num_unprovisioned_slots);
+  {
+    MockPartitionStatsDumper dumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &dumper);
+    EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats =
+        dumper.GetBucketStats(requested_size);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+    EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
+    EXPECT_EQ(requested_size * 2, stats->active_bytes);
+    EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
+  }
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+  CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
+  EXPECT_EQ(6u, slot_span->num_unprovisioned_slots);
+
+  allocator.root()->Free(ptr1);
+  allocator.root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocTest, ActiveListMaintenance) {
+  size_t size = SystemPageSize() - ExtraAllocSize(allocator);
+  size_t real_size = size + ExtraAllocSize(allocator);
+  size_t bucket_index =
+      allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
+  PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
+  ASSERT_EQ(bucket->slot_size, real_size);
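+  // Each slot is exactly one system page here (real_size == SystemPageSize()),
+  // so the number of system pages per slot span equals the slot count.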
+  size_t slots_per_span = bucket->num_system_pages_per_slot_span;
+
+  // Make 10 full slot spans.
+  constexpr int kSpans = 10;
+  std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
+  for (int span_index = 0; span_index < kSpans; span_index++) {
+    for (size_t i = 0; i < slots_per_span; i++) {
+      allocated_memory_spans[span_index].push_back(
+          allocator.root()->Alloc(size));
+    }
+  }
+
+  // Free one entry in the middle span, creating a partial slot span.
+  constexpr size_t kSpanIndex = 5;
+  allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
+  allocated_memory_spans[kSpanIndex].pop_back();
+
+  // Empty the last slot span.
+  for (void* ptr : allocated_memory_spans[kSpans - 1]) {
+    allocator.root()->Free(ptr);
+  }
+  allocated_memory_spans.pop_back();
+
+  // The active list now is:
+  // Partial -> Empty -> Full -> Full -> ... -> Full
+  bucket->MaintainActiveList();
+
+  // Only one entry in the active list.
+  ASSERT_NE(bucket->active_slot_spans_head,
+            SlotSpanMetadata::get_sentinel_slot_span());
+  EXPECT_FALSE(bucket->active_slot_spans_head->next_slot_span);
+
+  // The empty list has 1 entry.
+  ASSERT_NE(bucket->empty_slot_spans_head,
+            SlotSpanMetadata::get_sentinel_slot_span());
+  EXPECT_FALSE(bucket->empty_slot_spans_head->next_slot_span);
+
+  // The rest are full slot spans.
+  EXPECT_EQ(8u, bucket->num_full_slot_spans);
+
+  // Free all memory.
+  for (const auto& span : allocated_memory_spans) {
+    for (void* ptr : span) {
+      allocator.root()->Free(ptr);
+    }
+  }
+}
+
+TEST_P(PartitionAllocTest, ReallocMovesCookie) {
+  // Resize so as to be sure to hit a "resize in place" case, and ensure that
+  // use of the entire result is compatible with the debug mode's cookie, even
+  // when the bucket size is large enough to span more than one partition page
+  // and we can track the "raw" size. See https://crbug.com/709271
+  static const size_t kSize = MaxRegularSlotSpanSize();
+  void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
+  EXPECT_TRUE(ptr);
+
+  memset(ptr, 0xbd, kSize + 1);
+  ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
+  EXPECT_TRUE(ptr);
+
+  memset(ptr, 0xbd, kSize + 2);
+  allocator.root()->Free(ptr);
+}
+
+TEST_P(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
+  // For crbug.com/781473
+  static constexpr size_t kSize = 264;
+  void* ptr = allocator.root()->Alloc(kSize, type_name);
+  EXPECT_TRUE(ptr);
+
+  ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
+  EXPECT_TRUE(ptr);
+
+  allocator.root()->Free(ptr);
+}
+
+TEST_P(PartitionAllocTest, ZeroFill) {
+  static constexpr size_t kAllZerosSentinel =
+      std::numeric_limits<size_t>::max();
+  for (size_t size : kTestSizes) {
+    char* p = static_cast<char*>(
+        allocator.root()->Alloc<AllocFlags::kZeroFill>(size));
+    size_t non_zero_position = kAllZerosSentinel;
+    for (size_t i = 0; i < size; ++i) {
+      if (0 != p[i]) {
+        non_zero_position = i;
+        break;
+      }
+    }
+    EXPECT_EQ(kAllZerosSentinel, non_zero_position)
+        << "test allocation size: " << size;
+    allocator.root()->Free(p);
+  }
+
+  for (int i = 0; i < 10; ++i) {
+    SCOPED_TRACE(i);
+    AllocateRandomly<AllocFlags::kZeroFill>(allocator.root(), 250);
+  }
+}
+
+TEST_P(PartitionAllocTest, SchedulerLoopQuarantine) {
+  SchedulerLoopQuarantine& list =
+      allocator.root()->GetSchedulerLoopQuarantineForTesting();
+
+  constexpr size_t kCapacityInBytes = std::numeric_limits<size_t>::max();
+  size_t original_capacity_in_bytes = list.GetCapacityInBytes();
+  list.SetCapacityInBytesForTesting(kCapacityInBytes);
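+  // With an effectively unlimited capacity, nothing is evicted from the
+  // quarantine during this test, so every freed object must still be there.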
+
+  for (size_t size : kTestSizes) {
+    SCOPED_TRACE(size);
+
+    void* object = allocator.root()->Alloc(size);
+    allocator.root()->Free<FreeFlags::kSchedulerLoopQuarantine>(object);
+
+    ASSERT_TRUE(list.IsQuarantinedForTesting(object));
+  }
+
+  for (int i = 0; i < 10; ++i) {
+    SCOPED_TRACE(i);
+    AllocateRandomly<AllocFlags::kNone, FreeFlags::kSchedulerLoopQuarantine>(
+        allocator.root(), 250);
+  }
+
+  list.Purge();
+  list.SetCapacityInBytesForTesting(original_capacity_in_bytes);
+}
+
+TEST_P(PartitionAllocTest, Bug_897585) {
+  // Need sizes big enough to be direct mapped and a delta small enough to
+  // allow re-use of the slot span when cookied. These numbers fall out of the
+  // test case in the indicated bug.
+  size_t kInitialSize = 983050;
+  size_t kDesiredSize = 983100;
+  ASSERT_GT(kInitialSize, kMaxBucketed);
+  ASSERT_GT(kDesiredSize, kMaxBucketed);
+  void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(kInitialSize);
+  ASSERT_NE(nullptr, ptr);
+  ptr = allocator.root()->Realloc<AllocFlags::kReturnNull>(ptr, kDesiredSize,
+                                                           nullptr);
+  ASSERT_NE(nullptr, ptr);
+  memset(ptr, 0xbd, kDesiredSize);
+  allocator.root()->Free(ptr);
+}
+
+TEST_P(PartitionAllocTest, OverrideHooks) {
+  constexpr size_t kOverriddenSize = 1234;
+  constexpr const char* kOverriddenType = "Overridden type";
+  constexpr unsigned char kOverriddenChar = 'A';
+
+  // Marked static so that we can use them in non-capturing lambdas below.
+  // (Non-capturing lambdas convert directly to function pointers.)
+  static volatile bool free_called = false;
+  static void* overridden_allocation = nullptr;
+  overridden_allocation = malloc(kOverriddenSize);
+  memset(overridden_allocation, kOverriddenChar, kOverriddenSize);
+
+  PartitionAllocHooks::SetOverrideHooks(
+      [](void** out, AllocFlags flags, size_t size,
+         const char* type_name) -> bool {
+        if (size == kOverriddenSize && type_name == kOverriddenType) {
+          *out = overridden_allocation;
+          return true;
+        }
+        return false;
+      },
+      [](void* address) -> bool {
+        if (address == overridden_allocation) {
+          free_called = true;
+          return true;
+        }
+        return false;
+      },
+      [](size_t* out, void* address) -> bool {
+        if (address == overridden_allocation) {
+          *out = kOverriddenSize;
+          return true;
+        }
+        return false;
+      });
+
+  void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(kOverriddenSize,
+                                                               kOverriddenType);
+  ASSERT_EQ(ptr, overridden_allocation);
+
+  allocator.root()->Free(ptr);
+  EXPECT_TRUE(free_called);
+
+  // overridden_allocation has not actually been freed so we can now immediately
+  // realloc it.
+  free_called = false;
+  ptr = allocator.root()->Realloc<AllocFlags::kReturnNull>(ptr, 1, nullptr);
+  ASSERT_NE(ptr, nullptr);
+  EXPECT_NE(ptr, overridden_allocation);
+  EXPECT_TRUE(free_called);
+  EXPECT_EQ(*(char*)ptr, kOverriddenChar);
+  allocator.root()->Free(ptr);
+
+  PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
+  free(overridden_allocation);
+}
+
+TEST_P(PartitionAllocTest, Alignment) {
+  std::vector<void*> allocated_ptrs;
+
+  for (size_t size = 1; size <= PartitionPageSize(); size <<= 1) {
+    if (size <= ExtraAllocSize(allocator)) {
+      continue;
+    }
+    size_t requested_size = size - ExtraAllocSize(allocator);
+
+    // All allocations which are not direct-mapped occupy contiguous slots of a
+    // span, starting on a page boundary. This means that allocations are first
+    // rounded up to the nearest bucket size, then have an address of the form:
+    //   (partition-page-aligned address) + i * bucket_size.
+    //
+    // All powers of two are bucket sizes, meaning that all power of two
+    // allocations smaller than a page will be aligned on the allocation size.
+    size_t expected_alignment = size;
+    for (int index = 0; index < 3; index++) {
+      void* ptr = allocator.root()->Alloc(requested_size);
+      allocated_ptrs.push_back(ptr);
+      EXPECT_EQ(0u,
+                allocator.root()->ObjectToSlotStart(ptr) % expected_alignment)
+          << (index + 1) << "-th allocation of size=" << size;
+    }
+  }
+
+  for (void* ptr : allocated_ptrs) {
+    allocator.root()->Free(ptr);
+  }
+}
+
+TEST_P(PartitionAllocTest, FundamentalAlignment) {
+  // See the test above for details. Essentially, checking the bucket size is
+  // sufficient to ensure that alignment will always be respected, as long as
+  // the fundamental alignment is <= 16 bytes.
+  size_t fundamental_alignment = kAlignment;
+  for (size_t size = 0; size < SystemPageSize(); size++) {
+    // Allocate several pointers, as the first one in use in a size class will
+    // be aligned on a page boundary.
+    void* ptr = allocator.root()->Alloc(size);
+    void* ptr2 = allocator.root()->Alloc(size);
+    void* ptr3 = allocator.root()->Alloc(size);
+
+    EXPECT_EQ(UntagPtr(ptr) % fundamental_alignment, 0u);
+    EXPECT_EQ(UntagPtr(ptr2) % fundamental_alignment, 0u);
+    EXPECT_EQ(UntagPtr(ptr3) % fundamental_alignment, 0u);
+
+    uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+    // The capacity(C) is slot size - ExtraAllocSize(allocator).
+    // Since slot size is multiples of kAlignment,
+    // C % kAlignment == (slot_size - ExtraAllocSize(allocator)) % kAlignment.
+    // C % kAlignment == (-ExtraAllocSize(allocator)) % kAlignment.
+    // Since kCookieSize is a multiple of kAlignment,
+    // C % kAlignment == (-kInSlotRefCountBufferSize) % kAlignment
+    // == (kAlignment - kInSlotRefCountBufferSize) % kAlignment.
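+    // As a concrete (hypothetical) example: with kAlignment == 16 and
+    // ExtraAllocSize(allocator) == 8, the capacity satisfies C % 16 == 8.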
+    EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
+                  fundamental_alignment,
+              UseBRPPool()
+                  ? (-ExtraAllocSize(allocator) % fundamental_alignment)
+                  : 0);
+#else
+    EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
+                  fundamental_alignment,
+              -ExtraAllocSize(allocator) % fundamental_alignment);
+#endif
+
+    allocator.root()->Free(ptr);
+    allocator.root()->Free(ptr2);
+    allocator.root()->Free(ptr3);
+  }
+}
+
+void VerifyAlignment(PartitionRoot* root, size_t size, size_t alignment) {
+  std::vector<void*> allocated_ptrs;
+
+  for (int index = 0; index < 3; index++) {
+    void* ptr = root->AlignedAlloc(alignment, size);
+    ASSERT_TRUE(ptr);
+    allocated_ptrs.push_back(ptr);
+    EXPECT_EQ(0ull, UntagPtr(ptr) % alignment)
+        << (index + 1) << "-th allocation of size=" << size
+        << ", alignment=" << alignment;
+  }
+
+  for (void* ptr : allocated_ptrs) {
+    root->Free(ptr);
+  }
+}
+
+TEST_P(PartitionAllocTest, AlignedAllocations) {
+  size_t alloc_sizes[] = {1,
+                          10,
+                          100,
+                          1000,
+                          10000,
+                          60000,
+                          70000,
+                          130000,
+                          500000,
+                          900000,
+                          kMaxBucketed + 1,
+                          2 * kMaxBucketed,
+                          kSuperPageSize - 2 * PartitionPageSize(),
+                          4 * kMaxBucketed};
+  for (size_t alloc_size : alloc_sizes) {
+    for (size_t alignment = 1; alignment <= kMaxSupportedAlignment;
+         alignment <<= 1) {
+      VerifyAlignment(aligned_allocator.root(), alloc_size, alignment);
+
+      // Verify alignment on the regular allocator only when BRP is off, or when
+      // it's on in the "previous slot" mode. See the comment in SetUp().
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+    BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+      VerifyAlignment(allocator.root(), alloc_size, alignment);
+#endif
+    }
+  }
+}
+
+// Test that the optimized `GetSlotNumber` implementation produces valid
+// results.
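+// GetSlotNumber(offset) is expected to behave like offset / bucket.slot_size;
+// the loop below probes the start, middle, and last byte of every slot.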
+TEST_P(PartitionAllocTest, OptimizedGetSlotNumber) {
+  for (size_t i = 0; i < kNumBuckets; ++i) {
+    auto& bucket = allocator.root()->buckets[i];
+    if (SizeToIndex(bucket.slot_size) != i) {
+      continue;
+    }
+    for (size_t slot = 0, offset = 0; slot < bucket.get_slots_per_span();
+         ++slot, offset += bucket.slot_size) {
+      EXPECT_EQ(slot, bucket.GetSlotNumber(offset));
+      EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size / 2));
+      EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size - 1));
+    }
+  }
+}
+
+TEST_P(PartitionAllocTest, GetUsableSizeNull) {
+  EXPECT_EQ(0ULL, PartitionRoot::GetUsableSize(nullptr));
+}
+
+TEST_P(PartitionAllocTest, GetUsableSize) {
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+  allocator.root()->EnableMac11MallocSizeHackForTesting(
+      GetParam().ref_count_size);
+#endif
+  size_t delta = 31;
+  for (size_t size = 1; size <= kMinDirectMappedDownsize; size += delta) {
+    void* ptr = allocator.root()->Alloc(size);
+    EXPECT_TRUE(ptr);
+    size_t usable_size = PartitionRoot::GetUsableSize(ptr);
+    size_t usable_size_with_hack =
+        PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+    if (size != internal::kMac11MallocSizeHackRequestedSize)
+#endif
+      EXPECT_EQ(usable_size_with_hack, usable_size);
+    EXPECT_LE(size, usable_size);
+    memset(ptr, 0xDE, usable_size);
+    // Should not crash when freeing the ptr.
+    allocator.root()->Free(ptr);
+  }
+}
+
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+TEST_P(PartitionAllocTest, GetUsableSizeWithMac11MallocSizeHack) {
+  if (internal::base::mac::MacOSMajorVersion() != 11) {
+    GTEST_SKIP() << "Skipping because the test is for Mac11.";
+  }
+
+  allocator.root()->EnableMac11MallocSizeHackForTesting(
+      GetParam().ref_count_size);
+  size_t size = internal::kMac11MallocSizeHackRequestedSize;
+  void* ptr = allocator.root()->Alloc(size);
+  size_t usable_size = PartitionRoot::GetUsableSize(ptr);
+  size_t usable_size_with_hack =
+      PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
+  EXPECT_EQ(usable_size,
+            allocator.root()->settings.mac11_malloc_size_hack_usable_size_);
+  EXPECT_EQ(usable_size_with_hack, size);
+
+  allocator.root()->Free(ptr);
+}
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+
+TEST_P(PartitionAllocTest, Bookkeeping) {
+  auto& root = *allocator.root();
+
+  EXPECT_EQ(0U, root.total_size_of_committed_pages);
+  EXPECT_EQ(0U, root.max_size_of_committed_pages);
+  EXPECT_EQ(0U, root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(0U, root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(0U, root.total_size_of_super_pages);
+  size_t small_size = 1000;
+
+  // A full slot span of size 1 partition page is committed.
+  void* ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
+  // Lazy commit commits only needed pages.
+  size_t expected_committed_size =
+      kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
+  size_t expected_super_pages_size = kSuperPageSize;
+  size_t expected_max_committed_size = expected_committed_size;
+  size_t bucket_index = SizeToIndex(small_size - ExtraAllocSize(allocator));
+  PartitionBucket* bucket = &root.buckets[bucket_index];
+  size_t expected_total_allocated_size = bucket->slot_size;
+  size_t expected_max_allocated_size = expected_total_allocated_size;
+
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  expected_total_allocated_size = 0U;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Allocating the same size lands it in the same slot span.
+  ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Allocating another size commits another slot span.
+  ptr = root.Alloc(2 * small_size - ExtraAllocSize(allocator), type_name);
+  expected_committed_size +=
+      kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
+  expected_max_committed_size =
+      std::max(expected_max_committed_size, expected_committed_size);
+  expected_max_allocated_size =
+      std::max(expected_max_allocated_size, static_cast<size_t>(2048));
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Single-slot slot spans...
+  //
+  // When the system page size is larger than 4KiB, we don't necessarily have
+  // enough space in the superpage to store two of the largest bucketed
+  // allocations, particularly when we reserve extra space for e.g. bitmaps.
+  // To avoid this, we use something just below kMaxBucketed.
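+  // (kMaxBucketed * 4 / 5 - SystemPageSize() keeps the allocation under
+  // kMaxBucketed while still exceeding MaxRegularSlotSpanSize(); the asserts
+  // below check both bounds.)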
+  size_t big_size = kMaxBucketed * 4 / 5 - SystemPageSize();
+
+  ASSERT_GT(big_size, MaxRegularSlotSpanSize());
+  ASSERT_LE(big_size, kMaxBucketed);
+  bucket_index = SizeToIndex(big_size - ExtraAllocSize(allocator));
+  bucket = &root.buckets[bucket_index];
+  // Assert that the allocation fills neither the entire span nor an entire
+  // partition page, to make the test more interesting.
+  ASSERT_LT(big_size, bucket->get_bytes_per_span());
+  ASSERT_NE(big_size % PartitionPageSize(), 0U);
+  ptr = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
+  expected_committed_size += bucket->get_bytes_per_span();
+  expected_max_committed_size =
+      std::max(expected_max_committed_size, expected_committed_size);
+  expected_total_allocated_size += bucket->get_bytes_per_span();
+  expected_max_allocated_size =
+      std::max(expected_max_allocated_size, expected_total_allocated_size);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Allocating a 2nd time doesn't overflow the super page...
+  void* ptr2 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
+  expected_committed_size += bucket->get_bytes_per_span();
+  expected_max_committed_size =
+      std::max(expected_max_committed_size, expected_committed_size);
+  expected_total_allocated_size += bucket->get_bytes_per_span();
+  expected_max_allocated_size =
+      std::max(expected_max_allocated_size, expected_total_allocated_size);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // ... but a 3rd time does.
+  void* ptr3 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
+  expected_committed_size += bucket->get_bytes_per_span();
+  expected_max_committed_size =
+      std::max(expected_max_committed_size, expected_committed_size);
+  expected_total_allocated_size += bucket->get_bytes_per_span();
+  expected_max_allocated_size =
+      std::max(expected_max_allocated_size, expected_total_allocated_size);
+  expected_super_pages_size += kSuperPageSize;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  root.Free(ptr2);
+  root.Free(ptr3);
+  expected_total_allocated_size -= 3 * bucket->get_bytes_per_span();
+  expected_max_allocated_size =
+      std::max(expected_max_allocated_size, expected_total_allocated_size);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Now everything should be decommitted. The reserved space for super pages
+  // stays the same and will never go away (by design).
+  root.PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+  expected_committed_size = 0;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+  EXPECT_EQ(expected_total_allocated_size,
+            root.get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_max_allocated_size,
+            root.get_max_size_of_allocated_bytes());
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // None of the above should affect the direct map space.
+  EXPECT_EQ(0U, root.total_size_of_direct_mapped_pages);
+
+  size_t huge_sizes[] = {
+      kMaxBucketed + SystemPageSize(),
+      kMaxBucketed + SystemPageSize() + 123,
+      kSuperPageSize - PageAllocationGranularity(),
+      kSuperPageSize - SystemPageSize() - PartitionPageSize(),
+      kSuperPageSize - PartitionPageSize(),
+      kSuperPageSize - SystemPageSize(),
+      kSuperPageSize,
+      kSuperPageSize + SystemPageSize(),
+      kSuperPageSize + PartitionPageSize(),
+      kSuperPageSize + SystemPageSize() + PartitionPageSize(),
+      kSuperPageSize + PageAllocationGranularity(),
+      kSuperPageSize + DirectMapAllocationGranularity(),
+  };
+  size_t alignments[] = {
+      PartitionPageSize(),
+      2 * PartitionPageSize(),
+      kMaxSupportedAlignment / 2,
+      kMaxSupportedAlignment,
+  };
+  for (size_t huge_size : huge_sizes) {
+    ASSERT_GT(huge_size, kMaxBucketed);
+    for (size_t alignment : alignments) {
+      // For direct map, we commit only as many pages as needed.
+      size_t aligned_size = partition_alloc::internal::base::bits::AlignUp(
+          huge_size, SystemPageSize());
+      ptr = root.AllocInternalForTesting(huge_size - ExtraAllocSize(allocator),
+                                         alignment, type_name);
+      expected_committed_size += aligned_size;
+      expected_max_committed_size =
+          std::max(expected_max_committed_size, expected_committed_size);
+      expected_total_allocated_size += aligned_size;
+      expected_max_allocated_size =
+          std::max(expected_max_allocated_size, expected_total_allocated_size);
+      // The total reserved mapping includes metadata and guard pages at the
+      // ends, as well as alignment padding. However, these would double-count
+      // the first partition page, so it needs to be subtracted.
+      size_t surrounding_pages_size =
+          PartitionRoot::GetDirectMapMetadataAndGuardPagesSize() + alignment -
+          PartitionPageSize();
+      size_t expected_direct_map_size =
+          partition_alloc::internal::base::bits::AlignUp(
+              aligned_size + surrounding_pages_size,
+              DirectMapAllocationGranularity());
+      EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+      EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+      EXPECT_EQ(expected_total_allocated_size,
+                root.get_total_size_of_allocated_bytes());
+      EXPECT_EQ(expected_max_allocated_size,
+                root.get_max_size_of_allocated_bytes());
+      EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+      EXPECT_EQ(expected_direct_map_size,
+                root.total_size_of_direct_mapped_pages);
+
+      // Freeing memory in the direct map decommits pages right away. The
+      // address space is released for re-use too.
+      root.Free(ptr);
+      expected_committed_size -= aligned_size;
+      expected_direct_map_size = 0;
+      expected_max_committed_size =
+          std::max(expected_max_committed_size, expected_committed_size);
+      expected_total_allocated_size -= aligned_size;
+      expected_max_allocated_size =
+          std::max(expected_max_allocated_size, expected_total_allocated_size);
+      EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+      EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
+      EXPECT_EQ(expected_total_allocated_size,
+                root.get_total_size_of_allocated_bytes());
+      EXPECT_EQ(expected_max_allocated_size,
+                root.get_max_size_of_allocated_bytes());
+      EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+      EXPECT_EQ(expected_direct_map_size,
+                root.total_size_of_direct_mapped_pages);
+    }
+  }
+}
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+TEST_P(PartitionAllocTest, RefCountBasic) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  constexpr uint64_t kCookie = 0x1234567890ABCDEF;
+  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
+
+  size_t alloc_size = 64 - ExtraAllocSize(allocator);
+  uint64_t* ptr1 =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  EXPECT_TRUE(ptr1);
+
+  *ptr1 = kCookie;
+
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
+  EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
+
+  ref_count->Acquire();
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_TRUE(ref_count->IsAliveWithNoKnownRefs());
+  EXPECT_EQ(*ptr1, kCookie);
+
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_FALSE(ref_count->IsAliveWithNoKnownRefs());
+
+  allocator.root()->Free(ptr1);
+  // The allocation shouldn't be reclaimed, and its contents should be zapped.
+  // Retag ptr1 to get its correct MTE tag.
+  ptr1 = TagPtr(ptr1);
+  EXPECT_NE(*ptr1, kCookie);
+  EXPECT_EQ(*ptr1, kQuarantined);
+
+  // The allocator should not reuse the original slot since its reference count
+  // doesn't equal zero.
+  uint64_t* ptr2 =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  EXPECT_NE(ptr1, ptr2);
+  allocator.root()->Free(ptr2);
+
+  // When the last reference is released, the slot should become reusable.
+  // Retag ref_count because PartitionAlloc retags ptr to enforce quarantine.
+  ref_count = TagPtr(ref_count);
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
+  uint64_t* ptr3 =
+      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
+  EXPECT_EQ(ptr1, ptr3);
+  allocator.root()->Free(ptr3);
+}
+
+void PartitionAllocTest::RunRefCountReallocSubtest(size_t orig_size,
+                                                   size_t new_size) {
+  void* ptr1 = allocator.root()->Alloc(orig_size, type_name);
+  EXPECT_TRUE(ptr1);
+
+  auto* ref_count1 =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr1));
+  EXPECT_TRUE(ref_count1->IsAliveWithNoKnownRefs());
+
+  ref_count1->AcquireFromUnprotectedPtr();
+  EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
+
+  void* ptr2 = allocator.root()->Realloc(ptr1, new_size, type_name);
+  EXPECT_TRUE(ptr2);
+
+  // PartitionAlloc may retag memory areas on realloc (even if they
+  // do not move), so recover the true tag here.
+  ref_count1 = TagPtr(ref_count1);
+
+  // Re-query ref-count. It may have moved if Realloc changed the slot.
+  auto* ref_count2 =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr2));
+
+  if (UntagPtr(ptr1) == UntagPtr(ptr2)) {
+    // If the slot didn't change, ref-count should stay the same.
+    EXPECT_EQ(ref_count1, ref_count2);
+    EXPECT_FALSE(ref_count2->IsAliveWithNoKnownRefs());
+
+    EXPECT_FALSE(ref_count2->ReleaseFromUnprotectedPtr());
+  } else {
+    // If the allocation was moved to another slot, the old ref-count stayed
+    // in the same location in memory, is no longer alive, but still has a
+    // reference. The new ref-count is alive, but has no references.
+    EXPECT_NE(ref_count1, ref_count2);
+    EXPECT_FALSE(ref_count1->IsAlive());
+    EXPECT_FALSE(ref_count1->IsAliveWithNoKnownRefs());
+    EXPECT_TRUE(ref_count2->IsAliveWithNoKnownRefs());
+
+    EXPECT_TRUE(ref_count1->ReleaseFromUnprotectedPtr());
+    PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
+  }
+
+  allocator.root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocTest, RefCountRealloc) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  size_t alloc_sizes[] = {500, 5000, 50000, 400000};
+
+  for (size_t alloc_size : alloc_sizes) {
+    alloc_size -= ExtraAllocSize(allocator);
+    RunRefCountReallocSubtest(alloc_size, alloc_size - 9);
+    RunRefCountReallocSubtest(alloc_size, alloc_size + 9);
+    RunRefCountReallocSubtest(alloc_size, alloc_size * 2);
+    RunRefCountReallocSubtest(alloc_size, alloc_size / 2);
+  }
+}
+
+int g_unretained_dangling_raw_ptr_detected_count = 0;
+
+class UnretainedDanglingRawPtrTest : public PartitionAllocTest {
+ public:
+  void SetUp() override {
+    PartitionAllocTest::SetUp();
+    g_unretained_dangling_raw_ptr_detected_count = 0;
+    old_detected_fn_ = partition_alloc::GetUnretainedDanglingRawPtrDetectedFn();
+
+    partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
+        &UnretainedDanglingRawPtrTest::DanglingRawPtrDetected);
+    old_unretained_dangling_ptr_enabled_ =
+        partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(true);
+  }
+  void TearDown() override {
+    partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(old_detected_fn_);
+    partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(
+        old_unretained_dangling_ptr_enabled_);
+    PartitionAllocTest::TearDown();
+  }
+
+ private:
+  static void DanglingRawPtrDetected(uintptr_t) {
+    g_unretained_dangling_raw_ptr_detected_count++;
+  }
+
+  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
+  bool old_unretained_dangling_ptr_enabled_;
+};
+
+INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
+                         UnretainedDanglingRawPtrTest,
+                         testing::ValuesIn(GetPartitionAllocTestParams()));
+
+TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrNoReport) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  ref_count->Acquire();
+  EXPECT_TRUE(ref_count->IsAlive());
+  // Allocation is still live, so calling ReportIfDangling() should not result
+  // in any detections.
+  ref_count->ReportIfDangling();
+  EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 0);
+  EXPECT_FALSE(ref_count->Release());
+  allocator.root()->Free(ptr);
+}
+
+TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrShouldReport) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_TRUE(ref_count->IsAlive());
+  allocator.root()->Free(ptr);
+  // At this point, memory shouldn't be alive...
+  EXPECT_FALSE(ref_count->IsAlive());
+  // ...and we should report the ptr as dangling.
+  ref_count->ReportIfDangling();
+  EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 1);
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+TEST_P(PartitionAllocTest, BackupRefPtrGuardRegion) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  size_t alignment = internal::PageAllocationGranularity();
+
+  uintptr_t requested_address;
+  memset(&requested_address, internal::kQuarantinedByte,
+         sizeof(requested_address));
+  requested_address = RoundDownToPageAllocationGranularity(requested_address);
+
+  uintptr_t allocated_address =
+      AllocPages(requested_address, alignment, alignment,
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWrite),
+                 PageTag::kPartitionAlloc);
+  EXPECT_NE(allocated_address, requested_address);
+
+  if (allocated_address) {
+    FreePages(allocated_address, alignment);
+  }
+}
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+
+// Allocate memory, and reference it from 3 raw_ptr. Among them 2 will be
+// dangling.
+TEST_P(PartitionAllocTest, DanglingPtr) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  // Allocate memory, and reference it from 3 raw_ptr.
+  uint64_t* ptr = static_cast<uint64_t*>(
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  ref_count->Acquire();
+  ref_count->Acquire();
+  ref_count->Acquire();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The first raw_ptr stops referencing it, before the memory has been
+  // released.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+  // Free it. This creates two dangling pointers.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The dangling raw_ptr stops referencing it.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The dangling raw_ptr stops referencing it again.
+  EXPECT_TRUE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+#else
+  // Free it. This creates two dangling pointers.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The dangling raw_ptr stops referencing it.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
+
+  // The dangling raw_ptr stops referencing it again.
+  EXPECT_TRUE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
+#endif
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+// Allocate memory, and reference it from 3
+// raw_ptr<T, DisableDanglingPtrDetection>. Among them 2 will be dangling. This
+// doesn't trigger any dangling raw_ptr checks.
+TEST_P(PartitionAllocTest, DanglingDanglingPtr) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  // Allocate memory, and reference it from 3 raw_ptr.
+  uint64_t* ptr = static_cast<uint64_t*>(
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  ref_count->AcquireFromUnprotectedPtr();
+  ref_count->AcquireFromUnprotectedPtr();
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The first raw_ptr<T, DisableDanglingPtrDetection> stops referencing it,
+  // before the memory has been released.
+  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // Free it. This creates two dangling raw_ptr<T, DisableDanglingPtrDetection>.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it
+  // again.
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+// When 'free' is called, one raw_ptr<> and one
+// raw_ptr<T, DisableDanglingPtrDetection> remain. The raw_ptr<> is released
+// first.
+TEST_P(PartitionAllocTest, DanglingMixedReleaseRawPtrFirst) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  uint64_t* ptr = static_cast<uint64_t*>(
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
+  ref_count->AcquireFromUnprotectedPtr();
+  ref_count->Acquire();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+  // Free it.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<> stops referencing it.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+#else
+  // Free it.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<> stops referencing it.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
+#endif
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+// When 'free' is called, one raw_ptr<> and one
+// raw_ptr<T, DisableDanglingPtrDetection> remain.
+// The raw_ptr<T, DisableDanglingPtrDetection> is released first. This
+// triggers the dangling raw_ptr<> checks.
+TEST_P(PartitionAllocTest, DanglingMixedReleaseDanglingPtrFirst) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  void* ptr =
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
+  ref_count->AcquireFromUnprotectedPtr();
+  ref_count->Acquire();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+  // Free it.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<> stops referencing it.
+  EXPECT_TRUE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+#else
+  // Free it.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<> stops referencing it.
+  EXPECT_TRUE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
+#endif
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+// When 'free' is called, one raw_ptr<T, DisableDanglingPtrDetection> remains.
+// It is then used to acquire one dangling raw_ptr<>. The raw_ptr<> is
+// released first.
+TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtr) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  void* ptr =
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // Free it once.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // A raw_ptr<> starts referencing it.
+  ref_count->Acquire();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<> stops referencing it.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+// Same as 'DanglingPtrUsedToAcquireNewRawPtr', but release the
+// raw_ptr<T, DisableDanglingPtrDetection> before the raw_ptr<>.
+TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtrVariant) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  void* ptr =
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // Free it.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // A raw_ptr<> starts referencing it.
+  ref_count->Acquire();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<> stops referencing it.
+  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_TRUE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+// Acquire a raw_ptr<T>, and release it before freeing memory. In the
+// background, there is one raw_ptr<T, DisableDanglingPtrDetection>. This
+// doesn't trigger any dangling raw_ptr<T> checks.
+TEST_P(PartitionAllocTest, RawPtrReleasedBeforeFree) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  CountDanglingRawPtr dangling_checks;
+
+  void* ptr =
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
+  ref_count->Acquire();
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // Release the raw_ptr<>.
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // Free it.
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
+  EXPECT_TRUE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
+  EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
+
+  PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
+}
+
+#if defined(PA_HAS_DEATH_TESTS)
+// DCHECK messages are stripped in official builds, which causes death tests
+// with matchers to fail.
+#if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
+
+// Acquire() once, Release() twice => CRASH
+TEST_P(PartitionAllocDeathTest, ReleaseUnderflowRawPtr) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  void* ptr =
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  ref_count->Acquire();
+  EXPECT_FALSE(ref_count->Release());
+  EXPECT_DCHECK_DEATH(ref_count->Release());
+  allocator.root()->Free(ptr);
+}
+
+// AcquireFromUnprotectedPtr() once, ReleaseFromUnprotectedPtr() twice => CRASH
+TEST_P(PartitionAllocDeathTest, ReleaseUnderflowDanglingPtr) {
+  if (!UseBRPPool()) {
+    return;
+  }
+
+  void* ptr =
+      allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
+  auto* ref_count =
+      PartitionRefCountPointer(allocator.root()->ObjectToSlotStart(ptr));
+  ref_count->AcquireFromUnprotectedPtr();
+  EXPECT_FALSE(ref_count->ReleaseFromUnprotectedPtr());
+  EXPECT_DCHECK_DEATH(ref_count->ReleaseFromUnprotectedPtr());
+  allocator.root()->Free(ptr);
+}
+
+#endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
+#endif  // defined(PA_HAS_DEATH_TESTS)
+#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+
+TEST_P(PartitionAllocTest, ReservationOffset) {
+  // For normal buckets, offset should be kOffsetTagNormalBuckets.
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  uintptr_t address = UntagPtr(ptr);
+  EXPECT_EQ(kOffsetTagNormalBuckets, *ReservationOffsetPointer(address));
+  allocator.root()->Free(ptr);
+
+  // For direct-map,
+  size_t large_size = kSuperPageSize * 5 + PartitionPageSize() * .5f;
+  ASSERT_GT(large_size, kMaxBucketed);
+  ptr = allocator.root()->Alloc(large_size, type_name);
+  EXPECT_TRUE(ptr);
+  address = UntagPtr(ptr);
+  EXPECT_EQ(0U, *ReservationOffsetPointer(address));
+  EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
+  EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
+  EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
+  EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
+  EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
+
+  // In-place realloc doesn't affect the offsets.
+  void* new_ptr = allocator.root()->Realloc(ptr, large_size * .8, type_name);
+  EXPECT_EQ(new_ptr, ptr);
+  EXPECT_EQ(0U, *ReservationOffsetPointer(address));
+  EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
+  EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
+  EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
+  EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
+  EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
+
+  allocator.root()->Free(ptr);
+  // After free, the offsets must be kOffsetTagNotAllocated.
+  EXPECT_EQ(kOffsetTagNotAllocated, *ReservationOffsetPointer(address));
+  EXPECT_EQ(kOffsetTagNotAllocated,
+            *ReservationOffsetPointer(address + kSuperPageSize));
+  EXPECT_EQ(kOffsetTagNotAllocated,
+            *ReservationOffsetPointer(address + kSuperPageSize * 2));
+  EXPECT_EQ(kOffsetTagNotAllocated,
+            *ReservationOffsetPointer(address + kSuperPageSize * 3));
+  EXPECT_EQ(kOffsetTagNotAllocated,
+            *ReservationOffsetPointer(address + kSuperPageSize * 4));
+  EXPECT_EQ(kOffsetTagNotAllocated,
+            *ReservationOffsetPointer(address + kSuperPageSize * 5));
+}
+
+TEST_P(PartitionAllocTest, GetReservationStart) {
+  size_t large_size = kSuperPageSize * 3 + PartitionPageSize() * .5f;
+  ASSERT_GT(large_size, kMaxBucketed);
+  void* ptr = allocator.root()->Alloc(large_size, type_name);
+  EXPECT_TRUE(ptr);
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  uintptr_t reservation_start = slot_start - PartitionPageSize();
+  EXPECT_EQ(0U, reservation_start & DirectMapAllocationGranularityOffsetMask());
+
+  uintptr_t address = UntagPtr(ptr);
+  for (uintptr_t a = address; a < address + large_size; ++a) {
+    uintptr_t address2 = GetDirectMapReservationStart(a) + PartitionPageSize();
+    EXPECT_EQ(slot_start, address2);
+  }
+
+  EXPECT_EQ(reservation_start, GetDirectMapReservationStart(slot_start));
+
+  allocator.root()->Free(ptr);
+}
+
+TEST_P(PartitionAllocTest, CheckReservationType) {
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  uintptr_t address = UntagPtr(ptr);
+  uintptr_t address_to_check = address;
+  EXPECT_FALSE(IsReservationStart(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  address_to_check = address + kTestAllocSize - 1;
+  EXPECT_FALSE(IsReservationStart(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  address_to_check =
+      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
+  EXPECT_TRUE(IsReservationStart(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  allocator.root()->Free(ptr);
+  // Freeing keeps a normal-bucket super page in memory.
+  address_to_check =
+      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
+  EXPECT_TRUE(IsReservationStart(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+
+  size_t large_size = 2 * kSuperPageSize;
+  ASSERT_GT(large_size, kMaxBucketed);
+  ptr = allocator.root()->Alloc(large_size, type_name);
+  EXPECT_TRUE(ptr);
+  address = UntagPtr(ptr);
+  address_to_check = address;
+  EXPECT_FALSE(IsReservationStart(address_to_check));
+  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  address_to_check =
+      partition_alloc::internal::base::bits::AlignUp(address, kSuperPageSize);
+  EXPECT_FALSE(IsReservationStart(address_to_check));
+  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  address_to_check = address + large_size - 1;
+  EXPECT_FALSE(IsReservationStart(address_to_check));
+  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  address_to_check =
+      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
+  EXPECT_TRUE(IsReservationStart(address_to_check));
+  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
+  EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+  allocator.root()->Free(ptr);
+  // Freeing releases direct-map super pages.
+  address_to_check =
+      partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Expect to DCHECK on unallocated region.
+  EXPECT_DEATH_IF_SUPPORTED(IsReservationStart(address_to_check), "");
+#endif
+  EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
+  EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
+  EXPECT_FALSE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
+}
+
+// Test for crash http://crbug.com/1169003.
+TEST_P(PartitionAllocTest, CrossPartitionRootRealloc) {
+  // The size is large enough to be satisfied from a single-slot slot span.
+  size_t test_size = MaxRegularSlotSpanSize() - ExtraAllocSize(allocator);
+  void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(test_size);
+  EXPECT_TRUE(ptr);
+
+  // Create new root and call PurgeMemory to simulate ConfigurePartitions().
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+                                PurgeFlags::kDiscardUnusedSystemPages);
+  std::unique_ptr<PartitionRoot> new_root = CreateCustomTestRoot(
+      PartitionOptions{
+          .ref_count_size = GetParam().ref_count_size,
+      },
+      PartitionTestOptions{.set_bucket_distribution = true});
+
+  // Realloc from |allocator.root()| into |new_root|.
+  void* ptr2 = new_root->Realloc<AllocFlags::kReturnNull>(ptr, test_size + 1024,
+                                                          nullptr);
+  EXPECT_TRUE(ptr2);
+  PA_EXPECT_PTR_NE(ptr, ptr2);
+}
+
+TEST_P(PartitionAllocTest, FastPathOrReturnNull) {
+  size_t allocation_size = 64;
+  // The very first allocation is never a fast path one, since it needs a new
+  // super page and a new partition page.
+  EXPECT_FALSE(allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
+      allocation_size, ""));
+  void* ptr = allocator.root()->Alloc(allocation_size);
+  ASSERT_TRUE(ptr);
+
+  // Next one is, since the partition page has been activated.
+  void* ptr2 = allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
+      allocation_size, "");
+  EXPECT_TRUE(ptr2);
+
+  // First allocation of a different bucket is slow.
+  EXPECT_FALSE(allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
+      2 * allocation_size, ""));
+
+  size_t allocated_size = 2 * allocation_size;
+  std::vector<void*> ptrs;
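+  // Keep allocating on the fast path until it fails; the total served this
+  // way should not exceed one regular slot span, as checked below.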
+  while (void* new_ptr =
+             allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
+                 allocation_size, "")) {
+    ptrs.push_back(new_ptr);
+    allocated_size += allocation_size;
+  }
+  EXPECT_LE(allocated_size,
+            PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan);
+
+  for (void* ptr_to_free : ptrs) {
+    allocator.root()->Free<FreeFlags::kNoHooks>(ptr_to_free);
+  }
+
+  allocator.root()->Free<FreeFlags::kNoHooks>(ptr);
+  allocator.root()->Free<FreeFlags::kNoHooks>(ptr2);
+}
+
+#if defined(PA_HAS_DEATH_TESTS)
+// DCHECK messages are stripped in official builds, which causes death tests
+// with matchers to fail.
+#if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
+
+TEST_P(PartitionAllocDeathTest, CheckTriggered) {
+  EXPECT_DCHECK_DEATH_WITH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
+  EXPECT_DEATH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
+}
+
+#endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
+#endif  // defined(PA_HAS_DEATH_TESTS)
+
+// Not on Chromecast, since gtest treats extra output from itself as a test
+// failure:
+// https://ci.chromium.org/ui/p/chromium/builders/ci/Cast%20Audio%20Linux/98492/overview
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_HAS_DEATH_TESTS) && \
+    !BUILDFLAG(PA_IS_CASTOS)
+
+namespace {
+
+PA_NOINLINE void FreeForTest(void* data) {
+  free(data);
+}
+
+class ThreadDelegateForPreforkHandler
+    : public base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForPreforkHandler(std::atomic<bool>& please_stop,
+                                  std::atomic<int>& started_threads,
+                                  const int alloc_size)
+      : please_stop_(please_stop),
+        started_threads_(started_threads),
+        alloc_size_(alloc_size) {}
+
+  void ThreadMain() override {
+    started_threads_++;
+    while (!please_stop_.load(std::memory_order_relaxed)) {
+      void* ptr = malloc(alloc_size_);
+
+      // A simple malloc() / free() pair can be discarded by the compiler
+      // (and is), making the test fail. Making |FreeForTest()| a PA_NOINLINE
+      // function is both sufficient and required to keep this call from being
+      // eliminated.
+      FreeForTest(ptr);
+    }
+  }
+
+ private:
+  std::atomic<bool>& please_stop_;
+  std::atomic<int>& started_threads_;
+  const int alloc_size_;
+};
+
+}  // namespace
+
+// Disabled because executing it causes Gtest to show a warning in the output,
+// which confuses the runner on some platforms, making the test report an
+// "UNKNOWN" status even though it succeeded.
+TEST_P(PartitionAllocTest, DISABLED_PreforkHandler) {
+  std::atomic<bool> please_stop;
+  std::atomic<int> started_threads{0};
+
+  // Continuously allocates / frees memory, bypassing the thread cache. This
+  // makes it likely that this thread will own the lock, and that the
+  // EXPECT_EXIT() part will deadlock.
+  constexpr size_t kAllocSize = ThreadCache::kLargeSizeThreshold + 1;
+  ThreadDelegateForPreforkHandler delegate(please_stop, started_threads,
+                                           kAllocSize);
+
+  constexpr int kThreads = 4;
+  base::PlatformThreadHandle thread_handles[kThreads];
+  for (auto& thread_handle : thread_handles) {
+    base::PlatformThreadForTesting::Create(0, &delegate, &thread_handle);
+  }
+  // Make sure all threads are actually already running.
+  while (started_threads != kThreads) {
+  }
+
+  EXPECT_EXIT(
+      {
+        void* ptr = malloc(kAllocSize);
+        FreeForTest(ptr);
+        exit(1);
+      },
+      ::testing::ExitedWithCode(1), "");
+
+  please_stop.store(true);
+  for (auto& thread_handle : thread_handles) {
+    base::PlatformThreadForTesting::Join(thread_handle);
+  }
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
+        // defined(PA_HAS_DEATH_TESTS) && !BUILDFLAG(PA_IS_CASTOS)
+
+// Checks the bucket index logic.
+TEST_P(PartitionAllocTest, GetIndex) {
+  BucketIndexLookup lookup{};
+
+  for (size_t size = 0; size < kMaxBucketed; size++) {
+    size_t index = BucketIndexLookup::GetIndex(size);
+    ASSERT_GE(lookup.bucket_sizes()[index], size);
+  }
+
+  // Make sure that power-of-two sizes have exactly matching buckets.
+  for (size_t size = (1 << (kMinBucketedOrder - 1)); size < kMaxBucketed;
+       size <<= 1) {
+    size_t index = BucketIndexLookup::GetIndex(size);
+    ASSERT_EQ(lookup.bucket_sizes()[index], size);
+  }
+}
+
+// Used to check alignment. If the compiler understands the annotations, the
+// zeroing in the constructor uses aligned SIMD instructions.
+TEST_P(PartitionAllocTest, MallocFunctionAnnotations) {
+  struct TestStruct {
+    uint64_t a = 0;
+    uint64_t b = 0;
+  };
+
+  void* buffer = Alloc(sizeof(TestStruct));
+  // Should use "mov*a*ps" on x86_64.
+  auto* x = new (buffer) TestStruct();
+
+  EXPECT_EQ(x->a, 0u);
+  Free(buffer);
+}
+
+// Test that the ConfigurablePool works properly.
+TEST_P(PartitionAllocTest, ConfigurablePool) {
+  EXPECT_FALSE(IsConfigurablePoolAvailable());
+
+  // The rest is only applicable to 64-bit mode
+#if defined(ARCH_CPU_64_BITS)
+  // Repeat the test for every possible Pool size
+  const size_t max_pool_size = PartitionAddressSpace::ConfigurablePoolMaxSize();
+  const size_t min_pool_size = PartitionAddressSpace::ConfigurablePoolMinSize();
+  for (size_t pool_size = max_pool_size; pool_size >= min_pool_size;
+       pool_size /= 2) {
+    PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(pool_size));
+    EXPECT_FALSE(IsConfigurablePoolAvailable());
+    uintptr_t pool_base =
+        AllocPages(pool_size, pool_size,
+                   PageAccessibilityConfiguration(
+                       PageAccessibilityConfiguration::kInaccessible),
+                   PageTag::kPartitionAlloc);
+    EXPECT_NE(0u, pool_base);
+    PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
+
+    EXPECT_TRUE(IsConfigurablePoolAvailable());
+
+    std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
+        PartitionOptions{
+            .use_configurable_pool = PartitionOptions::kAllowed,
+            .ref_count_size = GetParam().ref_count_size,
+        },
+        PartitionTestOptions{.uncap_empty_slot_span_memory = true,
+                             .set_bucket_distribution = true});
+
+    const size_t count = 250;
+    std::vector<void*> allocations(count, nullptr);
+    for (size_t i = 0; i < count; ++i) {
+      const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
+      allocations[i] = root->Alloc(size);
+      EXPECT_NE(nullptr, allocations[i]);
+      // We don't Untag allocations here because MTE is disabled for
+      // configurable pools used by V8.
+      // https://bugs.chromium.org/p/v8/issues/detail?id=13117
+      uintptr_t allocation_base = reinterpret_cast<uintptr_t>(allocations[i]);
+      EXPECT_EQ(allocation_base, UntagPtr(allocations[i]));
+      EXPECT_TRUE(allocation_base >= pool_base &&
+                  allocation_base < pool_base + pool_size);
+    }
+
+    PartitionAddressSpace::UninitConfigurablePoolForTesting();
+    FreePages(pool_base, pool_size);
+  }
+
+#endif  // defined(ARCH_CPU_64_BITS)
+}
+
+TEST_P(PartitionAllocTest, EmptySlotSpanSizeIsCapped) {
+  // Use another root, since the ones from the test harness disable the empty
+  // slot span size cap.
+  std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
+      PartitionOptions{
+          .ref_count_size = GetParam().ref_count_size,
+      },
+      PartitionTestOptions{.set_bucket_distribution = true});
+
+  // Allocate some memory, don't free it to keep committed memory.
+  std::vector<void*> allocated_memory;
+  const size_t size = SystemPageSize();
+  const size_t count = 400;
+  for (size_t i = 0; i < count; i++) {
+    void* ptr = root->Alloc(size);
+    allocated_memory.push_back(ptr);
+  }
+  ASSERT_GE(root->total_size_of_committed_pages.load(std::memory_order_relaxed),
+            size * count);
+
+  // To create empty slot spans, allocate from single-slot slot spans, 128kiB at
+  // a time.
+  std::vector<void*> single_slot_allocated_memory;
+  constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize - 1;
+  const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
+  // Make sure that even with allocation size rounding up, a single allocation
+  // is still below the threshold.
+  ASSERT_LT(MaxRegularSlotSpanSize() * 2,
+            ((count * size) >> root->max_empty_slot_spans_dirty_bytes_shift));
+  for (size_t i = 0; i < single_slot_count; i++) {
+    void* ptr = root->Alloc(single_slot_size);
+    single_slot_allocated_memory.push_back(ptr);
+  }
+
+  // Free everything at once, creating as many empty slot spans as there are
+  // allocations (since they are from single-slot slot spans).
+  for (void* ptr : single_slot_allocated_memory) {
+    root->Free(ptr);
+  }
+
+  // Still have some committed empty slot spans.
+  // PA_TS_UNCHECKED_READ() is not an issue here, since everything is
+  // single-threaded.
+  EXPECT_GT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
+  // But not all, as the cap triggered.
+  EXPECT_LT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
+            single_slot_count * single_slot_size);
+
+  // Nothing left after explicit purge.
+  root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
+
+  for (void* ptr : allocated_memory) {
+    root->Free(ptr);
+  }
+}
+
+TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
+  std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
+      PartitionOptions{
+          .ref_count_size = GetParam().ref_count_size,
+      },
+      PartitionTestOptions{.uncap_empty_slot_span_memory = true,
+                           .set_bucket_distribution = true});
+
+  std::vector<void*> single_slot_allocated_memory;
+  constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize + 10;
+  const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
+  const size_t bucket_size =
+      root->buckets[SizeToIndex(single_slot_size)].slot_size;
+
+  for (size_t i = 0; i < single_slot_count; i++) {
+    void* ptr = root->Alloc(single_slot_size);
+    single_slot_allocated_memory.push_back(ptr);
+  }
+
+  // Free everything at once, creating as many empty slot spans as there are
+  // allocations (since they are from single-slot slot spans).
+  for (void* ptr : single_slot_allocated_memory) {
+    root->Free(ptr);
+  }
+  single_slot_allocated_memory.clear();
+
+  // Some of the free()-s above overflowed the slot span ring.
+  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
+            kDefaultEmptySlotSpanRingSize * bucket_size);
+
+  // Now can cache more slot spans.
+  root->EnableLargeEmptySlotSpanRing();
+
+  constexpr size_t single_slot_large_count = kDefaultEmptySlotSpanRingSize + 10;
+  for (size_t i = 0; i < single_slot_large_count; i++) {
+    void* ptr = root->Alloc(single_slot_size);
+    single_slot_allocated_memory.push_back(ptr);
+  }
+
+  for (void* ptr : single_slot_allocated_memory) {
+    root->Free(ptr);
+  }
+  single_slot_allocated_memory.clear();
+
+  // No overflow this time.
+  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
+            single_slot_large_count * bucket_size);
+
+  constexpr size_t single_slot_too_many_count = kMaxFreeableSpans + 10;
+  for (size_t i = 0; i < single_slot_too_many_count; i++) {
+    void* ptr = root->Alloc(single_slot_size);
+    single_slot_allocated_memory.push_back(ptr);
+  }
+
+  for (void* ptr : single_slot_allocated_memory) {
+    root->Free(ptr);
+  }
+  single_slot_allocated_memory.clear();
+
+  // Overflow still works.
+  EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
+            kMaxFreeableSpans * bucket_size);
+}
+
+#if BUILDFLAG(PA_IS_CAST_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+extern "C" {
+void* __real_malloc(size_t);
+}  // extern "C"
+
+TEST_P(PartitionAllocTest, HandleMixedAllocations) {
+  void* ptr = __real_malloc(12);
+  // Should not crash, no test assertion.
+  free(ptr);
+}
+#endif
+
+TEST_P(PartitionAllocTest, SortFreelist) {
+  const size_t count = 100;
+  const size_t allocation_size = 1;
+  void* first_ptr = allocator.root()->Alloc(allocation_size);
+
+  std::vector<void*> allocations;
+  for (size_t i = 0; i < count; ++i) {
+    allocations.push_back(allocator.root()->Alloc(allocation_size));
+  }
+
+  // Shuffle and free memory out of order.
+  std::random_device rd;
+  std::mt19937 generator(rd());
+  std::shuffle(allocations.begin(), allocations.end(), generator);
+
+  // Keep one allocation alive (first_ptr), so that the SlotSpan is not fully
+  // empty.
+  for (void* ptr : allocations) {
+    allocator.root()->Free(ptr);
+  }
+  allocations.clear();
+
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+
+  size_t bucket_index =
+      SizeToIndex(allocation_size + ExtraAllocSize(allocator));
+  auto& bucket = allocator.root()->buckets[bucket_index];
+  EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
+
+  // Can sort again.
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
+
+  for (size_t i = 0; i < count; ++i) {
+    allocations.push_back(allocator.root()->Alloc(allocation_size));
+    // Allocating keeps the freelist sorted.
+    EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
+  }
+
+  // Check that it is sorted.
+  for (size_t i = 1; i < allocations.size(); i++) {
+    EXPECT_LT(UntagPtr(allocations[i - 1]), UntagPtr(allocations[i]));
+  }
+
+  for (void* ptr : allocations) {
+    allocator.root()->Free(ptr);
+    // Free()-ing memory destroys order.  Not looking at the head of the active
+    // list, as it is not necessarily the one from which |ptr| came.
+    auto* slot_span =
+        SlotSpan::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
+    EXPECT_FALSE(slot_span->freelist_is_sorted());
+  }
+
+  allocator.root()->Free(first_ptr);
+}
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_LINUX) && \
+    defined(ARCH_CPU_64_BITS)
+TEST_P(PartitionAllocTest, CrashOnUnknownPointer) {
+  int not_a_heap_object = 42;
+  EXPECT_DEATH(allocator.root()->Free(&not_a_heap_object), "");
+}
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
+        // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
+
+// Adapted from crashpad tests.
+class ScopedOpenCLNoOpKernel {
+ public:
+  ScopedOpenCLNoOpKernel()
+      : context_(nullptr),
+        program_(nullptr),
+        kernel_(nullptr),
+        success_(false) {}
+
+  ScopedOpenCLNoOpKernel(const ScopedOpenCLNoOpKernel&) = delete;
+  ScopedOpenCLNoOpKernel& operator=(const ScopedOpenCLNoOpKernel&) = delete;
+
+  ~ScopedOpenCLNoOpKernel() {
+    if (kernel_) {
+      cl_int rv = clReleaseKernel(kernel_);
+      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseKernel";
+    }
+
+    if (program_) {
+      cl_int rv = clReleaseProgram(program_);
+      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseProgram";
+    }
+
+    if (context_) {
+      cl_int rv = clReleaseContext(context_);
+      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseContext";
+    }
+  }
+
+  void SetUp() {
+    cl_platform_id platform_id;
+    cl_int rv = clGetPlatformIDs(1, &platform_id, nullptr);
+    ASSERT_EQ(rv, CL_SUCCESS) << "clGetPlatformIDs";
+    cl_device_id device_id;
+    rv =
+        clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, nullptr);
+#if defined(ARCH_CPU_ARM64)
+    // CL_DEVICE_TYPE_CPU doesn’t seem to work at all on arm64, meaning that
+    // these weird OpenCL modules probably don’t show up there at all. Keep this
+    // test even on arm64 in case this ever does start working.
+    if (rv == CL_INVALID_VALUE) {
+      return;
+    }
+#endif  // ARCH_CPU_ARM64
+    ASSERT_EQ(rv, CL_SUCCESS) << "clGetDeviceIDs";
+
+    context_ = clCreateContext(nullptr, 1, &device_id, nullptr, nullptr, &rv);
+    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateContext";
+
+    const char* sources[] = {
+        "__kernel void NoOp(void) {barrier(CLK_LOCAL_MEM_FENCE);}",
+    };
+    const size_t source_lengths[] = {
+        strlen(sources[0]),
+    };
+    static_assert(std::size(sources) == std::size(source_lengths),
+                  "arrays must be parallel");
+
+    program_ = clCreateProgramWithSource(context_, std::size(sources), sources,
+                                         source_lengths, &rv);
+    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateProgramWithSource";
+
+    rv = clBuildProgram(program_, 1, &device_id, "-cl-opt-disable", nullptr,
+                        nullptr);
+    ASSERT_EQ(rv, CL_SUCCESS) << "clBuildProgram";
+
+    kernel_ = clCreateKernel(program_, "NoOp", &rv);
+    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateKernel";
+
+    success_ = true;
+  }
+
+  bool success() const { return success_; }
+
+ private:
+  cl_context context_;
+  cl_program program_;
+  cl_kernel kernel_;
+  bool success_;
+};
+
+// On macOS 10.11, allocations are made with PartitionAlloc, but the pointer
+// is incorrectly passed by CoreFoundation to the previous default zone,
+// causing crashes. This test is intended to detect regressions of these issues
+// in future versions of macOS.
+TEST_P(PartitionAllocTest, OpenCL) {
+  ScopedOpenCLNoOpKernel kernel;
+  kernel.SetUp();
+#if !defined(ARCH_CPU_ARM64)
+  ASSERT_TRUE(kernel.success());
+#endif
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
+        // BUILDFLAG(IS_MAC)
+
+TEST_P(PartitionAllocTest, SmallSlotSpanWaste) {
+  for (PartitionRoot::Bucket& bucket : allocator.root()->buckets) {
+    const size_t slot_size = bucket.slot_size;
+    if (slot_size == kInvalidBucketSize) {
+      continue;
+    }
+
+    size_t small_system_page_count =
+        partition_alloc::internal::ComputeSystemPagesPerSlotSpan(
+            bucket.slot_size, true);
+    size_t small_waste =
+        (small_system_page_count * SystemPageSize()) % slot_size;
+
+    EXPECT_LT(small_waste, .05 * SystemPageSize());
+    if (slot_size <= MaxRegularSlotSpanSize()) {
+      EXPECT_LE(small_system_page_count, MaxSystemPagesPerRegularSlotSpan());
+    }
+  }
+}
+
+TEST_P(PartitionAllocTest, SortActiveSlotSpans) {
+  auto run_test = [](size_t count) {
+    PartitionBucket bucket;
+    bucket.Init(16);
+    bucket.active_slot_spans_head = nullptr;
+
+    std::vector<SlotSpanMetadata> slot_spans;
+    slot_spans.reserve(count);
+
+    // Add slot spans with random freelist length.
+    for (size_t i = 0; i < count; i++) {
+      slot_spans.emplace_back(&bucket);
+      auto& slot_span = slot_spans.back();
+      slot_span.num_unprovisioned_slots =
+          partition_alloc::internal::base::RandGenerator(
+              bucket.get_slots_per_span() / 2);
+      slot_span.num_allocated_slots =
+          partition_alloc::internal::base::RandGenerator(
+              bucket.get_slots_per_span() - slot_span.num_unprovisioned_slots);
+      slot_span.next_slot_span = bucket.active_slot_spans_head;
+      bucket.active_slot_spans_head = &slot_span;
+    }
+
+    bucket.SortActiveSlotSpans();
+
+    std::set<SlotSpanMetadata*> seen_slot_spans;
+    std::vector<SlotSpanMetadata*> sorted_slot_spans;
+    for (auto* slot_span = bucket.active_slot_spans_head; slot_span;
+         slot_span = slot_span->next_slot_span) {
+      sorted_slot_spans.push_back(slot_span);
+      seen_slot_spans.insert(slot_span);
+    }
+
+    // None repeated, none missing.
+    EXPECT_EQ(seen_slot_spans.size(), sorted_slot_spans.size());
+    EXPECT_EQ(seen_slot_spans.size(), slot_spans.size());
+
+    // The first slot spans are sorted.
+    size_t sorted_spans_count =
+        std::min(PartitionBucket::kMaxSlotSpansToSort, count);
+    EXPECT_TRUE(std::is_sorted(sorted_slot_spans.begin(),
+                               sorted_slot_spans.begin() + sorted_spans_count,
+                               partition_alloc::internal::CompareSlotSpans));
+
+    // Slot spans with no freelist entries are at the end of the sorted run.
+    auto has_empty_freelist = [](SlotSpanMetadata* a) {
+      return a->GetFreelistLength() == 0;
+    };
+    auto it = std::find_if(sorted_slot_spans.begin(),
+                           sorted_slot_spans.begin() + sorted_spans_count,
+                           has_empty_freelist);
+    if (it != sorted_slot_spans.end()) {
+      EXPECT_TRUE(std::all_of(it,
+                              sorted_slot_spans.begin() + sorted_spans_count,
+                              has_empty_freelist));
+    }
+  };
+
+  // Everything is sorted.
+  run_test(PartitionBucket::kMaxSlotSpansToSort / 2);
+  // Only the first slot spans are sorted.
+  run_test(PartitionBucket::kMaxSlotSpansToSort * 2);
+
+  // Corner cases.
+  run_test(0);
+  run_test(1);
+}
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsUsedAfterAlloc) {
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
+
+  allocator.root()->Free(ptr);
+}
+
+TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsFreeAfterFree) {
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
+
+  allocator.root()->Free(ptr);
+  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
+}
+
+TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterDecommit) {
+  void* ptr1 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr1);
+  allocator.root()->Free(ptr1);
+
+  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
+  // Decommit the slot span. Bitmap will be rewritten in Decommit().
+  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
+}
+
+TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterPurge) {
+  void* ptr1 = allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name);
+  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
+      SystemPageSize() - ExtraAllocSize(allocator), type_name));
+  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr2);
+  allocator.root()->Free(ptr2);
+
+  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
+  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
+  // Bitmap will be rewritten in PartitionPurgeSlotSpan().
+  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
+  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
+  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
+
+  allocator.root()->Free(ptr1);
+}
+
+#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)
+
+}  // namespace partition_alloc::internal
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.cc
new file mode 100644
index 0000000..51f2c9b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.cc
@@ -0,0 +1,1474 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <tuple>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+
+namespace partition_alloc::internal {
+
+namespace {
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+PA_ALWAYS_INLINE uintptr_t ShadowMetadataStart(uintptr_t super_page,
+                                               pool_handle pool) {
+  uintptr_t shadow_metadata_start =
+      super_page + SystemPageSize() + ShadowPoolOffset(pool);
+  PA_DCHECK(!PartitionAddressSpace::IsInRegularPool(shadow_metadata_start));
+  PA_DCHECK(!PartitionAddressSpace::IsInBRPPool(shadow_metadata_start));
+  return shadow_metadata_start;
+}
+#endif
+
+[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryMappingFailure(
+    PartitionRoot* root,
+    size_t size) PA_LOCKS_EXCLUDED(PartitionRootLock(root)) {
+  PA_NO_CODE_FOLDING();
+  root->OutOfMemory(size);
+  PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
+}
+
+[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryCommitFailure(
+    PartitionRoot* root,
+    size_t size) PA_LOCKS_EXCLUDED(PartitionRootLock(root)) {
+  PA_NO_CODE_FOLDING();
+  root->OutOfMemory(size);
+  PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
+}
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+// |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
+// that a partial super page is allowed at the end. Since the block list uses
+// kSuperPageSize granularity, a partial super page is considered blocked if
+// there is a raw_ptr<T> pointing anywhere in that super page, even if it
+// doesn't point to that partially allocated region.
+bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
+  PA_DCHECK(!(start % kSuperPageSize));
+  for (uintptr_t super_page = start; super_page < end;
+       super_page += kSuperPageSize) {
+    // If any blocked super page is found inside the given memory region,
+    // the memory region is blocked.
+    if (!AddressPoolManagerBitmap::IsAllowedSuperPageForBRPPool(super_page)) {
+      AddressPoolManagerBitmap::IncrementBlocklistHitCount();
+      return false;
+    }
+  }
+  return true;
+}
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
+        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+// Reserves |requested_size| worth of super pages from the specified pool.
+// If BRP pool is requested this function will honor BRP block list.
+//
+// The returned address will be aligned to kSuperPageSize, and so
+// |requested_address| should be. |requested_size| doesn't have to be, however.
+//
+// |requested_address| is merely a hint, which will be attempted, but easily
+// given up on if it doesn't work the first time.
+//
+// The function doesn't need to hold root->lock_ or any other locks, because:
+// - It (1) reserves memory, (2) then consults AreAllowedSuperPagesForBRPPool
+//   for that memory, and (3) returns the memory if
+//   allowed, or unreserves and decommits if not allowed. So no other
+//   overlapping region can be allocated while executing
+//   AreAllowedSuperPagesForBRPPool.
+// - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
+//   designed to not need locking.
+uintptr_t ReserveMemoryFromPool(pool_handle pool,
+                                uintptr_t requested_address,
+                                size_t requested_size) {
+  PA_DCHECK(!(requested_address % kSuperPageSize));
+
+  uintptr_t reserved_address = AddressPoolManager::GetInstance().Reserve(
+      pool, requested_address, requested_size);
+
+  // In 32-bit mode, when allocating from BRP pool, verify that the requested
+  // allocation honors the block list. Find a better address otherwise.
+#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (pool == kBRPPoolHandle) {
+    constexpr int kMaxRandomAddressTries = 10;
+    for (int i = 0; i < kMaxRandomAddressTries; ++i) {
+      if (!reserved_address ||
+          AreAllowedSuperPagesForBRPPool(reserved_address,
+                                         reserved_address + requested_size)) {
+        break;
+      }
+      AddressPoolManager::GetInstance().UnreserveAndDecommit(
+          pool, reserved_address, requested_size);
+      // No longer try to honor |requested_address|, because it didn't work for
+      // us last time.
+      reserved_address =
+          AddressPoolManager::GetInstance().Reserve(pool, 0, requested_size);
+    }
+
+    // If the allocation attempt succeeds, we will break out of the following
+    // loop immediately.
+    //
+    // Last resort: sequentially scan the whole 32-bit address space. The number
+    // of blocked super-pages should be very small, so we expect to practically
+    // never need to run the following code. Note that it may fail to find an
+    // available super page, e.g., when it becomes available after the scan
+    // passes through it, but we accept the risk.
+    for (uintptr_t address_to_try = kSuperPageSize; address_to_try != 0;
+         address_to_try += kSuperPageSize) {
+      if (!reserved_address ||
+          AreAllowedSuperPagesForBRPPool(reserved_address,
+                                         reserved_address + requested_size)) {
+        break;
+      }
+      AddressPoolManager::GetInstance().UnreserveAndDecommit(
+          pool, reserved_address, requested_size);
+      // Reserve() can return a different pointer than attempted.
+      reserved_address = AddressPoolManager::GetInstance().Reserve(
+          pool, address_to_try, requested_size);
+    }
+
+    // If the loop ends naturally, the last allocated region hasn't been
+    // verified. Do it now.
+    if (reserved_address &&
+        !AreAllowedSuperPagesForBRPPool(reserved_address,
+                                        reserved_address + requested_size)) {
+      AddressPoolManager::GetInstance().UnreserveAndDecommit(
+          pool, reserved_address, requested_size);
+      reserved_address = 0;
+    }
+  }
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
+        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  // Only mark the region as belonging to the pool after it has passed the
+  // blocklist check in order to avoid a potential race with destructing a
+  // raw_ptr<T> object that points to non-PA memory in another thread.
+  // If `MarkUsed` was called earlier, the other thread could incorrectly
+// determine that the allocation had come from PartitionAlloc.
+  if (reserved_address) {
+    AddressPoolManager::GetInstance().MarkUsed(pool, reserved_address,
+                                               requested_size);
+  }
+#endif
+
+  PA_DCHECK(!(reserved_address % kSuperPageSize));
+  return reserved_address;
+}
+
+SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
+                                     AllocFlags flags,
+                                     size_t raw_size,
+                                     size_t slot_span_alignment) {
+  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
+            base::bits::IsPowerOfTwo(slot_span_alignment));
+
+  // No static EXCLUSIVE_LOCKS_REQUIRED(), as the checker doesn't understand
+  // scoped unlocking.
+  PartitionRootLock(root).AssertAcquired();
+
+  const bool return_null = ContainsFlags(flags, AllocFlags::kReturnNull);
+  if (PA_UNLIKELY(raw_size > MaxDirectMapped())) {
+    if (return_null) {
+      return nullptr;
+    }
+
+    // The lock is here to protect PA from:
+    // 1. Concurrent calls
+    // 2. Reentrant calls
+    //
+    // This is fine here however, as:
+    // 1. Concurrency: |PartitionRoot::OutOfMemory()| never returns, so the lock
+    //    will not be re-acquired, which would lead to acting on inconsistent
+    //    data that could have been modified in-between releasing and acquiring
+    //    it.
+    // 2. Reentrancy: This is why we release the lock. On some platforms,
+    //    terminating the process may free() memory, or even possibly try to
+    //    allocate some. Calling free() is fine, but will deadlock since
+    //    |PartitionRoot::lock_| is not recursive.
+    //
+    // Supporting reentrant calls properly is hard, and not a requirement for
+    // PA. However up to that point, we've only *read* data, not *written* to
+    // any state. Reentrant calls are then fine, especially as we don't continue
+    // on this path. The only downside is possibly endless recursion if the OOM
+    // handler allocates and fails to use UncheckedMalloc() or equivalent, but
+    // that's violating the contract of base::TerminateBecauseOutOfMemory().
+    ScopedUnlockGuard unlock{PartitionRootLock(root)};
+    PartitionExcessiveAllocationSize(raw_size);
+  }
+
+  PartitionDirectMapExtent* map_extent = nullptr;
+  PartitionPage* page = nullptr;
+
+  {
+    // Getting memory for direct-mapped allocations doesn't interact with the
+    // rest of the allocator, but takes a long time, as it involves several
+    // system calls. Although no mmap() (or equivalent) calls are made on
+    // 64 bit systems, page permissions are changed with mprotect(), which is
+    // a syscall.
+    //
+    // These calls are almost always slow (at least a couple us per syscall on a
+    // desktop Linux machine), and they also have a very long latency tail,
+    // possibly from getting descheduled. As a consequence, we should not hold
+    // the lock when performing a syscall. This is not the only problematic
+    // location, but since this one doesn't interact with the rest of the
+    // allocator, we can safely drop and then re-acquire the lock.
+    //
+    // Note that this only affects allocations that are not served out of the
+    // thread cache, but as a simple example the buffer partition in blink is
+    // frequently used for large allocations (e.g. ArrayBuffer), and frequent,
+    // small ones (e.g. WTF::String), and does not have a thread cache.
+    ScopedUnlockGuard scoped_unlock{PartitionRootLock(root)};
+
+    const size_t slot_size = PartitionRoot::GetDirectMapSlotSize(raw_size);
+    // The super page starts with a partition page worth of metadata and guard
+    // pages, hence alignment requests ==PartitionPageSize() will be
+    // automatically satisfied. Padding is needed for higher-order alignment
+    // requests. Note, |slot_span_alignment| is at least 1 partition page.
+    const size_t padding_for_alignment =
+        slot_span_alignment - PartitionPageSize();
+    const size_t reservation_size = PartitionRoot::GetDirectMapReservationSize(
+        raw_size + padding_for_alignment);
+    PA_DCHECK(reservation_size >= raw_size);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    const size_t available_reservation_size =
+        reservation_size - padding_for_alignment -
+        PartitionRoot::GetDirectMapMetadataAndGuardPagesSize();
+    PA_DCHECK(slot_size <= available_reservation_size);
+#endif
+
+    pool_handle pool = root->ChoosePool();
+    uintptr_t reservation_start;
+    {
+      // Reserving memory from the pool is actually not a syscall on 64 bit
+      // platforms.
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+      ScopedSyscallTimer timer{root};
+#endif
+      reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
+    }
+    if (PA_UNLIKELY(!reservation_start)) {
+      if (return_null) {
+        return nullptr;
+      }
+
+      PartitionOutOfMemoryMappingFailure(root, reservation_size);
+    }
+
+    root->total_size_of_direct_mapped_pages.fetch_add(
+        reservation_size, std::memory_order_relaxed);
+
+    // Shift by 1 partition page (metadata + guard pages) and alignment padding.
+    const uintptr_t slot_start =
+        reservation_start + PartitionPageSize() + padding_for_alignment;
+
+    {
+      ScopedSyscallTimer timer{root};
+      RecommitSystemPages(reservation_start + SystemPageSize(),
+                          SystemPageSize(),
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+                          root->PageAccessibilityWithThreadIsolationIfEnabled(
+                              PageAccessibilityConfiguration::kRead),
+#else
+                          root->PageAccessibilityWithThreadIsolationIfEnabled(
+                              PageAccessibilityConfiguration::kReadWrite),
+#endif
+                          PageAccessibilityDisposition::kRequireUpdate);
+    }
+
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+    // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
+    // used, allocate a SystemPage for RefCount "bitmap" (only one of its
+    // elements will be used).
+    if (pool == kBRPPoolHandle) {
+      ScopedSyscallTimer timer{root};
+      RecommitSystemPages(reservation_start + SystemPageSize() * 2,
+                          SystemPageSize(),
+                          root->PageAccessibilityWithThreadIsolationIfEnabled(
+                              PageAccessibilityConfiguration::kReadWrite),
+                          PageAccessibilityDisposition::kRequireUpdate);
+    }
+#endif
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+    {
+      ScopedSyscallTimer timer{root};
+      RecommitSystemPages(ShadowMetadataStart(reservation_start, pool),
+                          SystemPageSize(),
+                          root->PageAccessibilityWithThreadIsolationIfEnabled(
+                              PageAccessibilityConfiguration::kReadWrite),
+                          PageAccessibilityDisposition::kRequireUpdate);
+    }
+#endif
+
+    // No need to hold root->lock_. Now that memory is reserved, no other
+    // overlapping region can be allocated (because of how pools work),
+    // so no other thread can update the same offset table entries at the
+    // same time. Furthermore, nobody will be reading these offsets until this
+    // function returns.
+    auto* offset_ptr = ReservationOffsetPointer(reservation_start);
+    [[maybe_unused]] const auto* offset_ptr_end =
+        GetReservationOffsetTableEnd(reservation_start);
+
+    // |raw_size| > MaxBucketed(). So |reservation_size| > 0.
+    PA_DCHECK(reservation_size > 0);
+    const uint16_t offset_end = (reservation_size - 1) >> kSuperPageShift;
+    for (uint16_t offset = 0; offset <= offset_end; ++offset) {
+      PA_DCHECK(offset < kOffsetTagNormalBuckets);
+      PA_DCHECK(offset_ptr < offset_ptr_end);
+      *offset_ptr++ = offset;
+    }
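+    // For illustration of the loop above, assuming 2 MiB super pages: a
+    // direct-map reservation spanning three super pages gets the entries
+    // {0, 1, 2}, so any address within the mapping can be walked back to
+    // |reservation_start| by subtracting (offset << kSuperPageShift).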
+
+    auto* super_page_extent = PartitionSuperPageToExtent(reservation_start);
+    super_page_extent->root = root;
+    // The new structures are all located inside a fresh system page so they
+    // will all be zeroed out. These DCHECKs are for documentation and to assert
+    // our expectations of the kernel.
+    PA_DCHECK(!super_page_extent->number_of_consecutive_super_pages);
+    PA_DCHECK(!super_page_extent->next);
+
+    PartitionPage* first_page =
+        reinterpret_cast<PartitionPage*>(super_page_extent) + 1;
+    page = PartitionPage::FromAddr(slot_start);
+    // |first_page| and |page| may be equal, if there is no alignment padding.
+    if (page != first_page) {
+      PA_DCHECK(page > first_page);
+      PA_DCHECK(page - first_page <= PartitionPage::kMaxSlotSpanMetadataOffset);
+      PA_CHECK(!first_page->is_valid);
+      first_page->has_valid_span_after_this = true;
+      first_page->slot_span_metadata_offset = page - first_page;
+    }
+    auto* metadata = reinterpret_cast<PartitionDirectMapMetadata*>(page);
+    // Since direct map metadata is larger than PartitionPage, make sure the
+    // first and the last bytes are on the same system page, i.e. within the
+    // super page metadata region.
+    PA_DCHECK(base::bits::AlignDown(reinterpret_cast<uintptr_t>(metadata),
+                                    SystemPageSize()) ==
+              base::bits::AlignDown(reinterpret_cast<uintptr_t>(metadata) +
+                                        sizeof(PartitionDirectMapMetadata) - 1,
+                                    SystemPageSize()));
+    PA_DCHECK(page == &metadata->page);
+    page->is_valid = true;
+    PA_DCHECK(!page->has_valid_span_after_this);
+    PA_DCHECK(!page->slot_span_metadata_offset);
+    PA_DCHECK(!page->slot_span_metadata.next_slot_span);
+    PA_DCHECK(!page->slot_span_metadata.marked_full);
+    PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
+    PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
+    PA_DCHECK(!page->slot_span_metadata.in_empty_cache());
+
+    PA_DCHECK(!metadata->subsequent_page.subsequent_page_metadata.raw_size);
+    // Raw size is set later, by the caller.
+    metadata->subsequent_page.slot_span_metadata_offset = 1;
+
+    PA_DCHECK(!metadata->bucket.active_slot_spans_head);
+    PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
+    PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
+    PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
+    PA_DCHECK(!metadata->bucket.num_full_slot_spans);
+    metadata->bucket.slot_size = slot_size;
+
+    new (&page->slot_span_metadata) SlotSpanMetadata(&metadata->bucket);
+
+    // It is typically possible to map a large range of inaccessible pages, and
+    // this is leveraged in multiple places, including the pools. However,
+    // this doesn't mean that we can commit all this memory.  For the vast
+    // majority of allocations, this just means that we crash in a slightly
+    // different place, but for callers ready to handle failures, we have to
+    // return nullptr. See crbug.com/1187404.
+    //
+    // Note that we didn't check above, because if we cannot even commit a
+    // single page, then this is likely hopeless anyway, and we will crash very
+    // soon.
+    //
+    // Direct map never uses tagging, as size is always >kMaxMemoryTaggingSize.
+    PA_DCHECK(raw_size > kMaxMemoryTaggingSize);
+    const bool ok = root->TryRecommitSystemPagesForData(
+        slot_start, slot_size, PageAccessibilityDisposition::kRequireUpdate,
+        false);
+    if (!ok) {
+      if (!return_null) {
+        PartitionOutOfMemoryCommitFailure(root, slot_size);
+      }
+
+      {
+        ScopedSyscallTimer timer{root};
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+        AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
+                                                     reservation_size);
+#endif
+        AddressPoolManager::GetInstance().UnreserveAndDecommit(
+            pool, reservation_start, reservation_size);
+      }
+
+      root->total_size_of_direct_mapped_pages.fetch_sub(
+          reservation_size, std::memory_order_relaxed);
+
+      return nullptr;
+    }
+
+    auto* next_entry = EncodedNextFreelistEntry::EmplaceAndInitNull(slot_start);
+    page->slot_span_metadata.SetFreelistHead(next_entry);
+
+    map_extent = &metadata->direct_map_extent;
+    map_extent->reservation_size = reservation_size;
+    map_extent->padding_for_alignment = padding_for_alignment;
+    map_extent->bucket = &metadata->bucket;
+  }
+
+  PartitionRootLock(root).AssertAcquired();
+
+  // Maintain the doubly-linked list of all direct mappings.
+  map_extent->next_extent = root->direct_map_list;
+  if (map_extent->next_extent) {
+    map_extent->next_extent->prev_extent = map_extent;
+  }
+  map_extent->prev_extent = nullptr;
+  root->direct_map_list = map_extent;
+
+  return &page->slot_span_metadata;
+}
+
+uint8_t ComputeSystemPagesPerSlotSpanPreferSmall(size_t slot_size) {
+  if (slot_size > MaxRegularSlotSpanSize()) {
+    // This is technically not needed, as for now all the larger slot sizes are
+    // multiples of the system page size.
+    return base::bits::AlignUp(slot_size, SystemPageSize()) / SystemPageSize();
+  }
+
+  // Smaller slot spans waste less address space and can also lower
+  // fragmentation:
+  // - Address space: This comes from fuller SuperPages (since the tail end of a
+  //   SuperPage is more likely to be used when the slot span is smaller). Also,
+  //   if a slot span is partially used, a smaller slot span will use less
+  //   address space.
+  // - In-slot fragmentation: Slot span management code will prioritize
+  //   almost-full slot spans, as well as trying to keep empty slot spans
+  //   empty. The more granularly this logic can operate, the better.
+  //
+  // Since metadata space overhead is constant per-PartitionPage, keeping
+  // smaller slot spans makes sense.
+  //
+  // Underlying memory allocation is done per-PartitionPage, but memory commit
+  // is done per system page. This means that we prefer to fill the entirety of
+  // a PartitionPage with a slot span, but we can tolerate some system pages
+  // being empty at the end, as these will not cost committed or dirty memory.
+  //
+  // The choice below is, for multi-slot slot spans:
+  // - If a full PartitionPage slot span is possible with less than 2% of a
+  //   *single* system page wasted, use it. The smallest possible size wins.
+  // - Otherwise, select the size with the smallest virtual address space
+  //   loss. Allow a SlotSpan to leave some slack in its PartitionPage, up to
+  //   1/4 of the total.
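+  //
+  // For illustration, assuming 4 KiB system pages and 16 KiB partition pages:
+  // for slot_size = 320, a full PartitionPage holds 51 slots and wastes
+  // 16384 % 320 = 64 bytes, which is under 2% of a single system page
+  // (81.92 bytes), so the loop below picks one PartitionPage (4 system pages).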
+  for (size_t partition_page_count = 1;
+       partition_page_count <= kMaxPartitionPagesPerRegularSlotSpan;
+       partition_page_count++) {
+    size_t candidate_size = partition_page_count * PartitionPageSize();
+    size_t waste = candidate_size % slot_size;
+    if (waste <= .02 * SystemPageSize()) {
+      return partition_page_count * NumSystemPagesPerPartitionPage();
+    }
+  }
+
+  size_t best_count = 0;
+  size_t best_waste = std::numeric_limits<size_t>::max();
+  for (size_t partition_page_count = 1;
+       partition_page_count <= kMaxPartitionPagesPerRegularSlotSpan;
+       partition_page_count++) {
+    // Prefer no slack.
+    for (size_t slack = 0; slack < partition_page_count; slack++) {
+      size_t system_page_count =
+          partition_page_count * NumSystemPagesPerPartitionPage() - slack;
+      size_t candidate_size = system_page_count * SystemPageSize();
+      size_t waste = candidate_size % slot_size;
+      if (waste < best_waste) {
+        best_waste = waste;
+        best_count = system_page_count;
+      }
+    }
+  }
+  return best_count;
+}
+
+uint8_t ComputeSystemPagesPerSlotSpanInternal(size_t slot_size) {
+  // This works out reasonably for the current bucket sizes of the generic
+  // allocator, and the current values of partition page size and constants.
+  // Specifically, we have enough room to always pack the slots perfectly into
+  // some number of system pages. The only waste is the waste associated with
+  // unfaulted pages (i.e. wasted address space).
+  // TODO: we end up using a lot of system pages for very small sizes. For
+  // example, we'll use 12 system pages for slot size 24. The slot size is so
+  // small that the waste would be tiny with just 4, or 1, system pages.  Later,
+  // we can investigate whether there are anti-fragmentation benefits to using
+  // fewer system pages.
+  double best_waste_ratio = 1.0f;
+  uint16_t best_pages = 0;
+  if (slot_size > MaxRegularSlotSpanSize()) {
+    // TODO(ajwong): Why is there a DCHECK here for this?
+    // http://crbug.com/776537
+    PA_DCHECK(!(slot_size % SystemPageSize()));
+    best_pages = static_cast<uint16_t>(slot_size >> SystemPageShift());
+    PA_CHECK(best_pages <= std::numeric_limits<uint8_t>::max());
+    return static_cast<uint8_t>(best_pages);
+  }
+  PA_DCHECK(slot_size <= MaxRegularSlotSpanSize());
+  for (uint16_t i = NumSystemPagesPerPartitionPage() - 1;
+       i <= MaxSystemPagesPerRegularSlotSpan(); ++i) {
+    size_t page_size = i << SystemPageShift();
+    size_t num_slots = page_size / slot_size;
+    size_t waste = page_size - (num_slots * slot_size);
+    // Leaving a page unfaulted is not free; the page will occupy an empty page
+    // table entry.  Make a simple attempt to account for that.
+    //
+    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
+    // regardless of whether or not they are wasted. Should it just
+    // be waste += i * sizeof(void*)?
+    // http://crbug.com/776537
+    size_t num_remainder_pages = i & (NumSystemPagesPerPartitionPage() - 1);
+    size_t num_unfaulted_pages =
+        num_remainder_pages
+            ? (NumSystemPagesPerPartitionPage() - num_remainder_pages)
+            : 0;
+    waste += sizeof(void*) * num_unfaulted_pages;
+    double waste_ratio =
+        static_cast<double>(waste) / static_cast<double>(page_size);
+    if (waste_ratio < best_waste_ratio) {
+      best_waste_ratio = waste_ratio;
+      best_pages = i;
+    }
+  }
+  PA_DCHECK(best_pages > 0);
+  PA_CHECK(best_pages <= MaxSystemPagesPerRegularSlotSpan());
+  return static_cast<uint8_t>(best_pages);
+}
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+// Returns the size that should be tagged, skipping the previous slot's ref
+// count (if it exists) to avoid a race (crbug.com/1445816).
+PA_ALWAYS_INLINE size_t TagSizeForSlot(PartitionRoot* root, size_t slot_size) {
+#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+  return slot_size - root->settings.ref_count_size;
+#else
+  return slot_size;
+#endif
+}
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+}  // namespace
+
+uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
+                                      bool prefer_smaller_slot_spans) {
+  if (prefer_smaller_slot_spans) {
+    size_t system_page_count =
+        ComputeSystemPagesPerSlotSpanPreferSmall(slot_size);
+    size_t waste = (system_page_count * SystemPageSize()) % slot_size;
+    // In case the waste is too large (more than 5% of a page), don't try to use
+    // the "small" slot span formula. This happens when we have a lot of
+    // buckets; in some cases the formula doesn't find a nice, small size.
+    if (waste <= .05 * SystemPageSize()) {
+      return system_page_count;
+    }
+  }
+
+  return ComputeSystemPagesPerSlotSpanInternal(slot_size);
+}
+
+void PartitionBucket::Init(uint32_t new_slot_size) {
+  slot_size = new_slot_size;
+  slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
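+  // |slot_size_reciprocal| is a fixed-point approximation of 1 / slot_size
+  // (scaled by kReciprocalMask + 1), presumably so that offset / slot_size can
+  // be computed elsewhere as a multiply-and-shift; the "+ 1" rounds the
+  // reciprocal up so the truncated result never undershoots.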
+  active_slot_spans_head = SlotSpanMetadata::get_sentinel_slot_span_non_const();
+  empty_slot_spans_head = nullptr;
+  decommitted_slot_spans_head = nullptr;
+  num_full_slot_spans = 0;
+  bool prefer_smaller_slot_spans =
+#if PA_CONFIG(PREFER_SMALLER_SLOT_SPANS)
+      true
+#else
+      false
+#endif
+      ;
+  num_system_pages_per_slot_span =
+      ComputeSystemPagesPerSlotSpan(slot_size, prefer_smaller_slot_spans);
+}
+
+PA_ALWAYS_INLINE SlotSpanMetadata* PartitionBucket::AllocNewSlotSpan(
+    PartitionRoot* root,
+    AllocFlags flags,
+    size_t slot_span_alignment) {
+  PA_DCHECK(!(root->next_partition_page % PartitionPageSize()));
+  PA_DCHECK(!(root->next_partition_page_end % PartitionPageSize()));
+
+  size_t num_partition_pages = get_pages_per_slot_span();
+  size_t slot_span_reservation_size = num_partition_pages
+                                      << PartitionPageShift();
+  size_t slot_span_committed_size = get_bytes_per_span();
+  PA_DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
+  PA_DCHECK(slot_span_committed_size % SystemPageSize() == 0);
+  PA_DCHECK(slot_span_committed_size <= slot_span_reservation_size);
+
+  uintptr_t adjusted_next_partition_page =
+      base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
+  if (PA_UNLIKELY(adjusted_next_partition_page + slot_span_reservation_size >
+                  root->next_partition_page_end)) {
+    // AllocNewSuperPage() may crash (e.g. address space exhaustion), so put
+    // data on the stack.
+    PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
+    PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
+
+    // In this case, we can no longer hand out pages from the current super page
+    // allocation. Get a new super page.
+    if (!AllocNewSuperPage(root, flags)) {
+      return nullptr;
+    }
+    // AllocNewSuperPage() updates root->next_partition_page, re-query.
+    adjusted_next_partition_page =
+        base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
+    PA_CHECK(adjusted_next_partition_page + slot_span_reservation_size <=
+             root->next_partition_page_end);
+  }
+
+  auto* gap_start_page = PartitionPage::FromAddr(root->next_partition_page);
+  auto* gap_end_page = PartitionPage::FromAddr(adjusted_next_partition_page);
+  for (auto* page = gap_start_page; page < gap_end_page; ++page) {
+    PA_DCHECK(!page->is_valid);
+    page->has_valid_span_after_this = 1;
+  }
+  root->next_partition_page =
+      adjusted_next_partition_page + slot_span_reservation_size;
+
+  uintptr_t slot_span_start = adjusted_next_partition_page;
+  auto* slot_span = &gap_end_page->slot_span_metadata;
+  InitializeSlotSpan(slot_span);
+  // Now that slot span is initialized, it's safe to call FromSlotStart.
+  PA_DCHECK(slot_span == SlotSpanMetadata::FromSlotStart(slot_span_start));
+
+  // System pages in the super page come in a decommitted state. Commit them
+  // before vending them back.
+  // If lazy commit is enabled, pages will be committed when provisioning slots,
+  // in ProvisionMoreSlotsAndAllocOne(), not here.
+  if (!kUseLazyCommit) {
+    PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
+    PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
+    PA_DEBUG_DATA_ON_STACK("spancmt", slot_span_committed_size);
+
+    root->RecommitSystemPagesForData(
+        slot_span_start, slot_span_committed_size,
+        PageAccessibilityDisposition::kRequireUpdate,
+        slot_size <= kMaxMemoryTaggingSize);
+  }
+
+  PA_CHECK(get_slots_per_span() <= kMaxSlotsPerSlotSpan);
+
+  // Double check that we had enough space in the super page for the new slot
+  // span.
+  PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
+
+  return slot_span;
+}
+
+uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
+                                                 size_t super_page_count,
+                                                 AllocFlags flags) {
+  PA_CHECK(super_page_count > 0);
+  PA_CHECK(super_page_count <=
+           std::numeric_limits<size_t>::max() / kSuperPageSize);
+  // Need a new super page. We want to allocate super pages in a contiguous
+  // address region as much as possible. This is important for not causing
+  // page table bloat and not fragmenting address spaces on 32-bit
+  // architectures.
+  uintptr_t requested_address = root->next_super_page;
+  pool_handle pool = root->ChoosePool();
+  uintptr_t super_page_span_start = ReserveMemoryFromPool(
+      pool, requested_address, super_page_count * kSuperPageSize);
+  if (PA_UNLIKELY(!super_page_span_start)) {
+    if (ContainsFlags(flags, AllocFlags::kReturnNull)) {
+      return 0;
+    }
+
+    // Didn't manage to get a new uncommitted super page -> address space issue.
+    ::partition_alloc::internal::ScopedUnlockGuard unlock{
+        PartitionRootLock(root)};
+    PartitionOutOfMemoryMappingFailure(root, kSuperPageSize);
+  }
+
+  uintptr_t super_page_span_end =
+      super_page_span_start + super_page_count * kSuperPageSize;
+  for (uintptr_t super_page = super_page_span_start;
+       super_page < super_page_span_end; super_page += kSuperPageSize) {
+    InitializeSuperPage(root, super_page, 0);
+  }
+  return super_page_span_start;
+}
+
+PA_ALWAYS_INLINE uintptr_t
+PartitionBucket::AllocNewSuperPage(PartitionRoot* root, AllocFlags flags) {
+  auto super_page = AllocNewSuperPageSpan(root, 1, flags);
+  if (PA_UNLIKELY(!super_page)) {
+    // If the `kReturnNull` flag isn't set and the allocation attempt fails,
+    // `AllocNewSuperPageSpan` should've failed with an OOM crash.
+    PA_DCHECK(ContainsFlags(flags, AllocFlags::kReturnNull));
+    return 0;
+  }
+  return SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed());
+}
+
+PA_ALWAYS_INLINE uintptr_t
+PartitionBucket::InitializeSuperPage(PartitionRoot* root,
+                                     uintptr_t super_page,
+                                     uintptr_t requested_address) {
+  *ReservationOffsetPointer(super_page) = kOffsetTagNormalBuckets;
+
+  root->total_size_of_super_pages.fetch_add(kSuperPageSize,
+                                            std::memory_order_relaxed);
+
+  root->next_super_page = super_page + kSuperPageSize;
+  uintptr_t state_bitmap =
+      super_page + PartitionPageSize() +
+      (is_direct_mapped() ? 0 : ReservedFreeSlotBitmapSize());
+#if BUILDFLAG(USE_STARSCAN)
+  PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
+  const size_t state_bitmap_reservation_size =
+      root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
+  const size_t state_bitmap_size_to_commit =
+      root->IsQuarantineAllowed() ? CommittedStateBitmapSize() : 0;
+  PA_DCHECK(state_bitmap_reservation_size % PartitionPageSize() == 0);
+  PA_DCHECK(state_bitmap_size_to_commit % SystemPageSize() == 0);
+  PA_DCHECK(state_bitmap_size_to_commit <= state_bitmap_reservation_size);
+  uintptr_t payload = state_bitmap + state_bitmap_reservation_size;
+#else
+  uintptr_t payload = state_bitmap;
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+  root->next_partition_page = payload;
+  root->next_partition_page_end = root->next_super_page - PartitionPageSize();
+  PA_DCHECK(payload ==
+            SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed()));
+  PA_DCHECK(root->next_partition_page_end == SuperPagePayloadEnd(super_page));
+
+  // Keep the first partition page in the super page inaccessible to serve as a
+  // guard page, except an "island" in the middle where we put page metadata and
+  // also a tiny amount of extent metadata.
+  {
+    ScopedSyscallTimer timer{root};
+    RecommitSystemPages(super_page + SystemPageSize(), SystemPageSize(),
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+                        root->PageAccessibilityWithThreadIsolationIfEnabled(
+                            PageAccessibilityConfiguration::kRead),
+#else
+                        root->PageAccessibilityWithThreadIsolationIfEnabled(
+                            PageAccessibilityConfiguration::kReadWrite),
+#endif
+                        PageAccessibilityDisposition::kRequireUpdate);
+  }
+
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+  // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
+  // used, allocate a SystemPage for RefCount bitmap.
+  if (root->ChoosePool() == kBRPPoolHandle) {
+    ScopedSyscallTimer timer{root};
+    RecommitSystemPages(super_page + SystemPageSize() * 2, SystemPageSize(),
+                        root->PageAccessibilityWithThreadIsolationIfEnabled(
+                            PageAccessibilityConfiguration::kReadWrite),
+                        PageAccessibilityDisposition::kRequireUpdate);
+  }
+#endif
+
+#if PA_CONFIG(ENABLE_SHADOW_METADATA)
+  {
+    ScopedSyscallTimer timer{root};
+    RecommitSystemPages(ShadowMetadataStart(super_page, root->ChoosePool()),
+                        SystemPageSize(),
+                        root->PageAccessibilityWithThreadIsolationIfEnabled(
+                            PageAccessibilityConfiguration::kReadWrite),
+                        PageAccessibilityDisposition::kRequireUpdate);
+  }
+#endif
+
+  // If we were after a specific address, but didn't get it, assume that
+  // the system chose a lousy address. Here, most OSes have a default
+  // algorithm that isn't randomized. For example, most Linux
+  // distributions will allocate the mapping directly before the last
+  // successful mapping, which is far from random. So we just get fresh
+  // randomness for the next mapping attempt.
+  if (requested_address && requested_address != super_page) {
+    root->next_super_page = 0;
+  }
+
+  // We allocated a new super page so update super page metadata.
+  // First check if this is a new extent or not.
+  auto* latest_extent = PartitionSuperPageToExtent(super_page);
+  // By storing the root in every extent metadata object, we have a fast way
+  // to go from a pointer within the partition to the root object.
+  latest_extent->root = root;
+  // Most new extents will be part of a larger extent, and these two fields
+  // are unused, but we initialize them to 0 so that we get a clear signal
+  // in case they are accidentally used.
+  latest_extent->number_of_consecutive_super_pages = 0;
+  latest_extent->next = nullptr;
+  latest_extent->number_of_nonempty_slot_spans = 0;
+
+  PartitionSuperPageExtentEntry* current_extent = root->current_extent;
+  const bool is_new_extent = super_page != requested_address;
+  if (PA_UNLIKELY(is_new_extent)) {
+    if (PA_UNLIKELY(!current_extent)) {
+      PA_DCHECK(!root->first_extent);
+      root->first_extent = latest_extent;
+    } else {
+      PA_DCHECK(current_extent->number_of_consecutive_super_pages);
+      current_extent->next = latest_extent;
+    }
+    root->current_extent = latest_extent;
+    latest_extent->number_of_consecutive_super_pages = 1;
+  } else {
+    // We allocated next to an existing extent so just nudge the size up a
+    // little.
+    PA_DCHECK(current_extent->number_of_consecutive_super_pages);
+    ++current_extent->number_of_consecutive_super_pages;
+    PA_DCHECK(payload > SuperPagesBeginFromExtent(current_extent) &&
+              payload < SuperPagesEndFromExtent(current_extent));
+  }
+
+  // If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
+  // and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
+  // sure to register the super-page after it has been fully initialized.
+  // Otherwise, the concurrent scanner may try to access |extent->root| which
+  // might not be initialized yet.
+#if BUILDFLAG(USE_STARSCAN)
+  if (root->IsQuarantineEnabled()) {
+    {
+      ScopedSyscallTimer timer{root};
+      RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
+                          root->PageAccessibilityWithThreadIsolationIfEnabled(
+                              PageAccessibilityConfiguration::kReadWrite),
+                          PageAccessibilityDisposition::kRequireUpdate);
+    }
+    PCScan::RegisterNewSuperPage(root, super_page);
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  // Commit the pages for freeslot bitmap.
+  if (!is_direct_mapped()) {
+    uintptr_t freeslot_bitmap_addr = super_page + PartitionPageSize();
+    PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr);
+    ScopedSyscallTimer timer{root};
+    RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(),
+                        root->PageAccessibilityWithThreadIsolationIfEnabled(
+                            PageAccessibilityConfiguration::kReadWrite),
+                        PageAccessibilityDisposition::kRequireUpdate);
+  }
+#endif
+
+  return payload;
+}
+
+PA_ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(
+    SlotSpanMetadata* slot_span) {
+  new (slot_span) SlotSpanMetadata(this);
+
+  slot_span->Reset();
+
+  uint16_t num_partition_pages = get_pages_per_slot_span();
+  auto* page = reinterpret_cast<PartitionPage*>(slot_span);
+  for (uint16_t i = 0; i < num_partition_pages; ++i, ++page) {
+    PA_DCHECK(i <= PartitionPage::kMaxSlotSpanMetadataOffset);
+    page->slot_span_metadata_offset = i;
+    page->is_valid = true;
+  }
+}
+
+PA_ALWAYS_INLINE uintptr_t
+PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
+                                               SlotSpanMetadata* slot_span) {
+  PA_DCHECK(slot_span != SlotSpanMetadata::get_sentinel_slot_span());
+  size_t num_slots = slot_span->num_unprovisioned_slots;
+  PA_DCHECK(num_slots);
+  PA_DCHECK(num_slots <= get_slots_per_span());
+  // We should only get here when _every_ slot is either used or unprovisioned.
+  // (The third possible state is "on the freelist". If we have a non-empty
+  // freelist, we should not get here.)
+  PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
+  // Similarly, make explicitly sure that the freelist is empty.
+  PA_DCHECK(!slot_span->get_freelist_head());
+  PA_DCHECK(!slot_span->is_full());
+
+  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
+  // If we got here, the first unallocated slot is either partially or fully on
+  // an uncommitted page. If the latter, it must be at the start of that page.
+  uintptr_t return_slot =
+      slot_span_start + (slot_size * slot_span->num_allocated_slots);
+  uintptr_t next_slot = return_slot + slot_size;
+  uintptr_t commit_start = base::bits::AlignUp(return_slot, SystemPageSize());
+  PA_DCHECK(next_slot > commit_start);
+  uintptr_t commit_end = base::bits::AlignUp(next_slot, SystemPageSize());
+  // If the slot was partially committed, |return_slot| and |next_slot| fall
+  // in different pages. If the slot was fully uncommitted, |return_slot| points
+  // to the page start and |next_slot| doesn't, thus only the latter gets
+  // rounded up.
+  PA_DCHECK(commit_end > commit_start);
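+  // Worked example (illustrative only, not from the original source), assuming
+  // SystemPageSize() == 4096, slot_size == 3008 and num_allocated_slots == 1,
+  // with offsets relative to the (page-aligned) slot span start: return_slot
+  // is at offset 3008, so commit_start == 4096; next_slot is at 6016, so
+  // commit_end == 8192. Pages [4096, 8192) get committed, and
+  // slots_to_provision (computed below) == (8192 - 3008) / 3008 == 1, i.e.
+  // only the returned slot is provisioned and no freelist entry is added.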
+
+  // The slot being returned is considered allocated.
+  slot_span->num_allocated_slots++;
+  // Round down, because a slot that doesn't fully fit in the new page(s) isn't
+  // provisioned.
+  size_t slots_to_provision = (commit_end - return_slot) / slot_size;
+  slot_span->num_unprovisioned_slots -= slots_to_provision;
+  PA_DCHECK(slot_span->num_allocated_slots +
+                slot_span->num_unprovisioned_slots <=
+            get_slots_per_span());
+
+  // If lazy commit is enabled, meaning system pages in the slot span come
+  // in an initially decommitted state, commit them here.
+  // Note, we can't use PageAccessibilityDisposition::kAllowKeepForPerf, because
+  // we have no knowledge of which pages have been committed before (it doesn't
+  // matter on Windows anyway).
+  if (kUseLazyCommit) {
+    // TODO(lizeb): Handle commit failure.
+    root->RecommitSystemPagesForData(
+        commit_start, commit_end - commit_start,
+        PageAccessibilityDisposition::kRequireUpdate,
+        slot_size <= kMaxMemoryTaggingSize);
+  }
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  const bool use_tagging =
+      root->IsMemoryTaggingEnabled() && slot_size <= kMaxMemoryTaggingSize;
+  if (PA_LIKELY(use_tagging)) {
+    // Ensure the MTE-tag of the memory pointed to by |return_slot| is
+    // unguessable.
+    TagMemoryRangeRandomly(return_slot, TagSizeForSlot(root, slot_size));
+  }
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+  // Add all slots that fit within the pages committed so far to the free list.
+  EncodedNextFreelistEntry* prev_entry = nullptr;
+  uintptr_t next_slot_end = next_slot + slot_size;
+  size_t free_list_entries_added = 0;
+  while (next_slot_end <= commit_end) {
+    void* next_slot_ptr;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+    if (PA_LIKELY(use_tagging)) {
+      // Ensure the MTE-tags of the memory pointed to by the other provisioned
+      // slots are unguessable. They will be returned to the app as is, and the
+      // MTE-tag will only change upon calling Free().
+      next_slot_ptr =
+          TagMemoryRangeRandomly(next_slot, TagSizeForSlot(root, slot_size));
+    } else {
+      // No MTE-tagging for larger slots, just cast.
+      next_slot_ptr = reinterpret_cast<void*>(next_slot);
+    }
+#else  // PA_CONFIG(HAS_MEMORY_TAGGING)
+    next_slot_ptr = reinterpret_cast<void*>(next_slot);
+#endif
+    auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
+    if (!slot_span->get_freelist_head()) {
+      PA_DCHECK(!prev_entry);
+      PA_DCHECK(!free_list_entries_added);
+      slot_span->SetFreelistHead(entry);
+    } else {
+      PA_DCHECK(free_list_entries_added);
+      prev_entry->SetNext(entry);
+    }
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+    FreeSlotBitmapMarkSlotAsFree(next_slot);
+#endif
+    next_slot = next_slot_end;
+    next_slot_end = next_slot + slot_size;
+    prev_entry = entry;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    free_list_entries_added++;
+#endif
+  }
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  FreeSlotBitmapMarkSlotAsFree(return_slot);
+#endif
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // The only provisioned slot not added to the free list is the one being
+  // returned.
+  PA_DCHECK(slots_to_provision == free_list_entries_added + 1);
+  // We didn't necessarily provision more than one slot (e.g. if |slot_size|
+  // is large), meaning that |slot_span->freelist_head| can be nullptr.
+  if (slot_span->get_freelist_head()) {
+    PA_DCHECK(free_list_entries_added);
+    slot_span->get_freelist_head()->CheckFreeList(slot_size);
+  }
+#endif
+
+  // We had no free slots, and created some (potentially 0) in sorted order.
+  slot_span->set_freelist_sorted();
+
+  return return_slot;
+}
+
+bool PartitionBucket::SetNewActiveSlotSpan() {
+  SlotSpanMetadata* slot_span = active_slot_spans_head;
+  if (slot_span == SlotSpanMetadata::get_sentinel_slot_span()) {
+    return false;
+  }
+
+  SlotSpanMetadata* next_slot_span;
+
+  // The goal here is to find a suitable slot span in the active list. Suitable
+  // slot spans are |is_active()|, i.e. they either have (a) freelist entries,
+  // or (b) unprovisioned free space. The first case is preferable, since it
+  // doesn't cost a system call, and doesn't cause new memory to become dirty.
+  //
+  // While looking for a new slot span, active list maintenance is performed,
+  // that is:
+  // - Empty and decommitted slot spans are moved to their respective lists.
+  // - Full slot spans are removed from the active list but are not moved
+  //   anywhere. They could be tracked in a separate list, but this would
+  //   increase cost non-trivially. Indeed, a full slot span is likely to become
+  //   non-full at some point (due to a free() hitting it). Since we only have
+  //   space in the metadata for a single linked list pointer, removing the
+  //   newly-non-full slot span from the "full" list would require walking it
+  //   (to know what's before it in the full list).
+  //
+  // Since we prefer slot spans with provisioned freelist entries, maintenance
+  // happens in two stages:
+  // 1. Walk the list to find candidates. Each skipped slot span is moved to
+  //    either:
+  //   - one of the long-lived lists: empty, decommitted
+  //   - the temporary "active slot spans with no freelist entry" list
+  //   - nowhere for full slot spans.
+  // 2. Once we have a candidate:
+  //   - Set it as the new active list head
+  //   - Reattach the temporary list
+  //
+  // Note that in most cases, the whole list will not be walked and maintained
+  // at this stage.
+
+  SlotSpanMetadata* to_provision_head = nullptr;
+  SlotSpanMetadata* to_provision_tail = nullptr;
+
+  for (; slot_span; slot_span = next_slot_span) {
+    next_slot_span = slot_span->next_slot_span;
+    PA_DCHECK(slot_span->bucket == this);
+    PA_DCHECK(slot_span != empty_slot_spans_head);
+    PA_DCHECK(slot_span != decommitted_slot_spans_head);
+
+    if (slot_span->is_active()) {
+      // Has provisioned slots.
+      if (slot_span->get_freelist_head()) {
+        // Will use this slot span, no need to go further.
+        break;
+      } else {
+        // Keeping head and tail because we don't want to reverse the list.
+        if (!to_provision_head) {
+          to_provision_head = slot_span;
+        }
+        if (to_provision_tail) {
+          to_provision_tail->next_slot_span = slot_span;
+        }
+        to_provision_tail = slot_span;
+        slot_span->next_slot_span = nullptr;
+      }
+    } else if (slot_span->is_empty()) {
+      slot_span->next_slot_span = empty_slot_spans_head;
+      empty_slot_spans_head = slot_span;
+    } else if (PA_LIKELY(slot_span->is_decommitted())) {
+      slot_span->next_slot_span = decommitted_slot_spans_head;
+      decommitted_slot_spans_head = slot_span;
+    } else {
+      PA_DCHECK(slot_span->is_full());
+      // Move this slot span... nowhere, and also mark it as full. We need it
+      // marked so that, when a slot in it is freed, we can tell and move it
+      // back into the active list.
+      slot_span->marked_full = 1;
+      ++num_full_slot_spans;
+      // Overflow. Most likely a correctness issue in the code.  It is in theory
+      // possible that the number of full slot spans really reaches (1 << 24),
+      // but this is very unlikely (and not possible with most pool settings).
+      PA_CHECK(num_full_slot_spans);
+      // Not necessary but might help stop accidents.
+      slot_span->next_slot_span = nullptr;
+    }
+  }
+
+  bool usable_active_list_head = false;
+  // Found an active slot span with provisioned entries on the freelist.
+  if (slot_span) {
+    usable_active_list_head = true;
+    // We have active slot spans with unprovisioned entries. Re-attach them into
+    // the active list, past the span with freelist entries.
+    if (to_provision_head) {
+      auto* next = slot_span->next_slot_span;
+      slot_span->next_slot_span = to_provision_head;
+      to_provision_tail->next_slot_span = next;
+    }
+    active_slot_spans_head = slot_span;
+  } else if (to_provision_head) {
+    usable_active_list_head = true;
+    // Need to provision new slots.
+    active_slot_spans_head = to_provision_head;
+  } else {
+    // Active list is now empty.
+    active_slot_spans_head =
+        SlotSpanMetadata::get_sentinel_slot_span_non_const();
+  }
+
+  return usable_active_list_head;
+}
+
+void PartitionBucket::MaintainActiveList() {
+  SlotSpanMetadata* slot_span = active_slot_spans_head;
+  if (slot_span == SlotSpanMetadata::get_sentinel_slot_span()) {
+    return;
+  }
+
+  SlotSpanMetadata* new_active_slot_spans_head = nullptr;
+  SlotSpanMetadata* new_active_slot_spans_tail = nullptr;
+
+  SlotSpanMetadata* next_slot_span;
+  for (; slot_span; slot_span = next_slot_span) {
+    next_slot_span = slot_span->next_slot_span;
+
+    if (slot_span->is_active()) {
+      // Ordering in the active slot span list matters, don't reverse it.
+      if (!new_active_slot_spans_head) {
+        new_active_slot_spans_head = slot_span;
+      }
+      if (new_active_slot_spans_tail) {
+        new_active_slot_spans_tail->next_slot_span = slot_span;
+      }
+      new_active_slot_spans_tail = slot_span;
+      slot_span->next_slot_span = nullptr;
+    } else if (slot_span->is_empty()) {
+      // For the empty and decommitted lists, LIFO ordering makes sense, since
+      // it leads to reusing memory which has been touched relatively recently
+      // (this only matters for committed spans, though).
+      slot_span->next_slot_span = empty_slot_spans_head;
+      empty_slot_spans_head = slot_span;
+    } else if (slot_span->is_decommitted()) {
+      slot_span->next_slot_span = decommitted_slot_spans_head;
+      decommitted_slot_spans_head = slot_span;
+    } else {
+      // Full slot spans are not tracked, just accounted for.
+      PA_DCHECK(slot_span->is_full());
+      slot_span->marked_full = 1;
+      ++num_full_slot_spans;
+      PA_CHECK(num_full_slot_spans);  // Overflow.
+      slot_span->next_slot_span = nullptr;
+    }
+  }
+
+  if (!new_active_slot_spans_head) {
+    new_active_slot_spans_head =
+        SlotSpanMetadata::get_sentinel_slot_span_non_const();
+  }
+  active_slot_spans_head = new_active_slot_spans_head;
+}
+
+void PartitionBucket::SortSmallerSlotSpanFreeLists() {
+  for (auto* slot_span = active_slot_spans_head; slot_span;
+       slot_span = slot_span->next_slot_span) {
+    // No need to sort the freelist if it's already sorted. Note that if the
+    // freelist is sorted, this means that it didn't change at all since the
+    // last call. This may be a good signal to shrink it if possible (if an
+    // entire OS page is free, we can decommit it).
+    //
+    // Besides saving CPU, this also avoids touching memory of fully idle slot
+    // spans, which may require paging.
+    if (slot_span->num_allocated_slots > 0 &&
+        !slot_span->freelist_is_sorted()) {
+      slot_span->SortFreelist();
+    }
+  }
+}
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool CompareSlotSpans(SlotSpanMetadata* a, SlotSpanMetadata* b) {
+  auto criteria_tuple = [](SlotSpanMetadata const* a) {
+    size_t freelist_length = a->GetFreelistLength();
+    // The criteria are, in order (hence the lexicographic comparison below):
+    // 1. Prefer slot spans with freelist entries. The ones without freelist
+    //    entries would be skipped in SetNewActiveSlotSpan() anyway.
+    // 2. Then the ones with the fewest freelist entries. They are either close
+    //    to being full (for the provisioned memory), or close to being pushed
+    //    at the end of the list (since they would not have freelist entries
+    //    anymore, and would either fall into the first case, or be skipped by
+    //    SetNewActiveSlotSpan()).
+    // 3. The ones with the fewest unprovisioned slots, meaning that they are
+    //    close to being completely full.
+    //
+    // Note that this sorting order is not necessarily the best one when slot
+    // spans are partially provisioned. From local testing, in steady-state,
+    // most slot spans are entirely provisioned (or decommitted), which may be a
+    // consequence of the lack of partial slot span decommit, or of fairly
+    // effective fragmentation avoidance heuristics. Make sure to evaluate
+    // whether an alternative sorting order (sorting according to freelist size
+    // + unprovisioned slots) makes more sense.
+    return std::tuple<bool, size_t, size_t>{
+        freelist_length == 0, freelist_length, a->num_unprovisioned_slots};
+  };
+
+  return criteria_tuple(a) < criteria_tuple(b);
+}
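+// Illustrative ordering (not from the original source): under CompareSlotSpans,
+// a span with freelist_length == 2 sorts before one with freelist_length == 5
+// (fuller spans come first), and a span with an empty freelist sorts after
+// both, regardless of its number of unprovisioned slots.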
+
+void PartitionBucket::SortActiveSlotSpans() {
+  // Sorting up to |kMaxSlotSpansToSort| slot spans. This is capped for two
+  // reasons:
+  // - Limiting execution time
+  // - Current code cannot allocate.
+  //
+  // In practice though, it's rare to have that many active slot spans.
+  SlotSpanMetadata* active_spans_array[kMaxSlotSpansToSort];
+  size_t index = 0;
+  SlotSpanMetadata* overflow_spans_start = nullptr;
+
+  for (auto* slot_span = active_slot_spans_head; slot_span;
+       slot_span = slot_span->next_slot_span) {
+    if (index < kMaxSlotSpansToSort) {
+      active_spans_array[index++] = slot_span;
+    } else {
+      // Starting from this one, not sorting the slot spans.
+      overflow_spans_start = slot_span;
+      break;
+    }
+  }
+
+  // We sort the active slot spans so that allocations are preferably serviced
+  // from the fullest ones. This way we hope to reduce fragmentation by keeping
+  // as few slot spans as full as possible.
+  //
+  // With perfect information on allocation lifespan, we would be able to pack
+  // allocations and get almost no fragmentation. This is obviously not the
+  // case, so we have partially full SlotSpans. Nevertheless, as a heuristic we
+  // want to:
+  // - Keep almost-empty slot spans as empty as possible
+  // - Keep mostly-full slot spans as full as possible
+  //
+  // The first part is done in the hope that future free()s will make these
+  // slot spans completely empty, allowing us to reclaim them. To that end, sort
+  // SlotSpans periodically so that the fullest ones are preferred.
+  //
+  // std::sort() is not completely guaranteed to never allocate memory. However,
+  // it is not allowed to throw std::bad_alloc, which constrains the
+  // implementation. In addition, this is protected by the reentrancy guard, so
+  // we would detect such an allocation.
+  std::sort(active_spans_array, active_spans_array + index, CompareSlotSpans);
+
+  active_slot_spans_head = overflow_spans_start;
+
+  // Reverse order, since we insert at the head of the list.
+  for (int i = index - 1; i >= 0; i--) {
+    if (active_spans_array[i] == SlotSpanMetadata::get_sentinel_slot_span()) {
+      // The sentinel is const, don't try to write to it.
+      PA_DCHECK(active_slot_spans_head == nullptr);
+    } else {
+      active_spans_array[i]->next_slot_span = active_slot_spans_head;
+    }
+    active_slot_spans_head = active_spans_array[i];
+  }
+}
+
+uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
+                                         AllocFlags flags,
+                                         size_t raw_size,
+                                         size_t slot_span_alignment,
+                                         bool* is_already_zeroed) {
+  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
+            base::bits::IsPowerOfTwo(slot_span_alignment));
+
+  // The slow path is called when the freelist is empty. The only exception is
+  // when a higher-order alignment is requested, in which case the freelist
+  // logic is bypassed and we go directly for slot span allocation.
+  bool allocate_aligned_slot_span = slot_span_alignment > PartitionPageSize();
+  PA_DCHECK(!active_slot_spans_head->get_freelist_head() ||
+            allocate_aligned_slot_span);
+
+  SlotSpanMetadata* new_slot_span = nullptr;
+  // |new_slot_span->bucket| will always be |this|, except when |this| is the
+  // sentinel bucket, which is used to signal a direct-mapped allocation. In
+  // this case |new_bucket| will be set properly later. This avoids a read for
+  // most allocations.
+  PartitionBucket* new_bucket = this;
+  *is_already_zeroed = false;
+
+  // For the PartitionRoot::Alloc() API, we have a bunch of buckets
+  // marked as special cases. We bounce them through to the slow path so that
+  // we can still have a blazing fast hot path due to lack of corner-case
+  // branches.
+  //
+  // Note: The ordering of the conditionals matters! In particular,
+  // SetNewActiveSlotSpan() has a side effect even when returning false: it
+  // sweeps the active list and may move things into the empty or decommitted
+  // lists, which affects the subsequent conditional.
+  if (PA_UNLIKELY(is_direct_mapped())) {
+    PA_DCHECK(raw_size > kMaxBucketed);
+    PA_DCHECK(this == &root->sentinel_bucket);
+    PA_DCHECK(active_slot_spans_head ==
+              SlotSpanMetadata::get_sentinel_slot_span());
+
+    // No fast path for direct-mapped allocations.
+    if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
+      return 0;
+    }
+
+    new_slot_span =
+        PartitionDirectMap(root, flags, raw_size, slot_span_alignment);
+    if (new_slot_span) {
+      new_bucket = new_slot_span->bucket;
+    }
+    // Memory from PageAllocator is always zeroed.
+    *is_already_zeroed = true;
+  } else if (PA_LIKELY(!allocate_aligned_slot_span && SetNewActiveSlotSpan())) {
+    // First, did we find an active slot span in the active list?
+    new_slot_span = active_slot_spans_head;
+    PA_DCHECK(new_slot_span->is_active());
+  } else if (PA_LIKELY(!allocate_aligned_slot_span &&
+                       (empty_slot_spans_head != nullptr ||
+                        decommitted_slot_spans_head != nullptr))) {
+    // Second, look in our lists of empty and decommitted slot spans.
+    // Check empty slot spans first, which are preferred, but beware that an
+    // empty slot span might have been decommitted.
+    while (PA_LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
+      PA_DCHECK(new_slot_span->bucket == this);
+      PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
+      empty_slot_spans_head = new_slot_span->next_slot_span;
+      // Accept the empty slot span unless it got decommitted.
+      if (new_slot_span->get_freelist_head()) {
+        new_slot_span->next_slot_span = nullptr;
+        new_slot_span->ToSuperPageExtent()
+            ->IncrementNumberOfNonemptySlotSpans();
+
+        // Re-activating an empty slot span, update accounting.
+        size_t dirty_size = base::bits::AlignUp(
+            new_slot_span->GetProvisionedSize(), SystemPageSize());
+        PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
+        root->empty_slot_spans_dirty_bytes -= dirty_size;
+
+        break;
+      }
+      PA_DCHECK(new_slot_span->is_decommitted());
+      new_slot_span->next_slot_span = decommitted_slot_spans_head;
+      decommitted_slot_spans_head = new_slot_span;
+    }
+    if (PA_UNLIKELY(!new_slot_span) &&
+        PA_LIKELY(decommitted_slot_spans_head != nullptr)) {
+      // Commit can be expensive, don't do it.
+      if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
+        return 0;
+      }
+
+      new_slot_span = decommitted_slot_spans_head;
+      PA_DCHECK(new_slot_span->bucket == this);
+      PA_DCHECK(new_slot_span->is_decommitted());
+      decommitted_slot_spans_head = new_slot_span->next_slot_span;
+
+      // If lazy commit is enabled, pages will be recommitted when provisioning
+      // slots, in ProvisionMoreSlotsAndAllocOne(), not here.
+      if (!kUseLazyCommit) {
+        uintptr_t slot_span_start =
+            SlotSpanMetadata::ToSlotSpanStart(new_slot_span);
+        // Since lazy commit isn't used, we have a guarantee that all slot span
+        // pages have been previously committed, and then decommitted using
+        // PageAccessibilityDisposition::kAllowKeepForPerf, so use the
+        // same option as an optimization.
+        // TODO(lizeb): Handle commit failure.
+        root->RecommitSystemPagesForData(
+            slot_span_start, new_slot_span->bucket->get_bytes_per_span(),
+            PageAccessibilityDisposition::kAllowKeepForPerf,
+            slot_size <= kMaxMemoryTaggingSize);
+      }
+
+      new_slot_span->Reset();
+      *is_already_zeroed = DecommittedMemoryIsAlwaysZeroed();
+    }
+    PA_DCHECK(new_slot_span);
+  } else {
+    // Getting a new slot span is expensive, don't do it.
+    if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
+      return 0;
+    }
+
+    // Third. If we get here, we need a brand new slot span.
+    // TODO(bartekn): For single-slot slot spans, we can use rounded raw_size
+    // as slot_span_committed_size.
+    new_slot_span = AllocNewSlotSpan(root, flags, slot_span_alignment);
+    // New memory from PageAllocator is always zeroed.
+    *is_already_zeroed = true;
+  }
+
+  // Bail if we had a memory allocation failure.
+  if (PA_UNLIKELY(!new_slot_span)) {
+    PA_DCHECK(active_slot_spans_head ==
+              SlotSpanMetadata::get_sentinel_slot_span());
+    if (ContainsFlags(flags, AllocFlags::kReturnNull)) {
+      return 0;
+    }
+    // See comment in PartitionDirectMap() for unlocking.
+    ScopedUnlockGuard unlock{PartitionRootLock(root)};
+    root->OutOfMemory(raw_size);
+    PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
+  }
+
+  PA_DCHECK(new_bucket != &root->sentinel_bucket);
+  new_bucket->active_slot_spans_head = new_slot_span;
+  if (new_slot_span->CanStoreRawSize()) {
+    new_slot_span->SetRawSize(raw_size);
+  }
+
+  // If we found an active slot span with free slots, or an empty slot span, we
+  // have a usable freelist head.
+  if (PA_LIKELY(new_slot_span->get_freelist_head() != nullptr)) {
+    EncodedNextFreelistEntry* entry =
+        new_slot_span->PopForAlloc(new_bucket->slot_size);
+
+    // We may have set *is_already_zeroed to true above; make sure that the
+    // freelist entry doesn't contain data. Either way, it wouldn't be a good
+    // idea to let users see our internal data.
+    uintptr_t slot_start = entry->ClearForAllocation();
+    return slot_start;
+  }
+
+  // Otherwise, we need to provision more slots by committing more pages. Build
+  // the free list for the newly provisioned slots.
+  PA_DCHECK(new_slot_span->num_unprovisioned_slots);
+  return ProvisionMoreSlotsAndAllocOne(root, new_slot_span);
+}
+
+uintptr_t PartitionBucket::AllocNewSuperPageSpanForGwpAsan(
+    PartitionRoot* root,
+    size_t super_page_count,
+    AllocFlags flags) {
+  return AllocNewSuperPageSpan(root, super_page_count, flags);
+}
+
+void PartitionBucket::InitializeSlotSpanForGwpAsan(
+    SlotSpanMetadata* slot_span) {
+  InitializeSlotSpan(slot_span);
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h b/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h
new file mode 100644
index 0000000..ff8fca4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h
@@ -0,0 +1,226 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h"
+
+namespace partition_alloc::internal {
+
+constexpr inline int kPartitionNumSystemPagesPerSlotSpanBits = 8;
+
+// Visible for testing.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
+                                      bool prefer_smaller_slot_spans);
+
+// Visible for testing.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool CompareSlotSpans(SlotSpanMetadata* a, SlotSpanMetadata* b);
+
+struct PartitionBucket {
+  // Accessed most in hot path => goes first. Only nullptr for invalid buckets,
+  // may be pointing to the sentinel.
+  SlotSpanMetadata* active_slot_spans_head;
+
+  SlotSpanMetadata* empty_slot_spans_head;
+  SlotSpanMetadata* decommitted_slot_spans_head;
+  uint32_t slot_size;
+  uint32_t num_system_pages_per_slot_span
+      : kPartitionNumSystemPagesPerSlotSpanBits;
+  uint32_t num_full_slot_spans : 24;
+
+  // `slot_size_reciprocal` is used to improve the performance of
+  // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
+  // chosen to provide the desired accuracy. As a result, we can replace a slow
+  // integer division (or modulo) operation with a multiplication and a bit
+  // shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
+  uint64_t slot_size_reciprocal;
+
+  // This is `M` from the formula above. For accurate results, both `value` and
+  // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
+  // than `2 ** (M / 2)`. On the other hand, the result of the expression
+  // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
+  static constexpr uint64_t kReciprocalShift = 42;
+  static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
+  static_assert(
+      kMaxBucketed < (1 << (kReciprocalShift / 2)),
+      "GetSlotOffset may produce an incorrect result when kMaxBucketed is too "
+      "large.");
+
+  static constexpr size_t kMaxSlotSpansToSort = 200;
+
+  // Public API.
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void Init(uint32_t new_slot_size);
+
+  // Sets |is_already_zeroed| to true if the allocation was satisfied by
+  // requesting (a) new page(s) from the operating system, or false otherwise.
+  // This enables an optimization for when callers use
+  // |AllocFlags::kZeroFill|: there is no need to call memset on fresh
+  // pages; the OS has already zeroed them. (See
+  // |PartitionRoot::AllocFromBucket|.)
+  //
+  // Note the matching Free() functions are in SlotSpanMetadata.
+  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
+      SlowPathAlloc(PartitionRoot* root,
+                    AllocFlags flags,
+                    size_t raw_size,
+                    size_t slot_span_alignment,
+                    bool* is_already_zeroed)
+          PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+
+  PA_ALWAYS_INLINE bool CanStoreRawSize() const {
+    // For direct-map as well as single-slot slot spans (recognized by checking
+    // against |MaxRegularSlotSpanSize()|), we have some spare metadata space in
+    // the subsequent PartitionPage to store the raw size. It isn't only about
+    // metadata space, though: slot spans that have more than one slot can't
+    // have the raw size stored, because we wouldn't know which slot it applies
+    // to.
+    if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
+      return false;
+    }
+
+    PA_DCHECK((slot_size % SystemPageSize()) == 0);
+    PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
+
+    return true;
+  }
+
+  // Some buckets are pseudo-buckets, which are disabled because they would
+  // otherwise not fulfill alignment constraints.
+  PA_ALWAYS_INLINE bool is_valid() const {
+    return active_slot_spans_head != nullptr;
+  }
+  PA_ALWAYS_INLINE bool is_direct_mapped() const {
+    return !num_system_pages_per_slot_span;
+  }
+  PA_ALWAYS_INLINE size_t get_bytes_per_span() const {
+    // Cannot overflow, num_system_pages_per_slot_span is a bitfield, and 255
+    // pages fit in a size_t.
+    static_assert(kPartitionNumSystemPagesPerSlotSpanBits <= 8, "");
+    return static_cast<size_t>(num_system_pages_per_slot_span)
+           << SystemPageShift();
+  }
+  PA_ALWAYS_INLINE size_t get_slots_per_span() const {
+    size_t ret = GetSlotNumber(get_bytes_per_span());
+    PA_DCHECK(ret <= kMaxSlotsPerSlotSpan);
+    return ret;
+  }
+  // Returns a natural number of partition pages (calculated by
+  // ComputeSystemPagesPerSlotSpan()) to allocate from the current super page
+  // when the bucket runs out of slots.
+  PA_ALWAYS_INLINE size_t get_pages_per_slot_span() const {
+    // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
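+    // For example (illustrative only): with NumSystemPagesPerPartitionPage()
+    // == 4 and num_system_pages_per_slot_span == 6, this returns
+    // (6 + 3) / 4 == 2 partition pages.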
+    return (num_system_pages_per_slot_span +
+            (NumSystemPagesPerPartitionPage() - 1)) /
+           NumSystemPagesPerPartitionPage();
+  }
+
+  // This helper function scans a bucket's active slot span list for a suitable
+  // new active slot span. When it finds a suitable new active slot span (one
+  // that has free slots and is not empty), it is set as the new active slot
+  // span. If there is no suitable new active slot span, the current active
+  // slot span is set to SlotSpanMetadata::get_sentinel_slot_span(). As
+  // potential slot spans are scanned, they are tidied up according to their
+  // state. Empty slot spans are swept onto the empty list, decommitted slot
+  // spans onto the decommitted list, and full slot spans are unlinked from any
+  // list.
+  //
+  // This is where the guts of the bucket maintenance are done!
+  bool SetNewActiveSlotSpan();
+
+  // Walks the entire active slot span list and performs regular maintenance,
+  // where empty, decommitted and full slot spans are moved to their
+  // steady-state place.
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void MaintainActiveList();
+
+  // Returns a slot number starting from the beginning of the slot span.
+  PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
+    // See the static assertion for `kReciprocalShift` above.
+    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
+    PA_DCHECK(slot_size <= kMaxBucketed);
+
+    const size_t offset_in_slot =
+        ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
+    PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);
+
+    return offset_in_slot;
+  }
+
+  // Sort the freelists of all slot spans.
+  void SortSmallerSlotSpanFreeLists();
+  // Sort the active slot span list in ascending freelist length.
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SortActiveSlotSpans();
+
+  // We need `AllocNewSuperPageSpan` and `InitializeSlotSpan` to stay
+  // PA_ALWAYS_INLINE for speed, but we also need to use them from a separate
+  // compilation unit.
+  uintptr_t AllocNewSuperPageSpanForGwpAsan(PartitionRoot* root,
+                                            size_t super_page_count,
+                                            AllocFlags flags)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+  void InitializeSlotSpanForGwpAsan(SlotSpanMetadata* slot_span);
+
+ private:
+  // Allocates several consecutive super pages. Returns the address of the first
+  // super page.
+  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPageSpan(PartitionRoot* root,
+                                                   size_t super_page_count,
+                                                   AllocFlags flags)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+  // Allocates a new slot span (spanning get_pages_per_slot_span() partition
+  // pages) from the current extent. Metadata within this slot span will be
+  // initialized. Returns nullptr on error.
+  PA_ALWAYS_INLINE SlotSpanMetadata* AllocNewSlotSpan(
+      PartitionRoot* root,
+      AllocFlags flags,
+      size_t slot_span_alignment)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+
+  // Allocates a new super page from the current extent, if possible. All
+  // slot-spans will be in the decommitted state. Returns the address of the
+  // super page's payload, or 0 on error.
+  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot* root,
+                                               AllocFlags flags)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+
+  // Each bucket allocates a slot span when it runs out of slots.
+  // A slot span's size is equal to get_pages_per_slot_span() number of
+  // partition pages. This function initializes all PartitionPage within the
+  // span to point to the first PartitionPage which holds all the metadata
+  // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
+  // as the owner of the span. It does NOT put the slots into the bucket's
+  // freelist.
+  PA_ALWAYS_INLINE void InitializeSlotSpan(SlotSpanMetadata* slot_span);
+
+  // Initializes a super page. Returns the address of the super page's payload.
+  PA_ALWAYS_INLINE uintptr_t InitializeSuperPage(PartitionRoot* root,
+                                                 uintptr_t super_page,
+                                                 uintptr_t requested_address)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+  // Commit 1 or more pages in |slot_span|, enough to get the next slot, which
+  // is returned by this function. If more slots fit into the committed pages,
+  // they'll be added to the free list of the slot span (note that next pointers
+  // are stored inside the slots).
+  // The free list must be empty when calling this function.
+  //
+  // If |slot_span| was freshly allocated, it must have been passed through
+  // InitializeSlotSpan() first.
+  PA_ALWAYS_INLINE uintptr_t
+  ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
+                                SlotSpanMetadata* slot_span)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_bucket_lookup.h b/base/allocator/partition_allocator/src/partition_alloc/partition_bucket_lookup.h
new file mode 100644
index 0000000..0c08630
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_bucket_lookup.h
@@ -0,0 +1,303 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_LOOKUP_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_LOOKUP_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+
+namespace partition_alloc::internal {
+
+// Don't use an anonymous namespace for the constants because it can inhibit
+// collapsing them together, even when they are tagged as inline.
+
+// Precalculate some shift and mask constants used in the hot path.
+// Example: malloc(41) == 101001 in binary.
+// The order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
+// order_index is the next three MSBs == 010 == 2.
+// sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
+// for the sub_order_index).
+constexpr uint8_t OrderIndexShift(uint8_t order) {
+  if (order < kNumBucketsPerOrderBits + 1) {
+    return 0;
+  }
+
+  return order - (kNumBucketsPerOrderBits + 1);
+}
+
+constexpr size_t OrderSubIndexMask(uint8_t order) {
+  if (order == kBitsPerSizeT) {
+    return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
+  }
+
+  return ((static_cast<size_t>(1) << order) - 1) >>
+         (kNumBucketsPerOrderBits + 1);
+}
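+// Illustrative walk-through (not from the original source), continuing the
+// malloc(41) example above and assuming kNumBucketsPerOrderBits == 3:
+//   OrderIndexShift(6)   == 6 - (3 + 1) == 2
+//   order_index          == (41 >> 2) & 0b111 == 0b010 == 2
+//   OrderSubIndexMask(6) == ((1 << 6) - 1) >> 4 == 0b11
+//   sub_order_index      == 41 & 0b11 == 0b01
+// In GetIndexForDenserBuckets() below, the lookup index then becomes
+// (6 << 3) + 2 + !!1 == 51.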
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+#define PA_BITS_PER_SIZE_T 64
+static_assert(kBitsPerSizeT == 64, "");
+#else
+#define PA_BITS_PER_SIZE_T 32
+static_assert(kBitsPerSizeT == 32, "");
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
+    OrderIndexShift(0),  OrderIndexShift(1),  OrderIndexShift(2),
+    OrderIndexShift(3),  OrderIndexShift(4),  OrderIndexShift(5),
+    OrderIndexShift(6),  OrderIndexShift(7),  OrderIndexShift(8),
+    OrderIndexShift(9),  OrderIndexShift(10), OrderIndexShift(11),
+    OrderIndexShift(12), OrderIndexShift(13), OrderIndexShift(14),
+    OrderIndexShift(15), OrderIndexShift(16), OrderIndexShift(17),
+    OrderIndexShift(18), OrderIndexShift(19), OrderIndexShift(20),
+    OrderIndexShift(21), OrderIndexShift(22), OrderIndexShift(23),
+    OrderIndexShift(24), OrderIndexShift(25), OrderIndexShift(26),
+    OrderIndexShift(27), OrderIndexShift(28), OrderIndexShift(29),
+    OrderIndexShift(30), OrderIndexShift(31), OrderIndexShift(32),
+#if PA_BITS_PER_SIZE_T == 64
+    OrderIndexShift(33), OrderIndexShift(34), OrderIndexShift(35),
+    OrderIndexShift(36), OrderIndexShift(37), OrderIndexShift(38),
+    OrderIndexShift(39), OrderIndexShift(40), OrderIndexShift(41),
+    OrderIndexShift(42), OrderIndexShift(43), OrderIndexShift(44),
+    OrderIndexShift(45), OrderIndexShift(46), OrderIndexShift(47),
+    OrderIndexShift(48), OrderIndexShift(49), OrderIndexShift(50),
+    OrderIndexShift(51), OrderIndexShift(52), OrderIndexShift(53),
+    OrderIndexShift(54), OrderIndexShift(55), OrderIndexShift(56),
+    OrderIndexShift(57), OrderIndexShift(58), OrderIndexShift(59),
+    OrderIndexShift(60), OrderIndexShift(61), OrderIndexShift(62),
+    OrderIndexShift(63), OrderIndexShift(64)
+#endif
+};
+
+inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
+    OrderSubIndexMask(0),  OrderSubIndexMask(1),  OrderSubIndexMask(2),
+    OrderSubIndexMask(3),  OrderSubIndexMask(4),  OrderSubIndexMask(5),
+    OrderSubIndexMask(6),  OrderSubIndexMask(7),  OrderSubIndexMask(8),
+    OrderSubIndexMask(9),  OrderSubIndexMask(10), OrderSubIndexMask(11),
+    OrderSubIndexMask(12), OrderSubIndexMask(13), OrderSubIndexMask(14),
+    OrderSubIndexMask(15), OrderSubIndexMask(16), OrderSubIndexMask(17),
+    OrderSubIndexMask(18), OrderSubIndexMask(19), OrderSubIndexMask(20),
+    OrderSubIndexMask(21), OrderSubIndexMask(22), OrderSubIndexMask(23),
+    OrderSubIndexMask(24), OrderSubIndexMask(25), OrderSubIndexMask(26),
+    OrderSubIndexMask(27), OrderSubIndexMask(28), OrderSubIndexMask(29),
+    OrderSubIndexMask(30), OrderSubIndexMask(31), OrderSubIndexMask(32),
+#if PA_BITS_PER_SIZE_T == 64
+    OrderSubIndexMask(33), OrderSubIndexMask(34), OrderSubIndexMask(35),
+    OrderSubIndexMask(36), OrderSubIndexMask(37), OrderSubIndexMask(38),
+    OrderSubIndexMask(39), OrderSubIndexMask(40), OrderSubIndexMask(41),
+    OrderSubIndexMask(42), OrderSubIndexMask(43), OrderSubIndexMask(44),
+    OrderSubIndexMask(45), OrderSubIndexMask(46), OrderSubIndexMask(47),
+    OrderSubIndexMask(48), OrderSubIndexMask(49), OrderSubIndexMask(50),
+    OrderSubIndexMask(51), OrderSubIndexMask(52), OrderSubIndexMask(53),
+    OrderSubIndexMask(54), OrderSubIndexMask(55), OrderSubIndexMask(56),
+    OrderSubIndexMask(57), OrderSubIndexMask(58), OrderSubIndexMask(59),
+    OrderSubIndexMask(60), OrderSubIndexMask(61), OrderSubIndexMask(62),
+    OrderSubIndexMask(63), OrderSubIndexMask(64)
+#endif
+};
+
+// The class used to generate the bucket lookup table at compile-time.
+class BucketIndexLookup final {
+ public:
+  PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForNeutralBuckets(
+      size_t size);
+  PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForDenserBuckets(
+      size_t size);
+  PA_ALWAYS_INLINE static constexpr uint16_t GetIndex(size_t size);
+
+  constexpr BucketIndexLookup() {
+    constexpr uint16_t sentinel_bucket_index = kNumBuckets;
+
+    InitBucketSizes();
+
+    uint16_t* bucket_index_ptr = &bucket_index_lookup_[0];
+    uint16_t bucket_index = 0;
+
+    // Very small allocations, smaller than the first bucketed order ->
+    // everything goes to the first bucket.
+    for (uint8_t order = 0; order < kMinBucketedOrder; ++order) {
+      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
+        *bucket_index_ptr++ = 0;
+      }
+    }
+
+    // Normal buckets.
+    for (uint8_t order = kMinBucketedOrder; order <= kMaxBucketedOrder;
+         ++order) {
+      size_t size = static_cast<size_t>(1) << (order - 1);
+      size_t current_increment = size >> kNumBucketsPerOrderBits;
+      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
+        *bucket_index_ptr++ = bucket_index;
+
+        // For small sizes, buckets are close together (current_increment is
+        // small). For instance, for:
+        // - kAlignment == 16 (which is the case on most 64-bit systems)
+        // - kNumBucketsPerOrder == 4
+        //
+        // The next 3 buckets after 16 are {20, 24, 28}. None of these is a
+        // multiple of kAlignment, so they use the next bucket, which is 32 here.
+        if (size % kAlignment != 0) {
+          PA_DCHECK(bucket_sizes_[bucket_index] > size);
+          // Do not increment bucket_index, since in the example above
+          // current_size may be 20, and bucket_sizes_[bucket_index] == 32.
+        } else {
+          PA_DCHECK(bucket_sizes_[bucket_index] == size);
+          bucket_index++;
+        }
+
+        size += current_increment;
+      }
+    }
+
+    // Direct-mapped, and overflow.
+    for (uint8_t order = kMaxBucketedOrder + 1; order <= kBitsPerSizeT;
+         ++order) {
+      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
+        *bucket_index_ptr++ = sentinel_bucket_index;
+      }
+    }
+
+    // Smaller because some buckets are not valid due to alignment constraints.
+    PA_DCHECK(bucket_index < kNumBuckets);
+    PA_DCHECK(bucket_index_ptr == bucket_index_lookup_ + ((kBitsPerSizeT + 1) *
+                                                          kNumBucketsPerOrder));
+    // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
+    // which tries to overflow to a non-existent order.
+    *bucket_index_ptr = sentinel_bucket_index;
+  }
+  constexpr const size_t* bucket_sizes() const { return &bucket_sizes_[0]; }
+
+ private:
+  constexpr void InitBucketSizes() {
+    size_t current_size = kSmallestBucket;
+    size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
+    size_t* bucket_size = &bucket_sizes_[0];
+    for (size_t i = 0; i < kNumBucketedOrders; ++i) {
+      for (size_t j = 0; j < kNumBucketsPerOrder; ++j) {
+        // All bucket sizes have to be multiples of kAlignment, skip otherwise.
+        if (current_size % kAlignment == 0) {
+          *bucket_size = current_size;
+          ++bucket_size;
+        }
+        current_size += current_increment;
+      }
+      current_increment <<= 1;
+    }
+
+    // The remaining buckets are invalid.
+    while (bucket_size < bucket_sizes_ + kNumBuckets) {
+      *(bucket_size++) = kInvalidBucketSize;
+    }
+  }
+
+  size_t bucket_sizes_[kNumBuckets]{};
+  // The bucket lookup table lets us map a size_t to a bucket quickly.
+  // The trailing +1 caters for the overflow case for very large allocation
+  // sizes.  It is one flat array instead of a 2D array because in the 2D
+  // world, we'd need to index array[blah][max+1] which risks undefined
+  // behavior.
+  uint16_t
+      bucket_index_lookup_[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1]{};
+};
+
+PA_ALWAYS_INLINE constexpr size_t RoundUpToPowerOfTwo(size_t size) {
+  const size_t n = 1 << base::bits::Log2Ceiling(static_cast<uint32_t>(size));
+  PA_DCHECK(size <= n);
+  return n;
+}
+
+PA_ALWAYS_INLINE constexpr size_t RoundUpSize(size_t size) {
+  const size_t next_power = RoundUpToPowerOfTwo(size);
+  const size_t prev_power = next_power >> 1;
+  PA_DCHECK(size <= next_power);
+  PA_DCHECK(prev_power < size);
+  if (size <= prev_power * 5 / 4) {
+    return prev_power * 5 / 4;
+  } else {
+    return next_power;
+  }
+}
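+// Illustrative example (not from the original source): RoundUpSize(1434), i.e.
+// roughly 1.4 * 2^10, computes next_power == 2048 and prev_power == 1024;
+// since 1434 > 1024 * 5 / 4 == 1280, it returns 2048, so such an allocation
+// lands in the 2^11 bucket, matching the Distribution B diagram in GetIndex()
+// below.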
+
+PA_ALWAYS_INLINE constexpr uint16_t RoundUpToOdd(uint16_t size) {
+  return (size % 2 == 0) + size;
+}
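+// For example (illustrative only), RoundUpToOdd(6) == 7 and RoundUpToOdd(7) ==
+// 7: even (denser-distribution) indices are bumped to the next odd index,
+// which is how every second bucket gets skipped in GetIndexForNeutralBuckets()
+// below.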
+
+// static
+PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
+    size_t size) {
+  // This forces the bucket table to be constant-initialized and immediately
+  // materialized in the binary.
+  constexpr BucketIndexLookup lookup{};
+  const size_t order =
+      kBitsPerSizeT -
+      static_cast<size_t>(base::bits::CountLeadingZeroBits(size));
+  // The order index is simply the next few bits after the most significant
+  // bit.
+  const size_t order_index =
+      (size >> kOrderIndexShift[order]) & (kNumBucketsPerOrder - 1);
+  // And if the remaining bits are non-zero we must bump the bucket up.
+  const size_t sub_order_index = size & kOrderSubIndexMask[order];
+  const uint16_t index =
+      lookup.bucket_index_lookup_[(order << kNumBucketsPerOrderBits) +
+                                  order_index + !!sub_order_index];
+  PA_DCHECK(index <= kNumBuckets);  // Last one is the sentinel bucket.
+  return index;
+}
+
+// static
+PA_ALWAYS_INLINE constexpr uint16_t
+BucketIndexLookup::GetIndexForNeutralBuckets(size_t size) {
+  const auto index = GetIndexForDenserBuckets(size);
+  // Below the minimum size, the 4- and 8-bucket distributions are the same,
+  // since we can't fit any more buckets per order; this is due to alignment
+  // requirements: each bucket must be a multiple of the alignment, which
+  // implies the difference between buckets must also be a multiple of the
+  // alignment. In smaller orders, this limits the number of buckets we can
+  // have per order. So, for these small orders, we do not want to skip every
+  // second bucket.
+  //
+  // We also do not want to go above the index for the max bucketed size.
+  if (size > kAlignment * kNumBucketsPerOrder &&
+      index < GetIndexForDenserBuckets(kMaxBucketed)) {
+    return RoundUpToOdd(index);
+  } else {
+    return index;
+  }
+}
+
+// static
+PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
+  // For any order 2^N, under the denser bucket distribution ("Distribution A"),
+  // we have 4 evenly distributed buckets: 2^N, 1.25*2^N, 1.5*2^N, and 1.75*2^N.
+  // These numbers represent the maximum size of an allocation that can go into
+  // a given bucket.
+  //
+  // Under the less dense bucket distribution ("Distribution B"), we only have
+  // 2 buckets for the same order 2^N: 2^N and 1.25*2^N.
+  //
+  // Everything that would be mapped to the last two buckets of an order under
+  // Distribution A is instead mapped to the first bucket of the next order
+  // under Distribution B. The following diagram shows roughly what this looks
+  // like for the order starting from 2^10, as an example.
+  //
+  // A: ... | 2^10 | 1.25*2^10 | 1.5*2^10 | 1.75*2^10 | 2^11 | ...
+  // B: ... | 2^10 | 1.25*2^10 | -------- | --------- | 2^11 | ...
+  //
+  // So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
+  // Distribution A, but to the 2^11 bucket under Distribution B.
+  if (1 << 8 < size && size < kHighThresholdForAlternateDistribution) {
+    return BucketIndexLookup::GetIndexForNeutralBuckets(RoundUpSize(size));
+  }
+  return BucketIndexLookup::GetIndexForNeutralBuckets(size);
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_LOOKUP_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_cookie.h b/base/allocator/partition_allocator/src/partition_alloc/partition_cookie.h
new file mode 100644
index 0000000..d031e28
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_cookie.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_COOKIE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_COOKIE_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc::internal {
+
+static constexpr size_t kCookieSize = 16;
+
+// Cookie is enabled for debug builds.
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+inline constexpr unsigned char kCookieValue[kCookieSize] = {
+    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
+    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
+
+constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;
+
+PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) {
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
+    PA_DCHECK(*cookie_ptr == kCookieValue[i]);
+  }
+}
+
+PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
+    *cookie_ptr = kCookieValue[i];
+  }
+}
+
+#else
+
+constexpr size_t kPartitionCookieSizeAdjustment = 0;
+
+PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* address) {}
+
+PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {}
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_COOKIE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.cc
new file mode 100644
index 0000000..4ba50c7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.cc
@@ -0,0 +1,70 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span) {
+  PartitionRoot* root = PartitionRoot::FromSlotSpan(slot_span);
+  PA_DCHECK(root->inverted_self == ~reinterpret_cast<uintptr_t>(root));
+}
+
+void DCheckIsValidShiftFromSlotStart(internal::SlotSpanMetadata* slot_span,
+                                     uintptr_t shift_from_slot_start) {
+  PartitionRoot* root = PartitionRoot::FromSlotSpan(slot_span);
+  PA_DCHECK(shift_from_slot_start >= root->settings.extras_offset);
+  // Use <= to allow an address immediately past the object.
+  PA_DCHECK(shift_from_slot_start <=
+            root->settings.extras_offset + root->GetSlotUsableSize(slot_span));
+}
+
+void DCheckIsWithInSuperPagePayload(uintptr_t address) {
+  uintptr_t super_page = address & kSuperPageBaseMask;
+  auto* extent = PartitionSuperPageToExtent(super_page);
+  PA_DCHECK(IsWithinSuperPagePayload(address,
+                                     IsManagedByNormalBuckets(address) &&
+                                         extent->root->IsQuarantineAllowed()));
+}
+
+void DCheckIsValidObjectAddress(internal::SlotSpanMetadata* slot_span,
+                                uintptr_t object_addr) {
+  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
+  auto* root = PartitionRoot::FromSlotSpan(slot_span);
+  PA_DCHECK((object_addr - slot_span_start) % slot_span->bucket->slot_size ==
+            root->settings.extras_offset);
+}
+
+void DCheckNumberOfPartitionPagesInSuperPagePayload(
+    const PartitionSuperPageExtentEntry* entry,
+    const PartitionRoot* root,
+    size_t number_of_nonempty_slot_spans) {
+  uintptr_t super_page = base::bits::AlignDown(
+      reinterpret_cast<uintptr_t>(entry), kSuperPageAlignment);
+  size_t number_of_partition_pages_in_superpage_payload =
+      SuperPagePayloadSize(super_page, root->IsQuarantineAllowed()) /
+      PartitionPageSize();
+  PA_DCHECK(number_of_partition_pages_in_superpage_payload >
+            number_of_nonempty_slot_spans);
+}
+
+void DCheckRootLockIsAcquired(PartitionRoot* root) {
+  PartitionRootLock(root).AssertAcquired();
+}
+
+void DCheckRootLockOfSlotSpanIsAcquired(internal::SlotSpanMetadata* slot_span) {
+  DCheckRootLockIsAcquired(PartitionRoot::FromSlotSpan(slot_span));
+}
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.h b/base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.h
new file mode 100644
index 0000000..2170a67
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.h
@@ -0,0 +1,68 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_DCHECK_HELPER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_DCHECK_HELPER_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace partition_alloc::internal {
+
+struct PartitionSuperPageExtentEntry;
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+// These macros allow the asserts below to have empty bodies in no-DCHECK()
+// builds, while avoiding issues with circular includes.
+#define PA_EMPTY_BODY_IF_DCHECK_IS_OFF()
+// Export the symbol if dcheck-is-on, because the body is not empty.
+#define PA_EXPORT_IF_DCHECK_IS_ON() PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+
+#else  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+// The static_assert() eats follow-on semicolons.
+#define PA_EMPTY_BODY_IF_DCHECK_IS_OFF() \
+  {}                                     \
+  static_assert(true)
+// Inline when DCHECKs are off, so there is no overhead.
+#define PA_EXPORT_IF_DCHECK_IS_ON() PA_ALWAYS_INLINE
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
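+
+// Illustrative expansion (not real declarations; DCheckFoo is a hypothetical
+// name used only in this comment): with DCHECKs on,
+//   PA_EXPORT_IF_DCHECK_IS_ON()
+//   void DCheckFoo(int x) PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+// becomes an exported declaration whose body lives in the .cc file; with
+// DCHECKs off it becomes an always-inline empty definition
+//   PA_ALWAYS_INLINE void DCheckFoo(int x) {} static_assert(true);
+// so the helpers cost nothing in release builds.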
+
+PA_EXPORT_IF_DCHECK_IS_ON()
+void DCheckIsValidSlotSpan(internal::SlotSpanMetadata* slot_span)
+    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+
+PA_EXPORT_IF_DCHECK_IS_ON()
+void DCheckIsWithInSuperPagePayload(uintptr_t address)
+    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+
+PA_EXPORT_IF_DCHECK_IS_ON()
+void DCheckNumberOfPartitionPagesInSuperPagePayload(
+    const PartitionSuperPageExtentEntry* entry,
+    const PartitionRoot* root,
+    size_t number_of_nonempty_slot_spans) PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+
+PA_EXPORT_IF_DCHECK_IS_ON()
+void DCheckIsValidShiftFromSlotStart(internal::SlotSpanMetadata* slot_span,
+                                     size_t shift_from_slot_start)
+    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+
+// Checks that the object is exactly |extras_offset| away from a multiple of
+// slot size (i.e. from a slot start).
+PA_EXPORT_IF_DCHECK_IS_ON()
+void DCheckIsValidObjectAddress(internal::SlotSpanMetadata* slot_span,
+                                uintptr_t object_addr)
+    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+
+PA_EXPORT_IF_DCHECK_IS_ON()
+void DCheckRootLockIsAcquired(PartitionRoot* root)
+    PA_EMPTY_BODY_IF_DCHECK_IS_OFF();
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_DCHECK_HELPER_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h b/base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h
new file mode 100644
index 0000000..23bac08
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h
@@ -0,0 +1,68 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_DIRECT_MAP_EXTENT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_DIRECT_MAP_EXTENT_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+
+namespace partition_alloc::internal {
+
+struct PartitionDirectMapExtent {
+  PartitionDirectMapExtent* next_extent;
+  PartitionDirectMapExtent* prev_extent;
+  PartitionBucket* bucket;
+  // Size of the entire reservation, including guard pages, meta-data,
+  // padding for alignment before allocation, and padding for granularity at the
+  // end of the allocation.
+  size_t reservation_size;
+  // Padding between the first partition page (guard pages + meta-data) and
+  // the allocation.
+  size_t padding_for_alignment;
+
+  PA_ALWAYS_INLINE static PartitionDirectMapExtent* FromSlotSpan(
+      SlotSpanMetadata* slot_span);
+};
+
+// Metadata page for direct-mapped allocations.
+struct PartitionDirectMapMetadata {
+  // |page| and |subsequent_page| are needed to match the layout of normal
+  // buckets (specifically, of single-slot slot spans), with the caveat that
+  // only the first subsequent page is needed (for SubsequentPageMetadata) and
+  // others aren't used for direct map.
+  PartitionPage page;
+  PartitionPage subsequent_page;
+  // The following fields are metadata specific to direct map allocations. All
+  // these fields will easily fit into the precalculated metadata region,
+  // because a direct map allocation starts no further than half way through the
+  // super page.
+  PartitionBucket bucket;
+  PartitionDirectMapExtent direct_map_extent;
+
+  PA_ALWAYS_INLINE static PartitionDirectMapMetadata* FromSlotSpan(
+      SlotSpanMetadata* slot_span);
+};
+
+PA_ALWAYS_INLINE PartitionDirectMapMetadata*
+PartitionDirectMapMetadata::FromSlotSpan(SlotSpanMetadata* slot_span) {
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
+  // |*slot_span| is the first field of |PartitionDirectMapMetadata|, just cast.
+  auto* metadata = reinterpret_cast<PartitionDirectMapMetadata*>(slot_span);
+  PA_DCHECK(&metadata->page.slot_span_metadata == slot_span);
+  return metadata;
+}
+
+PA_ALWAYS_INLINE PartitionDirectMapExtent*
+PartitionDirectMapExtent::FromSlotSpan(SlotSpanMetadata* slot_span) {
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
+  return &PartitionDirectMapMetadata::FromSlotSpan(slot_span)
+              ->direct_map_extent;
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_DIRECT_MAP_EXTENT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.cc
new file mode 100644
index 0000000..ed292d7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.cc
@@ -0,0 +1,18 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc::internal {
+
+void FreelistCorruptionDetected(size_t slot_size) {
+  // Make it visible in minidumps.
+  PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
+  PA_IMMEDIATE_CRASH();
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h b/base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h
new file mode 100644
index 0000000..5a43def
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h
@@ -0,0 +1,51 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_FREELIST_ENTRY_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_FREELIST_ENTRY_H_
+
+#include <cstddef>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+
+namespace partition_alloc::internal {
+
+[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC) void FreelistCorruptionDetected(size_t slot_size);
+
+}  // namespace partition_alloc::internal
+
+#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
+#include "base/allocator/partition_allocator/src/partition_alloc/pool_offset_freelist.h"  // IWYU pragma: export
+#else
+#include "base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h"  // IWYU pragma: export
+#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
+
+namespace partition_alloc::internal {
+
+// Assertions that are agnostic to the implementation of the freelist.
+
+static_assert(kSmallestBucket >= sizeof(EncodedNextFreelistEntry),
+              "Need enough space for freelist entries in the smallest slot");
+
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+// The smallest bucket actually used. Note that the smallest request is 1 (if
+// it's 0, it gets patched to 1), and ref-count gets added to it.
+namespace {
+constexpr size_t kSmallestUsedBucket =
+    base::bits::AlignUp(1 + sizeof(PartitionRefCount), kSmallestBucket);
+}
+static_assert(kSmallestUsedBucket >=
+                  sizeof(EncodedNextFreelistEntry) + sizeof(PartitionRefCount),
+              "Need enough space for freelist entries and the ref-count in the "
+              "smallest *used* slot");
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
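+
+// Worked example for the PUT_REF_COUNT_IN_PREVIOUS_SLOT case above
+// (illustrative values only, not guaranteed on any platform): if
+// kSmallestBucket were 16 and sizeof(PartitionRefCount) were 4, then
+// kSmallestUsedBucket = AlignUp(1 + 4, 16) = 16, which can still hold a
+// freelist entry plus the ref-count, as the assert above requires.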
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_FREELIST_ENTRY_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_lock.h b/base/allocator/partition_allocator/src/partition_alloc/partition_lock.h
new file mode 100644
index 0000000..b3b28db
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_lock.h
@@ -0,0 +1,149 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_LOCK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_LOCK_H_
+
+#include <atomic>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal {
+
+class PA_LOCKABLE Lock {
+ public:
+  inline constexpr Lock();
+  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    LiftThreadIsolationScope lift_thread_isolation_restrictions;
+#endif
+
+    // When PartitionAlloc is malloc(), it can easily become reentrant. For
+    // instance, a DCHECK() triggers in external code (such as
+    // base::Lock). DCHECK() error message formatting allocates, which triggers
+    // PartitionAlloc, and then we get reentrancy, and in this case infinite
+    // recursion.
+    //
+    // To avoid that, crash quickly when the code becomes reentrant.
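+    //
+    // Illustrative (hypothetical) call chain that this catches:
+    //   malloc() -> PartitionAlloc acquires |lock_| -> a DCHECK() fires ->
+    //   error-message formatting allocates -> PartitionAlloc tries to acquire
+    //   |lock_| again on the same thread -> PA_IMMEDIATE_CRASH() below,
+    //   instead of deadlock or unbounded recursion.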
+    base::PlatformThreadRef current_thread = base::PlatformThread::CurrentRef();
+    if (!lock_.Try()) {
+      // The lock wasn't free when we tried to acquire it. This can be because
+      // another thread or *this* thread was holding it.
+      //
+      // If it's this thread holding it, then it cannot have become free in the
+      // meantime, and the current value of |owning_thread_ref_| is valid, as it
+      // was set by this thread. Assuming that writes to |owning_thread_ref_|
+      // are atomic, then if it's us, we are trying to recursively acquire a
+      // non-recursive lock.
+      //
+      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
+      // itself allocate; without this code, a reentrancy issue hangs on Linux
+      // instead of crashing quickly.
+      if (PA_UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
+                      current_thread)) {
+        // Trying to acquire lock while it's held by this thread: reentrancy
+        // issue.
+        PA_IMMEDIATE_CRASH();
+      }
+      lock_.Acquire();
+    }
+    owning_thread_ref_.store(current_thread, std::memory_order_release);
+#else
+    lock_.Acquire();
+#endif
+  }
+
+  void Release() PA_UNLOCK_FUNCTION() {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    LiftThreadIsolationScope lift_thread_isolation_restrictions;
+#endif
+    owning_thread_ref_.store(base::PlatformThreadRef(),
+                             std::memory_order_release);
+#endif
+    lock_.Release();
+  }
+  void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
+    lock_.AssertAcquired();
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    LiftThreadIsolationScope lift_thread_isolation_restrictions;
+#endif
+    PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
+              base::PlatformThread::CurrentRef());
+#endif
+  }
+
+  void Reinit() PA_UNLOCK_FUNCTION() {
+    lock_.AssertAcquired();
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    owning_thread_ref_.store(base::PlatformThreadRef(),
+                             std::memory_order_release);
+#endif
+    lock_.Reinit();
+  }
+
+ private:
+  SpinningMutex lock_;
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Should in theory be protected by |lock_|, but we need to read it to detect
+  // recursive lock acquisition (and thus, the allocator becoming reentrant).
+  std::atomic<base::PlatformThreadRef> owning_thread_ref_ =
+      base::PlatformThreadRef();
+#endif
+};
+
+class PA_SCOPED_LOCKABLE ScopedGuard {
+ public:
+  explicit ScopedGuard(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock)
+      : lock_(lock) {
+    lock_.Acquire();
+  }
+  ~ScopedGuard() PA_UNLOCK_FUNCTION() { lock_.Release(); }
+
+ private:
+  Lock& lock_;
+};
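+
+// Example usage of ScopedGuard (illustrative sketch, not part of the API):
+//
+//   Lock lock;
+//   {
+//     ScopedGuard guard{lock};  // Acquires |lock|.
+//     // ... critical section ...
+//   }  // Releases |lock| when |guard| goes out of scope.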
+
+class PA_SCOPED_LOCKABLE ScopedUnlockGuard {
+ public:
+  explicit ScopedUnlockGuard(Lock& lock) PA_UNLOCK_FUNCTION(lock)
+      : lock_(lock) {
+    lock_.Release();
+  }
+  ~ScopedUnlockGuard() PA_EXCLUSIVE_LOCK_FUNCTION() { lock_.Acquire(); }
+
+ private:
+  Lock& lock_;
+};
+
+constexpr Lock::Lock() = default;
+
+// We want PartitionRoot to not have a global destructor, so this should not
+// have one.
+static_assert(std::is_trivially_destructible_v<Lock>, "");
+
+}  // namespace partition_alloc::internal
+
+namespace base {
+namespace internal {
+
+using PartitionLock = ::partition_alloc::internal::Lock;
+using PartitionAutoLock = ::partition_alloc::internal::ScopedGuard;
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_LOCK_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_lock_perftest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_lock_perftest.cc
new file mode 100644
index 0000000..27503b5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_lock_perftest.cc
@@ -0,0 +1,127 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/timer/lap_timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_result_reporter.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+
+constexpr int kWarmupRuns = 1;
+constexpr ::base::TimeDelta kTimeLimit = ::base::Seconds(1);
+constexpr int kTimeCheckInterval = 100000;
+
+constexpr char kMetricPrefixLock[] = "PartitionLock.";
+constexpr char kMetricLockUnlockThroughput[] = "lock_unlock_throughput";
+constexpr char kMetricLockUnlockLatency[] = "lock_unlock_latency_ns";
+constexpr char kStoryBaseline[] = "baseline_story";
+constexpr char kStoryWithCompetingThread[] = "with_competing_thread";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixLock, story_name);
+  reporter.RegisterImportantMetric(kMetricLockUnlockThroughput, "runs/s");
+  reporter.RegisterImportantMetric(kMetricLockUnlockLatency, "ns");
+  return reporter;
+}
+
+class Spin : public base::PlatformThreadForTesting::Delegate {
+ public:
+  Spin(Lock* lock, uint32_t* data)
+      : lock_(lock), data_(data), should_stop_(false) {}
+  ~Spin() override = default;
+
+  void ThreadMain() override {
+    started_count_++;
+    // Local variable to avoid "cache line ping-pong" from influencing the
+    // results.
+    uint32_t count = 0;
+    while (!should_stop_.load(std::memory_order_relaxed)) {
+      lock_->Acquire();
+      count++;
+      lock_->Release();
+    }
+
+    lock_->Acquire();
+    (*data_) += count;
+    lock_->Release();
+  }
+
+  // Called from another thread to stop the loop.
+  void Stop() { should_stop_ = true; }
+  int started_count() const { return started_count_; }
+
+ private:
+  Lock* lock_;
+  uint32_t* data_ GUARDED_BY(lock_);
+  std::atomic<bool> should_stop_;
+  std::atomic<int> started_count_{0};
+};
+
+}  // namespace
+
+TEST(PartitionLockPerfTest, Simple) {
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  [[maybe_unused]] uint32_t data = 0;
+
+  Lock lock;
+
+  do {
+    lock.Acquire();
+    data += 1;
+    lock.Release();
+    timer.NextLap();
+  } while (!timer.HasTimeLimitExpired());
+
+  auto reporter = SetUpReporter(kStoryBaseline);
+  reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
+  reporter.AddResult(kMetricLockUnlockLatency, 1e9 / timer.LapsPerSecond());
+}
+
+TEST(PartitionLockPerfTest, WithCompetingThreads) {
+  uint32_t data = 0;
+
+  Lock lock;
+
+  // Start competing threads, all executing the same loop as this thread.
+  Spin thread_main(&lock, &data);
+  std::vector<base::PlatformThreadHandle> thread_handles;
+  constexpr int kThreads = 4;
+
+  for (int i = 0; i < kThreads; i++) {
+    base::PlatformThreadHandle thread_handle;
+    ASSERT_TRUE(base::PlatformThreadForTesting::Create(0, &thread_main,
+                                                       &thread_handle));
+    thread_handles.push_back(thread_handle);
+  }
+  // Wait for all the threads to start.
+  while (thread_main.started_count() != kThreads) {
+  }
+
+  ::base::LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+  do {
+    lock.Acquire();
+    data += 1;
+    lock.Release();
+    timer.NextLap();
+  } while (!timer.HasTimeLimitExpired());
+
+  thread_main.Stop();
+  for (int i = 0; i < kThreads; i++) {
+    base::PlatformThreadForTesting::Join(thread_handles[i]);
+  }
+
+  auto reporter = SetUpReporter(kStoryWithCompetingThread);
+  reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
+  reporter.AddResult(kMetricLockUnlockLatency, 1e9 / timer.LapsPerSecond());
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_lock_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_lock_unittest.cc
new file mode 100644
index 0000000..6d1376e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_lock_unittest.cc
@@ -0,0 +1,240 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal {
+
+TEST(PartitionAllocLockTest, Simple) {
+  Lock lock;
+  lock.Acquire();
+  lock.Release();
+}
+
+namespace {
+
+Lock g_lock;
+
+}  // namespace
+
+TEST(PartitionAllocLockTest, StaticLockStartsUnlocked) {
+  g_lock.Acquire();
+  g_lock.Release();
+}
+
+namespace {
+
+class ThreadDelegateForContended
+    : public base::PlatformThreadForTesting::Delegate {
+ public:
+  explicit ThreadDelegateForContended(Lock& start_lock,
+                                      Lock& lock,
+                                      int iterations,
+                                      int& counter)
+      : start_lock_(start_lock),
+        lock_(lock),
+        iterations_(iterations),
+        counter_(counter) {}
+
+  void ThreadMain() override {
+    start_lock_.Acquire();
+    start_lock_.Release();
+
+    for (int i = 0; i < iterations_; i++) {
+      lock_.Acquire();
+      ++counter_;
+      lock_.Release();
+    }
+  }
+
+ private:
+  Lock& start_lock_;
+  Lock& lock_;
+  const int iterations_;
+  int& counter_;
+};
+
+}  // namespace
+
+TEST(PartitionAllocLockTest, Contended) {
+  int counter = 0;  // *Not* atomic.
+  std::vector<internal::base::PlatformThreadHandle> thread_handles;
+  constexpr int iterations_per_thread = 1000000;
+  constexpr int num_threads = 4;
+
+  Lock lock;
+  Lock start_lock;
+
+  ThreadDelegateForContended delegate(start_lock, lock, iterations_per_thread,
+                                      counter);
+
+  start_lock.Acquire();  // Make sure that the threads compete, by waiting until
+                         // all of them have at least been created.
+  for (int i = 0; i < num_threads; ++i) {
+    base::PlatformThreadHandle handle;
+    base::PlatformThreadForTesting::Create(0, &delegate, &handle);
+    thread_handles.push_back(handle);
+  }
+
+  start_lock.Release();
+
+  for (int i = 0; i < num_threads; ++i) {
+    base::PlatformThreadForTesting::Join(thread_handles[i]);
+  }
+  EXPECT_EQ(iterations_per_thread * num_threads, counter);
+}
+
+namespace {
+
+class ThreadDelegateForSlowThreads
+    : public base::PlatformThreadForTesting::Delegate {
+ public:
+  explicit ThreadDelegateForSlowThreads(Lock& start_lock,
+                                        Lock& lock,
+                                        int iterations,
+                                        int& counter)
+      : start_lock_(start_lock),
+        lock_(lock),
+        iterations_(iterations),
+        counter_(counter) {}
+
+  void ThreadMain() override {
+    start_lock_.Acquire();
+    start_lock_.Release();
+
+    for (int i = 0; i < iterations_; i++) {
+      lock_.Acquire();
+      ++counter_;
+      // Hold the lock for a while, to force futex()-based locks to sleep.
+      base::PlatformThread::Sleep(base::Milliseconds(1));
+      lock_.Release();
+    }
+  }
+
+ private:
+  Lock& start_lock_;
+  Lock& lock_;
+  const int iterations_;
+  int& counter_;
+};
+
+}  // namespace
+
+TEST(PartitionAllocLockTest, SlowThreads) {
+  int counter = 0;  // *Not* atomic.
+  std::vector<base::PlatformThreadHandle> thread_handles;
+  constexpr int iterations_per_thread = 100;
+  constexpr int num_threads = 4;
+
+  Lock lock;
+  Lock start_lock;
+
+  ThreadDelegateForSlowThreads delegate(start_lock, lock, iterations_per_thread,
+                                        counter);
+
+  start_lock.Acquire();  // Make sure that the threads compete, by waiting until
+                         // all of them have at least been created.
+  for (int i = 0; i < num_threads; i++) {
+    base::PlatformThreadHandle handle;
+    base::PlatformThreadForTesting::Create(0, &delegate, &handle);
+    thread_handles.push_back(handle);
+  }
+
+  start_lock.Release();
+
+  for (int i = 0; i < num_threads; i++) {
+    base::PlatformThreadForTesting::Join(thread_handles[i]);
+  }
+  EXPECT_EQ(iterations_per_thread * num_threads, counter);
+}
+
+TEST(PartitionAllocLockTest, AssertAcquired) {
+  Lock lock;
+  lock.Acquire();
+  lock.AssertAcquired();
+  lock.Release();
+}
+
+// AssertAcquired() is only enforced with DCHECK()s.
+#if defined(GTEST_HAS_DEATH_TEST) && BUILDFLAG(PA_DCHECK_IS_ON)
+
+TEST(PartitionAllocLockTest, AssertAcquiredDeathTest) {
+  Lock lock;
+  EXPECT_DEATH(lock.AssertAcquired(), "");
+}
+
+namespace {
+
+class ThreadDelegateForAssertAcquiredAnotherThreadHoldsTheLock
+    : public base::PlatformThreadForTesting::Delegate {
+ public:
+  explicit ThreadDelegateForAssertAcquiredAnotherThreadHoldsTheLock(Lock& lock)
+      : lock_(lock) {}
+
+  void ThreadMain() PA_NO_THREAD_SAFETY_ANALYSIS override { lock_.Acquire(); }
+
+ private:
+  Lock& lock_;
+};
+
+}  // namespace
+
+TEST(PartitionAllocLockTest, AssertAcquiredAnotherThreadHoldsTheLock) {
+  Lock lock;
+  // PA_NO_THREAD_SAFETY_ANALYSIS: The checker rightfully points out that the
+  // lock is still held at the end of the function, which is what we want here.
+  ThreadDelegateForAssertAcquiredAnotherThreadHoldsTheLock delegate(lock);
+  base::PlatformThreadHandle handle;
+  base::PlatformThreadForTesting::Create(0, &delegate, &handle);
+  // Join before the test; otherwise gtest on some platforms has trouble with
+  // EXPECT_DEATH() and multiple live threads.
+  base::PlatformThreadForTesting::Join(handle);
+
+  EXPECT_DEATH(lock.AssertAcquired(), "");
+}
+
+#if BUILDFLAG(IS_APPLE)
+
+namespace {
+
+class ThreadDelegateForReinitInOtherThread
+    : public base::PlatformThreadForTesting::Delegate {
+ public:
+  explicit ThreadDelegateForReinitInOtherThread(Lock& lock) : lock_(lock) {}
+
+  void ThreadMain() PA_NO_THREAD_SAFETY_ANALYSIS override {
+    lock_.Reinit();
+    lock_.Acquire();
+    lock_.Release();
+  }
+
+ private:
+  Lock& lock_;
+};
+
+}  // namespace
+
+// On Apple OSes, it is not allowed to unlock a lock from another thread, so
+// we need to re-initialize it.
+TEST(PartitionAllocLockTest, ReinitInOtherThread) PA_NO_THREAD_SAFETY_ANALYSIS {
+  Lock lock;
+  lock.Acquire();
+
+  ThreadDelegateForReinitInOtherThread delegate(lock);
+  base::PlatformThreadHandle handle;
+  base::PlatformThreadForTesting::Create(0, &delegate, &handle);
+  base::PlatformThreadForTesting::Join(handle);
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+#endif  // defined(GTEST_HAS_DEATH_TEST) && BUILDFLAG(PA_DCHECK_IS_ON)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_oom.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_oom.cc
new file mode 100644
index 0000000..1027801
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_oom.cc
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_oom.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal {
+
+OomFunction g_oom_handling_function = nullptr;
+
+PA_NOINLINE PA_NOT_TAIL_CALLED void PartitionExcessiveAllocationSize(
+    size_t size) {
+  PA_NO_CODE_FOLDING();
+  OOM_CRASH(size);
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+PA_NOINLINE PA_NOT_TAIL_CALLED void
+PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
+  PA_NO_CODE_FOLDING();
+  OOM_CRASH(size);
+}
+
+[[noreturn]] PA_NOT_TAIL_CALLED PA_NOINLINE void
+PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
+  PA_NO_CODE_FOLDING();
+  OOM_CRASH(virtual_size);
+}
+
+#endif  // !defined(ARCH_CPU_64_BITS)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_oom.h b/base/allocator/partition_allocator/src/partition_alloc/partition_oom.h
new file mode 100644
index 0000000..640b1ca
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_oom.h
@@ -0,0 +1,40 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Holds functions for generating OOM errors from PartitionAlloc. This is
+// distinct from oom.h in that it is meant only for use in PartitionAlloc.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_OOM_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_OOM_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+using OomFunction = void (*)(size_t);
+
+namespace internal {
+
+// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
+extern OomFunction g_oom_handling_function;
+
+[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC) void PartitionExcessiveAllocationSize(size_t size);
+
+#if !defined(ARCH_CPU_64_BITS)
+[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(
+    size_t size);
+[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLargeVirtualSize(
+    size_t virtual_size);
+#endif
+
+}  // namespace internal
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_OOM_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_page.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_page.cc
new file mode 100644
index 0000000..920dc13
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_page.cc
@@ -0,0 +1,369 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+
+void UnmapNow(uintptr_t reservation_start,
+              size_t reservation_size,
+              pool_handle pool);
+
+PA_ALWAYS_INLINE void PartitionDirectUnmap(SlotSpanMetadata* slot_span) {
+  auto* root = PartitionRoot::FromSlotSpan(slot_span);
+  PartitionRootLock(root).AssertAcquired();
+  auto* extent = PartitionDirectMapExtent::FromSlotSpan(slot_span);
+
+  // Maintain the doubly-linked list of all direct mappings.
+  if (extent->prev_extent) {
+    PA_DCHECK(extent->prev_extent->next_extent == extent);
+    extent->prev_extent->next_extent = extent->next_extent;
+  } else {
+    root->direct_map_list = extent->next_extent;
+  }
+  if (extent->next_extent) {
+    PA_DCHECK(extent->next_extent->prev_extent == extent);
+    extent->next_extent->prev_extent = extent->prev_extent;
+  }
+
+  // The actual decommit is deferred below after releasing the lock.
+  root->DecreaseCommittedPages(slot_span->bucket->slot_size);
+
+  size_t reservation_size = extent->reservation_size;
+  PA_DCHECK(!(reservation_size & DirectMapAllocationGranularityOffsetMask()));
+  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reservation_size);
+  root->total_size_of_direct_mapped_pages -= reservation_size;
+
+  uintptr_t reservation_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
+  // The mapping may start at an unspecified location within a super page, but
+  // we always reserve memory aligned to super page size.
+  reservation_start = base::bits::AlignDown(reservation_start, kSuperPageSize);
+
+  // All the metadata has been updated above; in particular, the mapping has
+  // been unlinked. We can safely release the memory outside the lock, which is
+  // important as decommitting memory can be expensive.
+  //
+  // This can create a fake "address space exhaustion" OOM, in the case where
+  // e.g. a large allocation is freed on a thread, and another large one is made
+  // from another *before* UnmapNow() has finished running. In this case the
+  // second one may not find enough space in the pool, and fail. This is
+  // expected to be very rare though, and likely preferable to holding the lock
+  // while releasing the address space.
+  ScopedUnlockGuard unlock{PartitionRootLock(root)};
+  ScopedSyscallTimer timer{root};
+  UnmapNow(reservation_start, reservation_size, root->ChoosePool());
+}
+
+}  // namespace
+
+PA_ALWAYS_INLINE void SlotSpanMetadata::RegisterEmpty() {
+  PA_DCHECK(is_empty());
+  auto* root = PartitionRoot::FromSlotSpan(this);
+  PartitionRootLock(root).AssertAcquired();
+
+  root->empty_slot_spans_dirty_bytes +=
+      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
+
+  ToSuperPageExtent()->DecrementNumberOfNonemptySlotSpans();
+
+  // If the slot span is already registered as empty, give it another life.
+  if (in_empty_cache_) {
+    PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
+    PA_DCHECK(root->global_empty_slot_span_ring[empty_cache_index_] == this);
+    root->global_empty_slot_span_ring[empty_cache_index_] = nullptr;
+  }
+
+  int16_t current_index = root->global_empty_slot_span_ring_index;
+  SlotSpanMetadata* slot_span_to_decommit =
+      root->global_empty_slot_span_ring[current_index];
+  // The slot span might well have been re-activated, filled up, etc. before we
+  // get around to looking at it here.
+  if (slot_span_to_decommit) {
+    slot_span_to_decommit->DecommitIfPossible(root);
+  }
+
+  // We put the empty slot span on our global list of "slot spans that were once
+  // empty", thus providing it a bit of breathing room to get re-used before we
+  // really free it. This reduces the number of system calls. Otherwise any
+  // free() from a single-slot slot span would lead to a syscall, for instance.
+  root->global_empty_slot_span_ring[current_index] = this;
+  empty_cache_index_ = current_index;
+  in_empty_cache_ = 1;
+  ++current_index;
+  if (current_index == root->global_empty_slot_span_ring_size) {
+    current_index = 0;
+  }
+  root->global_empty_slot_span_ring_index = current_index;
+
+  // Avoid wasting too much memory on empty slot spans. Note that we only divide
+  // by powers of two, since division can be very slow, and this path is taken
+  // for every single-slot slot span deallocation.
+  //
+  // Empty slot spans are also all decommitted with MemoryReclaimer, but it may
+  // never run, be delayed arbitrarily, and/or miss large memory spikes.
+  size_t max_empty_dirty_bytes =
+      root->total_size_of_committed_pages.load(std::memory_order_relaxed) >>
+      root->max_empty_slot_spans_dirty_bytes_shift;
+  if (root->empty_slot_spans_dirty_bytes > max_empty_dirty_bytes) {
+    root->ShrinkEmptySlotSpansRing(std::min(
+        root->empty_slot_spans_dirty_bytes / 2, max_empty_dirty_bytes));
+  }
+}
+// static
+const SlotSpanMetadata SlotSpanMetadata::sentinel_slot_span_;
+
+// static
+const SlotSpanMetadata* SlotSpanMetadata::get_sentinel_slot_span() {
+  return &sentinel_slot_span_;
+}
+
+// static
+SlotSpanMetadata* SlotSpanMetadata::get_sentinel_slot_span_non_const() {
+  return const_cast<SlotSpanMetadata*>(&sentinel_slot_span_);
+}
+
+SlotSpanMetadata::SlotSpanMetadata(PartitionBucket* bucket)
+    : bucket(bucket), can_store_raw_size_(bucket->CanStoreRawSize()) {}
+
+void SlotSpanMetadata::FreeSlowPath(size_t number_of_freed) {
+  DCheckRootLockIsAcquired(PartitionRoot::FromSlotSpan(this));
+  PA_DCHECK(this != get_sentinel_slot_span());
+
+  // The caller has already modified |num_allocated_slots|. It is a
+  // responsibility of this function to react to it, and update the state. We
+  // can get here only if the slot span is marked full and/or is now empty. Both
+  // are possible at the same time, which can happen when the caller lowered
+  // |num_allocated_slots| from "all" to 0 (common for single-slot spans). First
+  // execute the "is marked full" path, as it sets up |active_slot_spans_head|
+  // in a way later needed for the "is empty" path.
+  if (marked_full) {
+    // Direct map slot spans aren't added to any lists, hence never marked full.
+    PA_DCHECK(!bucket->is_direct_mapped());
+    // Double check that the slot span was full.
+    PA_DCHECK(num_allocated_slots ==
+              bucket->get_slots_per_span() - number_of_freed);
+    marked_full = 0;
+    // Fully used slot span became partially used. It must be put back on the
+    // non-full list. Also make it the current slot span to increase the
+    // chances of it being filled up again. The old current slot span will be
+    // the next slot span.
+    PA_DCHECK(!next_slot_span);
+    if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) {
+      next_slot_span = bucket->active_slot_spans_head;
+    }
+    bucket->active_slot_spans_head = this;
+    PA_CHECK(bucket->num_full_slot_spans);  // Underflow.
+    --bucket->num_full_slot_spans;
+  }
+
+  if (PA_LIKELY(num_allocated_slots == 0)) {
+    // Slot span became fully unused.
+    if (PA_UNLIKELY(bucket->is_direct_mapped())) {
+      PartitionDirectUnmap(this);
+      return;
+    }
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    freelist_head->CheckFreeList(bucket->slot_size);
+#endif
+    // If it's the current active slot span, change it. We bounce the slot span
+    // to the empty list as a force towards defragmentation.
+    if (PA_LIKELY(this == bucket->active_slot_spans_head)) {
+      bucket->SetNewActiveSlotSpan();
+    }
+    PA_DCHECK(bucket->active_slot_spans_head != this);
+
+    if (CanStoreRawSize()) {
+      SetRawSize(0);
+    }
+
+    RegisterEmpty();
+  }
+}
+
+void SlotSpanMetadata::Decommit(PartitionRoot* root) {
+  PartitionRootLock(root).AssertAcquired();
+  PA_DCHECK(is_empty());
+  PA_DCHECK(!bucket->is_direct_mapped());
+  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(this);
+  // If lazy commit is enabled, only provisioned slots are committed.
+  size_t dirty_size =
+      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
+  size_t size_to_decommit =
+      kUseLazyCommit ? dirty_size : bucket->get_bytes_per_span();
+
+  PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
+  root->empty_slot_spans_dirty_bytes -= dirty_size;
+
+  // A not-yet-decommitted slot span must have had at least 1 allocation.
+  PA_DCHECK(size_to_decommit > 0);
+  root->DecommitSystemPagesForData(
+      slot_span_start, size_to_decommit,
+      PageAccessibilityDisposition::kAllowKeepForPerf);
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  FreeSlotBitmapReset(slot_span_start, slot_span_start + size_to_decommit,
+                      bucket->slot_size);
+#endif
+
+  // We actually leave the decommitted slot span in the active list. We'll sweep
+  // it on to the decommitted list when we next walk the active list.
+  // Pulling this trick enables us to use a singly-linked list for all
+  // cases, which is critical in keeping the slot span metadata structure down
+  // to 32 bytes in size.
+  SetFreelistHead(nullptr);
+  num_unprovisioned_slots = 0;
+  PA_DCHECK(is_decommitted());
+  PA_DCHECK(bucket);
+}
+
+void SlotSpanMetadata::DecommitIfPossible(PartitionRoot* root) {
+  PartitionRootLock(root).AssertAcquired();
+  PA_DCHECK(in_empty_cache_);
+  PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
+  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
+  in_empty_cache_ = 0;
+  if (is_empty()) {
+    Decommit(root);
+  }
+}
+
+void SlotSpanMetadata::SortFreelist() {
+  std::bitset<kMaxSlotsPerSlotSpan> free_slots;
+  uintptr_t slot_span_start = ToSlotSpanStart(this);
+
+  size_t num_provisioned_slots =
+      bucket->get_slots_per_span() - num_unprovisioned_slots;
+  PA_CHECK(num_provisioned_slots <= kMaxSlotsPerSlotSpan);
+
+  size_t num_free_slots = 0;
+  size_t slot_size = bucket->slot_size;
+  for (EncodedNextFreelistEntry* head = freelist_head; head;
+       head = head->GetNext(slot_size)) {
+    ++num_free_slots;
+    size_t offset_in_slot_span = SlotStartPtr2Addr(head) - slot_span_start;
+    size_t slot_number = bucket->GetSlotNumber(offset_in_slot_span);
+    PA_DCHECK(slot_number < num_provisioned_slots);
+    free_slots[slot_number] = true;
+  }
+  PA_DCHECK(num_free_slots == GetFreelistLength());
+
+  // Empty or single-element list is always sorted.
+  if (num_free_slots > 1) {
+    EncodedNextFreelistEntry* back = nullptr;
+    EncodedNextFreelistEntry* head = nullptr;
+
+    for (size_t slot_number = 0; slot_number < num_provisioned_slots;
+         slot_number++) {
+      if (free_slots[slot_number]) {
+        uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
+        auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(slot_start);
+
+        if (!head) {
+          head = entry;
+        } else {
+          back->SetNext(entry);
+        }
+
+        back = entry;
+      }
+    }
+    SetFreelistHead(head);
+  }
+
+  freelist_is_sorted_ = true;
+}
+
+namespace {
+
+void UnmapNow(uintptr_t reservation_start,
+              size_t reservation_size,
+              pool_handle pool) {
+  PA_DCHECK(reservation_start && reservation_size > 0);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  if (pool == kBRPPoolHandle) {
+    // In 32-bit mode, the beginning of a reservation may be excluded from the
+    // BRP pool, so shift the pointer. Other pools don't have this logic.
+    PA_DCHECK(IsManagedByPartitionAllocBRPPool(
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+        reservation_start
+#else
+        reservation_start +
+        AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
+            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+        ));
+  } else
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  {
+    PA_DCHECK(pool == kRegularPoolHandle
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+              || pool == kThreadIsolatedPoolHandle
+#endif
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+              ||
+              (IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle)
+#endif
+    );
+    // Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
+    PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+              IsManagedByPartitionAllocThreadIsolatedPool(reservation_start) ||
+#endif
+              IsManagedByPartitionAllocConfigurablePool(reservation_start));
+  }
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+  PA_DCHECK((reservation_start & kSuperPageOffsetMask) == 0);
+  uintptr_t reservation_end = reservation_start + reservation_size;
+  auto* offset_ptr = ReservationOffsetPointer(reservation_start);
+  // Reset the offset table entries for the given memory before unreserving
+  // it. Since the memory has not been unreserved yet, it is not available to
+  // other threads, so other threads cannot modify its table entries either;
+  // we can therefore update them without a race.
+  uint16_t i = 0;
+  for (uintptr_t address = reservation_start; address < reservation_end;
+       address += kSuperPageSize) {
+    PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address));
+    PA_DCHECK(*offset_ptr == i++);
+    *offset_ptr++ = kOffsetTagNotAllocated;
+  }
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
+                                               reservation_size);
+#endif
+
+  // After resetting the table entries, unreserve and decommit the memory.
+  AddressPoolManager::GetInstance().UnreserveAndDecommit(
+      pool, reservation_start, reservation_size);
+}
+
+}  // namespace
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_page.h b/base/allocator/partition_allocator/src/partition_alloc/partition_page.h
new file mode 100644
index 0000000..54803c8
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_page.h
@@ -0,0 +1,812 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_superpage_extent_entry.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap.h"
+#endif
+
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#endif
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#endif
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(USE_STARSCAN)
+using AllocationStateMap =
+    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
+#endif
+
+// Metadata of the slot span.
+//
+// Some notes on slot span states. It can be in one of four major states:
+// 1) Active.
+// 2) Full.
+// 3) Empty.
+// 4) Decommitted.
+// An active slot span has available free slots, as well as allocated ones.
+// A full slot span has no free slots. An empty slot span has no allocated
+// slots, and a decommitted slot span is an empty one that had its backing
+// memory released back to the system.
+//
+// There are three linked lists tracking slot spans. The "active" list is an
+// approximation of a list of active slot spans. It is an approximation because
+// full, empty and decommitted slot spans may briefly be present in the list
+// until we next do a scan over it. The "empty" list holds mostly empty slot
+// spans, but may briefly hold decommitted ones too. The "decommitted" list
+// holds only decommitted slot spans.
+//
+// The significant slot span transitions are:
+// - Free() will detect when a full slot span has a slot freed and immediately
+//   return the slot span to the head of the active list.
+// - Free() will detect when a slot span is fully emptied. It _may_ add it to
+//   the empty list or it _may_ leave it on the active list until a future
+//   list scan.
+// - Alloc() _may_ scan the active page list in order to fulfil the request.
+//   If it does this, full, empty and decommitted slot spans encountered will be
+//   booted out of the active list. If there are no suitable active slot spans
+//   found, an empty or decommitted slot span (if one exists) will be pulled
+//   from the empty/decommitted list on to the active list.
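+//
+// A rough lifecycle sketch (informal, derived from the rules above):
+//   Decommitted -> Active      (slots provisioned again on allocation)
+//   Active      -> Full        (last free slot handed out)
+//   Full        -> Active      (a slot freed; span returns to the list head)
+//   Active      -> Empty       (last allocated slot freed)
+//   Empty       -> Decommitted (backing memory released to the system)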
+#pragma pack(push, 1)
+struct SlotSpanMetadata {
+ private:
+  EncodedNextFreelistEntry* freelist_head = nullptr;
+
+ public:
+  // TODO(lizeb): Make as many fields as possible private or const, to
+  // encapsulate things more clearly.
+  SlotSpanMetadata* next_slot_span = nullptr;
+  PartitionBucket* const bucket = nullptr;
+
+  // CHECK()ed in AllocNewSlotSpan().
+  // The maximum number of bits needed to cover all currently supported OSes.
+  static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
+  static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
+
+  // |marked_full| isn't equivalent to being full. Slot span is marked as full
+  // iff it isn't on the active slot span list (or any other list).
+  uint32_t marked_full : 1;
+  // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
+  // be further differentiated by checking existence of the freelist.
+  uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
+  uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
+
+ private:
+  const uint32_t can_store_raw_size_ : 1;
+  uint32_t freelist_is_sorted_ : 1;
+  uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
+  // If |in_empty_cache_|==0, |empty_cache_index_| is undefined and mustn't be
+  // used.
+  uint16_t in_empty_cache_ : 1;
+  uint16_t empty_cache_index_ : kEmptyCacheIndexBits;  // < kMaxFreeableSpans.
+  uint16_t unused2_ : (16 - 1 - kEmptyCacheIndexBits);
+  // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
+  // in PartitionPage which has 2B worth of fields and must fit in 32B.
+
+ public:
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+  explicit SlotSpanMetadata(PartitionBucket* bucket);
+
+  inline SlotSpanMetadata(const SlotSpanMetadata&);
+
+  // Public API
+  // Note the matching Alloc() functions are in PartitionPage.
+  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
+      size_t number_of_freed);
+  PA_ALWAYS_INLINE EncodedNextFreelistEntry* PopForAlloc(size_t size);
+  PA_ALWAYS_INLINE void Free(uintptr_t ptr, PartitionRoot* root);
+  // Appends the passed freelist to the slot-span's freelist. Please note that
+  // the function doesn't increment the tags of the passed freelist entries,
+  // since FreeInline() did it already.
+  PA_ALWAYS_INLINE void AppendFreeList(EncodedNextFreelistEntry* head,
+                                       EncodedNextFreelistEntry* tail,
+                                       size_t number_of_freed,
+                                       PartitionRoot* root);
+
+  void Decommit(PartitionRoot* root);
+  void DecommitIfPossible(PartitionRoot* root);
+
+  // Sorts the freelist in ascending addresses order.
+  void SortFreelist();
+  // Inserts the slot span into the empty ring, making space for the new slot
+  // span, and potentially shrinking the ring.
+  void RegisterEmpty();
+
+  // Pointer/address manipulation functions. These must be static as the input
+  // |slot_span| pointer may be the result of an offset calculation and
+  // therefore cannot be trusted. The objective of these functions is to
+  // sanitize this input.
+  PA_ALWAYS_INLINE static uintptr_t ToSlotSpanStart(
+      const SlotSpanMetadata* slot_span);
+  PA_ALWAYS_INLINE static SlotSpanMetadata* FromAddr(uintptr_t address);
+  PA_ALWAYS_INLINE static SlotSpanMetadata* FromSlotStart(uintptr_t slot_start);
+  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObject(void* object);
+  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerAddr(
+      uintptr_t address);
+  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);
+
+  PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* ToSuperPageExtent() const;
+
+  // Checks if it is feasible to store raw_size.
+  PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
+  // The caller is responsible for ensuring that raw_size can be stored before
+  // calling Set/GetRawSize.
+  PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
+  PA_ALWAYS_INLINE size_t GetRawSize() const;
+
+  PA_ALWAYS_INLINE EncodedNextFreelistEntry* get_freelist_head() const {
+    return freelist_head;
+  }
+  PA_ALWAYS_INLINE void SetFreelistHead(EncodedNextFreelistEntry* new_head);
+
+  // Returns the size of the region used within a slot. The used region
+  // comprises the actual allocated data, extras, and possibly empty space in
+  // the middle.
+  PA_ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
+    // The returned size can be:
+    // - The slot size for small buckets.
+    // - Exact size needed to satisfy allocation (incl. extras), for large
+    //   buckets and direct-mapped allocations (see also the comment in
+    //   CanStoreRawSize() for more info).
+    if (PA_LIKELY(!CanStoreRawSize())) {
+      return bucket->slot_size;
+    }
+    return GetRawSize();
+  }
+
+  // This includes padding due to rounding done at allocation; we don't know the
+  // requested size at deallocation, so we use this in both places.
+  PA_ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
+    // This could be more precise for allocations where CanStoreRawSize()
+    // returns true (large allocations). However this is called for *every*
+    // allocation, so we don't want an extra branch there.
+    return bucket->slot_size;
+  }
+
+  // Returns the total size of the slots that are currently provisioned.
+  PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
+    size_t num_provisioned_slots =
+        bucket->get_slots_per_span() - num_unprovisioned_slots;
+    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
+    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
+    return provisioned_size;
+  }
+
+  // Return the number of entries in the freelist.
+  size_t GetFreelistLength() const {
+    size_t num_provisioned_slots =
+        bucket->get_slots_per_span() - num_unprovisioned_slots;
+    return num_provisioned_slots - num_allocated_slots;
+  }
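+  // Worked example (illustrative numbers only): with 64 slots per span, 10 of
+  // them unprovisioned and 20 allocated, GetProvisionedSize() covers 54 slots
+  // and GetFreelistLength() == 54 - 20 == 34 free entries.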
+
+  PA_ALWAYS_INLINE void Reset();
+
+  // TODO(ajwong): Can this be made private?  https://crbug.com/787153
+  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+  static const SlotSpanMetadata* get_sentinel_slot_span();
+  // The sentinel is not supposed to be modified and hence we mark it as const
+  // under the hood. However, we often store it together with mutable metadata
+  // objects and need a non-const pointer.
+  // You can use this function for this case, but you need to ensure that the
+  // returned object will not be written to.
+  static SlotSpanMetadata* get_sentinel_slot_span_non_const();
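+  //
+  // Usage sketch (illustrative only; |bucket| is a hypothetical
+  // PartitionBucket*):
+  //   if (bucket->active_slot_spans_head ==
+  //       SlotSpanMetadata::get_sentinel_slot_span()) {
+  //     // No active slot span; take the slow path.
+  //   }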
+
+  // Slot span state getters.
+  PA_ALWAYS_INLINE bool is_active() const;
+  PA_ALWAYS_INLINE bool is_full() const;
+  PA_ALWAYS_INLINE bool is_empty() const;
+  PA_ALWAYS_INLINE bool is_decommitted() const;
+  PA_ALWAYS_INLINE bool in_empty_cache() const { return in_empty_cache_; }
+  PA_ALWAYS_INLINE bool freelist_is_sorted() const {
+    return freelist_is_sorted_;
+  }
+  PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }
+
+ private:
+  // sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
+  // span in the active list. We could use nullptr, but then we would need to
+  // add a null-check branch to the hot allocation path. We want to avoid that.
+  //
+  // Note, this declaration is kept in the header as opposed to an anonymous
+  // namespace so the getter can be fully inlined.
+  static const SlotSpanMetadata sentinel_slot_span_;
+  // For the sentinel.
+  inline constexpr SlotSpanMetadata() noexcept;
+};
+#pragma pack(pop)
+static_assert(sizeof(SlotSpanMetadata) <= kPageMetadataSize,
+              "SlotSpanMetadata must fit into a Page Metadata slot.");
+
+inline constexpr SlotSpanMetadata::SlotSpanMetadata() noexcept
+    : marked_full(0),
+      num_allocated_slots(0),
+      num_unprovisioned_slots(0),
+      can_store_raw_size_(false),
+      freelist_is_sorted_(true),
+      unused1_(0),
+      in_empty_cache_(0),
+      empty_cache_index_(0),
+      unused2_(0) {
+  (void)unused1_;
+  (void)unused2_;
+}
+
+inline SlotSpanMetadata::SlotSpanMetadata(const SlotSpanMetadata&) = default;
+
+// Metadata of a non-first partition page in a slot span.
+struct SubsequentPageMetadata {
+  // Raw size is the size needed to satisfy the allocation (requested size +
+  // extras). If available, it can be used to report better statistics or to
+  // bring protective cookie closer to the allocated memory.
+  //
+  // It can be used only if:
+  // - there is no more than one slot in the slot span (otherwise we wouldn't
+  //   know which slot the raw size applies to)
+  // - there is more than one partition page in the slot span (the metadata of
+  //   the first one is used to store slot information, but the second one is
+  //   available for extra information)
+  size_t raw_size;
+};
+
+// Each partition page has metadata associated with it. The metadata of the
+// first page of a slot span describes that slot span. If a slot span spans
+// more than 1 page, the page metadata may contain rudimentary additional
+// information.
+// "Pack" the union so that common page metadata still fits within
+// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
+#pragma pack(push, 1)
+struct PartitionPage {
+  union {
+    SlotSpanMetadata slot_span_metadata;
+
+    SubsequentPageMetadata subsequent_page_metadata;
+
+    // sizeof(PartitionPage) must always be:
+    // - a power of 2 (for fast modulo operations)
+    // - below kPageMetadataSize
+    //
+    // This makes sure that this is respected no matter the architecture.
+    char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
+  };
+
+  // The first PartitionPage of the slot span holds its metadata. This offset
+  // tells how many pages in from that first page we are.
+  // For direct maps, the first page metadata (that isn't super page extent
+  // entry) uses this field to tell how many pages to the right the direct map
+  // metadata starts.
+  //
+  // 6 bits is enough to represent all possible offsets, given that the smallest
+  // partition page is 16kiB and the offset won't exceed 1MiB.
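+  // (Worked example: 1 MiB / 16 kiB = 64 possible offsets, which fit in 6
+  // bits.)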
+  static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
+  static constexpr uint16_t kMaxSlotSpanMetadataOffset =
+      (1 << kMaxSlotSpanMetadataBits) - 1;
+  uint8_t slot_span_metadata_offset : kMaxSlotSpanMetadataBits;
+
+  // |is_valid| tells whether the page is part of a slot span. If |false|,
+  // |has_valid_span_after_this| tells whether it's an unused region in between
+  // slot spans within the super page.
+  // Note, |is_valid| has been added for clarity, but if we ever need to save
+  // this bit, it can be inferred from:
+  //   |!slot_span_metadata_offset && slot_span_metadata->bucket|.
+  bool is_valid : 1;
+  bool has_valid_span_after_this : 1;
+  uint8_t unused;
+
+  PA_ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
+};
+#pragma pack(pop)
+static_assert(sizeof(PartitionPage) == kPageMetadataSize,
+              "PartitionPage must be able to fit in a metadata slot");
+
+// Certain functions rely on PartitionPage being either SlotSpanMetadata or
+// SubsequentPageMetadata, and therefore freely casting between each other.
+static_assert(offsetof(PartitionPage, slot_span_metadata) == 0, "");
+static_assert(offsetof(PartitionPage, subsequent_page_metadata) == 0, "");
+
+PA_ALWAYS_INLINE PartitionPage* PartitionSuperPageToMetadataArea(
+    uintptr_t super_page) {
+  // This can't be just any super page, but it has to be the first super page of
+  // the reservation, as we assume here that the metadata is near its beginning.
+  PA_DCHECK(IsReservationStart(super_page));
+  PA_DCHECK(!(super_page & kSuperPageOffsetMask));
+  // The metadata area is exactly one system page (the guard page) into the
+  // super page.
+  return reinterpret_cast<PartitionPage*>(super_page + SystemPageSize());
+}
+
+PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
+    const PartitionPage* page) {
+  return &(page + 1)->subsequent_page_metadata;
+}
+
+PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
+    PartitionPage* page) {
+  return &(page + 1)->subsequent_page_metadata;
+}
+
+PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* PartitionSuperPageToExtent(
+    uintptr_t super_page) {
+  // The very first entry of the metadata is the super page extent entry.
+  return reinterpret_cast<PartitionSuperPageExtentEntry*>(
+      PartitionSuperPageToMetadataArea(super_page));
+}
+
+#if BUILDFLAG(USE_STARSCAN)
+
+// Size that should be reserved for state bitmap (if present) inside a super
+// page. Elements of a super page are partition-page-aligned, hence the returned
+// size is a multiple of partition page size.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+ReservedStateBitmapSize() {
+  return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
+}
+
+// Size that should be committed for state bitmap (if present) inside a super
+// page. It is a multiple of system page size.
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+CommittedStateBitmapSize() {
+  return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
+}
+
+// Returns the address/pointer to the state bitmap in the super page. It's the
+// caller's responsibility to ensure that the bitmaps even exist.
+PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
+  PA_DCHECK(!(super_page % kSuperPageAlignment));
+  return super_page + PartitionPageSize() +
+         (IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
+                                               : 0);
+}
+
+PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
+    uintptr_t super_page) {
+  return reinterpret_cast<AllocationStateMap*>(
+      SuperPageStateBitmapAddr(super_page));
+}
+
+#else  // BUILDFLAG(USE_STARSCAN)
+
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+ReservedStateBitmapSize() {
+  return 0ull;
+}
+
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+PA_ALWAYS_INLINE uintptr_t
+SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
+                            bool with_quarantine) {
+  return PartitionPageSize() +
+         (is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
+         (with_quarantine ? ReservedStateBitmapSize() : 0);
+}
+
+PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
+                                                 bool with_quarantine) {
+  PA_DCHECK(!(super_page % kSuperPageAlignment));
+  return super_page +
+         SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page),
+                                     with_quarantine);
+}
+
+PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEndOffset() {
+  return kSuperPageSize - PartitionPageSize();
+}
+
+PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
+  PA_DCHECK(!(super_page % kSuperPageAlignment));
+  return super_page + SuperPagePayloadEndOffset();
+}
+
+PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
+                                             bool with_quarantine) {
+  return SuperPagePayloadEnd(super_page) -
+         SuperPagePayloadBegin(super_page, with_quarantine);
+}
+
+PA_ALWAYS_INLINE PartitionSuperPageExtentEntry*
+SlotSpanMetadata::ToSuperPageExtent() const {
+  uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
+  return PartitionSuperPageToExtent(super_page);
+}
+
+// Returns whether the pointer lies within the super page's payload area (i.e.
+// area devoted to slot spans). It doesn't check whether it's within a valid
+// slot span. It merely ensures it doesn't fall in a meta-data region that would
+// surely never contain user data.
+PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
+                                               bool with_quarantine) {
+  // Quarantine can only be enabled for normal buckets in the current code.
+  PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
+  uintptr_t super_page = address & kSuperPageBaseMask;
+  uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
+  uintptr_t payload_end = SuperPagePayloadEnd(super_page);
+  return address >= payload_start && address < payload_end;
+}
+
+// Converts from an address inside a super page into a pointer to the
+// PartitionPage object (within the super page's metadata) that describes the
+// partition page where |address| is located. |address| doesn't have to be
+// located within a valid (i.e. allocated) slot span, but must be within the
+// super page's payload area (i.e. area devoted to slot spans).
+//
+// While it is generally valid for |address| to be in the middle of an
+// allocation, care has to be taken with direct maps that span multiple super
+// pages. This function's behavior is undefined if |address| lies in a
+// subsequent super page.
+PA_ALWAYS_INLINE PartitionPage* PartitionPage::FromAddr(uintptr_t address) {
+  uintptr_t super_page = address & kSuperPageBaseMask;
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  PA_DCHECK(IsReservationStart(super_page));
+  DCheckIsWithInSuperPagePayload(address);
+#endif
+
+  uintptr_t partition_page_index =
+      (address & kSuperPageOffsetMask) >> PartitionPageShift();
+  // Index 0 is invalid because it is the super page extent metadata and the
+  // last index is invalid because the whole PartitionPage is set as guard
+  // pages. This repeats part of the payload PA_DCHECK above, which also checks
+  // for other exclusions.
+  PA_DCHECK(partition_page_index);
+  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
+  return PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
+}
+
+// Converts from a pointer to the SlotSpanMetadata object (within a super
+// page's metadata) into a pointer to the beginning of the slot span. This
+// works on direct maps too.
+PA_ALWAYS_INLINE uintptr_t
+SlotSpanMetadata::ToSlotSpanStart(const SlotSpanMetadata* slot_span) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
+  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
+
+  // A valid |slot_span| must be past the first guard system page and within
+  // the following metadata region.
+  PA_DCHECK(super_page_offset > SystemPageSize());
+  // Must be less than total metadata region.
+  PA_DCHECK(super_page_offset <
+            SystemPageSize() +
+                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
+  uintptr_t partition_page_index =
+      (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
+  // Index 0 is invalid because it is the super page extent metadata and the
+  // last index is invalid because the whole PartitionPage is set as guard
+  // pages.
+  PA_DCHECK(partition_page_index);
+  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
+  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
+  return super_page_base + (partition_page_index << PartitionPageShift());
+}
+
+// Converts an address inside a slot span into a pointer to the SlotSpanMetadata
+// object (within the super page's metadata) that describes the slot span
+// containing that slot.
+//
+// CAUTION! For direct-mapped allocation, |address| has to be within the first
+// partition page.
+PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromAddr(
+    uintptr_t address) {
+  auto* page = PartitionPage::FromAddr(address);
+  PA_DCHECK(page->is_valid);
+  // Partition pages in the same slot span share the same SlotSpanMetadata
+  // object (located in the first PartitionPage object of that span). Adjust
+  // for that.
+  page -= page->slot_span_metadata_offset;
+  PA_DCHECK(page->is_valid);
+  PA_DCHECK(!page->slot_span_metadata_offset);
+  auto* slot_span = &page->slot_span_metadata;
+  // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
+  DCheckIsValidSlotSpan(slot_span);
+  // For a direct map, if |address| doesn't point within the first partition
+  // page, |slot_span_metadata_offset| will be 0, |page| won't get shifted, and
+  // |slot_size| will be left at 0.
+  PA_DCHECK(slot_span->bucket->slot_size);
+  return slot_span;
+}
+
+// Like |FromAddr|, but asserts that |slot_start| indeed points to the
+// beginning of a slot. It doesn't check if the slot is actually allocated.
+//
+// This works on direct maps too.
+PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromSlotStart(
+    uintptr_t slot_start) {
+  auto* slot_span = FromAddr(slot_start);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Checks that |slot_start| is offset from the slot span start by a multiple
+  // of the slot size.
+  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
+  PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+  return slot_span;
+}
+
+// Like |FromAddr|, but asserts that |object| indeed points to the beginning of
+// an object. It doesn't check if the object is actually allocated.
+//
+// This works on direct maps too.
+PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObject(void* object) {
+  uintptr_t object_addr = ObjectPtr2Addr(object);
+  auto* slot_span = FromAddr(object_addr);
+  DCheckIsValidObjectAddress(slot_span, object_addr);
+  return slot_span;
+}
+
+// Like |FromAddr|, but asserts that |address| indeed points within an object.
+// It doesn't check if the object is actually allocated.
+//
+// CAUTION! For direct-mapped allocation, |address| has to be within the first
+// partition page.
+PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerAddr(
+    uintptr_t address) {
+  auto* slot_span = FromAddr(address);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Checks that the address is within the expected object boundaries.
+  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
+  uintptr_t shift_from_slot_start =
+      (address - slot_span_start) % slot_span->bucket->slot_size;
+  DCheckIsValidShiftFromSlotStart(slot_span, shift_from_slot_start);
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+  return slot_span;
+}
+
+PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerPtr(
+    void* ptr) {
+  return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
+}
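+
+// Usage sketch (illustrative only; |object| is a hypothetical pointer
+// previously returned by the allocator):
+//   auto* slot_span = SlotSpanMetadata::FromObject(object);
+//   size_t slot_size = slot_span->bucket->slot_size;
+//   uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);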
+
+PA_ALWAYS_INLINE void SlotSpanMetadata::SetRawSize(size_t raw_size) {
+  PA_DCHECK(CanStoreRawSize());
+  auto* subsequent_page_metadata =
+      GetSubsequentPageMetadata(reinterpret_cast<PartitionPage*>(this));
+  subsequent_page_metadata->raw_size = raw_size;
+}
+
+PA_ALWAYS_INLINE size_t SlotSpanMetadata::GetRawSize() const {
+  PA_DCHECK(CanStoreRawSize());
+  const auto* subsequent_page_metadata =
+      GetSubsequentPageMetadata(reinterpret_cast<const PartitionPage*>(this));
+  return subsequent_page_metadata->raw_size;
+}
+
+PA_ALWAYS_INLINE void SlotSpanMetadata::SetFreelistHead(
+    EncodedNextFreelistEntry* new_head) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
+  // as well.
+  uintptr_t new_head_untagged = UntagPtr(new_head);
+  PA_DCHECK(!new_head ||
+            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
+                (new_head_untagged & kSuperPageBaseMask));
+#endif
+  freelist_head = new_head;
+  // Inserted something new in the freelist, assume that it is not sorted
+  // anymore.
+  freelist_is_sorted_ = false;
+}
+
+PA_ALWAYS_INLINE EncodedNextFreelistEntry* SlotSpanMetadata::PopForAlloc(
+    size_t size) {
+  // Not using bucket->slot_size directly as the compiler doesn't know that
+  // |bucket->slot_size| is the same as |size|.
+  PA_DCHECK(size == bucket->slot_size);
+  EncodedNextFreelistEntry* result = freelist_head;
+  // Not setting freelist_is_sorted_ to false since this doesn't destroy
+  // ordering.
+  freelist_head = freelist_head->GetNext(size);
+  num_allocated_slots++;
+  return result;
+}
+
+PA_ALWAYS_INLINE void SlotSpanMetadata::Free(uintptr_t slot_start,
+                                             PartitionRoot* root)
+    // PartitionRootLock() is not defined inside partition_page.h, but
+    // static analysis doesn't require the implementation.
+    PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
+  DCheckRootLockIsAcquired(root);
+  auto* entry = static_cast<internal::EncodedNextFreelistEntry*>(
+      SlotStartAddr2Ptr(slot_start));
+  // Catches an immediate double free.
+  PA_CHECK(entry != freelist_head);
+  // Look for double free one level deeper in debug.
+  PA_DCHECK(!freelist_head ||
+            entry != freelist_head->GetNext(bucket->slot_size));
+  entry->SetNext(freelist_head);
+  SetFreelistHead(entry);
+  // A best effort double-free check. Works only on empty slot spans.
+  PA_CHECK(num_allocated_slots);
+  --num_allocated_slots;
+  // If the span is marked full, or became empty, take the slow path to update
+  // internal state.
+  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
+    FreeSlowPath(1);
+  } else {
+    // All single-slot allocations must go through the slow path to
+    // correctly update the raw size.
+    PA_DCHECK(!CanStoreRawSize());
+  }
+}
+
+PA_ALWAYS_INLINE void SlotSpanMetadata::AppendFreeList(
+    EncodedNextFreelistEntry* head,
+    EncodedNextFreelistEntry* tail,
+    size_t number_of_freed,
+    PartitionRoot* root) PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  DCheckRootLockIsAcquired(root);
+  PA_DCHECK(!tail->GetNext(bucket->slot_size));
+  PA_DCHECK(number_of_freed);
+  PA_DCHECK(num_allocated_slots);
+  if (CanStoreRawSize()) {
+    PA_DCHECK(number_of_freed == 1);
+  }
+  {
+    size_t number_of_entries = 0;
+    for (auto* entry = head; entry;
+         entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
+      uintptr_t untagged_entry = UntagPtr(entry);
+      // Check that all entries belong to this slot span.
+      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
+      PA_DCHECK(untagged_entry <
+                ToSlotSpanStart(this) + bucket->get_bytes_per_span());
+    }
+    PA_DCHECK(number_of_entries == number_of_freed);
+  }
+#endif
+
+  tail->SetNext(freelist_head);
+  SetFreelistHead(head);
+  PA_DCHECK(num_allocated_slots >= number_of_freed);
+  num_allocated_slots -= number_of_freed;
+  // If the span is marked full, or became empty, take the slow path to update
+  // internal state.
+  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
+    FreeSlowPath(number_of_freed);
+  } else {
+    // All single-slot allocations must go through the slow path to
+    // correctly update the raw size.
+    PA_DCHECK(!CanStoreRawSize());
+  }
+}
+
+PA_ALWAYS_INLINE bool SlotSpanMetadata::is_active() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
+  bool ret =
+      (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots));
+  if (ret) {
+    PA_DCHECK(!marked_full);
+    PA_DCHECK(num_allocated_slots < bucket->get_slots_per_span());
+  }
+  return ret;
+}
+
+PA_ALWAYS_INLINE bool SlotSpanMetadata::is_full() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
+  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
+  if (ret) {
+    PA_DCHECK(!freelist_head);
+    PA_DCHECK(!num_unprovisioned_slots);
+    // May or may not be marked full, so don't check for that.
+  }
+  return ret;
+}
+
+PA_ALWAYS_INLINE bool SlotSpanMetadata::is_empty() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
+  bool ret = (!num_allocated_slots && freelist_head);
+  if (ret) {
+    PA_DCHECK(!marked_full);
+  }
+  return ret;
+}
+
+PA_ALWAYS_INLINE bool SlotSpanMetadata::is_decommitted() const {
+  PA_DCHECK(this != get_sentinel_slot_span());
+  bool ret = (!num_allocated_slots && !freelist_head);
+  if (ret) {
+    PA_DCHECK(!marked_full);
+    PA_DCHECK(!num_unprovisioned_slots);
+    PA_DCHECK(!in_empty_cache_);
+  }
+  return ret;
+}
+
+PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
+  PA_DCHECK(is_decommitted());
+
+  size_t num_slots_per_span = bucket->get_slots_per_span();
+  PA_DCHECK(num_slots_per_span <= kMaxSlotsPerSlotSpan);
+  num_unprovisioned_slots = static_cast<uint32_t>(num_slots_per_span);
+  PA_DCHECK(num_unprovisioned_slots);
+
+  ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
+
+  next_slot_span = nullptr;
+}
+
+#if BUILDFLAG(USE_STARSCAN)
+// Returns the state bitmap from an address within a normal-bucket super page.
+// It's the caller's responsibility to ensure that the bitmap exists.
+PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
+  PA_DCHECK(IsManagedByNormalBuckets(address));
+  uintptr_t super_page = address & kSuperPageBaseMask;
+  return SuperPageStateBitmap(super_page);
+}
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+// Iterates over all slot spans in a super-page. |Callback| must return true if
+// early return is needed.
+template <typename Callback>
+void IterateSlotSpans(uintptr_t super_page,
+                      bool with_quarantine,
+                      Callback callback) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  PA_DCHECK(!(super_page % kSuperPageAlignment));
+  auto* extent_entry = PartitionSuperPageToExtent(super_page);
+  DCheckRootLockIsAcquired(extent_entry->root);
+#endif
+
+  using Page = PartitionPage;
+  using SlotSpan = SlotSpanMetadata;
+  auto* const first_page =
+      Page::FromAddr(SuperPagePayloadBegin(super_page, with_quarantine));
+  auto* const last_page =
+      Page::FromAddr(SuperPagePayloadEnd(super_page) - PartitionPageSize());
+  Page* page;
+  SlotSpan* slot_span;
+  for (page = first_page; page <= last_page;) {
+    PA_DCHECK(!page->slot_span_metadata_offset);  // Ensure slot span beginning.
+    if (!page->is_valid) {
+      if (page->has_valid_span_after_this) {
+        // The page doesn't represent a valid slot span, but there is another
+        // one somewhere after this. Keep iterating to find it.
+        ++page;
+        continue;
+      }
+      // There are currently no valid spans from here on. No need to iterate
+      // the rest of the super page.
+      break;
+    }
+    slot_span = &page->slot_span_metadata;
+    if (callback(slot_span)) {
+      return;
+    }
+    page += slot_span->bucket->get_pages_per_slot_span();
+  }
+  // Each super page must have at least one valid slot span.
+  PA_DCHECK(page > first_page);
+  // Just a quick check that the search ended at a valid slot span and there
+  // was no unnecessary iteration over gaps afterwards.
+  PA_DCHECK(page == reinterpret_cast<Page*>(slot_span) +
+                        slot_span->bucket->get_pages_per_slot_span());
+}
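+
+// Usage sketch (illustrative only; assumes the caller already holds the root
+// lock, as DCheckRootLockIsAcquired() verifies above):
+//   IterateSlotSpans(super_page, /*with_quarantine=*/false,
+//                    [](SlotSpanMetadata* slot_span) {
+//                      // Inspect |slot_span| here.
+//                      return false;  // Keep iterating; true stops early.
+//                    });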
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h b/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h
new file mode 100644
index 0000000..b7b1a81
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h
@@ -0,0 +1,33 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_CONSTANTS_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
+// System page size is not a constant on Apple OSes, but is either 4 or 16kiB
+// (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
+// PartitionPageSize() is 4 times the OS page size.
+static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket;
+#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+// System page size can be 4, 16, or 64 kiB on Linux on arm64. 64 kiB is
+// currently not supported by the code (kMaxSlotsPerSlotSpanBits == 13), so we
+// use the 16 kiB maximum (64 kiB will crash).
+static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket;
+#else
+// A slot span can "span" multiple PartitionPages, but then its slot size is
+// larger, so it doesn't have as many slots.
+static constexpr size_t kMaxSlotsPerSlotSpan =
+    PartitionPageSize() / kSmallestBucket;
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h b/base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h
new file mode 100644
index 0000000..0db9a26
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h
@@ -0,0 +1,498 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_REF_COUNT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_REF_COUNT_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_MAC)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"
+#endif  // BUILDFLAG(IS_MAC)
+
+namespace partition_alloc::internal {
+
+// Aligns `ref_count_size` up to an 8B boundary if needed, and returns it.
+// *  Known to be needed on MacOS 13: https://crbug.com/1378822.
+// *  Thought to be needed on MacOS 14: https://crbug.com/1457756.
+// *  No-op everywhere else.
+//
+// Placed outside `BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
+// intentionally to accommodate usage in contexts also outside
+// this gating.
+PA_ALWAYS_INLINE size_t AlignUpRefCountSizeForMac(size_t ref_count_size) {
+#if BUILDFLAG(IS_MAC)
+  if (internal::base::mac::MacOSMajorVersion() == 13 ||
+      internal::base::mac::MacOSMajorVersion() == 14) {
+    return internal::base::bits::AlignUp(ref_count_size, 8);
+  }
+#endif  // BUILDFLAG(IS_MAC)
+  return ref_count_size;
+}
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+// Special-purpose atomic reference count class used by RawPtrBackupRefImpl.
+// The least significant bit of the count is reserved for tracking the liveness
+// state of an allocation: it's set when the allocation is created and cleared
+// on free(). So the count can be:
+//
+// 1 for an allocation that is just returned from Alloc()
+// 2 * k + 1 for a "live" allocation with k references
+// 2 * k for an allocation with k dangling references after Free()
+//
+// This protects against double-frees, as we check whether the reference count
+// is odd in |ReleaseFromAllocator()|, and if not we have a double-free.
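+//
+// Worked example (illustrative): Alloc() -> 1; two raw_ptr<T> acquired -> 5;
+// Free() while both still exist -> 4 (even, i.e. two dangling references);
+// both raw_ptr<T> released -> 0, at which point the memory is reclaimed.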
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
+ public:
+  // This class holds an atomic bit field: `count_`, which packs up to 5 fields:
+  //
+  // bits   name                   description
+  // -----  ---------------------  ----------------------------------------
+  // 0      is_allocated           Whether or not the memory is held by the
+  //                               allocator.
+  //                               - 1 at construction time.
+  //                               - Decreased in ReleaseFromAllocator();
+  //
+  // 1-31   ptr_count              Number of raw_ptr<T>.
+  //                               - Increased in Acquire()
+  //                               - Decreased in Release()
+  //
+  // 32     dangling_detected      A dangling raw_ptr<> has been detected.
+  // 33     needs_mac11_malloc_    Whether malloc_size() return value needs to
+  //          size_hack            be adjusted for this allocation.
+  //
+  // 34-63  unprotected_ptr_count  Number of
+  //                               raw_ptr<T, DisableDanglingPtrDetection>
+  //                               - Increased in AcquireFromUnprotectedPtr().
+  //                               - Decreased in ReleaseFromUnprotectedPtr().
+  //
+  // The allocation is reclaimed if all of:
+  // - |is_allocated|
+  // - |ptr_count|
+  // - |unprotected_ptr_count|
+  // are zero.
+  //
+  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
+  // |dangling_detected| is set and the error is reported via
+  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will be
+  // called when the last raw_ptr<> is released.
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+  using CountType = uint64_t;
+  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0000'0000'0001;
+  static constexpr CountType kPtrCountMask = 0x0000'0000'FFFF'FFFE;
+  static constexpr CountType kUnprotectedPtrCountMask = 0xFFFF'FFFC'0000'0000;
+  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0001'0000'0000;
+  static constexpr CountType kNeedsMac11MallocSizeHackBit =
+      0x0000'0002'0000'0000;
+
+  static constexpr CountType kPtrInc = 0x0000'0000'0000'0002;
+  static constexpr CountType kUnprotectedPtrInc = 0x0000'0004'0000'0000;
+#else
+  using CountType = uint32_t;
+  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0001;
+
+  static constexpr CountType kPtrCountMask = 0x7FFF'FFFE;
+  static constexpr CountType kUnprotectedPtrCountMask = 0x0000'0000;
+  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0000;
+  static constexpr CountType kNeedsMac11MallocSizeHackBit = 0x8000'0000;
+
+  static constexpr CountType kPtrInc = 0x0000'0002;
+#endif
+
+  PA_ALWAYS_INLINE explicit PartitionRefCount(
+      bool needs_mac11_malloc_size_hack);
+
+  // Incrementing the counter doesn't imply any visibility about modified
+  // memory, hence relaxed atomics. For decrement, visibility is required before
+  // the memory gets freed, necessitating an acquire/release barrier before
+  // freeing the memory.
+  //
+  // For details, see base::AtomicRefCount, which has the same constraints and
+  // characteristics.
+  //
+  // FYI: the document below shows the assembly produced by the compiler on
+  // every platform, in particular for the uint64_t fetch_add on 32-bit CPUs.
+  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
+  PA_ALWAYS_INLINE void Acquire() {
+    CheckCookieIfSupported();
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+    constexpr CountType kInc = kUnprotectedPtrInc;
+    constexpr CountType kMask = kUnprotectedPtrCountMask;
+#else
+    constexpr CountType kInc = kPtrInc;
+    constexpr CountType kMask = kPtrCountMask;
+#endif
+    CountType old_count = count_.fetch_add(kInc, std::memory_order_relaxed);
+    // Check overflow.
+    PA_CHECK((old_count & kMask) != kMask);
+  }
+
+  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
+  // instead of raw_ptr<T>.
+  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+    CheckCookieIfSupported();
+    CountType old_count =
+        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
+    // Check overflow.
+    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
+             kUnprotectedPtrCountMask);
+#else
+    Acquire();
+#endif
+  }
+
+  // Returns true if the allocation should be reclaimed.
+  PA_ALWAYS_INLINE bool Release() {
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+    constexpr CountType kInc = kUnprotectedPtrInc;
+    constexpr CountType kMask = kUnprotectedPtrCountMask;
+#else
+    constexpr CountType kInc = kPtrInc;
+    constexpr CountType kMask = kPtrCountMask;
+#endif
+    CheckCookieIfSupported();
+
+    CountType old_count = count_.fetch_sub(kInc, std::memory_order_release);
+    // Check underflow.
+    PA_DCHECK(old_count & kMask);
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+    // If a dangling raw_ptr<> was detected, report it.
+    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
+                    kDanglingRawPtrDetectedBit)) {
+      partition_alloc::internal::DanglingRawPtrReleased(
+          reinterpret_cast<uintptr_t>(this));
+    }
+#endif
+
+    return ReleaseCommon(old_count - kInc);
+  }
+
+  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
+  // instead of raw_ptr<T>.
+  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+    CheckCookieIfSupported();
+
+    CountType old_count =
+        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
+    // Check underflow.
+    PA_DCHECK(old_count & kUnprotectedPtrCountMask);
+
+    return ReleaseCommon(old_count - kUnprotectedPtrInc);
+#else
+    return Release();
+#endif
+  }
+
+  // Returns true if the allocation should be reclaimed.
+  // This function should be called by the allocator during Free().
+  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
+    CheckCookieIfSupported();
+
+    // TODO(bartekn): Make the double-free check more effective. Once freed, the
+    // ref-count is overwritten by an encoded freelist-next pointer.
+    CountType old_count =
+        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
+
+    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
+      DoubleFreeOrCorruptionDetected(old_count);
+    }
+
+    // Release memory when no raw_ptr<> exists anymore:
+    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
+    if (PA_LIKELY((old_count & mask) == 0)) {
+      std::atomic_thread_fence(std::memory_order_acquire);
+      // The allocation is about to get freed, so clear the cookie.
+      ClearCookieIfSupported();
+      return true;
+    }
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+    // There are some dangling raw_ptr<>. Turn on the error flag if any of them
+    // have not opted out of being checked for dangling:
+    if (PA_UNLIKELY(old_count & kPtrCountMask)) {
+      count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);
+      partition_alloc::internal::DanglingRawPtrDetected(
+          reinterpret_cast<uintptr_t>(this));
+    }
+#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+    return false;
+  }
+
+  // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
+  // raw_ptr<T> references. There may be other references from raw pointers or
+  // unique_ptr, but we have no way of tracking them, so we hope for the best.
+  // To summarize, the function returns whether we believe the allocation can be
+  // safely freed.
+  PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
+    CheckCookieIfSupported();
+    static constexpr CountType mask =
+        kMemoryHeldByAllocatorBit | kPtrCountMask | kUnprotectedPtrCountMask;
+    return (count_.load(std::memory_order_acquire) & mask) ==
+           kMemoryHeldByAllocatorBit;
+  }
+
+  PA_ALWAYS_INLINE bool IsAlive() {
+    bool alive =
+        count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
+    if (alive) {
+      CheckCookieIfSupported();
+    }
+    return alive;
+  }
+
+  // Called when a raw_ptr is not banning dangling ptrs, but the user still
+  // wants to ensure the pointer is not currently dangling. This is currently
+  // used in UnretainedWrapper to make sure callbacks are not invoked with
+  // dangling pointers. If such a raw_ptr exists but the allocation is no longer
+  // alive, then we have a dangling pointer to a dead object.
+  PA_ALWAYS_INLINE void ReportIfDangling() {
+    if (!IsAlive()) {
+      partition_alloc::internal::UnretainedDanglingRawPtrDetected(
+          reinterpret_cast<uintptr_t>(this));
+    }
+  }
+
+  // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
+  // make sure the `raw_ptr<T>` release operation will never attempt to call the
+  // PA `free` on such a slot. GWP-ASan takes the extra reference into account
+  // when determining whether the slot can be reused.
+  PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+    brp_cookie_ = CalculateCookie();
+#endif
+    count_.store(kPtrInc | kMemoryHeldByAllocatorBit,
+                 std::memory_order_release);
+  }
+
+  PA_ALWAYS_INLINE bool CanBeReusedByGwpAsan() {
+    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
+    return (count_.load(std::memory_order_acquire) & mask) == kPtrInc;
+  }
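+
+  // Usage sketch (illustrative only; |ref_count| is a hypothetical
+  // PartitionRefCount*): GWP-ASan keeps the slot quarantined until only its
+  // own extra reference remains.
+  //   ref_count->InitalizeForGwpAsan();
+  //   ...
+  //   if (ref_count->CanBeReusedByGwpAsan()) { /* Safe to reuse the slot. */ }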
+
+  bool NeedsMac11MallocSizeHack() {
+    return count_.load(std::memory_order_relaxed) &
+           kNeedsMac11MallocSizeHackBit;
+  }
+
+#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+  PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
+    requested_size_ = static_cast<uint32_t>(size);
+  }
+  PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
+#endif  // PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+
+ private:
+  // The common parts shared by Release() and ReleaseFromUnprotectedPtr().
+  // Called after updating the ref counts, |count| is the new value of |count_|
+  // set by fetch_sub. Returns true if memory can be reclaimed.
+  PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
+    // Do not release memory, if it is still held by any of:
+    // - The allocator
+    // - A raw_ptr<T>
+    // - A raw_ptr<T, DisableDanglingPtrDetection>
+    //
+    // Assuming this raw_ptr is not dangling, the memory must still be held at
+    // least by the allocator, so this is PA_LIKELY true.
+    if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
+                            kUnprotectedPtrCountMask)))) {
+      return false;  // Do not release the memory.
+    }
+
+    // In most thread-safe reference count implementations, an acquire
+    // barrier is required so that all changes made to an object from other
+    // threads are visible to its destructor. In our case, the destructor
+    // finishes before the final `Release` call, so it shouldn't be a problem.
+    // However, we will keep it as a precautionary measure.
+    std::atomic_thread_fence(std::memory_order_acquire);
+
+    // The allocation is about to get freed, so clear the cookie.
+    ClearCookieIfSupported();
+    return true;
+  }
+
+  // The cookie helps us ensure that:
+  // 1) The reference count pointer calculation is correct.
+  // 2) The returned allocation slot is not freed.
+  PA_ALWAYS_INLINE void CheckCookieIfSupported() {
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+    PA_CHECK(brp_cookie_ == CalculateCookie());
+#endif
+  }
+
+  PA_ALWAYS_INLINE void ClearCookieIfSupported() {
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+    brp_cookie_ = 0;
+#endif
+  }
+
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+  PA_ALWAYS_INLINE uint32_t CalculateCookie() {
+    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
+           kCookieSalt;
+  }
+#endif  // PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+
+  [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
+  DoubleFreeOrCorruptionDetected(CountType count) {
+    PA_DEBUG_DATA_ON_STACK("refcount", count);
+    PA_NO_CODE_FOLDING();
+    PA_IMMEDIATE_CRASH();
+  }
+
+  // Note that in free slots, this is overwritten by encoded freelist
+  // pointer(s). The way the pointers are encoded on 64-bit little-endian
+  // architectures, count_ happens to stay even, which works well with the
+  // double-free-detection in ReleaseFromAllocator(). Don't change the layout of
+  // this class, to preserve this functionality.
+  std::atomic<CountType> count_;
+
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
+  volatile uint32_t brp_cookie_;
+#endif
+
+#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+  uint32_t requested_size_;
+#endif
+};
+
+PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(
+    bool needs_mac11_malloc_size_hack)
+    : count_(kMemoryHeldByAllocatorBit |
+             (needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+      ,
+      brp_cookie_(CalculateCookie())
+#endif
+{
+}
+
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+
+static_assert(kAlignment % alignof(PartitionRefCount) == 0,
+              "kAlignment must be multiples of alignof(PartitionRefCount).");
+
+// Allocate extra space for the reference count to satisfy the alignment
+// requirement.
+static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
+constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
+constexpr size_t kPartitionPastAllocationAdjustment = 0;
+
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
+    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 4;
+#else
+static constexpr size_t kPartitionRefCountSizeShift = 3;
+#endif
+
+#else  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
+    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 4;
+#elif PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
+    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 3;
+#else
+static constexpr size_t kPartitionRefCountSizeShift = 2;
+#endif
+
+#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
+
+// We need one PartitionRefCount for each system page in a super page. They take
+// `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())` space.
+// They need to fit into a system page of metadata as sparsely as possible to
+// minimize cache line sharing, hence we calculate a multiplier as
+// `SystemPageSize() / x`.
+//
+// The multiplier is expressed as a bitshift to optimize the code generation.
+// SystemPageSize() isn't always a constexpr, in which case the compiler
+// wouldn't know it's a power of two. The equivalence of these calculations is
+// checked in PartitionAllocGlobalInit().
+PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+GetPartitionRefCountIndexMultiplierShift() {
+  return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
+}
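+
+// Worked example (illustrative, assuming 4 kiB system pages, 2 MiB super pages,
+// and sizeof(PartitionRefCount) == 4, i.e. kPartitionRefCountSizeShift == 2):
+// x = 4 * (2 MiB / 4 kiB) = 2048 bytes, so the multiplier is
+// SystemPageSize() / x = 4096 / 2048 = 2, matching the shift
+// 12 * 2 - 21 - 2 = 1.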
+
+PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
+    uintptr_t slot_start) {
+  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
+    uintptr_t refcount_address = slot_start - sizeof(PartitionRefCount);
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
+#endif
+    // No need to tag because the ref count is not protected by MTE.
+    return reinterpret_cast<PartitionRefCount*>(refcount_address);
+  } else {
+    // No need to tag, as the metadata region isn't protected by MTE.
+    PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
+        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
+    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
+                   << GetPartitionRefCountIndexMultiplierShift();
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
+#endif
+    return bitmap_base + index;
+  }
+}
+
+#else  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+
+// Allocate extra space for the reference count to satisfy the alignment
+// requirement.
+static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
+constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;
+
+// This is for adjustment of pointers right past the allocation, which may point
+// to the next slot. First subtract 1 to bring them to the intended slot, and
+// only then we'll be able to find ref-count in that slot.
+constexpr size_t kPartitionPastAllocationAdjustment = 1;
+
+PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
+    uintptr_t slot_start) {
+  // Have to MTE-tag, because the address is untagged, but lies within a slot
+  // area, which is protected by MTE.
+  return static_cast<PartitionRefCount*>(TagAddr(slot_start));
+}
+
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+
+static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
+              "PartitionRefCount should fit into the in-slot buffer.");
+
+#else  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+static constexpr size_t kInSlotRefCountBufferSize = 0;
+constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
+
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_REF_COUNT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc
new file mode 100644
index 0000000..d7ab6b3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc
@@ -0,0 +1,1719 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_cookie.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_MAC)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"
+#endif
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h"
+#endif
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#include "wow64apiset.h"
+#endif
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#include <pthread.h>
+#endif
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(RECORD_ALLOC_INFO)
+// Even if this were not hidden behind a BUILDFLAG, it would not use any memory
+// when recording is disabled, since it ends up in the .bss section.
+AllocInfo g_allocs = {};
+
+void RecordAllocOrFree(uintptr_t addr, size_t size) {
+  g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
+                  kAllocInfoSize] = {addr, size};
+}
+#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
+                                       uintptr_t test_address,
+                                       size_t type_size) {
+  // Required for pointers right past an allocation. See
+  // |PartitionAllocGetSlotStartInBRPPool()|.
+  uintptr_t adjusted_address =
+      orig_address - kPartitionPastAllocationAdjustment;
+  PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
+  DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
+
+  uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
+  // Don't use |adjusted_address| beyond this point at all. It was needed to
+  // pick the right slot, but now we're dealing with very concrete addresses.
+  // Zero it just in case, to catch errors.
+  adjusted_address = 0;
+
+  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
+  auto* root = PartitionRoot::FromSlotSpan(slot_span);
+  // Double check that ref-count is indeed present.
+  PA_DCHECK(root->brp_enabled());
+
+  uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
+  uintptr_t object_end = object_addr + root->GetSlotUsableSize(slot_span);
+  if (test_address < object_addr || object_end < test_address) {
+    return PtrPosWithinAlloc::kFarOOB;
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  } else if (object_end - type_size < test_address) {
+    // Not even a single element of the type referenced by the pointer can fit
+    // between the pointer and the end of the object.
+    return PtrPosWithinAlloc::kAllocEnd;
+#endif
+  } else {
+    return PtrPosWithinAlloc::kInBounds;
+  }
+}
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+}  // namespace partition_alloc::internal
+
+namespace partition_alloc {
+
+#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+
+namespace {
+internal::Lock g_root_enumerator_lock;
+}
+
+internal::Lock& PartitionRoot::GetEnumeratorLock() {
+  return g_root_enumerator_lock;
+}
+
+namespace internal {
+
+class PartitionRootEnumerator {
+ public:
+  using EnumerateCallback = void (*)(PartitionRoot* root, bool in_child);
+  enum EnumerateOrder {
+    kNormal,
+    kReverse,
+  };
+
+  static PartitionRootEnumerator& Instance() {
+    static PartitionRootEnumerator instance;
+    return instance;
+  }
+
+  void Enumerate(EnumerateCallback callback,
+                 bool in_child,
+                 EnumerateOrder order) PA_NO_THREAD_SAFETY_ANALYSIS {
+    if (order == kNormal) {
+      PartitionRoot* root;
+      for (root = Head(partition_roots_); root != nullptr;
+           root = root->next_root) {
+        callback(root, in_child);
+      }
+    } else {
+      PA_DCHECK(order == kReverse);
+      PartitionRoot* root;
+      for (root = Tail(partition_roots_); root != nullptr;
+           root = root->prev_root) {
+        callback(root, in_child);
+      }
+    }
+  }
+
+  void Register(PartitionRoot* root) {
+    internal::ScopedGuard guard(PartitionRoot::GetEnumeratorLock());
+    root->next_root = partition_roots_;
+    root->prev_root = nullptr;
+    if (partition_roots_) {
+      partition_roots_->prev_root = root;
+    }
+    partition_roots_ = root;
+  }
+
+  void Unregister(PartitionRoot* root) {
+    internal::ScopedGuard guard(PartitionRoot::GetEnumeratorLock());
+    PartitionRoot* prev = root->prev_root;
+    PartitionRoot* next = root->next_root;
+    if (prev) {
+      PA_DCHECK(prev->next_root == root);
+      prev->next_root = next;
+    } else {
+      PA_DCHECK(partition_roots_ == root);
+      partition_roots_ = next;
+    }
+    if (next) {
+      PA_DCHECK(next->prev_root == root);
+      next->prev_root = prev;
+    }
+    root->next_root = nullptr;
+    root->prev_root = nullptr;
+  }
+
+ private:
+  constexpr PartitionRootEnumerator() = default;
+
+  PartitionRoot* Head(PartitionRoot* roots) { return roots; }
+
+  PartitionRoot* Tail(PartitionRoot* roots) PA_NO_THREAD_SAFETY_ANALYSIS {
+    if (!roots) {
+      return nullptr;
+    }
+    PartitionRoot* node = roots;
+    for (; node->next_root != nullptr; node = node->next_root)
+      ;
+    return node;
+  }
+
+  PartitionRoot* partition_roots_
+      PA_GUARDED_BY(PartitionRoot::GetEnumeratorLock()) = nullptr;
+};
+
+}  // namespace internal
+
+#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace {
+
+#if PA_CONFIG(HAS_ATFORK_HANDLER)
+
+void LockRoot(PartitionRoot* root, bool) PA_NO_THREAD_SAFETY_ANALYSIS {
+  PA_DCHECK(root);
+  internal::PartitionRootLock(root).Acquire();
+}
+
+// PA_NO_THREAD_SAFETY_ANALYSIS: acquires the lock and doesn't release it, by
+// design.
+void BeforeForkInParent() PA_NO_THREAD_SAFETY_ANALYSIS {
+  // PartitionRoot::GetLock() is private. So use
+  // g_root_enumerator_lock here.
+  g_root_enumerator_lock.Acquire();
+  internal::PartitionRootEnumerator::Instance().Enumerate(
+      LockRoot, false,
+      internal::PartitionRootEnumerator::EnumerateOrder::kNormal);
+
+  ThreadCacheRegistry::GetLock().Acquire();
+}
+
+template <typename T>
+void UnlockOrReinit(T& lock, bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
+  // Only re-init the locks in the child process; the parent can unlock
+  // normally.
+  if (in_child) {
+    lock.Reinit();
+  } else {
+    lock.Release();
+  }
+}
+
+void UnlockOrReinitRoot(PartitionRoot* root,
+                        bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
+  UnlockOrReinit(internal::PartitionRootLock(root), in_child);
+}
+
+void ReleaseLocks(bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
+  // In reverse order, even though there are no lock ordering dependencies.
+  UnlockOrReinit(ThreadCacheRegistry::GetLock(), in_child);
+  internal::PartitionRootEnumerator::Instance().Enumerate(
+      UnlockOrReinitRoot, in_child,
+      internal::PartitionRootEnumerator::EnumerateOrder::kReverse);
+
+  // PartitionRoot::GetLock() is private. So use
+  // g_root_enumerator_lock here.
+  UnlockOrReinit(g_root_enumerator_lock, in_child);
+}
+
+void AfterForkInParent() {
+  ReleaseLocks(/* in_child = */ false);
+}
+
+void AfterForkInChild() {
+  ReleaseLocks(/* in_child = */ true);
+  // Unsafe, as noted in the name. This is fine here however, since at this
+  // point there is only one thread, this one (unless another post-fork()
+  // handler created a thread, but it would have needed to allocate, which would
+  // have deadlocked the process already).
+  //
+  // If we don't reclaim this memory, it is lost forever. Note that this is only
+  // really an issue if we fork() a multi-threaded process without calling
+  // exec() right away, which is discouraged.
+  ThreadCacheRegistry::Instance().ForcePurgeAllThreadAfterForkUnsafe();
+}
+#endif  // PA_CONFIG(HAS_ATFORK_HANDLER)
+
+std::atomic<bool> g_global_init_called;
+void PartitionAllocMallocInitOnce() {
+  bool expected = false;
+  // No need to block execution for potential concurrent initialization; we
+  // merely want to make sure this is only called once.
+  if (!g_global_init_called.compare_exchange_strong(expected, true)) {
+    return;
+  }
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+  // When fork() is called, only the current thread continues to execute in the
+  // child process. If the lock is held, but *not* by this thread when fork() is
+  // called, we have a deadlock.
+  //
+  // The "solution" here is to acquire the lock on the forking thread before
+  // fork(), and keep it held until fork() is done, in the parent and the
+  // child. To clean up memory, we also must empty the thread caches in the
+  // child, which is easier, since no threads except for the current one are
+  // running right after the fork().
+  //
+  // This is not perfect though, since:
+  // - Multiple pre/post-fork() handlers can be registered; they are then run
+  //   in LIFO order for the pre-fork handlers, and FIFO order for the
+  //   post-fork ones. So unless we are the first to register a handler, if
+  //   another handler allocates, then we deterministically deadlock.
+  // - pthread handlers are *not* called when the application calls clone()
+  //   directly, which is what Chrome does to launch processes.
+  //
+  // No perfect solution really exists to make threads + fork() cooperate, but
+  // deadlocks are real (and fork() is used in DEATH_TEST()s), and other
+  // malloc() implementations use the same techniques.
+  int err =
+      pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
+  PA_CHECK(err == 0);
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+}
+
+}  // namespace
+
+#if BUILDFLAG(IS_APPLE)
+void PartitionAllocMallocHookOnBeforeForkInParent() {
+  BeforeForkInParent();
+}
+
+void PartitionAllocMallocHookOnAfterForkInParent() {
+  AfterForkInParent();
+}
+
+void PartitionAllocMallocHookOnAfterForkInChild() {
+  AfterForkInChild();
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace internal {
+
+namespace {
+// 64 was chosen arbitrarily, as it seems like a reasonable trade-off between
+// performance and purging opportunity. Higher value (i.e. smaller slots)
+// wouldn't necessarily increase chances of purging, but would result in
+// more work and larger |slot_usage| array. Lower value would probably decrease
+// chances of purging. Not empirically tested.
+constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64;
+PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+MinPurgeableSlotSize() {
+  return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
+}
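+// Illustrative example (not part of the algorithm): with a typical 4 KiB
+// system page, MinPurgeableSlotSize() above is 4096 / 64 = 64 bytes, so slot
+// spans whose slots are smaller than 64 bytes are skipped by the purging code
+// below.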
+}  // namespace
+
+// The function attempts to unprovision unused slots and discard unused pages.
+// It may also "straighten" the free list.
+//
+// If `accounting_only` is set to true, no action is performed and the function
+// merely returns the number of bytes in the would-be discarded pages.
+static size_t PartitionPurgeSlotSpan(PartitionRoot* root,
+                                     internal::SlotSpanMetadata* slot_span,
+                                     bool accounting_only)
+    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {
+  const internal::PartitionBucket* bucket = slot_span->bucket;
+  size_t slot_size = bucket->slot_size;
+
+  if (slot_size < MinPurgeableSlotSize() || !slot_span->num_allocated_slots) {
+    return 0;
+  }
+
+  size_t bucket_num_slots = bucket->get_slots_per_span();
+  size_t discardable_bytes = 0;
+
+  if (slot_span->CanStoreRawSize()) {
+    uint32_t utilized_slot_size = static_cast<uint32_t>(
+        RoundUpToSystemPage(slot_span->GetUtilizedSlotSize()));
+    discardable_bytes = bucket->slot_size - utilized_slot_size;
+    if (discardable_bytes && !accounting_only) {
+      uintptr_t slot_span_start =
+          internal::SlotSpanMetadata::ToSlotSpanStart(slot_span);
+      uintptr_t committed_data_end = slot_span_start + utilized_slot_size;
+      ScopedSyscallTimer timer{root};
+      DiscardSystemPages(committed_data_end, discardable_bytes);
+    }
+    return discardable_bytes;
+  }
+
+#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
+  constexpr size_t kMaxSlotCount =
+      (PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
+      MinPurgeableSlotSize();
+#elif BUILDFLAG(IS_APPLE) || (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
+  // It's better for slot_usage to be stack-allocated and fixed-size, which
+  // demands that its size be constexpr. On IS_APPLE and Linux on arm64,
+  // PartitionPageSize() is always SystemPageSize() << 2, so regardless of
+  // what the run time page size is, kMaxSlotCount can always be simplified
+  // to this expression.
+  constexpr size_t kMaxSlotCount =
+      4 * kMaxPurgeableSlotsPerSystemPage *
+      internal::kMaxPartitionPagesPerRegularSlotSpan;
+  PA_CHECK(kMaxSlotCount == (PartitionPageSize() *
+                             internal::kMaxPartitionPagesPerRegularSlotSpan) /
+                                MinPurgeableSlotSize());
+#endif
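+  // Worked example (illustrative, assuming the common constants): with 4 KiB
+  // system pages, PartitionPageSize() == SystemPageSize() << 2 == 16 KiB and
+  // kMaxPartitionPagesPerRegularSlotSpan == 4, kMaxSlotCount is
+  // (16384 * 4) / 64 == 1024, i.e. |slot_usage| below is a 1 KiB stack array.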
+  PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
+  PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
+  size_t num_provisioned_slots =
+      bucket_num_slots - slot_span->num_unprovisioned_slots;
+  char slot_usage[kMaxSlotCount];
+#if !BUILDFLAG(IS_WIN)
+  // The last freelist entry should not be discarded on Windows, because
+  // DiscardVirtualMemory makes the contents of discarded memory undefined.
+  size_t last_slot = static_cast<size_t>(-1);
+#endif
+  memset(slot_usage, 1, num_provisioned_slots);
+  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
+  // First, walk the freelist for this slot span and make a bitmap of which
+  // slots are not in use.
+  for (EncodedNextFreelistEntry* entry = slot_span->get_freelist_head(); entry;
+       entry = entry->GetNext(slot_size)) {
+    size_t slot_number =
+        bucket->GetSlotNumber(SlotStartPtr2Addr(entry) - slot_span_start);
+    PA_DCHECK(slot_number < num_provisioned_slots);
+    slot_usage[slot_number] = 0;
+#if !BUILDFLAG(IS_WIN)
+    // If we have a slot where the encoded next pointer is 0, we can actually
+    // discard that entry because touching a discarded page is guaranteed to
+    // return the original content or 0. (Note that this optimization won't be
+    // effective on big-endian machines because the masking function is
+    // negation.)
+    if (entry->IsEncodedNextPtrZero()) {
+      last_slot = slot_number;
+    }
+#endif
+  }
+
+  // If the slot(s) at the end of the slot span are not in use, we can truncate
+  // them entirely and rewrite the freelist.
+  size_t truncated_slots = 0;
+  while (!slot_usage[num_provisioned_slots - 1]) {
+    truncated_slots++;
+    num_provisioned_slots--;
+    PA_DCHECK(num_provisioned_slots);
+  }
+  // First, do the work of calculating the discardable bytes. Don't actually
+  // discard anything if `accounting_only` is set.
+  size_t unprovisioned_bytes = 0;
+  uintptr_t begin_addr = slot_span_start + (num_provisioned_slots * slot_size);
+  uintptr_t end_addr = begin_addr + (slot_size * truncated_slots);
+  if (truncated_slots) {
+    // Slots that do not contain discarded pages should not be included in
+    // |truncated_slots|. Detect those slots and adjust |truncated_slots| and
+    // |num_provisioned_slots| accordingly.
+    uintptr_t rounded_up_truncation_begin_addr =
+        RoundUpToSystemPage(begin_addr);
+    while (begin_addr + slot_size <= rounded_up_truncation_begin_addr) {
+      begin_addr += slot_size;
+      PA_DCHECK(truncated_slots);
+      --truncated_slots;
+      ++num_provisioned_slots;
+    }
+    begin_addr = rounded_up_truncation_begin_addr;
+
+    // We round the end address up and not down because we're at the end of a
+    // slot span, so we "own" all the way up to the page boundary.
+    end_addr = RoundUpToSystemPage(end_addr);
+    PA_DCHECK(end_addr <= slot_span_start + bucket->get_bytes_per_span());
+    if (begin_addr < end_addr) {
+      unprovisioned_bytes = end_addr - begin_addr;
+      discardable_bytes += unprovisioned_bytes;
+    }
+  }
+
+  // If `accounting_only` isn't set, then take action to remove unprovisioned
+  // slots from the free list (if any) and "straighten" the list (if
+  // requested) to help reduce fragmentation in the future. Then
+  // discard/decommit the pages hosting the unprovisioned slots.
+  if (!accounting_only) {
+    auto straighten_mode =
+        PartitionRoot::GetStraightenLargerSlotSpanFreeListsMode();
+    bool straighten =
+        straighten_mode == StraightenLargerSlotSpanFreeListsMode::kAlways ||
+        (straighten_mode ==
+             StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning &&
+         unprovisioned_bytes);
+
+    PA_DCHECK((unprovisioned_bytes > 0) == (truncated_slots > 0));
+    size_t new_unprovisioned_slots =
+        truncated_slots + slot_span->num_unprovisioned_slots;
+    PA_DCHECK(new_unprovisioned_slots <= bucket->get_slots_per_span());
+    slot_span->num_unprovisioned_slots = new_unprovisioned_slots;
+
+    size_t num_new_freelist_entries = 0;
+    internal::EncodedNextFreelistEntry* back = nullptr;
+    if (straighten) {
+      // Rewrite the freelist to "straighten" it. This achieves two things:
+      // getting rid of unprovisioned entries, and ordering entries by how
+      // close they are to the slot span start. This reduces the chances of
+      // allocating slots further into the span, in the hope that we'll get
+      // some unused pages at the end of the span that can be unprovisioned,
+      // thus reducing fragmentation.
+      for (size_t slot_index = 0; slot_index < num_provisioned_slots;
+           ++slot_index) {
+        if (slot_usage[slot_index]) {
+          continue;
+        }
+        // Add the slot to the end of the list. The most proper thing to do
+        // would be to null-terminate the new entry with:
+        //   auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(
+        //       slot_span_start + (slot_size * slot_index));
+        // But no need to do this, as its last-ness is likely temporary, and
+        // the next iteration's back->SetNext(), or the post-loop
+        // EncodedNextFreelistEntry::EmplaceAndInitNull(back) will override it
+        // anyway.
+        auto* entry = static_cast<EncodedNextFreelistEntry*>(
+            SlotStartAddr2Ptr(slot_span_start + (slot_size * slot_index)));
+        if (num_new_freelist_entries) {
+          back->SetNext(entry);
+        } else {
+          slot_span->SetFreelistHead(entry);
+        }
+        back = entry;
+        num_new_freelist_entries++;
+      }
+    } else if (unprovisioned_bytes) {
+      // If there are any unprovisioned entries, scan the list to remove them,
+      // without "straightening" it.
+      uintptr_t first_unprovisioned_slot =
+          slot_span_start + (num_provisioned_slots * slot_size);
+      bool skipped = false;
+      for (EncodedNextFreelistEntry* entry = slot_span->get_freelist_head();
+           entry; entry = entry->GetNext(slot_size)) {
+        uintptr_t entry_addr = SlotStartPtr2Addr(entry);
+        if (entry_addr >= first_unprovisioned_slot) {
+          skipped = true;
+          continue;
+        }
+        // If the last visited entry was skipped (due to being unprovisioned),
+        // update the next pointer of the last not skipped entry (or the head
+        // if no entry exists). Otherwise the link is already correct.
+        if (skipped) {
+          if (num_new_freelist_entries) {
+            back->SetNext(entry);
+          } else {
+            slot_span->SetFreelistHead(entry);
+          }
+          skipped = false;
+        }
+        back = entry;
+        num_new_freelist_entries++;
+      }
+    }
+    // If any of the above loops were executed, null-terminate the last entry,
+    // or the head if no entry exists.
+    if (straighten || unprovisioned_bytes) {
+      if (num_new_freelist_entries) {
+        PA_DCHECK(back);
+        EncodedNextFreelistEntry::EmplaceAndInitNull(back);
+#if !BUILDFLAG(IS_WIN)
+        // Memorize the index of the last slot in the list, as it may be able
+        // to participate in an optimization related to page discarding
+        // (below), due to its next pointer being encoded as 0.
+        last_slot =
+            bucket->GetSlotNumber(SlotStartPtr2Addr(back) - slot_span_start);
+#endif
+      } else {
+        PA_DCHECK(!back);
+        slot_span->SetFreelistHead(nullptr);
+      }
+      PA_DCHECK(num_new_freelist_entries ==
+                num_provisioned_slots - slot_span->num_allocated_slots);
+    }
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+    FreeSlotBitmapReset(slot_span_start + (slot_size * num_provisioned_slots),
+                        end_addr, slot_size);
+#endif
+
+    if (unprovisioned_bytes) {
+      if (!kUseLazyCommit) {
+        // Discard the memory.
+        ScopedSyscallTimer timer{root};
+        DiscardSystemPages(begin_addr, unprovisioned_bytes);
+      } else {
+        // See crbug.com/1431606 for details. LazyCommit relies on both used
+        // slots and unused slots (i.e. those in the freelist) being committed.
+        // However, this code removes the unused slots from the freelist, so if
+        // we used DiscardSystemPages() here, PartitionAlloc could later
+        // re-commit system pages that are already committed, which would make
+        // the committed_size and max_committed_size metrics wrong. PA should
+        // use DecommitSystemPagesForData() instead.
+        root->DecommitSystemPagesForData(
+            begin_addr, unprovisioned_bytes,
+            PageAccessibilityDisposition::kAllowKeepForPerf);
+      }
+    }
+  }
+
+  if (slot_size < SystemPageSize()) {
+    // Return here, because implementing the following steps for smaller slot
+    // sizes would require complicated logic and make the code messy.
+    return discardable_bytes;
+  }
+
+  // Next, walk the slots and for any not in use, consider which system pages
+  // are no longer needed. We can discard any system pages back to the system as
+  // long as we don't interfere with a freelist pointer or an adjacent used
+  // slot. Note they'll be automatically paged back in when touched, and
+  // zero-initialized (except Windows).
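+  // Illustrative example (assuming 4 KiB system pages): for an unused,
+  // page-aligned 12 KiB slot whose freelist entry fits in its first page,
+  // begin_addr is bumped past the freelist entry and rounded up to the next
+  // page boundary, so the slot's last two pages (8 KiB) are discardable while
+  // its first page stays resident to keep the freelist entry intact.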
+  for (size_t i = 0; i < num_provisioned_slots; ++i) {
+    if (slot_usage[i]) {
+      continue;
+    }
+
+    // The first address we can safely discard is just after the freelist
+    // pointer. There's one optimization opportunity: if the freelist pointer is
+    // encoded as 0, we can discard that pointer value too (except on
+    // Windows).
+    begin_addr = slot_span_start + (i * slot_size);
+    end_addr = begin_addr + slot_size;
+    bool can_discard_free_list_pointer = false;
+#if !BUILDFLAG(IS_WIN)
+    if (i != last_slot) {
+      begin_addr += sizeof(internal::EncodedNextFreelistEntry);
+    } else {
+      can_discard_free_list_pointer = true;
+    }
+#else
+    begin_addr += sizeof(internal::EncodedNextFreelistEntry);
+#endif
+
+    uintptr_t rounded_up_begin_addr = RoundUpToSystemPage(begin_addr);
+    uintptr_t rounded_down_begin_addr = RoundDownToSystemPage(begin_addr);
+    end_addr = RoundDownToSystemPage(end_addr);
+
+    // |rounded_up_begin_addr| could be greater than |end_addr| only if slot
+    // size was less than system page size, or if free list pointer crossed the
+    // page boundary. Neither is possible here.
+    PA_DCHECK(rounded_up_begin_addr <= end_addr);
+
+    if (rounded_down_begin_addr < rounded_up_begin_addr && i != 0 &&
+        !slot_usage[i - 1] && can_discard_free_list_pointer) {
+      // This slot contains a partial page in the beginning. The rest of that
+      // page is contained in the slot[i-1], which is also discardable.
+      // Therefore we can discard this page.
+      begin_addr = rounded_down_begin_addr;
+    } else {
+      begin_addr = rounded_up_begin_addr;
+    }
+
+    if (begin_addr < end_addr) {
+      size_t partial_slot_bytes = end_addr - begin_addr;
+      discardable_bytes += partial_slot_bytes;
+      if (!accounting_only) {
+        // Discard the pages, but don't be tempted to decommit them (as done
+        // above): here we're getting rid of provisioned pages amidst used
+        // pages, and we rely on them to materialize automatically when the
+        // virtual address is accessed, so the mapping needs to stay intact.
+        ScopedSyscallTimer timer{root};
+        DiscardSystemPages(begin_addr, partial_slot_bytes);
+      }
+    }
+  }
+
+  return discardable_bytes;
+}
+
+static void PartitionPurgeBucket(PartitionRoot* root,
+                                 internal::PartitionBucket* bucket)
+    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {
+  if (bucket->active_slot_spans_head !=
+      internal::SlotSpanMetadata::get_sentinel_slot_span()) {
+    for (internal::SlotSpanMetadata* slot_span = bucket->active_slot_spans_head;
+         slot_span; slot_span = slot_span->next_slot_span) {
+      PA_DCHECK(slot_span !=
+                internal::SlotSpanMetadata::get_sentinel_slot_span());
+      PartitionPurgeSlotSpan(root, slot_span, false);
+    }
+  }
+}
+
+static void PartitionDumpSlotSpanStats(PartitionBucketMemoryStats* stats_out,
+                                       PartitionRoot* root,
+                                       internal::SlotSpanMetadata* slot_span)
+    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {
+  uint16_t bucket_num_slots = slot_span->bucket->get_slots_per_span();
+
+  if (slot_span->is_decommitted()) {
+    ++stats_out->num_decommitted_slot_spans;
+    return;
+  }
+
+  stats_out->discardable_bytes += PartitionPurgeSlotSpan(root, slot_span, true);
+
+  if (slot_span->CanStoreRawSize()) {
+    stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
+  } else {
+    stats_out->active_bytes +=
+        (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
+  }
+  stats_out->active_count += slot_span->num_allocated_slots;
+
+  size_t slot_span_bytes_resident = RoundUpToSystemPage(
+      (bucket_num_slots - slot_span->num_unprovisioned_slots) *
+      stats_out->bucket_slot_size);
+  stats_out->resident_bytes += slot_span_bytes_resident;
+  if (slot_span->is_empty()) {
+    stats_out->decommittable_bytes += slot_span_bytes_resident;
+    ++stats_out->num_empty_slot_spans;
+  } else if (slot_span->is_full()) {
+    ++stats_out->num_full_slot_spans;
+  } else {
+    PA_DCHECK(slot_span->is_active());
+    ++stats_out->num_active_slot_spans;
+  }
+}
+
+static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
+                                     PartitionRoot* root,
+                                     const internal::PartitionBucket* bucket)
+    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {
+  PA_DCHECK(!bucket->is_direct_mapped());
+  stats_out->is_valid = false;
+  // If the active slot span list is empty (==
+  // internal::SlotSpanMetadata::get_sentinel_slot_span()), the bucket might
+  // still need to be reported if it has a list of empty, decommitted or full
+  // slot spans.
+  if (bucket->active_slot_spans_head ==
+          internal::SlotSpanMetadata::get_sentinel_slot_span() &&
+      !bucket->empty_slot_spans_head && !bucket->decommitted_slot_spans_head &&
+      !bucket->num_full_slot_spans) {
+    return;
+  }
+
+  memset(stats_out, '\0', sizeof(*stats_out));
+  stats_out->is_valid = true;
+  stats_out->is_direct_map = false;
+  stats_out->num_full_slot_spans =
+      static_cast<size_t>(bucket->num_full_slot_spans);
+  stats_out->bucket_slot_size = bucket->slot_size;
+  uint16_t bucket_num_slots = bucket->get_slots_per_span();
+  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
+  stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
+  stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
+  stats_out->active_count = bucket->num_full_slot_spans * bucket_num_slots;
+  stats_out->resident_bytes =
+      bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
+
+  for (internal::SlotSpanMetadata* slot_span = bucket->empty_slot_spans_head;
+       slot_span; slot_span = slot_span->next_slot_span) {
+    PA_DCHECK(slot_span->is_empty() || slot_span->is_decommitted());
+    PartitionDumpSlotSpanStats(stats_out, root, slot_span);
+  }
+  for (internal::SlotSpanMetadata* slot_span =
+           bucket->decommitted_slot_spans_head;
+       slot_span; slot_span = slot_span->next_slot_span) {
+    PA_DCHECK(slot_span->is_decommitted());
+    PartitionDumpSlotSpanStats(stats_out, root, slot_span);
+  }
+
+  if (bucket->active_slot_spans_head !=
+      internal::SlotSpanMetadata::get_sentinel_slot_span()) {
+    for (internal::SlotSpanMetadata* slot_span = bucket->active_slot_spans_head;
+         slot_span; slot_span = slot_span->next_slot_span) {
+      PA_DCHECK(slot_span !=
+                internal::SlotSpanMetadata::get_sentinel_slot_span());
+      PartitionDumpSlotSpanStats(stats_out, root, slot_span);
+    }
+  }
+}
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address) {
+  PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
+}
+#endif
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+void PartitionAllocThreadIsolationInit(ThreadIsolationOption thread_isolation) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  ThreadIsolationSettings::settings.enabled = true;
+#endif
+  PartitionAddressSpace::InitThreadIsolatedPool(thread_isolation);
+  // Call WriteProtectThreadIsolatedGlobals last since we might not have write
+  // permissions to the globals afterwards.
+  WriteProtectThreadIsolatedGlobals(thread_isolation);
+}
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+}  // namespace internal
+
+[[noreturn]] PA_NOINLINE void PartitionRoot::OutOfMemory(size_t size) {
+  const size_t virtual_address_space_size =
+      total_size_of_super_pages.load(std::memory_order_relaxed) +
+      total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
+#if !defined(ARCH_CPU_64_BITS)
+  const size_t uncommitted_size =
+      virtual_address_space_size -
+      total_size_of_committed_pages.load(std::memory_order_relaxed);
+
+  // Check whether this OOM is due to a lot of super pages that are allocated
+  // but not committed, probably due to http://crbug.com/421387.
+  if (uncommitted_size > internal::kReasonableSizeOfUnusedPages) {
+    internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
+  }
+
+#if BUILDFLAG(IS_WIN)
+  // If true then we are running on 64-bit Windows.
+  BOOL is_wow_64 = FALSE;
+  // Intentionally ignoring failures.
+  IsWow64Process(GetCurrentProcess(), &is_wow_64);
+  // 32-bit address space on Windows is typically either 2 GiB (on 32-bit
+  // Windows) or 4 GiB (on 64-bit Windows). 2.8 and 1.0 GiB are just rough
+  // guesses as to how much address space PA can consume (note that code,
+  // stacks, and other allocators will also consume address space).
+  const size_t kReasonableVirtualSize = (is_wow_64 ? 2800 : 1024) * 1024 * 1024;
+  // Make it obvious whether we are running on 64-bit Windows.
+  PA_DEBUG_DATA_ON_STACK("iswow64", static_cast<size_t>(is_wow_64));
+#else
+  constexpr size_t kReasonableVirtualSize =
+      // 1.5GiB elsewhere, since address space is typically 3GiB.
+      (1024 + 512) * 1024 * 1024;
+#endif
+  if (virtual_address_space_size > kReasonableVirtualSize) {
+    internal::PartitionOutOfMemoryWithLargeVirtualSize(
+        virtual_address_space_size);
+  }
+#endif  // #if !defined(ARCH_CPU_64_BITS)
+
+  // Out of memory can be due to multiple causes, such as:
+  // - Out of virtual address space in the desired pool
+  // - Out of commit due to either our process, or another one
+  // - Excessive allocations in the current process
+  //
+  // Saving these values makes it easier to distinguish between these. See the
+  // documentation in PA_CONFIG(DEBUG_DATA_ON_STACK) on how to get these from
+  // minidumps.
+  PA_DEBUG_DATA_ON_STACK("va_size", virtual_address_space_size);
+  PA_DEBUG_DATA_ON_STACK("alloc", get_total_size_of_allocated_bytes());
+  PA_DEBUG_DATA_ON_STACK("commit", get_total_size_of_committed_pages());
+  PA_DEBUG_DATA_ON_STACK("size", size);
+
+  if (internal::g_oom_handling_function) {
+    (*internal::g_oom_handling_function)(size);
+  }
+  OOM_CRASH(size);
+}
+
+void PartitionRoot::DecommitEmptySlotSpans() {
+  ShrinkEmptySlotSpansRing(0);
+  // We just decommitted everything, and we're holding the lock, so this
+  // should be exactly 0.
+  PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
+}
+
+void PartitionRoot::DestructForTesting() {
+  // We need to destruct the thread cache before we unreserve any of the super
+  // pages below, which we currently are not doing. So, we should only call
+  // this function on PartitionRoots without a thread cache.
+  PA_CHECK(!settings.with_thread_cache);
+  auto pool_handle = ChoosePool();
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  // The pages managed by the thread-isolated pool will be freed by
+  // UninitThreadIsolatedForTesting(). Don't invoke FreePages() for them.
+  if (pool_handle == internal::kThreadIsolatedPoolHandle) {
+    return;
+  }
+  PA_DCHECK(pool_handle < internal::kNumPools);
+#else
+  PA_DCHECK(pool_handle <= internal::kNumPools);
+#endif
+
+  auto* curr = first_extent;
+  while (curr != nullptr) {
+    auto* next = curr->next;
+    uintptr_t address = SuperPagesBeginFromExtent(curr);
+    size_t size =
+        internal::kSuperPageSize * curr->number_of_consecutive_super_pages;
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+    internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address,
+                                                           size);
+#endif
+    internal::AddressPoolManager::GetInstance().UnreserveAndDecommit(
+        pool_handle, address, size);
+    curr = next;
+  }
+}
+
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+void PartitionRoot::InitMac11MallocSizeHackUsableSize(size_t ref_count_size) {
+  settings.mac11_malloc_size_hack_enabled_ = true;
+
+  // 0 means reserve just enough extras to fit PartitionRefCount.
+  if (!ref_count_size) {
+    ref_count_size = sizeof(internal::PartitionRefCount);
+  }
+  // A request of 32B will fall into a 48B bucket in the presence of BRP
+  // ref-count, yielding |48 - ref_count_size| of actual usable space.
+  settings.mac11_malloc_size_hack_usable_size_ = 48 - ref_count_size;
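+  // For instance (illustrative only): with a hypothetical 16-byte ref-count,
+  // the usable size recorded here would be 48 - 16 = 32 bytes.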
+}
+
+void PartitionRoot::EnableMac11MallocSizeHackForTesting(size_t ref_count_size) {
+  settings.mac11_malloc_size_hack_enabled_ = true;
+  InitMac11MallocSizeHackUsableSize(ref_count_size);
+}
+
+void PartitionRoot::EnableMac11MallocSizeHackIfNeeded(size_t ref_count_size) {
+  settings.mac11_malloc_size_hack_enabled_ =
+      settings.brp_enabled_ && internal::base::mac::MacOSMajorVersion() == 11;
+  if (settings.mac11_malloc_size_hack_enabled_) {
+    InitMac11MallocSizeHackUsableSize(ref_count_size);
+  }
+}
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
+namespace {
+std::atomic<bool> g_reserve_brp_guard_region_called;
+// An address constructed by repeating `kQuarantinedByte` should never point
+// to valid memory. Preemptively reserve a memory region around that address and
+// make it inaccessible. Not needed for 64-bit platforms where the address is
+// guaranteed to be non-canonical. Safe to call multiple times.
+void ReserveBackupRefPtrGuardRegionIfNeeded() {
+  bool expected = false;
+  // No need to block execution for potential concurrent initialization; we
+  // merely want to make sure this is only called once.
+  if (!g_reserve_brp_guard_region_called.compare_exchange_strong(expected,
+                                                                 true)) {
+    return;
+  }
+
+  size_t alignment = internal::PageAllocationGranularity();
+  uintptr_t requested_address;
+  memset(&requested_address, internal::kQuarantinedByte,
+         sizeof(requested_address));
+  requested_address = RoundDownToPageAllocationGranularity(requested_address);
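+  // Sketch of the resulting address (assuming, for illustration, that
+  // kQuarantinedByte is 0xEF and a 4 KiB allocation granularity): on a 32-bit
+  // build, requested_address starts as 0xEFEFEFEF and is rounded down to
+  // 0xEFEFE000, so the loop below reserves four inaccessible regions starting
+  // there.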
+
+  // Request several pages so that even unreasonably large C++ objects stay
+  // within the inaccessible region. If some of the pages can't be reserved,
+  // it's still preferable to try and reserve the rest.
+  for (size_t i = 0; i < 4; ++i) {
+    [[maybe_unused]] uintptr_t allocated_address =
+        AllocPages(requested_address, alignment, alignment,
+                   PageAccessibilityConfiguration(
+                       PageAccessibilityConfiguration::kInaccessible),
+                   PageTag::kPartitionAlloc);
+    requested_address += alignment;
+  }
+}
+}  // namespace
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
+        // !BUILDFLAG(HAS_64_BIT_POINTERS)
+
+void PartitionRoot::Init(PartitionOptions opts) {
+  {
+#if BUILDFLAG(IS_APPLE)
+    // Needed to statically bound the page size, which is a runtime constant
+    // on Apple OSes.
+    PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
+             (internal::SystemPageSize() == (size_t{1} << 14)));
+#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
+    // Check the runtime page size. Though the check is currently the same, it
+    // is not merged with the IS_APPLE case above because a 1 << 16 case will
+    // need to be added here in the future to allow a 64 KiB page size. That is
+    // only supported on Linux on arm64, not on IS_APPLE, and is not yet
+    // present here because the rest of the partition allocator does not
+    // currently support it.
+    PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
+             (internal::SystemPageSize() == (size_t{1} << 14)));
+#endif
+
+    ::partition_alloc::internal::ScopedGuard guard{lock_};
+    if (initialized) {
+      return;
+    }
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+    // Reserve address space for partition alloc.
+    internal::PartitionAddressSpace::Init();
+#endif
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
+    ReserveBackupRefPtrGuardRegionIfNeeded();
+#endif
+
+    settings.allow_aligned_alloc =
+        opts.aligned_alloc == PartitionOptions::kAllowed;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    settings.use_cookie = true;
+#else
+    static_assert(!Settings::use_cookie);
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    settings.brp_enabled_ = opts.backup_ref_ptr == PartitionOptions::kEnabled;
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+    EnableMac11MallocSizeHackIfNeeded(opts.ref_count_size);
+#endif
+#else   // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    PA_CHECK(opts.backup_ref_ptr == PartitionOptions::kDisabled);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    settings.use_configurable_pool =
+        (opts.use_configurable_pool == PartitionOptions::kAllowed) &&
+        IsConfigurablePoolAvailable();
+    PA_DCHECK(!settings.use_configurable_pool || IsConfigurablePoolAvailable());
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+    settings.memory_tagging_enabled_ =
+        opts.memory_tagging.enabled == PartitionOptions::kEnabled;
+    // Memory tagging is not supported in the configurable pool because MTE
+    // stores tagging information in the high bits of the pointer, which causes
+    // issues with components like V8's ArrayBuffers that use custom pointer
+    // representations. All custom representations encountered so far rely on
+    // an "is in configurable pool?" check, so we use that as a proxy.
+    PA_CHECK(!settings.memory_tagging_enabled_ ||
+             !settings.use_configurable_pool);
+
+    settings.memory_tagging_reporting_mode_ =
+        opts.memory_tagging.reporting_mode;
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+    // brp_enabled() is not supported in the configurable pool because
+    // BRP requires objects to be in a different Pool.
+    PA_CHECK(!(settings.use_configurable_pool && brp_enabled()));
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    // BRP and thread isolated mode use different pools, so they can't be
+    // enabled at the same time.
+    PA_CHECK(!opts.thread_isolation.enabled ||
+             opts.backup_ref_ptr == PartitionOptions::kDisabled);
+    settings.thread_isolation = opts.thread_isolation;
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+    // The ref-count messes up the alignment needed for AlignedAlloc, making
+    // the two options incompatible, except in the
+    // PUT_REF_COUNT_IN_PREVIOUS_SLOT case.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
+    !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+    PA_CHECK(!settings.allow_aligned_alloc || !settings.brp_enabled_);
+#endif
+
+#if PA_CONFIG(EXTRAS_REQUIRED)
+    settings.extras_size = 0;
+    settings.extras_offset = 0;
+
+    if (settings.use_cookie) {
+      settings.extras_size += internal::kPartitionCookieSizeAdjustment;
+    }
+
+    if (brp_enabled()) {
+      // TODO(tasak): In the PUT_REF_COUNT_IN_PREVIOUS_SLOT case, ref-count is
+      // stored out-of-line for single-slot slot spans, so no need to
+      // add/subtract its size in this case.
+      size_t ref_count_size = opts.ref_count_size;
+      if (!ref_count_size) {
+        ref_count_size = internal::kPartitionRefCountSizeAdjustment;
+      }
+      ref_count_size = internal::AlignUpRefCountSizeForMac(ref_count_size);
+#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+      if (IsMemoryTaggingEnabled()) {
+        ref_count_size = internal::base::bits::AlignUp(
+            ref_count_size, internal::kMemTagGranuleSize);
+      }
+      settings.ref_count_size = ref_count_size;
+#endif  // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+      PA_CHECK(internal::kPartitionRefCountSizeAdjustment <= ref_count_size);
+      settings.extras_size += ref_count_size;
+      settings.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
+    }
+#endif  // PA_CONFIG(EXTRAS_REQUIRED)
+
+    // Re-confirm the above PA_CHECKs, by making sure there are no
+    // pre-allocation extras when AlignedAlloc is allowed. Post-allocation
+    // extras are ok.
+    PA_CHECK(!settings.allow_aligned_alloc || !settings.extras_offset);
+
+    settings.quarantine_mode =
+#if BUILDFLAG(USE_STARSCAN)
+        (opts.star_scan_quarantine == PartitionOptions::kDisallowed
+             ? QuarantineMode::kAlwaysDisabled
+             : QuarantineMode::kDisabledByDefault);
+#else
+        QuarantineMode::kAlwaysDisabled;
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+    // We mark the sentinel slot span as free to make sure it is skipped by our
+    // logic to find a new active slot span.
+    memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
+    sentinel_bucket.active_slot_spans_head =
+        SlotSpan::get_sentinel_slot_span_non_const();
+
+    // This is a "magic" value so we can test if a root pointer is valid.
+    inverted_self = ~reinterpret_cast<uintptr_t>(this);
+
+    // Set up the actual usable buckets first.
+    constexpr internal::BucketIndexLookup lookup{};
+    size_t bucket_index = 0;
+    while (lookup.bucket_sizes()[bucket_index] !=
+           internal::kInvalidBucketSize) {
+      buckets[bucket_index].Init(lookup.bucket_sizes()[bucket_index]);
+      bucket_index++;
+    }
+    PA_DCHECK(bucket_index < internal::kNumBuckets);
+
+    // Remaining buckets are not usable, and not real.
+    for (size_t index = bucket_index; index < internal::kNumBuckets; index++) {
+      // Cannot init with size 0 since it computes 1 / size, but make sure the
+      // bucket is invalid.
+      buckets[index].Init(internal::kInvalidBucketSize);
+      buckets[index].active_slot_spans_head = nullptr;
+      PA_DCHECK(!buckets[index].is_valid());
+    }
+
+#if !PA_CONFIG(THREAD_CACHE_SUPPORTED)
+    // TLS in ThreadCache is not supported on this platform.
+    settings.with_thread_cache = false;
+#else
+    ThreadCache::EnsureThreadSpecificDataInitialized();
+    settings.with_thread_cache =
+        (opts.thread_cache == PartitionOptions::kEnabled);
+
+    if (settings.with_thread_cache) {
+      ThreadCache::Init(this);
+    }
+#endif  // !PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+    internal::PartitionRootEnumerator::Instance().Register(this);
+#endif
+
+    initialized = true;
+  }
+
+  // Called without the lock, might allocate.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  PartitionAllocMallocInitOnce();
+#endif
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  if (settings.thread_isolation.enabled) {
+    internal::PartitionAllocThreadIsolationInit(settings.thread_isolation);
+  }
+#endif
+}
+
+PartitionRoot::Settings::Settings() = default;
+
+PartitionRoot::PartitionRoot() : scheduler_loop_quarantine(this) {}
+
+PartitionRoot::PartitionRoot(PartitionOptions opts)
+    : scheduler_loop_quarantine(
+          this,
+          opts.scheduler_loop_quarantine_capacity_in_bytes) {
+  Init(opts);
+}
+
+PartitionRoot::~PartitionRoot() {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  PA_CHECK(!settings.with_thread_cache)
+      << "Must not destroy a partition with a thread cache";
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+  if (initialized) {
+    internal::PartitionRootEnumerator::Instance().Unregister(this);
+  }
+#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+}
+
+void PartitionRoot::EnableThreadCacheIfSupported() {
+#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
+  ::partition_alloc::internal::ScopedGuard guard{lock_};
+  PA_CHECK(!settings.with_thread_cache);
+  // By the time we get here, there may be multiple threads created in the
+  // process. Since `with_thread_cache` is accessed without a lock, it can
+  // become visible to another thread before the effects of
+  // `internal::ThreadCacheInit()` are visible. To prevent that, we fake thread
+  // cache creation being in-progress while this is running.
+  //
+  // This synchronizes with the acquire load in `MaybeInitThreadCacheAndAlloc()`
+  // to ensure that we don't create (and thus use) a ThreadCache before
+  // ThreadCache::Init()'s effects are visible.
+  int before =
+      thread_caches_being_constructed_.fetch_add(1, std::memory_order_acquire);
+  PA_CHECK(before == 0);
+  ThreadCache::Init(this);
+  thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
+  settings.with_thread_cache = true;
+#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
+}
+
+bool PartitionRoot::TryReallocInPlaceForDirectMap(
+    internal::SlotSpanMetadata* slot_span,
+    size_t requested_size) {
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
+  // Slot-span metadata isn't MTE-tagged.
+  PA_DCHECK(
+      internal::IsManagedByDirectMap(reinterpret_cast<uintptr_t>(slot_span)));
+
+  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
+  auto* extent = DirectMapExtent::FromSlotSpan(slot_span);
+  size_t current_reservation_size = extent->reservation_size;
+  // Calculate the new reservation size the way PartitionDirectMap() would, but
+  // skip the alignment, because this call isn't requesting it.
+  size_t new_reservation_size = GetDirectMapReservationSize(raw_size);
+
+  // If the new reservation would be larger, there is nothing we can do to
+  // reallocate in-place.
+  if (new_reservation_size > current_reservation_size) {
+    return false;
+  }
+
+  // Don't reallocate in-place if new reservation size would be less than 80 %
+  // of the current one, to avoid holding on to too much unused address space.
+  // Make this check before comparing slot sizes, as even with equal or similar
+  // slot sizes we can save a lot if the original allocation was heavily padded
+  // for alignment.
+  if ((new_reservation_size >> internal::SystemPageShift()) * 5 <
+      (current_reservation_size >> internal::SystemPageShift()) * 4) {
+    return false;
+  }
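+  // Example (illustrative): shrinking a 10 MiB reservation to a 7 MiB one
+  // fails this check (7/10 < 4/5) and falls back to free()+malloc(), while
+  // shrinking it to 9 MiB (9/10 >= 4/5) proceeds in place.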
+
+  // Note that the new size isn't a bucketed size; this function is called
+  // whenever we're reallocating a direct mapped allocation, so calculate it
+  // the way PartitionDirectMap() would.
+  size_t new_slot_size = GetDirectMapSlotSize(raw_size);
+  if (new_slot_size < internal::kMinDirectMappedDownsize) {
+    return false;
+  }
+
+  // Past this point, we decided we'll attempt to reallocate without relocating,
+  // so we have to honor the padding for alignment in front of the original
+  // allocation, even though this function isn't requesting any alignment.
+
+  // bucket->slot_size is the currently committed size of the allocation.
+  size_t current_slot_size = slot_span->bucket->slot_size;
+  size_t current_usable_size = GetSlotUsableSize(slot_span);
+  uintptr_t slot_start = SlotSpan::ToSlotSpanStart(slot_span);
+  // This is the available part of the reservation up to which the new
+  // allocation can grow.
+  size_t available_reservation_size =
+      current_reservation_size - extent->padding_for_alignment -
+      PartitionRoot::GetDirectMapMetadataAndGuardPagesSize();
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  uintptr_t reservation_start = slot_start & internal::kSuperPageBaseMask;
+  PA_DCHECK(internal::IsReservationStart(reservation_start));
+  PA_DCHECK(slot_start + available_reservation_size ==
+            reservation_start + current_reservation_size -
+                GetDirectMapMetadataAndGuardPagesSize() +
+                internal::PartitionPageSize());
+#endif
+
+  PA_DCHECK(new_slot_size > internal::kMaxMemoryTaggingSize);
+  if (new_slot_size == current_slot_size) {
+    // No need to move any memory around, but update size and cookie below.
+    // That's because raw_size may have changed.
+  } else if (new_slot_size < current_slot_size) {
+    // Shrink by decommitting unneeded pages and making them inaccessible.
+    size_t decommit_size = current_slot_size - new_slot_size;
+    DecommitSystemPagesForData(slot_start + new_slot_size, decommit_size,
+                               PageAccessibilityDisposition::kRequireUpdate);
+    // Since the decommitted system pages are still reserved, we don't need to
+    // change the entries for decommitted pages in the reservation offset table.
+  } else if (new_slot_size <= available_reservation_size) {
+    // Grow within the actually reserved address space. Just need to make the
+    // pages accessible again.
+    size_t recommit_slot_size_growth = new_slot_size - current_slot_size;
+    // Direct map never uses tagging, as size is always >kMaxMemoryTaggingSize.
+    RecommitSystemPagesForData(
+        slot_start + current_slot_size, recommit_slot_size_growth,
+        PageAccessibilityDisposition::kRequireUpdate, false);
+    // The recommitted system pages were already reserved, and all the entries
+    // in the reservation offset table (for the entire reservation_size region)
+    // have already been initialized.
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    memset(reinterpret_cast<void*>(slot_start + current_slot_size),
+           internal::kUninitializedByte, recommit_slot_size_growth);
+#endif
+  } else {
+    // We can't perform the realloc in-place.
+    // TODO: support this too when possible.
+    return false;
+  }
+
+  DecreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
+                                    slot_span->bucket->slot_size);
+  slot_span->SetRawSize(raw_size);
+  slot_span->bucket->slot_size = new_slot_size;
+  IncreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
+                                    slot_span->bucket->slot_size, raw_size);
+
+  // Always record in-place realloc() as free()+malloc() pair.
+  //
+  // The early returns above (`return false`) will fall back to free()+malloc(),
+  // so this is consistent.
+  auto* thread_cache = GetOrCreateThreadCache();
+  if (ThreadCache::IsValid(thread_cache)) {
+    thread_cache->RecordDeallocation(current_usable_size);
+    thread_cache->RecordAllocation(GetSlotUsableSize(slot_span));
+  }
+
+  // Write a new trailing cookie.
+  if (settings.use_cookie) {
+    auto* object = static_cast<unsigned char*>(SlotStartToObject(slot_start));
+    internal::PartitionCookieWriteValue(object + GetSlotUsableSize(slot_span));
+  }
+
+  return true;
+}
+
+bool PartitionRoot::TryReallocInPlaceForNormalBuckets(void* object,
+                                                      SlotSpan* slot_span,
+                                                      size_t new_size) {
+  uintptr_t slot_start = ObjectToSlotStart(object);
+  PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
+
+  // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
+  // new size is a significant percentage smaller. We could do the same if we
+  // determine it is a win.
+  if (AllocationCapacityFromRequestedSize(new_size) !=
+      AllocationCapacityFromSlotStart(slot_start)) {
+    return false;
+  }
+  size_t current_usable_size = GetSlotUsableSize(slot_span);
+
+  // Trying to allocate |new_size| would use the same amount of underlying
+  // memory as we're already using, so re-use the allocation after updating
+  // statistics (and cookie, if present).
+  if (slot_span->CanStoreRawSize()) {
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
+    internal::PartitionRefCount* old_ref_count;
+    if (brp_enabled()) {
+      old_ref_count = internal::PartitionRefCountPointer(slot_start);
+    }
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) &&
+        // BUILDFLAG(PA_DCHECK_IS_ON)
+    size_t new_raw_size = AdjustSizeForExtrasAdd(new_size);
+    slot_span->SetRawSize(new_raw_size);
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
+    if (brp_enabled()) {
+      internal::PartitionRefCount* new_ref_count =
+          internal::PartitionRefCountPointer(slot_start);
+      PA_DCHECK(new_ref_count == old_ref_count);
+    }
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) &&
+        // BUILDFLAG(PA_DCHECK_IS_ON)
+    // Write a new trailing cookie only when it is possible to keep track of
+    // the raw size (otherwise we wouldn't know where to look for it later).
+    if (settings.use_cookie) {
+      internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
+                                          GetSlotUsableSize(slot_span));
+    }
+  }
+
+  // Always record a realloc() as a free() + malloc(), even if it's in
+  // place. When we cannot do it in place (`return false` above), the allocator
+  // falls back to free()+malloc(), so this is consistent.
+  ThreadCache* thread_cache = GetOrCreateThreadCache();
+  if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
+    thread_cache->RecordDeallocation(current_usable_size);
+    thread_cache->RecordAllocation(GetSlotUsableSize(slot_span));
+  }
+
+  return object;
+}
+
+void PartitionRoot::PurgeMemory(int flags) {
+  {
+    ::partition_alloc::internal::ScopedGuard guard{
+        internal::PartitionRootLock(this)};
+#if BUILDFLAG(USE_STARSCAN)
+    // Avoid purging if there is a PCScan task currently scheduled. Since
+    // PCScan takes a snapshot of all allocated pages, decommitting pages here
+    // (even under the lock) is racy.
+    // TODO(bikineev): Consider rescheduling the purging after PCScan.
+    if (PCScan::IsInProgress()) {
+      return;
+    }
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+    if (flags & PurgeFlags::kDecommitEmptySlotSpans) {
+      DecommitEmptySlotSpans();
+    }
+    if (flags & PurgeFlags::kDiscardUnusedSystemPages) {
+      for (Bucket& bucket : buckets) {
+        if (bucket.slot_size == internal::kInvalidBucketSize) {
+          continue;
+        }
+
+        if (bucket.slot_size >= internal::MinPurgeableSlotSize()) {
+          internal::PartitionPurgeBucket(this, &bucket);
+        } else {
+          if (sort_smaller_slot_span_free_lists_) {
+            bucket.SortSmallerSlotSpanFreeLists();
+          }
+        }
+
+        // Do it at the end, as the actions above change the status of slot
+        // spans (e.g. empty -> decommitted).
+        bucket.MaintainActiveList();
+
+        if (sort_active_slot_spans_) {
+          bucket.SortActiveSlotSpans();
+        }
+      }
+    }
+  }
+}
+
+void PartitionRoot::ShrinkEmptySlotSpansRing(size_t limit) {
+  int16_t index = global_empty_slot_span_ring_index;
+  int16_t starting_index = index;
+  while (empty_slot_spans_dirty_bytes > limit) {
+    SlotSpan* slot_span = global_empty_slot_span_ring[index];
+    // The ring is not always full, so entries may be nullptr.
+    if (slot_span) {
+      slot_span->DecommitIfPossible(this);
+      global_empty_slot_span_ring[index] = nullptr;
+    }
+    index += 1;
+    // Walk through the entirety of possible slots, even though the last ones
+    // are unused, if global_empty_slot_span_ring_size is smaller than
+    // kMaxFreeableSpans. It's simpler, and does not cost anything, since all
+    // the pointers are going to be nullptr.
+    if (index == internal::kMaxFreeableSpans) {
+      index = 0;
+    }
+
+    // We went around the whole ring; since this is locked,
+    // empty_slot_spans_dirty_bytes should be exactly 0.
+    if (index == starting_index) {
+      PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
+      // Metrics issue, don't crash, return.
+      break;
+    }
+  }
+}
+
+void PartitionRoot::DumpStats(const char* partition_name,
+                              bool is_light_dump,
+                              PartitionStatsDumper* dumper) {
+  static const size_t kMaxReportableDirectMaps = 4096;
+  // Allocate on the heap rather than on the stack to avoid stack overflow
+  // skirmishes (on Windows, in particular). Allocate before locking below,
+  // otherwise when PartitionAlloc is malloc() we get reentrancy issues. This
+  // inflates reported values a bit for detailed dumps though, by 16kiB.
+  std::unique_ptr<uint32_t[]> direct_map_lengths;
+  if (!is_light_dump) {
+    direct_map_lengths =
+        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
+  }
+  PartitionBucketMemoryStats bucket_stats[internal::kNumBuckets];
+  size_t num_direct_mapped_allocations = 0;
+  PartitionMemoryStats stats = {0};
+
+  stats.syscall_count = syscall_count.load(std::memory_order_relaxed);
+  stats.syscall_total_time_ns =
+      syscall_total_time_ns.load(std::memory_order_relaxed);
+
+  // Collect data with the lock held; we cannot allocate or call third-party
+  // code below.
+  {
+    ::partition_alloc::internal::ScopedGuard guard{
+        internal::PartitionRootLock(this)};
+    PA_DCHECK(total_size_of_allocated_bytes <= max_size_of_allocated_bytes);
+
+    stats.total_mmapped_bytes =
+        total_size_of_super_pages.load(std::memory_order_relaxed) +
+        total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
+    stats.total_committed_bytes =
+        total_size_of_committed_pages.load(std::memory_order_relaxed);
+    stats.max_committed_bytes =
+        max_size_of_committed_pages.load(std::memory_order_relaxed);
+    stats.total_allocated_bytes = total_size_of_allocated_bytes;
+    stats.max_allocated_bytes = max_size_of_allocated_bytes;
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    stats.total_brp_quarantined_bytes =
+        total_size_of_brp_quarantined_bytes.load(std::memory_order_relaxed);
+    stats.total_brp_quarantined_count =
+        total_count_of_brp_quarantined_slots.load(std::memory_order_relaxed);
+    stats.cumulative_brp_quarantined_bytes =
+        cumulative_size_of_brp_quarantined_bytes.load(
+            std::memory_order_relaxed);
+    stats.cumulative_brp_quarantined_count =
+        cumulative_count_of_brp_quarantined_slots.load(
+            std::memory_order_relaxed);
+#endif
+
+    size_t direct_mapped_allocations_total_size = 0;
+    for (size_t i = 0; i < internal::kNumBuckets; ++i) {
+      const Bucket* bucket = &bucket_at(i);
+      // Don't report the pseudo buckets that the generic allocator sets up in
+      // order to preserve a fast size->bucket map (see
+      // PartitionRoot::Init() for details).
+      if (!bucket->is_valid()) {
+        bucket_stats[i].is_valid = false;
+      } else {
+        internal::PartitionDumpBucketStats(&bucket_stats[i], this, bucket);
+      }
+      if (bucket_stats[i].is_valid) {
+        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
+        stats.total_active_bytes += bucket_stats[i].active_bytes;
+        stats.total_active_count += bucket_stats[i].active_count;
+        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
+        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
+      }
+    }
+
+    for (DirectMapExtent* extent = direct_map_list;
+         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
+         extent = extent->next_extent, ++num_direct_mapped_allocations) {
+      PA_DCHECK(!extent->next_extent ||
+                extent->next_extent->prev_extent == extent);
+      size_t slot_size = extent->bucket->slot_size;
+      direct_mapped_allocations_total_size += slot_size;
+      if (is_light_dump) {
+        continue;
+      }
+      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
+    }
+
+    stats.total_resident_bytes += direct_mapped_allocations_total_size;
+    stats.total_active_bytes += direct_mapped_allocations_total_size;
+    stats.total_active_count += num_direct_mapped_allocations;
+
+    stats.has_thread_cache = settings.with_thread_cache;
+    if (stats.has_thread_cache) {
+      ThreadCacheRegistry::Instance().DumpStats(
+          true, &stats.current_thread_cache_stats);
+      ThreadCacheRegistry::Instance().DumpStats(false,
+                                                &stats.all_thread_caches_stats);
+    }
+  }
+
+  // Do not hold the lock when calling |dumper|, as it may allocate.
+  if (!is_light_dump) {
+    for (auto& stat : bucket_stats) {
+      if (stat.is_valid) {
+        dumper->PartitionsDumpBucketStats(partition_name, &stat);
+      }
+    }
+
+    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
+      uint32_t size = direct_map_lengths[i];
+
+      PartitionBucketMemoryStats mapped_stats = {};
+      mapped_stats.is_valid = true;
+      mapped_stats.is_direct_map = true;
+      mapped_stats.num_full_slot_spans = 1;
+      mapped_stats.allocated_slot_span_size = size;
+      mapped_stats.bucket_slot_size = size;
+      mapped_stats.active_bytes = size;
+      mapped_stats.active_count = 1;
+      mapped_stats.resident_bytes = size;
+      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
+    }
+  }
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+// static
+void PartitionRoot::DeleteForTesting(PartitionRoot* partition_root) {
+  if (partition_root->settings.with_thread_cache) {
+    ThreadCache::SwapForTesting(nullptr);
+    partition_root->settings.with_thread_cache = false;
+  }
+
+  partition_root->DestructForTesting();  // IN-TEST
+
+  delete partition_root;
+}
+
+void PartitionRoot::ResetForTesting(bool allow_leaks) {
+  if (settings.with_thread_cache) {
+    ThreadCache::SwapForTesting(nullptr);
+    settings.with_thread_cache = false;
+  }
+
+  ::partition_alloc::internal::ScopedGuard guard{
+      internal::PartitionRootLock(this)};
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  if (!allow_leaks) {
+    unsigned num_allocated_slots = 0;
+    for (Bucket& bucket : buckets) {
+      if (bucket.active_slot_spans_head !=
+          internal::SlotSpanMetadata::get_sentinel_slot_span()) {
+        for (internal::SlotSpanMetadata* slot_span =
+                 bucket.active_slot_spans_head;
+             slot_span; slot_span = slot_span->next_slot_span) {
+          num_allocated_slots += slot_span->num_allocated_slots;
+        }
+      }
+      // Full slot spans are not tracked on any list, so use
+      // bucket.num_full_slot_spans to count the slots they contain.
+      if (bucket.num_full_slot_spans) {
+        num_allocated_slots +=
+            bucket.num_full_slot_spans * bucket.get_slots_per_span();
+      }
+    }
+    PA_DCHECK(num_allocated_slots == 0);
+
+    // Check for direct-mapped allocations.
+    PA_DCHECK(!direct_map_list);
+  }
+#endif
+
+  DestructForTesting();  // IN-TEST
+
+#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+  if (initialized) {
+    internal::PartitionRootEnumerator::Instance().Unregister(this);
+  }
+#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+
+  for (Bucket& bucket : buckets) {
+    bucket.active_slot_spans_head =
+        SlotSpan::get_sentinel_slot_span_non_const();
+    bucket.empty_slot_spans_head = nullptr;
+    bucket.decommitted_slot_spans_head = nullptr;
+    bucket.num_full_slot_spans = 0;
+  }
+
+  next_super_page = 0;
+  next_partition_page = 0;
+  next_partition_page_end = 0;
+  current_extent = nullptr;
+  first_extent = nullptr;
+
+  direct_map_list = nullptr;
+  for (auto*& entity : global_empty_slot_span_ring) {
+    entity = nullptr;
+  }
+
+  global_empty_slot_span_ring_index = 0;
+  global_empty_slot_span_ring_size = internal::kDefaultEmptySlotSpanRingSize;
+  initialized = false;
+}
+
+void PartitionRoot::ResetBookkeepingForTesting() {
+  ::partition_alloc::internal::ScopedGuard guard{
+      internal::PartitionRootLock(this)};
+  max_size_of_allocated_bytes = total_size_of_allocated_bytes;
+  max_size_of_committed_pages.store(total_size_of_committed_pages);
+}
+
+ThreadCache* PartitionRoot::MaybeInitThreadCache() {
+  auto* tcache = ThreadCache::Get();
+  // See comment in `EnableThreadCacheIfSupported()` for why this is an acquire
+  // load.
+  if (ThreadCache::IsTombstone(tcache) ||
+      thread_caches_being_constructed_.load(std::memory_order_acquire)) {
+    // Two cases:
+    // 1. Thread is being terminated, don't try to use the thread cache, and
+    //    don't try to resurrect it.
+    // 2. Someone, somewhere is currently allocating a thread cache. This may
+    //    be us, in which case we are re-entering and should not create a thread
+    //    cache. If it is not us, then this merely delays thread cache
+    //    construction a bit, which is not an issue.
+    return nullptr;
+  }
+
+  // No per-thread ThreadCache has been allocated here yet, and this partition
+  // has a thread cache; allocate a new one.
+  //
+  // The thread cache allocation itself will not reenter here, as it sidesteps
+  // the thread cache by using placement new and |RawAlloc()|. However,
+  // internally to libc, allocations may happen to create a new TLS
+  // variable. This would end up here again, which is not what we want (and
+  // likely is not supported by libc).
+  //
+  // To avoid this sort of reentrancy, increase the count of thread caches that
+  // are currently allocating a thread cache.
+  //
+  // Note that there is no deadlock or data inconsistency concern, since we do
+  // not hold the lock, and as such haven't touched any internal data.
+  int before =
+      thread_caches_being_constructed_.fetch_add(1, std::memory_order_relaxed);
+  PA_CHECK(before < std::numeric_limits<int>::max());
+  tcache = ThreadCache::Create(this);
+  thread_caches_being_constructed_.fetch_sub(1, std::memory_order_relaxed);
+
+  return tcache;
+}
+
+// static
+void PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
+    StraightenLargerSlotSpanFreeListsMode new_value) {
+  straighten_larger_slot_span_free_lists_ = new_value;
+}
+
+// static
+void PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(bool new_value) {
+  sort_smaller_slot_span_free_lists_ = new_value;
+}
+
+// static
+void PartitionRoot::SetSortActiveSlotSpansEnabled(bool new_value) {
+  sort_active_slot_spans_ = new_value;
+}
+
+// Explicitly define common template instantiations to reduce compile time.
+#define EXPORT_TEMPLATE \
+  template PA_EXPORT_TEMPLATE_DEFINE(PA_COMPONENT_EXPORT(PARTITION_ALLOC))
+EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kNone>(size_t,
+                                                              const char*);
+EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kReturnNull>(
+    size_t,
+    const char*);
+EXPORT_TEMPLATE void*
+PartitionRoot::Realloc<AllocFlags::kNone, FreeFlags::kNone>(void*,
+                                                            size_t,
+                                                            const char*);
+EXPORT_TEMPLATE void*
+PartitionRoot::Realloc<AllocFlags::kReturnNull, FreeFlags::kNone>(void*,
+                                                                  size_t,
+                                                                  const char*);
+EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<AllocFlags::kNone>(size_t,
+                                                                     size_t);
+#undef EXPORT_TEMPLATE
+
+static_assert(offsetof(PartitionRoot, sentinel_bucket) ==
+                  offsetof(PartitionRoot, buckets) +
+                      internal::kNumBuckets * sizeof(PartitionRoot::Bucket),
+              "sentinel_bucket must be just after the regular buckets.");
+
+static_assert(
+    offsetof(PartitionRoot, lock_) >= 64,
+    "The lock should not be on the same cacheline as the read-mostly flags");
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_root.h b/base/allocator/partition_allocator/src/partition_alloc/partition_root.h
new file mode 100644
index 0000000..23a514a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_root.h
@@ -0,0 +1,2461 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ROOT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ROOT_H_
+
+// DESCRIPTION
+// PartitionRoot::Alloc() and PartitionRoot::Free() are approximately analogous
+// to malloc() and free().
+//
+// The main difference is that a PartitionRoot object must be supplied to these
+// functions, representing a specific "heap partition" that will be used to
+// satisfy the allocation. Different partitions are guaranteed to exist in
+// separate address spaces, including being separate from the main system
+// heap. If the contained objects are all freed, physical memory is returned to
+// the system but the address space remains reserved.  See PartitionAlloc.md for
+// other security properties PartitionAlloc provides.
+//
+// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
+// PartitionAllocator classes. To minimize the instruction count to the fullest
+// extent possible, the PartitionRoot is really just a header adjacent to other
+// data areas provided by the allocator class.
+//
+// The constraints for PartitionRoot::Alloc() are:
+// - Multi-threaded use against a single partition is ok; locking is handled.
+// - Allocations of any arbitrary size can be handled (subject to a limit of
+//   INT_MAX bytes for security reasons).
+// - Bucketing is by approximate size, for example an allocation of 4000 bytes
+//   might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
+//   keep worst-case waste to ~10%.
+
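+// Illustrative usage sketch (not part of the original header; names such as
+// "ExampleType" are assumptions for the example only). A partition is owned
+// by a PartitionAllocator and allocations go through its root:
+//
+//   partition_alloc::PartitionAllocator allocator(
+//       partition_alloc::PartitionOptions{});
+//   partition_alloc::PartitionRoot* root = allocator.root();
+//   void* object = root->Alloc(64, "ExampleType");
+//   root->Free(object);
+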
+#include <algorithm>
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/chromecast_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/freeslot_bitmap.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/lightweight_quarantine.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_allocation_data.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/export_template.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket_lookup.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_cookie.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+
+namespace partition_alloc::internal {
+
+// We want this size to be big enough that we have time to start up other
+// scripts _before_ we wrap around.
+static constexpr size_t kAllocInfoSize = 1 << 24;
+
+struct AllocInfo {
+  std::atomic<size_t> index{0};
+  struct {
+    uintptr_t addr;
+    size_t size;
+  } allocs[kAllocInfoSize] = {};
+};
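+
+// A sketch of the intended use (an assumption based on the declarations
+// below, not taken from the original file): RecordAllocOrFree() is expected
+// to atomically bump |index| and store the (addr, size) pair at
+// |index % kAllocInfoSize|, so |allocs| behaves as a fixed-size ring buffer
+// of the most recent allocation events.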
+
+#if BUILDFLAG(RECORD_ALLOC_INFO)
+extern AllocInfo g_allocs;
+
+void RecordAllocOrFree(uintptr_t addr, size_t size);
+#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
+}  // namespace partition_alloc::internal
+
+namespace partition_alloc {
+
+namespace internal {
+// Avoid including partition_address_space.h from this .h file, by moving the
+// call to IsManagedByPartitionAllocBRPPool into the .cc file.
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address);
+#else
+PA_ALWAYS_INLINE void DCheckIfManagedByPartitionAllocBRPPool(
+    uintptr_t address) {}
+#endif
+
+#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+class PartitionRootEnumerator;
+#endif
+
+}  // namespace internal
+
+// Bit flag constants used to purge memory.  See PartitionRoot::PurgeMemory.
+//
+// In order to support bit operations like `flag_a | flag_b`, the old-fashioned
+// enum (+ surrounding named struct) is used instead of enum class.
+struct PurgeFlags {
+  enum : int {
+    // Decommitting the ring list of empty slot spans is reasonably fast.
+    kDecommitEmptySlotSpans = 1 << 0,
+    // Discarding unused system pages is slower, because it involves walking all
+    // freelists in all active slot spans of all buckets >= system page
+    // size. It often frees a similar amount of memory to decommitting the empty
+    // slot spans, though.
+    kDiscardUnusedSystemPages = 1 << 1,
+    // Aggressively reclaim memory. This is meant to be used in low-memory
+    // situations, not for periodic memory reclaiming.
+    kAggressiveReclaim = 1 << 2,
+  };
+};
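+
+// Illustrative example (assumed call pattern, consistent with the bit-flag
+// design above):
+//   root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+//                     PurgeFlags::kDiscardUnusedSystemPages);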
+
+// Options struct used to configure PartitionRoot and PartitionAllocator.
+struct PartitionOptions {
+  enum class AllowToggle : uint8_t {
+    kDisallowed,
+    kAllowed,
+  };
+  enum class EnableToggle : uint8_t {
+    kDisabled,
+    kEnabled,
+  };
+
+  // Expose the enum arms directly at the level of `PartitionOptions`,
+  // since the variant names are already sufficiently descriptive.
+  static constexpr auto kAllowed = AllowToggle::kAllowed;
+  static constexpr auto kDisallowed = AllowToggle::kDisallowed;
+  static constexpr auto kDisabled = EnableToggle::kDisabled;
+  static constexpr auto kEnabled = EnableToggle::kEnabled;
+
+  // By default all allocations will be aligned to `kAlignment`,
+  // likely to be 8B or 16B depending on platforms and toolchains.
+  // AlignedAlloc() allows to enforce higher alignment.
+  // This option determines whether it is supported for the partition.
+  // Allowing AlignedAlloc() comes at a cost of disallowing extras in front
+  // of the allocation.
+  AllowToggle aligned_alloc = kDisallowed;
+
+  EnableToggle thread_cache = kDisabled;
+  AllowToggle star_scan_quarantine = kDisallowed;
+  EnableToggle backup_ref_ptr = kDisabled;
+  AllowToggle use_configurable_pool = kDisallowed;
+
+  size_t ref_count_size = 0;
+
+  size_t scheduler_loop_quarantine_capacity_in_bytes = 0;
+
+  struct {
+    EnableToggle enabled = kDisabled;
+    TagViolationReportingMode reporting_mode =
+        TagViolationReportingMode::kUndefined;
+  } memory_tagging;
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  ThreadIsolationOption thread_isolation;
+#endif
+};
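+
+// Illustrative sketch (assumed usage, not from the original header): options
+// are set field-by-field and then handed to an allocator, e.g.
+//   PartitionOptions opts;
+//   opts.thread_cache = PartitionOptions::kEnabled;
+//   opts.aligned_alloc = PartitionOptions::kAllowed;
+//   PartitionAllocator allocator(opts);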
+
+// When/if free lists should be "straightened" when calling
+// PartitionRoot::PurgeMemory(..., accounting_only=false).
+enum class StraightenLargerSlotSpanFreeListsMode {
+  kNever,
+  kOnlyWhenUnprovisioning,
+  kAlways,
+};
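+
+// Illustrative example (assumed call site) of selecting a mode via the static
+// setter declared on PartitionRoot below:
+//   PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
+//       StraightenLargerSlotSpanFreeListsMode::kAlways);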
+
+// Never instantiate a PartitionRoot directly, instead use
+// PartitionAllocator.
+struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
+  using SlotSpan = internal::SlotSpanMetadata;
+  using Page = internal::PartitionPage;
+  using Bucket = internal::PartitionBucket;
+  using FreeListEntry = internal::EncodedNextFreelistEntry;
+  using SuperPageExtentEntry = internal::PartitionSuperPageExtentEntry;
+  using DirectMapExtent = internal::PartitionDirectMapExtent;
+#if BUILDFLAG(USE_STARSCAN)
+  using PCScan = internal::PCScan;
+#endif
+
+  enum class QuarantineMode : uint8_t {
+    kAlwaysDisabled,
+    kDisabledByDefault,
+    kEnabled,
+  };
+
+  enum class ScanMode : uint8_t {
+    kDisabled,
+    kEnabled,
+  };
+
+  enum class BucketDistribution : uint8_t { kNeutral, kDenser };
+
+  // Root settings accessed on fast paths.
+  //
+  // Careful! PartitionAlloc's performance is sensitive to its layout.  Please
+  // put the fast-path objects in the struct below.
+  struct alignas(internal::kPartitionCachelineSize) Settings {
+    // Chromium-style: Complex constructor needs an explicit out-of-line
+    // constructor.
+    Settings();
+
+    // Defines whether objects should be quarantined for this root.
+    QuarantineMode quarantine_mode = QuarantineMode::kAlwaysDisabled;
+
+    // Defines whether the root should be scanned.
+    ScanMode scan_mode = ScanMode::kDisabled;
+
+    // It's important to default to the 'neutral' distribution; otherwise a
+    // switch from 'dense' -> 'neutral' would leave some buckets with dirty
+    // memory forever: no memory would be allocated from them, yet their
+    // freelists would typically not be empty, making them unreclaimable.
+    BucketDistribution bucket_distribution = BucketDistribution::kNeutral;
+
+    bool with_thread_cache = false;
+
+    bool allow_aligned_alloc = false;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    bool use_cookie = false;
+#else
+    static constexpr bool use_cookie = false;
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    bool brp_enabled_ = false;
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+    bool mac11_malloc_size_hack_enabled_ = false;
+    size_t mac11_malloc_size_hack_usable_size_ = 0;
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    bool use_configurable_pool = false;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+    bool memory_tagging_enabled_ = false;
+    TagViolationReportingMode memory_tagging_reporting_mode_ =
+        TagViolationReportingMode::kUndefined;
+#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+    size_t ref_count_size = 0;
+#endif  // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    ThreadIsolationOption thread_isolation;
+#endif
+
+#if PA_CONFIG(EXTRAS_REQUIRED)
+    uint32_t extras_size = 0;
+    uint32_t extras_offset = 0;
+#else
+    // Teach the compiler that code can be optimized in builds that use no
+    // extras.
+    static inline constexpr uint32_t extras_size = 0;
+    static inline constexpr uint32_t extras_offset = 0;
+#endif  // PA_CONFIG(EXTRAS_REQUIRED)
+  };
+
+  Settings settings;
+
+  // Not used on the fastest path (thread cache allocations), but on the fast
+  // path of the central allocator.
+  alignas(internal::kPartitionCachelineSize) internal::Lock lock_;
+
+  Bucket buckets[internal::kNumBuckets] = {};
+  Bucket sentinel_bucket{};
+
+  // All fields below this comment are not accessed on the fast path.
+  bool initialized = false;
+
+  // Bookkeeping.
+  // - total_size_of_super_pages - total virtual address space for normal bucket
+  //     super pages
+  // - total_size_of_direct_mapped_pages - total virtual address space for
+  //     direct-map regions
+  // - total_size_of_committed_pages - total committed pages for slots (doesn't
+  //     include metadata, bitmaps (if any), or any data outside of the regions
+  //     described by the first two items)
+  // Invariant: total_size_of_allocated_bytes <=
+  //            total_size_of_committed_pages <
+  //                total_size_of_super_pages +
+  //                total_size_of_direct_mapped_pages.
+  // Invariant: total_size_of_committed_pages <= max_size_of_committed_pages.
+  // Invariant: total_size_of_allocated_bytes <= max_size_of_allocated_bytes.
+  // Invariant: max_size_of_allocated_bytes <= max_size_of_committed_pages.
+  // Since all operations on the atomic variables have relaxed semantics, we
+  // don't check these invariants with DCHECKs.
+  std::atomic<size_t> total_size_of_committed_pages{0};
+  std::atomic<size_t> max_size_of_committed_pages{0};
+  std::atomic<size_t> total_size_of_super_pages{0};
+  std::atomic<size_t> total_size_of_direct_mapped_pages{0};
+  size_t total_size_of_allocated_bytes
+      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
+  size_t max_size_of_allocated_bytes
+      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
+  // Atomic, because system calls can be made without the lock held.
+  std::atomic<uint64_t> syscall_count{};
+  std::atomic<uint64_t> syscall_total_time_ns{};
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  std::atomic<size_t> total_size_of_brp_quarantined_bytes{0};
+  std::atomic<size_t> total_count_of_brp_quarantined_slots{0};
+  std::atomic<size_t> cumulative_size_of_brp_quarantined_bytes{0};
+  std::atomic<size_t> cumulative_count_of_brp_quarantined_slots{0};
+#endif
+  // Slot span memory which has been provisioned, and is currently unused as
+  // it's part of an empty SlotSpan. This is not clean memory, since it has
+  // been used for a memory allocation and/or contains freelist entries. But
+  // it might have been moved to swap. Note that all this memory
+  // can be decommitted at any time.
+  size_t empty_slot_spans_dirty_bytes
+      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
+
+  // Only tolerate up to |total_size_of_committed_pages >>
+  // max_empty_slot_spans_dirty_bytes_shift| dirty bytes in empty slot
+  // spans. That is, the default value of 3 tolerates up to 1/8. Since
+  // |empty_slot_spans_dirty_bytes| is never strictly larger than
+  // total_size_of_committed_pages, setting this to 0 removes the cap. This is
+  // useful to make tests deterministic and easier to reason about.
+  int max_empty_slot_spans_dirty_bytes_shift = 3;
+
+  uintptr_t next_super_page = 0;
+  uintptr_t next_partition_page = 0;
+  uintptr_t next_partition_page_end = 0;
+  SuperPageExtentEntry* current_extent = nullptr;
+  SuperPageExtentEntry* first_extent = nullptr;
+  DirectMapExtent* direct_map_list
+      PA_GUARDED_BY(internal::PartitionRootLock(this)) = nullptr;
+  SlotSpan*
+      global_empty_slot_span_ring[internal::kMaxFreeableSpans] PA_GUARDED_BY(
+          internal::PartitionRootLock(this)) = {};
+  int16_t global_empty_slot_span_ring_index
+      PA_GUARDED_BY(internal::PartitionRootLock(this)) = 0;
+  int16_t global_empty_slot_span_ring_size
+      PA_GUARDED_BY(internal::PartitionRootLock(this)) =
+          internal::kDefaultEmptySlotSpanRingSize;
+
+  // Integrity check = ~reinterpret_cast<uintptr_t>(this).
+  uintptr_t inverted_self = 0;
+  std::atomic<int> thread_caches_being_constructed_{0};
+
+  bool quarantine_always_for_testing = false;
+
+  // NoDestructor because we don't need to dequarantine objects, as the root
+  // they are associated with is dying anyway.
+  internal::base::NoDestructor<internal::SchedulerLoopQuarantine>
+      scheduler_loop_quarantine;
+
+  PartitionRoot();
+  explicit PartitionRoot(PartitionOptions opts);
+
+  // TODO(tasak): remove ~PartitionRoot() after confirming all tests
+  // don't need ~PartitionRoot().
+  ~PartitionRoot();
+
+  // This will unreserve any space in the pool that the PartitionRoot is
+  // using. This is needed because many tests create and destroy many
+  // PartitionRoots over the lifetime of a process, which can exhaust the
+  // pool and cause tests to fail.
+  void DestructForTesting();
+
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+  void EnableMac11MallocSizeHackIfNeeded(size_t ref_count_size);
+  void EnableMac11MallocSizeHackForTesting(size_t ref_count_size);
+  void InitMac11MallocSizeHackUsableSize(size_t ref_count_size);
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+
+  // Public API
+  //
+  // Allocates out of the given bucket. Properly, this function should probably
+  // be in PartitionBucket, but because the implementation needs to be inlined
+  // for performance, and because it needs to inspect SlotSpanMetadata,
+  // it becomes impossible to have it in PartitionBucket as this causes a
+  // cyclical dependency on SlotSpanMetadata function implementations.
+  //
+  // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
+  // preserves the layering of the includes.
+  void Init(PartitionOptions);
+
+  void EnableThreadCacheIfSupported();
+
+  PA_ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
+  // These two functions work unconditionally for normal buckets.
+  // For direct map, they only work for the first super page of a reservation
+  // (see partition_alloc_constants.h for the direct map allocation layout).
+  // In particular, the functions always work for a pointer to the start of a
+  // reservation.
+  PA_ALWAYS_INLINE static PartitionRoot* FromFirstSuperPage(
+      uintptr_t super_page);
+  PA_ALWAYS_INLINE static PartitionRoot* FromAddrInFirstSuperpage(
+      uintptr_t address);
+
+  PA_ALWAYS_INLINE void DecreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
+                                                          size_t len)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  PA_ALWAYS_INLINE void IncreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
+                                                          size_t len,
+                                                          size_t raw_size)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  PA_ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
+  PA_ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
+  PA_ALWAYS_INLINE void DecommitSystemPagesForData(
+      uintptr_t address,
+      size_t length,
+      PageAccessibilityDisposition accessibility_disposition)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  PA_ALWAYS_INLINE void RecommitSystemPagesForData(
+      uintptr_t address,
+      size_t length,
+      PageAccessibilityDisposition accessibility_disposition,
+      bool request_tagging)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  PA_ALWAYS_INLINE bool TryRecommitSystemPagesForData(
+      uintptr_t address,
+      size_t length,
+      PageAccessibilityDisposition accessibility_disposition,
+      bool request_tagging)
+      PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
+
+  [[noreturn]] PA_NOINLINE void OutOfMemory(size_t size);
+
+  // Returns a pointer aligned on |alignment|, or nullptr.
+  //
+  // |alignment| has to be a power of two and a multiple of sizeof(void*) (as in
+  // posix_memalign() for POSIX systems). The returned pointer may include
+  // padding, and can be passed to |Free()| later.
+  //
+  // NOTE: This is incompatible with anything that adds extras before the
+  // returned pointer, such as ref-count.
+  template <AllocFlags flags = AllocFlags::kNone>
+  PA_NOINLINE void* AlignedAlloc(size_t alignment, size_t requested_size) {
+    return AlignedAllocInline<flags>(alignment, requested_size);
+  }
+  template <AllocFlags flags = AllocFlags::kNone>
+  PA_ALWAYS_INLINE void* AlignedAllocInline(size_t alignment,
+                                            size_t requested_size);
+
+  // PartitionAlloc supports multiple partitions, and hence multiple callers to
+  // these functions. Setting PA_ALWAYS_INLINE bloats code, and can be
+  // detrimental to performance, for instance if multiple callers are hot (by
+  // increasing cache footprint). Set PA_NOINLINE on the "basic" top-level
+  // functions to mitigate that for "vanilla" callers.
+  //
+  // |type_name == nullptr|: ONLY FOR TESTS except internal uses.
+  // You should provide |type_name| to make debugging easier.
+  template <AllocFlags flags = AllocFlags::kNone>
+  PA_NOINLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* Alloc(
+      size_t requested_size,
+      const char* type_name = nullptr) {
+    return AllocInline<flags>(requested_size, type_name);
+  }
+  template <AllocFlags flags = AllocFlags::kNone>
+  PA_ALWAYS_INLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInline(
+      size_t requested_size,
+      const char* type_name = nullptr) {
+    return AllocInternal<flags>(requested_size, internal::PartitionPageSize(),
+                                type_name);
+  }
+
+  // AllocInternal exposed for testing.
+  template <AllocFlags flags = AllocFlags::kNone>
+  PA_NOINLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInternalForTesting(
+      size_t requested_size,
+      size_t slot_span_alignment,
+      const char* type_name) {
+    return AllocInternal<flags>(requested_size, slot_span_alignment, type_name);
+  }
+
+  template <AllocFlags alloc_flags = AllocFlags::kNone,
+            FreeFlags free_flags = FreeFlags::kNone>
+  PA_NOINLINE PA_MALLOC_ALIGNED void* Realloc(void* ptr,
+                                              size_t new_size,
+                                              const char* type_name) {
+    return ReallocInline<alloc_flags, free_flags>(ptr, new_size, type_name);
+  }
+  template <AllocFlags alloc_flags = AllocFlags::kNone,
+            FreeFlags free_flags = FreeFlags::kNone>
+  PA_ALWAYS_INLINE PA_MALLOC_ALIGNED void* ReallocInline(void* ptr,
+                                                         size_t new_size,
+                                                         const char* type_name);
+
+  template <FreeFlags flags = FreeFlags::kNone>
+  PA_NOINLINE void Free(void* object) {
+    FreeInline<flags>(object);
+  }
+  template <FreeFlags flags = FreeFlags::kNone>
+  PA_ALWAYS_INLINE void FreeInline(void* object);
+
+  template <FreeFlags flags = FreeFlags::kNone>
+  PA_NOINLINE static void FreeInUnknownRoot(void* object) {
+    FreeInlineInUnknownRoot<flags>(object);
+  }
+  template <FreeFlags flags = FreeFlags::kNone>
+  PA_ALWAYS_INLINE static void FreeInlineInUnknownRoot(void* object);
+
+  // Immediately frees the pointer bypassing the quarantine. |slot_start| is the
+  // beginning of the slot that contains |object|.
+  PA_ALWAYS_INLINE void FreeNoHooksImmediate(void* object,
+                                             SlotSpan* slot_span,
+                                             uintptr_t slot_start);
+
+  PA_ALWAYS_INLINE size_t GetSlotUsableSize(SlotSpan* slot_span) {
+    return AdjustSizeForExtrasSubtract(slot_span->GetUtilizedSlotSize());
+  }
+
+  PA_ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
+
+  // Same as GetUsableSize() except it adjusts the return value for macOS 11
+  // malloc_size() hack.
+  PA_ALWAYS_INLINE static size_t GetUsableSizeWithMac11MallocSizeHack(
+      void* ptr);
+
+  PA_ALWAYS_INLINE PageAccessibilityConfiguration
+  GetPageAccessibility(bool request_tagging) const;
+  PA_ALWAYS_INLINE PageAccessibilityConfiguration
+      PageAccessibilityWithThreadIsolationIfEnabled(
+          PageAccessibilityConfiguration::Permissions) const;
+
+  PA_ALWAYS_INLINE size_t
+  AllocationCapacityFromSlotStart(uintptr_t slot_start) const;
+  PA_ALWAYS_INLINE size_t
+  AllocationCapacityFromRequestedSize(size_t size) const;
+
+  PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
+  PA_ALWAYS_INLINE TagViolationReportingMode
+  memory_tagging_reporting_mode() const;
+
+  // Frees memory from this partition, if possible, by decommitting pages or
+  // even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
+  void PurgeMemory(int flags);
+
+  // Reduces the size of the empty slot spans ring, until the dirty size is <=
+  // |limit|.
+  void ShrinkEmptySlotSpansRing(size_t limit)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  // The empty slot span ring starts "small" and can be enlarged later. This
+  // improves performance by performing fewer system calls, at the cost of more
+  // memory usage.
+  void EnableLargeEmptySlotSpanRing() {
+    ::partition_alloc::internal::ScopedGuard locker{
+        internal::PartitionRootLock(this)};
+    global_empty_slot_span_ring_size = internal::kMaxFreeableSpans;
+  }
+
+  void DumpStats(const char* partition_name,
+                 bool is_light_dump,
+                 PartitionStatsDumper* partition_stats_dumper);
+
+  static void DeleteForTesting(PartitionRoot* partition_root);
+  void ResetForTesting(bool allow_leaks);
+  void ResetBookkeepingForTesting();
+
+  PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const {
+    return settings.bucket_distribution;
+  }
+
+  static uint16_t SizeToBucketIndex(size_t size,
+                                    BucketDistribution bucket_distribution);
+
+  PA_ALWAYS_INLINE void FreeInSlotSpan(uintptr_t slot_start,
+                                       SlotSpan* slot_span)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+
+  // Frees memory, with |slot_start| as returned by |RawAlloc()|.
+  PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start);
+  PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start, SlotSpan* slot_span)
+      PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
+
+  PA_ALWAYS_INLINE void RawFreeBatch(FreeListEntry* head,
+                                     FreeListEntry* tail,
+                                     size_t size,
+                                     SlotSpan* slot_span)
+      PA_LOCKS_EXCLUDED(internal::PartitionRootLock(this));
+
+  PA_ALWAYS_INLINE void RawFreeWithThreadCache(uintptr_t slot_start,
+                                               SlotSpan* slot_span);
+
+  // This is safe to do because we are switching to a bucket distribution with
+  // more buckets, meaning any allocations we have done before the switch are
+  // guaranteed to have a bucket under the new distribution when they are
+  // eventually deallocated. We do not need synchronization here.
+  void SwitchToDenserBucketDistribution() {
+    settings.bucket_distribution = BucketDistribution::kDenser;
+  }
+  // Switching back to the less dense bucket distribution is ok during tests.
+  // At worst, we end up with deallocations that are sent to a bucket that we
+  // cannot allocate from, which will not cause problems besides wasting
+  // memory.
+  void ResetBucketDistributionForTesting() {
+    settings.bucket_distribution = BucketDistribution::kNeutral;
+  }
+
+  ThreadCache* thread_cache_for_testing() const {
+    return settings.with_thread_cache ? ThreadCache::Get() : nullptr;
+  }
+  size_t get_total_size_of_committed_pages() const {
+    return total_size_of_committed_pages.load(std::memory_order_relaxed);
+  }
+  size_t get_max_size_of_committed_pages() const {
+    return max_size_of_committed_pages.load(std::memory_order_relaxed);
+  }
+
+  size_t get_total_size_of_allocated_bytes() const {
+    // Since this is only used for bookkeeping, we don't care if the value is
+    // stale, so no need to get a lock here.
+    return PA_TS_UNCHECKED_READ(total_size_of_allocated_bytes);
+  }
+
+  size_t get_max_size_of_allocated_bytes() const {
+    // Since this is only used for bookkeeping, we don't care if the value is
+    // stale, so no need to get a lock here.
+    return PA_TS_UNCHECKED_READ(max_size_of_allocated_bytes);
+  }
+
+  internal::pool_handle ChoosePool() const {
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+    if (settings.use_configurable_pool) {
+      PA_DCHECK(IsConfigurablePoolAvailable());
+      return internal::kConfigurablePoolHandle;
+    }
+#endif
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+    if (settings.thread_isolation.enabled) {
+      return internal::kThreadIsolatedPoolHandle;
+    }
+#endif
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    return brp_enabled() ? internal::kBRPPoolHandle
+                         : internal::kRegularPoolHandle;
+#else
+    return internal::kRegularPoolHandle;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  }
+
+  PA_ALWAYS_INLINE bool IsQuarantineAllowed() const {
+    return settings.quarantine_mode != QuarantineMode::kAlwaysDisabled;
+  }
+
+  PA_ALWAYS_INLINE bool IsQuarantineEnabled() const {
+    return settings.quarantine_mode == QuarantineMode::kEnabled;
+  }
+
+  PA_ALWAYS_INLINE bool ShouldQuarantine(void* object) const {
+    if (PA_UNLIKELY(settings.quarantine_mode != QuarantineMode::kEnabled)) {
+      return false;
+    }
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+    if (PA_UNLIKELY(quarantine_always_for_testing)) {
+      return true;
+    }
+    // If quarantine is enabled and the tag overflows, move the containing slot
+    // to quarantine, to prevent the attacker from exploiting a pointer that has
+    // an old tag.
+    if (PA_LIKELY(IsMemoryTaggingEnabled())) {
+      return internal::HasOverflowTag(object);
+    }
+    // Default behaviour if MTE is not enabled for this PartitionRoot.
+    return true;
+#else
+    return true;
+#endif
+  }
+
+  PA_ALWAYS_INLINE void SetQuarantineAlwaysForTesting(bool value) {
+    quarantine_always_for_testing = value;
+  }
+
+  PA_ALWAYS_INLINE bool IsScanEnabled() const {
+    // Enabled scan implies enabled quarantine.
+    PA_DCHECK(settings.scan_mode != ScanMode::kEnabled ||
+              IsQuarantineEnabled());
+    return settings.scan_mode == ScanMode::kEnabled;
+  }
+
+  PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+  GetDirectMapMetadataAndGuardPagesSize() {
+    // Because we need to fake a direct-map region to look like a super page, we
+    // need to allocate more pages around the payload:
+    // - The first partition page is a combination of metadata and guard region.
+    // - We also add a trailing guard page. In most cases, a system page would
+    //   suffice. But on 32-bit systems when BRP is on, we need a partition page
+    //   to match the granularity of the BRP pool bitmap. For consistency, we'll
+    //   use
+    //   a partition page everywhere, which is cheap as it's uncommitted address
+    //   space anyway.
+    return 2 * internal::PartitionPageSize();
+  }
+
+  PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
+  GetDirectMapSlotSize(size_t raw_size) {
+    // Caller must check that the size is not above the MaxDirectMapped()
+    // limit before calling. This also guards against integer overflow in the
+    // calculation here.
+    PA_DCHECK(raw_size <= internal::MaxDirectMapped());
+    return partition_alloc::internal::base::bits::AlignUp(
+        raw_size, internal::SystemPageSize());
+  }
+
+  PA_ALWAYS_INLINE static size_t GetDirectMapReservationSize(
+      size_t padded_raw_size) {
+    // Caller must check that the size is not above the MaxDirectMapped()
+    // limit before calling. This also guards against integer overflow in the
+    // calculation here.
+    PA_DCHECK(padded_raw_size <= internal::MaxDirectMapped());
+    return partition_alloc::internal::base::bits::AlignUp(
+        padded_raw_size + GetDirectMapMetadataAndGuardPagesSize(),
+        internal::DirectMapAllocationGranularity());
+  }
+
+  PA_ALWAYS_INLINE size_t AdjustSize0IfNeeded(size_t size) const {
+    // There are known cases where allowing size 0 would lead to problems:
+    // 1. If extras are present only before allocation (e.g. BRP ref-count), the
+    //    extras will fill the entire kAlignment-sized slot, leading to
+    //    returning a pointer to the next slot. Realloc() calls
+    //    SlotSpanMetadata::FromObject() prior to subtracting extras, thus
+    //    potentially getting a wrong slot span.
+    // 2. If we put BRP ref-count in the previous slot, that slot may be free.
+    //    In this case, the slot needs to fit both, a free-list entry and a
+    //    ref-count. If sizeof(PartitionRefCount) is 8, it fills the entire
+    //    smallest slot on 32-bit systems (kSmallestBucket is 8), thus not
+    //    leaving space for the free-list entry.
+    // 3. On macOS and iOS, PartitionGetSizeEstimate() is used for two purposes:
+    //    as a zone dispatcher and as an underlying implementation of
+    //    malloc_size(3). As a zone dispatcher, zero has a special meaning of
+    //    "doesn't belong to this zone". When extras fill out the entire slot,
+    //    the usable size is 0, thus confusing the zone dispatcher.
+    //
+    // To save ourselves a branch on this hot path, we could eliminate this
+    // check at compile time for cases not listed above. The #if statement would
+    // be rather complex. Then there is also the fear of the unknown. The
+    // existing cases were discovered through obscure, painful-to-debug crashes.
+    // Better save ourselves trouble with not-yet-discovered cases.
+    if (PA_UNLIKELY(size == 0)) {
+      return 1;
+    }
+    return size;
+  }
+
+  // Adjusts the size by adding extras. Also includes the 0->1 adjustment if
+  // needed.
+  PA_ALWAYS_INLINE size_t AdjustSizeForExtrasAdd(size_t size) const {
+    size = AdjustSize0IfNeeded(size);
+    PA_DCHECK(size + settings.extras_size >= size);
+    return size + settings.extras_size;
+  }
+
+  // Adjusts the size by subtracting extras. Doesn't include the 0->1 adjustment,
+  // which leads to an asymmetry with AdjustSizeForExtrasAdd, but callers of
+  // AdjustSizeForExtrasSubtract either expect the adjustment to be included, or
+  // are indifferent.
+  PA_ALWAYS_INLINE size_t AdjustSizeForExtrasSubtract(size_t size) const {
+    return size - settings.extras_size;
+  }
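+
+  // Worked example (illustrative, assuming extras_size == E): requesting 0
+  // bytes gives AdjustSizeForExtrasAdd(0) == 1 + E because of the 0->1 bump,
+  // while AdjustSizeForExtrasSubtract(1 + E) == 1, not 0; this is the
+  // asymmetry noted above.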
+
+  PA_ALWAYS_INLINE uintptr_t SlotStartToObjectAddr(uintptr_t slot_start) const {
+    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+    return slot_start + settings.extras_offset;
+  }
+
+  PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
+    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+    return internal::TagAddr(SlotStartToObjectAddr(slot_start));
+  }
+
+  PA_ALWAYS_INLINE void* TaggedSlotStartToObject(
+      void* tagged_slot_start) const {
+    // TODO(bartekn): Check that |tagged_slot_start| is indeed a slot start.
+    return reinterpret_cast<void*>(
+        SlotStartToObjectAddr(reinterpret_cast<uintptr_t>(tagged_slot_start)));
+  }
+
+  PA_ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
+    return UntagPtr(object) - settings.extras_offset;
+    // TODO(bartekn): Check that the result is indeed a slot start.
+  }
+
+  PA_ALWAYS_INLINE uintptr_t ObjectToTaggedSlotStart(void* object) const {
+    return reinterpret_cast<uintptr_t>(object) - settings.extras_offset;
+    // TODO(bartekn): Check that the result is indeed a slot start.
+  }
+
+  bool brp_enabled() const {
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    return settings.brp_enabled_;
+#else
+    return false;
+#endif
+  }
+
+  PA_ALWAYS_INLINE bool uses_configurable_pool() const {
+    return settings.use_configurable_pool;
+  }
+
+  // To make tests deterministic, it is necessary to uncap the amount of memory
+  // waste incurred by empty slot spans. Otherwise, the size of various
+  // freelists, and committed memory becomes harder to reason about (and
+  // brittle) with a single thread, and non-deterministic with several.
+  void UncapEmptySlotSpanMemoryForTesting() {
+    max_empty_slot_spans_dirty_bytes_shift = 0;
+  }
+
+  // Enables/disables the free list straightening for larger slot spans in
+  // PurgeMemory().
+  static void SetStraightenLargerSlotSpanFreeListsMode(
+      StraightenLargerSlotSpanFreeListsMode new_value);
+  // Enables/disables the free list sorting for smaller slot spans in
+  // PurgeMemory().
+  static void SetSortSmallerSlotSpanFreeListsEnabled(bool new_value);
+  // Enables/disables the sorting of active slot spans in PurgeMemory().
+  static void SetSortActiveSlotSpansEnabled(bool new_value);
+
+  static StraightenLargerSlotSpanFreeListsMode
+  GetStraightenLargerSlotSpanFreeListsMode() {
+    return straighten_larger_slot_span_free_lists_;
+  }
+
+  internal::SchedulerLoopQuarantine& GetSchedulerLoopQuarantineForTesting() {
+    // TODO(crbug.com/1462223): Implement thread-local version and return it
+    // here.
+    return *scheduler_loop_quarantine;
+  }
+
+ private:
+  static inline StraightenLargerSlotSpanFreeListsMode
+      straighten_larger_slot_span_free_lists_ =
+          StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning;
+  static inline bool sort_smaller_slot_span_free_lists_ = true;
+  static inline bool sort_active_slot_spans_ = false;
+
+  // Common path of Free() and FreeInUnknownRoot(). Returns
+  // true if the caller should return immediately.
+  template <FreeFlags flags>
+  PA_ALWAYS_INLINE static bool FreeProlog(void* object,
+                                          const PartitionRoot* root);
+
+  // |buckets| has `kNumBuckets` elements, but we sometimes access it at index
+  // `kNumBuckets`, which is occupied by the sentinel bucket. The correct layout
+  // is enforced by a static_assert() in partition_root.cc, so this is
+  // fine. However, UBSAN is correctly pointing out that there is an
+  // out-of-bounds access, so disable it for these accesses.
+  //
+  // See crbug.com/1150772 for an instance of Clusterfuzz / UBSAN detecting
+  // this.
+  PA_ALWAYS_INLINE const Bucket& PA_NO_SANITIZE("undefined")
+      bucket_at(size_t i) const {
+    PA_DCHECK(i <= internal::kNumBuckets);
+    return buckets[i];
+  }
+
+  // Returns whether a |bucket| from |this| root is direct-mapped. This function
+  // does not touch |bucket|, contrary to PartitionBucket::is_direct_mapped().
+  //
+  // This is meant to be used in hot paths, and particularly *before* going into
+  // the thread cache fast path. Indeed, real-world profiles show that accessing
+  // an allocation's bucket is responsible for a sizable fraction of *total*
+  // deallocation time. This can be understood because
+  // - All deallocations have to access the bucket to know whether it is
+  //   direct-mapped. If not (vast majority of allocations), it can go through
+  //   the fast path, i.e. thread cache.
+  // - The bucket is relatively frequently written to, by *all* threads
+  //   (e.g. every time a slot span becomes full or empty), so accessing it will
+  //   result in some amount of cacheline ping-pong.
+  PA_ALWAYS_INLINE bool IsDirectMappedBucket(Bucket* bucket) const {
+    // All regular allocations are associated with a bucket in the |buckets_|
+    // array. A range check is then sufficient to identify direct-mapped
+    // allocations.
+    bool ret = !(bucket >= this->buckets && bucket <= &this->sentinel_bucket);
+    PA_DCHECK(ret == bucket->is_direct_mapped());
+    return ret;
+  }
+
+  // Same as |Alloc()|, but allows specifying |slot_span_alignment|. It
+  // has to be a multiple of partition page size, greater than 0 and no greater
+  // than kMaxSupportedAlignment. If it equals exactly 1 partition page, no
+  // special action is taken as PartitionAlloc naturally guarantees this
+  // alignment, otherwise a sub-optimal allocation strategy is used to
+  // guarantee the higher-order alignment.
+  template <AllocFlags flags>
+  PA_ALWAYS_INLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInternal(
+      size_t requested_size,
+      size_t slot_span_alignment,
+      const char* type_name);
+
+  // Same as |AllocInternal()|, but don't handle allocation hooks.
+  template <AllocFlags flags = AllocFlags::kNone>
+  PA_ALWAYS_INLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInternalNoHooks(
+      size_t requested_size,
+      size_t slot_span_alignment);
+  // Allocates a memory slot, without initializing extras.
+  //
+  // - |flags| are as in Alloc().
+  // - |raw_size| accounts for extras on top of Alloc()'s
+  //   |requested_size|.
+  // - |usable_size| and |is_already_zeroed| are output only. |usable_size| is
+  //   guaranteed to be larger or equal to Alloc()'s |requested_size|.
+  template <AllocFlags flags>
+  PA_ALWAYS_INLINE uintptr_t RawAlloc(Bucket* bucket,
+                                      size_t raw_size,
+                                      size_t slot_span_alignment,
+                                      size_t* usable_size,
+                                      bool* is_already_zeroed);
+  template <AllocFlags flags>
+  PA_ALWAYS_INLINE uintptr_t AllocFromBucket(Bucket* bucket,
+                                             size_t raw_size,
+                                             size_t slot_span_alignment,
+                                             size_t* usable_size,
+                                             bool* is_already_zeroed)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+
+  // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
+  // size as other alloc code.
+  template <AllocFlags flags>
+  PA_ALWAYS_INLINE static bool AllocWithMemoryToolProlog(size_t size) {
+    if (size > partition_alloc::internal::MaxDirectMapped()) {
+      if constexpr (ContainsFlags(flags, AllocFlags::kReturnNull)) {
+        // Early return indicating not to proceed with allocation
+        return false;
+      }
+      PA_CHECK(false);
+    }
+    return true;  // Allocation should proceed
+  }
+
+  bool TryReallocInPlaceForNormalBuckets(void* object,
+                                         SlotSpan* slot_span,
+                                         size_t new_size);
+  bool TryReallocInPlaceForDirectMap(internal::SlotSpanMetadata* slot_span,
+                                     size_t requested_size)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  void DecommitEmptySlotSpans()
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  PA_ALWAYS_INLINE void RawFreeLocked(uintptr_t slot_start)
+      PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
+  ThreadCache* MaybeInitThreadCache();
+
+  // May return an invalid thread cache.
+  PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
+  PA_ALWAYS_INLINE ThreadCache* GetThreadCache();
+
+  PA_ALWAYS_INLINE internal::SchedulerLoopQuarantine&
+  GetSchedulerLoopQuarantine();
+
+  PA_ALWAYS_INLINE AllocationNotificationData
+  CreateAllocationNotificationData(void* object,
+                                   size_t size,
+                                   const char* type_name) const;
+  PA_ALWAYS_INLINE static FreeNotificationData
+  CreateDefaultFreeNotificationData(void* address);
+  PA_ALWAYS_INLINE FreeNotificationData
+  CreateFreeNotificationData(void* address) const;
+
+#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+  static internal::Lock& GetEnumeratorLock();
+
+  PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) next_root = nullptr;
+  PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) prev_root = nullptr;
+
+  friend class internal::PartitionRootEnumerator;
+#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
+
+  friend class ThreadCache;
+};
+
+namespace internal {
+
+PA_ALWAYS_INLINE ::partition_alloc::internal::Lock& PartitionRootLock(
+    PartitionRoot* root) {
+  return root->lock_;
+}
+
+class ScopedSyscallTimer {
+ public:
+#if PA_CONFIG(COUNT_SYSCALL_TIME)
+  explicit ScopedSyscallTimer(PartitionRoot* root)
+      : root_(root), tick_(base::TimeTicks::Now()) {}
+
+  ~ScopedSyscallTimer() {
+    root_->syscall_count.fetch_add(1, std::memory_order_relaxed);
+
+    int64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
+    if (elapsed_nanos > 0) {
+      root_->syscall_total_time_ns.fetch_add(
+          static_cast<uint64_t>(elapsed_nanos), std::memory_order_relaxed);
+    }
+  }
+
+ private:
+  PartitionRoot* root_;
+  const base::TimeTicks tick_;
+#else
+  explicit ScopedSyscallTimer(PartitionRoot* root) {
+    root->syscall_count.fetch_add(1, std::memory_order_relaxed);
+  }
+#endif
+};
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+PA_ALWAYS_INLINE uintptr_t
+PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
+  PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  // Use this variant of GetDirectMapReservationStart as it has better
+  // performance.
+  uintptr_t offset = OffsetInBRPPool(address);
+  uintptr_t reservation_start =
+      GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
+#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
+  uintptr_t reservation_start = GetDirectMapReservationStart(address);
+#endif
+  if (!reservation_start) {
+    return 0;
+  }
+
+  // The direct map allocation may not start exactly from the first page, as
+  // there may be padding for alignment. The first page metadata holds an offset
+  // to where direct map metadata, and thus direct map start, are located.
+  auto* first_page =
+      PartitionPage::FromAddr(reservation_start + PartitionPageSize());
+  auto* page = first_page + first_page->slot_span_metadata_offset;
+  PA_DCHECK(page->is_valid);
+  PA_DCHECK(!page->slot_span_metadata_offset);
+  auto* slot_span = &page->slot_span_metadata;
+  uintptr_t slot_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  auto* metadata = PartitionDirectMapMetadata::FromSlotSpan(slot_span);
+  size_t padding_for_alignment =
+      metadata->direct_map_extent.padding_for_alignment;
+  PA_DCHECK(padding_for_alignment ==
+            static_cast<size_t>(page - first_page) * PartitionPageSize());
+  PA_DCHECK(slot_start ==
+            reservation_start + PartitionPageSize() + padding_for_alignment);
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+  return slot_start;
+}
+
+// Gets the address to the beginning of the allocated slot. The input |address|
+// can point anywhere in the slot, including the slot start as well as
+// immediately past the slot.
+//
+// This isn't a general purpose function, it is used specifically for obtaining
+// BackupRefPtr's ref-count. The caller is responsible for ensuring that the
+// ref-count is in place for this allocation.
+PA_ALWAYS_INLINE uintptr_t
+PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
+  // Adjust to support pointers right past the end of an allocation, which in
+  // some cases appear to point outside the designated allocation slot.
+  //
+  // If ref-count is present before the allocation, then adjusting a valid
+  // pointer down will not cause us to go down to the previous slot, otherwise
+  // no adjustment is needed (and likely wouldn't be correct as there is
+  // a risk of going down to the previous slot). Either way,
+  // kPartitionPastAllocationAdjustment takes care of that detail.
+  address -= kPartitionPastAllocationAdjustment;
+  PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(address));
+  DCheckIfManagedByPartitionAllocBRPPool(address);
+
+  uintptr_t directmap_slot_start =
+      PartitionAllocGetDirectMapSlotStartInBRPPool(address);
+  if (PA_UNLIKELY(directmap_slot_start)) {
+    return directmap_slot_start;
+  }
+  auto* slot_span = SlotSpanMetadata::FromAddr(address);
+  auto* root = PartitionRoot::FromSlotSpan(slot_span);
+  // Double check that ref-count is indeed present.
+  PA_DCHECK(root->brp_enabled());
+
+  // Get the offset from the beginning of the slot span.
+  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(slot_span);
+  size_t offset_in_slot_span = address - slot_span_start;
+
+  auto* bucket = slot_span->bucket;
+  return slot_span_start +
+         bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
+}
+
+// Return values to indicate where a pointer is pointing relative to the bounds
+// of an allocation.
+enum class PtrPosWithinAlloc {
+  // When BACKUP_REF_PTR_POISON_OOB_PTR is disabled, end-of-allocation pointers
+  // are also considered in-bounds.
+  kInBounds,
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  kAllocEnd,
+#endif
+  kFarOOB
+};
+
+// Checks whether `test_address` is in the same allocation slot as
+// `orig_address`.
+//
+// This can be called after adding or subtracting from the `orig_address`
+// to produce a different pointer which must still stay in the same allocation.
+//
+// The `type_size` is the size of the type that the raw_ptr is pointing to,
+// which may be the type the allocation is holding or a compatible pointer type
+// such as a base class or char*. It is used to detect pointers near the end of
+// the allocation but not strictly beyond it.
+//
+// This isn't a general purpose function. The caller is responsible for ensuring
+// that the ref-count is in place for this allocation.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
+                                       uintptr_t test_address,
+                                       size_t type_size);
+
+PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
+  PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());
+
+  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
+  auto* root = PartitionRoot::FromSlotSpan(slot_span);
+  // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
+  // supports reference counts.
+  PA_DCHECK(root->brp_enabled());
+
+  // Iterating over the entire slot can be really expensive.
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+  auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
+  // If we have a hook, the object segment is not necessarily filled
+  // with |kQuarantinedByte|.
+  if (PA_LIKELY(!hook)) {
+    unsigned char* object =
+        static_cast<unsigned char*>(root->SlotStartToObject(slot_start));
+    for (size_t i = 0; i < root->GetSlotUsableSize(slot_span); ++i) {
+      PA_DCHECK(object[i] == kQuarantinedByte);
+    }
+  }
+  DebugMemset(SlotStartAddr2Ptr(slot_start), kFreedByte,
+              slot_span->GetUtilizedSlotSize()
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+                  - sizeof(PartitionRefCount)
+#endif
+  );
+#endif
+
+  root->total_size_of_brp_quarantined_bytes.fetch_sub(
+      slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
+  root->total_count_of_brp_quarantined_slots.fetch_sub(
+      1, std::memory_order_relaxed);
+
+  root->RawFreeWithThreadCache(slot_start, slot_span);
+}
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+}  // namespace internal
+
+template <AllocFlags flags>
+PA_ALWAYS_INLINE uintptr_t
+PartitionRoot::AllocFromBucket(Bucket* bucket,
+                               size_t raw_size,
+                               size_t slot_span_alignment,
+                               size_t* usable_size,
+                               bool* is_already_zeroed) {
+  PA_DCHECK((slot_span_alignment >= internal::PartitionPageSize()) &&
+            internal::base::bits::IsPowerOfTwo(slot_span_alignment));
+  SlotSpan* slot_span = bucket->active_slot_spans_head;
+  // There always must be a slot span on the active list (could be a sentinel).
+  PA_DCHECK(slot_span);
+  // Check that it isn't marked full, which could only be true if the span was
+  // removed from the active list.
+  PA_DCHECK(!slot_span->marked_full);
+
+  uintptr_t slot_start =
+      internal::SlotStartPtr2Addr(slot_span->get_freelist_head());
+  // Use the fast path when a slot is readily available on the free list of the
+  // first active slot span. However, fall back to the slow path if a
+  // higher-order alignment is requested, because an inner slot of an existing
+  // slot span is unlikely to satisfy it.
+  if (PA_LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
+                slot_start)) {
+    *is_already_zeroed = false;
+    // This is a fast path, avoid calling GetSlotUsableSize() in Release builds
+    // as it is costlier. Copy its small bucket path instead.
+    *usable_size = AdjustSizeForExtrasSubtract(bucket->slot_size);
+    PA_DCHECK(*usable_size == GetSlotUsableSize(slot_span));
+
+    // If these DCHECKs fire, you probably corrupted memory.
+    // TODO(crbug.com/1257655): See if we can afford to make these CHECKs.
+    DCheckIsValidSlotSpan(slot_span);
+
+    // All large allocations must go through the slow path to correctly update
+    // the size metadata.
+    PA_DCHECK(!slot_span->CanStoreRawSize());
+    PA_DCHECK(!slot_span->bucket->is_direct_mapped());
+    void* entry = slot_span->PopForAlloc(bucket->slot_size);
+    PA_DCHECK(internal::SlotStartPtr2Addr(entry) == slot_start);
+
+    PA_DCHECK(slot_span->bucket == bucket);
+  } else {
+    slot_start = bucket->SlowPathAlloc(this, flags, raw_size,
+                                       slot_span_alignment, is_already_zeroed);
+    if (PA_UNLIKELY(!slot_start)) {
+      return 0;
+    }
+
+    slot_span = SlotSpan::FromSlotStart(slot_start);
+    // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
+    DCheckIsValidSlotSpan(slot_span);
+    // For direct mapped allocations, |bucket| is the sentinel.
+    PA_DCHECK((slot_span->bucket == bucket) ||
+              (slot_span->bucket->is_direct_mapped() &&
+               (bucket == &sentinel_bucket)));
+
+    *usable_size = GetSlotUsableSize(slot_span);
+  }
+  PA_DCHECK(slot_span->GetUtilizedSlotSize() <= slot_span->bucket->slot_size);
+  IncreaseTotalSizeOfAllocatedBytes(
+      slot_start, slot_span->GetSlotSizeForBookkeeping(), raw_size);
+
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  if (!slot_span->bucket->is_direct_mapped()) {
+    internal::FreeSlotBitmapMarkSlotAsUsed(slot_start);
+  }
+#endif
+
+  return slot_start;
+}
+
+AllocationNotificationData PartitionRoot::CreateAllocationNotificationData(
+    void* object,
+    size_t size,
+    const char* type_name) const {
+  AllocationNotificationData notification_data(object, size, type_name);
+
+  if (IsMemoryTaggingEnabled()) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+    notification_data.SetMteReportingMode(memory_tagging_reporting_mode());
+#endif
+  }
+
+  return notification_data;
+}
+
+FreeNotificationData PartitionRoot::CreateDefaultFreeNotificationData(
+    void* address) {
+  return FreeNotificationData(address);
+}
+
+FreeNotificationData PartitionRoot::CreateFreeNotificationData(
+    void* address) const {
+  FreeNotificationData notification_data =
+      CreateDefaultFreeNotificationData(address);
+
+  if (IsMemoryTaggingEnabled()) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+    notification_data.SetMteReportingMode(memory_tagging_reporting_mode());
+#endif
+  }
+
+  return notification_data;
+}
+
+// static
+template <FreeFlags flags>
+PA_ALWAYS_INLINE bool PartitionRoot::FreeProlog(void* object,
+                                                const PartitionRoot* root) {
+  static_assert(AreValidFlags(flags));
+  if constexpr (ContainsFlags(flags, FreeFlags::kNoHooks)) {
+    return false;
+  }
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  if constexpr (!ContainsFlags(flags, FreeFlags::kNoMemoryToolOverride)) {
+    free(object);
+    return true;
+  }
+#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  if (PA_UNLIKELY(!object)) {
+    return true;
+  }
+
+  if (PartitionAllocHooks::AreHooksEnabled()) {
+    // A valid |root| might not be available if this function is called from
+    // |FreeInUnknownRoot|, and is not deducible if the object originates from
+    // an override hook.
+    // TODO(crbug.com/1137393): See if we can make the root available more
+    // reliably or even make this function non-static.
+    auto notification_data = root ? root->CreateFreeNotificationData(object)
+                                  : CreateDefaultFreeNotificationData(object);
+    PartitionAllocHooks::FreeObserverHookIfEnabled(notification_data);
+    if (PartitionAllocHooks::FreeOverrideHookIfEnabled(object)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+PA_ALWAYS_INLINE bool PartitionRoot::IsMemoryTaggingEnabled() const {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  return settings.memory_tagging_enabled_;
+#else
+  return false;
+#endif
+}
+
+PA_ALWAYS_INLINE TagViolationReportingMode
+PartitionRoot::memory_tagging_reporting_mode() const {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  return settings.memory_tagging_reporting_mode_;
+#else
+  return TagViolationReportingMode::kUndefined;
+#endif
+}
+
+// static
+template <FreeFlags flags>
+PA_ALWAYS_INLINE void PartitionRoot::FreeInlineInUnknownRoot(void* object) {
+  bool early_return = FreeProlog<flags>(object, nullptr);
+  if (early_return) {
+    return;
+  }
+
+  if (PA_UNLIKELY(!object)) {
+    return;
+  }
+
+  // Fetch the root from the address, and not SlotSpanMetadata. This is
+  // important, as obtaining it from SlotSpanMetadata is a slow operation
+  // (looking into the metadata area, and following a pointer), which can induce
+  // cache coherency traffic (since they're read on every free(), and written to
+  // on any malloc()/free() that is not a hit in the thread cache). This way we
+  // change the critical path from object -> slot_span -> root into two
+  // *parallel* ones:
+  // 1. object -> root
+  // 2. object -> slot_span (inside FreeInline)
+  uintptr_t object_addr = internal::ObjectPtr2Addr(object);
+  auto* root = FromAddrInFirstSuperpage(object_addr);
+  root->FreeInline<flags | FreeFlags::kNoHooks>(object);
+}
+
+template <FreeFlags flags>
+PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
+  // The correct PartitionRoot might not be deducible if the |object| originates
+  // from an override hook.
+  bool early_return = FreeProlog<flags>(object, this);
+  if (early_return) {
+    return;
+  }
+
+  if (PA_UNLIKELY(!object)) {
+    return;
+  }
+
+  // TODO(https://crbug.com/1497380): Collecting objects for
+  // `kSchedulerLoopQuarantine` here means it "delays" other checks (BRP
+  // refcount, cookie, etc.).
+  // For better debuggability, we should do these checks before quarantining.
+  if constexpr (ContainsFlags(flags, FreeFlags::kSchedulerLoopQuarantine)) {
+    GetSchedulerLoopQuarantine().Quarantine(
+        internal::LightweightQuarantineEntry(object));
+    return;
+  }
+
+  // Almost all calls to FreeNoHooks() will end up writing to |*object|; the
+  // only cases where we don't are delayed free()s in PCScan. Either way,
+  // |*object| can be cold in cache.
+  PA_PREFETCH(object);
+
+  // On Android, malloc() interception is more fragile than on other
+  // platforms, as we use wrapped symbols. However, the pools allow us to
+  // quickly tell that a pointer was allocated with PartitionAlloc.
+  //
+  // This is a crash to detect imperfect symbol interception. However, we can
+  // forward allocations we don't own to the system malloc() implementation in
+  // these rare cases, assuming that some remain.
+  //
+  // On Android Chromecast devices, this is already checked in PartitionFree()
+  // in the shim.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
+    (BUILDFLAG(IS_ANDROID) && !BUILDFLAG(PA_IS_CAST_ANDROID))
+  uintptr_t object_addr = internal::ObjectPtr2Addr(object);
+  PA_CHECK(IsManagedByPartitionAlloc(object_addr));
+#endif
+
+  SlotSpan* slot_span = SlotSpan::FromObject(object);
+  PA_DCHECK(PartitionRoot::FromSlotSpan(slot_span) == this);
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  if (PA_LIKELY(IsMemoryTaggingEnabled())) {
+    const size_t slot_size = slot_span->bucket->slot_size;
+    if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
+      // slot_span is untagged at this point, so we have to recover its tag
+      // again to increment and provide use-after-free mitigations.
+      size_t tag_size = slot_size;
+#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
+      tag_size -= settings.ref_count_size;
+#endif
+      void* retagged_slot_start = internal::TagMemoryRangeIncrement(
+          ObjectToTaggedSlotStart(object), tag_size);
+      // Incrementing the MTE-tag in the memory range invalidates the |object|'s
+      // tag, so it must be retagged.
+      object = TaggedSlotStartToObject(retagged_slot_start);
+    }
+  }
+#else
+  // We are going to read from |*slot_span| in all branches, but haven't done it
+  // yet.
+  //
+  // TODO(crbug.com/1207307): It would be much better to avoid touching
+  // |*slot_span| at all on the fast path, or at least to separate its read-only
+  // parts (i.e. bucket pointer) from the rest. Indeed, every thread cache miss
+  // (or batch fill) will *write* to |slot_span->freelist_head|, leading to
+  // cacheline ping-pong.
+  //
+  // Don't do it when memory tagging is enabled, as |*slot_span| has already
+  // been touched above.
+  PA_PREFETCH(slot_span);
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+  uintptr_t slot_start = ObjectToSlotStart(object);
+  PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
+
+#if BUILDFLAG(USE_STARSCAN)
+  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
+  // default.
+  if (PA_UNLIKELY(ShouldQuarantine(object))) {
+    // PCScan safepoint. Call before potentially scheduling scanning task.
+    PCScan::JoinScanIfNeeded();
+    if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
+      PCScan::MoveToQuarantine(object, GetSlotUsableSize(slot_span), slot_start,
+                               slot_span->bucket->slot_size);
+      return;
+    }
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+  FreeNoHooksImmediate(object, slot_span, slot_start);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
+    void* object,
+    SlotSpan* slot_span,
+    uintptr_t slot_start) {
+  // The thread cache is added "in the middle" of the main allocator, that is:
+  // - After all the cookie/ref-count management
+  // - Before the "raw" allocator.
+  //
+  // On the deallocation side:
+  // 1. Check cookie/ref-count, adjust the pointer
+  // 2. Deallocation
+  //   a. Return to the thread cache if possible. If it succeeds, return.
+  //   b. Otherwise, call the "raw" allocator <-- Locking
+  PA_DCHECK(object);
+  PA_DCHECK(slot_span);
+  DCheckIsValidSlotSpan(slot_span);
+  PA_DCHECK(slot_start);
+
+  // Layout inside the slot:
+  //   |[refcnt]|...object...|[empty]|[cookie]|[unused]|
+  //            <--------(a)--------->
+  //   <--(b)--->         +          <--(b)--->
+  //   <-----------------(c)------------------>
+  //     (a) usable_size
+  //     (b) extras
+  //     (c) utilized_slot_size
+  //
+  // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
+  //   |...object...|[empty]|[cookie]|[unused]|[refcnt]|
+  //   <--------(a)--------->
+  //                        <--(b)--->   +    <--(b)--->
+  //   <-------------(c)------------->   +    <--(c)--->
+  //
+  // Note: ref-count and cookie can be 0-sized.
+  //
+  // For more context, see the other "Layout inside the slot" comment inside
+  // AllocInternalNoHooks().
+
+  if (settings.use_cookie) {
+    // Verify the cookie after the allocated region.
+    // If this assert fires, you probably corrupted memory.
+    internal::PartitionCookieCheckValue(static_cast<unsigned char*>(object) +
+                                        GetSlotUsableSize(slot_span));
+  }
+
+#if BUILDFLAG(USE_STARSCAN)
+  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
+  // default.
+  if (PA_UNLIKELY(IsQuarantineEnabled())) {
+    if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
+      // Mark the state in the state bitmap as freed.
+      internal::StateBitmapFromAddr(slot_start)->Free(slot_start);
+    }
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
+  // be false only for the aligned partition.
+  if (brp_enabled()) {
+    auto* ref_count = internal::PartitionRefCountPointer(slot_start);
+    // If there are no more references to the allocation, it can be freed
+    // immediately. Otherwise, defer the operation and zap the memory to turn
+    // potential use-after-free issues into unexploitable crashes.
+    if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs())) {
+      auto usable_size = GetSlotUsableSize(slot_span);
+      auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
+      if (PA_UNLIKELY(hook)) {
+        hook(object, usable_size);
+      } else {
+        internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
+      }
+    }
+
+    if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
+      total_size_of_brp_quarantined_bytes.fetch_add(
+          slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
+      total_count_of_brp_quarantined_slots.fetch_add(1,
+                                                     std::memory_order_relaxed);
+      cumulative_size_of_brp_quarantined_bytes.fetch_add(
+          slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
+      cumulative_count_of_brp_quarantined_slots.fetch_add(
+          1, std::memory_order_relaxed);
+      return;
+    }
+  }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+  // memset() can be really expensive.
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+  internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
+                        internal::kFreedByte,
+                        slot_span->GetUtilizedSlotSize()
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+                            - sizeof(internal::PartitionRefCount)
+#endif
+  );
+#elif PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
+  // `memset` only once in a while: we're trading off safety for time
+  // efficiency.
+  if (PA_UNLIKELY(internal::RandomPeriod()) &&
+      !IsDirectMappedBucket(slot_span->bucket)) {
+    internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
+                           slot_span->GetUtilizedSlotSize()
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+                               - sizeof(internal::PartitionRefCount)
+#endif
+    );
+  }
+#endif  // PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
+
+  RawFreeWithThreadCache(slot_start, slot_span);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::FreeInSlotSpan(uintptr_t slot_start,
+                                                    SlotSpan* slot_span) {
+  DecreaseTotalSizeOfAllocatedBytes(slot_start,
+                                    slot_span->GetSlotSizeForBookkeeping());
+#if BUILDFLAG(USE_FREESLOT_BITMAP)
+  if (!slot_span->bucket->is_direct_mapped()) {
+    internal::FreeSlotBitmapMarkSlotAsFree(slot_start);
+  }
+#endif
+
+  return slot_span->Free(slot_start, this);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::RawFree(uintptr_t slot_start) {
+  SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
+  RawFree(slot_start, slot_span);
+}
+
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+// MSVC only supports inline assembly on x86, so when building with non-clang
+// MSVC this pragma is used in place of the inline-asm compiler barrier below.
+//
+// TODO(crbug.com/1351310): Make sure inlining doesn't degrade this into
+// a no-op or similar. The documentation doesn't say.
+#pragma optimize("", off)
+#endif
+PA_ALWAYS_INLINE void PartitionRoot::RawFree(uintptr_t slot_start,
+                                             SlotSpan* slot_span) {
+  // At this point we are about to acquire the lock, so we try to minimize the
+  // risk of blocking inside the locked section.
+  //
+  // For allocations that are not direct-mapped, there will always be a store at
+  // the beginning of |*slot_start|, to link the freelist. This is why there is
+  // a prefetch of it at the beginning of the free() path.
+  //
+  // However, the memory which is being freed can be very cold (for instance
+  // during browser shutdown, when various caches are finally completely freed),
+  // and so moved to either compressed memory or swap. This means that touching
+  // it here can cause a major page fault. This in turn will cause
+  // descheduling of the thread *while locked*. Since we don't have priority
+  // inheritance locks on most platforms, avoiding long locked periods relies on
+  // the OS having proper priority boosting. There is evidence
+  // (crbug.com/1228523) that this is not always the case on Windows, and a very
+  // low priority background thread can block the main one for a long time,
+  // leading to hangs.
+  //
+  // To mitigate that, make sure that we fault *before* locking. Note that this
+  // is useless for direct-mapped allocations (which are very rare anyway), and
+  // that this path is *not* taken for thread cache bucket purge (since it calls
+  // RawFreeLocked()). This is intentional, as the thread cache is purged often,
+  // and as a consequence the memory has already been touched
+  // recently (to link the thread cache freelist).
+  *static_cast<volatile uintptr_t*>(internal::SlotStartAddr2Ptr(slot_start)) =
+      0;
+  // Note: even though we write to slot_start + sizeof(void*) as well, due to
+  // alignment constraints, the two locations are always going to be in the same
+  // OS page. No need to write to the second one as well.
+  //
+  // Do not move the store above inside the locked section.
+#if !(PA_CONFIG(IS_NONCLANG_MSVC))
+  __asm__ __volatile__("" : : "r"(slot_start) : "memory");
+#endif
+
+  ::partition_alloc::internal::ScopedGuard guard{
+      internal::PartitionRootLock(this)};
+  FreeInSlotSpan(slot_start, slot_span);
+}
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+#pragma optimize("", on)
+#endif
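+
+// The "fault the page before taking the lock" idea above, reduced to a
+// standalone sketch. The mutex and free-list below are illustrative stand-ins,
+// not PartitionAlloc types:
+//
+//   void FreeRecord(Record* record, std::mutex& lock, FreeList& free_list) {
+//     // Touch the memory that will be written while linking the free-list,
+//     // *before* acquiring the lock, so a potential major page fault
+//     // (swapped-out or compressed page) is taken outside the critical
+//     // section.
+//     *reinterpret_cast<volatile uintptr_t*>(record) = 0;
+//     std::lock_guard<std::mutex> guard(lock);  // Short critical section.
+//     free_list.Push(record);                   // Store now hits a warm page.
+//   }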
+
+PA_ALWAYS_INLINE void PartitionRoot::RawFreeBatch(FreeListEntry* head,
+                                                  FreeListEntry* tail,
+                                                  size_t size,
+                                                  SlotSpan* slot_span) {
+  PA_DCHECK(head);
+  PA_DCHECK(tail);
+  PA_DCHECK(size > 0);
+  PA_DCHECK(slot_span);
+  DCheckIsValidSlotSpan(slot_span);
+  // The passed freelist is likely to be just built up, which means that the
+  // corresponding pages were faulted in (without acquiring the lock). So there
+  // is no need to touch pages manually here before the lock.
+  ::partition_alloc::internal::ScopedGuard guard{
+      internal::PartitionRootLock(this)};
+  // TODO(thiabaud): Fix the accounting here. The size is correct, but the
+  // pointer is not. This only affects local tools that record each allocation,
+  // not our metrics.
+  DecreaseTotalSizeOfAllocatedBytes(
+      0u, slot_span->GetSlotSizeForBookkeeping() * size);
+  slot_span->AppendFreeList(head, tail, size, this);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::RawFreeWithThreadCache(
+    uintptr_t slot_start,
+    SlotSpan* slot_span) {
+  // PA_LIKELY: performance-sensitive partitions have a thread cache,
+  // direct-mapped allocations are uncommon.
+  ThreadCache* thread_cache = GetThreadCache();
+  if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
+                !IsDirectMappedBucket(slot_span->bucket))) {
+    size_t bucket_index =
+        static_cast<size_t>(slot_span->bucket - this->buckets);
+    size_t slot_size;
+    if (PA_LIKELY(thread_cache->MaybePutInCache(slot_start, bucket_index,
+                                                &slot_size))) {
+      // This is a fast path, avoid calling GetSlotUsableSize() in Release
+      // builds as it is costlier. Copy its small bucket path instead.
+      PA_DCHECK(!slot_span->CanStoreRawSize());
+      size_t usable_size = AdjustSizeForExtrasSubtract(slot_size);
+      PA_DCHECK(usable_size == GetSlotUsableSize(slot_span));
+      thread_cache->RecordDeallocation(usable_size);
+      return;
+    }
+  }
+
+  if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
+    // Accounting must be done outside `RawFree()`, as it's also called from the
+    // thread cache. We would double-count otherwise.
+    //
+    // GetSlotUsableSize() will always give the correct result, and we are in
+    // a slow path here (since the thread cache case returned earlier).
+    size_t usable_size = GetSlotUsableSize(slot_span);
+    thread_cache->RecordDeallocation(usable_size);
+  }
+  RawFree(slot_start, slot_span);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::RawFreeLocked(uintptr_t slot_start) {
+  SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
+  // Direct-mapped deallocation releases then re-acquires the lock. The caller
+  // may not expect that, but we never call this function on direct-mapped
+  // allocations.
+  PA_DCHECK(!IsDirectMappedBucket(slot_span->bucket));
+  FreeInSlotSpan(slot_start, slot_span);
+}
+
+PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromSlotSpan(
+    SlotSpan* slot_span) {
+  auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
+      reinterpret_cast<uintptr_t>(slot_span) & internal::SystemPageBaseMask());
+  return extent_entry->root;
+}
+
+PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromFirstSuperPage(
+    uintptr_t super_page) {
+  PA_DCHECK(internal::IsReservationStart(super_page));
+  auto* extent_entry = internal::PartitionSuperPageToExtent(super_page);
+  PartitionRoot* root = extent_entry->root;
+  PA_DCHECK(root->inverted_self == ~reinterpret_cast<uintptr_t>(root));
+  return root;
+}
+
+PA_ALWAYS_INLINE PartitionRoot* PartitionRoot::FromAddrInFirstSuperpage(
+    uintptr_t address) {
+  uintptr_t super_page = address & internal::kSuperPageBaseMask;
+  PA_DCHECK(internal::IsReservationStart(super_page));
+  return FromFirstSuperPage(super_page);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::IncreaseTotalSizeOfAllocatedBytes(
+    uintptr_t addr,
+    size_t len,
+    size_t raw_size) {
+  total_size_of_allocated_bytes += len;
+  max_size_of_allocated_bytes =
+      std::max(max_size_of_allocated_bytes, total_size_of_allocated_bytes);
+#if BUILDFLAG(RECORD_ALLOC_INFO)
+  partition_alloc::internal::RecordAllocOrFree(addr | 0x01, raw_size);
+#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::DecreaseTotalSizeOfAllocatedBytes(
+    uintptr_t addr,
+    size_t len) {
+  // An underflow here means we've miscounted |total_size_of_allocated_bytes|
+  // somewhere.
+  PA_DCHECK(total_size_of_allocated_bytes >= len);
+  total_size_of_allocated_bytes -= len;
+#if BUILDFLAG(RECORD_ALLOC_INFO)
+  partition_alloc::internal::RecordAllocOrFree(addr | 0x00, len);
+#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::IncreaseCommittedPages(size_t len) {
+  const auto old_total =
+      total_size_of_committed_pages.fetch_add(len, std::memory_order_relaxed);
+
+  const auto new_total = old_total + len;
+
+  // This function is called quite frequently; to avoid performance problems, we
+  // don't want to hold a lock here, so we use compare and exchange instead.
+  size_t expected = max_size_of_committed_pages.load(std::memory_order_relaxed);
+  size_t desired;
+  do {
+    desired = std::max(expected, new_total);
+  } while (!max_size_of_committed_pages.compare_exchange_weak(
+      expected, desired, std::memory_order_relaxed, std::memory_order_relaxed));
+}
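+
+// The lock-free "track the maximum" pattern used above, as a self-contained
+// sketch (only needs <atomic> and <algorithm>; not PartitionAlloc-specific):
+//
+//   void UpdateMax(std::atomic<size_t>& max, size_t candidate) {
+//     size_t expected = max.load(std::memory_order_relaxed);
+//     size_t desired;
+//     do {
+//       desired = std::max(expected, candidate);
+//     } while (!max.compare_exchange_weak(expected, desired,
+//                                         std::memory_order_relaxed,
+//                                         std::memory_order_relaxed));
+//   }
+//
+// On failure, compare_exchange_weak() reloads |expected| with the current
+// value, so the loop converges even when other threads publish larger totals
+// concurrently.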
+
+PA_ALWAYS_INLINE void PartitionRoot::DecreaseCommittedPages(size_t len) {
+  total_size_of_committed_pages.fetch_sub(len, std::memory_order_relaxed);
+}
+
+PA_ALWAYS_INLINE void PartitionRoot::DecommitSystemPagesForData(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  internal::ScopedSyscallTimer timer{this};
+  DecommitSystemPages(address, length, accessibility_disposition);
+  DecreaseCommittedPages(length);
+}
+
+// Not unified with TryRecommitSystemPagesForData() to preserve error codes.
+PA_ALWAYS_INLINE void PartitionRoot::RecommitSystemPagesForData(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition,
+    bool request_tagging) {
+  internal::ScopedSyscallTimer timer{this};
+
+  auto page_accessibility = GetPageAccessibility(request_tagging);
+  bool ok = TryRecommitSystemPages(address, length, page_accessibility,
+                                   accessibility_disposition);
+  if (PA_UNLIKELY(!ok)) {
+    // Decommit some memory and retry. The alternative is crashing.
+    DecommitEmptySlotSpans();
+    RecommitSystemPages(address, length, page_accessibility,
+                        accessibility_disposition);
+  }
+
+  IncreaseCommittedPages(length);
+}
+
+PA_ALWAYS_INLINE bool PartitionRoot::TryRecommitSystemPagesForData(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition,
+    bool request_tagging) {
+  internal::ScopedSyscallTimer timer{this};
+
+  auto page_accessibility = GetPageAccessibility(request_tagging);
+  bool ok = TryRecommitSystemPages(address, length, page_accessibility,
+                                   accessibility_disposition);
+  if (PA_UNLIKELY(!ok)) {
+    // Decommit some memory and retry. The alternative is crashing.
+    {
+      ::partition_alloc::internal::ScopedGuard guard(
+          internal::PartitionRootLock(this));
+      DecommitEmptySlotSpans();
+    }
+    ok = TryRecommitSystemPages(address, length, page_accessibility,
+                                accessibility_disposition);
+  }
+
+  if (ok) {
+    IncreaseCommittedPages(length);
+  }
+
+  return ok;
+}
+
+// static
+//
+// Returns the size available to the app. It can be equal to or higher than the
+// requested size. If higher, the overage won't exceed what's actually usable
+// by the app without a risk of running out of an allocated region or into
+// PartitionAlloc's internal data. Used as malloc_usable_size and malloc_size.
+//
+// |ptr| should preferably point to the beginning of an object returned from
+// malloc() et al., but it doesn't have to. crbug.com/1292646 shows an example
+// where this isn't the case. Note, an inner object pointer won't work for
+// direct map, unless it is within the first partition page.
+PA_ALWAYS_INLINE size_t PartitionRoot::GetUsableSize(void* ptr) {
+  // malloc_usable_size() is expected to handle NULL gracefully and return 0.
+  if (!ptr) {
+    return 0;
+  }
+  auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
+  auto* root = FromSlotSpan(slot_span);
+  return root->GetSlotUsableSize(slot_span);
+}
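+
+// Sketch of the malloc_usable_size()-style contract this implements (the
+// sizes below are purely illustrative):
+//
+//   void* p = malloc(20);  // Assuming malloc() is routed to PartitionAlloc.
+//   size_t usable = PartitionRoot::GetUsableSize(p);  // >= 20, e.g. 24 or 32.
+//   // The caller may legitimately use all |usable| bytes, not just the 20
+//   // originally requested.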
+
+PA_ALWAYS_INLINE size_t
+PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(void* ptr) {
+  // malloc_usable_size() is expected to handle NULL gracefully and return 0.
+  if (!ptr) {
+    return 0;
+  }
+  auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
+  auto* root = FromSlotSpan(slot_span);
+  size_t usable_size = root->GetSlotUsableSize(slot_span);
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+  // Check |mac11_malloc_size_hack_enabled_| flag first as this doesn't
+  // concern OS versions other than macOS 11.
+  if (PA_UNLIKELY(root->settings.mac11_malloc_size_hack_enabled_ &&
+                  usable_size ==
+                      root->settings.mac11_malloc_size_hack_usable_size_)) {
+    uintptr_t slot_start =
+        internal::PartitionAllocGetSlotStartInBRPPool(UntagPtr(ptr));
+    auto* ref_count = internal::PartitionRefCountPointer(slot_start);
+    if (ref_count->NeedsMac11MallocSizeHack()) {
+      return internal::kMac11MallocSizeHackRequestedSize;
+    }
+  }
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+
+  return usable_size;
+}
+
+// Returns the page configuration to use when mapping slot spans for a given
+// partition root. ReadWriteTagged is used on MTE-enabled systems for
+// PartitionRoots supporting it.
+PA_ALWAYS_INLINE PageAccessibilityConfiguration
+PartitionRoot::GetPageAccessibility(bool request_tagging) const {
+  PageAccessibilityConfiguration::Permissions permissions =
+      PageAccessibilityConfiguration::kReadWrite;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  if (IsMemoryTaggingEnabled() && request_tagging) {
+    permissions = PageAccessibilityConfiguration::kReadWriteTagged;
+  }
+#endif
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  return PageAccessibilityConfiguration(permissions, settings.thread_isolation);
+#else
+  return PageAccessibilityConfiguration(permissions);
+#endif
+}
+
+PA_ALWAYS_INLINE PageAccessibilityConfiguration
+PartitionRoot::PageAccessibilityWithThreadIsolationIfEnabled(
+    PageAccessibilityConfiguration::Permissions permissions) const {
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  return PageAccessibilityConfiguration(permissions, settings.thread_isolation);
+#endif
+  return PageAccessibilityConfiguration(permissions);
+}
+
+// Return the capacity of the underlying slot (adjusted for extras). This
+// doesn't mean this capacity is readily available. It merely means that if
+// a new allocation (or realloc) happened with that returned value, it'd use
+// the same amount of underlying memory.
+PA_ALWAYS_INLINE size_t
+PartitionRoot::AllocationCapacityFromSlotStart(uintptr_t slot_start) const {
+  auto* slot_span = SlotSpan::FromSlotStart(slot_start);
+  return AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
+}
+
+// static
+PA_ALWAYS_INLINE uint16_t
+PartitionRoot::SizeToBucketIndex(size_t size,
+                                 BucketDistribution bucket_distribution) {
+  switch (bucket_distribution) {
+    case BucketDistribution::kNeutral:
+      return internal::BucketIndexLookup::GetIndexForNeutralBuckets(size);
+    case BucketDistribution::kDenser:
+      return internal::BucketIndexLookup::GetIndexForDenserBuckets(size);
+  }
+}
+
+template <AllocFlags flags>
+PA_ALWAYS_INLINE void* PartitionRoot::AllocInternal(size_t requested_size,
+                                                    size_t slot_span_alignment,
+                                                    const char* type_name) {
+  static_assert(AreValidFlags(flags));
+  PA_DCHECK(
+      (slot_span_alignment >= internal::PartitionPageSize()) &&
+      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
+  static_assert(!ContainsFlags(
+      flags, AllocFlags::kMemoryShouldBeTaggedForMte));  // Internal only.
+
+  constexpr bool no_hooks = ContainsFlags(flags, AllocFlags::kNoHooks);
+  bool hooks_enabled;
+
+  if constexpr (!no_hooks) {
+    PA_DCHECK(initialized);
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+    if constexpr (!ContainsFlags(flags, AllocFlags::kNoMemoryToolOverride)) {
+      if (!PartitionRoot::AllocWithMemoryToolProlog<flags>(requested_size)) {
+        // Early return if AllocWithMemoryToolProlog returns false
+        return nullptr;
+      }
+      constexpr bool zero_fill = ContainsFlags(flags, AllocFlags::kZeroFill);
+      void* result =
+          zero_fill ? calloc(1, requested_size) : malloc(requested_size);
+      if constexpr (!ContainsFlags(flags, AllocFlags::kReturnNull)) {
+        PA_CHECK(result);
+      }
+      return result;
+    }
+#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+    void* object = nullptr;
+    hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
+    if (hooks_enabled) {
+      auto additional_flags = AllocFlags::kNone;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+      if (IsMemoryTaggingEnabled()) {
+        additional_flags |= AllocFlags::kMemoryShouldBeTaggedForMte;
+      }
+#endif
+      // The override hooks will return false if it can't handle the request,
+      // i.e. due to unsupported flags. In this case, we forward the allocation
+      // request to the default mechanisms.
+      // TODO(crbug.com/1137393): See if we can make the forwarding more verbose
+      // to ensure that this situation doesn't go unnoticed.
+      if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
+              &object, flags | additional_flags, requested_size, type_name)) {
+        PartitionAllocHooks::AllocationObserverHookIfEnabled(
+            CreateAllocationNotificationData(object, requested_size,
+                                             type_name));
+        return object;
+      }
+    }
+  }
+
+  void* const object =
+      AllocInternalNoHooks<flags>(requested_size, slot_span_alignment);
+
+  if constexpr (!no_hooks) {
+    if (PA_UNLIKELY(hooks_enabled)) {
+      PartitionAllocHooks::AllocationObserverHookIfEnabled(
+          CreateAllocationNotificationData(object, requested_size, type_name));
+    }
+  }
+
+  return object;
+}
+
+template <AllocFlags flags>
+PA_ALWAYS_INLINE void* PartitionRoot::AllocInternalNoHooks(
+    size_t requested_size,
+    size_t slot_span_alignment) {
+  static_assert(AreValidFlags(flags));
+
+  // The thread cache is added "in the middle" of the main allocator, that is:
+  // - After all the cookie/ref-count management
+  // - Before the "raw" allocator.
+  //
+  // That is, the general allocation flow is:
+  // 1. Adjustment of requested size to make room for extras
+  // 2. Allocation:
+  //   a. Call to the thread cache, if it succeeds, go to step 3.
+  //   b. Otherwise, call the "raw" allocator <-- Locking
+  // 3. Handle cookie/ref-count, zero allocation if required
+
+  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
+  PA_CHECK(raw_size >= requested_size);  // check for overflows
+
+  // We should only call |SizeToBucketIndex| at most once when allocating.
+  // Otherwise, we risk having |bucket_distribution| changed
+  // underneath us (between calls to |SizeToBucketIndex| during the same call),
+  // which would result in an inconsistent state.
+  uint16_t bucket_index =
+      SizeToBucketIndex(raw_size, this->GetBucketDistribution());
+  size_t usable_size;
+  bool is_already_zeroed = false;
+  uintptr_t slot_start = 0;
+  size_t slot_size;
+
+#if BUILDFLAG(USE_STARSCAN)
+  const bool is_quarantine_enabled = IsQuarantineEnabled();
+  // PCScan safepoint. Call before trying to allocate from cache.
+  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
+  // default.
+  if (PA_UNLIKELY(is_quarantine_enabled)) {
+    PCScan::JoinScanIfNeeded();
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+  auto* thread_cache = GetOrCreateThreadCache();
+
+  // Don't use thread cache if higher order alignment is requested, because the
+  // thread cache will not be able to satisfy it.
+  //
+  // PA_LIKELY: performance-sensitive partitions use the thread cache.
+  if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
+                slot_span_alignment <= internal::PartitionPageSize())) {
+    // Note: slot_size is obtained from the thread cache rather than from
+    // `buckets[bucket_index].slot_size`, to avoid touching `buckets` on the
+    // fast path.
+    slot_start = thread_cache->GetFromCache(bucket_index, &slot_size);
+
+    // PA_LIKELY: median hit rate in the thread cache is 95%, from metrics.
+    if (PA_LIKELY(slot_start)) {
+      // This follows the logic of SlotSpanMetadata::GetUsableSize for small
+      // buckets, which is too expensive to call here.
+      // Keep it in sync!
+      usable_size = AdjustSizeForExtrasSubtract(slot_size);
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+      // Make sure that the allocated pointer comes from the same place it would
+      // for a non-thread cache allocation.
+      SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
+      DCheckIsValidSlotSpan(slot_span);
+      PA_DCHECK(slot_span->bucket == &bucket_at(bucket_index));
+      PA_DCHECK(slot_span->bucket->slot_size == slot_size);
+      PA_DCHECK(usable_size == GetSlotUsableSize(slot_span));
+      // All large allocations must go through the RawAlloc path to correctly
+      // set |usable_size|.
+      PA_DCHECK(!slot_span->CanStoreRawSize());
+      PA_DCHECK(!slot_span->bucket->is_direct_mapped());
+#endif
+    } else {
+      slot_start =
+          RawAlloc<flags>(buckets + bucket_index, raw_size, slot_span_alignment,
+                          &usable_size, &is_already_zeroed);
+    }
+  } else {
+    slot_start =
+        RawAlloc<flags>(buckets + bucket_index, raw_size, slot_span_alignment,
+                        &usable_size, &is_already_zeroed);
+  }
+
+  if (PA_UNLIKELY(!slot_start)) {
+    return nullptr;
+  }
+
+  if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
+    thread_cache->RecordAllocation(usable_size);
+  }
+
+  // Layout inside the slot:
+  //   |[refcnt]|...object...|[empty]|[cookie]|[unused]|
+  //            <----(a)----->
+  //            <--------(b)--------->
+  //   <--(c)--->         +          <--(c)--->
+  //   <---------(d)--------->   +   <--(d)--->
+  //   <-----------------(e)------------------>
+  //   <----------------------(f)---------------------->
+  //     (a) requested_size
+  //     (b) usable_size
+  //     (c) extras
+  //     (d) raw_size
+  //     (e) utilized_slot_size
+  //     (f) slot_size
+  // Notes:
+  // - Ref-count may or may not exist in the slot, depending on brp_enabled().
+  // - Cookie exists only in the BUILDFLAG(PA_DCHECK_IS_ON) case.
+  // - Think of raw_size as the minimum size required internally to satisfy
+  //   the allocation request (i.e. requested_size + extras)
+  // - Note, at most one "empty" or "unused" space can occur at a time. It
+  //   occurs when slot_size is larger than raw_size. "unused" applies only to
+  //   large allocations (direct-mapped and single-slot slot spans) and "empty"
+  //   only to small allocations.
+  //   Why either-or, one might ask? We make an effort to put the trailing
+  //   cookie as close to data as possible to catch overflows (often
+  //   off-by-one), but that's possible only if we have enough space in metadata
+  //   to save raw_size, i.e. only for large allocations. For small allocations,
+  //   we have no other choice than putting the cookie at the very end of the
+  //   slot, thus creating the "empty" space.
+  //
+  // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
+  //   |...object...|[empty]|[cookie]|[unused]|[refcnt]|
+  //   <----(a)----->
+  //   <--------(b)--------->
+  //                        <--(c)--->   +    <--(c)--->
+  //   <----(d)----->   +   <--(d)--->   +    <--(d)--->
+  //   <-------------(e)------------->   +    <--(e)--->
+  //   <----------------------(f)---------------------->
+  // Notes:
+  // If |slot_start| is not SystemPageSize()-aligned (possible only for small
+  // allocations), the ref-count of this slot is stored at the end of the
+  // previous slot. Otherwise it is stored in the ref-count table placed after
+  // the super page metadata. For simplicity, the space for the ref-count is
+  // still reserved at the end of the previous slot, even though redundant.
+
+  void* object = SlotStartToObject(slot_start);
+
+  // Add the cookie after the allocation.
+  if (settings.use_cookie) {
+    internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
+                                        usable_size);
+  }
+
+  // Fill the region with kUninitializedByte (on debug builds, when zero-fill
+  // wasn't requested) or with 0 (when requested and not already zeroed).
+  constexpr bool zero_fill = ContainsFlags(flags, AllocFlags::kZeroFill);
+  // PA_LIKELY: operator new() calls malloc(), not calloc().
+  if constexpr (!zero_fill) {
+    // memset() can be really expensive.
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+    internal::DebugMemset(object, internal::kUninitializedByte, usable_size);
+#endif
+  } else if (!is_already_zeroed) {
+    memset(object, 0, usable_size);
+  }
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
+  // be false only for the aligned partition.
+  if (brp_enabled()) {
+    bool needs_mac11_malloc_size_hack = false;
+#if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+    // Only apply hack to size 32 allocations on macOS 11. There is a buggy
+    // assertion that malloc_size() equals sizeof(class_rw_t) which is 32.
+    if (PA_UNLIKELY(settings.mac11_malloc_size_hack_enabled_ &&
+                    requested_size ==
+                        internal::kMac11MallocSizeHackRequestedSize)) {
+      needs_mac11_malloc_size_hack = true;
+    }
+#endif  // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
+    auto* ref_count = new (internal::PartitionRefCountPointer(slot_start))
+        internal::PartitionRefCount(needs_mac11_malloc_size_hack);
+#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
+    ref_count->SetRequestedSize(requested_size);
+#else
+    (void)ref_count;
+#endif
+  }
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+#if BUILDFLAG(USE_STARSCAN)
+  // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
+  // default.
+  if (PA_UNLIKELY(is_quarantine_enabled)) {
+    if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
+      // Mark the corresponding bits in the state bitmap as allocated.
+      internal::StateBitmapFromAddr(slot_start)->Allocate(slot_start);
+    }
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+
+  return object;
+}
+
+template <AllocFlags flags>
+PA_ALWAYS_INLINE uintptr_t PartitionRoot::RawAlloc(Bucket* bucket,
+                                                   size_t raw_size,
+                                                   size_t slot_span_alignment,
+                                                   size_t* usable_size,
+                                                   bool* is_already_zeroed) {
+  ::partition_alloc::internal::ScopedGuard guard{
+      internal::PartitionRootLock(this)};
+  return AllocFromBucket<flags>(bucket, raw_size, slot_span_alignment,
+                                usable_size, is_already_zeroed);
+}
+
+template <AllocFlags flags>
+PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
+    size_t alignment,
+    size_t requested_size) {
+  // Aligned allocation support relies on the natural alignment guarantees of
+  // PartitionAlloc. Specifically, it relies on the fact that slots within a
+  // slot span are aligned to slot size, from the beginning of the span.
+  //
+  // For alignments <=PartitionPageSize(), the code below adjusts the request
+  // size to be a power of two, no less than alignment. Since slot spans are
+  // aligned to PartitionPageSize(), which is also a power of two, this will
+  // automatically guarantee alignment on the adjusted size boundary, thanks to
+  // the natural alignment described above.
+  //
+  // For alignments >PartitionPageSize(), we need to pass the request down the
+  // stack to only give us a slot span aligned to this more restrictive
+  // boundary. In the current implementation, this code path will always
+  // allocate a new slot span and hand us the first slot, so no need to adjust
+  // the request size. As a consequence, allocating many small objects with
+  // such a high alignment can cause non-negligible fragmentation,
+  // particularly if these allocations are back to back.
+  // TODO(bartekn): We should check that this is not causing issues in practice.
+  //
+  // Extras before the allocation are forbidden as they shift the returned
+  // allocation from the beginning of the slot, thus messing up alignment.
+  // Extras after the allocation are acceptable, but they have to be taken into
+  // account in the request size calculation to avoid crbug.com/1185484.
+  PA_DCHECK(settings.allow_aligned_alloc);
+  PA_DCHECK(!settings.extras_offset);
+  // This is mandated by |posix_memalign()|, so should never fire.
+  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
+  // Catch unsupported alignment requests early.
+  PA_CHECK(alignment <= internal::kMaxSupportedAlignment);
+  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
+
+  size_t adjusted_size = requested_size;
+  if (alignment <= internal::PartitionPageSize()) {
+    // Handle cases such as size = 16, alignment = 64.
+    // Wastes memory when a large alignment is requested with a small size, but
+    // this is hard to avoid, and should not be too common.
+    if (PA_UNLIKELY(raw_size < alignment)) {
+      raw_size = alignment;
+    } else {
+      // PartitionAlloc only guarantees alignment for power-of-two sized
+      // allocations. To make sure this applies here, round up the allocation
+      // size.
+      raw_size =
+          static_cast<size_t>(1)
+          << (int{sizeof(size_t) * 8} -
+              partition_alloc::internal::base::bits::CountLeadingZeroBits(
+                  raw_size - 1));
+    }
+    PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(raw_size));
+    // Adjust back, because AllocInternalNoHooks/Alloc will adjust it again.
+    adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
+
+    // Overflow check. adjusted_size must be larger or equal to requested_size.
+    if (PA_UNLIKELY(adjusted_size < requested_size)) {
+      if constexpr (ContainsFlags(flags, AllocFlags::kReturnNull)) {
+        return nullptr;
+      }
+      // OutOfMemoryDeathTest.AlignedAlloc requires
+      // base::TerminateBecauseOutOfMemory (invoked by
+      // PartitionExcessiveAllocationSize).
+      internal::PartitionExcessiveAllocationSize(requested_size);
+      // internal::PartitionExcessiveAllocationSize(size) causes OOM_CRASH.
+      PA_NOTREACHED();
+    }
+  }
+
+  // Slot spans are naturally aligned on partition page size, but make sure you
+  // don't pass anything less, because it'll mess up callee's calculations.
+  size_t slot_span_alignment =
+      std::max(alignment, internal::PartitionPageSize());
+  // TODO(mikt): Investigate why all flags except kNoHooks are ignored here.
+  void* object = AllocInternal<flags & AllocFlags::kNoHooks>(
+      adjusted_size, slot_span_alignment, nullptr);
+
+  // |alignment| is a power of two, but the compiler doesn't necessarily know
+  // that. A regular % operation is very slow, so make sure to use the
+  // equivalent, faster form.
+  // No need to MTE-untag, as it doesn't change alignment.
+  PA_CHECK(!(reinterpret_cast<uintptr_t>(object) & (alignment - 1)));
+
+  return object;
+}
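+
+// The round-up in the <= PartitionPageSize() branch above is the usual
+// "next power of two" bit trick. The same computation in isolation (assumes
+// n >= 1 and that the result doesn't overflow size_t):
+//
+//   size_t NextPowerOfTwo(size_t n) {
+//     int leading_zeros =
+//         partition_alloc::internal::base::bits::CountLeadingZeroBits(n - 1);
+//     return size_t{1} << (int{sizeof(size_t) * 8} - leading_zeros);
+//   }
+//
+// E.g. NextPowerOfTwo(48) == 64 and NextPowerOfTwo(64) == 64, so an already
+// power-of-two request size is left unchanged.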
+
+template <AllocFlags alloc_flags, FreeFlags free_flags>
+void* PartitionRoot::ReallocInline(void* ptr,
+                                   size_t new_size,
+                                   const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  if (!PartitionRoot::AllocWithMemoryToolProlog<alloc_flags>(new_size)) {
+    // Early return if AllocWithMemoryToolProlog returns false
+    return nullptr;
+  }
+  void* result = realloc(ptr, new_size);
+  if constexpr (!ContainsFlags(alloc_flags, AllocFlags::kReturnNull)) {
+    PA_CHECK(result);
+  }
+  return result;
+#else
+  if (PA_UNLIKELY(!ptr)) {
+    return AllocInternal<alloc_flags>(new_size, internal::PartitionPageSize(),
+                                      type_name);
+  }
+
+  if (PA_UNLIKELY(!new_size)) {
+    FreeInUnknownRoot<free_flags>(ptr);
+    return nullptr;
+  }
+
+  if (new_size > internal::MaxDirectMapped()) {
+    if constexpr (ContainsFlags(alloc_flags, AllocFlags::kReturnNull)) {
+      return nullptr;
+    }
+    internal::PartitionExcessiveAllocationSize(new_size);
+  }
+
+  constexpr bool no_hooks = ContainsFlags(alloc_flags, AllocFlags::kNoHooks);
+  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
+  bool overridden = false;
+  size_t old_usable_size;
+  if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
+    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
+        &old_usable_size, ptr);
+  }
+  if (PA_LIKELY(!overridden)) {
+    // |ptr| may have been allocated in another root.
+    SlotSpan* slot_span = SlotSpan::FromObject(ptr);
+    auto* old_root = PartitionRoot::FromSlotSpan(slot_span);
+    bool success = false;
+    bool tried_in_place_for_direct_map = false;
+    {
+      ::partition_alloc::internal::ScopedGuard guard{
+          internal::PartitionRootLock(old_root)};
+      // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
+      DCheckIsValidSlotSpan(slot_span);
+      old_usable_size = old_root->GetSlotUsableSize(slot_span);
+
+      if (PA_UNLIKELY(slot_span->bucket->is_direct_mapped())) {
+        tried_in_place_for_direct_map = true;
+        // We may be able to perform the realloc in place by changing the
+        // accessibility of memory pages and, if reducing the size, decommitting
+        // them.
+        success = old_root->TryReallocInPlaceForDirectMap(slot_span, new_size);
+      }
+    }
+    if (success) {
+      if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
+        PartitionAllocHooks::ReallocObserverHookIfEnabled(
+            CreateFreeNotificationData(ptr),
+            CreateAllocationNotificationData(ptr, new_size, type_name));
+      }
+      return ptr;
+    }
+
+    if (PA_LIKELY(!tried_in_place_for_direct_map)) {
+      if (old_root->TryReallocInPlaceForNormalBuckets(ptr, slot_span,
+                                                      new_size)) {
+        return ptr;
+      }
+    }
+  }
+
+  // This realloc cannot be resized in-place. Sadness.
+  void* ret = AllocInternal<alloc_flags>(
+      new_size, internal::PartitionPageSize(), type_name);
+  if (!ret) {
+    if constexpr (ContainsFlags(alloc_flags, AllocFlags::kReturnNull)) {
+      return nullptr;
+    }
+    internal::PartitionExcessiveAllocationSize(new_size);
+  }
+
+  memcpy(ret, ptr, std::min(old_usable_size, new_size));
+  FreeInUnknownRoot<free_flags>(
+      ptr);  // Implicitly protects the old ptr on MTE systems.
+  return ret;
+#endif
+}
+
+// Return the capacity of the underlying slot (adjusted for extras) that'd be
+// used to satisfy a request of |size|. This doesn't mean this capacity would be
+// readily available. It merely means that if an allocation happened with that
+// returned value, it'd use the same amount of underlying memory as the
+// allocation with |size|.
+PA_ALWAYS_INLINE size_t
+PartitionRoot::AllocationCapacityFromRequestedSize(size_t size) const {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return size;
+#else
+  PA_DCHECK(PartitionRoot::initialized);
+  size = AdjustSizeForExtrasAdd(size);
+  auto& bucket = bucket_at(SizeToBucketIndex(size, GetBucketDistribution()));
+  PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
+  PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
+
+  if (PA_LIKELY(!bucket.is_direct_mapped())) {
+    size = bucket.slot_size;
+  } else if (size > internal::MaxDirectMapped()) {
+    // Too large to allocate => return the size unchanged.
+  } else {
+    size = GetDirectMapSlotSize(size);
+  }
+  size = AdjustSizeForExtrasSubtract(size);
+  return size;
+#endif
+}
+
+ThreadCache* PartitionRoot::GetOrCreateThreadCache() {
+  ThreadCache* thread_cache = nullptr;
+  if (PA_LIKELY(settings.with_thread_cache)) {
+    thread_cache = ThreadCache::Get();
+    if (PA_UNLIKELY(!ThreadCache::IsValid(thread_cache))) {
+      thread_cache = MaybeInitThreadCache();
+    }
+  }
+  return thread_cache;
+}
+
+ThreadCache* PartitionRoot::GetThreadCache() {
+  return PA_LIKELY(settings.with_thread_cache) ? ThreadCache::Get() : nullptr;
+}
+
+// private.
+internal::SchedulerLoopQuarantine& PartitionRoot::GetSchedulerLoopQuarantine() {
+  // TODO(crbug.com/1462223): Implement thread-local version and return it here.
+  return *scheduler_loop_quarantine;
+}
+
+// Explicitly declare common template instantiations to reduce compile time.
+#define EXPORT_TEMPLATE                       \
+  extern template PA_EXPORT_TEMPLATE_DECLARE( \
+      PA_COMPONENT_EXPORT(PARTITION_ALLOC))
+EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kNone>(size_t,
+                                                              const char*);
+EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kReturnNull>(
+    size_t,
+    const char*);
+EXPORT_TEMPLATE void*
+PartitionRoot::Realloc<AllocFlags::kNone, FreeFlags::kNone>(void*,
+                                                            size_t,
+                                                            const char*);
+EXPORT_TEMPLATE void*
+PartitionRoot::Realloc<AllocFlags::kReturnNull, FreeFlags::kNone>(void*,
+                                                                  size_t,
+                                                                  const char*);
+EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<AllocFlags::kNone>(size_t,
+                                                                     size_t);
+#undef EXPORT_TEMPLATE
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+// Usage in `raw_ptr.cc` is notable enough to merit a non-internal alias.
+using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_ROOT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_stats.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_stats.cc
new file mode 100644
index 0000000..19775e0
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_stats.cc
@@ -0,0 +1,21 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+
+#include <cstring>
+
+namespace partition_alloc {
+
+SimplePartitionStatsDumper::SimplePartitionStatsDumper() {
+  memset(&stats_, 0, sizeof(stats_));
+}
+
+void SimplePartitionStatsDumper::PartitionDumpTotals(
+    const char* partition_name,
+    const PartitionMemoryStats* memory_stats) {
+  stats_ = *memory_stats;
+}
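+
+// Hypothetical usage sketch. The DumpStats() entry point and the stats()
+// accessor are assumed from the surrounding allocator code rather than
+// defined in this file:
+//
+//   partition_alloc::SimplePartitionStatsDumper dumper;
+//   root->DumpStats("my_partition", /*is_light_dump=*/true, &dumper);
+//   size_t committed = dumper.stats().total_committed_bytes;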
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_stats.h b/base/allocator/partition_allocator/src/partition_alloc/partition_stats.h
new file mode 100644
index 0000000..76b8e8c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_stats.h
@@ -0,0 +1,162 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_STATS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_STATS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+
+namespace partition_alloc {
+
+// Most of these are not populated if PA_THREAD_CACHE_ENABLE_STATISTICS is not
+// defined.
+struct ThreadCacheStats {
+  uint64_t alloc_count;   // Total allocation requests.
+  uint64_t alloc_hits;    // Thread cache hits.
+  uint64_t alloc_misses;  // Thread cache misses.
+
+  // Allocation failure details:
+  uint64_t alloc_miss_empty;
+  uint64_t alloc_miss_too_large;
+
+  // Cache fill details:
+  uint64_t cache_fill_count;
+  uint64_t cache_fill_hits;
+  uint64_t cache_fill_misses;  // Object too large.
+
+  uint64_t batch_fill_count;  // Number of central allocator requests.
+
+  // Memory cost:
+  uint32_t bucket_total_memory;
+  uint32_t metadata_overhead;
+
+#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
+  uint64_t allocs_per_bucket_[internal::kNumBuckets + 1];
+#endif  // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
+};
+
+// Per-thread allocation statistics. Only covers allocations made through the
+// partition linked to the thread cache. As the allocator doesn't record
+// requested sizes in most cases, the data there will be an overestimate of the
+// actually requested sizes. It is also not expected to sum up to anything
+// meaningful across threads, due to the lack of synchronization. Figures there
+// are cumulative, not net. Since the data below is per-thread, note that a
+// thread can deallocate more than it allocated.
+struct ThreadAllocStats {
+  uint64_t alloc_count;
+  uint64_t alloc_total_size;
+  uint64_t dealloc_count;
+  uint64_t dealloc_total_size;
+};
+
+struct LightweightQuarantineStats {
+  size_t size_in_bytes;
+  size_t count;
+  size_t cumulative_size_in_bytes;
+  size_t cumulative_count;
+  size_t quarantine_miss_count;  // Object too large.
+};
+
+// Struct used to retrieve total memory usage of a partition. Used by
+// PartitionStatsDumper implementation.
+struct PartitionMemoryStats {
+  size_t total_mmapped_bytes;    // Total bytes mmap()-ed from the system.
+  size_t total_committed_bytes;  // Total size of committed pages.
+  size_t max_committed_bytes;    // Max size of committed pages.
+  size_t total_allocated_bytes;  // Total size of allocations.
+  size_t max_allocated_bytes;    // Max size of allocations.
+  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
+  size_t total_active_bytes;     // Total active bytes in the partition.
+  size_t total_active_count;  // Total count of active objects in the partition.
+  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
+  size_t total_discardable_bytes;    // Total bytes that could be discarded.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  size_t
+      total_brp_quarantined_bytes;  // Total bytes that are quarantined by BRP.
+  size_t total_brp_quarantined_count;       // Total number of slots that are
+                                            // quarantined by BRP.
+  size_t cumulative_brp_quarantined_bytes;  // Cumulative bytes that are
+                                            // quarantined by BRP.
+  size_t cumulative_brp_quarantined_count;  // Cumulative number of slots that
+                                            // are quarantined by BRP.
+#endif
+
+  bool has_thread_cache;
+  ThreadCacheStats current_thread_cache_stats;
+  ThreadCacheStats all_thread_caches_stats;
+
+  // Count and total duration of system calls made since process start. May not
+  // be reported on all platforms.
+  uint64_t syscall_count;
+  uint64_t syscall_total_time_ns;
+};
+
+// Struct used to retrieve memory statistics about a partition bucket. Used by
+// PartitionStatsDumper implementation.
+struct PartitionBucketMemoryStats {
+  bool is_valid;       // Used to check if the stats are valid.
+  bool is_direct_map;  // True if this is a direct mapping; size will not be
+                       // unique.
+  uint32_t bucket_slot_size;          // The size of the slot in bytes.
+  uint32_t allocated_slot_span_size;  // Total size the slot span allocated
+                                      // from the system (committed pages).
+  uint32_t active_bytes;              // Total active bytes used in the bucket.
+  uint32_t active_count;    // Total active objects allocated in the bucket.
+  uint32_t resident_bytes;  // Total bytes provisioned in the bucket.
+  uint32_t decommittable_bytes;    // Total bytes that could be decommitted.
+  uint32_t discardable_bytes;      // Total bytes that could be discarded.
+  uint32_t num_full_slot_spans;    // Number of slot spans with all slots
+                                   // allocated.
+  uint32_t num_active_slot_spans;  // Number of slot spans that have at least
+                                   // one provisioned slot.
+  uint32_t num_empty_slot_spans;   // Number of slot spans that are empty
+                                   // but not decommitted.
+  uint32_t num_decommitted_slot_spans;  // Number of slot spans that are empty
+                                        // and decommitted.
+};
+
+// Interface that is passed to PartitionDumpStats to receive the memory
+// statistics.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionStatsDumper {
+ public:
+  virtual ~PartitionStatsDumper() = default;
+
+  // Called to dump total memory used by partition, once per partition.
+  virtual void PartitionDumpTotals(const char* partition_name,
+                                   const PartitionMemoryStats*) = 0;
+
+  // Called to dump stats about buckets, for each bucket.
+  virtual void PartitionsDumpBucketStats(const char* partition_name,
+                                         const PartitionBucketMemoryStats*) = 0;
+};
+
+// Simple version of PartitionStatsDumper, storing the returned stats in stats_.
+// Does not handle per-bucket stats.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SimplePartitionStatsDumper
+    : public PartitionStatsDumper {
+ public:
+  SimplePartitionStatsDumper();
+  ~SimplePartitionStatsDumper() override = default;
+
+  void PartitionDumpTotals(const char* partition_name,
+                           const PartitionMemoryStats* memory_stats) override;
+
+  void PartitionsDumpBucketStats(const char* partition_name,
+                                 const PartitionBucketMemoryStats*) override {}
+
+  const PartitionMemoryStats& stats() const { return stats_; }
+
+ private:
+  PartitionMemoryStats stats_;
+};
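+
+// Minimal usage sketch (illustrative only): a custom dumper that forwards the
+// per-partition totals to a logging sink. The LogSink type and the
+// root->DumpStats() call below are assumptions for the example, not part of
+// this header.
+//
+//   class LoggingStatsDumper : public PartitionStatsDumper {
+//    public:
+//     void PartitionDumpTotals(const char* partition_name,
+//                              const PartitionMemoryStats* stats) override {
+//       LogSink::Log("%s committed=%zu", partition_name,
+//                    stats->total_committed_bytes);
+//     }
+//     void PartitionsDumpBucketStats(
+//         const char* partition_name,
+//         const PartitionBucketMemoryStats*) override {}
+//   };
+//
+//   LoggingStatsDumper dumper;
+//   root->DumpStats("my_partition", /*is_light_dump=*/true, &dumper);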
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_STATS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_superpage_extent_entry.h b/base/allocator/partition_allocator/src/partition_alloc/partition_superpage_extent_entry.h
new file mode 100644
index 0000000..8a9989f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_superpage_extent_entry.h
@@ -0,0 +1,78 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_SUPERPAGE_EXTENT_ENTRY_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_SUPERPAGE_EXTENT_ENTRY_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_types.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_dcheck_helper.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+
+// Should not include partition_root.h, partition_bucket.h, partition_page.h.
+// For IsQuarantineAllowed(), use partition_dcheck_helper.h instead of
+// partition_root.h.
+
+namespace partition_alloc::internal {
+
+// An "extent" is a span of consecutive superpages. We link the partition's next
+// extent (if there is one) to the very start of a superpage's metadata area.
+struct PartitionSuperPageExtentEntry {
+  PartitionRoot* root;
+  PartitionSuperPageExtentEntry* next;
+  uint16_t number_of_consecutive_super_pages;
+  uint16_t number_of_nonempty_slot_spans;
+
+  PA_ALWAYS_INLINE void IncrementNumberOfNonemptySlotSpans() {
+    DCheckNumberOfPartitionPagesInSuperPagePayload(
+        this, root, number_of_nonempty_slot_spans);
+    ++number_of_nonempty_slot_spans;
+  }
+
+  PA_ALWAYS_INLINE void DecrementNumberOfNonemptySlotSpans() {
+    PA_DCHECK(number_of_nonempty_slot_spans);
+    --number_of_nonempty_slot_spans;
+  }
+};
+
+static_assert(
+    sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
+    "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
+static_assert(kMaxSuperPagesInPool / kSuperPageSize <=
+                  std::numeric_limits<
+                      decltype(PartitionSuperPageExtentEntry ::
+                                   number_of_consecutive_super_pages)>::max(),
+              "number_of_consecutive_super_pages must be big enough");
+
+// Returns the base of the first super page in the range of consecutive super
+// pages.
+//
+// CAUTION! |extent| must point to the extent of the first super page in the
+// range of consecutive super pages.
+PA_ALWAYS_INLINE uintptr_t
+SuperPagesBeginFromExtent(const PartitionSuperPageExtentEntry* extent) {
+  PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
+  uintptr_t extent_as_uintptr = reinterpret_cast<uintptr_t>(extent);
+  PA_DCHECK(IsManagedByNormalBuckets(extent_as_uintptr));
+  return base::bits::AlignDown(extent_as_uintptr, kSuperPageAlignment);
+}
+
+// Returns the end of the last super page in the range of consecutive super
+// pages.
+//
+// CAUTION! |extent| must point to the extent of the first super page in the
+// range of consecutive super pages.
+PA_ALWAYS_INLINE uintptr_t
+SuperPagesEndFromExtent(const PartitionSuperPageExtentEntry* extent) {
+  return SuperPagesBeginFromExtent(extent) +
+         (extent->number_of_consecutive_super_pages * kSuperPageSize);
+}
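+
+// Worked example (illustrative only): for an extent whose
+// number_of_consecutive_super_pages is 3, the two helpers above bracket the
+// range [begin, begin + 3 * kSuperPageSize), where begin is the extent's
+// address aligned down to kSuperPageAlignment:
+//
+//   uintptr_t begin = SuperPagesBeginFromExtent(extent);
+//   uintptr_t end = SuperPagesEndFromExtent(extent);
+//   PA_DCHECK(end - begin == 3 * kSuperPageSize);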
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_SUPERPAGE_EXTENT_ENTRY_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_tls.h b/base/allocator/partition_allocator/src/partition_alloc/partition_tls.h
new file mode 100644
index 0000000..44ceba2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_tls.h
@@ -0,0 +1,147 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_TLS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_TLS_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_POSIX)
+#include <pthread.h>
+#endif
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+#endif
+
+// Barebones TLS implementation for use in PartitionAlloc. This doesn't use the
+// general chromium TLS handling to avoid dependencies, but more importantly
+// because it allocates memory.
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+using PartitionTlsKey = pthread_key_t;
+
+// Only on x86_64; the implementation is not stable on ARM64. For instance, in
+// macOS 11 the TPIDRRO_EL0 register holds the CPU index in the low bits,
+// which is not the case in macOS 12. See libsyscall/os/tsd.h in XNU
+// (_os_tsd_get_direct() is used by pthread_getspecific() internally).
+#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
+namespace {
+
+PA_ALWAYS_INLINE void* FastTlsGet(PartitionTlsKey index) {
+  // On macOS, pthread_getspecific() is in libSystem, so a call to it has to go
+  // through PLT. However, and contrary to some other platforms, *all* TLS keys
+  // are in a static array in the thread structure. So they are *always* at a
+  // fixed offset from the segment register holding the thread structure
+  // address.
+  //
+  // We could use _pthread_getspecific_direct(), but it is not
+  // exported. However, on all macOS versions we support, the TLS array is at
+  // %gs. This is used in V8 to back up InternalGetExistingThreadLocal(), and
+  // can also be seen by looking at pthread_getspecific() disassembly:
+  //
+  // libsystem_pthread.dylib`pthread_getspecific:
+  // libsystem_pthread.dylib[0x7ff800316099] <+0>: movq   %gs:(,%rdi,8), %rax
+  // libsystem_pthread.dylib[0x7ff8003160a2] <+9>: retq
+  //
+  // This function is essentially inlining the content of pthread_getspecific()
+  // here.
+  intptr_t result;
+  static_assert(sizeof index <= sizeof(intptr_t));
+  asm("movq %%gs:(,%1,8), %0;"
+      : "=r"(result)
+      : "r"(static_cast<intptr_t>(index)));
+
+  return reinterpret_cast<void*>(result);
+}
+
+}  // namespace
+#endif  // BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
+
+PA_ALWAYS_INLINE bool PartitionTlsCreate(PartitionTlsKey* key,
+                                         void (*destructor)(void*)) {
+  return !pthread_key_create(key, destructor);
+}
+
+PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
+#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
+  PA_DCHECK(pthread_getspecific(key) == FastTlsGet(key));
+  return FastTlsGet(key);
+#else
+  return pthread_getspecific(key);
+#endif
+}
+
+PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
+  int ret = pthread_setspecific(key, value);
+  PA_DCHECK(!ret);
+}
+
+#elif BUILDFLAG(IS_WIN)
+// Note: supports only a single TLS key on Windows. Not a hard constraint, may
+// be lifted.
+using PartitionTlsKey = unsigned long;
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool PartitionTlsCreate(PartitionTlsKey* key, void (*destructor)(void*));
+
+PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
+  // Accessing TLS resets the last error, which then makes |GetLastError()|
+  // return something misleading. While this means that properly using
+  // |GetLastError()| is difficult, there is currently code in Chromium which
+  // expects malloc() to *not* reset it. This means we either have to fix this
+  // code, or pay the cost of saving/restoring it.
+  //
+  // Source:
+  // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-tlsgetvalue
+  // "Functions that return indications of failure call SetLastError() when they
+  // fail. They generally do not call SetLastError() when they succeed. The
+  // TlsGetValue() function is an exception to this general rule. The
+  // TlsGetValue() function calls SetLastError() to clear a thread's last error
+  // when it succeeds."
+  DWORD saved_error = GetLastError();
+  void* ret = TlsGetValue(key);
+  // Only non-zero errors need to be restored.
+  if (PA_UNLIKELY(saved_error)) {
+    SetLastError(saved_error);
+  }
+  return ret;
+}
+
+PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
+  BOOL ret = TlsSetValue(key, value);
+  PA_DCHECK(ret);
+}
+
+// Registers a callback for DLL_PROCESS_DETACH events.
+void PartitionTlsSetOnDllProcessDetach(void (*callback)());
+
+#else
+// Not supported.
+using PartitionTlsKey = int;
+
+PA_ALWAYS_INLINE bool PartitionTlsCreate(PartitionTlsKey* key,
+                                         void (*destructor)(void*)) {
+  // NOTIMPLEMENTED() may allocate, crash instead.
+  PA_IMMEDIATE_CRASH();
+}
+
+PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
+  PA_IMMEDIATE_CRASH();
+}
+
+PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
+  PA_IMMEDIATE_CRASH();
+}
+
+#endif  // BUILDFLAG(IS_WIN)
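+
+// Minimal usage sketch (illustrative only): allocate a key once, then stash
+// and retrieve a per-thread object. ThreadLocalData, CreateThreadLocalData()
+// and FreeThreadLocalData() are assumptions for the example.
+//
+//   PartitionTlsKey g_key;
+//   bool ok = PartitionTlsCreate(&g_key, [](void* data) {
+//     FreeThreadLocalData(static_cast<ThreadLocalData*>(data));
+//   });
+//   PA_CHECK(ok);
+//   PartitionTlsSet(g_key, CreateThreadLocalData());
+//   auto* data = static_cast<ThreadLocalData*>(PartitionTlsGet(g_key));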
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_TLS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_tls_win.cc b/base/allocator/partition_allocator/src/partition_alloc/partition_tls_win.cc
new file mode 100644
index 0000000..5caf32e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/partition_tls_win.cc
@@ -0,0 +1,113 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_tls.h"
+
+#include <windows.h>
+
+namespace partition_alloc::internal {
+
+namespace {
+
+// Store the key as the thread destruction callback doesn't get it.
+PartitionTlsKey g_key;
+void (*g_destructor)(void*) = nullptr;
+void (*g_on_dll_process_detach)() = nullptr;
+
+// Static callback function to call with each thread termination.
+void NTAPI PartitionTlsOnThreadExit(PVOID module,
+                                    DWORD reason,
+                                    PVOID reserved) {
+  if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH) {
+    return;
+  }
+
+  if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach) {
+    g_on_dll_process_detach();
+  }
+
+  if (g_destructor) {
+    void* per_thread_data = PartitionTlsGet(g_key);
+    if (per_thread_data) {
+      g_destructor(per_thread_data);
+    }
+  }
+}
+
+}  // namespace
+
+bool PartitionTlsCreate(PartitionTlsKey* key, void (*destructor)(void*)) {
+  PA_CHECK(g_destructor == nullptr);  // Only one TLS key supported at a time.
+  PartitionTlsKey value = TlsAlloc();
+  if (value != TLS_OUT_OF_INDEXES) {
+    *key = value;
+
+    g_key = value;
+    g_destructor = destructor;
+    return true;
+  }
+  return false;
+}
+
+void PartitionTlsSetOnDllProcessDetach(void (*callback)()) {
+  g_on_dll_process_detach = callback;
+}
+
+}  // namespace partition_alloc::internal
+
+// See thread_local_storage_win.cc for details and reference.
+//
+// The callback has to be in any section between .CRT$XLA and .CRT$XLZ, as these
+// are sentinels used by the TLS code to find the callback array bounds. As we
+// don't particularly care about where we are called but would prefer to be
+// deinitialized towards the end (in particular after Chromium's TLS), we locate
+// ourselves in .CRT$XLY.
+
+// Force a reference to _tls_used to make the linker create the TLS directory if
+// it's not already there.  (e.g. if __declspec(thread) is not used).  Force a
+// reference to partition_tls_thread_exit_callback to prevent whole program
+// optimization from discarding the variable.
+#ifdef _WIN64
+
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:partition_tls_thread_exit_callback")
+
+#else  // _WIN64
+
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_partition_tls_thread_exit_callback")
+
+#endif  // _WIN64
+
+// extern "C" suppresses C++ name mangling so we know the symbol name for the
+// linker /INCLUDE:symbol pragma above.
+extern "C" {
+// The linker must not discard partition_tls_thread_exit_callback.  (We force a
+// reference to this variable with a linker /INCLUDE:symbol pragma to ensure
+// that.) If this variable is discarded, PartitionTlsOnThreadExit will never be
+// called.
+#ifdef _WIN64
+
+// .CRT section is merged with .rdata on x64 so it must be constant data.
+#pragma const_seg(".CRT$XLY")
+// When defining a const variable, it must have external linkage to be sure the
+// linker doesn't discard it.
+extern const PIMAGE_TLS_CALLBACK partition_tls_thread_exit_callback;
+const PIMAGE_TLS_CALLBACK partition_tls_thread_exit_callback =
+    partition_alloc::internal::PartitionTlsOnThreadExit;
+
+// Reset the default section.
+#pragma const_seg()
+
+#else  // _WIN64
+
+#pragma data_seg(".CRT$XLY")
+PIMAGE_TLS_CALLBACK partition_tls_thread_exit_callback =
+    partition_alloc::internal::PartitionTlsOnThreadExit;
+
+// Reset the default section.
+#pragma data_seg()
+
+#endif  // _WIN64
+}       // extern "C"
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h
new file mode 100644
index 0000000..a70a181
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h
@@ -0,0 +1,1103 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cstddef>
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/flags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/raw_ptr_buildflags.h"
+#include "build/build_config.h"
+#include "build/buildflag.h"
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types.h"
+#endif
+
+#if BUILDFLAG(USE_PARTITION_ALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+// Live implementation of MiraclePtr being built.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+    BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#define PA_RAW_PTR_CHECK(condition) PA_BASE_CHECK(condition)
+#else
+// No-op implementation of MiraclePtr being built.
+// Note that `PA_BASE_DCHECK()` evaporates from non-DCHECK builds,
+// minimizing impact of generated code.
+#define PA_RAW_PTR_CHECK(condition) PA_BASE_DCHECK(condition)
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#else   // BUILDFLAG(USE_PARTITION_ALLOC)
+// Without PartitionAlloc, there's no `PA_BASE_D?CHECK()` implementation
+// available.
+#define PA_RAW_PTR_CHECK(condition)
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC)
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.h"
+#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.h"
+#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.h"
+#else
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_noop_impl.h"
+#endif
+
+namespace cc {
+class Scheduler;
+}
+namespace base::internal {
+class DelayTimerBase;
+}
+namespace content::responsiveness {
+class Calculator;
+}
+
+namespace partition_alloc::internal {
+
+// NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a
+// lightweight replacement for a raw pointer, hence performance is critical.
+
+// This is a bitfield representing the different flags that can be applied to a
+// raw_ptr.
+//
+// Internal use only: Developers shouldn't use those values directly.
+//
+// Housekeeping rules: Try not to change trait values, so that numeric trait
+// values stay constant across builds (could be useful e.g. when analyzing stack
+// traces). A reasonable exception to this rule is the `*ForTest` traits. In
+// fact, we propose that new non-test traits be added before the `*ForTest`
+// traits.
+enum class RawPtrTraits : unsigned {
+  kEmpty = 0,
+
+  // Disables dangling pointer detection, but keeps other raw_ptr protections.
+  //
+  // Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged
+  // instead.
+  kMayDangle = (1 << 0),
+
+  // Disables any hooks, when building with BUILDFLAG(USE_HOOKABLE_RAW_PTR).
+  //
+  // Internal use only.
+  kDisableHooks = (1 << 2),
+
+  // Pointer arithmetic is discouraged and disabled by default.
+  //
+  // Don't use directly, use AllowPtrArithmetic instead.
+  kAllowPtrArithmetic = (1 << 3),
+
+  // This pointer is evaluated by a separate, Ash-related experiment.
+  //
+  // Don't use directly, use ExperimentalAsh instead.
+  kExperimentalAsh = (1 << 4),
+
+  // Uninitialized pointers are discouraged and disabled by default.
+  //
+  // Don't use directly, use AllowUninitialized instead.
+  kAllowUninitialized = (1 << 5),
+
+  // *** ForTest traits below ***
+
+  // Adds accounting, on top of the NoOp implementation, for test purposes.
+  // raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track
+  // the number of times the raw_ptr is wrapped, unwrapped, etc.
+  //
+  // Test only. Include raw_ptr_counting_impl_for_test.h in your test
+  // files when using this trait.
+  kUseCountingImplForTest = (1 << 10),
+
+  // Helper trait that can be used to test raw_ptr's behaviour or conversions.
+  //
+  // Test only.
+  kDummyForTest = (1 << 11),
+
+  kAllMask = kMayDangle | kDisableHooks | kAllowPtrArithmetic |
+             kExperimentalAsh | kAllowUninitialized | kUseCountingImplForTest |
+             kDummyForTest,
+};
+// Template specialization to use |PA_DEFINE_OPERATORS_FOR_FLAGS| without
+// |kMaxValue| declaration.
+template <>
+constexpr inline RawPtrTraits kAllFlags<RawPtrTraits> = RawPtrTraits::kAllMask;
+PA_DEFINE_OPERATORS_FOR_FLAGS(RawPtrTraits);
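+
+// Example of how traits combine (illustrative only): the flag operators
+// defined above allow expressions such as
+//
+//   constexpr auto kTraits =
+//       RawPtrTraits::kMayDangle | RawPtrTraits::kAllowPtrArithmetic;
+//   static_assert(ContainsFlags(kTraits, RawPtrTraits::kMayDangle));
+//
+// In client code, prefer the named aliases (e.g. DisableDanglingPtrDetection,
+// AllowPtrArithmetic) over raw trait values, per the housekeeping rules above.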
+
+}  // namespace partition_alloc::internal
+
+namespace base {
+using partition_alloc::internal::RawPtrTraits;
+
+namespace raw_ptr_traits {
+
+// IsSupportedType<T>::value answers whether raw_ptr<T> 1) compiles and 2) is
+// always safe at runtime.  Templates that may end up using `raw_ptr<T>` should
+// use IsSupportedType to ensure that raw_ptr is not used with unsupported
+// types.  As an example, see how base::internal::StorageTraits uses
+// IsSupportedType as a condition for using base::internal::UnretainedWrapper
+// (which has a `ptr_` field that will become `raw_ptr<T>` after the Big
+// Rewrite).
+template <typename T, typename SFINAE = void>
+struct IsSupportedType {
+  static constexpr bool value = true;
+};
+
+// raw_ptr<T> is not compatible with function pointer types. Also, they don't
+// even need the raw_ptr protection, because they don't point to the heap.
+template <typename T>
+struct IsSupportedType<T, std::enable_if_t<std::is_function_v<T>>> {
+  static constexpr bool value = false;
+};
+
+// This section excludes some types from raw_ptr<T> to prevent them from being
+// used inside base::Unretained in performance-sensitive places. These were
+// identified from sampling profiler data. See crbug.com/1287151 for more info.
+template <>
+struct IsSupportedType<cc::Scheduler> {
+  static constexpr bool value = false;
+};
+template <>
+struct IsSupportedType<base::internal::DelayTimerBase> {
+  static constexpr bool value = false;
+};
+template <>
+struct IsSupportedType<content::responsiveness::Calculator> {
+  static constexpr bool value = false;
+};
+
+#if __OBJC__
+// raw_ptr<T> is not compatible with pointers to Objective-C classes for a
+// multitude of reasons. They may fail to compile in many cases, and wouldn't
+// work well with tagged pointers. Anyway, Objective-C objects have their own
+// way of tracking lifespan, hence don't need the raw_ptr protection as much.
+//
+// Such pointers are detected by checking if they're convertible to |id| type.
+template <typename T>
+struct IsSupportedType<T, std::enable_if_t<std::is_convertible_v<T*, id>>> {
+  static constexpr bool value = false;
+};
+#endif  // __OBJC__
+
+#if BUILDFLAG(IS_WIN)
+// raw_ptr<HWND__> is unsafe at runtime - if the handle happens to also
+// represent a valid pointer into a PartitionAlloc-managed region then it can
+// lead to manipulating random memory when treating it as BackupRefPtr
+// ref-count.  See also https://crbug.com/1262017.
+//
+// TODO(https://crbug.com/1262017): Cover other handle types like HANDLE,
+// HLOCAL, HINTERNET, or HDEVINFO.  Maybe we should avoid using raw_ptr<T> when
+// T=void (as is the case in these handle types).  OTOH, explicit,
+// non-template-based raw_ptr<void> should be allowed.  Maybe this can be solved
+// by having 2 traits: IsPointeeAlwaysSafe (to be used in templates) and
+// IsPointeeUsuallySafe (to be used in the static_assert in raw_ptr).  The
+// upside of this approach is that it will safely handle base::Bind closing over
+// HANDLE.  The downside of this approach is that base::Bind closing over a
+// void* pointer will not get UaF protection.
+#define PA_WINDOWS_HANDLE_TYPE(name)       \
+  template <>                              \
+  struct IsSupportedType<name##__, void> { \
+    static constexpr bool value = false;   \
+  };
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/win_handle_types_list.inc"
+#undef PA_WINDOWS_HANDLE_TYPE
+#endif
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+template <RawPtrTraits Traits>
+using UnderlyingImplForTraits = internal::RawPtrBackupRefImpl<
+    /*AllowDangling=*/ContainsFlags(Traits, RawPtrTraits::kMayDangle),
+    /*ExperimentalAsh=*/ContainsFlags(Traits, RawPtrTraits::kExperimentalAsh)>;
+
+#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
+template <RawPtrTraits Traits>
+using UnderlyingImplForTraits = internal::RawPtrAsanUnownedImpl<
+    ContainsFlags(Traits, RawPtrTraits::kAllowPtrArithmetic),
+    ContainsFlags(Traits, RawPtrTraits::kMayDangle)>;
+
+#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+template <RawPtrTraits Traits>
+using UnderlyingImplForTraits = internal::RawPtrHookableImpl<
+    /*EnableHooks=*/!ContainsFlags(Traits, RawPtrTraits::kDisableHooks)>;
+
+#else
+template <RawPtrTraits Traits>
+using UnderlyingImplForTraits = internal::RawPtrNoOpImpl;
+#endif
+
+constexpr bool IsPtrArithmeticAllowed(RawPtrTraits Traits) {
+#if BUILDFLAG(ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK)
+  return ContainsFlags(Traits, RawPtrTraits::kAllowPtrArithmetic);
+#else
+  return true;
+#endif
+}
+
+}  // namespace raw_ptr_traits
+
+namespace test {
+
+struct RawPtrCountingImplForTest;
+
+}  // namespace test
+
+namespace raw_ptr_traits {
+
+// ImplForTraits is the struct that implements raw_ptr functions. Think of
+// raw_ptr as a thin wrapper, that directs calls to ImplForTraits. ImplForTraits
+// may be different from UnderlyingImplForTraits, because it may select a
+// test impl instead.
+template <RawPtrTraits Traits>
+using ImplForTraits =
+    std::conditional_t<ContainsFlags(Traits,
+                                     RawPtrTraits::kUseCountingImplForTest),
+                       test::RawPtrCountingImplForTest,
+                       UnderlyingImplForTraits<Traits>>;
+
+}  // namespace raw_ptr_traits
+
+// `raw_ptr<T>` is a non-owning smart pointer that has improved memory-safety
+// over raw pointers. See the documentation for details:
+// https://source.chromium.org/chromium/chromium/src/+/main:base/memory/raw_ptr.md
+//
+// raw_ptr<T> is marked as [[gsl::Pointer]] which allows the compiler to catch
+// some bugs where the raw_ptr holds a dangling pointer to a temporary object.
+// However the [[gsl::Pointer]] analysis expects that such types do not have a
+// non-default move constructor/assignment. Thus, it's possible to get an error
+// where the pointer is not actually dangling, and have to work around the
+// compiler. We have not managed to construct such an example in Chromium yet.
+template <typename T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
+class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
+ public:
+  using Impl = typename raw_ptr_traits::ImplForTraits<Traits>;
+  // Needed to make gtest Pointee matcher work with raw_ptr.
+  using element_type = T;
+  using DanglingType = raw_ptr<T, Traits | RawPtrTraits::kMayDangle>;
+
+#if !BUILDFLAG(USE_PARTITION_ALLOC)
+  // See comment at top about `PA_RAW_PTR_CHECK()`.
+  static_assert(std::is_same_v<Impl, internal::RawPtrNoOpImpl>);
+#endif  // !BUILDFLAG(USE_PARTITION_ALLOC)
+
+  static_assert(AreValidFlags(Traits), "Unknown raw_ptr trait(s)");
+  static_assert(raw_ptr_traits::IsSupportedType<T>::value,
+                "raw_ptr<T> doesn't work with this kind of pointee type T");
+
+  static constexpr bool kZeroOnConstruct =
+      Impl::kMustZeroOnConstruct ||
+      (BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT) &&
+       !ContainsFlags(Traits, RawPtrTraits::kAllowUninitialized));
+  static constexpr bool kZeroOnMove =
+      Impl::kMustZeroOnMove || BUILDFLAG(RAW_PTR_ZERO_ON_MOVE);
+  static constexpr bool kZeroOnDestruct =
+      Impl::kMustZeroOnDestruct || BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT);
+
+// A non-trivial default ctor is required for complex implementations (e.g.
+// BackupRefPtr), or even for NoOpImpl when zeroing is requested.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||                           \
+    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
+    BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT)
+  PA_ALWAYS_INLINE constexpr raw_ptr() noexcept {
+    if constexpr (kZeroOnConstruct) {
+      wrapped_ptr_ = nullptr;
+    }
+  }
+#else
+  // raw_ptr can be trivially default constructed (leaving |wrapped_ptr_|
+  // uninitialized).
+  PA_ALWAYS_INLINE constexpr raw_ptr() noexcept = default;
+  static_assert(!kZeroOnConstruct);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
+        // BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT)
+
+// A non-trivial copy ctor and assignment operator are required for complex
+// implementations (e.g. BackupRefPtr). Unlike the surrounding blocks, we don't
+// need these for NoOpImpl even when zeroing is requested; better to keep them
+// trivial.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+  PA_ALWAYS_INLINE constexpr raw_ptr(const raw_ptr& p) noexcept
+      : wrapped_ptr_(Impl::Duplicate(p.wrapped_ptr_)) {}
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(const raw_ptr& p) noexcept {
+    // Duplicate before releasing, in case the pointer is assigned to itself.
+    //
+    // Unlike the move version of this operator, don't add |this != &p| branch,
+    // for performance reasons. Even though Duplicate() is not cheap, we
+    // practically never assign a raw_ptr<T> to itself. We suspect that a
+    // cumulative cost of a conditional branch, even if always correctly
+    // predicted, would exceed that.
+    T* new_ptr = Impl::Duplicate(p.wrapped_ptr_);
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    wrapped_ptr_ = new_ptr;
+    return *this;
+  }
+#else
+  PA_ALWAYS_INLINE raw_ptr(const raw_ptr&) noexcept = default;
+  PA_ALWAYS_INLINE raw_ptr& operator=(const raw_ptr&) noexcept = default;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+
+// A non-trivial move ctor and assignment operator are required for complex
+// implementations (e.g. BackupRefPtr), or even for NoOpImpl when zeroing is
+// requested.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||                           \
+    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
+    BUILDFLAG(RAW_PTR_ZERO_ON_MOVE)
+  PA_ALWAYS_INLINE constexpr raw_ptr(raw_ptr&& p) noexcept {
+    wrapped_ptr_ = p.wrapped_ptr_;
+    if constexpr (kZeroOnMove) {
+      p.wrapped_ptr_ = nullptr;
+    }
+  }
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(raw_ptr&& p) noexcept {
+    // Unlike the copy version of this operator, this branch is necessary
+    // for correctness.
+    if (PA_LIKELY(this != &p)) {
+      Impl::ReleaseWrappedPtr(wrapped_ptr_);
+      wrapped_ptr_ = p.wrapped_ptr_;
+      if constexpr (kZeroOnMove) {
+        p.wrapped_ptr_ = nullptr;
+      }
+    }
+    return *this;
+  }
+#else
+  PA_ALWAYS_INLINE raw_ptr(raw_ptr&&) noexcept = default;
+  PA_ALWAYS_INLINE raw_ptr& operator=(raw_ptr&&) noexcept = default;
+  static_assert(!kZeroOnMove);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
+        // BUILDFLAG(RAW_PTR_ZERO_ON_MOVE)
+
+// A non-trivial default dtor is required for complex implementations (e.g.
+// BackupRefPtr), or even for NoOpImpl when zeroing is requested.
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||                           \
+    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
+    BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+  PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR ~raw_ptr() noexcept {
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    // Work around external issues where raw_ptr is used after destruction.
+    if constexpr (kZeroOnDestruct) {
+      wrapped_ptr_ = nullptr;
+    }
+  }
+#else
+  PA_ALWAYS_INLINE ~raw_ptr() noexcept = default;
+  static_assert(!kZeroOnDestruct);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
+        // BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+
+  // Cross-kind copy constructor.
+  // Move is not supported as different traits may use different ref-counts, so
+  // let move operations degrade to copy, which handles it well.
+  template <RawPtrTraits PassedTraits,
+            typename Unused = std::enable_if_t<Traits != PassedTraits>>
+  PA_ALWAYS_INLINE constexpr explicit raw_ptr(
+      const raw_ptr<T, PassedTraits>& p) noexcept
+      : wrapped_ptr_(Impl::WrapRawPtrForDuplication(
+            raw_ptr_traits::ImplForTraits<PassedTraits>::
+                UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {
+    // Limit cross-kind conversions only to cases where kMayDangle gets added,
+    // because that's needed for Unretained(Ref)Wrapper. Use a static_assert,
+    // instead of disabling via SFINAE, so that the compiler catches other
+    // conversions. Otherwise implicit raw_ptr<T> -> T* -> raw_ptr<> route will
+    // be taken.
+    static_assert(Traits == (PassedTraits | RawPtrTraits::kMayDangle));
+  }
+
+  // Cross-kind assignment.
+  // Move is not supported as different traits may use different ref-counts, so
+  // let move operations degrade to copy, which handles it well.
+  template <RawPtrTraits PassedTraits,
+            typename Unused = std::enable_if_t<Traits != PassedTraits>>
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(
+      const raw_ptr<T, PassedTraits>& p) noexcept {
+    // Limit cross-kind assignments only to cases where `kMayDangle` gets added,
+    // because that's needed for Unretained(Ref)Wrapper. Use a static_assert,
+    // instead of disabling via SFINAE, so that the compiler catches other
+    // conversions. Otherwise implicit raw_ptr<T> -> T* -> raw_ptr<> route will
+    // be taken.
+    static_assert(Traits == (PassedTraits | RawPtrTraits::kMayDangle));
+
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    wrapped_ptr_ = Impl::WrapRawPtrForDuplication(
+        raw_ptr_traits::ImplForTraits<
+            PassedTraits>::UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_));
+    return *this;
+  }
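+
+  // Cross-kind conversion example (illustrative only): the static_asserts
+  // above only admit conversions that add kMayDangle, e.g.
+  //
+  //   raw_ptr<T> ptr = ...;
+  //   raw_ptr<T, RawPtrTraits::kMayDangle> dangling_ok(ptr);  // OK.
+  //   // raw_ptr<T, RawPtrTraits::kDisableHooks> other(ptr);  // Won't compile.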
+
+  // Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
+  // Ignore kZeroOnConstruct, because here the caller explicitly wishes to
+  // initialize with nullptr.
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr raw_ptr(std::nullptr_t) noexcept
+      : wrapped_ptr_(nullptr) {}
+
+  // Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr raw_ptr(T* p) noexcept
+      : wrapped_ptr_(Impl::WrapRawPtr(p)) {}
+
+  // Deliberately implicit in order to support implicit upcast.
+  template <typename U,
+            typename Unused = std::enable_if_t<
+                std::is_convertible_v<U*, T*> &&
+                !std::is_void_v<typename std::remove_cv<T>::type>>>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr raw_ptr(const raw_ptr<U, Traits>& ptr) noexcept
+      : wrapped_ptr_(
+            Impl::Duplicate(Impl::template Upcast<T, U>(ptr.wrapped_ptr_))) {}
+  // Deliberately implicit in order to support implicit upcast.
+  template <typename U,
+            typename Unused = std::enable_if_t<
+                std::is_convertible_v<U*, T*> &&
+                !std::is_void_v<typename std::remove_cv<T>::type>>>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr raw_ptr(raw_ptr<U, Traits>&& ptr) noexcept
+      : wrapped_ptr_(Impl::template Upcast<T, U>(ptr.wrapped_ptr_)) {
+    if constexpr (kZeroOnMove) {
+      ptr.wrapped_ptr_ = nullptr;
+    }
+  }
+
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(std::nullptr_t) noexcept {
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    wrapped_ptr_ = nullptr;
+    return *this;
+  }
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(T* p) noexcept {
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    wrapped_ptr_ = Impl::WrapRawPtr(p);
+    return *this;
+  }
+
+  // Upcast assignment
+  template <typename U,
+            typename Unused = std::enable_if_t<
+                std::is_convertible_v<U*, T*> &&
+                !std::is_void_v<typename std::remove_cv<T>::type>>>
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(
+      const raw_ptr<U, Traits>& ptr) noexcept {
+    // Make sure that pointer isn't assigned to itself (look at raw_ptr address,
+    // not its contained pointer value). The comparison is only needed when they
+    // are the same type, otherwise they can't be the same raw_ptr object.
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    if constexpr (std::is_same_v<raw_ptr, std::decay_t<decltype(ptr)>>) {
+      PA_RAW_PTR_CHECK(this != &ptr);
+    }
+#endif
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    wrapped_ptr_ =
+        Impl::Duplicate(Impl::template Upcast<T, U>(ptr.wrapped_ptr_));
+    return *this;
+  }
+  template <typename U,
+            typename Unused = std::enable_if_t<
+                std::is_convertible_v<U*, T*> &&
+                !std::is_void_v<typename std::remove_cv<T>::type>>>
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator=(
+      raw_ptr<U, Traits>&& ptr) noexcept {
+    // Make sure that pointer isn't assigned to itself (look at raw_ptr address,
+    // not its contained pointer value). The comparison is only needed when they
+    // are the same type, otherwise they can't be the same raw_ptr object.
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    if constexpr (std::is_same_v<raw_ptr, std::decay_t<decltype(ptr)>>) {
+      PA_RAW_PTR_CHECK(this != &ptr);
+    }
+#endif
+    Impl::ReleaseWrappedPtr(wrapped_ptr_);
+    wrapped_ptr_ = Impl::template Upcast<T, U>(ptr.wrapped_ptr_);
+    if constexpr (kZeroOnMove) {
+      ptr.wrapped_ptr_ = nullptr;
+    }
+    return *this;
+  }
+
+  // Avoid using. The goal of raw_ptr is to be as close to raw pointer as
+  // possible, so use it only if absolutely necessary (e.g. for const_cast).
+  PA_ALWAYS_INLINE constexpr T* get() const { return GetForExtraction(); }
+
+  // You may use |raw_ptr<T>::AsEphemeralRawAddr()| to obtain |T**| or |T*&|
+  // from |raw_ptr<T>|, as long as you follow these requirements:
+  // - DO NOT carry T**/T*& obtained via AsEphemeralRawAddr() out of
+  //   expression.
+  // - DO NOT use raw_ptr or T**/T*& multiple times within an expression.
+  //
+  // https://chromium.googlesource.com/chromium/src/+/main/base/memory/raw_ptr.md#in_out-arguments-need-to-be-refactored
+  class EphemeralRawAddr {
+   public:
+    EphemeralRawAddr(const EphemeralRawAddr&) = delete;
+    EphemeralRawAddr& operator=(const EphemeralRawAddr&) = delete;
+    void* operator new(size_t) = delete;
+    void* operator new(size_t, void*) = delete;
+    PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR ~EphemeralRawAddr() { original = copy; }
+
+    PA_ALWAYS_INLINE constexpr T** operator&() && { return &copy; }
+    // NOLINTNEXTLINE(google-explicit-constructor)
+    PA_ALWAYS_INLINE constexpr operator T*&() && { return copy; }
+
+   private:
+    friend class raw_ptr;
+    PA_ALWAYS_INLINE constexpr explicit EphemeralRawAddr(raw_ptr& ptr)
+        : copy(ptr.get()), original(ptr) {}
+    T* copy;
+    raw_ptr& original;  // Original pointer.
+  };
+  PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR EphemeralRawAddr AsEphemeralRawAddr() & {
+    return EphemeralRawAddr(*this);
+  }
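+
+  // Usage sketch for AsEphemeralRawAddr() (illustrative only; GetOutput() is a
+  // hypothetical C-style out-parameter API):
+  //
+  //   void GetOutput(T** out);
+  //   raw_ptr<T> ptr;
+  //   GetOutput(&ptr.AsEphemeralRawAddr());  // T** valid only within this
+  //                                          // full expression.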
+
+  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
+    return !!wrapped_ptr_;
+  }
+
+  template <typename U = T,
+            typename Unused = std::enable_if_t<
+                !std::is_void_v<typename std::remove_cv<U>::type>>>
+  PA_ALWAYS_INLINE constexpr U& operator*() const {
+    return *GetForDereference();
+  }
+  PA_ALWAYS_INLINE constexpr T* operator->() const {
+    return GetForDereference();
+  }
+
+  // Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr operator T*() const { return GetForExtraction(); }
+  template <typename U>
+  PA_ALWAYS_INLINE constexpr explicit operator U*() const {
+    // This operator may be invoked from static_cast, meaning the types may not
+    // be implicitly convertible, hence the need for static_cast here.
+    return static_cast<U*>(GetForExtraction());
+  }
+
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator++() {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
+    wrapped_ptr_ = Impl::Advance(wrapped_ptr_, 1);
+    return *this;
+  }
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator--() {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
+    wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, 1);
+    return *this;
+  }
+  PA_ALWAYS_INLINE constexpr raw_ptr operator++(int /* post_increment */) {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
+    raw_ptr result = *this;
+    ++(*this);
+    return result;
+  }
+  PA_ALWAYS_INLINE constexpr raw_ptr operator--(int /* post_decrement */) {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
+    raw_ptr result = *this;
+    --(*this);
+    return result;
+  }
+  template <
+      typename Z,
+      typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator+=(Z delta_elems) {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
+    wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems);
+    return *this;
+  }
+  template <
+      typename Z,
+      typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
+  PA_ALWAYS_INLINE constexpr raw_ptr& operator-=(Z delta_elems) {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
+    wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, delta_elems);
+    return *this;
+  }
+
+  template <typename Z,
+            typename U = T,
+            RawPtrTraits CopyTraits = Traits,
+            typename Unused = std::enable_if_t<
+                !std::is_void_v<typename std::remove_cv<U>::type> &&
+                partition_alloc::internal::is_offset_type<Z>>>
+  U& operator[](Z delta_elems) const {
+    static_assert(
+        raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
+        "cannot index raw_ptr unless AllowPtrArithmetic trait is present.");
+    return wrapped_ptr_[delta_elems];
+  }
+
+  // Do not disable operator+() and operator-().
+  // They provide OOB checks, which prevent an arbitrary value from being
+  // assigned to raw_ptr; otherwise BRP could end up modifying arbitrary
+  // memory, mistaking it for a ref-count. Keep them enabled; the result may
+  // still be rejected later, when += or -= is applied, if those operators are
+  // disabled. In the absence of operators +/-, the compiler is free to
+  // implicitly convert to the underlying T* representation and perform
+  // ordinary pointer arithmetic, thus invalidating the purpose behind
+  // disabling them.
+  template <typename Z>
+  PA_ALWAYS_INLINE friend constexpr raw_ptr operator+(const raw_ptr& p,
+                                                      Z delta_elems) {
+    raw_ptr result = p;
+    return result += delta_elems;
+  }
+  template <typename Z>
+  PA_ALWAYS_INLINE friend constexpr raw_ptr operator-(const raw_ptr& p,
+                                                      Z delta_elems) {
+    raw_ptr result = p;
+    return result -= delta_elems;
+  }
+
+  PA_ALWAYS_INLINE friend constexpr ptrdiff_t operator-(const raw_ptr& p1,
+                                                        const raw_ptr& p2) {
+    return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_);
+  }
+  PA_ALWAYS_INLINE friend constexpr ptrdiff_t operator-(T* p1,
+                                                        const raw_ptr& p2) {
+    return Impl::GetDeltaElems(p1, p2.wrapped_ptr_);
+  }
+  PA_ALWAYS_INLINE friend constexpr ptrdiff_t operator-(const raw_ptr& p1,
+                                                        T* p2) {
+    return Impl::GetDeltaElems(p1.wrapped_ptr_, p2);
+  }
+
+  // Stop referencing the underlying pointer and free its memory. Compared to
+  // raw delete calls, this avoids the raw_ptr being temporarily dangling
+  // during the free operation, which would lead to taking the slower path that
+  // involves quarantine.
+  PA_ALWAYS_INLINE constexpr void ClearAndDelete() noexcept {
+    delete GetForExtractionAndReset();
+  }
+  PA_ALWAYS_INLINE constexpr void ClearAndDeleteArray() noexcept {
+    delete[] GetForExtractionAndReset();
+  }
+
+  // Clear the underlying pointer and return another raw_ptr instance
+  // that is allowed to dangle.
+  // This can be useful in cases such as:
+  // ```
+  //  ptr.ExtractAsDangling()->SelfDestroy();
+  // ```
+  // ```
+  //  c_style_api_do_something_and_destroy(ptr.ExtractAsDangling());
+  // ```
+  // NOTE: avoid using this method as it indicates an error-prone memory
+  // ownership pattern. If possible, use smart pointers like std::unique_ptr<>
+  // instead of raw_ptr<>.
+  // If you have to use it, avoid saving the return value in a long-lived
+  // variable (or worse, a field)! It's meant to be used as a temporary, to be
+  // passed into a cleanup & freeing function, and destructed at the end of the
+  // statement.
+  PA_ALWAYS_INLINE constexpr DanglingType ExtractAsDangling() noexcept {
+    DanglingType res(std::move(*this));
+    // Not all implementations clear the source pointer on move. Furthermore,
+    // even for implementations that do, cross-kind conversions (which add
+    // kMayDangle) fall back to a copy instead of a move. So do it here just in
+    // case. Should be cheap.
+    operator=(nullptr);
+    return res;
+  }
+
+  // Comparison operators between raw_ptr and raw_ptr<U>/U*/std::nullptr_t.
+  // Strictly speaking, it is not necessary to provide these: the compiler can
+  // use the conversion operator implicitly to allow comparisons to fall back to
+  // comparisons between raw pointers. However, `operator T*`/`operator U*` may
+  // perform safety checks with a higher runtime cost, so to avoid this, provide
+  // explicit comparison operators for all combinations of parameters.
+
+  // Comparisons between `raw_ptr`s. This unusual declaration and separate
+  // definition below is because `GetForComparison()` is a private method. The
+  // more conventional approach of defining a comparison operator between
+  // `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work,
+  // because a comparison operator defined inline would not be allowed to call
+  // `raw_ptr<U>`'s private `GetForComparison()` method.
+  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
+  friend bool operator==(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
+  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
+  friend bool operator!=(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
+  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
+  friend bool operator<(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
+  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
+  friend bool operator>(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
+  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
+  friend bool operator<=(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
+  template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
+  friend bool operator>=(const raw_ptr<U, R1>& lhs, const raw_ptr<V, R2>& rhs);
+
+  // Comparisons with U*. These operators also handle the case where the RHS is
+  // T*.
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator==(const raw_ptr& lhs, U* rhs) {
+    return lhs.GetForComparison() == rhs;
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator!=(const raw_ptr& lhs, U* rhs) {
+    return !(lhs == rhs);
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator==(U* lhs, const raw_ptr& rhs) {
+    return rhs == lhs;  // Reverse order to call the operator above.
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator!=(U* lhs, const raw_ptr& rhs) {
+    return rhs != lhs;  // Reverse order to call the operator above.
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator<(const raw_ptr& lhs, U* rhs) {
+    return lhs.GetForComparison() < rhs;
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator<=(const raw_ptr& lhs, U* rhs) {
+    return lhs.GetForComparison() <= rhs;
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator>(const raw_ptr& lhs, U* rhs) {
+    return lhs.GetForComparison() > rhs;
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator>=(const raw_ptr& lhs, U* rhs) {
+    return lhs.GetForComparison() >= rhs;
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator<(U* lhs, const raw_ptr& rhs) {
+    return lhs < rhs.GetForComparison();
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator<=(U* lhs, const raw_ptr& rhs) {
+    return lhs <= rhs.GetForComparison();
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator>(U* lhs, const raw_ptr& rhs) {
+    return lhs > rhs.GetForComparison();
+  }
+  template <typename U>
+  PA_ALWAYS_INLINE friend bool operator>=(U* lhs, const raw_ptr& rhs) {
+    return lhs >= rhs.GetForComparison();
+  }
+
+  // Comparisons with `std::nullptr_t`.
+  PA_ALWAYS_INLINE friend bool operator==(const raw_ptr& lhs, std::nullptr_t) {
+    return !lhs;
+  }
+  PA_ALWAYS_INLINE friend bool operator!=(const raw_ptr& lhs, std::nullptr_t) {
+    return !!lhs;  // Use !! otherwise the costly implicit cast will be used.
+  }
+  PA_ALWAYS_INLINE friend bool operator==(std::nullptr_t, const raw_ptr& rhs) {
+    return !rhs;
+  }
+  PA_ALWAYS_INLINE friend bool operator!=(std::nullptr_t, const raw_ptr& rhs) {
+    return !!rhs;  // Use !! otherwise the costly implicit cast will be used.
+  }
+
+  PA_ALWAYS_INLINE friend constexpr void swap(raw_ptr& lhs,
+                                              raw_ptr& rhs) noexcept {
+    Impl::IncrementSwapCountForTest();
+    std::swap(lhs.wrapped_ptr_, rhs.wrapped_ptr_);
+  }
+
+  PA_ALWAYS_INLINE void ReportIfDangling() const noexcept {
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    Impl::ReportIfDangling(wrapped_ptr_);
+#endif
+  }
+
+ private:
+  // This getter is meant for situations where the pointer is meant to be
+  // dereferenced. It is allowed (but not required) to crash on nullptr,
+  // because the caller is expected to crash on nullptr anyway.
+  PA_ALWAYS_INLINE constexpr T* GetForDereference() const {
+    return Impl::SafelyUnwrapPtrForDereference(wrapped_ptr_);
+  }
+  // This getter is meant for situations where the raw pointer is meant to be
+  // extracted outside of this class, but not necessarily with an intention to
+  // dereference. It mustn't crash on nullptr.
+  PA_ALWAYS_INLINE constexpr T* GetForExtraction() const {
+    return Impl::SafelyUnwrapPtrForExtraction(wrapped_ptr_);
+  }
+  // This getter is meant *only* for situations where the pointer is to be
+  // compared (guaranteeing no dereference or extraction outside of this class).
+  // Any verifications can and should be skipped for performance reasons.
+  PA_ALWAYS_INLINE constexpr T* GetForComparison() const {
+    return Impl::UnsafelyUnwrapPtrForComparison(wrapped_ptr_);
+  }
+
+  PA_ALWAYS_INLINE constexpr T* GetForExtractionAndReset() {
+    T* ptr = GetForExtraction();
+    operator=(nullptr);
+    return ptr;
+  }
+
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #union, #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION T* wrapped_ptr_;
+
+  template <typename U, base::RawPtrTraits R>
+  friend class raw_ptr;
+};
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs,
+                                 const raw_ptr<V, Traits2>& rhs) {
+  return lhs.GetForComparison() == rhs.GetForComparison();
+}
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs,
+                                 const raw_ptr<V, Traits2>& rhs) {
+  return !(lhs == rhs);
+}
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs,
+                                const raw_ptr<V, Traits2>& rhs) {
+  return lhs.GetForComparison() < rhs.GetForComparison();
+}
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs,
+                                const raw_ptr<V, Traits2>& rhs) {
+  return lhs.GetForComparison() > rhs.GetForComparison();
+}
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs,
+                                 const raw_ptr<V, Traits2>& rhs) {
+  return lhs.GetForComparison() <= rhs.GetForComparison();
+}
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
+                                 const raw_ptr<V, Traits2>& rhs) {
+  return lhs.GetForComparison() >= rhs.GetForComparison();
+}
+
+template <typename T>
+struct IsRawPtr : std::false_type {};
+
+template <typename T, RawPtrTraits Traits>
+struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {};
+
+template <typename T>
+inline constexpr bool IsRawPtrV = IsRawPtr<T>::value;
+
+template <typename T>
+inline constexpr bool IsRawPtrMayDangleV = false;
+
+template <typename T, RawPtrTraits Traits>
+inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> =
+    ContainsFlags(Traits, RawPtrTraits::kMayDangle);
+
+// Template helpers for working with T* or raw_ptr<T>.
+template <typename T>
+struct IsPointer : std::false_type {};
+
+template <typename T>
+struct IsPointer<T*> : std::true_type {};
+
+template <typename T, RawPtrTraits Traits>
+struct IsPointer<raw_ptr<T, Traits>> : std::true_type {};
+
+template <typename T>
+inline constexpr bool IsPointerV = IsPointer<T>::value;
+
+template <typename T>
+struct RemovePointer {
+  using type = T;
+};
+
+template <typename T>
+struct RemovePointer<T*> {
+  using type = T;
+};
+
+template <typename T, RawPtrTraits Traits>
+struct RemovePointer<raw_ptr<T, Traits>> {
+  using type = T;
+};
+
+template <typename T>
+using RemovePointerT = typename RemovePointer<T>::type;
+
+struct RawPtrGlobalSettings {
+  static void EnableExperimentalAsh() {
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+    internal::BackupRefPtrGlobalSettings::EnableExperimentalAsh();
+#endif
+  }
+};
+
+}  // namespace base
+
+using base::raw_ptr;
+
+// DisableDanglingPtrDetection option for raw_ptr annotates
+// "intentional-and-safe" dangling pointers. It is meant to be used at the
+// margin, only if there is no better way to re-architect the code.
+//
+// Usage:
+// raw_ptr<T, DisableDanglingPtrDetection> dangling_ptr;
+//
+// When using it, please provide a justification about what guarantees that it
+// will never be dereferenced after becoming dangling.
+constexpr auto DisableDanglingPtrDetection = base::RawPtrTraits::kMayDangle;
+
+// See `docs/dangling_ptr.md`
+// Annotates known dangling raw_ptr. Those haven't been triaged yet. All the
+// occurrences are meant to be removed. See https://crbug.com/1291138.
+constexpr auto DanglingUntriaged = base::RawPtrTraits::kMayDangle;
+
+// Unlike DanglingUntriaged, this annotates raw_ptrs that are known to
+// dangle only occasionally on the CQ.
+//
+// These were found from CQ runs and analysed in this dashboard:
+// https://docs.google.com/spreadsheets/d/1k12PQOG4y1-UEV9xDfP1F8FSk4cVFywafEYHmzFubJ8/
+//
+// This is not meant to be added manually. You can ignore this flag.
+constexpr auto FlakyDanglingUntriaged = base::RawPtrTraits::kMayDangle;
+
+// Dangling raw_ptr that is more likely to cause UAF: its memory was freed in
+// one task, and the raw_ptr was released in a different one.
+//
+// This is not meant to be added manually. You can ignore this flag.
+constexpr auto AcrossTasksDanglingUntriaged = base::RawPtrTraits::kMayDangle;
+
+// The use of pointer arithmetic with raw_ptr is strongly discouraged and
+// disabled by default. Usually a container like span<> should be used
+// instead of the raw_ptr.
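+//
+// Usage (illustrative sketch; variable names are hypothetical):
+//   raw_ptr<uint8_t, AllowPtrArithmetic> cursor = buffer;
+//   cursor += header_size;  // Compiles only because the trait is present.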
+constexpr auto AllowPtrArithmetic = base::RawPtrTraits::kAllowPtrArithmetic;
+
+// Temporary flag for `raw_ptr` / `raw_ref`. This is used by finch experiments
+// to differentiate pointers added recently for the ChromeOS ash rewrite.
+//
+// See launch plan:
+// https://docs.google.com/document/d/105OVhNl-2lrfWElQSk5BXYv-nLynfxUrbC4l8cZ0CoU/edit
+//
+// This is not meant to be added manually. You can ignore this flag.
+constexpr auto ExperimentalAsh = base::RawPtrTraits::kExperimentalAsh;
+
+// The use of uninitialized pointers is strongly discouraged. raw_ptrs will
+// be initialized to nullptr by default in all cases when building against
+// Chromium. However, third-party projects built in a standalone manner may
+// wish to opt out where possible. One way to do this is via buildflags,
+// thus affecting all raw_ptrs, but a finer-grained mechanism is the use
+// of the kAllowUninitialized trait.
+//
+// Note that opting out may not always be effective, given that algorithms
+// like BackupRefPtr require nullptr initialization for correctness and thus
+// silently enforce it.
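+//
+// Usage (illustrative sketch):
+//   raw_ptr<T, AllowUninitialized> ptr;  // May be left uninitialized here,
+//                                        // unless the active implementation
+//                                        // (e.g. BackupRefPtr) enforces
+//                                        // nullptr initialization anyway.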
+constexpr auto AllowUninitialized = base::RawPtrTraits::kAllowUninitialized;
+
+// This flag is used to tag a subset of dangling pointers. Similarly to
+// DanglingUntriaged, those pointers are known to be dangling. However, we also
+// detected that those raw_ptrs were never released (either by calling the
+// raw_ptr's destructor or by resetting their value), which can ultimately put
+// pressure on the BRP quarantine.
+//
+// This is not meant to be added manually. You can ignore this flag.
+constexpr auto LeakedDanglingUntriaged = base::RawPtrTraits::kMayDangle;
+
+// Temporary annotation for new pointers added during the renderer rewrite.
+// TODO(crbug.com/1444624): Find pre-existing dangling pointers and remove
+// this annotation.
+//
+// DO NOT ADD new occurrences of this.
+constexpr auto ExperimentalRenderer = base::RawPtrTraits::kMayDangle;
+
+// Public version used in callback arguments when it is known that they might
+// receive dangling pointers. In any other case, please
+// use one of:
+// - raw_ptr<T, DanglingUntriaged>
+// - raw_ptr<T, DisableDanglingPtrDetection>
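+//
+// Usage (illustrative sketch; `Widget` and the callback are hypothetical):
+//   void OnWidgetDestroyed(MayBeDangling<Widget> widget) {
+//     // `widget` may already be dangling here: compare or hash it, but do
+//     // not dereference it.
+//   }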
+template <typename T, base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty>
+using MayBeDangling = base::raw_ptr<T, Traits | base::RawPtrTraits::kMayDangle>;
+
+namespace std {
+
+// Override so set/map lookups do not create extra raw_ptr. This also allows
+// dangling pointers to be used for lookup.
+template <typename T, base::RawPtrTraits Traits>
+struct less<raw_ptr<T, Traits>> {
+  using Impl = typename raw_ptr<T, Traits>::Impl;
+  using is_transparent = void;
+
+  bool operator()(const raw_ptr<T, Traits>& lhs,
+                  const raw_ptr<T, Traits>& rhs) const {
+    Impl::IncrementLessCountForTest();
+    return lhs < rhs;
+  }
+
+  bool operator()(T* lhs, const raw_ptr<T, Traits>& rhs) const {
+    Impl::IncrementLessCountForTest();
+    return lhs < rhs;
+  }
+
+  bool operator()(const raw_ptr<T, Traits>& lhs, T* rhs) const {
+    Impl::IncrementLessCountForTest();
+    return lhs < rhs;
+  }
+};
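+
+// Illustrative sketch of what the transparent comparator above enables
+// (`Widget` is a hypothetical type): a plain T* key can be used for lookups
+// without constructing a temporary raw_ptr<T>, even if the key is dangling.
+//
+//   std::set<raw_ptr<Widget>> widgets;
+//   Widget* key = ...;
+//   auto it = widgets.find(key);  // Uses the T* overloads above.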
+
+// Define for cases where raw_ptr<T> holds a pointer to an array of type T.
+// This is consistent with definition of std::iterator_traits<T*>.
+// Algorithms like std::binary_search need that.
+template <typename T, base::RawPtrTraits Traits>
+struct iterator_traits<raw_ptr<T, Traits>> {
+  using difference_type = ptrdiff_t;
+  using value_type = std::remove_cv_t<T>;
+  using pointer = T*;
+  using reference = T&;
+  using iterator_category = std::random_access_iterator_tag;
+};
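+
+// Illustrative sketch (hypothetical data): with the traits above, a raw_ptr
+// into an array can be used directly as a random-access iterator.
+//
+//   int values[] = {1, 2, 3, 4};
+//   raw_ptr<int, AllowPtrArithmetic> begin = values;
+//   bool found = std::binary_search(begin, begin + 4, 3);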
+
+// Specialize std::pointer_traits. The latter is required to obtain the
+// underlying raw pointer in the std::to_address(pointer) overload.
+// Implementing the pointer_traits is the standard blessed way to customize
+// `std::to_address(pointer)` in C++20 [1].
+//
+// [1] https://wg21.link/pointer.traits.optmem
+
+template <typename T, ::base::RawPtrTraits Traits>
+struct pointer_traits<::raw_ptr<T, Traits>> {
+  using pointer = ::raw_ptr<T, Traits>;
+  using element_type = T;
+  using difference_type = ptrdiff_t;
+
+  template <typename U>
+  using rebind = ::raw_ptr<U, Traits>;
+
+  static constexpr pointer pointer_to(element_type& r) noexcept {
+    return pointer(&r);
+  }
+
+  static constexpr element_type* to_address(pointer p) noexcept {
+    return p.get();
+  }
+};
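+
+// Illustrative sketch (`Widget` is a hypothetical type): std::to_address()
+// goes through the specialization above and yields the underlying raw pointer
+// without dereferencing it.
+//
+//   raw_ptr<Widget> ptr = ...;
+//   Widget* unwrapped = std::to_address(ptr);  // Equivalent to ptr.get().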
+
+}  // namespace std
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.cc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.cc
new file mode 100644
index 0000000..3a43be2
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.cc
@@ -0,0 +1,47 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.h"
+
+#include <sanitizer/asan_interface.h>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+
+namespace base::internal {
+
+PA_NO_SANITIZE("address")
+bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr) {
+  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+
+  // Normally, we probe the first byte of an object, but in cases of pointer
+  // arithmetic, we may be probing subsequent bytes, including the legal
+  // "end + 1" position.
+  //
+  // Alas, ASAN will claim an unmapped page is unpoisoned, so we willfully
+  // ignore the first address of a page, since "end + 1" of an object allocated
+  // exactly up to a page boundary will SEGV on probe. This will cause false
+  // negatives for pointers that happen to be page aligned, which is
+  // undesirable but necessary for now.
+  //
+  // We minimize the consequences by using the pointer arithmetic flag in
+  // higher levels to gate this suppression.
+  //
+  // TODO(tsepez): this may still fail for a non-accessible but non-null
+  // return from, say, malloc(0) which happens to be page-aligned.
+  //
+  // TODO(tsepez): enforce the pointer arithmetic flag. Until then, we
+  // may fail here if a pointer requires the flag but is lacking it.
+  return is_adjustable_ptr &&
+         ((address & 0x0fff) == 0 ||
+          __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) &&
+         !__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1);
+}
+
+bool LikelySmuggledScalar(const volatile void* ptr) {
+  intptr_t address = reinterpret_cast<intptr_t>(ptr);
+  return address < 0x4000;  // Negative or small positive.
+}
+
+}  // namespace base::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.h
new file mode 100644
index 0000000..536d7ec
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_asan_unowned_impl.h
@@ -0,0 +1,151 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_ASAN_UNOWNED_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_ASAN_UNOWNED_IMPL_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+
+#if !BUILDFLAG(USE_ASAN_UNOWNED_PTR)
+#error "Included under wrong build option"
+#endif
+
+namespace base::internal {
+
+bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr);
+bool LikelySmuggledScalar(const volatile void* ptr);
+
+template <bool IsAdjustablePtr, bool MayDangle>
+struct RawPtrAsanUnownedImpl {
+  // The first two are needed for correctness. The last one isn't strictly
+  // required, but it is safer to set it.
+  static constexpr bool kMustZeroOnConstruct = true;
+  static constexpr bool kMustZeroOnMove = true;
+  static constexpr bool kMustZeroOnDestruct = true;
+
+  // Wraps a pointer.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
+    return ptr;
+  }
+
+  // Notifies the allocator when a wrapped pointer is being removed or replaced.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
+    }
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function is allowed to crash on nullptr.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
+      T* wrapped_ptr) {
+    // ASAN will catch use of dereferenced ptr without additional probing.
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function must handle nullptr gracefully.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
+      T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
+    }
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, without making an assertion on whether memory was
+  // freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
+      T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // Upcasts the wrapped pointer.
+  template <typename To, typename From>
+  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
+    static_assert(std::is_convertible_v<From*, To*>,
+                  "From must be convertible to To.");
+    // Note, this cast may change the address if upcasting to base that lies in
+    // the middle of the derived object.
+    return wrapped_ptr;
+  }
+
+  // Advance the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
+    return wrapped_ptr + delta_elems;
+  }
+
+  // Retreat the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
+    return wrapped_ptr - delta_elems;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
+                                                            T* wrapped_ptr2) {
+    return wrapped_ptr1 - wrapped_ptr2;
+  }
+
+  // Returns a copy of a wrapped pointer, without making an assertion on whether
+  // memory was freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  template <typename T>
+  static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) {
+    if (!MayDangle && wrapped_ptr) {
+      const volatile void* probe_ptr =
+          reinterpret_cast<const volatile void*>(wrapped_ptr);
+      if (!LikelySmuggledScalar(probe_ptr) &&
+          !EndOfAliveAllocation(probe_ptr, IsAdjustablePtr)) {
+        reinterpret_cast<const volatile uint8_t*>(probe_ptr)[0];
+      }
+    }
+  }
+
+  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
+  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
+    return ptr;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
+      T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // This is for accounting only, used by unit tests.
+  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
+  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
+};
+
+}  // namespace base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_ASAN_UNOWNED_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.cc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.cc
new file mode 100644
index 0000000..09ba441
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.cc
@@ -0,0 +1,131 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_ref_count.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+
+namespace base::internal {
+
+template <bool AllowDangling, bool ExperimentalAsh>
+void RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::AcquireInternal(
+    uintptr_t address) {
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+  PA_BASE_CHECK(UseBrp(address));
+#endif
+  uintptr_t slot_start =
+      partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
+  if constexpr (AllowDangling) {
+    partition_alloc::internal::PartitionRefCountPointer(slot_start)
+        ->AcquireFromUnprotectedPtr();
+  } else {
+    partition_alloc::internal::PartitionRefCountPointer(slot_start)->Acquire();
+  }
+}
+
+template <bool AllowDangling, bool ExperimentalAsh>
+void RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::ReleaseInternal(
+    uintptr_t address) {
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+  PA_BASE_CHECK(UseBrp(address));
+#endif
+  uintptr_t slot_start =
+      partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
+  if constexpr (AllowDangling) {
+    if (partition_alloc::internal::PartitionRefCountPointer(slot_start)
+            ->ReleaseFromUnprotectedPtr()) {
+      partition_alloc::internal::PartitionAllocFreeForRefCounting(slot_start);
+    }
+  } else {
+    if (partition_alloc::internal::PartitionRefCountPointer(slot_start)
+            ->Release()) {
+      partition_alloc::internal::PartitionAllocFreeForRefCounting(slot_start);
+    }
+  }
+}
+
+template <bool AllowDangling, bool ExperimentalAsh>
+void RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::
+    ReportIfDanglingInternal(uintptr_t address) {
+  if (partition_alloc::internal::IsUnretainedDanglingRawPtrCheckEnabled()) {
+    if (IsSupportedAndNotNull(address)) {
+      uintptr_t slot_start =
+          partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
+      partition_alloc::internal::PartitionRefCountPointer(slot_start)
+          ->ReportIfDangling();
+    }
+  }
+}
+
+// static
+template <bool AllowDangling, bool ExperimentalAsh>
+bool RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::
+    CheckPointerWithinSameAlloc(uintptr_t before_addr,
+                                uintptr_t after_addr,
+                                size_t type_size) {
+  partition_alloc::internal::PtrPosWithinAlloc ptr_pos_within_alloc =
+      partition_alloc::internal::IsPtrWithinSameAlloc(before_addr, after_addr,
+                                                      type_size);
+  // No need to check that |new_ptr| is in the same pool, as
+  // IsPtrWithinSameAlloc() checks that it's within the same allocation, so
+  // must be the same pool.
+  PA_BASE_CHECK(ptr_pos_within_alloc !=
+                partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
+
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  return ptr_pos_within_alloc ==
+         partition_alloc::internal::PtrPosWithinAlloc::kAllocEnd;
+#else
+  return false;
+#endif
+}
+
+template <bool AllowDangling, bool ExperimentalAsh>
+bool RawPtrBackupRefImpl<AllowDangling, ExperimentalAsh>::IsPointeeAlive(
+    uintptr_t address) {
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+  PA_BASE_CHECK(UseBrp(address));
+#endif
+  uintptr_t slot_start =
+      partition_alloc::PartitionAllocGetSlotStartInBRPPool(address);
+  return partition_alloc::internal::PartitionRefCountPointer(slot_start)
+      ->IsAlive();
+}
+
+// Explicitly instantiates the four BackupRefPtr variants in the .cc. This
+// ensures the definitions not visible from the .h are available in the binary.
+template struct RawPtrBackupRefImpl</*AllowDangling=*/false,
+                                    /*ExperimentalAsh=*/false>;
+template struct RawPtrBackupRefImpl</*AllowDangling=*/false,
+                                    /*ExperimentalAsh=*/true>;
+template struct RawPtrBackupRefImpl</*AllowDangling=*/true,
+                                    /*ExperimentalAsh=*/false>;
+template struct RawPtrBackupRefImpl</*AllowDangling=*/true,
+                                    /*ExperimentalAsh=*/true>;
+
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) {
+  if (partition_alloc::internal::IsManagedByDirectMap(address)) {
+    uintptr_t reservation_start =
+        partition_alloc::internal::GetDirectMapReservationStart(address);
+    PA_BASE_CHECK(address - reservation_start >=
+                  partition_alloc::PartitionPageSize());
+  } else {
+    PA_BASE_CHECK(partition_alloc::internal::IsManagedByNormalBuckets(address));
+    PA_BASE_CHECK(address % partition_alloc::kSuperPageSize >=
+                  partition_alloc::PartitionPageSize());
+  }
+}
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
+        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+
+}  // namespace base::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.h
new file mode 100644
index 0000000..34fffbf
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_backup_ref_impl.h
@@ -0,0 +1,509 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/chromeos_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h"
+#endif
+
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+#error "Included under wrong build option"
+#endif
+
+namespace base::internal {
+
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+PA_COMPONENT_EXPORT(RAW_PTR)
+void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address);
+#endif
+
+class BackupRefPtrGlobalSettings {
+ public:
+  static void EnableExperimentalAsh() {
+    PA_CHECK(!experimental_ash_raw_ptr_enabled_);
+    experimental_ash_raw_ptr_enabled_ = true;
+  }
+
+  static void DisableExperimentalAshForTest() {
+    PA_CHECK(experimental_ash_raw_ptr_enabled_);
+    experimental_ash_raw_ptr_enabled_ = false;
+  }
+
+  PA_ALWAYS_INLINE static bool IsExperimentalAshEnabled() {
+    return experimental_ash_raw_ptr_enabled_;
+  }
+
+ private:
+  // Write-once setting that should live in its own cacheline, as it's
+  // accessed frequently on a hot path.
+  PA_ALIGNAS(partition_alloc::internal::kPartitionCachelineSize)
+  static inline bool experimental_ash_raw_ptr_enabled_ = false;
+  [[maybe_unused]] char
+      padding_[partition_alloc::internal::kPartitionCachelineSize - 1];
+};
+
+// Note that `RawPtrBackupRefImpl` itself is not thread-safe. If multiple
+// threads modify the same raw_ptr object without synchronization, a data race
+// will occur.
+template <bool AllowDangling = false, bool ExperimentalAsh = false>
+struct RawPtrBackupRefImpl {
+  // These are needed for correctness, or else we may end up manipulating
+  // ref-count where we shouldn't, thus affecting the BRP's integrity. Unlike
+  // the first two, kMustZeroOnDestruct wouldn't be needed if raw_ptr was used
+  // correctly, but we already caught cases where a value is written after
+  // destruction.
+  static constexpr bool kMustZeroOnConstruct = true;
+  static constexpr bool kMustZeroOnMove = true;
+  static constexpr bool kMustZeroOnDestruct = true;
+
+ private:
+  PA_ALWAYS_INLINE static bool UseBrp(uintptr_t address) {
+    // Pointers annotated with ExperimentalAsh are subject to a separate,
+    // Ash-related experiment.
+    //
+    // Note that this can be enabled only before the BRP partition is created,
+    // so it's impossible for this function to change its answer for a specific
+    // pointer. (This relies on the original partition to not be BRP-enabled.)
+    if constexpr (ExperimentalAsh) {
+#if BUILDFLAG(PA_IS_CHROMEOS_ASH)
+      if (!BackupRefPtrGlobalSettings::IsExperimentalAshEnabled()) {
+        return false;
+      }
+#endif
+    }
+    return partition_alloc::IsManagedByPartitionAllocBRPPool(address);
+  }
+
+  PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
+    // There are many situations where the compiler can prove that
+    // `ReleaseWrappedPtr` is called on a value that is always nullptr, but the
+    // way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't
+    // prove that nullptr is not managed by PartitionAlloc; and so the compiler
+    // has to emit a useless check and dead code. To avoid that without making
+    // the runtime check slower, tell the compiler to skip
+    // `IsManagedByPartitionAllocBRPPool` when it can statically determine that
+    // address is nullptr.
+#if PA_HAS_BUILTIN(__builtin_constant_p)
+    if (__builtin_constant_p(address == 0) && (address == 0)) {
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+      PA_BASE_CHECK(
+          !partition_alloc::IsManagedByPartitionAllocBRPPool(address));
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
+        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+      return false;
+    }
+#endif  // PA_HAS_BUILTIN(__builtin_constant_p)
+
+    // This covers the nullptr case, as address 0 is never in any
+    // PartitionAlloc pool.
+    bool use_brp = UseBrp(address);
+
+    // There may be pointers immediately after the allocation, e.g.
+    //   {
+    //     // Assume this allocation happens outside of PartitionAlloc.
+    //     raw_ptr<T> ptr = new T[20];
+    //     for (size_t i = 0; i < 20; i++) { ptr++; }
+    //   }
+    //
+    // Such pointers are *not* at risk of accidentally falling into BRP pool,
+    // because:
+    // 1) On 64-bit systems, BRP pool is preceded by a forbidden region.
+    // 2) On 32-bit systems, the guard pages and metadata of super pages in BRP
+    //    pool aren't considered to be part of that pool.
+    //
+    // This allows us to make a stronger assertion that if
+    // IsManagedByPartitionAllocBRPPool returns true for a valid pointer,
+    // it must be at least partition page away from the beginning of a super
+    // page.
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    if (use_brp) {
+      CheckThatAddressIsntWithinFirstPartitionPage(address);
+    }
+#endif
+
+    return use_brp;
+  }
+
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  // Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed by
+  // one byte.
+#if defined(ARCH_CPU_X86_64)
+  // Bit 63 is the only pointer bit that will work as the poison bit across both
+  // LAM48 and LAM57. It also works when all unused linear address bits are
+  // checked for canonicality.
+  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 63;
+#else
+  // Avoid ARM's Top-Byte Ignore.
+  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 55;
+#endif
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
+    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
+                                ~OOB_POISON_BIT);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) {
+    return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
+           OOB_POISON_BIT;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) {
+    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
+                                OOB_POISON_BIT);
+  }
+#else   // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  template <typename T>
+  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
+    return ptr;
+  }
+#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
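+  // Illustrative sketch (hypothetical pointer values), assuming the OOB
+  // poisoning build flag is enabled: an end-of-allocation pointer gets the
+  // poison bit set, which IsPtrOOB() detects and UnpoisonPtr() strips again
+  // before the address is handed out.
+  //
+  //   T* end = PoisonOOBPtr(last_element + 1);
+  //   // IsPtrOOB(end) is true; UnpoisonPtr(end) == last_element + 1.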
+
+ public:
+  // Wraps a pointer.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return ptr;
+    }
+    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
+    if (IsSupportedAndNotNull(address)) {
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+      PA_BASE_CHECK(ptr != nullptr);
+#endif
+      AcquireInternal(address);
+    } else {
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+#if PA_HAS_BUILTIN(__builtin_constant_p)
+      // Similarly to `IsSupportedAndNotNull` above, elide the
+      // `BanSuperPageFromBRPPool` call if the compiler can prove that `address`
+      // is zero since PA won't be able to map anything at that address anyway.
+      bool known_constant_zero =
+          __builtin_constant_p(address == 0) && (address == 0);
+#else   // PA_HAS_BUILTIN(__builtin_constant_p)
+      bool known_constant_zero = false;
+#endif  // PA_HAS_BUILTIN(__builtin_constant_p)
+
+      if (!known_constant_zero) {
+        partition_alloc::internal::AddressPoolManagerBitmap::
+            BanSuperPageFromBRPPool(address);
+      }
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+    }
+
+    return ptr;
+  }
+
+  // Notifies the allocator when a wrapped pointer is being removed or replaced.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return;
+    }
+    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
+    if (IsSupportedAndNotNull(address)) {
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+      PA_BASE_CHECK(wrapped_ptr != nullptr);
+#endif
+      ReleaseInternal(address);
+    }
+    // We are unable to counteract BanSuperPageFromBRPPool(), called from
+    // WrapRawPtr(). We only use one bit per super-page and thus can't tell if
+    // there's more than one associated raw_ptr<T> at a given time. The risk of
+    // exhausting the entire address space is minuscule, so we couldn't resist
+    // the perf gain of a single relaxed store (in the above-mentioned
+    // function) over the two much more expensive CAS operations we'd need if
+    // we were to un-ban a super-page.
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function is allowed to crash on nullptr.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
+      T* wrapped_ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr;
+    }
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+    PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
+#endif
+    uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
+    if (IsSupportedAndNotNull(address)) {
+      PA_BASE_CHECK(wrapped_ptr != nullptr);
+      PA_BASE_CHECK(IsPointeeAlive(address));
+    }
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
+        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function must handle nullptr gracefully.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
+      T* wrapped_ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr;
+    }
+    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+    // Some code uses invalid pointer values as indicators, so those values must
+    // be passed through unchanged during extraction. The following check will
+    // pass invalid values through if those values do not fall within the BRP
+    // pool after being unpoisoned.
+    if (!IsSupportedAndNotNull(partition_alloc::UntagPtr(unpoisoned_ptr))) {
+      return wrapped_ptr;
+    }
+    // Poison-based OOB checks do not extend to extracted pointers. The
+    // alternative of retaining poison on extracted pointers could introduce new
+    // OOB conditions, e.g., in code that extracts an end-of-allocation pointer
+    // for use in a loop termination condition. The poison bit would make that
+    // pointer appear to reference a very high address.
+#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+    return unpoisoned_ptr;
+  }
+
+  // Unwraps the pointer, without making an assertion on whether memory was
+  // freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
+      T* wrapped_ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr;
+    }
+    // This may be used for unwrapping an end-of-allocation pointer to be used
+    // as an endpoint in an iterative algorithm, so this removes the OOB poison
+    // bit.
+    return UnpoisonPtr(wrapped_ptr);
+  }
+
+  // Upcasts the wrapped pointer.
+  template <typename To, typename From>
+  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
+    static_assert(std::is_convertible_v<From*, To*>,
+                  "From must be convertible to To.");
+    // Note, this cast may change the address if upcasting to base that lies in
+    // the middle of the derived object.
+    return wrapped_ptr;
+  }
+
+  // Verify the pointer stayed in the same slot, and return the poisoned version
+  // of `new_ptr` if OOB poisoning is enabled.
+  template <typename T>
+  PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat(
+      T* unpoisoned_ptr,
+      T* new_ptr) {
+    // In the "before allocation" mode, on 32-bit, we can run into a problem
+    // that the end-of-allocation address could fall outside of
+    // PartitionAlloc's pools, if this is the last slot of the super page,
+    // thus pointing to the guard page. This means the ref-count won't be
+    // decreased when the pointer is released (leak).
+    //
+    // We could possibly solve it in a few different ways:
+    // - Add the trailing guard page to the pool, but we'd have to think very
+    //   hard about whether this would create another hole.
+    // - Add an address adjustment to "is in pool?" check, similar as the one in
+    //   PartitionAllocGetSlotStartInBRPPool(), but that seems fragile, not to
+    //   mention adding an extra instruction to an inlined hot path.
+    // - Let the leak happen, since it should be a very rare condition.
+    // - Go back to the previous solution of rewrapping the pointer, but that
+    //   had an issue of losing BRP protection in case the pointer ever gets
+    //   shifted back before the end of allocation.
+    //
+    // We decided to cross that bridge once we get there... if we ever get
+    // there. Currently there are no plans to switch back to the "before
+    // allocation" mode.
+    //
+    // This problem doesn't exist in the "previous slot" mode, or any mode that
+    // involves putting extras after the allocation, because the
+    // end-of-allocation address belongs to the same slot.
+    static_assert(BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT));
+
+    // First check if the new address didn't migrate in/out the BRP pool, and
+    // that it lands within the same allocation. An end-of-allocation address is
+    // ok, too, and that may lead to the pointer being poisoned if the relevant
+    // feature is enabled. These checks add a non-trivial cost, but they're
+    // cheaper and more secure than the previous implementation that rewrapped
+    // the pointer (wrapped the new pointer and unwrapped the old one).
+    //
+    // Note, the value of these checks goes beyond OOB protection. They're
+    // important for integrity of the BRP algorithm. Without these, an attacker
+    // could make the pointer point to another allocation, and cause its
+    // ref-count to go to 0 upon this pointer's destruction, even though there
+    // may be another pointer still pointing to it, thus making it lose the BRP
+    // protection prematurely.
+    const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
+    const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
+    // TODO(bartekn): Consider adding support for non-BRP pools too (without
+    // removing the cross-pool migration check).
+    if (IsSupportedAndNotNull(before_addr)) {
+      constexpr size_t size = sizeof(T);
+      [[maybe_unused]] const bool is_end =
+          CheckPointerWithinSameAlloc(before_addr, after_addr, size);
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+      if (is_end) {
+        new_ptr = PoisonOOBPtr(new_ptr);
+      }
+#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+    } else {
+      // Check that the new address didn't migrate into the BRP pool, as it
+      // would result in more pointers pointing to an allocation than its
+      // ref-count reflects.
+      PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
+    }
+    return new_ptr;
+  }
+
+  // Advance the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr + delta_elems;
+    }
+    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
+    return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
+        unpoisoned_ptr, unpoisoned_ptr + delta_elems);
+  }
+
+  // Retreat the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr - delta_elems;
+    }
+    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
+    return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
+        unpoisoned_ptr, unpoisoned_ptr - delta_elems);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
+                                                            T* wrapped_ptr2) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr1 - wrapped_ptr2;
+    }
+
+    T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
+    T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
+#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return unpoisoned_ptr1 - unpoisoned_ptr2;
+    }
+    uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
+    uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
+    // Ensure that both pointers are within the same slot, and pool!
+    // TODO(bartekn): Consider adding support for non-BRP pool too.
+    if (IsSupportedAndNotNull(address1)) {
+      PA_BASE_CHECK(IsSupportedAndNotNull(address2));
+      PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc(
+                        address2, address1, sizeof(T)) !=
+                    partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
+    } else {
+      PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
+    }
+#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
+    return unpoisoned_ptr1 - unpoisoned_ptr2;
+  }
+
+  // Returns a copy of a wrapped pointer, without making an assertion on whether
+  // memory was freed or not.
+  // This method increments the reference count of the allocation slot.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr;
+    }
+    return WrapRawPtr(wrapped_ptr);
+  }
+
+  // Report the current wrapped pointer if pointee isn't alive anymore.
+  template <typename T>
+  PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) {
+    ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
+  }
+
+  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
+  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return ptr;
+    } else {
+      return WrapRawPtr(ptr);
+    }
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
+      T* wrapped_ptr) {
+    if (partition_alloc::internal::base::is_constant_evaluated()) {
+      return wrapped_ptr;
+    } else {
+      return UnpoisonPtr(wrapped_ptr);
+    }
+  }
+
+  // This is for accounting only, used by unit tests.
+  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
+  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
+
+ private:
+  // We've evaluated several strategies (inline nothing, various parts, or
+  // everything in |Wrap()| and |Release()|) using the Speedometer2 benchmark
+  // to measure performance. The best results were obtained when only the
+  // lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
+  // Therefore, we've extracted the rest into the functions below and marked
+  // them as PA_NOINLINE to prevent unintended LTO effects.
+  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal(
+      uintptr_t address);
+  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal(
+      uintptr_t address);
+  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive(
+      uintptr_t address);
+  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal(
+      uintptr_t address);
+
+  // CHECK if `before_addr` and `after_addr` are in the same allocation, for a
+  // given `type_size`.
+  // If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, return whether `after_addr`
+  // points at the end of the allocation.
+  // If BACKUP_REF_PTR_POISON_OOB_PTR is disabled, return false.
+  PA_NOINLINE static PA_COMPONENT_EXPORT(
+      RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr,
+                                                uintptr_t after_addr,
+                                                size_t type_size);
+};
+
+}  // namespace base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_cast.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_cast.h
new file mode 100644
index 0000000..50e5805
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_cast.h
@@ -0,0 +1,69 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_CAST_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_CAST_H_
+
+#include <memory>
+
+#include <type_traits>
+#if defined(__has_builtin)
+#define PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST __has_builtin(__builtin_bit_cast)
+#else
+#define PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST 0
+#endif
+
+#if !PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
+#include <cstring>
+#endif
+
+// This header is explicitly allowlisted by the clang plugin rule in
+// "tools/clang/plugins/FindBadRawPtrPatterns.cpp". You can bypass those checks
+// by performing casts explicitly with the functions defined here.
+namespace base {
+
+// Wrapper for |static_cast<T>(src)|.
+template <typename Dest, typename Source>
+inline constexpr Dest unsafe_raw_ptr_static_cast(Source&& source) noexcept {
+  return static_cast<Dest>(source);
+}
+
+// Wrapper for |reinterpret_cast<T>(src)|.
+template <typename Dest, typename Source>
+inline constexpr Dest unsafe_raw_ptr_reinterpret_cast(
+    Source&& source) noexcept {
+  return reinterpret_cast<Dest>(source);
+}
+
+// Wrapper for |std::bit_cast<T>(src)|.
+// Though we have similar implementations in |absl::bit_cast| and
+// |base::bit_cast|, it is important to perform the cast in this file so that
+// it is correctly excluded from the check.
+template <typename Dest, typename Source>
+#if PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
+inline constexpr std::enable_if_t<sizeof(Dest) == sizeof(Source) &&
+                                      std::is_trivially_copyable_v<Dest> &&
+                                      std::is_trivially_copyable_v<Source>,
+                                  Dest>
+#else
+inline std::enable_if_t<sizeof(Dest) == sizeof(Source) &&
+                            std::is_trivially_copyable_v<Dest> &&
+                            std::is_trivially_copyable_v<Source> &&
+                            std::is_default_constructible_v<Dest>,
+                        Dest>
+#endif  // PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
+unsafe_raw_ptr_bit_cast(const Source& source) noexcept {
+  // TODO(mikt): Replace this with |std::bit_cast<T>| when C++20 arrives.
+#if PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
+  return __builtin_bit_cast(Dest, source);
+#else
+  Dest dest;
+  memcpy(std::addressof(dest), std::addressof(source), sizeof(dest));
+  return dest;
+#endif  // PA_RAWPTR_CAST_USE_BUILTIN_BIT_CAST
+}
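+
+// Illustrative sketch (hypothetical names): these wrappers are intended for
+// the rare call sites that intentionally convert pointer-sized values and
+// have been reviewed against the clang plugin checks mentioned above.
+//
+//   void* opaque = GetOpaqueHandle();
+//   auto bits = base::unsafe_raw_ptr_bit_cast<uintptr_t>(opaque);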
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_CAST_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_counting_impl_for_test.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_counting_impl_for_test.h
new file mode 100644
index 0000000..fa000f4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_counting_impl_for_test.h
@@ -0,0 +1,104 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_COUNTING_IMPL_FOR_TEST_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_COUNTING_IMPL_FOR_TEST_H_
+
+#include <climits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_noop_impl.h"
+
+namespace base::test {
+
+// Provides a raw_ptr/raw_ref implementation that performs accounting for test
+// purposes. It performs extra bookkeeping, e.g. to track the number of times
+// the raw_ptr is wrapped, unwrapped, etc.
+//
+// Test only.
+struct RawPtrCountingImplForTest : public base::internal::RawPtrNoOpImpl {
+  using SuperImpl = base::internal::RawPtrNoOpImpl;
+
+  static constexpr bool kMustZeroOnConstruct = false;
+  static constexpr bool kMustZeroOnMove = false;
+  static constexpr bool kMustZeroOnDestruct = false;
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* WrapRawPtr(T* ptr) {
+    ++wrap_raw_ptr_cnt;
+    return SuperImpl::WrapRawPtr(ptr);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static void ReleaseWrappedPtr(T* ptr) {
+    ++release_wrapped_ptr_cnt;
+    SuperImpl::ReleaseWrappedPtr(ptr);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
+    ++get_for_dereference_cnt;
+    return SuperImpl::SafelyUnwrapPtrForDereference(wrapped_ptr);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
+    ++get_for_extraction_cnt;
+    return SuperImpl::SafelyUnwrapPtrForExtraction(wrapped_ptr);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
+    ++get_for_comparison_cnt;
+    return SuperImpl::UnsafelyUnwrapPtrForComparison(wrapped_ptr);
+  }
+
+  PA_ALWAYS_INLINE static void IncrementSwapCountForTest() {
+    ++wrapped_ptr_swap_cnt;
+  }
+
+  PA_ALWAYS_INLINE static void IncrementLessCountForTest() {
+    ++wrapped_ptr_less_cnt;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* WrapRawPtrForDuplication(T* ptr) {
+    ++wrap_raw_ptr_for_dup_cnt;
+    return SuperImpl::WrapRawPtrForDuplication(ptr);
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
+    ++get_for_duplication_cnt;
+    return SuperImpl::UnsafelyUnwrapPtrForDuplication(wrapped_ptr);
+  }
+
+  static void ClearCounters() {
+    wrap_raw_ptr_cnt = 0;
+    release_wrapped_ptr_cnt = 0;
+    get_for_dereference_cnt = 0;
+    get_for_extraction_cnt = 0;
+    get_for_comparison_cnt = 0;
+    wrapped_ptr_swap_cnt = 0;
+    wrapped_ptr_less_cnt = 0;
+    pointer_to_member_operator_cnt = 0;
+    wrap_raw_ptr_for_dup_cnt = 0;
+    get_for_duplication_cnt = 0;
+  }
+
+  static inline int wrap_raw_ptr_cnt = INT_MIN;
+  static inline int release_wrapped_ptr_cnt = INT_MIN;
+  static inline int get_for_dereference_cnt = INT_MIN;
+  static inline int get_for_extraction_cnt = INT_MIN;
+  static inline int get_for_comparison_cnt = INT_MIN;
+  static inline int wrapped_ptr_swap_cnt = INT_MIN;
+  static inline int wrapped_ptr_less_cnt = INT_MIN;
+  static inline int pointer_to_member_operator_cnt = INT_MIN;
+  static inline int wrap_raw_ptr_for_dup_cnt = INT_MIN;
+  static inline int get_for_duplication_cnt = INT_MIN;
+};
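+
+// Illustrative test sketch (hypothetical assertions): reset the counters, run
+// the raw_ptr operations under test, then check the bookkeeping.
+//
+//   RawPtrCountingImplForTest::ClearCounters();
+//   // ... construct/copy/dereference a raw_ptr configured (via its traits)
+//   // to use this counting implementation ...
+//   EXPECT_EQ(1, RawPtrCountingImplForTest::get_for_dereference_cnt);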
+
+}  // namespace base::test
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_COUNTING_IMPL_FOR_TEST_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h
new file mode 100644
index 0000000..4b8eb7b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h
@@ -0,0 +1,37 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_EXCLUSION_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_EXCLUSION_H_
+
+// This header will be leakily included even when
+// `!use_partition_alloc`, which is okay because it's a leaf header.
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"  // nogncheck
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "build/build_config.h"
+
+#if PA_HAS_ATTRIBUTE(annotate)
+#if defined(OFFICIAL_BUILD) && !BUILDFLAG(FORCE_ENABLE_RAW_PTR_EXCLUSION)
+// The annotation changed compiler output and increased binary size, so it is
+// disabled for official builds.
+// TODO(crbug.com/1320670): Remove when issue is resolved.
+#define RAW_PTR_EXCLUSION
+#else
+// Marks a field as excluded from the `raw_ptr<T>` usage enforcement via
+// Chromium Clang plugin.
+//
+// Example:
+//     RAW_PTR_EXCLUSION Foo* foo_;
+//
+// `RAW_PTR_EXCLUSION` should be avoided, as exclusions make it significantly
+// easier for any bug involving the pointer to become a security vulnerability.
+// For additional guidance please see the "When to use raw_ptr<T>" section of
+// `//base/memory/raw_ptr.md`.
+#define RAW_PTR_EXCLUSION __attribute__((annotate("raw_ptr_exclusion")))
+#endif
+#else
+#define RAW_PTR_EXCLUSION
+#endif
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_EXCLUSION_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.cc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.cc
new file mode 100644
index 0000000..9b71004
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.cc
@@ -0,0 +1,52 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.h"
+
+#include <atomic>
+#include <cstdint>
+
+namespace base::internal {
+
+namespace {
+
+void DefaultWrapPtrHook(uintptr_t address) {}
+void DefaultReleaseWrappedPtrHook(uintptr_t address) {}
+void DefaultUnwrapForDereferenceHook(uintptr_t address) {}
+void DefaultUnwrapForExtractionHook(uintptr_t address) {}
+void DefaultUnwrapForComparisonHook(uintptr_t address) {}
+void DefaultAdvanceHook(uintptr_t old_address, uintptr_t new_address) {}
+void DefaultDuplicateHook(uintptr_t address) {}
+void DefaultWrapPtrForDuplicationHook(uintptr_t address) {}
+void DefaultUnsafelyUnwrapForDuplicationHook(uintptr_t address) {}
+
+constexpr RawPtrHooks default_hooks = {
+    DefaultWrapPtrHook,
+    DefaultReleaseWrappedPtrHook,
+    DefaultUnwrapForDereferenceHook,
+    DefaultUnwrapForExtractionHook,
+    DefaultUnwrapForComparisonHook,
+    DefaultAdvanceHook,
+    DefaultDuplicateHook,
+    DefaultWrapPtrForDuplicationHook,
+    DefaultUnsafelyUnwrapForDuplicationHook,
+};
+
+}  // namespace
+
+std::atomic<const RawPtrHooks*> g_hooks{&default_hooks};
+
+const RawPtrHooks* GetRawPtrHooks() {
+  return g_hooks.load(std::memory_order_relaxed);
+}
+
+void InstallRawPtrHooks(const RawPtrHooks* hooks) {
+  g_hooks.store(hooks, std::memory_order_relaxed);
+}
+
+void ResetRawPtrHooks() {
+  InstallRawPtrHooks(&default_hooks);
+}
+
+}  // namespace base::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.h
new file mode 100644
index 0000000..2a1430c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_hookable_impl.h
@@ -0,0 +1,214 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+
+#if !BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+#error "Included under wrong build option"
+#endif
+
+namespace base::internal {
+
+struct RawPtrHooks {
+  using WrapPtr = void(uintptr_t address);
+  using ReleaseWrappedPtr = void(uintptr_t address);
+  using SafelyUnwrapForDereference = void(uintptr_t address);
+  using SafelyUnwrapForExtraction = void(uintptr_t address);
+  using UnsafelyUnwrapForComparison = void(uintptr_t address);
+  using Advance = void(uintptr_t old_address, uintptr_t new_address);
+  using Duplicate = void(uintptr_t address);
+  using WrapPtrForDuplication = void(uintptr_t address);
+  using UnsafelyUnwrapForDuplication = void(uintptr_t address);
+
+  WrapPtr* wrap_ptr;
+  ReleaseWrappedPtr* release_wrapped_ptr;
+  SafelyUnwrapForDereference* safely_unwrap_for_dereference;
+  SafelyUnwrapForExtraction* safely_unwrap_for_extraction;
+  UnsafelyUnwrapForComparison* unsafely_unwrap_for_comparison;
+  Advance* advance;
+  Duplicate* duplicate;
+  WrapPtrForDuplication* wrap_ptr_for_duplication;
+  UnsafelyUnwrapForDuplication* unsafely_unwrap_for_duplication;
+};
+
+PA_COMPONENT_EXPORT(RAW_PTR) const RawPtrHooks* GetRawPtrHooks();
+PA_COMPONENT_EXPORT(RAW_PTR) void InstallRawPtrHooks(const RawPtrHooks*);
+PA_COMPONENT_EXPORT(RAW_PTR) void ResetRawPtrHooks();
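+
+// A minimal installation sketch (hook names below are hypothetical, for
+// illustration only; callers define their own instrumentation). Note that
+// only the pointer is stored, so the RawPtrHooks object must outlive its
+// installation:
+//
+//   void MyWrapHook(uintptr_t address) { /* record the wrap */ }
+//   ...  // one function per RawPtrHooks field, in declaration order
+//   const RawPtrHooks my_hooks = {
+//       MyWrapHook,      MyReleaseHook,    MyDerefHook,
+//       MyExtractHook,   MyCompareHook,    MyAdvanceHook,
+//       MyDuplicateHook, MyWrapForDupHook, MyUnwrapForDupHook,
+//   };
+//   InstallRawPtrHooks(&my_hooks);  // hooks now fire on raw_ptr operations
+//   ...
+//   ResetRawPtrHooks();             // restore the no-op defaults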
+
+template <bool EnableHooks>
+struct RawPtrHookableImpl {
+  // Since this Impl is used for BRP-ASan, match BRP as closely as possible.
+  static constexpr bool kMustZeroOnConstruct = true;
+  static constexpr bool kMustZeroOnMove = true;
+  static constexpr bool kMustZeroOnDestruct = true;
+
+  // Wraps a pointer.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
+      }
+    }
+    return ptr;
+  }
+
+  // Notifies the allocator when a wrapped pointer is being removed or replaced.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
+      }
+    }
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function is allowed to crash on nullptr.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
+      T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->safely_unwrap_for_dereference(
+            reinterpret_cast<uintptr_t>(wrapped_ptr));
+      }
+    }
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function must handle nullptr gracefully.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
+      T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->safely_unwrap_for_extraction(
+            reinterpret_cast<uintptr_t>(wrapped_ptr));
+      }
+    }
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, without making an assertion on whether memory was
+  // freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
+      T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->unsafely_unwrap_for_comparison(
+            reinterpret_cast<uintptr_t>(wrapped_ptr));
+      }
+    }
+    return wrapped_ptr;
+  }
+
+  // Upcasts the wrapped pointer.
+  template <typename To, typename From>
+  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
+    static_assert(std::is_convertible_v<From*, To*>,
+                  "From must be convertible to To.");
+    // Note, this cast may change the address if upcasting to a base class
+    // that lies in the middle of the derived object.
+    return wrapped_ptr;
+  }
+
+  // Advance the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->advance(
+            reinterpret_cast<uintptr_t>(wrapped_ptr),
+            reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
+      }
+    }
+    return wrapped_ptr + delta_elems;
+  }
+
+  // Retreat the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->advance(
+            reinterpret_cast<uintptr_t>(wrapped_ptr),
+            reinterpret_cast<uintptr_t>(wrapped_ptr - delta_elems));
+      }
+    }
+    return wrapped_ptr - delta_elems;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
+                                                            T* wrapped_ptr2) {
+    return wrapped_ptr1 - wrapped_ptr2;
+  }
+
+  // Returns a copy of a wrapped pointer, without making an assertion on whether
+  // memory was freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
+      }
+    }
+    return wrapped_ptr;
+  }
+
+  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
+  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->wrap_ptr_for_duplication(
+            reinterpret_cast<uintptr_t>(ptr));
+      }
+    }
+    return ptr;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
+      T* wrapped_ptr) {
+    if (!partition_alloc::internal::base::is_constant_evaluated()) {
+      if (EnableHooks) {
+        GetRawPtrHooks()->unsafely_unwrap_for_duplication(
+            reinterpret_cast<uintptr_t>(wrapped_ptr));
+      }
+    }
+    return wrapped_ptr;
+  }
+
+  // This is for accounting only, used by unit tests.
+  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
+  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
+};
+
+}  // namespace base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_HOOKABLE_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_noop_impl.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_noop_impl.h
new file mode 100644
index 0000000..2fc12c6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_noop_impl.h
@@ -0,0 +1,118 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_NOOP_IMPL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_NOOP_IMPL_H_
+
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+
+namespace base::internal {
+
+struct RawPtrNoOpImpl {
+  static constexpr bool kMustZeroOnConstruct = false;
+  static constexpr bool kMustZeroOnMove = false;
+  static constexpr bool kMustZeroOnDestruct = false;
+
+  // Wraps a pointer.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
+    return ptr;
+  }
+
+  // Notifies the allocator when a wrapped pointer is being removed or
+  // replaced.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T*) {}
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function is allowed to crash on nullptr.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
+      T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, while asserting that memory hasn't been freed. The
+  // function must handle nullptr gracefully.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
+      T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer, without making an assertion on whether memory was
+  // freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
+      T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // Upcasts the wrapped pointer.
+  template <typename To, typename From>
+  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
+    static_assert(std::is_convertible_v<From*, To*>,
+                  "From must be convertible to To.");
+    // Note, this cast may change the address if upcasting to a base class
+    // that lies in the middle of the derived object.
+    return wrapped_ptr;
+  }
+
+  // Advance the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
+    return wrapped_ptr + delta_elems;
+  }
+
+  // Retreat the wrapped pointer by `delta_elems`.
+  template <
+      typename T,
+      typename Z,
+      typename =
+          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
+  PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
+    return wrapped_ptr - delta_elems;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
+                                                            T* wrapped_ptr2) {
+    return wrapped_ptr1 - wrapped_ptr2;
+  }
+
+  // Returns a copy of a wrapped pointer, without making an assertion on
+  // whether memory was freed or not.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
+  // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
+    return ptr;
+  }
+
+  template <typename T>
+  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
+      T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // This is for accounting only, used by unit tests.
+  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
+  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
+};
+
+}  // namespace base::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_NOOP_IMPL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_test_support.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_test_support.h
new file mode 100644
index 0000000..c412281
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_test_support.h
@@ -0,0 +1,80 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_TEST_SUPPORT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_TEST_SUPPORT_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+// Struct intended to be used with designated initializers and passed
+// to the `CountersMatch()` matcher.
+//
+// TODO(tsepez): Although we only want one kind of these, the class is still
+// a template to circumvent the chromium-style out-of-line constructor rule.
+// Adding such a constructor would make this no longer be an aggregate and
+// that would prohibit designated initializers.
+template <int IGNORE>
+struct CountingRawPtrExpectationTemplate {
+  absl::optional<int> wrap_raw_ptr_cnt;
+  absl::optional<int> release_wrapped_ptr_cnt;
+  absl::optional<int> get_for_dereference_cnt;
+  absl::optional<int> get_for_extraction_cnt;
+  absl::optional<int> get_for_comparison_cnt;
+  absl::optional<int> wrapped_ptr_swap_cnt;
+  absl::optional<int> wrapped_ptr_less_cnt;
+  absl::optional<int> pointer_to_member_operator_cnt;
+  absl::optional<int> wrap_raw_ptr_for_dup_cnt;
+  absl::optional<int> get_for_duplication_cnt;
+};
+using CountingRawPtrExpectations = CountingRawPtrExpectationTemplate<0>;
+
+#define REPORT_UNEQUAL_RAW_PTR_COUNTER(member_name)                          \
+  {                                                                          \
+    if (arg.member_name.has_value() &&                                       \
+        arg.member_name.value() !=                                           \
+            base::test::RawPtrCountingImplForTest::member_name) {            \
+      *result_listener << "Expected `" #member_name "` to be "               \
+                       << arg.member_name.value() << " but got "             \
+                       << base::test::RawPtrCountingImplForTest::member_name \
+                       << "; ";                                              \
+      result = false;                                                        \
+    }                                                                        \
+  }
+
+#define REPORT_UNEQUAL_RAW_PTR_COUNTERS(result)                    \
+  {                                                                \
+    result = true;                                                 \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_cnt)               \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(release_wrapped_ptr_cnt)        \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_dereference_cnt)        \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_extraction_cnt)         \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_comparison_cnt)         \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_swap_cnt)           \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_less_cnt)           \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(pointer_to_member_operator_cnt) \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_for_dup_cnt)       \
+    REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_duplication_cnt)        \
+  }
+
+// Matcher used with `CountingRawPtr`. Provides slightly shorter
+// boilerplate for verifying counts. This inner function is detached
+// from the `MATCHER`.
+inline bool CountersMatchImpl(const CountingRawPtrExpectations& arg,
+                              testing::MatchResultListener* result_listener) {
+  bool result = true;
+  REPORT_UNEQUAL_RAW_PTR_COUNTERS(result);
+  return result;
+}
+
+// Implicit `arg` has type `CountingRawPtrExpectations`, specialized for
+// the specific counting impl.
+MATCHER(CountersMatch, "counting impl has specified counters") {
+  return CountersMatchImpl(arg, result_listener);
+}
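+
+// A minimal usage sketch (mirrors raw_ptr_unittest.cc; `CountingRawPtr` is a
+// raw_ptr alias using `RawPtrTraits::kUseCountingImplForTest`, and `some_int`
+// is assumed to be an int in scope). Counters left unset in the expectations
+// struct are ignored by the matcher:
+//
+//   base::test::RawPtrCountingImplForTest::ClearCounters();
+//   CountingRawPtr<int> ptr = &some_int;
+//   EXPECT_EQ(*ptr, some_int);
+//   EXPECT_THAT((CountingRawPtrExpectations{
+//                   .wrap_raw_ptr_cnt = 1,
+//                   .get_for_dereference_cnt = 1,
+//               }),
+//               CountersMatch());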
+
+#undef REPORT_UNEQUAL_RAW_PTR_COUNTERS
+#undef REPORT_UNEQUAL_RAW_PTR_COUNTER
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_PTR_TEST_SUPPORT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.cc
new file mode 100644
index 0000000..eb449b4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.cc
@@ -0,0 +1,2546 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
+
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <thread>
+#include <type_traits>
+#include <utility>
+
+#include "base/allocator/partition_alloc_features.h"
+#include "base/allocator/partition_alloc_support.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/chromeos_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_counting_impl_for_test.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_test_support.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/cpu.h"
+#include "base/cxx20_to_address.h"
+#include "base/logging.h"
+#include "base/memory/raw_ptr_asan_service.h"
+#include "base/task/thread_pool.h"
+#include "base/test/bind.h"
+#include "base/test/gtest_util.h"
+#include "base/test/memory/dangling_ptr_instrumentation.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/task_environment.h"
+#include "build/build_config.h"
+#include "build/buildflag.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#include <sanitizer/asan_interface.h>
+#include "base/debug/asan_service.h"
+#endif
+
+using testing::AllOf;
+using testing::HasSubstr;
+using testing::Test;
+
+static_assert(sizeof(raw_ptr<void>) == sizeof(void*),
+              "raw_ptr shouldn't add memory overhead");
+static_assert(sizeof(raw_ptr<int>) == sizeof(int*),
+              "raw_ptr shouldn't add memory overhead");
+static_assert(sizeof(raw_ptr<std::string>) == sizeof(std::string*),
+              "raw_ptr shouldn't add memory overhead");
+
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&                            \
+    !BUILDFLAG(USE_ASAN_UNOWNED_PTR) && !BUILDFLAG(USE_HOOKABLE_RAW_PTR) && \
+    !BUILDFLAG(RAW_PTR_ZERO_ON_MOVE) && !BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+// |is_trivially_copyable| assertion means that arrays/vectors of raw_ptr can
+// be copied by memcpy.
+static_assert(std::is_trivially_copyable_v<raw_ptr<void>>,
+              "raw_ptr should be trivially copyable");
+static_assert(std::is_trivially_copyable_v<raw_ptr<int>>,
+              "raw_ptr should be trivially copyable");
+static_assert(std::is_trivially_copyable_v<raw_ptr<std::string>>,
+              "raw_ptr should be trivially copyable");
+#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
+        // !BUILDFLAG(USE_ASAN_UNOWNED_PTR) &&
+        // !BUILDFLAG(USE_HOOKABLE_RAW_PTR) &&
+        // !BUILDFLAG(RAW_PTR_ZERO_ON_MOVE) &&
+        // !BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&                            \
+    !BUILDFLAG(USE_ASAN_UNOWNED_PTR) && !BUILDFLAG(USE_HOOKABLE_RAW_PTR) && \
+    !BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT) &&                                \
+    !BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+// |is_trivially_default_constructible| assertion helps retain implicit default
+// constructors when raw_ptr is used as a union field.  Example of an error
+// if this assertion didn't hold:
+//
+//     ../../base/trace_event/trace_arguments.h:249:16: error: call to
+//     implicitly-deleted default constructor of 'base::trace_event::TraceValue'
+//         TraceValue ret;
+//                    ^
+//     ../../base/trace_event/trace_arguments.h:211:26: note: default
+//     constructor of 'TraceValue' is implicitly deleted because variant field
+//     'as_pointer' has a non-trivial default constructor
+//       raw_ptr<const void> as_pointer;
+static_assert(std::is_trivially_default_constructible_v<raw_ptr<void>>,
+              "raw_ptr should be trivially default constructible");
+static_assert(std::is_trivially_default_constructible_v<raw_ptr<int>>,
+              "raw_ptr should be trivially default constructible");
+static_assert(std::is_trivially_default_constructible_v<raw_ptr<std::string>>,
+              "raw_ptr should be trivially default constructible");
+#endif  // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
+        // !BUILDFLAG(USE_ASAN_UNOWNED_PTR) &&
+        // !BUILDFLAG(USE_HOOKABLE_RAW_PTR) &&
+        // !BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT) &&
+        // !BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+
+// Verify that raw_ptr is a literal type, and its entire interface is constexpr.
+//
+// Constexpr destructors were introduced in C++20. PartitionAlloc's minimum
+// supported C++ version is C++17, so raw_ptr is not a literal type in C++17.
+// Thus we only test for constexpr in C++20.
+#if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
+static_assert([]() constexpr {
+  struct IntBase {};
+  struct Int : public IntBase {
+    int i = 0;
+  };
+
+  Int* i = new Int();
+  {
+    raw_ptr<Int> r(i);              // raw_ptr(T*)
+    raw_ptr<Int> r2(r);             // raw_ptr(const raw_ptr&)
+    raw_ptr<Int> r3(std::move(r));  // raw_ptr(raw_ptr&&)
+    r = r2;                         // operator=(const raw_ptr&)
+    r = std::move(r3);              // operator=(raw_ptr&&)
+    raw_ptr<Int, base::RawPtrTraits::kMayDangle> r4(
+        r);   // raw_ptr(const raw_ptr<DifferentTraits>&)
+    r4 = r2;  // operator=(const raw_ptr<DifferentTraits>&)
+    // (There is no move-version of DifferentTraits.)
+    [[maybe_unused]] raw_ptr<IntBase> r5(
+        r2);  // raw_ptr(const raw_ptr<Convertible>&)
+    [[maybe_unused]] raw_ptr<IntBase> r6(
+        std::move(r2));  // raw_ptr(raw_ptr<Convertible>&&)
+    r2 = r;              // Reset after move...
+    r5 = r2;             // operator=(const raw_ptr<Convertible>&)
+    r5 = std::move(r2);  // operator=(raw_ptr<Convertible>&&)
+    [[maybe_unused]] raw_ptr<Int> r7(nullptr);  // raw_ptr(nullptr)
+    r4 = nullptr;                               // operator=(nullptr)
+    r4 = i;                                     // operator=(T*)
+    r5 = r4;                                    // operator=(const Upcast&)
+    r5 = std::move(r4);                         // operator=(Upcast&&)
+    r.get()->i += 1;                            // get()
+    [[maybe_unused]] bool b = r;                // operator bool
+    (*r).i += 1;                                // operator*()
+    r->i += 1;                                  // operator->()
+    [[maybe_unused]] Int* i2 = r;               // operator T*()
+    [[maybe_unused]] IntBase* i3 = r;           // operator Convertible*()
+
+    [[maybe_unused]] Int** i4 = &r.AsEphemeralRawAddr();
+    [[maybe_unused]] Int*& i5 = r.AsEphemeralRawAddr();
+
+    Int* array = new Int[3]();
+    {
+      raw_ptr<Int, base::RawPtrTraits::kAllowPtrArithmetic> ra(array);
+      ++ra;      // operator++()
+      --ra;      // operator--()
+      ra++;      // operator++(int)
+      ra--;      // operator--(int)
+      ra += 1u;  // operator+=()
+      ra -= 1u;  // operator-=()
+    }
+    delete[] array;
+  }
+  delete i;
+  return true;
+}());
+#endif
+
+// Don't use base::internal for testing the raw_ptr API, to test if code
+// outside this namespace calls the correct functions from this namespace.
+namespace {
+
+// Shorter name for expected test impl.
+using RawPtrCountingImpl = base::test::RawPtrCountingImplForTest;
+
+template <typename T>
+using CountingRawPtr = raw_ptr<T,
+                               base::RawPtrTraits::kUseCountingImplForTest |
+                                   base::RawPtrTraits::kAllowPtrArithmetic>;
+
+// Ensure that the `kUseCountingImplForTest` flag selects the test impl.
+static_assert(std::is_same_v<CountingRawPtr<int>::Impl, RawPtrCountingImpl>);
+
+template <typename T>
+using CountingRawPtrMayDangle =
+    raw_ptr<T,
+            base::RawPtrTraits::kMayDangle |
+                base::RawPtrTraits::kUseCountingImplForTest |
+                base::RawPtrTraits::kAllowPtrArithmetic>;
+
+// Ensure that the `kUseCountingImplForTest` flag selects the test impl.
+static_assert(
+    std::is_same_v<CountingRawPtrMayDangle<int>::Impl, RawPtrCountingImpl>);
+
+template <typename T>
+using CountingRawPtrUninitialized =
+    raw_ptr<T,
+            base::RawPtrTraits::kUseCountingImplForTest |
+                base::RawPtrTraits::kAllowUninitialized>;
+
+// Ensure that the `kUseCountingImplForTest` flag selects the test impl.
+static_assert(
+    std::is_same_v<CountingRawPtrUninitialized<int>::Impl, RawPtrCountingImpl>);
+
+struct MyStruct {
+  int x;
+};
+
+struct Base1 {
+  explicit Base1(int b1) : b1(b1) {}
+  int b1;
+};
+
+struct Base2 {
+  explicit Base2(int b2) : b2(b2) {}
+  int b2;
+};
+
+struct Derived : Base1, Base2 {
+  Derived(int b1, int b2, int d) : Base1(b1), Base2(b2), d(d) {}
+  int d;
+};
+
+class RawPtrTest : public Test {
+ protected:
+  void SetUp() override {
+    RawPtrCountingImpl::ClearCounters();
+  }
+};
+
+// Use this instead of std::ignore, to prevent the instruction from getting
+// optimized out by the compiler.
+volatile int g_volatile_int_to_ignore;
+
+TEST_F(RawPtrTest, NullStarDereference) {
+  raw_ptr<int> ptr = nullptr;
+  EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = *ptr, "");
+}
+
+TEST_F(RawPtrTest, NullArrowDereference) {
+  raw_ptr<MyStruct> ptr = nullptr;
+  EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = ptr->x, "");
+}
+
+TEST_F(RawPtrTest, NullExtractNoDereference) {
+  CountingRawPtr<int> ptr = nullptr;
+  // No dereference hence shouldn't crash.
+  int* raw = ptr;
+  std::ignore = raw;
+  EXPECT_THAT((CountingRawPtrExpectations{.get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 1,
+                                          .get_for_comparison_cnt = 0}),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, InvalidExtractNoDereference) {
+  // Some code uses invalid pointer values as indicators, so those values must
+  // be accepted by raw_ptr and passed through unchanged during extraction.
+  int* inv_ptr = reinterpret_cast<int*>(~static_cast<uintptr_t>(0));
+  CountingRawPtr<int> ptr = inv_ptr;
+  int* raw = ptr;
+  EXPECT_EQ(raw, inv_ptr);
+  EXPECT_THAT((CountingRawPtrExpectations{.get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 1,
+                                          .get_for_comparison_cnt = 0}),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, NullCmpExplicit) {
+  CountingRawPtr<int> ptr = nullptr;
+  EXPECT_TRUE(ptr == nullptr);
+  EXPECT_TRUE(nullptr == ptr);
+  EXPECT_FALSE(ptr != nullptr);
+  EXPECT_FALSE(nullptr != ptr);
+  // No need to unwrap pointer, just compare against 0.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, NullCmpBool) {
+  CountingRawPtr<int> ptr = nullptr;
+  EXPECT_FALSE(ptr);
+  EXPECT_TRUE(!ptr);
+  // No need to unwrap pointer, just compare against 0.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+void FuncThatAcceptsBool(bool b) {}
+
+bool IsValidNoCast(CountingRawPtr<int> ptr) {
+  return !!ptr;  // !! to avoid implicit cast
+}
+bool IsValidNoCast2(CountingRawPtr<int> ptr) {
+  return ptr && true;
+}
+
+TEST_F(RawPtrTest, BoolOpNotCast) {
+  CountingRawPtr<int> ptr = nullptr;
+  volatile bool is_valid = !!ptr;  // !! to avoid implicit cast
+  is_valid = ptr || is_valid;      // volatile, so won't be optimized
+  if (ptr) {
+    is_valid = true;
+  }
+  [[maybe_unused]] bool is_not_valid = !ptr;
+  if (!ptr) {
+    is_not_valid = true;
+  }
+  std::ignore = IsValidNoCast(ptr);
+  std::ignore = IsValidNoCast2(ptr);
+  FuncThatAcceptsBool(!ptr);
+  // No need to unwrap pointer, just compare against 0.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+bool IsValidWithCast(CountingRawPtr<int> ptr) {
+  return ptr;
+}
+
+// This test is mostly for documentation purposes. It demonstrates cases where
+// |operator T*| is called first and then the pointer is converted to bool,
+// as opposed to calling |operator bool| directly. The former may be more
+// costly, so the caller has to be careful not to trigger this path.
+TEST_F(RawPtrTest, CastNotBoolOp) {
+  CountingRawPtr<int> ptr = nullptr;
+  [[maybe_unused]] bool is_valid = ptr;
+  is_valid = IsValidWithCast(ptr);
+  FuncThatAcceptsBool(ptr);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 3,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, StarDereference) {
+  int foo = 42;
+  CountingRawPtr<int> ptr = &foo;
+  EXPECT_EQ(*ptr, 42);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 1,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, ArrowDereference) {
+  MyStruct foo = {42};
+  CountingRawPtr<MyStruct> ptr = &foo;
+  EXPECT_EQ(ptr->x, 42);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 1,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, Delete) {
+  CountingRawPtr<int> ptr = new int(42);
+  delete ptr.ExtractAsDangling();
+  // The pointer is first internally converted to MayDangle kind, then extracted
+  // using implicit cast before passing to |delete|.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 1,
+                  .get_for_comparison_cnt = 0,
+                  .wrap_raw_ptr_for_dup_cnt = 1,
+                  .get_for_duplication_cnt = 1,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, ClearAndDelete) {
+  CountingRawPtr<int> ptr(new int);
+  ptr.ClearAndDelete();
+
+  // TODO(crbug.com/1346513): clang-format has a difficult time making
+  // sense of preprocessor arms mixed with designated initializers.
+  //
+  // clang-format off
+  EXPECT_THAT((CountingRawPtrExpectations{
+                .wrap_raw_ptr_cnt = 1,
+                .release_wrapped_ptr_cnt = 1,
+                .get_for_dereference_cnt = 0,
+                .get_for_extraction_cnt = 1,
+                .wrapped_ptr_swap_cnt = 0,
+              }),
+              CountersMatch());
+  // clang-format on
+  EXPECT_EQ(ptr.get(), nullptr);
+}
+
+TEST_F(RawPtrTest, ClearAndDeleteArray) {
+  CountingRawPtr<int> ptr(new int[8]);
+  ptr.ClearAndDeleteArray();
+
+  // TODO(crbug.com/1346513): clang-format has a difficult time making
+  // sense of preprocessor arms mixed with designated initializers.
+  //
+  // clang-format off
+  EXPECT_THAT((CountingRawPtrExpectations{
+                .wrap_raw_ptr_cnt = 1,
+                .release_wrapped_ptr_cnt = 1,
+                .get_for_dereference_cnt = 0,
+                .get_for_extraction_cnt = 1,
+                .wrapped_ptr_swap_cnt = 0,
+              }),
+              CountersMatch());
+  // clang-format on
+  EXPECT_EQ(ptr.get(), nullptr);
+}
+
+TEST_F(RawPtrTest, ExtractAsDangling) {
+  CountingRawPtr<int> ptr(new int);
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 1,
+                  .release_wrapped_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .wrapped_ptr_swap_cnt = 0,
+                  .wrap_raw_ptr_for_dup_cnt = 0,
+                  .get_for_duplication_cnt = 0,
+              }),
+              CountersMatch());
+
+  EXPECT_TRUE(ptr.get());
+
+  CountingRawPtrMayDangle<int> dangling = ptr.ExtractAsDangling();
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 1,
+                  .release_wrapped_ptr_cnt = 1,
+                  .get_for_dereference_cnt = 0,
+                  .wrapped_ptr_swap_cnt = 0,
+                  .wrap_raw_ptr_for_dup_cnt = 1,
+                  .get_for_duplication_cnt = 1,
+              }),
+              CountersMatch());
+
+  EXPECT_FALSE(ptr.get());
+  EXPECT_TRUE(dangling.get());
+
+  dangling.ClearAndDelete();
+}
+
+TEST_F(RawPtrTest, ExtractAsDanglingFromDangling) {
+  CountingRawPtrMayDangle<int> ptr(new int);
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 1,
+                  .release_wrapped_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .wrapped_ptr_swap_cnt = 0,
+                  .wrap_raw_ptr_for_dup_cnt = 0,
+                  .get_for_duplication_cnt = 0,
+              }),
+              CountersMatch());
+
+  CountingRawPtrMayDangle<int> dangling = ptr.ExtractAsDangling();
+
+  // wrap_raw_ptr_cnt remains `1` because, as `ptr` is already a dangling
+  // pointer, we are only moving `ptr` to `dangling` here to avoid extra cost.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 1,
+                  .release_wrapped_ptr_cnt = 1,
+                  .get_for_dereference_cnt = 0,
+                  .wrapped_ptr_swap_cnt = 0,
+                  .wrap_raw_ptr_for_dup_cnt = 0,
+                  .get_for_duplication_cnt = 0,
+              }),
+              CountersMatch());
+
+  dangling.ClearAndDelete();
+}
+
+TEST_F(RawPtrTest, ConstVolatileVoidPtr) {
+  int32_t foo[] = {1234567890};
+  CountingRawPtr<const volatile void> ptr = foo;
+  EXPECT_EQ(*static_cast<const volatile int32_t*>(ptr), 1234567890);
+  // Because we're using a cast, the extraction API kicks in, which doesn't
+  // know if the extracted pointer will be dereferenced or not.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 1,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, VoidPtr) {
+  int32_t foo[] = {1234567890};
+  CountingRawPtr<void> ptr = foo;
+  EXPECT_EQ(*static_cast<int32_t*>(ptr), 1234567890);
+  // Because we're using a cast, the extraction API kicks in, which doesn't
+  // know if the extracted pointer will be dereferenced or not.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 1,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, OperatorEQ) {
+  int foo;
+  CountingRawPtr<int> ptr1 = nullptr;
+  EXPECT_TRUE(ptr1 == ptr1);
+
+  CountingRawPtr<int> ptr2 = nullptr;
+  EXPECT_TRUE(ptr1 == ptr2);
+
+  CountingRawPtr<int> ptr3 = &foo;
+  EXPECT_TRUE(&foo == ptr3);
+  EXPECT_TRUE(ptr3 == &foo);
+  EXPECT_FALSE(ptr1 == ptr3);
+
+  ptr1 = &foo;
+  EXPECT_TRUE(ptr1 == ptr3);
+  EXPECT_TRUE(ptr3 == ptr1);
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 12,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, OperatorNE) {
+  int foo;
+  CountingRawPtr<int> ptr1 = nullptr;
+  EXPECT_FALSE(ptr1 != ptr1);
+
+  CountingRawPtr<int> ptr2 = nullptr;
+  EXPECT_FALSE(ptr1 != ptr2);
+
+  CountingRawPtr<int> ptr3 = &foo;
+  EXPECT_FALSE(&foo != ptr3);
+  EXPECT_FALSE(ptr3 != &foo);
+  EXPECT_TRUE(ptr1 != ptr3);
+
+  ptr1 = &foo;
+  EXPECT_FALSE(ptr1 != ptr3);
+  EXPECT_FALSE(ptr3 != ptr1);
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 12,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, OperatorEQCast) {
+  int foo = 42;
+  const int* raw_int_ptr = &foo;
+  volatile void* raw_void_ptr = &foo;
+  CountingRawPtr<volatile int> checked_int_ptr = &foo;
+  CountingRawPtr<const void> checked_void_ptr = &foo;
+  EXPECT_TRUE(checked_int_ptr == checked_int_ptr);
+  EXPECT_TRUE(checked_int_ptr == raw_int_ptr);
+  EXPECT_TRUE(raw_int_ptr == checked_int_ptr);
+  EXPECT_TRUE(checked_void_ptr == checked_void_ptr);
+  EXPECT_TRUE(checked_void_ptr == raw_void_ptr);
+  EXPECT_TRUE(raw_void_ptr == checked_void_ptr);
+  EXPECT_TRUE(checked_int_ptr == checked_void_ptr);
+  EXPECT_TRUE(checked_int_ptr == raw_void_ptr);
+  EXPECT_TRUE(raw_int_ptr == checked_void_ptr);
+  EXPECT_TRUE(checked_void_ptr == checked_int_ptr);
+  EXPECT_TRUE(checked_void_ptr == raw_int_ptr);
+  EXPECT_TRUE(raw_void_ptr == checked_int_ptr);
+  // Make sure that all cases are handled by operator== (faster) and none by the
+  // cast operator (slower).
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 16,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, OperatorEQCastHierarchy) {
+  Derived derived_val(42, 84, 1024);
+  Derived* raw_derived_ptr = &derived_val;
+  const Base1* raw_base1_ptr = &derived_val;
+  volatile Base2* raw_base2_ptr = &derived_val;
+  // Double check the basic understanding of pointers: Even though the numeric
+  // value (i.e. the address) isn't equal, the pointers are still equal. That's
+  // because the conversion from derived to base adjusts the address.
+  // raw_ptr must behave the same, which is checked below.
+  ASSERT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr),
+            reinterpret_cast<uintptr_t>(raw_derived_ptr));
+  ASSERT_TRUE(raw_base2_ptr == raw_derived_ptr);
+
+  CountingRawPtr<const volatile Derived> checked_derived_ptr = &derived_val;
+  CountingRawPtr<volatile Base1> checked_base1_ptr = &derived_val;
+  CountingRawPtr<const Base2> checked_base2_ptr = &derived_val;
+  EXPECT_TRUE(checked_derived_ptr == checked_derived_ptr);
+  EXPECT_TRUE(checked_derived_ptr == raw_derived_ptr);
+  EXPECT_TRUE(raw_derived_ptr == checked_derived_ptr);
+  EXPECT_TRUE(checked_derived_ptr == checked_base1_ptr);
+  EXPECT_TRUE(checked_derived_ptr == raw_base1_ptr);
+  EXPECT_TRUE(raw_derived_ptr == checked_base1_ptr);
+  EXPECT_TRUE(checked_base1_ptr == checked_derived_ptr);
+  EXPECT_TRUE(checked_base1_ptr == raw_derived_ptr);
+  EXPECT_TRUE(raw_base1_ptr == checked_derived_ptr);
+  // |base2_ptr| points to the second base class of |derived|, so will be
+  // located at an offset. While the stored raw uintptr_t values shouldn't match,
+  // ensure that the internal pointer manipulation correctly offsets when
+  // casting up and down the class hierarchy.
+  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
+            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
+  EXPECT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr),
+            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
+  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
+            reinterpret_cast<uintptr_t>(raw_derived_ptr));
+  EXPECT_TRUE(checked_derived_ptr == checked_base2_ptr);
+  EXPECT_TRUE(checked_derived_ptr == raw_base2_ptr);
+  EXPECT_TRUE(raw_derived_ptr == checked_base2_ptr);
+  EXPECT_TRUE(checked_base2_ptr == checked_derived_ptr);
+  EXPECT_TRUE(checked_base2_ptr == raw_derived_ptr);
+  EXPECT_TRUE(raw_base2_ptr == checked_derived_ptr);
+  // Make sure that all cases are handled by operator== (faster) and none by the
+  // cast operator (slower).
+  // The 4 extractions come from .get() checks that compare raw addresses.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 4,
+                  .get_for_comparison_cnt = 20,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, OperatorNECast) {
+  int foo = 42;
+  volatile int* raw_int_ptr = &foo;
+  const void* raw_void_ptr = &foo;
+  CountingRawPtr<const int> checked_int_ptr = &foo;
+  CountingRawPtr<volatile void> checked_void_ptr = &foo;
+  EXPECT_FALSE(checked_int_ptr != checked_int_ptr);
+  EXPECT_FALSE(checked_int_ptr != raw_int_ptr);
+  EXPECT_FALSE(raw_int_ptr != checked_int_ptr);
+  EXPECT_FALSE(checked_void_ptr != checked_void_ptr);
+  EXPECT_FALSE(checked_void_ptr != raw_void_ptr);
+  EXPECT_FALSE(raw_void_ptr != checked_void_ptr);
+  EXPECT_FALSE(checked_int_ptr != checked_void_ptr);
+  EXPECT_FALSE(checked_int_ptr != raw_void_ptr);
+  EXPECT_FALSE(raw_int_ptr != checked_void_ptr);
+  EXPECT_FALSE(checked_void_ptr != checked_int_ptr);
+  EXPECT_FALSE(checked_void_ptr != raw_int_ptr);
+  EXPECT_FALSE(raw_void_ptr != checked_int_ptr);
+  // Make sure that all cases are handled by operator== (faster) and none by the
+  // cast operator (slower).
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 16,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, OperatorNECastHierarchy) {
+  Derived derived_val(42, 84, 1024);
+  const Derived* raw_derived_ptr = &derived_val;
+  volatile Base1* raw_base1_ptr = &derived_val;
+  const Base2* raw_base2_ptr = &derived_val;
+  CountingRawPtr<volatile Derived> checked_derived_ptr = &derived_val;
+  CountingRawPtr<const Base1> checked_base1_ptr = &derived_val;
+  CountingRawPtr<const volatile Base2> checked_base2_ptr = &derived_val;
+  EXPECT_FALSE(checked_derived_ptr != checked_derived_ptr);
+  EXPECT_FALSE(checked_derived_ptr != raw_derived_ptr);
+  EXPECT_FALSE(raw_derived_ptr != checked_derived_ptr);
+  EXPECT_FALSE(checked_derived_ptr != checked_base1_ptr);
+  EXPECT_FALSE(checked_derived_ptr != raw_base1_ptr);
+  EXPECT_FALSE(raw_derived_ptr != checked_base1_ptr);
+  EXPECT_FALSE(checked_base1_ptr != checked_derived_ptr);
+  EXPECT_FALSE(checked_base1_ptr != raw_derived_ptr);
+  EXPECT_FALSE(raw_base1_ptr != checked_derived_ptr);
+  // |base2_ptr| points to the second base class of |derived|, so will be
+  // located at an offset. While the stored raw uintptr_t values shouldn't match,
+  // ensure that the internal pointer manipulation correctly offsets when
+  // casting up and down the class hierarchy.
+  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
+            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
+  EXPECT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr),
+            reinterpret_cast<uintptr_t>(checked_derived_ptr.get()));
+  EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()),
+            reinterpret_cast<uintptr_t>(raw_derived_ptr));
+  EXPECT_FALSE(checked_derived_ptr != checked_base2_ptr);
+  EXPECT_FALSE(checked_derived_ptr != raw_base2_ptr);
+  EXPECT_FALSE(raw_derived_ptr != checked_base2_ptr);
+  EXPECT_FALSE(checked_base2_ptr != checked_derived_ptr);
+  EXPECT_FALSE(checked_base2_ptr != raw_derived_ptr);
+  EXPECT_FALSE(raw_base2_ptr != checked_derived_ptr);
+  // Make sure that all cases are handled by operator== (faster) and none by the
+  // cast operator (slower).
+  // The 4 extractions come from .get() checks that compare raw addresses.
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 4,
+                  .get_for_comparison_cnt = 20,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, Cast) {
+  Derived derived_val(42, 84, 1024);
+  raw_ptr<Derived> checked_derived_ptr = &derived_val;
+  Base1* raw_base1_ptr = checked_derived_ptr;
+  EXPECT_EQ(raw_base1_ptr->b1, 42);
+  Base2* raw_base2_ptr = checked_derived_ptr;
+  EXPECT_EQ(raw_base2_ptr->b2, 84);
+
+  Derived* raw_derived_ptr = static_cast<Derived*>(raw_base1_ptr);
+  EXPECT_EQ(raw_derived_ptr->b1, 42);
+  EXPECT_EQ(raw_derived_ptr->b2, 84);
+  EXPECT_EQ(raw_derived_ptr->d, 1024);
+  raw_derived_ptr = static_cast<Derived*>(raw_base2_ptr);
+  EXPECT_EQ(raw_derived_ptr->b1, 42);
+  EXPECT_EQ(raw_derived_ptr->b2, 84);
+  EXPECT_EQ(raw_derived_ptr->d, 1024);
+
+  raw_ptr<Base1> checked_base1_ptr = raw_derived_ptr;
+  EXPECT_EQ(checked_base1_ptr->b1, 42);
+  raw_ptr<Base2> checked_base2_ptr = raw_derived_ptr;
+  EXPECT_EQ(checked_base2_ptr->b2, 84);
+
+  raw_ptr<Derived> checked_derived_ptr2 =
+      static_cast<Derived*>(checked_base1_ptr);
+  EXPECT_EQ(checked_derived_ptr2->b1, 42);
+  EXPECT_EQ(checked_derived_ptr2->b2, 84);
+  EXPECT_EQ(checked_derived_ptr2->d, 1024);
+  checked_derived_ptr2 = static_cast<Derived*>(checked_base2_ptr);
+  EXPECT_EQ(checked_derived_ptr2->b1, 42);
+  EXPECT_EQ(checked_derived_ptr2->b2, 84);
+  EXPECT_EQ(checked_derived_ptr2->d, 1024);
+
+  const Derived* raw_const_derived_ptr = checked_derived_ptr2;
+  EXPECT_EQ(raw_const_derived_ptr->b1, 42);
+  EXPECT_EQ(raw_const_derived_ptr->b2, 84);
+  EXPECT_EQ(raw_const_derived_ptr->d, 1024);
+
+  raw_ptr<const Derived> checked_const_derived_ptr = raw_const_derived_ptr;
+  EXPECT_EQ(checked_const_derived_ptr->b1, 42);
+  EXPECT_EQ(checked_const_derived_ptr->b2, 84);
+  EXPECT_EQ(checked_const_derived_ptr->d, 1024);
+
+  const Derived* raw_const_derived_ptr2 = checked_const_derived_ptr;
+  EXPECT_EQ(raw_const_derived_ptr2->b1, 42);
+  EXPECT_EQ(raw_const_derived_ptr2->b2, 84);
+  EXPECT_EQ(raw_const_derived_ptr2->d, 1024);
+
+  raw_ptr<const Derived> checked_const_derived_ptr2 = raw_derived_ptr;
+  EXPECT_EQ(checked_const_derived_ptr2->b1, 42);
+  EXPECT_EQ(checked_const_derived_ptr2->b2, 84);
+  EXPECT_EQ(checked_const_derived_ptr2->d, 1024);
+
+  raw_ptr<const Derived> checked_const_derived_ptr3 = checked_derived_ptr2;
+  EXPECT_EQ(checked_const_derived_ptr3->b1, 42);
+  EXPECT_EQ(checked_const_derived_ptr3->b2, 84);
+  EXPECT_EQ(checked_const_derived_ptr3->d, 1024);
+
+  volatile Derived* raw_volatile_derived_ptr = checked_derived_ptr2;
+  EXPECT_EQ(raw_volatile_derived_ptr->b1, 42);
+  EXPECT_EQ(raw_volatile_derived_ptr->b2, 84);
+  EXPECT_EQ(raw_volatile_derived_ptr->d, 1024);
+
+  raw_ptr<volatile Derived> checked_volatile_derived_ptr =
+      raw_volatile_derived_ptr;
+  EXPECT_EQ(checked_volatile_derived_ptr->b1, 42);
+  EXPECT_EQ(checked_volatile_derived_ptr->b2, 84);
+  EXPECT_EQ(checked_volatile_derived_ptr->d, 1024);
+
+  void* raw_void_ptr = checked_derived_ptr;
+  raw_ptr<void> checked_void_ptr = raw_derived_ptr;
+  raw_ptr<Derived> checked_derived_ptr3 = static_cast<Derived*>(raw_void_ptr);
+  raw_ptr<Derived> checked_derived_ptr4 =
+      static_cast<Derived*>(checked_void_ptr);
+  EXPECT_EQ(checked_derived_ptr3->b1, 42);
+  EXPECT_EQ(checked_derived_ptr3->b2, 84);
+  EXPECT_EQ(checked_derived_ptr3->d, 1024);
+  EXPECT_EQ(checked_derived_ptr4->b1, 42);
+  EXPECT_EQ(checked_derived_ptr4->b2, 84);
+  EXPECT_EQ(checked_derived_ptr4->d, 1024);
+}
+
+TEST_F(RawPtrTest, UpcastConvertible) {
+  {
+    Derived derived_val(42, 84, 1024);
+    raw_ptr<Derived> checked_derived_ptr = &derived_val;
+
+    raw_ptr<Base1> checked_base1_ptr(checked_derived_ptr);
+    EXPECT_EQ(checked_base1_ptr->b1, 42);
+    raw_ptr<Base2> checked_base2_ptr(checked_derived_ptr);
+    EXPECT_EQ(checked_base2_ptr->b2, 84);
+
+    checked_base1_ptr = checked_derived_ptr;
+    EXPECT_EQ(checked_base1_ptr->b1, 42);
+    checked_base2_ptr = checked_derived_ptr;
+    EXPECT_EQ(checked_base2_ptr->b2, 84);
+
+    EXPECT_EQ(checked_base1_ptr, checked_derived_ptr);
+    EXPECT_EQ(checked_base2_ptr, checked_derived_ptr);
+  }
+
+  {
+    Derived derived_val(42, 84, 1024);
+    raw_ptr<Derived> checked_derived_ptr1 = &derived_val;
+    raw_ptr<Derived> checked_derived_ptr2 = &derived_val;
+    raw_ptr<Derived> checked_derived_ptr3 = &derived_val;
+    raw_ptr<Derived> checked_derived_ptr4 = &derived_val;
+
+    raw_ptr<Base1> checked_base1_ptr(std::move(checked_derived_ptr1));
+    EXPECT_EQ(checked_base1_ptr->b1, 42);
+    raw_ptr<Base2> checked_base2_ptr(std::move(checked_derived_ptr2));
+    EXPECT_EQ(checked_base2_ptr->b2, 84);
+
+    checked_base1_ptr = std::move(checked_derived_ptr3);
+    EXPECT_EQ(checked_base1_ptr->b1, 42);
+    checked_base2_ptr = std::move(checked_derived_ptr4);
+    EXPECT_EQ(checked_base2_ptr->b2, 84);
+  }
+}
+
+TEST_F(RawPtrTest, UpcastNotConvertible) {
+  class Base {};
+  class Derived : private Base {};
+  class Unrelated {};
+  EXPECT_FALSE((std::is_convertible_v<raw_ptr<Derived>, raw_ptr<Base>>));
+  EXPECT_FALSE((std::is_convertible_v<raw_ptr<Unrelated>, raw_ptr<Base>>));
+  EXPECT_FALSE((std::is_convertible_v<raw_ptr<Unrelated>, raw_ptr<void>>));
+  EXPECT_FALSE((std::is_convertible_v<raw_ptr<void>, raw_ptr<Unrelated>>));
+  EXPECT_FALSE((std::is_convertible_v<raw_ptr<int64_t>, raw_ptr<int32_t>>));
+  EXPECT_FALSE((std::is_convertible_v<raw_ptr<int16_t>, raw_ptr<int32_t>>));
+}
+
+TEST_F(RawPtrTest, UpcastPerformance) {
+  {
+    Derived derived_val(42, 84, 1024);
+    CountingRawPtr<Derived> checked_derived_ptr = &derived_val;
+    CountingRawPtr<Base1> checked_base1_ptr(checked_derived_ptr);
+    CountingRawPtr<Base2> checked_base2_ptr(checked_derived_ptr);
+    checked_base1_ptr = checked_derived_ptr;
+    checked_base2_ptr = checked_derived_ptr;
+  }
+
+  {
+    Derived derived_val(42, 84, 1024);
+    CountingRawPtr<Derived> checked_derived_ptr = &derived_val;
+    CountingRawPtr<Base1> checked_base1_ptr(std::move(checked_derived_ptr));
+    CountingRawPtr<Base2> checked_base2_ptr(std::move(checked_derived_ptr));
+    checked_base1_ptr = std::move(checked_derived_ptr);
+    checked_base2_ptr = std::move(checked_derived_ptr);
+  }
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, CustomSwap) {
+  int foo1, foo2;
+  CountingRawPtr<int> ptr1(&foo1);
+  CountingRawPtr<int> ptr2(&foo2);
+  // Recommended use pattern.
+  using std::swap;
+  swap(ptr1, ptr2);
+  EXPECT_EQ(ptr1.get(), &foo2);
+  EXPECT_EQ(ptr2.get(), &foo1);
+  EXPECT_EQ(RawPtrCountingImpl::wrapped_ptr_swap_cnt, 1);
+}
+
+TEST_F(RawPtrTest, StdSwap) {
+  int foo1, foo2;
+  CountingRawPtr<int> ptr1(&foo1);
+  CountingRawPtr<int> ptr2(&foo2);
+  std::swap(ptr1, ptr2);
+  EXPECT_EQ(ptr1.get(), &foo2);
+  EXPECT_EQ(ptr2.get(), &foo1);
+  EXPECT_EQ(RawPtrCountingImpl::wrapped_ptr_swap_cnt, 0);
+}
+
+TEST_F(RawPtrTest, PostIncrementOperator) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[0];
+  for (int i = 0; i < 4; ++i) {
+    ASSERT_EQ(*ptr++, 42 + i);
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 4,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, PostDecrementOperator) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[3];
+  // Avoid decrementing out of the slot holding the vector's backing store.
+  for (int i = 3; i > 0; --i) {
+    ASSERT_EQ(*ptr--, 42 + i);
+  }
+  ASSERT_EQ(*ptr, 42);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 4,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, PreIncrementOperator) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[0];
+  for (int i = 0; i < 4; ++i, ++ptr) {
+    ASSERT_EQ(*ptr, 42 + i);
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 4,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, PreDecrementOperator) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[3];
+  // Avoid decrementing out of the slot holding the vector's backing store.
+  for (int i = 3; i > 0; --i, --ptr) {
+    ASSERT_EQ(*ptr, 42 + i);
+  }
+  ASSERT_EQ(*ptr, 42);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 4,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, PlusEqualOperator) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[0];
+  for (int i = 0; i < 4; i += 2, ptr += 2) {
+    ASSERT_EQ(*ptr, 42 + i);
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 2,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, PlusEqualOperatorTypes) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[0];
+  ASSERT_EQ(*ptr, 42);
+  ptr += 2;  // Positive literal.
+  ASSERT_EQ(*ptr, 44);
+  ptr -= 2;  // Negative literal.
+  ASSERT_EQ(*ptr, 42);
+  ptr += ptrdiff_t{1};  // ptrdiff_t.
+  ASSERT_EQ(*ptr, 43);
+  ptr += size_t{2};  // size_t.
+  ASSERT_EQ(*ptr, 45);
+}
+
+TEST_F(RawPtrTest, MinusEqualOperator) {
+  std::vector<int> foo({42, 43, 44, 45});
+  CountingRawPtr<int> ptr = &foo[3];
+  ASSERT_EQ(*ptr, 45);
+  ptr -= 2;
+  ASSERT_EQ(*ptr, 43);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 2,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, MinusEqualOperatorTypes) {
+  int foo[] = {42, 43, 44, 45};
+  CountingRawPtr<int> ptr = &foo[3];
+  ASSERT_EQ(*ptr, 45);
+  ptr -= 2;  // Positive literal.
+  ASSERT_EQ(*ptr, 43);
+  ptr -= -2;  // Negative literal.
+  ASSERT_EQ(*ptr, 45);
+  ptr -= ptrdiff_t{2};  // ptrdiff_t.
+  ASSERT_EQ(*ptr, 43);
+  ptr -= size_t{1};  // size_t.
+  ASSERT_EQ(*ptr, 42);
+}
+
+TEST_F(RawPtrTest, PlusOperator) {
+  int foo[] = {42, 43, 44, 45};
+  CountingRawPtr<int> ptr = foo;
+  for (int i = 0; i < 4; ++i) {
+    ASSERT_EQ(*(ptr + i), 42 + i);
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 4,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, MinusOperator) {
+  int foo[] = {42, 43, 44, 45};
+  CountingRawPtr<int> ptr = &foo[4];
+  for (int i = 1; i <= 4; ++i) {
+    ASSERT_EQ(*(ptr - i), 46 - i);
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 4,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, MinusDeltaOperator) {
+  int foo[] = {42, 43, 44, 45};
+  CountingRawPtr<int> ptrs[] = {&foo[0], &foo[1], &foo[2], &foo[3], &foo[4]};
+  for (int i = 0; i <= 4; ++i) {
+    for (int j = 0; j <= 4; ++j) {
+      ASSERT_EQ(ptrs[i] - ptrs[j], i - j);
+      ASSERT_EQ(ptrs[i] - &foo[j], i - j);
+      ASSERT_EQ(&foo[i] - ptrs[j], i - j);
+    }
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, AdvanceString) {
+  const char kChars[] = "Hello";
+  std::string str = kChars;
+  CountingRawPtr<const char> ptr = str.c_str();
+  for (size_t i = 0; i < str.size(); ++i, ++ptr) {
+    ASSERT_EQ(*ptr, kChars[i]);
+  }
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 5,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, AssignmentFromNullptr) {
+  CountingRawPtr<int> wrapped_ptr;
+  wrapped_ptr = nullptr;
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+void FunctionWithRawPtrParameter(raw_ptr<int> actual_ptr, int* expected_ptr) {
+  EXPECT_EQ(actual_ptr.get(), expected_ptr);
+  EXPECT_EQ(*actual_ptr, *expected_ptr);
+}
+
+// This test checks that raw_ptr<T> can be passed by value into function
+// parameters.  This is mostly a smoke test for TRIVIAL_ABI attribute.
+TEST_F(RawPtrTest, FunctionParameters_ImplicitlyMovedTemporary) {
+  int x = 123;
+  FunctionWithRawPtrParameter(
+      raw_ptr<int>(&x),  // Temporary that will be moved into the function.
+      &x);
+}
+
+// This test checks that raw_ptr<T> can be passed by value into function
+// parameters.  This is mostly a smoke test for TRIVIAL_ABI attribute.
+TEST_F(RawPtrTest, FunctionParameters_ExplicitlyMovedLValue) {
+  int x = 123;
+  raw_ptr<int> ptr(&x);
+  FunctionWithRawPtrParameter(std::move(ptr), &x);
+}
+
+// This test checks that raw_ptr<T> can be passed by value into function
+// parameters.  This is mostly a smoke test for TRIVIAL_ABI attribute.
+TEST_F(RawPtrTest, FunctionParameters_Copy) {
+  int x = 123;
+  raw_ptr<int> ptr(&x);
+  FunctionWithRawPtrParameter(ptr,  // `ptr` will be copied into the function.
+                              &x);
+}
+
+TEST_F(RawPtrTest, SetLookupUsesGetForComparison) {
+  int x = 123;
+  CountingRawPtr<int> ptr(&x);
+  std::set<CountingRawPtr<int>> set;
+
+  RawPtrCountingImpl::ClearCounters();
+  set.emplace(&x);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 1,
+                  // Nothing to compare to yet.
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 0,
+                  .wrapped_ptr_less_cnt = 0,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+  set.emplace(ptr);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  // 2 items to compare to => 4 calls.
+                  .get_for_comparison_cnt = 4,
+                  // 1 element to compare to => 2 calls.
+                  .wrapped_ptr_less_cnt = 2,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+  set.count(&x);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  // 2 comparisons => 2 extractions. Less than before, because
+                  // this time a raw pointer is one side of the comparison.
+                  .get_for_comparison_cnt = 2,
+                  // 2 items to compare to => 2 calls.
+                  .wrapped_ptr_less_cnt = 2,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+  set.count(ptr);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  // 2 comparisons => 4 extractions.
+                  .get_for_comparison_cnt = 4,
+                  // 2 items to compare to => 2 calls.
+                  .wrapped_ptr_less_cnt = 2,
+              }),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, ComparisonOperatorUsesGetForComparison) {
+  int x = 123;
+  CountingRawPtr<int> ptr(&x);
+
+  RawPtrCountingImpl::ClearCounters();
+  EXPECT_FALSE(ptr < ptr);
+  EXPECT_FALSE(ptr > ptr);
+  EXPECT_TRUE(ptr <= ptr);
+  EXPECT_TRUE(ptr >= ptr);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 8,
+                  // < is used directly, not std::less().
+                  .wrapped_ptr_less_cnt = 0,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+  EXPECT_FALSE(ptr < &x);
+  EXPECT_FALSE(ptr > &x);
+  EXPECT_TRUE(ptr <= &x);
+  EXPECT_TRUE(ptr >= &x);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 4,
+                  .wrapped_ptr_less_cnt = 0,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+  EXPECT_FALSE(&x < ptr);
+  EXPECT_FALSE(&x > ptr);
+  EXPECT_TRUE(&x <= ptr);
+  EXPECT_TRUE(&x >= ptr);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .wrap_raw_ptr_cnt = 0,
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 4,
+                  .wrapped_ptr_less_cnt = 0,
+              }),
+              CountersMatch());
+}
+
+// Two `raw_ptr`s with different Traits should still hit `GetForComparison()`
+// (as opposed to `GetForExtraction()`) in their comparison operators. We use
+// `CountingRawPtr` and `CountingRawPtrMayDangle` to contrast two different
+// Traits.
+TEST_F(RawPtrTest, OperatorsUseGetForComparison) {
+  int x = 123;
+  CountingRawPtr<int> ptr1 = &x;
+  CountingRawPtrMayDangle<int> ptr2 = &x;
+
+  RawPtrCountingImpl::ClearCounters();
+
+  EXPECT_TRUE(ptr1 == ptr2);
+  EXPECT_FALSE(ptr1 != ptr2);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 4,
+              }),
+              CountersMatch());
+
+  EXPECT_FALSE(ptr1 < ptr2);
+  EXPECT_FALSE(ptr1 > ptr2);
+  EXPECT_TRUE(ptr1 <= ptr2);
+  EXPECT_TRUE(ptr1 >= ptr2);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = 12,
+              }),
+              CountersMatch());
+}
+
+// This test checks how the std library handles collections like
+// std::vector<raw_ptr<T>>.
+//
+// At the time of writing, reallocating std::vector's storage (e.g.
+// when growing the vector) requires calling raw_ptr's destructor on the
+// old storage (after std::move-ing the data to the new storage).  In
+// the future we hope that TRIVIAL_ABI (or [[trivially_relocatable]]
+// proposed by P1144 [1]) will allow memcpy-ing the elements into the
+// new storage (without invoking destructors and move constructors
+// and/or move assignment operators).  At that point, the assert in the
+// test should be modified to capture the new, better behavior.
+//
+// In the meantime, this test serves as a basic correctness test that
+// ensures that raw_ptr<T> stored in a std::vector passes basic smoke
+// tests.
+//
+// [1]
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1144r5.html#wording-attribute
+TEST_F(RawPtrTest, TrivialRelocability) {
+  std::vector<CountingRawPtr<int>> vector;
+  int x = 123;
+
+  // See how many times raw_ptr's destructor is called when std::vector
+  // needs to increase its capacity and reallocate the internal vector
+  // storage (moving the raw_ptr elements).
+  RawPtrCountingImpl::ClearCounters();
+  size_t number_of_capacity_changes = 0;
+  do {
+    size_t previous_capacity = vector.capacity();
+    while (vector.capacity() == previous_capacity) {
+      vector.emplace_back(&x);
+    }
+    number_of_capacity_changes++;
+  } while (number_of_capacity_changes < 10);
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||                           \
+    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
+    BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+  // TODO(lukasza): In the future (once C++ language and std library
+  // support custom trivially relocatable objects) this #if branch can
+  // be removed (keeping only the right long-term expectation from the
+  // #else branch).
+  EXPECT_NE(0, RawPtrCountingImpl::release_wrapped_ptr_cnt);
+#else
+  // This is the right long-term expectation.
+  //
+  // (This EXPECT_EQ assertion is slightly misleading when NoOpImpl is used,
+  // because, unless zeroing is requested, it forces raw_ptr<> to use a default
+  // destructor that doesn't go through RawPtrCountingImpl::ReleaseWrappedPtr,
+  // so we can't really depend on `release_wrapped_ptr_cnt`. Nevertheless, the
+  // spirit of the EXPECT_EQ is correct, and the assertion should be true in
+  // the long term.)
+  EXPECT_EQ(0, RawPtrCountingImpl::release_wrapped_ptr_cnt);
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) ||
+        // BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
+        // BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+
+  // Basic smoke test that raw_ptr elements in a vector work okay.
+  for (const auto& elem : vector) {
+    EXPECT_EQ(elem.get(), &x);
+    EXPECT_EQ(*elem, x);
+  }
+
+  // Verification that release_wrapped_ptr_cnt does capture how many times the
+  // destructors are called (e.g. that it is not always zero).
+  RawPtrCountingImpl::ClearCounters();
+  size_t number_of_cleared_elements = vector.size();
+  vector.clear();
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||                           \
+    BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
+    BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+  EXPECT_EQ((int)number_of_cleared_elements,
+            RawPtrCountingImpl::release_wrapped_ptr_cnt);
+#else
+  // TODO(lukasza): NoOpImpl has a default destructor that, unless zeroing is
+  // requested, doesn't go through RawPtrCountingImpl::ReleaseWrappedPtr.  So we
+  // can't really depend on `release_wrapped_ptr_cnt`.  This #else branch
+  // should be deleted once USE_BACKUP_REF_PTR is removed (e.g. once
+  // BackupRefPtr ships to the Stable channel).
+  EXPECT_EQ(0, RawPtrCountingImpl::release_wrapped_ptr_cnt);
+  std::ignore = number_of_cleared_elements;
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_UNOWNED_PTR) ||
+        // BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
+        // BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
+}
+
+struct BaseStruct {
+  explicit BaseStruct(int a) : a(a) {}
+  virtual ~BaseStruct() = default;
+
+  int a;
+};
+
+struct DerivedType1 : public BaseStruct {
+  explicit DerivedType1(int a, int b) : BaseStruct(a), b(b) {}
+  int b;
+};
+
+struct DerivedType2 : public BaseStruct {
+  explicit DerivedType2(int a, int c) : BaseStruct(a), c(c) {}
+  int c;
+};
+
+TEST_F(RawPtrTest, DerivedStructsComparison) {
+  DerivedType1 derived_1(42, 84);
+  raw_ptr<DerivedType1> checked_derived1_ptr = &derived_1;
+  DerivedType2 derived_2(21, 10);
+  raw_ptr<DerivedType2> checked_derived2_ptr = &derived_2;
+
+  // Make sure that comparing a |DerivedType2*| to a |DerivedType1*| casted
+  // as a |BaseStruct*| doesn't cause CFI errors.
+  EXPECT_NE(checked_derived1_ptr,
+            static_cast<BaseStruct*>(checked_derived2_ptr.get()));
+  EXPECT_NE(static_cast<BaseStruct*>(checked_derived1_ptr.get()),
+            checked_derived2_ptr);
+}
+
+class PmfTestBase {
+ public:
+  int MemFunc(char, double) const { return 11; }
+};
+
+class PmfTestDerived : public PmfTestBase {
+ public:
+  using PmfTestBase::MemFunc;
+  int MemFunc(float, double) { return 22; }
+};
+
+TEST_F(RawPtrTest, PointerToMemberFunction) {
+  PmfTestDerived object;
+  int (PmfTestBase::*pmf_base_base)(char, double) const = &PmfTestBase::MemFunc;
+  int (PmfTestDerived::*pmf_derived_base)(char, double) const =
+      &PmfTestDerived::MemFunc;
+  int (PmfTestDerived::*pmf_derived_derived)(float, double) =
+      &PmfTestDerived::MemFunc;
+
+  // Test for `derived_ptr`
+  CountingRawPtr<PmfTestDerived> derived_ptr = &object;
+
+  EXPECT_EQ((derived_ptr->*pmf_base_base)(0, 0), 11);
+  EXPECT_EQ((derived_ptr->*pmf_derived_base)(0, 0), 11);
+  EXPECT_EQ((derived_ptr->*pmf_derived_derived)(0, 0), 22);
+
+  // Test for `derived_ptr_const`
+  const CountingRawPtr<PmfTestDerived> derived_ptr_const = &object;
+
+  EXPECT_EQ((derived_ptr_const->*pmf_base_base)(0, 0), 11);
+  EXPECT_EQ((derived_ptr_const->*pmf_derived_base)(0, 0), 11);
+  EXPECT_EQ((derived_ptr_const->*pmf_derived_derived)(0, 0), 22);
+
+  // Test for `const_derived_ptr`
+  CountingRawPtr<const PmfTestDerived> const_derived_ptr = &object;
+
+  EXPECT_EQ((const_derived_ptr->*pmf_base_base)(0, 0), 11);
+  EXPECT_EQ((const_derived_ptr->*pmf_derived_base)(0, 0), 11);
+  // const_derived_ptr->*pmf_derived_derived is not a const member function,
+  // so it's not possible to test it.
+}
+
+TEST_F(RawPtrTest, WorksWithOptional) {
+  int x = 0;
+  absl::optional<raw_ptr<int>> maybe_int;
+  EXPECT_FALSE(maybe_int.has_value());
+
+  maybe_int = nullptr;
+  ASSERT_TRUE(maybe_int.has_value());
+  EXPECT_EQ(nullptr, maybe_int.value());
+
+  maybe_int = &x;
+  ASSERT_TRUE(maybe_int.has_value());
+  EXPECT_EQ(&x, maybe_int.value());
+}
+
+TEST_F(RawPtrTest, WorksWithVariant) {
+  int x = 100;
+  absl::variant<int, raw_ptr<int>> vary;
+  ASSERT_EQ(0u, vary.index());
+  EXPECT_EQ(0, absl::get<int>(vary));
+
+  vary = x;
+  ASSERT_EQ(0u, vary.index());
+  EXPECT_EQ(100, absl::get<int>(vary));
+
+  vary = nullptr;
+  ASSERT_EQ(1u, vary.index());
+  EXPECT_EQ(nullptr, absl::get<raw_ptr<int>>(vary));
+
+  vary = &x;
+  ASSERT_EQ(1u, vary.index());
+  EXPECT_EQ(&x, absl::get<raw_ptr<int>>(vary));
+}
+
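+// Converting between raw_ptr kinds (here, to a MayDangle pointer) should go
+// through the duplication path (wrap-for-duplication + get-for-duplication)
+// rather than a full re-wrap or extraction; the move falls back to a copy.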
+TEST_F(RawPtrTest, CrossKindConversion) {
+  int x = 123;
+  CountingRawPtr<int> ptr1 = &x;
+
+  RawPtrCountingImpl::ClearCounters();
+
+  CountingRawPtrMayDangle<int> ptr2(ptr1);
+  CountingRawPtrMayDangle<int> ptr3(std::move(ptr1));  // Falls back to copy.
+
+  EXPECT_THAT((CountingRawPtrExpectations{.wrap_raw_ptr_cnt = 0,
+                                          .get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 0,
+                                          .wrap_raw_ptr_for_dup_cnt = 2,
+                                          .get_for_duplication_cnt = 2}),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, CrossKindAssignment) {
+  int x = 123;
+  CountingRawPtr<int> ptr1 = &x;
+
+  RawPtrCountingImpl::ClearCounters();
+
+  CountingRawPtrMayDangle<int> ptr2;
+  CountingRawPtrMayDangle<int> ptr3;
+  ptr2 = ptr1;
+  ptr3 = std::move(ptr1);  // Falls back to copy.
+
+  EXPECT_THAT((CountingRawPtrExpectations{.wrap_raw_ptr_cnt = 0,
+                                          .get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 0,
+                                          .wrap_raw_ptr_for_dup_cnt = 2,
+                                          .get_for_duplication_cnt = 2}),
+              CountersMatch());
+}
+
+// Without the explicitly customized `raw_ptr::to_address()`,
+// `base::to_address()` will use the dereference operator. This is not
+// what we want; this test enforces extraction semantics for
+// `to_address()`.
+TEST_F(RawPtrTest, ToAddressDoesNotDereference) {
+  CountingRawPtr<int> ptr = nullptr;
+  int* raw = base::to_address(ptr);
+  std::ignore = raw;
+  EXPECT_THAT((CountingRawPtrExpectations{.get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 1,
+                                          .get_for_comparison_cnt = 0,
+                                          .get_for_duplication_cnt = 0}),
+              CountersMatch());
+}
+
+TEST_F(RawPtrTest, ToAddressGivesBackRawAddress) {
+  int* raw = nullptr;
+  raw_ptr<int> miracle = raw;
+  EXPECT_EQ(base::to_address(raw), base::to_address(miracle));
+}
+
+void InOutParamFuncWithPointer(int* in, int** out) {
+  *out = in;
+}
+
+TEST_F(RawPtrTest, EphemeralRawAddrPointerPointer) {
+  int v1 = 123;
+  int v2 = 456;
+  raw_ptr<int> ptr = &v1;
+  // Pointer pointer should point to a pointer other than one inside raw_ptr.
+  EXPECT_NE(&ptr.AsEphemeralRawAddr(),
+            reinterpret_cast<int**>(std::addressof(ptr)));
+  // But inner pointer should point to the same address.
+  EXPECT_EQ(*&ptr.AsEphemeralRawAddr(), &v1);
+
+  // Inner pointer can be rewritten via the pointer pointer.
+  *&ptr.AsEphemeralRawAddr() = &v2;
+  EXPECT_EQ(ptr.get(), &v2);
+  InOutParamFuncWithPointer(&v1, &ptr.AsEphemeralRawAddr());
+  EXPECT_EQ(ptr.get(), &v1);
+}
+
+void InOutParamFuncWithReference(int* in, int*& out) {
+  out = in;
+}
+
+TEST_F(RawPtrTest, EphemeralRawAddrPointerReference) {
+  int v1 = 123;
+  int v2 = 456;
+  raw_ptr<int> ptr = &v1;
+  // Pointer reference should refer to a pointer other than one inside raw_ptr.
+  EXPECT_NE(&static_cast<int*&>(ptr.AsEphemeralRawAddr()),
+            reinterpret_cast<int**>(std::addressof(ptr)));
+  // But inner pointer should point to the same address.
+  EXPECT_EQ(static_cast<int*&>(ptr.AsEphemeralRawAddr()), &v1);
+
+  // Inner pointer can be rewritten via the pointer reference.
+  static_cast<int*&>(ptr.AsEphemeralRawAddr()) = &v2;
+  EXPECT_EQ(ptr.get(), &v2);
+  InOutParamFuncWithReference(&v1, ptr.AsEphemeralRawAddr());
+  EXPECT_EQ(ptr.get(), &v1);
+}
+
+TEST_F(RawPtrTest, AllowUninitialized) {
+  constexpr uintptr_t kPattern = 0x12345678;
+  uintptr_t storage = kPattern;
+  // Placement new over stored pattern must not change it.
+  new (&storage) CountingRawPtrUninitialized<int>;
+  EXPECT_EQ(storage, kPattern);
+}
+
+}  // namespace
+
+namespace base::internal {
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
+    !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+void HandleOOM(size_t unused_size) {
+  LOG(FATAL) << "Out of memory";
+}
+
+class BackupRefPtrTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    // TODO(bartekn): Avoid using PartitionAlloc API directly. Switch to
+    // new/delete once PartitionAlloc Everywhere is fully enabled.
+    partition_alloc::PartitionAllocGlobalInit(HandleOOM);
+  }
+
+  partition_alloc::PartitionAllocator allocator_ =
+      partition_alloc::PartitionAllocator(partition_alloc::PartitionOptions{
+          .backup_ref_ptr = partition_alloc::PartitionOptions::kEnabled,
+          .memory_tagging = {
+              .enabled = base::CPU::GetInstanceNoAllocation().has_mte()
+                             ? partition_alloc::PartitionOptions::kEnabled
+                             : partition_alloc::PartitionOptions::kDisabled}});
+};
+
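+// End-to-end check of BRP protection: dereferencing a freed allocation that
+// still has a raw_ptr pointing to it is either trapped or observably
+// poisoned, and the slot is not reused until that raw_ptr is cleared.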
+TEST_F(BackupRefPtrTest, Basic) {
+  base::CPU cpu;
+
+  int* raw_ptr1 =
+      reinterpret_cast<int*>(allocator_.root()->Alloc(sizeof(int), ""));
+  // Use the actual raw_ptr implementation, not a test substitute, to
+  // exercise real PartitionAlloc paths.
+  raw_ptr<int, DisableDanglingPtrDetection> wrapped_ptr1 = raw_ptr1;
+
+  *raw_ptr1 = 42;
+  EXPECT_EQ(*raw_ptr1, *wrapped_ptr1);
+
+  allocator_.root()->Free(raw_ptr1);
+#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+  // In debug builds, the use-after-free should be caught immediately.
+  EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = *wrapped_ptr1, "");
+#else   // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+  if (cpu.has_mte()) {
+    // If the hardware supports MTE, the use-after-free should also be caught.
+    EXPECT_DEATH_IF_SUPPORTED(g_volatile_int_to_ignore = *wrapped_ptr1, "");
+  } else {
+    // The allocation should be poisoned since there's a raw_ptr alive.
+    EXPECT_NE(*wrapped_ptr1, 42);
+  }
+
+  // The allocator should not be able to reuse the slot at this point.
+  void* raw_ptr2 = allocator_.root()->Alloc(sizeof(int), "");
+  EXPECT_NE(partition_alloc::UntagPtr(raw_ptr1),
+            partition_alloc::UntagPtr(raw_ptr2));
+  allocator_.root()->Free(raw_ptr2);
+
+  // When the last reference is released, the slot should become reusable.
+  wrapped_ptr1 = nullptr;
+  void* raw_ptr3 = allocator_.root()->Alloc(sizeof(int), "");
+  EXPECT_EQ(partition_alloc::UntagPtr(raw_ptr1),
+            partition_alloc::UntagPtr(raw_ptr3));
+  allocator_.root()->Free(raw_ptr3);
+#endif  // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+}
+
+TEST_F(BackupRefPtrTest, ZeroSized) {
+  std::vector<raw_ptr<void>> ptrs;
+  // Use a reasonable number of elements to fill up the slot span.
+  for (int i = 0; i < 128 * 1024; ++i) {
+    // Constructing a raw_ptr instance from a zero-sized allocation should
+    // not result in a crash.
+    ptrs.emplace_back(allocator_.root()->Alloc(0));
+  }
+}
+
+TEST_F(BackupRefPtrTest, EndPointer) {
+  // This test requires a fresh partition with an empty free list.
+  // Check multiple size buckets and levels of slot filling.
+  for (int size = 0; size < 1024; size += sizeof(void*)) {
+    // Creating a raw_ptr from an address right past the end of an allocation
+    // should not result in a crash or corrupt the free list.
+    char* raw_ptr1 = reinterpret_cast<char*>(allocator_.root()->Alloc(size));
+    raw_ptr<char, AllowPtrArithmetic> wrapped_ptr = raw_ptr1 + size;
+    wrapped_ptr = nullptr;
+    // We need to make two more allocations to turn the possible free list
+    // corruption into an observable crash.
+    char* raw_ptr2 = reinterpret_cast<char*>(allocator_.root()->Alloc(size));
+    char* raw_ptr3 = reinterpret_cast<char*>(allocator_.root()->Alloc(size));
+
+    // Similarly for operator+=.
+    char* raw_ptr4 = reinterpret_cast<char*>(allocator_.root()->Alloc(size));
+    wrapped_ptr = raw_ptr4;
+    wrapped_ptr += size;
+    wrapped_ptr = nullptr;
+    char* raw_ptr5 = reinterpret_cast<char*>(allocator_.root()->Alloc(size));
+    char* raw_ptr6 = reinterpret_cast<char*>(allocator_.root()->Alloc(size));
+
+    allocator_.root()->Free(raw_ptr1);
+    allocator_.root()->Free(raw_ptr2);
+    allocator_.root()->Free(raw_ptr3);
+    allocator_.root()->Free(raw_ptr4);
+    allocator_.root()->Free(raw_ptr5);
+    allocator_.root()->Free(raw_ptr6);
+  }
+}
+
+TEST_F(BackupRefPtrTest, QuarantinedBytes) {
+  uint64_t* raw_ptr1 = reinterpret_cast<uint64_t*>(
+      allocator_.root()->Alloc(sizeof(uint64_t), ""));
+  raw_ptr<uint64_t, DisableDanglingPtrDetection> wrapped_ptr1 = raw_ptr1;
+  EXPECT_EQ(allocator_.root()->total_size_of_brp_quarantined_bytes.load(
+                std::memory_order_relaxed),
+            0U);
+  EXPECT_EQ(allocator_.root()->total_count_of_brp_quarantined_slots.load(
+                std::memory_order_relaxed),
+            0U);
+
+  // Memory should get quarantined.
+  allocator_.root()->Free(raw_ptr1);
+  EXPECT_GT(allocator_.root()->total_size_of_brp_quarantined_bytes.load(
+                std::memory_order_relaxed),
+            0U);
+  EXPECT_EQ(allocator_.root()->total_count_of_brp_quarantined_slots.load(
+                std::memory_order_relaxed),
+            1U);
+
+  // A non-quarantined free should not affect
+  // total_size_of_brp_quarantined_bytes.
+  void* raw_ptr2 = allocator_.root()->Alloc(sizeof(uint64_t), "");
+  allocator_.root()->Free(raw_ptr2);
+
+  // Freeing quarantined memory should bring the size back down to zero.
+  wrapped_ptr1 = nullptr;
+  EXPECT_EQ(allocator_.root()->total_size_of_brp_quarantined_bytes.load(
+                std::memory_order_relaxed),
+            0U);
+  EXPECT_EQ(allocator_.root()->total_count_of_brp_quarantined_slots.load(
+                std::memory_order_relaxed),
+            0U);
+}
+
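+// Exercises pointer arithmetic (+=, -=, +, -, ++, --) on a raw_ptr within a
+// single allocation of |requested_size| bytes; moving the pointer past either
+// end of the allocation is expected to be caught.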
+void RunBackupRefPtrImplAdvanceTest(
+    partition_alloc::PartitionAllocator& allocator,
+    size_t requested_size) {
+  char* ptr = static_cast<char*>(allocator.root()->Alloc(requested_size));
+  raw_ptr<char, AllowPtrArithmetic> protected_ptr = ptr;
+  protected_ptr += 123;
+  protected_ptr -= 123;
+  protected_ptr = protected_ptr + 123;
+  protected_ptr = protected_ptr - 123;
+  protected_ptr += requested_size / 2;
+  // The end-of-allocation address should not cause an error immediately, but
+  // it may result in the pointer being poisoned.
+  protected_ptr = protected_ptr + requested_size / 2;
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr = ' ', "");
+  protected_ptr -= 1;  // This brings the pointer back within
+                       // bounds, which causes the poison to be removed.
+  *protected_ptr = ' ';
+  protected_ptr += 1;  // Reposition pointer back past end of allocation.
+#endif
+  EXPECT_CHECK_DEATH(protected_ptr = protected_ptr + 1);
+  EXPECT_CHECK_DEATH(protected_ptr += 1);
+  EXPECT_CHECK_DEATH(++protected_ptr);
+
+  // Even though |protected_ptr| is already pointing to the end of the
+  // allocation, assign it explicitly to make sure the underlying implementation
+  // doesn't "switch" to the next slot.
+  protected_ptr = ptr + requested_size;
+  protected_ptr -= requested_size / 2;
+  protected_ptr = protected_ptr - requested_size / 2;
+  EXPECT_CHECK_DEATH(protected_ptr = protected_ptr - 1);
+  EXPECT_CHECK_DEATH(protected_ptr -= 1);
+  EXPECT_CHECK_DEATH(--protected_ptr);
+
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  // An array type that should be more than a third of the allocation size, so
+  // that incrementing a pointer to this type twice makes it point to a region
+  // too small to hold a complete element of this type.
+  typedef int OverThirdArray[200 / sizeof(int)];
+  raw_ptr<OverThirdArray> protected_arr_ptr =
+      reinterpret_cast<OverThirdArray*>(ptr);
+
+  protected_arr_ptr++;
+  **protected_arr_ptr = 4;
+  protected_arr_ptr++;
+  EXPECT_DEATH_IF_SUPPORTED(** protected_arr_ptr = 4, "");
+#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+
+  protected_ptr = nullptr;
+  allocator.root()->Free(ptr);
+}
+
+TEST_F(BackupRefPtrTest, Advance) {
+  // This requires some internal PartitionAlloc knowledge, but for the test to
+  // work well, the allocation + extras have to fill out the entire slot.
+  // That's because PartitionAlloc doesn't know the exact allocation size and
+  // bases the guards on the slot size.
+  //
+  // A power of two is a safe choice for a slot size; it then gets adjusted for
+  // extras.
+  size_t slot_size = 512;
+  size_t requested_size =
+      allocator_.root()->AdjustSizeForExtrasSubtract(slot_size);
+  // Verify that we're indeed filling up the slot.
+  ASSERT_EQ(
+      requested_size,
+      allocator_.root()->AllocationCapacityFromRequestedSize(requested_size));
+  RunBackupRefPtrImplAdvanceTest(allocator_, requested_size);
+
+  // We don't have the same worry for single-slot spans, as PartitionAlloc knows
+  // exactly where the allocation ends.
+  size_t raw_size = 300003;
+  ASSERT_GT(raw_size, partition_alloc::internal::MaxRegularSlotSpanSize());
+  ASSERT_LE(raw_size, partition_alloc::internal::kMaxBucketed);
+  requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(raw_size);
+  RunBackupRefPtrImplAdvanceTest(allocator_, requested_size);
+
+  // Same for direct map.
+  raw_size = 1001001;
+  ASSERT_GT(raw_size, partition_alloc::internal::kMaxBucketed);
+  requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(raw_size);
+  RunBackupRefPtrImplAdvanceTest(allocator_, requested_size);
+}
+
+TEST_F(BackupRefPtrTest, AdvanceAcrossPools) {
+  char array1[1000];
+  char array2[1000];
+
+  char* in_pool_ptr = static_cast<char*>(allocator_.root()->Alloc(123));
+
+  raw_ptr<char, AllowPtrArithmetic> protected_ptr = array1;
+  // Nothing bad happens. Both pointers are outside of the BRP pool, so no
+  // checks are triggered.
+  protected_ptr += (array2 - array1);
+  // A pointer is shifted from outside of the BRP pool into the BRP pool. This
+  // should trigger death.
+  EXPECT_CHECK_DEATH(protected_ptr += (in_pool_ptr - array2));
+
+  protected_ptr = in_pool_ptr;
+  // Same when a pointer is shifted from inside the BRP pool out of it.
+  EXPECT_CHECK_DEATH(protected_ptr += (array1 - in_pool_ptr));
+
+  protected_ptr = nullptr;
+  allocator_.root()->Free(in_pool_ptr);
+}
+
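+// Pointer subtraction between raw_ptrs is allowed within a single allocation
+// (including its one-past-the-end address); with the pointer subtraction
+// check enabled, subtracting pointers from different allocations crashes.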
+TEST_F(BackupRefPtrTest, GetDeltaElems) {
+  size_t requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(512);
+  char* ptr1 = static_cast<char*>(allocator_.root()->Alloc(requested_size));
+  char* ptr2 = static_cast<char*>(allocator_.root()->Alloc(requested_size));
+  ASSERT_LT(ptr1, ptr2);  // There should be a ref-count between slots.
+  raw_ptr<char> protected_ptr1 = ptr1;
+  raw_ptr<char> protected_ptr1_2 = ptr1 + 1;
+  raw_ptr<char> protected_ptr1_3 = ptr1 + requested_size - 1;
+  raw_ptr<char> protected_ptr1_4 = ptr1 + requested_size;
+  raw_ptr<char> protected_ptr2 = ptr2;
+  raw_ptr<char> protected_ptr2_2 = ptr2 + 1;
+
+  EXPECT_EQ(protected_ptr1_2 - protected_ptr1, 1);
+  EXPECT_EQ(protected_ptr1 - protected_ptr1_2, -1);
+  EXPECT_EQ(protected_ptr1_3 - protected_ptr1,
+            checked_cast<ptrdiff_t>(requested_size) - 1);
+  EXPECT_EQ(protected_ptr1 - protected_ptr1_3,
+            -checked_cast<ptrdiff_t>(requested_size) + 1);
+  EXPECT_EQ(protected_ptr1_4 - protected_ptr1,
+            checked_cast<ptrdiff_t>(requested_size));
+  EXPECT_EQ(protected_ptr1 - protected_ptr1_4,
+            -checked_cast<ptrdiff_t>(requested_size));
+#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
+  EXPECT_CHECK_DEATH(protected_ptr2 - protected_ptr1);
+  EXPECT_CHECK_DEATH(protected_ptr1 - protected_ptr2);
+  EXPECT_CHECK_DEATH(protected_ptr2 - protected_ptr1_4);
+  EXPECT_CHECK_DEATH(protected_ptr1_4 - protected_ptr2);
+  EXPECT_CHECK_DEATH(protected_ptr2_2 - protected_ptr1);
+  EXPECT_CHECK_DEATH(protected_ptr1 - protected_ptr2_2);
+  EXPECT_CHECK_DEATH(protected_ptr2_2 - protected_ptr1_4);
+  EXPECT_CHECK_DEATH(protected_ptr1_4 - protected_ptr2_2);
+#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
+  EXPECT_EQ(protected_ptr2_2 - protected_ptr2, 1);
+  EXPECT_EQ(protected_ptr2 - protected_ptr2_2, -1);
+
+  protected_ptr1 = nullptr;
+  protected_ptr1_2 = nullptr;
+  protected_ptr1_3 = nullptr;
+  protected_ptr1_4 = nullptr;
+  protected_ptr2 = nullptr;
+  protected_ptr2_2 = nullptr;
+
+  allocator_.root()->Free(ptr1);
+  allocator_.root()->Free(ptr2);
+}
+
+bool IsQuarantineEmpty(partition_alloc::PartitionAllocator& allocator) {
+  return allocator.root()->total_size_of_brp_quarantined_bytes.load(
+             std::memory_order_relaxed) == 0;
+}
+
+struct BoundRawPtrTestHelper {
+  static BoundRawPtrTestHelper* Create(
+      partition_alloc::PartitionAllocator& allocator) {
+    return new (allocator.root()->Alloc(sizeof(BoundRawPtrTestHelper), ""))
+        BoundRawPtrTestHelper(allocator);
+  }
+
+  explicit BoundRawPtrTestHelper(partition_alloc::PartitionAllocator& allocator)
+      : owning_allocator(allocator),
+        once_callback(
+            BindOnce(&BoundRawPtrTestHelper::DeleteItselfAndCheckIfInQuarantine,
+                     Unretained(this))),
+        repeating_callback(BindRepeating(
+            &BoundRawPtrTestHelper::DeleteItselfAndCheckIfInQuarantine,
+            Unretained(this))) {}
+
+  void DeleteItselfAndCheckIfInQuarantine() {
+    auto& allocator = *owning_allocator;
+    EXPECT_TRUE(IsQuarantineEmpty(allocator));
+
+    // Since we use a non-default partition, `delete` has to be simulated.
+    this->~BoundRawPtrTestHelper();
+    allocator.root()->Free(this);
+
+    EXPECT_FALSE(IsQuarantineEmpty(allocator));
+  }
+
+  const raw_ref<partition_alloc::PartitionAllocator> owning_allocator;
+  OnceClosure once_callback;
+  RepeatingClosure repeating_callback;
+};
+
+// Check that bound callback arguments remain protected by BRP for the
+// entire duration of a callback invocation.
+TEST_F(BackupRefPtrTest, Bind) {
+  // This test requires a separate partition; otherwise, unrelated allocations
+  // might interfere with `IsQuarantineEmpty`.
+  auto* object_for_once_callback1 = BoundRawPtrTestHelper::Create(allocator_);
+  std::move(object_for_once_callback1->once_callback).Run();
+  EXPECT_TRUE(IsQuarantineEmpty(allocator_));
+
+  auto* object_for_repeating_callback1 =
+      BoundRawPtrTestHelper::Create(allocator_);
+  std::move(object_for_repeating_callback1->repeating_callback).Run();
+  EXPECT_TRUE(IsQuarantineEmpty(allocator_));
+
+  // `RepeatingCallback` has both lvalue and rvalue versions of `Run`.
+  auto* object_for_repeating_callback2 =
+      BoundRawPtrTestHelper::Create(allocator_);
+  object_for_repeating_callback2->repeating_callback.Run();
+  EXPECT_TRUE(IsQuarantineEmpty(allocator_));
+}
+
+#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
+TEST_F(BackupRefPtrTest, ReinterpretCast) {
+  void* ptr = allocator_.root()->Alloc(16);
+  allocator_.root()->Free(ptr);
+
+  raw_ptr<void>* wrapped_ptr = reinterpret_cast<raw_ptr<void>*>(&ptr);
+  // The reference count cookie check should detect that the allocation has
+  // already been freed.
+  BASE_EXPECT_DEATH(*wrapped_ptr = nullptr, "");
+}
+#endif
+
+namespace {
+
+// Install dangling raw_ptr handlers and restore them when going out of scope.
+class ScopedInstallDanglingRawPtrChecks {
+ public:
+  ScopedInstallDanglingRawPtrChecks() {
+    enabled_feature_list_.InitWithFeaturesAndParameters(
+        {{features::kPartitionAllocDanglingPtr, {{"mode", "crash"}}}},
+        {/* disabled_features */});
+    old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
+    old_dereferenced_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
+    allocator::InstallDanglingRawPtrChecks();
+  }
+  ~ScopedInstallDanglingRawPtrChecks() {
+    partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
+    partition_alloc::SetDanglingRawPtrReleasedFn(old_dereferenced_fn_);
+  }
+
+ private:
+  test::ScopedFeatureList enabled_feature_list_;
+  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
+  partition_alloc::DanglingRawPtrReleasedFn* old_dereferenced_fn_;
+};
+
+}  // namespace
+
+TEST_F(BackupRefPtrTest, RawPtrMayDangle) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  void* ptr = allocator_.root()->Alloc(16);
+  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr = ptr;
+  allocator_.root()->Free(ptr);  // No dangling raw_ptr reported.
+  dangling_ptr = nullptr;        // No dangling raw_ptr reported.
+}
+
+TEST_F(BackupRefPtrTest, RawPtrNotDangling) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  void* ptr = allocator_.root()->Alloc(16);
+  raw_ptr<void> dangling_ptr = ptr;
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
+    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+  BASE_EXPECT_DEATH(
+      {
+        allocator_.root()->Free(ptr);  // Dangling raw_ptr detected.
+        dangling_ptr = nullptr;        // Dangling raw_ptr released.
+      },
+      AllOf(HasSubstr("Detected dangling raw_ptr"),
+            HasSubstr("The memory was freed at:"),
+            HasSubstr("The dangling raw_ptr was released at:")));
+#else
+  allocator_.root()->Free(ptr);
+  dangling_ptr = nullptr;
+#endif
+}
+
+// Check the comparator operators work, even across raw_ptr with different
+// dangling policies.
+TEST_F(BackupRefPtrTest, DanglingPtrComparison) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  void* ptr_1 = allocator_.root()->Alloc(16);
+  void* ptr_2 = allocator_.root()->Alloc(16);
+
+  if (ptr_1 > ptr_2) {
+    std::swap(ptr_1, ptr_2);
+  }
+
+  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr_1 = ptr_1;
+  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr_2 = ptr_2;
+  raw_ptr<void> not_dangling_ptr_1 = ptr_1;
+  raw_ptr<void> not_dangling_ptr_2 = ptr_2;
+
+  EXPECT_EQ(dangling_ptr_1, not_dangling_ptr_1);
+  EXPECT_EQ(dangling_ptr_2, not_dangling_ptr_2);
+  EXPECT_NE(dangling_ptr_1, not_dangling_ptr_2);
+  EXPECT_NE(dangling_ptr_2, not_dangling_ptr_1);
+  EXPECT_LT(dangling_ptr_1, not_dangling_ptr_2);
+  EXPECT_GT(dangling_ptr_2, not_dangling_ptr_1);
+  EXPECT_LT(not_dangling_ptr_1, dangling_ptr_2);
+  EXPECT_GT(not_dangling_ptr_2, dangling_ptr_1);
+
+  not_dangling_ptr_1 = nullptr;
+  not_dangling_ptr_2 = nullptr;
+
+  allocator_.root()->Free(ptr_1);
+  allocator_.root()->Free(ptr_2);
+}
+
+// Check the assignment operator works, even across raw_ptr with different
+// dangling policies (only `not dangling` -> `dangling` direction is supported).
+TEST_F(BackupRefPtrTest, DanglingPtrAssignment) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  void* ptr = allocator_.root()->Alloc(16);
+
+  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr;
+  raw_ptr<void> not_dangling_ptr;
+
+  not_dangling_ptr = ptr;
+  dangling_ptr = not_dangling_ptr;
+  not_dangling_ptr = nullptr;
+
+  allocator_.root()->Free(ptr);
+
+  dangling_ptr = nullptr;
+}
+
+// Check the copy constructor works, even across raw_ptr with different dangling
+// policies (only `not dangling` -> `dangling` direction is supported).
+TEST_F(BackupRefPtrTest, DanglingPtrCopyContructor) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  void* ptr = allocator_.root()->Alloc(16);
+
+  raw_ptr<void> not_dangling_ptr(ptr);
+  raw_ptr<void, DisableDanglingPtrDetection> dangling_ptr(not_dangling_ptr);
+
+  not_dangling_ptr = nullptr;
+  dangling_ptr = nullptr;
+
+  allocator_.root()->Free(ptr);
+}
+
+TEST_F(BackupRefPtrTest, RawPtrExtractAsDangling) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  raw_ptr<int> ptr =
+      static_cast<int*>(allocator_.root()->Alloc(sizeof(int), ""));
+  allocator_.root()->Free(
+      ptr.ExtractAsDangling());  // No dangling raw_ptr reported.
+  EXPECT_EQ(ptr, nullptr);
+}
+
+TEST_F(BackupRefPtrTest, RawPtrDeleteWithoutExtractAsDangling) {
+  ScopedInstallDanglingRawPtrChecks enable_dangling_raw_ptr_checks;
+
+  raw_ptr<int> ptr =
+      static_cast<int*>(allocator_.root()->Alloc(sizeof(int), ""));
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
+    !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+  BASE_EXPECT_DEATH(
+      {
+        allocator_.root()->Free(ptr.get());  // Dangling raw_ptr detected.
+        ptr = nullptr;                       // Dangling raw_ptr released.
+      },
+      AllOf(HasSubstr("Detected dangling raw_ptr"),
+            HasSubstr("The memory was freed at:"),
+            HasSubstr("The dangling raw_ptr was released at:")));
+#else
+  allocator_.root()->Free(ptr.get());
+  ptr = nullptr;
+#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
+        // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+}
+
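+// Verifies that raw_ptr can stand in for a plain pointer in simple "spatial"
+// algorithms (std::generate, element-wise loops), including mixed
+// raw_ptr/plain-pointer comparisons, and checks which unwrap paths each
+// combination takes.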
+TEST_F(BackupRefPtrTest, SpatialAlgoCompat) {
+  size_t slot_size = 512;
+  size_t requested_size =
+      allocator_.root()->AdjustSizeForExtrasSubtract(slot_size);
+  // Verify that we're indeed filling up the slot.
+  ASSERT_EQ(
+      requested_size,
+      allocator_.root()->AllocationCapacityFromRequestedSize(requested_size));
+  size_t requested_elements = requested_size / sizeof(uint32_t);
+
+  uint32_t* ptr =
+      reinterpret_cast<uint32_t*>(allocator_.root()->Alloc(requested_size));
+  uint32_t* ptr_end = ptr + requested_elements;
+
+  CountingRawPtr<uint32_t> protected_ptr = ptr;
+  CountingRawPtr<uint32_t> protected_ptr_end =
+      protected_ptr + requested_elements;
+
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr_end = 1, "");
+#endif
+
+  RawPtrCountingImpl::ClearCounters();
+
+  uint32_t gen_val = 1;
+  std::generate(protected_ptr, protected_ptr_end, [&gen_val]() {
+    gen_val ^= gen_val + 1;
+    return gen_val;
+  });
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = requested_elements,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = (requested_elements + 1) * 2,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+
+  for (CountingRawPtr<uint32_t> protected_ptr_i = protected_ptr;
+       protected_ptr_i < protected_ptr_end; protected_ptr_i++) {
+    *protected_ptr_i ^= *protected_ptr_i + 1;
+  }
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = requested_elements * 2,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = (requested_elements + 1) * 2,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+
+  for (CountingRawPtr<uint32_t> protected_ptr_i = protected_ptr;
+       protected_ptr_i < ptr_end; protected_ptr_i++) {
+    *protected_ptr_i ^= *protected_ptr_i + 1;
+  }
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = requested_elements * 2,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = requested_elements + 1,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+
+  for (uint32_t* ptr_i = ptr; ptr_i < protected_ptr_end; ptr_i++) {
+    *ptr_i ^= *ptr_i + 1;
+  }
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 0,
+                  .get_for_comparison_cnt = requested_elements + 1,
+              }),
+              CountersMatch());
+
+  RawPtrCountingImpl::ClearCounters();
+
+  size_t iter_cnt = 0;
+  for (uint32_t *ptr_i = protected_ptr, *ptr_i_end = protected_ptr_end;
+       ptr_i < ptr_i_end; ptr_i++) {
+    *ptr_i ^= *ptr_i + 1;
+    iter_cnt++;
+  }
+  EXPECT_EQ(iter_cnt, requested_elements);
+
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_dereference_cnt = 0,
+                  .get_for_extraction_cnt = 2,
+                  .get_for_comparison_cnt = 0,
+              }),
+              CountersMatch());
+
+  protected_ptr = nullptr;
+  protected_ptr_end = nullptr;
+  allocator_.root()->Free(ptr);
+}
+
+#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+TEST_F(BackupRefPtrTest, Duplicate) {
+  size_t requested_size = allocator_.root()->AdjustSizeForExtrasSubtract(512);
+  char* ptr = static_cast<char*>(allocator_.root()->Alloc(requested_size));
+  raw_ptr<char> protected_ptr1 = ptr;
+  protected_ptr1 += requested_size;  // Pointer should now be poisoned.
+
+  // Duplicating a poisoned pointer should be allowed.
+  raw_ptr<char> protected_ptr2 = protected_ptr1;
+
+  // The poison bit should be propagated to the duplicate such that the OOB
+  // access is disallowed:
+  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr2 = ' ', "");
+
+  // Assignment from a poisoned pointer should be allowed.
+  raw_ptr<char> protected_ptr3;
+  protected_ptr3 = protected_ptr1;
+
+  // The poison bit should be propagated via the assignment such that the OOB
+  // access is disallowed:
+  EXPECT_DEATH_IF_SUPPORTED(*protected_ptr3 = ' ', "");
+
+  allocator_.root()->Free(ptr);
+}
+#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
+
+#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+TEST_F(BackupRefPtrTest, WriteAfterFree) {
+  constexpr uint64_t kPayload = 0x1234567890ABCDEF;
+
+  raw_ptr<uint64_t, DisableDanglingPtrDetection> ptr =
+      static_cast<uint64_t*>(allocator_.root()->Alloc(sizeof(uint64_t), ""));
+
+  // Now |ptr| should be quarantined.
+  allocator_.root()->Free(ptr);
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        // Write something different from |kQuarantinedByte|.
+        *ptr = kPayload;
+        // Write-after-Free should lead to crash
+        // on |PartitionAllocFreeForRefCounting|.
+        ptr = nullptr;
+      },
+      "");
+}
+#endif  // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
+
+namespace {
+constexpr uint8_t kCustomQuarantineByte = 0xff;
+static_assert(kCustomQuarantineByte !=
+              partition_alloc::internal::kQuarantinedByte);
+
+void CustomQuarantineHook(void* address, size_t size) {
+  partition_alloc::internal::SecureMemset(address, kCustomQuarantineByte, size);
+}
+}  // namespace
+
+TEST_F(BackupRefPtrTest, QuarantineHook) {
+  partition_alloc::PartitionAllocHooks::SetQuarantineOverrideHook(
+      CustomQuarantineHook);
+  uint8_t* native_ptr =
+      static_cast<uint8_t*>(allocator_.root()->Alloc(sizeof(uint8_t), ""));
+  *native_ptr = 0;
+  {
+    raw_ptr<uint8_t, DisableDanglingPtrDetection> smart_ptr = native_ptr;
+
+    allocator_.root()->Free(smart_ptr);
+    // Access the allocation through the native pointer to avoid triggering
+    // dereference checks in debug builds.
+    EXPECT_EQ(*partition_alloc::internal::TagPtr(native_ptr),
+              kCustomQuarantineByte);
+
+    // Leaving the freed slot filled with |kCustomQuarantineByte| can
+    // cause a crash because we have a DCHECK that expects it to be filled with
+    // |kQuarantinedByte|. We need to ensure it is unquarantined before
+    // unregistering the hook.
+  }  // <- unquarantined here
+
+  partition_alloc::PartitionAllocHooks::SetQuarantineOverrideHook(nullptr);
+}
+
+#if BUILDFLAG(PA_IS_CHROMEOS_ASH)
+TEST_F(BackupRefPtrTest, ExperimentalAsh) {
+  const bool feature_enabled_by_default =
+      BackupRefPtrGlobalSettings::IsExperimentalAshEnabled();
+  if (feature_enabled_by_default) {
+    BackupRefPtrGlobalSettings::DisableExperimentalAshForTest();
+  }
+
+  // Allocate a slot so that a slot span doesn't get decommitted from memory
+  // while we allocate/deallocate/access the tested slot below.
+  void* sentinel = allocator_.root()->Alloc(sizeof(unsigned int), "");
+
+  constexpr uint32_t kQuarantined2Bytes =
+      partition_alloc::internal::kQuarantinedByte |
+      (partition_alloc::internal::kQuarantinedByte << 8);
+  constexpr uint32_t kQuarantined4Bytes =
+      kQuarantined2Bytes | (kQuarantined2Bytes << 16);
+
+  // Plain raw_ptr, with BRP for ExperimentalAsh pointer disabled.
+  {
+    raw_ptr<unsigned int, DanglingUntriaged> ptr = static_cast<unsigned int*>(
+        allocator_.root()->Alloc(sizeof(unsigned int), ""));
+    *ptr = 0;
+    allocator_.root()->Free(ptr);
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    EXPECT_DEATH_IF_SUPPORTED(*ptr = 0, "");
+#else
+    EXPECT_EQ(kQuarantined4Bytes, *ptr);
+#endif
+  }
+  // For raw_ptr with ExperimentalAsh, BRP is expected to be off, as it is
+  // enabled independently for these pointers.
+  {
+    raw_ptr<unsigned int, DanglingUntriaged | ExperimentalAsh> ptr =
+        static_cast<unsigned int*>(
+            allocator_.root()->Alloc(sizeof(unsigned int), ""));
+    *ptr = 0;
+    allocator_.root()->Free(ptr);
+    // A tad fragile as a new allocation or free-list pointer may be there, but
+    // highly unlikely it'll match 4 quarantine bytes in a row.
+    EXPECT_NE(kQuarantined4Bytes, *ptr);
+  }
+
+  BackupRefPtrGlobalSettings::EnableExperimentalAsh();
+  // BRP should be on for both types of pointers.
+  {
+    raw_ptr<unsigned int, DanglingUntriaged> ptr = static_cast<unsigned int*>(
+        allocator_.root()->Alloc(sizeof(unsigned int), ""));
+    *ptr = 0;
+    allocator_.root()->Free(ptr);
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    EXPECT_DEATH_IF_SUPPORTED(*ptr = 0, "");
+#else
+    EXPECT_EQ(kQuarantined4Bytes, *ptr);
+#endif
+  }
+  {
+    raw_ptr<unsigned int, DanglingUntriaged | ExperimentalAsh> ptr =
+        static_cast<unsigned int*>(
+            allocator_.root()->Alloc(sizeof(unsigned int), ""));
+    *ptr = 0;
+    allocator_.root()->Free(ptr);
+#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
+    EXPECT_DEATH_IF_SUPPORTED(*ptr = 0, "");
+#else
+    EXPECT_EQ(kQuarantined4Bytes, *ptr);
+#endif
+  }
+
+  allocator_.root()->Free(sentinel);
+
+  // Restore the feature state so that one test doesn't "leak" into the next
+  // one.
+  if (!feature_enabled_by_default) {
+    BackupRefPtrGlobalSettings::DisableExperimentalAshForTest();
+  }
+}
+#endif  // BUILDFLAG(PA_IS_CHROMEOS_ASH)
+
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
+        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+#if BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+
+namespace {
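+// X-macro listing every hooked raw_ptr operation; used below to generate a
+// counter and an adapter function for each one.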
+#define FOR_EACH_RAW_PTR_OPERATION(F) \
+  F(wrap_ptr)                         \
+  F(release_wrapped_ptr)              \
+  F(safely_unwrap_for_dereference)    \
+  F(safely_unwrap_for_extraction)     \
+  F(unsafely_unwrap_for_comparison)   \
+  F(advance)                          \
+  F(duplicate)                        \
+  F(wrap_ptr_for_duplication)         \
+  F(unsafely_unwrap_for_duplication)
+
+// Can't use gMock to count the number of invocations because
+// gMock itself triggers raw_ptr<T> operations.
+struct CountingHooks {
+  void ResetCounts() {
+#define F(name) name##_count = 0;
+    FOR_EACH_RAW_PTR_OPERATION(F)
+#undef F
+  }
+
+  static CountingHooks* Get() {
+    static thread_local CountingHooks instance;
+    return &instance;
+  }
+
+// The adapter method is templated to accept any number of arguments.
+#define F(name)                      \
+  template <typename... T>           \
+  static void name##_adapter(T...) { \
+    Get()->name##_count++;           \
+  }                                  \
+  size_t name##_count = 0;
+  FOR_EACH_RAW_PTR_OPERATION(F)
+#undef F
+};
+
+constexpr RawPtrHooks raw_ptr_hooks{
+#define F(name) .name = CountingHooks::name##_adapter,
+    FOR_EACH_RAW_PTR_OPERATION(F)
+#undef F
+};
+}  // namespace
+
+class HookableRawPtrImplTest : public testing::Test {
+ protected:
+  void SetUp() override { InstallRawPtrHooks(&raw_ptr_hooks); }
+  void TearDown() override { ResetRawPtrHooks(); }
+};
+
+TEST_F(HookableRawPtrImplTest, WrapPtr) {
+  // Can't call `ResetCounts` in `SetUp` because gTest triggers
+  // raw_ptr<T> operations between `SetUp` and the test body.
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    [[maybe_unused]] raw_ptr<int> interesting_ptr = ptr;
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->wrap_ptr_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, ReleaseWrappedPtr) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    [[maybe_unused]] raw_ptr<int> interesting_ptr = ptr;
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->release_wrapped_ptr_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, SafelyUnwrapForDereference) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    raw_ptr<int> interesting_ptr = ptr;
+    *interesting_ptr = 1;
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->safely_unwrap_for_dereference_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, SafelyUnwrapForExtraction) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    raw_ptr<int> interesting_ptr = ptr;
+    ptr = interesting_ptr;
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->safely_unwrap_for_extraction_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, UnsafelyUnwrapForComparison) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    raw_ptr<int> interesting_ptr = ptr;
+    EXPECT_EQ(interesting_ptr, ptr);
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->unsafely_unwrap_for_comparison_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, Advance) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int[10];
+    raw_ptr<int, AllowPtrArithmetic> interesting_ptr = ptr;
+    interesting_ptr += 1;
+    delete[] ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->advance_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, Duplicate) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    raw_ptr<int> interesting_ptr = ptr;
+    raw_ptr<int> interesting_ptr2 = interesting_ptr;
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->duplicate_count, 1u);
+}
+
+TEST_F(HookableRawPtrImplTest, CrossKindCopyConstruction) {
+  CountingHooks::Get()->ResetCounts();
+  {
+    int* ptr = new int;
+    raw_ptr<int> non_dangling_ptr = ptr;
+    raw_ptr<int, RawPtrTraits::kMayDangle> dangling_ptr(non_dangling_ptr);
+    delete ptr;
+  }
+  EXPECT_EQ(CountingHooks::Get()->duplicate_count, 0u);
+  EXPECT_EQ(CountingHooks::Get()->wrap_ptr_for_duplication_count, 1u);
+  EXPECT_EQ(CountingHooks::Get()->unsafely_unwrap_for_duplication_count, 1u);
+}
+
+#endif  // BUILDFLAG(USE_HOOKABLE_RAW_PTR)
+
+TEST(DanglingPtrTest, DetectAndReset) {
+  auto instrumentation = test::DanglingPtrInstrumentation::Create();
+  if (!instrumentation.has_value()) {
+    GTEST_SKIP() << instrumentation.error();
+  }
+
+  auto owned_ptr = std::make_unique<int>(42);
+  raw_ptr<int> dangling_ptr = owned_ptr.get();
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+  owned_ptr.reset();
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+  dangling_ptr = nullptr;
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
+TEST(DanglingPtrTest, DetectAndDestructor) {
+  auto instrumentation = test::DanglingPtrInstrumentation::Create();
+  if (!instrumentation.has_value()) {
+    GTEST_SKIP() << instrumentation.error();
+  }
+
+  auto owned_ptr = std::make_unique<int>(42);
+  {
+    [[maybe_unused]] raw_ptr<int> dangling_ptr = owned_ptr.get();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+    owned_ptr.reset();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+  }
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
+TEST(DanglingPtrTest, DetectResetAndDestructor) {
+  auto instrumentation = test::DanglingPtrInstrumentation::Create();
+  if (!instrumentation.has_value()) {
+    GTEST_SKIP() << instrumentation.error();
+  }
+
+  auto owned_ptr = std::make_unique<int>(42);
+  {
+    raw_ptr<int> dangling_ptr = owned_ptr.get();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+    owned_ptr.reset();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+    dangling_ptr = nullptr;
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+  }
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
+}  // namespace base::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.nc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.nc
new file mode 100644
index 0000000..1b589d1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.nc
@@ -0,0 +1,312 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
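+//
+// Each NCTEST_* block below is compiled on its own and is expected to fail;
+// the bracketed `[r"..."]` comment on its `#if defined(...)` line is the
+// regular expression that the resulting compiler diagnostic must match.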
+
+#include <memory>
+#include <tuple>  // for std::ignore
+#include <type_traits>  // for std::remove_pointer_t
+
+#include "base/functional/bind.h"
+#include "base/functional/callback.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+namespace {
+
+struct Producer {};
+struct DerivedProducer : Producer {};
+struct OtherDerivedProducer : Producer {};
+struct Unrelated {};
+struct DerivedUnrelated : Unrelated {};
+
+#if defined(NCTEST_INVALID_RAW_PTR_TRAIT)  // [r"Unknown raw_ptr trait\(s\)"]
+
+void WontCompile() {
+  constexpr auto InvalidRawPtrTrait = static_cast<base::RawPtrTraits>(-1);
+  raw_ptr<int, InvalidRawPtrTrait> p;
+}
+
+#elif defined(NCTEST_INVALID_RAW_PTR_TRAIT_OF_MANY)  // [r"Unknown raw_ptr trait\(s\)"]
+
+void WontCompile() {
+  constexpr auto InvalidRawPtrTrait = static_cast<base::RawPtrTraits>(-1);
+  raw_ptr<int, DisableDanglingPtrDetection | InvalidRawPtrTrait>
+      p;
+}
+
+#elif defined(NCTEST_AUTO_DOWNCAST)  // [r"no viable conversion from 'raw_ptr<Producer>' to 'raw_ptr<DerivedProducer>'"]
+
+void WontCompile() {
+  Producer f;
+  raw_ptr<Producer> ptr = &f;
+  raw_ptr<DerivedProducer> derived_ptr = ptr;
+}
+
+#elif defined(NCTEST_STATIC_DOWNCAST)  // [r"no matching conversion for static_cast from 'raw_ptr<Producer>' to 'raw_ptr<DerivedProducer>'"]
+
+void WontCompile() {
+  Producer f;
+  raw_ptr<Producer> ptr = &f;
+  raw_ptr<DerivedProducer> derived_ptr =
+      static_cast<raw_ptr<DerivedProducer>>(ptr);
+}
+
+#elif defined(NCTEST_AUTO_REF_DOWNCAST)  // [r"non-const lvalue reference to type 'raw_ptr<DerivedProducer>' cannot bind to a value of unrelated type 'raw_ptr<Producer>'"]
+
+void WontCompile() {
+  Producer f;
+  raw_ptr<Producer> ptr = &f;
+  raw_ptr<DerivedProducer>& derived_ptr = ptr;
+}
+
+#elif defined(NCTEST_STATIC_REF_DOWNCAST)  // [r"non-const lvalue reference to type 'raw_ptr<DerivedProducer>' cannot bind to a value of unrelated type 'raw_ptr<Producer>'"]
+
+void WontCompile() {
+  Producer f;
+  raw_ptr<Producer> ptr = &f;
+  raw_ptr<DerivedProducer>& derived_ptr =
+      static_cast<raw_ptr<DerivedProducer>&>(ptr);
+}
+
+#elif defined(NCTEST_AUTO_DOWNCAST_FROM_RAW) // [r"no viable conversion from 'Producer \*' to 'raw_ptr<DerivedProducer>'"]
+
+void WontCompile() {
+  Producer f;
+  raw_ptr<DerivedProducer> ptr = &f;
+}
+
+#elif defined(NCTEST_UNRELATED_FROM_RAW) // [r"no viable conversion from 'DerivedProducer \*' to 'raw_ptr<Unrelated>'"]
+
+void WontCompile() {
+  DerivedProducer f;
+  raw_ptr<Unrelated> ptr = &f;
+}
+
+#elif defined(NCTEST_UNRELATED_STATIC_FROM_WRAPPED) // [r"static_cast from '\(anonymous namespace\)::DerivedProducer \*' to '\(anonymous namespace\)::Unrelated \*', which are not related by inheritance, is not allowed"]
+
+void WontCompile() {
+  DerivedProducer f;
+  raw_ptr<DerivedProducer> ptr = &f;
+  std::ignore = static_cast<Unrelated*>(ptr);
+}
+
+#elif defined(NCTEST_VOID_DEREFERENCE) // [r"indirection requires pointer operand \('raw_ptr<const void>' invalid\)"]
+
+void WontCompile() {
+  const char foo[] = "42";
+  raw_ptr<const void> ptr = foo;
+  std::ignore = *ptr;
+}
+
+#elif defined(NCTEST_FUNCTION_POINTER) // [r"raw_ptr<T> doesn't work with this kind of pointee type T"]
+
+void WontCompile() {
+  raw_ptr<void(int)> raw_ptr_var;
+  std::ignore = raw_ptr_var.get();
+}
+
+#elif defined(NCTEST_DANGLING_GSL) // [r"object backing the pointer will be destroyed at the end of the full-expression"]
+
+void WontCompile() {
+  [[maybe_unused]] raw_ptr<int> ptr = std::make_unique<int>(2).get();
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_PARAMETER) // [r"base::Bind\(\) target functor has a parameter of type raw_ptr<T>."]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+
+  // Make sure that we are not allowed to bind a function with a raw_ptr<T>
+  // parameter type.
+  auto callback = base::BindOnce(
+      [](raw_ptr<int> ptr) {
+      },
+      ptr);
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_EQUALS_STRUCT) // [r"no viable overloaded '\+='"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  struct {} s;
+  ptr += s;
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_EQUALS_STRUCT) // [r"no viable overloaded '-='"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  struct {} s;
+  ptr -= s;
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_STRUCT) // [r"no viable overloaded '\+='"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  struct {} s;
+  // Note, operator + exists, but it calls += which doesn't.
+  [[maybe_unused]] raw_ptr<int> ptr2 = ptr + s;
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_STRUCT) // [r"no viable overloaded '-='"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  struct {} s;
+  // Note, operator - exists, but it calls -= which doesn't.
+  [[maybe_unused]] raw_ptr<int> ptr2 = ptr - s;
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_EQUALS_UINT64) // [r"no viable overloaded '\+='"]
+
+void WontCompile() {
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  raw_ptr<int> ptr = new int(3);
+  ptr += uint64_t{2};
+#else
+  // Fake error on 64-bit to match the expectation.
+  static_assert(false, "no viable overloaded '+='");
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_EQUALS_UINT64) // [r"no viable overloaded '-='"]
+
+void WontCompile() {
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  raw_ptr<int> ptr = new int(3);
+  ptr -= uint64_t{2};
+#else
+  // Fake error on 64-bit to match the expectation.
+  static_assert(false, "no viable overloaded '-='");
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_PLUS_UINT64) // [r"no viable overloaded '\+='"]
+
+void WontCompile() {
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  raw_ptr<int> ptr = new int(3);
+  // Note, operator + exists, but it calls += which doesn't.
+  [[maybe_unused]] raw_ptr<int> ptr2 = ptr + uint64_t{2};
+#else
+  // Fake error on 64-bit to match the expectation.
+  static_assert(false, "no viable overloaded '+='");
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+}
+
+#elif defined(NCTEST_BINDING_RAW_PTR_DISALLOW_MINUS_UINT64) // [r"no viable overloaded '-='"]
+
+void WontCompile() {
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  raw_ptr<int> ptr = new int(3);
+  // Note, operator - exists, but it calls -= which doesn't.
+  [[maybe_unused]] raw_ptr<int> ptr2 = ptr - uint64_t{2};
+#else
+  // Fake error on 64-bit to match the expectation.
+  static_assert(false, "no viable overloaded '-='");
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+}
+
+#elif defined(NCTEST_CROSS_KIND_CONVERSION_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)0U == \(\(partition_alloc::internal::RawPtrTraits\)1U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
+  [[maybe_unused]] raw_ptr<int> ptr2(ptr);
+}
+
+#elif defined(NCTEST_CROSS_KIND_CONVERSION_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)1U == \(\(partition_alloc::internal::RawPtrTraits\)2048U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
+  [[maybe_unused]] raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2(ptr);
+}
+
+#elif defined(NCTEST_CROSS_KIND_MOVE_CONVERSION_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)0U == \(\(partition_alloc::internal::RawPtrTraits\)1U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
+  [[maybe_unused]] raw_ptr<int> ptr2(std::move(ptr));
+}
+
+#elif defined(NCTEST_CROSS_KIND_MOVE_CONVERSION_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)1U == \(\(partition_alloc::internal::RawPtrTraits\)2048U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
+  [[maybe_unused]] raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2(std::move(ptr));
+}
+
+#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)0U == \(\(partition_alloc::internal::RawPtrTraits\)1U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
+  raw_ptr<int> ptr2;
+  ptr2 = ptr;
+}
+
+#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)1U == \(\(partition_alloc::internal::RawPtrTraits\)2048U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
+  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2;
+  ptr2 = ptr;
+}
+
+#elif defined(NCTEST_CROSS_KIND_MOVE_ASSIGNMENT_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)0U == \(\(partition_alloc::internal::RawPtrTraits\)1U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr = new int(3);
+  raw_ptr<int> ptr2;
+  ptr2 = std::move(ptr);
+}
+
+#elif defined(NCTEST_CROSS_KIND_MOVE_ASSIGNMENT_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)1U == \(\(partition_alloc::internal::RawPtrTraits\)2048U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  raw_ptr<int, base::RawPtrTraits::kDummyForTest> ptr = new int(3);
+  raw_ptr<int, base::RawPtrTraits::kMayDangle> ptr2;
+  ptr2 = std::move(ptr);
+}
+
+// TODO(tsepez): enable once enable_pointer_arithmetic_trait_check=true.
+#elif defined(DISABLED_NCTEST_BAN_PTR_INCREMENT) // [r"static assertion failed due to requirement '.*IsPtrArithmeticAllowed.*'"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  ptr++;
+}
+
+#elif defined(DISABLED_NCTEST_BAN_PTR_DECREMENT) // [r"static assertion failed due to requirement '.*IsPtrArithmeticAllowed.*'"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  ptr--;
+}
+
+#elif defined(DISABLED_NCTEST_BAN_PTR_ADDITION) // [r"static assertion failed due to requirement '.*IsPtrArithmeticAllowed.*'"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  raw_ptr<int> ptr2 = ptr + 1;
+}
+
+#elif defined(DISABLED_NCTEST_BAN_PTR_SUBTRACTION) // [r"static assertion failed due to requirement '.*IsPtrArithmeticAllowed.*'"]
+
+void WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  raw_ptr<int> ptr2 = ptr - 1;
+}
+
+#elif defined(DISABLED_NCTEST_BAN_PTR_INDEX) // [r"static assertion failed due to requirement '.*IsPtrArithmeticAllowed.*'"]
+
+int WontCompile() {
+  raw_ptr<int> ptr = new int(3);
+  return ptr[1];
+}
+
+#endif
+
+}  // namespace
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h
new file mode 100644
index 0000000..7a548fc
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h
@@ -0,0 +1,444 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_REF_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_REF_H_
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/augmentations/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
+
+namespace base {
+
+template <class T, RawPtrTraits Traits>
+class raw_ref;
+
+namespace internal {
+
+template <class T>
+struct is_raw_ref : std::false_type {};
+
+template <class T, RawPtrTraits Traits>
+struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {};
+
+template <class T>
+constexpr inline bool is_raw_ref_v = is_raw_ref<T>::value;
+
+}  // namespace internal
+
+// A smart pointer for a pointer which cannot be null, and which provides
+// Use-after-Free protection in the same ways as raw_ptr. This class acts like a
+// combination of std::reference_wrapper and raw_ptr.
+//
+// See raw_ptr and //base/memory/raw_ptr.md for more details on the
+// Use-after-Free protection.
+//
+// # Use after move
+//
+// The raw_ref type will abort if used after being moved.
+//
+// # Constness
+//
+// Use a `const raw_ref<T>` when the smart pointer should not be able to rebind
+// to a new reference. Use a `const raw_ref<const T>` to do the same for a
+// const reference, which is like `const T&`.
+//
+// Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed
+// independent of the underlying `T`, similar to `std::reference_wrapper`. That
+// means the reference inside it can be moved and reassigned.
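+//
+// A minimal usage sketch (`Widget` is an illustrative type, not part of this
+// header):
+//
+//   struct Widget { int value = 0; };
+//
+//   Widget widget;
+//   raw_ref<Widget> ref(widget);  // Must be bound to a live object.
+//   ref->value = 1;               // operator-> reaches the referent.
+//   Widget copy = *ref;           // operator* dereferences it.
+//
+//   Widget other;
+//   ref = other;                  // A mutable raw_ref can be rebound.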
+template <class T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
+class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
+  // operator* is used with the expectation of GetForExtraction semantics:
+  //
+  // raw_ref<Foo> foo_raw_ref = something;
+  // Foo& foo_ref = *foo_raw_ref;
+  //
+  // The implementation of operator* provides GetForDereference semantics, and
+  // this results in spurious crashes in BRP-ASan builds, so we need to disable
+  // hooks that provide BRP-ASan instrumentation for raw_ref.
+  using Inner = raw_ptr<T, Traits | RawPtrTraits::kDisableHooks>;
+
+  // Some underlying implementations do not clear on move, which produces
+  // inconsistent behaviour. We want consistent behaviour such that using a
+  // raw_ref after move is caught and aborts, so we clear here when the
+  // underlying implementation doesn't. Failure to clear would be indicated by
+  // the related death tests not CHECKing appropriately.
+  static constexpr bool kNeedClearAfterMove = !Inner::kZeroOnMove;
+
+ public:
+  using Impl = typename Inner::Impl;
+
+  // Construct a raw_ref from a pointer, which must not be null.
+  //
+  // This function is safe to use with any pointer, as it will CHECK and
+  // terminate the process if the pointer is null. Do not dereference the
+  // pointer yourself to sidestep this CHECK, as you may be dereferencing null.
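+  //
+  // For example (sketch): `raw_ref<int>::from_ptr(&i)` behaves like
+  // `raw_ref<int>(i)` once the pointer has been checked to be non-null.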
+  PA_ALWAYS_INLINE constexpr static raw_ref from_ptr(T* ptr) noexcept {
+    PA_RAW_PTR_CHECK(ptr);
+    return raw_ref(*ptr);
+  }
+
+  // Construct a raw_ref from a reference.
+  PA_ALWAYS_INLINE constexpr explicit raw_ref(T& p) noexcept
+      : inner_(std::addressof(p)) {}
+
+  // Assign a new reference to the raw_ref, replacing the existing reference.
+  PA_ALWAYS_INLINE constexpr raw_ref& operator=(T& p) noexcept {
+    inner_.operator=(&p);
+    return *this;
+  }
+
+  // Disallow holding references to temporaries.
+  raw_ref(const T&& p) = delete;
+  raw_ref& operator=(const T&& p) = delete;
+
+  PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref& p) noexcept
+      : inner_(p.inner_) {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+  }
+
+  PA_ALWAYS_INLINE constexpr raw_ref(raw_ref&& p) noexcept
+      : inner_(std::move(p.inner_)) {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+    if constexpr (kNeedClearAfterMove) {
+      p.inner_ = nullptr;
+    }
+  }
+
+  PA_ALWAYS_INLINE constexpr raw_ref& operator=(const raw_ref& p) noexcept {
+    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
+    inner_.operator=(p.inner_);
+    return *this;
+  }
+
+  PA_ALWAYS_INLINE constexpr raw_ref& operator=(raw_ref&& p) noexcept {
+    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
+    inner_.operator=(std::move(p.inner_));
+    if constexpr (kNeedClearAfterMove) {
+      p.inner_ = nullptr;
+    }
+    return *this;
+  }
+
+  // Deliberately implicit in order to support implicit upcast.
+  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
+  // allow it.
+  template <class U,
+            RawPtrTraits PassedTraits,
+            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref<U, PassedTraits>& p) noexcept
+      : inner_(p.inner_) {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+  }
+  // Deliberately implicit in order to support implicit upcast.
+  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
+  // allow it.
+  template <class U,
+            RawPtrTraits PassedTraits,
+            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PA_ALWAYS_INLINE constexpr raw_ref(raw_ref<U, PassedTraits>&& p) noexcept
+      : inner_(std::move(p.inner_)) {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+    if constexpr (kNeedClearAfterMove) {
+      p.inner_ = nullptr;
+    }
+  }
+
+  // Upcast assignment
+  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
+  // allow it.
+  template <class U,
+            RawPtrTraits PassedTraits,
+            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
+  PA_ALWAYS_INLINE constexpr raw_ref& operator=(
+      const raw_ref<U, PassedTraits>& p) noexcept {
+    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
+    inner_.operator=(p.inner_);
+    return *this;
+  }
+  // Delegate cross-kind conversion to the inner raw_ptr, which decides when to
+  // allow it.
+  template <class U,
+            RawPtrTraits PassedTraits,
+            class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
+  PA_ALWAYS_INLINE constexpr raw_ref& operator=(
+      raw_ref<U, PassedTraits>&& p) noexcept {
+    PA_RAW_PTR_CHECK(p.inner_);  // Catch use-after-move.
+    inner_.operator=(std::move(p.inner_));
+    if constexpr (kNeedClearAfterMove) {
+      p.inner_ = nullptr;
+    }
+    return *this;
+  }
+
+  PA_ALWAYS_INLINE constexpr T& operator*() const {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+    return inner_.operator*();
+  }
+
+  // This is equivalent to operator*(), except that it provides
+  // GetForExtraction rather than GetForDereference semantics (see raw_ptr.h).
+  // This should be
+  // used in place of operator*() when the memory referred to by the reference
+  // is not immediately going to be accessed.
+  PA_ALWAYS_INLINE constexpr T& get() const {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+    return *inner_.get();
+  }
+
+  PA_ALWAYS_INLINE constexpr T* operator->() const
+      PA_ATTRIBUTE_RETURNS_NONNULL {
+    PA_RAW_PTR_CHECK(inner_);  // Catch use-after-move.
+    return inner_.operator->();
+  }
+
+  // This is used to verify callbacks are not invoked with dangling references.
+  // If the `raw_ref` references a deleted object, it will trigger an error.
+  // Depending on the PartitionAllocUnretainedDanglingPtr feature, this is
+  // either a DumpWithoutCrashing, a crash, or ignored.
+  PA_ALWAYS_INLINE void ReportIfDangling() const noexcept {
+    inner_.ReportIfDangling();
+  }
+
+  PA_ALWAYS_INLINE friend constexpr void swap(raw_ref& lhs,
+                                              raw_ref& rhs) noexcept {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    swap(lhs.inner_, rhs.inner_);
+  }
+
+  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+  friend bool operator==(const raw_ref<U, Traits1>& lhs,
+                         const raw_ref<V, Traits2>& rhs);
+  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+  friend bool operator!=(const raw_ref<U, Traits1>& lhs,
+                         const raw_ref<V, Traits2>& rhs);
+  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+  friend bool operator<(const raw_ref<U, Traits1>& lhs,
+                        const raw_ref<V, Traits2>& rhs);
+  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+  friend bool operator>(const raw_ref<U, Traits1>& lhs,
+                        const raw_ref<V, Traits2>& rhs);
+  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+  friend bool operator<=(const raw_ref<U, Traits1>& lhs,
+                         const raw_ref<V, Traits2>& rhs);
+  template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+  friend bool operator>=(const raw_ref<U, Traits1>& lhs,
+                         const raw_ref<V, Traits2>& rhs);
+
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator==(const raw_ref& lhs, const U& rhs) {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    return lhs.inner_ == &rhs;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator!=(const raw_ref& lhs, const U& rhs) {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    return lhs.inner_ != &rhs;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator<(const raw_ref& lhs, const U& rhs) {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    return lhs.inner_ < &rhs;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator>(const raw_ref& lhs, const U& rhs) {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    return lhs.inner_ > &rhs;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator<=(const raw_ref& lhs, const U& rhs) {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    return lhs.inner_ <= &rhs;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator>=(const raw_ref& lhs, const U& rhs) {
+    PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+    return lhs.inner_ >= &rhs;
+  }
+
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator==(const U& lhs, const raw_ref& rhs) {
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    return &lhs == rhs.inner_;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator!=(const U& lhs, const raw_ref& rhs) {
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    return &lhs != rhs.inner_;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator<(const U& lhs, const raw_ref& rhs) {
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    return &lhs < rhs.inner_;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator>(const U& lhs, const raw_ref& rhs) {
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    return &lhs > rhs.inner_;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator<=(const U& lhs, const raw_ref& rhs) {
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    return &lhs <= rhs.inner_;
+  }
+  template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
+  PA_ALWAYS_INLINE friend bool operator>=(const U& lhs, const raw_ref& rhs) {
+    PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+    return &lhs >= rhs.inner_;
+  }
+
+ private:
+  template <class U, RawPtrTraits R>
+  friend class raw_ref;
+
+  Inner inner_;
+};
+
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
+                                 const raw_ref<V, Traits2>& rhs) {
+  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+  return lhs.inner_ == rhs.inner_;
+}
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
+                                 const raw_ref<V, Traits2>& rhs) {
+  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+  return lhs.inner_ != rhs.inner_;
+}
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
+                                const raw_ref<V, Traits2>& rhs) {
+  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+  return lhs.inner_ < rhs.inner_;
+}
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
+                                const raw_ref<V, Traits2>& rhs) {
+  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+  return lhs.inner_ > rhs.inner_;
+}
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
+                                 const raw_ref<V, Traits2>& rhs) {
+  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+  return lhs.inner_ <= rhs.inner_;
+}
+template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
+PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
+                                 const raw_ref<V, Traits2>& rhs) {
+  PA_RAW_PTR_CHECK(lhs.inner_);  // Catch use-after-move.
+  PA_RAW_PTR_CHECK(rhs.inner_);  // Catch use-after-move.
+  return lhs.inner_ >= rhs.inner_;
+}
+
+// CTAD deduction guide.
+template <class T>
+raw_ref(T&) -> raw_ref<T>;
+template <class T>
+raw_ref(const T&) -> raw_ref<const T>;
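+
+// For example (sketch), with the guides above:
+//   int i = 0;
+//   raw_ref r(i);   // Deduces raw_ref<int>.
+//   const int c = 0;
+//   raw_ref cr(c);  // Deduces raw_ref<const int>.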
+
+// Template helpers for working with raw_ref<T>.
+template <typename T>
+struct IsRawRef : std::false_type {};
+
+template <typename T, RawPtrTraits Traits>
+struct IsRawRef<raw_ref<T, Traits>> : std::true_type {};
+
+template <typename T>
+inline constexpr bool IsRawRefV = IsRawRef<T>::value;
+
+template <typename T>
+struct RemoveRawRef {
+  using type = T;
+};
+
+template <typename T, RawPtrTraits Traits>
+struct RemoveRawRef<raw_ref<T, Traits>> {
+  using type = T;
+};
+
+template <typename T>
+using RemoveRawRefT = typename RemoveRawRef<T>::type;
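+
+// For example (sketch):
+//   static_assert(IsRawRefV<raw_ref<int>>);
+//   static_assert(!IsRawRefV<int*>);
+//   static_assert(std::is_same_v<RemoveRawRefT<raw_ref<int>>, int>);
+//   static_assert(std::is_same_v<RemoveRawRefT<int>, int>);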
+
+}  // namespace base
+
+using base::raw_ref;
+
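+// Converts a reference into a raw_ref with the requested traits, e.g. (sketch)
+// `ToRawRef<base::RawPtrTraits::kMayDangle>(obj)` for some lvalue `obj`.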
+template <base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty, typename T>
+auto ToRawRef(T& ref) {
+  return raw_ref<T, Traits>(ref);
+}
+
+namespace std {
+
+// Override so set/map lookups do not create an extra raw_ref. This also
+// allows plain C++ references to be used for lookup.
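+//
+// For example (sketch, `Widget` being an illustrative type), a set keyed on
+// raw_ref can then be probed with a plain reference, without building a
+// temporary raw_ref:
+//
+//   std::set<raw_ref<Widget>> widgets;
+//   Widget& w = ...;
+//   widgets.find(w);  // Uses the transparent operator() overloads below.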
+template <typename T, base::RawPtrTraits Traits>
+struct less<raw_ref<T, Traits>> {
+  using Impl = typename raw_ref<T, Traits>::Impl;
+  using is_transparent = void;
+
+  bool operator()(const raw_ref<T, Traits>& lhs,
+                  const raw_ref<T, Traits>& rhs) const {
+    Impl::IncrementLessCountForTest();
+    return lhs < rhs;
+  }
+
+  bool operator()(T& lhs, const raw_ref<T, Traits>& rhs) const {
+    Impl::IncrementLessCountForTest();
+    return lhs < rhs;
+  }
+
+  bool operator()(const raw_ref<T, Traits>& lhs, T& rhs) const {
+    Impl::IncrementLessCountForTest();
+    return lhs < rhs;
+  }
+};
+
+// Specialize std::pointer_traits. This is required to obtain the underlying
+// raw pointer in the std::to_address(pointer) overload. Implementing
+// pointer_traits is the standard-blessed way to customize
+// `std::to_address(pointer)` in C++20 [1].
+//
+// [1] https://wg21.link/pointer.traits.optmem
+
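+// For example (sketch), given a `raw_ref<int> ref(i)`, `std::to_address(ref)`
+// yields the underlying `int*` via `pointer_traits::to_address` below.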
+template <typename T, ::base::RawPtrTraits Traits>
+struct pointer_traits<::raw_ref<T, Traits>> {
+  using pointer = ::raw_ref<T, Traits>;
+  using element_type = T;
+  using difference_type = ptrdiff_t;
+
+  template <typename U>
+  using rebind = ::raw_ref<U, Traits>;
+
+  static constexpr pointer pointer_to(element_type& r) noexcept {
+    return pointer(r);
+  }
+
+  static constexpr element_type* to_address(pointer p) noexcept {
+    // `raw_ref::get` is used instead of `raw_ref::operator*`. It provides
+    // GetForExtraction rather than GetForDereference semantics (see
+    // raw_ptr.h). This should be used when we don't know whether the memory
+    // will be accessed.
+    return &(p.get());
+  }
+};
+
+}  // namespace std
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POINTERS_RAW_REF_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.cc
new file mode 100644
index 0000000..5249ea1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.cc
@@ -0,0 +1,991 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h"
+
+#include <functional>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_counting_impl_for_test.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_test_support.h"
+#include "base/memory/raw_ptr.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#include "base/debug/asan_service.h"
+#include "base/memory/raw_ptr_asan_service.h"
+#endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+namespace {
+
+class BaseClass {};
+class SubClass : public BaseClass {};
+
+// raw_ref just wraps a raw_ptr and defers to it for the implementation, so it
+// can't add more data members.
+static_assert(sizeof(raw_ref<int>) == sizeof(raw_ptr<int>));
+
+// Since it can't hold null, raw_ref is not default-constructible.
+static_assert(!std::is_default_constructible_v<raw_ref<int>>);
+static_assert(!std::is_default_constructible_v<raw_ref<const int>>);
+
+// A mutable reference can only be constructed from a mutable lvalue reference.
+static_assert(!std::is_constructible_v<raw_ref<int>, const int>);
+static_assert(!std::is_constructible_v<raw_ref<int>, int>);
+static_assert(!std::is_constructible_v<raw_ref<int>, const int&>);
+static_assert(std::is_constructible_v<raw_ref<int>, int&>);
+static_assert(!std::is_constructible_v<raw_ref<int>, const int*>);
+static_assert(!std::is_constructible_v<raw_ref<int>, int*>);
+static_assert(!std::is_constructible_v<raw_ref<int>, const int&&>);
+static_assert(!std::is_constructible_v<raw_ref<int>, int&&>);
+// Same for assignment.
+static_assert(!std::is_assignable_v<raw_ref<int>, const int>);
+static_assert(!std::is_assignable_v<raw_ref<int>, int>);
+static_assert(!std::is_assignable_v<raw_ref<int>, const int&>);
+static_assert(std::is_assignable_v<raw_ref<int>, int&>);
+static_assert(!std::is_assignable_v<raw_ref<int>, const int*>);
+static_assert(!std::is_assignable_v<raw_ref<int>, int*>);
+static_assert(!std::is_assignable_v<raw_ref<int>, const int&&>);
+static_assert(!std::is_assignable_v<raw_ref<int>, int&&>);
+
+// A const reference can be constructed from a const or mutable lvalue
+// reference.
+static_assert(!std::is_constructible_v<raw_ref<const int>, const int>);
+static_assert(!std::is_constructible_v<raw_ref<const int>, int>);
+static_assert(std::is_constructible_v<raw_ref<const int>, const int&>);
+static_assert(std::is_constructible_v<raw_ref<const int>, int&>);
+static_assert(!std::is_constructible_v<raw_ref<const int>, const int*>);
+static_assert(!std::is_constructible_v<raw_ref<const int>, int*>);
+static_assert(!std::is_constructible_v<raw_ref<const int>, const int&&>);
+static_assert(!std::is_constructible_v<raw_ref<const int>, int&&>);
+// Same for assignment.
+static_assert(!std::is_assignable_v<raw_ref<const int>, const int>);
+static_assert(!std::is_assignable_v<raw_ref<const int>, int>);
+static_assert(std::is_assignable_v<raw_ref<const int>, const int&>);
+static_assert(std::is_assignable_v<raw_ref<const int>, int&>);
+static_assert(!std::is_assignable_v<raw_ref<const int>, const int*>);
+static_assert(!std::is_assignable_v<raw_ref<const int>, int*>);
+static_assert(!std::is_assignable_v<raw_ref<const int>, const int&&>);
+static_assert(!std::is_assignable_v<raw_ref<const int>, int&&>);
+
+// Same trivial operations (or not) as raw_ptr<T>.
+static_assert(std::is_trivially_constructible_v<raw_ref<int>, const int&> ==
+              std::is_trivially_constructible_v<raw_ptr<int>, const int&>);
+static_assert(std::is_trivially_destructible_v<raw_ref<int>> ==
+              std::is_trivially_destructible_v<raw_ptr<int>>);
+// But constructing from another raw_ref must check if it's internally null
+// (which indicates use-after-move).
+static_assert(!std::is_trivially_move_constructible_v<raw_ref<int>>);
+static_assert(!std::is_trivially_move_assignable_v<raw_ref<int>>);
+static_assert(!std::is_trivially_copy_constructible_v<raw_ref<int>>);
+static_assert(!std::is_trivially_copy_assignable_v<raw_ref<int>>);
+
+// A raw_ref can be copied or moved.
+static_assert(std::is_move_constructible_v<raw_ref<int>>);
+static_assert(std::is_copy_constructible_v<raw_ref<int>>);
+static_assert(std::is_move_assignable_v<raw_ref<int>>);
+static_assert(std::is_copy_assignable_v<raw_ref<int>>);
+
+// A SubClass can be converted to a BaseClass.
+static_assert(std::is_constructible_v<raw_ref<BaseClass>, raw_ref<SubClass>>);
+static_assert(
+    std::is_constructible_v<raw_ref<BaseClass>, const raw_ref<SubClass>&>);
+static_assert(std::is_constructible_v<raw_ref<BaseClass>, raw_ref<SubClass>&&>);
+static_assert(std::is_assignable_v<raw_ref<BaseClass>, raw_ref<SubClass>>);
+static_assert(
+    std::is_assignable_v<raw_ref<BaseClass>, const raw_ref<SubClass>&>);
+static_assert(std::is_assignable_v<raw_ref<BaseClass>, raw_ref<SubClass>&&>);
+// A BaseClass can't be implicitly downcast.
+static_assert(!std::is_constructible_v<raw_ref<SubClass>, raw_ref<BaseClass>>);
+static_assert(
+    !std::is_constructible_v<raw_ref<SubClass>, const raw_ref<BaseClass>&>);
+static_assert(
+    !std::is_constructible_v<raw_ref<SubClass>, raw_ref<BaseClass>&&>);
+static_assert(!std::is_assignable_v<raw_ref<SubClass>, raw_ref<BaseClass>>);
+static_assert(
+    !std::is_assignable_v<raw_ref<SubClass>, const raw_ref<BaseClass>&>);
+static_assert(!std::is_assignable_v<raw_ref<SubClass>, raw_ref<BaseClass>&&>);
+
+// A raw_ref<BaseClass> can be constructed directly from a SubClass.
+static_assert(std::is_constructible_v<raw_ref<BaseClass>, SubClass&>);
+static_assert(std::is_assignable_v<raw_ref<BaseClass>, SubClass&>);
+static_assert(std::is_constructible_v<raw_ref<const BaseClass>, SubClass&>);
+static_assert(std::is_assignable_v<raw_ref<const BaseClass>, SubClass&>);
+static_assert(
+    std::is_constructible_v<raw_ref<const BaseClass>, const SubClass&>);
+static_assert(std::is_assignable_v<raw_ref<const BaseClass>, const SubClass&>);
+// But a raw_ref<SubClass> can't be constructed from an implicit downcast from a
+// BaseClass.
+static_assert(!std::is_constructible_v<raw_ref<SubClass>, BaseClass&>);
+static_assert(!std::is_assignable_v<raw_ref<SubClass>, BaseClass&>);
+static_assert(!std::is_constructible_v<raw_ref<const SubClass>, BaseClass&>);
+static_assert(!std::is_assignable_v<raw_ref<const SubClass>, BaseClass&>);
+static_assert(
+    !std::is_constructible_v<raw_ref<const SubClass>, const BaseClass&>);
+static_assert(!std::is_assignable_v<raw_ref<const SubClass>, const BaseClass&>);
+
+// A mutable reference can be converted to const reference.
+static_assert(std::is_constructible_v<raw_ref<const int>, raw_ref<int>>);
+static_assert(std::is_assignable_v<raw_ref<const int>, raw_ref<int>>);
+// A const reference can't be converted to mutable.
+static_assert(!std::is_constructible_v<raw_ref<int>, raw_ref<const int>>);
+static_assert(!std::is_assignable_v<raw_ref<int>, raw_ref<const int>>);
+
+// The deref operator gives the internal reference.
+static_assert(std::is_same_v<int&, decltype(*std::declval<raw_ref<int>>())>);
+static_assert(
+    std::is_same_v<int&, decltype(*std::declval<const raw_ref<int>>())>);
+static_assert(std::is_same_v<int&, decltype(*std::declval<raw_ref<int>&>())>);
+static_assert(
+    std::is_same_v<int&, decltype(*std::declval<const raw_ref<int>&>())>);
+static_assert(std::is_same_v<int&, decltype(*std::declval<raw_ref<int>&&>())>);
+static_assert(
+    std::is_same_v<int&, decltype(*std::declval<const raw_ref<int>&&>())>);
+// A const T is always returned as const.
+static_assert(
+    std::is_same_v<const int&, decltype(*std::declval<raw_ref<const int>>())>);
+
+// The arrow operator gives a (non-null) pointer to the internal reference.
+static_assert(
+    std::is_same_v<int*, decltype(std::declval<raw_ref<int>>().operator->())>);
+static_assert(
+    std::is_same_v<const int*,
+                   decltype(std::declval<raw_ref<const int>>().operator->())>);
+
+// Verify that raw_ref is a literal type, and its entire interface is constexpr.
+//
+// Constexpr destructors were introduced in C++20. PartitionAlloc's minimum
+// supported C++ version is C++17, so raw_ref is not a literal type in C++17.
+// Thus we only test for constexpr in C++20.
+#if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
+static_assert([]() constexpr {
+  struct IntBase {};
+  struct Int : public IntBase {
+    int i = 0;
+  };
+
+  Int* i = new Int();
+  {
+    raw_ref<Int> r(*i);              // raw_ref(T&)
+    r = *i;                          // operator=(T&)
+    raw_ref<Int> r2(r);              // raw_ref(const raw_ref&)
+    raw_ref<Int> r3(std::move(r2));  // raw_ref(raw_ref&&)
+    r2 = r;                          // operator=(const raw_ref&)
+    r3 = std::move(r2);              // operator=(raw_ref&&)
+    r2 = r;                          // Reset after move.
+    [[maybe_unused]] raw_ref<IntBase> r5(
+        r2);  // raw_ref(const raw_ref<Convertible>&)
+    [[maybe_unused]] raw_ref<IntBase> r6(
+        std::move(r2));         // raw_ref(raw_ref<Convertible>&&)
+    r2 = r;                     // Reset after move.
+    r5 = r2;                    // operator=(const raw_ref<Convertible>&)
+    r6 = std::move(r2);         // operator=(raw_ref<Convertible>&&)
+    raw_ref<Int>::from_ptr(i);  // from_ptr(T*)
+    (*r).i += 1;                // operator*()
+    r.get().i += 1;             // get()
+    r->i += 1;                  // operator->()
+    r2 = r;                     // Reset after move.
+    swap(r, r2);                // swap()
+  }
+  delete i;
+  return true;
+}());
+#endif
+
+TEST(RawRef, Construct) {
+  int i = 1;
+  auto r = raw_ref<int>(i);
+  EXPECT_EQ(&*r, &i);
+  auto cr = raw_ref<const int>(i);
+  EXPECT_EQ(&*cr, &i);
+  const int ci = 1;
+  auto cci = raw_ref<const int>(ci);
+  EXPECT_EQ(&*cci, &ci);
+}
+
+TEST(RawRef, CopyConstruct) {
+  {
+    int i = 1;
+    auto r = raw_ref<int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto r2 = raw_ref<int>(r);
+    EXPECT_EQ(&*r2, &i);
+  }
+  {
+    int i = 1;
+    auto r = raw_ref<const int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto r2 = raw_ref<const int>(r);
+    EXPECT_EQ(&*r2, &i);
+  }
+}
+
+TEST(RawRef, MoveConstruct) {
+  {
+    int i = 1;
+    auto r = raw_ref<int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto r2 = raw_ref<int>(std::move(r));
+    EXPECT_EQ(&*r2, &i);
+  }
+  {
+    int i = 1;
+    auto r = raw_ref<const int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto r2 = raw_ref<const int>(std::move(r));
+    EXPECT_EQ(&*r2, &i);
+  }
+}
+
+TEST(RawRef, CopyAssign) {
+  {
+    int i = 1;
+    int j = 2;
+    auto r = raw_ref<int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto rj = raw_ref<int>(j);
+    r = rj;
+    EXPECT_EQ(&*r, &j);
+  }
+  {
+    int i = 1;
+    int j = 2;
+    auto r = raw_ref<const int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto rj = raw_ref<const int>(j);
+    r = rj;
+    EXPECT_EQ(&*r, &j);
+  }
+  {
+    int i = 1;
+    int j = 2;
+    auto r = raw_ref<const int>(i);
+    EXPECT_EQ(&*r, &i);
+    auto rj = raw_ref<int>(j);
+    r = rj;
+    EXPECT_EQ(&*r, &j);
+  }
+}
+
+TEST(RawRef, CopyReassignAfterMove) {
+  int i = 1;
+  int j = 1;
+  auto r = raw_ref<int>(i);
+  auto r2 = std::move(r);
+  r2 = raw_ref<int>(j);
+  // Reassign to the moved-from `r` so it can be used again.
+  r = r2;
+  EXPECT_EQ(&*r, &j);
+}
+
+TEST(RawRef, MoveAssign) {
+  {
+    int i = 1;
+    int j = 2;
+    auto r = raw_ref<int>(i);
+    EXPECT_EQ(&*r, &i);
+    r = raw_ref<int>(j);
+    EXPECT_EQ(&*r, &j);
+  }
+  {
+    int i = 1;
+    int j = 2;
+    auto r = raw_ref<const int>(i);
+    EXPECT_EQ(&*r, &i);
+    r = raw_ref<const int>(j);
+    EXPECT_EQ(&*r, &j);
+  }
+  {
+    int i = 1;
+    int j = 2;
+    auto r = raw_ref<const int>(i);
+    EXPECT_EQ(&*r, &i);
+    r = raw_ref<int>(j);
+    EXPECT_EQ(&*r, &j);
+  }
+}
+
+TEST(RawRef, MoveReassignAfterMove) {
+  int i = 1;
+  int j = 1;
+  auto r = raw_ref<int>(i);
+  auto r2 = std::move(r);
+  // Reassign to the moved-from `r` so it can be used again.
+  r = raw_ref<int>(j);
+  EXPECT_EQ(&*r, &j);
+}
+
+TEST(RawRef, CopyConstructUpCast) {
+  {
+    auto s = SubClass();
+    auto r = raw_ref<SubClass>(s);
+    EXPECT_EQ(&*r, &s);
+    auto r2 = raw_ref<BaseClass>(r);
+    EXPECT_EQ(&*r2, &s);
+  }
+  {
+    auto s = SubClass();
+    auto r = raw_ref<const SubClass>(s);
+    EXPECT_EQ(&*r, &s);
+    auto r2 = raw_ref<const BaseClass>(r);
+    EXPECT_EQ(&*r2, &s);
+  }
+}
+
+TEST(RawRef, MoveConstructUpCast) {
+  {
+    auto s = SubClass();
+    auto r = raw_ref<SubClass>(s);
+    EXPECT_EQ(&*r, &s);
+    auto r2 = raw_ref<BaseClass>(std::move(r));
+    EXPECT_EQ(&*r2, &s);
+  }
+  {
+    auto s = SubClass();
+    auto r = raw_ref<const SubClass>(s);
+    EXPECT_EQ(&*r, &s);
+    auto r2 = raw_ref<const BaseClass>(std::move(r));
+    EXPECT_EQ(&*r2, &s);
+  }
+}
+
+TEST(RawRef, FromPtr) {
+  int i = 42;
+  auto ref = raw_ref<int>::from_ptr(&i);
+  EXPECT_EQ(&i, &*ref);
+}
+
+TEST(RawRef, CopyAssignUpCast) {
+  {
+    auto s = SubClass();
+    auto r = raw_ref<SubClass>(s);
+    auto t = BaseClass();
+    auto rt = raw_ref<BaseClass>(t);
+    rt = r;
+    EXPECT_EQ(&*rt, &s);
+  }
+  {
+    auto s = SubClass();
+    auto r = raw_ref<const SubClass>(s);
+    auto t = BaseClass();
+    auto rt = raw_ref<const BaseClass>(t);
+    rt = r;
+    EXPECT_EQ(&*rt, &s);
+  }
+  {
+    auto s = SubClass();
+    auto r = raw_ref<SubClass>(s);
+    auto t = BaseClass();
+    auto rt = raw_ref<const BaseClass>(t);
+    rt = r;
+    EXPECT_EQ(&*rt, &s);
+  }
+}
+
+TEST(RawRef, MoveAssignUpCast) {
+  {
+    auto s = SubClass();
+    auto r = raw_ref<SubClass>(s);
+    auto t = BaseClass();
+    auto rt = raw_ref<BaseClass>(t);
+    rt = std::move(r);
+    EXPECT_EQ(&*rt, &s);
+  }
+  {
+    auto s = SubClass();
+    auto r = raw_ref<const SubClass>(s);
+    auto t = BaseClass();
+    auto rt = raw_ref<const BaseClass>(t);
+    rt = std::move(r);
+    EXPECT_EQ(&*rt, &s);
+  }
+  {
+    auto s = SubClass();
+    auto r = raw_ref<SubClass>(s);
+    auto t = BaseClass();
+    auto rt = raw_ref<const BaseClass>(t);
+    rt = std::move(r);
+    EXPECT_EQ(&*rt, &s);
+  }
+}
+
+TEST(RawRef, Deref) {
+  int i;
+  auto r = raw_ref<int>(i);
+  EXPECT_EQ(&*r, &i);
+}
+
+TEST(RawRef, Arrow) {
+  int i;
+  auto r = raw_ref<int>(i);
+  EXPECT_EQ(r.operator->(), &i);
+}
+
+TEST(RawRef, Swap) {
+  int i;
+  int j;
+  auto ri = raw_ref<int>(i);
+  auto rj = raw_ref<int>(j);
+  swap(ri, rj);
+  EXPECT_EQ(&*ri, &j);
+  EXPECT_EQ(&*rj, &i);
+}
+
+TEST(RawRef, Equals) {
+  int i = 1;
+  auto r1 = raw_ref<int>(i);
+  auto r2 = raw_ref<int>(i);
+  EXPECT_TRUE(r1 == r1);
+  EXPECT_TRUE(r1 == r2);
+  EXPECT_TRUE(r1 == i);
+  EXPECT_TRUE(i == r1);
+  int j = 1;
+  auto r3 = raw_ref<int>(j);
+  EXPECT_FALSE(r1 == r3);
+  EXPECT_FALSE(r1 == j);
+  EXPECT_FALSE(j == r1);
+}
+
+TEST(RawRef, NotEquals) {
+  int i = 1;
+  auto r1 = raw_ref<int>(i);
+  int j = 1;
+  auto r2 = raw_ref<int>(j);
+  EXPECT_TRUE(r1 != r2);
+  EXPECT_TRUE(r1 != j);
+  EXPECT_TRUE(j != r1);
+  EXPECT_FALSE(r1 != r1);
+  EXPECT_FALSE(r2 != j);
+  EXPECT_FALSE(j != r2);
+}
+
+TEST(RawRef, LessThan) {
+  int i[] = {1, 1};
+  auto r1 = raw_ref<int>(i[0]);
+  auto r2 = raw_ref<int>(i[1]);
+  EXPECT_TRUE(r1 < r2);
+  EXPECT_TRUE(r1 < i[1]);
+  EXPECT_FALSE(i[1] < r1);
+  EXPECT_FALSE(r2 < r1);
+  EXPECT_FALSE(r2 < i[0]);
+  EXPECT_TRUE(i[0] < r2);
+  EXPECT_FALSE(r1 < r1);
+  EXPECT_FALSE(r1 < i[0]);
+  EXPECT_FALSE(i[0] < r1);
+}
+
+TEST(RawRef, GreaterThan) {
+  int i[] = {1, 1};
+  auto r1 = raw_ref<int>(i[0]);
+  auto r2 = raw_ref<int>(i[1]);
+  EXPECT_TRUE(r2 > r1);
+  EXPECT_FALSE(r1 > r2);
+  EXPECT_FALSE(r1 > i[1]);
+  EXPECT_TRUE(i[1] > r1);
+  EXPECT_FALSE(r2 > r2);
+  EXPECT_FALSE(r2 > i[1]);
+  EXPECT_FALSE(i[1] > r2);
+}
+
+TEST(RawRef, LessThanOrEqual) {
+  int i[] = {1, 1};
+  auto r1 = raw_ref<int>(i[0]);
+  auto r2 = raw_ref<int>(i[1]);
+  EXPECT_TRUE(r1 <= r2);
+  EXPECT_TRUE(r1 <= r1);
+  EXPECT_TRUE(r2 <= r2);
+  EXPECT_FALSE(r2 <= r1);
+  EXPECT_TRUE(r1 <= i[1]);
+  EXPECT_TRUE(r1 <= i[0]);
+  EXPECT_TRUE(r2 <= i[1]);
+  EXPECT_FALSE(r2 <= i[0]);
+  EXPECT_FALSE(i[1] <= r1);
+  EXPECT_TRUE(i[0] <= r1);
+  EXPECT_TRUE(i[1] <= r2);
+  EXPECT_TRUE(i[0] <= r2);
+}
+
+TEST(RawRef, GreaterThanOrEqual) {
+  int i[] = {1, 1};
+  auto r1 = raw_ref<int>(i[0]);
+  auto r2 = raw_ref<int>(i[1]);
+  EXPECT_TRUE(r2 >= r1);
+  EXPECT_TRUE(r1 >= r1);
+  EXPECT_TRUE(r2 >= r2);
+  EXPECT_FALSE(r1 >= r2);
+  EXPECT_TRUE(r2 >= i[0]);
+  EXPECT_TRUE(r1 >= i[0]);
+  EXPECT_TRUE(r2 >= i[1]);
+  EXPECT_FALSE(r1 >= i[1]);
+  EXPECT_FALSE(i[0] >= r2);
+  EXPECT_TRUE(i[0] >= r1);
+  EXPECT_TRUE(i[1] >= r2);
+  EXPECT_TRUE(i[1] >= r1);
+}
+
+// Death Tests: If we're only using the no-op version of `raw_ptr` and
+// have `!BUILDFLAG(PA_DCHECK_IS_ON)`, the `PA_RAW_PTR_CHECK()`s used in
+// `raw_ref` evaluate to nothing. Therefore, death tests relying on
+// these CHECKs firing are disabled in their absence.
+
+#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
+    BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) || BUILDFLAG(PA_DCHECK_IS_ON)
+
+TEST(RawRefDeathTest, CopyConstructAfterMove) {
+  int i = 1;
+  auto r = raw_ref<int>(i);
+  auto r2 = std::move(r);
+  EXPECT_CHECK_DEATH({ [[maybe_unused]] auto r3 = r; });
+}
+
+TEST(RawRefDeathTest, MoveConstructAfterMove) {
+  int i = 1;
+  auto r = raw_ref<int>(i);
+  auto r2 = std::move(r);
+  EXPECT_CHECK_DEATH({ [[maybe_unused]] auto r3 = std::move(r); });
+}
+
+TEST(RawRefDeathTest, CopyAssignAfterMove) {
+  int i = 1;
+  auto r = raw_ref<int>(i);
+  auto r2 = std::move(r);
+  EXPECT_CHECK_DEATH({ r2 = r; });
+}
+
+TEST(RawRefDeathTest, MoveAssignAfterMove) {
+  int i = 1;
+  auto r = raw_ref<int>(i);
+  auto r2 = std::move(r);
+  EXPECT_CHECK_DEATH({ r2 = std::move(r); });
+}
+
+TEST(RawRefDeathTest, CopyConstructAfterMoveUpCast) {
+  auto s = SubClass();
+  auto r = raw_ref<SubClass>(s);
+  auto moved = std::move(r);
+  EXPECT_CHECK_DEATH({ [[maybe_unused]] auto r2 = raw_ref<BaseClass>(r); });
+}
+
+TEST(RawRefDeathTest, MoveConstructAfterMoveUpCast) {
+  auto s = SubClass();
+  auto r = raw_ref<SubClass>(s);
+  auto moved = std::move(r);
+  EXPECT_CHECK_DEATH(
+      { [[maybe_unused]] auto r2 = raw_ref<BaseClass>(std::move(r)); });
+}
+
+TEST(RawRefDeathTest, FromPtrWithNullptr) {
+  EXPECT_CHECK_DEATH({ raw_ref<int>::from_ptr(nullptr); });
+}
+
+TEST(RawRefDeathTest, CopyAssignAfterMoveUpCast) {
+  auto s = SubClass();
+  auto r = raw_ref<const SubClass>(s);
+  auto t = BaseClass();
+  auto rt = raw_ref<const BaseClass>(t);
+  auto moved = std::move(r);
+  EXPECT_CHECK_DEATH({ rt = r; });
+}
+
+TEST(RawRefDeathTest, MoveAssignAfterMoveUpCast) {
+  auto s = SubClass();
+  auto r = raw_ref<const SubClass>(s);
+  auto t = BaseClass();
+  auto rt = raw_ref<const BaseClass>(t);
+  auto moved = std::move(r);
+  EXPECT_CHECK_DEATH({ rt = std::move(r); });
+}
+
+TEST(RawRefDeathTest, DerefAfterMove) {
+  int i;
+  auto r = raw_ref<int>(i);
+  auto moved = std::move(r);
+  EXPECT_CHECK_DEATH({ r.operator*(); });
+}
+
+TEST(RawRefDeathTest, ArrowAfterMove) {
+  int i;
+  auto r = raw_ref<int>(i);
+  auto moved = std::move(r);
+  EXPECT_CHECK_DEATH({ r.operator->(); });
+}
+
+TEST(RawRefDeathTest, SwapAfterMove) {
+  {
+    int i;
+    auto ri = raw_ref<int>(i);
+    int j;
+    auto rj = raw_ref<int>(j);
+
+    auto moved = std::move(ri);
+    EXPECT_CHECK_DEATH({ swap(ri, rj); });
+  }
+  {
+    int i;
+    auto ri = raw_ref<int>(i);
+    int j;
+    auto rj = raw_ref<int>(j);
+
+    auto moved = std::move(rj);
+    EXPECT_CHECK_DEATH({ swap(ri, rj); });
+  }
+}
+
+TEST(RawRefDeathTest, EqualsAfterMove) {
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 == r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r2);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 == r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 == r1; });
+  }
+}
+
+TEST(RawRefDeathTest, NotEqualsAfterMove) {
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 != r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r2);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 != r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 != r1; });
+  }
+}
+
+TEST(RawRefDeathTest, LessThanAfterMove) {
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 < r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r2);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 < r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 < r1; });
+  }
+}
+
+TEST(RawRefDeathTest, GreaterThanAfterMove) {
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 > r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r2);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 > r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 > r1; });
+  }
+}
+
+TEST(RawRefDeathTest, LessThanOrEqualAfterMove) {
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 <= r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r2);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 <= r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 <= r1; });
+  }
+}
+
+TEST(RawRefDeathTest, GreaterThanOrEqualAfterMove) {
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 >= r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto r2 = raw_ref<int>(i);
+    auto moved = std::move(r2);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 >= r2; });
+  }
+  {
+    int i = 1;
+    auto r1 = raw_ref<int>(i);
+    auto moved = std::move(r1);
+    EXPECT_CHECK_DEATH({ [[maybe_unused]] bool b = r1 >= r1; });
+  }
+}
+
+#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
+        // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) ||
+        // BUILDFLAG(PA_DCHECK_IS_ON)
+
+TEST(RawRef, CTAD) {
+  int i = 1;
+  auto r = raw_ref(i);
+  EXPECT_EQ(&*r, &i);
+}
+
+TEST(RawRefPtr, CTADWithConst) {
+  std::string str;
+  struct S {
+    const raw_ref<const std::string> r;
+  };
+  // Deduces as `raw_ref<std::string>`, for which the constructor call is
+  // valid, making a mutable reference, and then converts to
+  // `raw_ref<const std::string>`.
+  S s1 = {.r = raw_ref(str)};
+  // Deduces as raw_ref<const std::string>, for which the constructor call is
+  // valid from a const ref.
+  S s2 = {.r = raw_ref(static_cast<const std::string&>(str))};
+  EXPECT_EQ(&*s1.r, &str);
+  EXPECT_EQ(&*s2.r, &str);
+}
+
+// Shorter name for expected test impl.
+using RawPtrCountingImpl = base::test::RawPtrCountingImplForTest;
+
+template <typename T>
+using CountingRawRef = raw_ref<T, base::RawPtrTraits::kUseCountingImplForTest>;
+
+// Ensure that the `kUseCountingImplForTest` flag selects the test impl.
+static_assert(std::is_same_v<CountingRawRef<int>::Impl, RawPtrCountingImpl>);
+
+template <typename T>
+using CountingRawRefMayDangle =
+    raw_ref<T,
+            base::RawPtrTraits::kMayDangle |
+                base::RawPtrTraits::kUseCountingImplForTest>;
+
+// Ensure that the `kUseCountingImplForTest` flag selects the test impl.
+static_assert(
+    std::is_same_v<CountingRawRefMayDangle<int>::Impl, RawPtrCountingImpl>);
+
+TEST(RawRef, StdLess) {
+  int i[] = {1, 1};
+  {
+    RawPtrCountingImpl::ClearCounters();
+    auto r1 = CountingRawRef<int>(i[0]);
+    auto r2 = CountingRawRef<int>(i[1]);
+    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, r2));
+    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, r1));
+    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
+  }
+  {
+    RawPtrCountingImpl::ClearCounters();
+    const auto r1 = CountingRawRef<int>(i[0]);
+    const auto r2 = CountingRawRef<int>(i[1]);
+    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, r2));
+    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, r1));
+    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
+  }
+  {
+    RawPtrCountingImpl::ClearCounters();
+    auto r1 = CountingRawRef<const int>(i[0]);
+    auto r2 = CountingRawRef<const int>(i[1]);
+    EXPECT_TRUE(std::less<CountingRawRef<const int>>()(r1, r2));
+    EXPECT_FALSE(std::less<CountingRawRef<const int>>()(r2, r1));
+    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
+  }
+  {
+    RawPtrCountingImpl::ClearCounters();
+    auto r1 = CountingRawRef<int>(i[0]);
+    auto r2 = CountingRawRef<int>(i[1]);
+    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, i[1]));
+    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, i[0]));
+    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
+  }
+  {
+    RawPtrCountingImpl::ClearCounters();
+    const auto r1 = CountingRawRef<int>(i[0]);
+    const auto r2 = CountingRawRef<int>(i[1]);
+    EXPECT_TRUE(std::less<CountingRawRef<int>>()(r1, i[1]));
+    EXPECT_FALSE(std::less<CountingRawRef<int>>()(r2, i[0]));
+    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
+  }
+  {
+    RawPtrCountingImpl::ClearCounters();
+    auto r1 = CountingRawRef<const int>(i[0]);
+    auto r2 = CountingRawRef<const int>(i[1]);
+    EXPECT_TRUE(std::less<CountingRawRef<const int>>()(r1, i[1]));
+    EXPECT_FALSE(std::less<CountingRawRef<const int>>()(r2, i[0]));
+    EXPECT_EQ(2, RawPtrCountingImpl::wrapped_ptr_less_cnt);
+  }
+}
+
+// Verifies that comparing `raw_ref`s with different underlying Traits
+// is a valid utterance and primarily uses the `GetForComparison()` methods.
+TEST(RawRef, OperatorsUseGetForComparison) {
+  int x = 123;
+  CountingRawRef<int> ref1(x);
+  CountingRawRefMayDangle<int> ref2(x);
+
+  RawPtrCountingImpl::ClearCounters();
+
+  EXPECT_TRUE(ref1 == ref2);
+  EXPECT_FALSE(ref1 != ref2);
+  // The use of `PA_RAW_PTR_CHECK()`s to catch dangling references means
+  // that we can't actually readily specify whether there are 0
+  // extractions (`CHECK()`s compiled out) or 2 extractions.
+  EXPECT_THAT((CountingRawPtrExpectations{.get_for_comparison_cnt = 4}),
+              CountersMatch());
+
+  EXPECT_FALSE(ref1 < ref2);
+  EXPECT_FALSE(ref1 > ref2);
+  EXPECT_TRUE(ref1 <= ref2);
+  EXPECT_TRUE(ref1 >= ref2);
+  EXPECT_THAT((CountingRawPtrExpectations{
+                  .get_for_comparison_cnt = 12,
+              }),
+              CountersMatch());
+}
+
+TEST(RawRef, CrossKindConversion) {
+  int x = 123;
+  CountingRawRef<int> ref1(x);
+
+  RawPtrCountingImpl::ClearCounters();
+
+  CountingRawRefMayDangle<int> ref2(ref1);
+  CountingRawRefMayDangle<int> ref3(std::move(ref1));  // Falls back to copy.
+
+  EXPECT_THAT((CountingRawPtrExpectations{.wrap_raw_ptr_cnt = 0,
+                                          .get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 0,
+                                          .wrap_raw_ptr_for_dup_cnt = 2,
+                                          .get_for_duplication_cnt = 2}),
+              CountersMatch());
+}
+
+TEST(RawRef, CrossKindAssignment) {
+  int x = 123;
+  CountingRawRef<int> ref1(x);
+
+  CountingRawRefMayDangle<int> ref2(x);
+  CountingRawRefMayDangle<int> ref3(x);
+
+  RawPtrCountingImpl::ClearCounters();
+  ref2 = ref1;
+  ref3 = std::move(ref1);  // Falls back to copy.
+
+  EXPECT_THAT((CountingRawPtrExpectations{.wrap_raw_ptr_cnt = 0,
+                                          .get_for_dereference_cnt = 0,
+                                          .get_for_extraction_cnt = 0,
+                                          .wrap_raw_ptr_for_dup_cnt = 2,
+                                          .get_for_duplication_cnt = 2}),
+              CountersMatch());
+}
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+TEST(AsanBackupRefPtrImpl, RawRefGet) {
+  base::debug::AsanService::GetInstance()->Initialize();
+
+  if (!base::RawPtrAsanService::GetInstance().IsEnabled()) {
+    base::RawPtrAsanService::GetInstance().Configure(
+        base::EnableDereferenceCheck(true), base::EnableExtractionCheck(true),
+        base::EnableInstantiationCheck(true));
+  } else {
+    ASSERT_TRUE(
+        base::RawPtrAsanService::GetInstance().is_dereference_check_enabled());
+    ASSERT_TRUE(
+        base::RawPtrAsanService::GetInstance().is_extraction_check_enabled());
+    ASSERT_TRUE(base::RawPtrAsanService::GetInstance()
+                    .is_instantiation_check_enabled());
+  }
+
+  auto ptr = ::std::make_unique<int>();
+  raw_ref<int> safe_ref(*ptr);
+  ptr.reset();
+
+  // This test is specifically to ensure that raw_ref.get() does not cause a
+  // dereference of the memory referred to by the reference. If there is a
+  // dereference, then this test will crash.
+  [[maybe_unused]] volatile int& ref = safe_ref.get();
+}
+
+TEST(AsanBackupRefPtrImpl, RawRefOperatorStar) {
+  base::debug::AsanService::GetInstance()->Initialize();
+
+  if (!base::RawPtrAsanService::GetInstance().IsEnabled()) {
+    base::RawPtrAsanService::GetInstance().Configure(
+        base::EnableDereferenceCheck(true), base::EnableExtractionCheck(true),
+        base::EnableInstantiationCheck(true));
+  } else {
+    ASSERT_TRUE(
+        base::RawPtrAsanService::GetInstance().is_dereference_check_enabled());
+    ASSERT_TRUE(
+        base::RawPtrAsanService::GetInstance().is_extraction_check_enabled());
+    ASSERT_TRUE(base::RawPtrAsanService::GetInstance()
+                    .is_instantiation_check_enabled());
+  }
+
+  auto ptr = ::std::make_unique<int>();
+  raw_ref<int> safe_ref(*ptr);
+  ptr.reset();
+
+  // This test is specifically to ensure that &*raw_ref does not cause a
+  // dereference of the memory referred to by the reference. If there is a
+  // dereference, then this test will crash.
+  [[maybe_unused]] volatile int& ref = *safe_ref;
+}
+
+#endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+}  // namespace
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.nc b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.nc
new file mode 100644
index 0000000..8f596d5
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref_unittest.nc
@@ -0,0 +1,82 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h"
+
+namespace {
+
+#if defined(NCTEST_CROSS_KIND_CONVERSION_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)4U == \(\(partition_alloc::internal::RawPtrTraits\)5U | RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
+  [[maybe_unused]] raw_ref<int> ref2(ref);
+}
+
+#elif defined(NCTEST_CROSS_KIND_CONVERSION_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)5U == \(\(partition_alloc::internal::RawPtrTraits\)2052U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
+  [[maybe_unused]] raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(ref);
+}
+
+#elif defined(NCTEST_CROSS_KIND_CONVERSION_MOVE_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)4U == \(\(partition_alloc::internal::RawPtrTraits\)5U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
+  [[maybe_unused]] raw_ref<int> ref2(std::move(ref));
+}
+
+#elif defined(NCTEST_CROSS_KIND_CONVERSION_MOVE_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)5U == \(\(partition_alloc::internal::RawPtrTraits\)2052U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
+  [[maybe_unused]] raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(std::move(ref));
+}
+
+#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)4U == \(\(partition_alloc::internal::RawPtrTraits\)5U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
+  raw_ref<int> ref2(x);
+  ref2 = ref;
+}
+
+#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)5U == \(\(partition_alloc::internal::RawPtrTraits\)2052U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
+  raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(x);
+  ref2 = ref;
+}
+
+#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_MOVE_FROM_MAY_DANGLE) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)4U == \(\(partition_alloc::internal::RawPtrTraits\)5U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kMayDangle> ref(x);
+  raw_ref<int> ref2(x);
+  ref2 = std::move(ref);
+}
+
+#elif defined(NCTEST_CROSS_KIND_ASSIGNMENT_MOVE_FROM_DUMMY) // [r"static assertion failed due to requirement '\(partition_alloc::internal::RawPtrTraits\)5U == \(\(partition_alloc::internal::RawPtrTraits\)2052U \| RawPtrTraits::kMayDangle\)'"]
+
+void WontCompile() {
+  int x = 123;
+  raw_ref<int, base::RawPtrTraits::kDummyForTest> ref(x);
+  raw_ref<int, base::RawPtrTraits::kMayDangle> ref2(x);
+  ref2 = std::move(ref);
+}
+
+#endif
+
+}  // namespace
diff --git a/base/allocator/partition_allocator/src/partition_alloc/pool_offset_freelist.h b/base/allocator/partition_allocator/src/partition_alloc/pool_offset_freelist.h
new file mode 100644
index 0000000..c721b91
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/pool_offset_freelist.h
@@ -0,0 +1,156 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POOL_OFFSET_FREELIST_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POOL_OFFSET_FREELIST_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+
+namespace partition_alloc::internal {
+
+// This implementation of PartitionAlloc's freelist uses pool offsets
+// rather than naked pointers. This is intended to prevent usage of
+// freelist pointers to easily jump around to freed slots.
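+//
+// Illustrative sketch of the encoding (based on the code below, assuming
+// GetPoolInfo(addr) yields the {base, offset} of the pool containing `addr`):
+//   store: next_ = GetPoolInfo(next_addr).offset
+//   load:  next_addr = GetPoolInfo(this).base + next_
+// A stored value of 0 serves as the sentinel for "no next entry".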
+class PoolOffsetFreelistEntry {
+ private:
+  constexpr explicit PoolOffsetFreelistEntry(std::nullptr_t) {}
+
+  explicit PoolOffsetFreelistEntry(PoolOffsetFreelistEntry* next)
+      : next_(PoolOffset(reinterpret_cast<uintptr_t>(next))) {}
+
+  // For testing only.
+  PoolOffsetFreelistEntry(void* next, bool make_shadow_match)
+      : next_(PoolOffset(reinterpret_cast<uintptr_t>(next))) {}
+
+ public:
+  ~PoolOffsetFreelistEntry() = delete;
+
+  PA_ALWAYS_INLINE static PoolOffsetFreelistEntry* EmplaceAndInitNull(
+      void* slot_start_tagged) {
+    auto* entry = new (slot_start_tagged) PoolOffsetFreelistEntry(nullptr);
+    return entry;
+  }
+
+  PA_ALWAYS_INLINE static PoolOffsetFreelistEntry* EmplaceAndInitNull(
+      uintptr_t slot_start) {
+    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
+  }
+
+  PA_ALWAYS_INLINE static PoolOffsetFreelistEntry* EmplaceAndInitForThreadCache(
+      uintptr_t slot_start,
+      PoolOffsetFreelistEntry* next) {
+    auto* entry =
+        new (SlotStartAddr2Ptr(slot_start)) PoolOffsetFreelistEntry(next);
+    return entry;
+  }
+
+  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
+                                                     void* next,
+                                                     bool make_shadow_match) {
+    new (SlotStartAddr2Ptr(slot_start))
+        PoolOffsetFreelistEntry(next, make_shadow_match);
+  }
+
+  void CorruptNextForTesting(uintptr_t v) {
+    // TODO(crbug.com/1461983): Make this do something useful.
+    next_ += 1ull << 31;
+  }
+
+  template <bool crash_on_corruption>
+  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* GetNextForThreadCache(
+      size_t slot_size) const {
+    return GetNextInternal<crash_on_corruption, /*for_thread_cache=*/true>(
+        slot_size);
+  }
+
+  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* GetNext(size_t slot_size) const {
+    return GetNextInternal<true, /*for_thread_cache=*/false>(slot_size);
+  }
+
+  PA_NOINLINE void CheckFreeList(size_t slot_size) const {
+    for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
+      // `GetNext()` calls `IsWellFormed()`.
+    }
+  }
+
+  PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
+    for (auto* entry = this; entry;
+         entry = entry->GetNextForThreadCache<true>(slot_size)) {
+      // `GetNextForThreadCache()` calls `IsWellFormed()`.
+    }
+  }
+
+  PA_ALWAYS_INLINE void SetNext(PoolOffsetFreelistEntry* entry) {
+    next_ = PoolOffset(reinterpret_cast<uintptr_t>(entry));
+  }
+
+  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
+    next_ = uintptr_t{0};
+    return SlotStartPtr2Addr(this);
+  }
+
+  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
+    return !next_;
+  }
+
+ private:
+  // Determines the containing pool of `addr` and returns `addr`
+  // represented as an offset into that pool.
+  PA_ALWAYS_INLINE static uintptr_t PoolOffset(uintptr_t addr) {
+    return addr ? PartitionAddressSpace::GetPoolInfo(addr).offset : addr;
+  }
+
+  template <bool crash_on_corruption, bool for_thread_cache>
+  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* GetNextInternal(
+      size_t slot_size) const {
+    if (IsEncodedNextPtrZero()) {
+      return nullptr;
+    }
+
+    auto* ret = reinterpret_cast<PoolOffsetFreelistEntry*>(
+        GetPoolInfo(reinterpret_cast<uintptr_t>(this)).base + next_);
+    if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(this, ret))) {
+      if constexpr (crash_on_corruption) {
+        PA_DEBUG_DATA_ON_STACK("first", static_cast<size_t>(next_));
+        FreelistCorruptionDetected(slot_size);
+      }
+      return nullptr;
+    }
+    PA_PREFETCH(ret);
+    return ret;
+  }
+
+  // TODO(crbug.com/1461983): Add support for freelist shadow entries
+  // (and freeslot bitmaps).
+  template <bool for_thread_cache>
+  PA_ALWAYS_INLINE static bool IsWellFormed(
+      const PoolOffsetFreelistEntry* here,
+      const PoolOffsetFreelistEntry* next) {
+    const uintptr_t here_address = SlotStartPtr2Addr(here);
+    const uintptr_t next_address = SlotStartPtr2Addr(next);
+
+    const bool not_in_metadata =
+        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();
+    if constexpr (for_thread_cache) {
+      return not_in_metadata;
+    }
+    const bool same_super_page = (here_address & kSuperPageBaseMask) ==
+                                 (next_address & kSuperPageBaseMask);
+    return same_super_page && not_in_metadata;
+  }
+
+  // Expresses the next entry in the freelist as an offset in the
+  // same pool as `this`, except when 0, which (as an invalid pool
+  // offset) serves as a sentinel value.
+  uintptr_t next_ = uintptr_t{0};
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_POOL_OFFSET_FREELIST_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/random.cc b/base/allocator/partition_allocator/src/partition_alloc/random.cc
new file mode 100644
index 0000000..db5fa4c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/random.cc
@@ -0,0 +1,70 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/random.h"
+
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace partition_alloc {
+
+class RandomGenerator {
+ public:
+  constexpr RandomGenerator() {}
+
+  uint32_t RandomValue() {
+    ::partition_alloc::internal::ScopedGuard guard(lock_);
+    return GetGenerator()->RandUint32();
+  }
+
+  void SeedForTesting(uint64_t seed) {
+    ::partition_alloc::internal::ScopedGuard guard(lock_);
+    GetGenerator()->ReseedForTesting(seed);
+  }
+
+ private:
+  ::partition_alloc::internal::Lock lock_ = {};
+  bool initialized_ PA_GUARDED_BY(lock_) = false;
+  union {
+    internal::base::InsecureRandomGenerator instance_ PA_GUARDED_BY(lock_);
+    uint8_t instance_buffer_[sizeof(
+        internal::base::InsecureRandomGenerator)] PA_GUARDED_BY(lock_) = {};
+  };
+
+  internal::base::InsecureRandomGenerator* GetGenerator()
+      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+    if (!initialized_) {
+      new (instance_buffer_) internal::base::InsecureRandomGenerator();
+      initialized_ = true;
+    }
+    return &instance_;
+  }
+};
+
+// Note: this is redundant, since the anonymous union is incompatible with a
+// non-trivial default destructor. Not meant to be destructed anyway.
+static_assert(std::is_trivially_destructible_v<RandomGenerator>, "");
+
+namespace {
+
+RandomGenerator g_generator = {};
+
+}  // namespace
+
+namespace internal {
+
+uint32_t RandomValue() {
+  return g_generator.RandomValue();
+}
+
+}  // namespace internal
+
+void SetMmapSeedForTesting(uint64_t seed) {
+  return g_generator.SeedForTesting(seed);
+}
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/random.h b/base/allocator/partition_allocator/src/partition_alloc/random.h
new file mode 100644
index 0000000..27faccd
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/random.h
@@ -0,0 +1,31 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RANDOM_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RANDOM_H_
+
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc {
+
+namespace internal {
+
+// Returns a random value. The generator's internal state is initialized with
+// `base::RandUint64` which is very unpredictable, but which is expensive due to
+// the need to call into the kernel. Therefore this generator uses a fast,
+// entirely user-space function after initialization.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t RandomValue();
+
+}  // namespace internal
+
+// Sets the seed for the random number generator to a known value, to cause the
+// RNG to generate a predictable sequence of outputs. May be called multiple
+// times.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SetMmapSeedForTesting(uint64_t seed);
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RANDOM_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.cc b/base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.cc
new file mode 100644
index 0000000..5fff5d6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.cc
@@ -0,0 +1,18 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+ReservationOffsetTable ReservationOffsetTable::singleton_;
+#else
+ReservationOffsetTable::_ReservationOffsetTable
+    ReservationOffsetTable::reservation_offset_table_;
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h b/base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h
new file mode 100644
index 0000000..5fdd81b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h
@@ -0,0 +1,284 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <tuple>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal {
+
+static constexpr uint16_t kOffsetTagNotAllocated =
+    std::numeric_limits<uint16_t>::max();
+static constexpr uint16_t kOffsetTagNormalBuckets =
+    std::numeric_limits<uint16_t>::max() - 1;
+
+// The main purpose of the reservation offset table is to easily locate the
+// direct map reservation start address for any given address. There is one
+// entry in the table for each super page.
+//
+// When PartitionAlloc reserves an address region it is always aligned to
+// super page boundary. However, in 32-bit mode, the size may not be
+// super-page-aligned, so it may look like this:
+//   |<--------- actual reservation size --------->|
+//   +----------+----------+-----+-----------+-----+ - - - +
+//   |SuperPage0|SuperPage1| ... |SuperPage K|SuperPage K+1|
+//   +----------+----------+-----+-----------+-----+ - - -.+
+//                                           |<-X->|<-Y*)->|
+//
+// The table entries for reserved super pages say how many pages away from the
+// reservation the super page is:
+//   +----------+----------+-----+-----------+-------------+
+//   |Entry for |Entry for | ... |Entry for  |Entry for    |
+//   |SuperPage0|SuperPage1|     |SuperPage K|SuperPage K+1|
+//   +----------+----------+-----+-----------+-------------+
+//   |     0    |    1     | ... |     K     |   K + 1     |
+//   +----------+----------+-----+-----------+-------------+
+//
+// For an address Z, the reservation start can be found using this formula:
+//   ((Z >> kSuperPageShift) - (the entry for Z)) << kSuperPageShift
+//
+// kOffsetTagNotAllocated is a special tag denoting that the super page isn't
+// allocated by PartitionAlloc and kOffsetTagNormalBuckets denotes that it is
+// used for a normal-bucket allocation, not for a direct-map allocation.
+//
+// *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
+//    until X is unreserved, because PartitionAlloc always uses kSuperPageSize
+//    alignment when reserving address spaces. One can use the "is in pool?"
+//    check to further determine which part of the super page is used by
+//    PartitionAlloc. This isn't a problem in 64-bit mode, where allocation
+//    granularity is kSuperPageSize.
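+//
+// Worked example (illustrative; assumes kSuperPageShift == 21, i.e. 2 MiB
+// super pages): if a direct-map reservation starts at super page index 100
+// and Z lies in the third super page of that reservation, the table entry
+// for Z is 2, so the reservation start is
+//   ((Z >> 21) - 2) << 21  ==  100 << 21.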
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+    PA_THREAD_ISOLATED_ALIGN ReservationOffsetTable {
+ public:
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  // There is one reservation offset table per Pool in 64-bit mode.
+  static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
+  static constexpr size_t kReservationOffsetTableLength =
+      kReservationOffsetTableCoverage >> kSuperPageShift;
+#else
+  // The size of the reservation offset table should cover the entire 32-bit
+  // address space, one element per super page.
+  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
+  static constexpr size_t kReservationOffsetTableLength =
+      4 * kGiB / kSuperPageSize;
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+  static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
+                "Offsets should be smaller than kOffsetTagNormalBuckets.");
+
+  struct _ReservationOffsetTable {
+    // The number of table elements is less than MAX_UINT16, so the element type
+    // can be uint16_t.
+    static_assert(
+        kReservationOffsetTableLength <= std::numeric_limits<uint16_t>::max(),
+        "Length of the reservation offset table must be less than MAX_UINT16");
+    uint16_t offsets[kReservationOffsetTableLength] = {};
+
+    constexpr _ReservationOffsetTable() {
+      for (uint16_t& offset : offsets) {
+        offset = kOffsetTagNotAllocated;
+      }
+    }
+  };
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  // If thread isolation support is enabled, we need to write-protect the tables
+  // of the thread isolated pool. For this, we need to pad the tables so that
+  // the thread isolated ones start on a page boundary.
+  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ(_ReservationOffsetTable,
+                                            kNumPools)] = {};
+  struct _ReservationOffsetTable tables[kNumPools];
+  static PA_CONSTINIT ReservationOffsetTable singleton_;
+#else
+  // A single table for the entire 32-bit address space.
+  static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+};
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
+  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
+  return ReservationOffsetTable::singleton_.tables[handle - 1].offsets;
+}
+
+PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
+    pool_handle handle) {
+  return GetReservationOffsetTable(handle) +
+         ReservationOffsetTable::kReservationOffsetTableLength;
+}
+
+PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
+  pool_handle handle = GetPool(address);
+  return GetReservationOffsetTable(handle);
+}
+
+PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
+    uintptr_t address) {
+  pool_handle handle = GetPool(address);
+  return GetReservationOffsetTableEnd(handle);
+}
+
+PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
+                                                    uintptr_t offset_in_pool) {
+  size_t table_index = offset_in_pool >> kSuperPageShift;
+  PA_DCHECK(table_index <
+            ReservationOffsetTable::kReservationOffsetTableLength);
+  return GetReservationOffsetTable(pool) + table_index;
+}
+#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
+PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
+  return ReservationOffsetTable::reservation_offset_table_.offsets;
+}
+
+PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
+    uintptr_t address) {
+  return ReservationOffsetTable::reservation_offset_table_.offsets +
+         ReservationOffsetTable::kReservationOffsetTableLength;
+}
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  // In 64-bit mode, find the owning Pool and compute the offset from its base.
+  auto [pool, unused_base, offset] = GetPoolInfo(address);
+  return ReservationOffsetPointer(pool, offset);
+#else
+  size_t table_index = address >> kSuperPageShift;
+  PA_DCHECK(table_index <
+            ReservationOffsetTable::kReservationOffsetTableLength);
+  return GetReservationOffsetTable(address) + table_index;
+#endif
+}
+
+PA_ALWAYS_INLINE uintptr_t ComputeReservationStart(uintptr_t address,
+                                                   uint16_t* offset_ptr) {
+  return (address & kSuperPageBaseMask) -
+         (static_cast<size_t>(*offset_ptr) << kSuperPageShift);
+}
+
+// If the given address doesn't point to direct-map allocated memory,
+// returns 0.
+PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  bool is_in_brp_pool = IsManagedByPartitionAllocBRPPool(address);
+  bool is_in_regular_pool = IsManagedByPartitionAllocRegularPool(address);
+  bool is_in_configurable_pool =
+      IsManagedByPartitionAllocConfigurablePool(address);
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  bool is_in_thread_isolated_pool =
+      IsManagedByPartitionAllocThreadIsolatedPool(address);
+#endif
+
+  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
+#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
+  PA_DCHECK(!is_in_brp_pool);
+#endif
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+  uint16_t* offset_ptr = ReservationOffsetPointer(address);
+  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
+  if (*offset_ptr == kOffsetTagNormalBuckets) {
+    return 0;
+  }
+  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
+  // inside another macro (PA_DCHECK).
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+  constexpr size_t kBRPOffset =
+      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
+      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
+#else
+  constexpr size_t kBRPOffset = 0ull;
+#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
+  // Make sure the reservation start is in the same pool as |address|.
+  // In the 32-bit mode, the beginning of a reservation may be excluded
+  // from the BRP pool, so shift the pointer. The other pools don't have
+  // this logic.
+  PA_DCHECK(is_in_brp_pool ==
+            IsManagedByPartitionAllocBRPPool(reservation_start + kBRPOffset));
+  PA_DCHECK(is_in_regular_pool ==
+            IsManagedByPartitionAllocRegularPool(reservation_start));
+  PA_DCHECK(is_in_configurable_pool ==
+            IsManagedByPartitionAllocConfigurablePool(reservation_start));
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+  PA_DCHECK(is_in_thread_isolated_pool ==
+            IsManagedByPartitionAllocThreadIsolatedPool(reservation_start));
+#endif
+  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+  return reservation_start;
+}
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+// If the given address doesn't point to direct-map allocated memory,
+// returns 0.
+// This variant has better performance than the regular one on 64-bit builds if
+// the Pool that an allocation belongs to is known.
+PA_ALWAYS_INLINE uintptr_t
+GetDirectMapReservationStart(uintptr_t address,
+                             pool_handle pool,
+                             uintptr_t offset_in_pool) {
+  PA_DCHECK(AddressPoolManager::GetInstance().GetPoolBaseAddress(pool) +
+                offset_in_pool ==
+            address);
+  uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
+  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
+  if (*offset_ptr == kOffsetTagNormalBuckets) {
+    return 0;
+  }
+  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
+  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
+  return reservation_start;
+}
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+// Returns true if |address| is the beginning of the first super page of a
+// reservation, i.e. either a normal bucket super page, or the first super page
+// of direct map.
+// |address| must belong to an allocated super page.
+PA_ALWAYS_INLINE bool IsReservationStart(uintptr_t address) {
+  uint16_t* offset_ptr = ReservationOffsetPointer(address);
+  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
+  return ((*offset_ptr == kOffsetTagNormalBuckets) || (*offset_ptr == 0)) &&
+         (address % kSuperPageSize == 0);
+}
+
+// Returns true if |address| belongs to a normal bucket super page.
+PA_ALWAYS_INLINE bool IsManagedByNormalBuckets(uintptr_t address) {
+  uint16_t* offset_ptr = ReservationOffsetPointer(address);
+  return *offset_ptr == kOffsetTagNormalBuckets;
+}
+
+// Returns true if |address| belongs to a direct map region.
+PA_ALWAYS_INLINE bool IsManagedByDirectMap(uintptr_t address) {
+  uint16_t* offset_ptr = ReservationOffsetPointer(address);
+  return *offset_ptr != kOffsetTagNormalBuckets &&
+         *offset_ptr != kOffsetTagNotAllocated;
+}
+
+// Returns true if |address| belongs to a normal bucket super page or a direct
+// map region, i.e. belongs to an allocated super page.
+PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
+  uint16_t* offset_ptr = ReservationOffsetPointer(address);
+  return *offset_ptr != kOffsetTagNotAllocated;
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/reverse_bytes.h b/base/allocator/partition_allocator/src/partition_alloc/reverse_bytes.h
new file mode 100644
index 0000000..b236ac7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/reverse_bytes.h
@@ -0,0 +1,49 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_REVERSE_BYTES_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_REVERSE_BYTES_H_
+
+// This header defines drop-in constexpr replacements for the
+// byte-reversing routines that we used from `//base/sys_byteorder.h`.
+// They will be made moot by C++23's std::byteswap() in the <bit> header.
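+//
+// For example (illustrative): ReverseFourBytes(0x11223344) yields 0x44332211.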
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal {
+
+constexpr uint32_t ReverseFourBytes(uint32_t value) {
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+  return value >> 24 | (value >> 8 & 0xff00) | (value & 0xff00) << 8 |
+         value << 24;
+#else
+  return __builtin_bswap32(value);
+#endif  // PA_CONFIG(IS_NONCLANG_MSVC)
+}
+
+constexpr uint64_t ReverseEightBytes(uint64_t value) {
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+  return value >> 56 | (value >> 40 & 0xff00) | (value >> 24 & 0xff0000) |
+         (value >> 8 & 0xff000000) | (value & 0xff000000) << 8 |
+         (value & 0xff0000) << 24 | (value & 0xff00) << 40 |
+         (value & 0xff) << 56;
+#else
+  return __builtin_bswap64(value);
+#endif  // PA_CONFIG(IS_NONCLANG_MSVC)
+}
+
+constexpr uintptr_t ReverseBytes(uintptr_t value) {
+  if (sizeof(uintptr_t) == 4) {
+    return ReverseFourBytes(static_cast<uint32_t>(value));
+  }
+  return ReverseEightBytes(static_cast<uint64_t>(value));
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_REVERSE_BYTES_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/reverse_bytes_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/reverse_bytes_unittest.cc
new file mode 100644
index 0000000..32042cd
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/reverse_bytes_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/reverse_bytes.h"
+
+#include <cstdint>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal {
+namespace {
+
+TEST(ReverseBytes, DeadBeefScramble) {
+  if (sizeof(uintptr_t) == 4) {
+    EXPECT_EQ(ReverseBytes(uintptr_t{0xefbeadde}), 0xdeadbeef);
+  } else {
+    // Hacky kludge to keep the compiler from immediately noticing that
+    // this won't fit into a uintptr_t when it's four bytes.
+    EXPECT_EQ(ReverseBytes(uint64_t{0xffeeddccefbeadde}), 0xdeadbeefccddeeff);
+  }
+}
+
+}  // namespace
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/DEPS b/base/allocator/partition_allocator/src/partition_alloc/shim/DEPS
new file mode 100644
index 0000000..1caf9b8
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/DEPS
@@ -0,0 +1,18 @@
+# Copyright 2022 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be temporary while we migrate allocator_shim code
+# into partition_allocator/. It will be removed once the migration is done.
+
+specific_include_rules = {
+  "allocator_shim_unittest\.cc$": [
+    "+base/synchronization/waitable_event.h",
+    "+base/threading/platform_thread.h",
+    "+base/threading/thread_local.h",
+  ],
+  "allocator_interception_apple_unittest\.mm$": [
+    "+testing/gtest/include/gtest/gtest.h",
+  ],
+}
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h
new file mode 100644
index 0000000..ed6f98b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h
@@ -0,0 +1,66 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_INTERCEPTION_APPLE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_INTERCEPTION_APPLE_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/third_party/apple_apsl/malloc.h"
+
+namespace allocator_shim {
+
+struct MallocZoneFunctions;
+
+// This initializes AllocatorDispatch::default_dispatch by saving pointers to
+// the functions in the current default malloc zone. This must be called before
+// the default malloc zone is changed to have its intended effect.
+void InitializeDefaultDispatchToMacAllocator();
+
+// Saves the function pointers currently used by the default zone.
+void StoreFunctionsForDefaultZone();
+
+// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
+void StoreFunctionsForAllZones();
+
+// For all malloc zones that have been stored, replace their functions with
+// |functions|.
+void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern bool g_replaced_default_zone;
+
+// Calls the original implementation of malloc/calloc prior to interception.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool UncheckedMallocMac(size_t size, void** result);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool UncheckedCallocMac(size_t num_items, size_t size, void** result);
+
+// Intercepts calls to default and purgeable malloc zones. Intercepts Core
+// Foundation and Objective-C allocations.
+// Has no effect on the default malloc zone if the allocator shim already
+// performs that interception.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void InterceptAllocationsMac();
+
+// Updates all malloc zones to use their original functions.
+// Also calls ClearAllMallocZonesForTesting.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void UninterceptMallocZonesForTesting();
+
+// Returns true if allocations are successfully being intercepted for all malloc
+// zones.
+bool AreMallocZonesIntercepted();
+
+// heap_profiling::ProfilingClient needs to shim all malloc zones, even ones
+// that are registered after start-up. ProfilingClient periodically calls this
+// API to make sure that all malloc zones are shimmed.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void ShimNewMallocZones();
+
+// Exposed for testing.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void ReplaceZoneFunctions(ChromeMallocZone* zone,
+                          const MallocZoneFunctions* functions);
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_INTERCEPTION_APPLE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.mm b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.mm
new file mode 100644
index 0000000..fde5be3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.mm
@@ -0,0 +1,628 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains all the logic necessary to intercept allocations on
+// macOS. "malloc zones" are an abstraction that allows the process to intercept
+// all malloc-related functions. There is no good mechanism [short of
+// interposition] to determine when new malloc zones are added, so there's no
+// clean mechanism to intercept all malloc zones. This file contains logic to
+// intercept the default and purgeable zones, which always exist. A cursory
+// review of Chrome seems to imply that non-default zones are almost never used.
+//
+// This file also contains logic to intercept Core Foundation and Objective-C
+// allocations. The implementations forward to the default malloc zone, so the
+// only reason to intercept these calls is to re-label OOM crashes with slightly
+// more details.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#import <Foundation/Foundation.h>
+#include <errno.h>
+#include <mach/mach.h>
+#import <objc/runtime.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <new>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/third_party/apple_apsl/CFBase.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_IOS)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/ios/ios_util.h"
+#else
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/mac/mac_util.h"
+#endif
+
+// The patching of Objective-C runtime bits must be done without any
+// interference from the ARC machinery.
+#if defined(__has_feature) && __has_feature(objc_arc)
+#error "This file must not be compiled with ARC."
+#endif
+
+namespace allocator_shim {
+
+bool g_replaced_default_zone = false;
+
+namespace {
+
+bool g_oom_killer_enabled;
+bool g_allocator_shims_failed_to_install;
+
+// Starting with Mac OS X 10.7, the zone allocators set up by the system are
+// read-only, to prevent them from being overwritten in an attack. However,
+// blindly unprotecting and reprotecting the zone allocators fails with
+// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
+// memory in its bss. Explicit saving/restoring of the protection is required.
+//
+// This function takes a pointer to a malloc zone, de-protects it if necessary,
+// and returns (in the out parameters) a region of memory (if any) to be
+// re-protected when modifications are complete. This approach assumes that
+// there is no contention for the protection of this memory.
+//
+// Returns true if the malloc zone was properly de-protected, or false
+// otherwise. If this function returns false, the out parameters are invalid and
+// the region does not need to be re-protected.
+bool DeprotectMallocZone(ChromeMallocZone* default_zone,
+                         vm_address_t* reprotection_start,
+                         vm_size_t* reprotection_length,
+                         vm_prot_t* reprotection_value) {
+  mach_port_t unused;
+  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
+  struct vm_region_basic_info_64 info;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+  kern_return_t result =
+      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
+                   VM_REGION_BASIC_INFO_64,
+                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
+  if (result != KERN_SUCCESS) {
+    PA_MACH_LOG(ERROR, result) << "vm_region_64";
+    return false;
+  }
+
+  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
+  // balance it with a deallocate in case this ever changes. See
+  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
+  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
+  mach_port_deallocate(mach_task_self(), unused);
+
+  if (!(info.max_protection & VM_PROT_WRITE)) {
+    PA_LOG(ERROR) << "Invalid max_protection " << info.max_protection;
+    return false;
+  }
+
+  // Does the region fully enclose the zone pointers? A possibly unwarranted
+  // simplification is used here: the size of a full version 10 malloc zone is
+  // assumed, rather than the actual smaller size if the passed-in zone is not
+  // version 10.
+  PA_DCHECK(*reprotection_start <=
+            reinterpret_cast<vm_address_t>(default_zone));
+  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
+                          reinterpret_cast<vm_address_t>(*reprotection_start);
+  PA_DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
+
+  if (info.protection & VM_PROT_WRITE) {
+    // No change needed; the zone is already writable.
+    *reprotection_start = 0;
+    *reprotection_length = 0;
+    *reprotection_value = VM_PROT_NONE;
+  } else {
+    *reprotection_value = info.protection;
+    result =
+        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
+                   false, info.protection | VM_PROT_WRITE);
+    if (result != KERN_SUCCESS) {
+      PA_MACH_LOG(ERROR, result) << "vm_protect";
+      return false;
+    }
+  }
+  return true;
+}
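+
+// Typical usage of DeprotectMallocZone() (illustrative sketch, not the actual
+// implementation in this file): de-protect, patch the zone's function
+// pointers, then restore the saved protection if one was returned.
+//
+//   vm_address_t start;
+//   vm_size_t length;
+//   vm_prot_t prot;
+//   if (DeprotectMallocZone(zone, &start, &length, &prot)) {
+//     // ... overwrite the zone's function pointers ...
+//     if (length) {
+//       vm_protect(mach_task_self(), start, length, false, prot);
+//     }
+//   }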
+
+#if !defined(ADDRESS_SANITIZER)
+
+MallocZoneFunctions g_old_zone;
+MallocZoneFunctions g_old_purgeable_zone;
+
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_zone.malloc(zone, size);
+  if (!result && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void* oom_killer_calloc(struct _malloc_zone_t* zone,
+                        size_t num_items,
+                        size_t size) {
+  void* result = g_old_zone.calloc(zone, num_items, size);
+  if (!result && num_items && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(num_items * size);
+  }
+  return result;
+}
+
+void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_zone.valloc(zone, size);
+  if (!result && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
+  g_old_zone.free(zone, ptr);
+}
+
+void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
+  void* result = g_old_zone.realloc(zone, ptr, size);
+  if (!result && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void* oom_killer_memalign(struct _malloc_zone_t* zone,
+                          size_t alignment,
+                          size_t size) {
+  void* result = g_old_zone.memalign(zone, alignment, size);
+  // Only die if posix_memalign would have returned ENOMEM, since there are
+  // other reasons why null might be returned. See posix_memalign() in 10.15's
+  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
+  if (!result && size && alignment >= sizeof(void*) &&
+      partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_purgeable_zone.malloc(zone, size);
+  if (!result && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
+                                  size_t num_items,
+                                  size_t size) {
+  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
+  if (!result && num_items && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(num_items * size);
+  }
+  return result;
+}
+
+void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_purgeable_zone.valloc(zone, size);
+  if (!result && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
+  g_old_purgeable_zone.free(zone, ptr);
+}
+
+void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
+                                   void* ptr,
+                                   size_t size) {
+  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
+  if (!result && size) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
+                                    size_t alignment,
+                                    size_t size) {
+  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
+  // Only die if posix_memalign would have returned ENOMEM, since there are
+  // other reasons why null might be returned. See posix_memalign() in 10.15's
+  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
+  if (!result && size && alignment >= sizeof(void*) &&
+      partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+#endif  // !defined(ADDRESS_SANITIZER)
+
+#if !defined(ADDRESS_SANITIZER)
+
+// === Core Foundation CFAllocators ===
+
+bool CanGetContextForCFAllocator() {
+#if BUILDFLAG(IS_IOS)
+  return !partition_alloc::internal::base::ios::IsRunningOnOrLater(17, 0, 0);
+#else
+  // As of macOS 14, the allocators are in read-only memory and can no longer be
+  // altered.
+  return partition_alloc::internal::base::mac::MacOSMajorVersion() < 14;
+#endif
+}
+
+CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
+  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
+      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
+  return &our_allocator->_context;
+}
+
+CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
+CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
+CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
+
+void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
+                                            CFOptionFlags hint,
+                                            void* info) {
+  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
+  if (!result) {
+    partition_alloc::TerminateBecauseOutOfMemory(
+        static_cast<size_t>(alloc_size));
+  }
+  return result;
+}
+
+void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
+                                    CFOptionFlags hint,
+                                    void* info) {
+  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
+  if (!result) {
+    partition_alloc::TerminateBecauseOutOfMemory(
+        static_cast<size_t>(alloc_size));
+  }
+  return result;
+}
+
+void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
+                                         CFOptionFlags hint,
+                                         void* info) {
+  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
+  if (!result) {
+    partition_alloc::TerminateBecauseOutOfMemory(
+        static_cast<size_t>(alloc_size));
+  }
+  return result;
+}
+
+#endif  // !defined(ADDRESS_SANITIZER)
+
+// === Cocoa NSObject allocation ===
+
+typedef id (*allocWithZone_t)(id, SEL, NSZone*);
+allocWithZone_t g_old_allocWithZone;
+
+id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
+  id result = g_old_allocWithZone(self, _cmd, zone);
+  if (!result) {
+    partition_alloc::TerminateBecauseOutOfMemory(0);
+  }
+  return result;
+}
+
+void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
+  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
+  if (!IsMallocZoneAlreadyStored(chrome_zone)) {
+    return;
+  }
+  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+  ReplaceZoneFunctions(chrome_zone, &functions);
+}
+
+}  // namespace
+
+bool UncheckedMallocMac(size_t size, void** result) {
+#if defined(ADDRESS_SANITIZER)
+  *result = malloc(size);
+#else
+  if (g_old_zone.malloc) {
+    *result = g_old_zone.malloc(malloc_default_zone(), size);
+  } else {
+    *result = malloc(size);
+  }
+#endif  // defined(ADDRESS_SANITIZER)
+
+  return *result != NULL;
+}
+
+bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
+#if defined(ADDRESS_SANITIZER)
+  *result = calloc(num_items, size);
+#else
+  if (g_old_zone.calloc) {
+    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
+  } else {
+    *result = calloc(num_items, size);
+  }
+#endif  // defined(ADDRESS_SANITIZER)
+
+  return *result != NULL;
+}
+
+void InitializeDefaultDispatchToMacAllocator() {
+  StoreFunctionsForAllZones();
+}
+
+void StoreFunctionsForDefaultZone() {
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  StoreMallocZone(default_zone);
+}
+
+void StoreFunctionsForAllZones() {
+  // This ensures that the default zone is always at the front of the array,
+  // which is important for performance.
+  StoreFunctionsForDefaultZone();
+
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+  if (kr != KERN_SUCCESS) {
+    return;
+  }
+  for (unsigned int i = 0; i < count; ++i) {
+    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+    StoreMallocZone(zone);
+  }
+}
+
+void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
+  // The default zone does not get returned in malloc_get_all_zones().
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
+    ReplaceZoneFunctions(default_zone, functions);
+  }
+
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr =
+      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
+  if (kr != KERN_SUCCESS) {
+    return;
+  }
+  for (unsigned int i = 0; i < count; ++i) {
+    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+    if (DoesMallocZoneNeedReplacing(zone, functions)) {
+      ReplaceZoneFunctions(zone, functions);
+    }
+  }
+  g_replaced_default_zone = true;
+}
+
+void InterceptAllocationsMac() {
+  if (g_oom_killer_enabled) {
+    return;
+  }
+
+  g_oom_killer_enabled = true;
+
+  // === C malloc/calloc/valloc/realloc/posix_memalign ===
+
+  // This approach is not perfect, as requests for amounts of memory larger than
+  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
+  // still fail with a NULL rather than dying (see malloc_zone_malloc() in
+  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
+  // for details). Unfortunately, it's the best we can do. Also note that this
+  // does not affect allocations from non-default zones.
+
+#if !defined(ADDRESS_SANITIZER)
+  // Don't do anything special on OOM for the malloc zones replaced by
+  // AddressSanitizer, as modifying or protecting them may not work correctly.
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  // The malloc zone backed by PartitionAlloc crashes by default, so there is
+  // no need to install the OOM killer.
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  if (!IsMallocZoneAlreadyStored(default_zone)) {
+    StoreZoneFunctions(default_zone, &g_old_zone);
+    MallocZoneFunctions new_functions = {};
+    new_functions.malloc = oom_killer_malloc;
+    new_functions.calloc = oom_killer_calloc;
+    new_functions.valloc = oom_killer_valloc;
+    new_functions.free = oom_killer_free;
+    new_functions.realloc = oom_killer_realloc;
+    new_functions.memalign = oom_killer_memalign;
+
+    ReplaceZoneFunctions(default_zone, &new_functions);
+    g_replaced_default_zone = true;
+  }
+#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+  ChromeMallocZone* purgeable_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
+  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
+    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
+    MallocZoneFunctions new_functions = {};
+    new_functions.malloc = oom_killer_malloc_purgeable;
+    new_functions.calloc = oom_killer_calloc_purgeable;
+    new_functions.valloc = oom_killer_valloc_purgeable;
+    new_functions.free = oom_killer_free_purgeable;
+    new_functions.realloc = oom_killer_realloc_purgeable;
+    new_functions.memalign = oom_killer_memalign_purgeable;
+    ReplaceZoneFunctions(purgeable_zone, &new_functions);
+  }
+#endif  // !defined(ADDRESS_SANITIZER)
+
+  // === C malloc_zone_batch_malloc ===
+
+  // batch_malloc is omitted because the default malloc zone's implementation
+  // only supports batch_malloc for "tiny" allocations from the free list. It
+  // will fail for allocations larger than "tiny", and will only allocate as
+  // many blocks as it's able to from the free list. These factors mean that it
+  // can return less than the requested memory even in a non-out-of-memory
+  // situation. There's no good way to detect whether a batch_malloc failure is
+  // due to these other factors, or due to genuine memory or address space
+  // exhaustion. The fact that it only allocates space from the "tiny" free list
+  // means that it's likely that a failure will not be due to memory exhaustion.
+  // Similarly, these constraints on batch_malloc mean that callers must always
+  // be expecting to receive less memory than was requested, even in situations
+  // where memory pressure is not a concern. Finally, the only public interface
+  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
+  // system's malloc implementation. It's unlikely that anyone's even heard of
+  // it.
+
+#if !defined(ADDRESS_SANITIZER)
+  // === Core Foundation CFAllocators ===
+
+  // This will not catch allocations done by custom allocators, but will catch
+  // all allocations done by system-provided ones.
+
+  PA_CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
+           !g_old_cfallocator_malloc_zone)
+      << "Old allocators unexpectedly non-null";
+
+  bool cf_allocator_internals_known = CanGetContextForCFAllocator();
+
+  if (cf_allocator_internals_known) {
+    CFAllocatorContext* context =
+        ContextForCFAllocator(kCFAllocatorSystemDefault);
+    PA_CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
+    g_old_cfallocator_system_default = context->allocate;
+    PA_CHECK(g_old_cfallocator_system_default)
+        << "Failed to get kCFAllocatorSystemDefault allocation function.";
+    context->allocate = oom_killer_cfallocator_system_default;
+
+    context = ContextForCFAllocator(kCFAllocatorMalloc);
+    PA_CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
+    g_old_cfallocator_malloc = context->allocate;
+    PA_CHECK(g_old_cfallocator_malloc)
+        << "Failed to get kCFAllocatorMalloc allocation function.";
+    context->allocate = oom_killer_cfallocator_malloc;
+
+    context = ContextForCFAllocator(kCFAllocatorMallocZone);
+    PA_CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
+    g_old_cfallocator_malloc_zone = context->allocate;
+    PA_CHECK(g_old_cfallocator_malloc_zone)
+        << "Failed to get kCFAllocatorMallocZone allocation function.";
+    context->allocate = oom_killer_cfallocator_malloc_zone;
+  }
+#endif  // !defined(ADDRESS_SANITIZER)
+
+  // === Cocoa NSObject allocation ===
+
+  // Note that both +[NSObject new] and +[NSObject alloc] call through to
+  // +[NSObject allocWithZone:].
+
+  PA_CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";
+
+  Class nsobject_class = [NSObject class];
+  Method orig_method =
+      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
+  g_old_allocWithZone =
+      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
+  PA_CHECK(g_old_allocWithZone)
+      << "Failed to get allocWithZone allocation function.";
+  method_setImplementation(orig_method,
+                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
+}
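+
+// Example (hypothetical call site, for illustration only): interception is
+// meant to be set up once, early in process start-up, before other threads
+// can allocate, e.g.:
+//
+//   allocator_shim::InitializeAllocatorShim();   // install the shim zone
+//   allocator_shim::InterceptAllocationsMac();   // install the OOM killer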
+
+void UninterceptMallocZonesForTesting() {
+  UninterceptMallocZoneForTesting(malloc_default_zone());  // IN-TEST
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+  PA_CHECK(kr == KERN_SUCCESS);
+  for (unsigned int i = 0; i < count; ++i) {
+    UninterceptMallocZoneForTesting(  // IN-TEST
+        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
+  }
+
+  ClearAllMallocZonesForTesting();  // IN-TEST
+}
+
+bool AreMallocZonesIntercepted() {
+  return !g_allocator_shims_failed_to_install;
+}
+
+void ShimNewMallocZones() {
+  StoreFunctionsForAllZones();
+
+  // Use the functions from the default zone as a template for replacing those
+  // of any newly registered zones.
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  PA_DCHECK(IsMallocZoneAlreadyStored(default_zone));
+
+  MallocZoneFunctions new_functions;
+  StoreZoneFunctions(default_zone, &new_functions);
+  ReplaceFunctionsForStoredZones(&new_functions);
+}
+
+void ReplaceZoneFunctions(ChromeMallocZone* zone,
+                          const MallocZoneFunctions* functions) {
+  // Remove protection.
+  vm_address_t reprotection_start = 0;
+  vm_size_t reprotection_length = 0;
+  vm_prot_t reprotection_value = VM_PROT_NONE;
+  bool success = DeprotectMallocZone(zone, &reprotection_start,
+                                     &reprotection_length, &reprotection_value);
+  if (!success) {
+    g_allocator_shims_failed_to_install = true;
+    return;
+  }
+
+  PA_CHECK(functions->malloc && functions->calloc && functions->valloc &&
+           functions->free && functions->realloc);
+  zone->malloc = functions->malloc;
+  zone->calloc = functions->calloc;
+  zone->valloc = functions->valloc;
+  zone->free = functions->free;
+  zone->realloc = functions->realloc;
+  if (functions->batch_malloc) {
+    zone->batch_malloc = functions->batch_malloc;
+  }
+  if (functions->batch_free) {
+    zone->batch_free = functions->batch_free;
+  }
+  if (functions->size) {
+    zone->size = functions->size;
+  }
+  if (zone->version >= 5 && functions->memalign) {
+    zone->memalign = functions->memalign;
+  }
+  if (zone->version >= 6 && functions->free_definite_size) {
+    zone->free_definite_size = functions->free_definite_size;
+  }
+  if (zone->version >= 10 && functions->claimed_address) {
+    zone->claimed_address = functions->claimed_address;
+  }
+  if (zone->version >= 13 && functions->try_free_default) {
+    zone->try_free_default = functions->try_free_default;
+  }
+
+  // Cap the version to the max supported to ensure malloc doesn't try to call
+  // functions that weren't replaced.
+#if (__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
+    (__IPHONE_OS_VERSION_MAX_ALLOWED >= 160100)
+  zone->version = std::min(zone->version, 13U);
+#else
+  zone->version = std::min(zone->version, 12U);
+#endif
+
+  // Restore protection if it was active.
+  if (reprotection_start) {
+    kern_return_t result =
+        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
+                   false, reprotection_value);
+    PA_MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
+  }
+}
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple_unittest.mm b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple_unittest.mm
new file mode 100644
index 0000000..44d36a6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple_unittest.mm
@@ -0,0 +1,65 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+
+#include <mach/mach.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace allocator_shim {
+
+namespace {
+void ResetMallocZone(ChromeMallocZone* zone) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+  ReplaceZoneFunctions(zone, &functions);
+}
+
+void ResetAllMallocZones() {
+  ChromeMallocZone* default_malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  ResetMallocZone(default_malloc_zone);
+
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr = malloc_get_all_zones(mach_task_self(), /*reader=*/nullptr,
+                                          &zones, &count);
+  if (kr != KERN_SUCCESS) {
+    return;
+  }
+  for (unsigned int i = 0; i < count; ++i) {
+    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+    ResetMallocZone(zone);
+  }
+}
+}  // namespace
+
+class AllocatorInterceptionTest : public testing::Test {
+ protected:
+  void TearDown() override {
+    ResetAllMallocZones();
+    ClearAllMallocZonesForTesting();
+  }
+};
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+TEST_F(AllocatorInterceptionTest, ShimNewMallocZones) {
+  InitializeAllocatorShim();
+  ChromeMallocZone* default_malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+
+  malloc_zone_t new_zone;
+  memset(&new_zone, 1, sizeof(malloc_zone_t));
+  malloc_zone_register(&new_zone);
+  EXPECT_NE(new_zone.malloc, default_malloc_zone->malloc);
+  ShimNewMallocZones();
+  EXPECT_EQ(new_zone.malloc, default_malloc_zone->malloc);
+
+  malloc_zone_unregister(&new_zone);
+}
+#endif
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.cc
new file mode 100644
index 0000000..b44a85e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.cc
@@ -0,0 +1,463 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+
+#include <errno.h>
+
+#include <atomic>
+#include <new>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/notreached.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(IS_WIN)
+#include <unistd.h>
+#else
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h"
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+#include <malloc/malloc.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+#endif
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#endif
+
+// No calls to malloc / new in this file. They would cause re-entrancy of
+// the shim, which is hard to deal with. Keep this code as simple as possible
+// and don't use any external C++ objects here, not even //base ones. Even if
+// they are safe to use today, they might be refactored in the future.
+
+namespace {
+
+std::atomic<const allocator_shim::AllocatorDispatch*> g_chain_head{
+    &allocator_shim::AllocatorDispatch::default_dispatch};
+
+bool g_call_new_handler_on_malloc_failure = false;
+
+PA_ALWAYS_INLINE size_t GetCachedPageSize() {
+  static size_t pagesize = 0;
+  if (!pagesize) {
+    pagesize = partition_alloc::internal::base::GetPageSize();
+  }
+  return pagesize;
+}
+
+// Calls the std::new_handler thread-safely. Returns true if a new_handler was
+// set and called, false if no new_handler was set.
+bool CallNewHandler(size_t size) {
+#if BUILDFLAG(IS_WIN)
+  return allocator_shim::WinCallNewHandler(size);
+#else
+  std::new_handler nh = std::get_new_handler();
+  if (!nh) {
+    return false;
+  }
+  (*nh)();
+  // Assume the new_handler will abort if it fails. Exceptions are disabled
+  // and we don't support the case of a new_handler throwing std::bad_alloc.
+  return true;
+#endif
+}
+
+PA_ALWAYS_INLINE const allocator_shim::AllocatorDispatch* GetChainHead() {
+  return g_chain_head.load(std::memory_order_relaxed);
+}
+
+}  // namespace
+
+namespace allocator_shim {
+
+void SetCallNewHandlerOnMallocFailure(bool value) {
+  g_call_new_handler_on_malloc_failure = value;
+}
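+
+// Example (hypothetical embedder code, for illustration only): opting in to
+// new_handler-on-malloc-failure. The handler must either free some memory and
+// return (the shim then retries the allocation) or terminate; returning
+// without freeing anything would make the retry loop spin forever.
+//
+//   static void HandleOom() {
+//     std::abort();  // or release a memory reserve and return
+//   }
+//
+//   std::set_new_handler(&HandleOom);  // from <new>
+//   allocator_shim::SetCallNewHandlerOnMallocFailure(true);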
+
+void* UncheckedAlloc(size_t size) {
+  const AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
+}
+
+void UncheckedFree(void* ptr) {
+  const AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, ptr, nullptr);
+}
+
+void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
+  // Loop in case of (an unlikely) race on setting the list head.
+  size_t kMaxRetries = 7;
+  for (size_t i = 0; i < kMaxRetries; ++i) {
+    const AllocatorDispatch* chain_head = GetChainHead();
+    dispatch->next = chain_head;
+
+    // This function guarantees to be thread-safe w.r.t. concurrent
+    // insertions. It also has to guarantee that all the threads always
+    // see a consistent chain, hence the atomic_thread_fence() below.
+    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
+    // we don't really want this to be a release-store with a corresponding
+    // acquire-load during malloc().
+    std::atomic_thread_fence(std::memory_order_seq_cst);
+    // Set the chain head to the new dispatch atomically. If we lose the race,
+    // retry.
+    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
+                                             std::memory_order_relaxed,
+                                             std::memory_order_relaxed)) {
+      // Success.
+      return;
+    }
+  }
+
+  PA_CHECK(false);  // Too many retries, this shouldn't happen.
+}
+
+void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
+  PA_DCHECK(GetChainHead() == dispatch);
+  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
+}
+
+#if BUILDFLAG(IS_APPLE)
+void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr) {
+  unsigned int zone_count = 0;
+  vm_address_t* zones = nullptr;
+  kern_return_t result =
+      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
+  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
+
+  // "find_zone_and_free" expected by try_free_default.
+  //
+  // libmalloc's zones call find_registered_zone() in case the default one
+  // doesn't handle the allocation. We can't, so we try to emulate it. See the
+  // implementation in libmalloc/src/malloc.c for details.
+  // https://github.com/apple-oss-distributions/libmalloc/blob/main/src/malloc.c
+  for (unsigned int i = 0; i < zone_count; ++i) {
+    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
+    if (size_t size = zone->size(zone, ptr)) {
+      if (zone->version >= 6 && zone->free_definite_size) {
+        zone->free_definite_size(zone, ptr, size);
+      } else {
+        zone->free(zone, ptr);
+      }
+      return;
+    }
+  }
+
+  // There must be an owner zone.
+  PA_CHECK(false);
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+}  // namespace allocator_shim
+
+// The Shim* functions below are the entry points into the shim layer and
+// are supposed to be invoked by the allocator_shim_override_*
+// headers to route the malloc / new symbols through the shim layer.
+// They are defined as ALWAYS_INLINE in order to remove a level of indirection
+// between the system-defined entry points and the shim implementations.
+extern "C" {
+
+// The general pattern for allocations is:
+// - Try to allocate; if it succeeds, return the pointer.
+// - If the allocation failed:
+//   - Call the std::new_handler if it was a C++ allocation.
+//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
+//     AND SetCallNewHandlerOnMallocFailure(true).
+//   - If the std::new_handler is NOT set just return nullptr.
+//   - If the std::new_handler is set:
+//     - Assume it will abort() if it fails (very likely the new_handler will
+//       just terminate the process after printing a message).
+//     - Assume it did succeed if it returns, in which case reattempt the alloc.
+
+PA_ALWAYS_INLINE void* ShimCppNew(size_t size) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    void* context = nullptr;
+#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+    context = malloc_default_zone();
+#endif
+    ptr = chain_head->alloc_function(chain_head, size, context);
+  } while (!ptr && CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
+  void* context = nullptr;
+#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  context = malloc_default_zone();
+#endif
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->alloc_unchecked_function(chain_head, size, context);
+}
+
+PA_ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    void* context = nullptr;
+#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+    context = malloc_default_zone();
+#endif
+    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
+                                             context);
+  } while (!ptr && CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void ShimCppDelete(void* address) {
+  void* context = nullptr;
+#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  context = malloc_default_zone();
+#endif
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address, context);
+}
+
+PA_ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_function(chain_head, size, context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
+                                                      context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
+  // realloc(size == 0) means free() and might return a nullptr. We should
+  // not call the std::new_handler in that case, though.
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->realloc_function(chain_head, address, size, context);
+  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void* ShimMemalign(size_t alignment,
+                                    size_t size,
+                                    void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
+                                             context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE int ShimPosixMemalign(void** res,
+                                       size_t alignment,
+                                       size_t size) {
+  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
+  // in tc_malloc.cc.
+  if (((alignment % sizeof(void*)) != 0) ||
+      !partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)) {
+    return EINVAL;
+  }
+  void* ptr = ShimMemalign(alignment, size, nullptr);
+  *res = ptr;
+  return ptr ? 0 : ENOMEM;
+}
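+
+// For illustration (hypothetical caller): the argument checks above mirror the
+// POSIX contract, e.g.
+//
+//   void* p = nullptr;
+//   posix_memalign(&p, 64, 1024);  // power of two and a multiple of
+//                                  // sizeof(void*) -> 0 on success
+//   posix_memalign(&p, 24, 1024);  // not a power of two -> EINVAL
+//   posix_memalign(&p, 2, 1024);   // not a multiple of sizeof(void*) -> EINVAL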
+
+PA_ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
+  return ShimMemalign(GetCachedPageSize(), size, context);
+}
+
+PA_ALWAYS_INLINE void* ShimPvalloc(size_t size) {
+  // pvalloc(0) should allocate one page, according to its man page.
+  if (size == 0) {
+    size = GetCachedPageSize();
+  } else {
+    size = partition_alloc::internal::base::bits::AlignUp(size,
+                                                          GetCachedPageSize());
+  }
+  // The third argument is nullptr because pvalloc is glibc only and does not
+  // exist on OSX/BSD systems.
+  return ShimMemalign(GetCachedPageSize(), size, nullptr);
+}
+
+PA_ALWAYS_INLINE void ShimFree(void* address, void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address, context);
+}
+
+PA_ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address,
+                                            void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->get_size_estimate_function(
+      chain_head, const_cast<void*>(address), context);
+}
+
+PA_ALWAYS_INLINE bool ShimClaimedAddress(void* address, void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->claimed_address_function(chain_head, address, context);
+}
+
+PA_ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
+                                          void** results,
+                                          unsigned num_requested,
+                                          void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->batch_malloc_function(chain_head, size, results,
+                                           num_requested, context);
+}
+
+PA_ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
+                                    unsigned num_to_be_freed,
+                                    void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->batch_free_function(chain_head, to_be_freed,
+                                         num_to_be_freed, context);
+}
+
+PA_ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr,
+                                           size_t size,
+                                           void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_definite_size_function(chain_head, ptr, size,
+                                                 context);
+}
+
+PA_ALWAYS_INLINE void ShimTryFreeDefault(void* ptr, void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->try_free_default_function(chain_head, ptr, context);
+}
+
+PA_ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
+                                         size_t alignment,
+                                         void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr = nullptr;
+  do {
+    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
+                                              context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
+                                          size_t size,
+                                          size_t alignment,
+                                          void* context) {
+  // _aligned_realloc(size == 0) means _aligned_free() and might return a
+  // nullptr. We should not call the std::new_handler in that case, though.
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr = nullptr;
+  do {
+    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
+                                               alignment, context);
+  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+PA_ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
+  const allocator_shim::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->aligned_free_function(chain_head, address, context);
+}
+
+}  // extern "C"
+
+#if !BUILDFLAG(IS_WIN) && \
+    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
+// C++ symbols (new / delete) should always be routed through the shim layer,
+// except on Windows and macOS (unless PartitionAlloc-Everywhere is enabled),
+// where the malloc intercept is deep enough that it also catches the C++
+// calls.
+//
+// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
+// allocator_shim::internal::PartitionMalloc crashes on OOM, and we need to
+// avoid crashes in case of operator new() noexcept.  Thus, operator new()
+// noexcept needs to be routed to
+// allocator_shim::internal::PartitionMallocUnchecked through the shim layer.
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_cpp_symbols.h"
+#endif
+
+#if BUILDFLAG(IS_ANDROID)
+// Android does not support symbol interposition. The way malloc symbols are
+// intercepted on Android is by using link-time -wrap flags.
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_linker_wrapped_symbols.h"
+#elif BUILDFLAG(IS_WIN)
+// On Windows we use plain link-time overriding of the CRT symbols.
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_ucrt_symbols_win.h"
+#elif BUILDFLAG(IS_APPLE)
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_default_zone.h"
+#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_symbols.h"
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#else
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_libc_symbols.h"
+#endif
+
+// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
+// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
+// glibc 2.23 for instance), and free() to free it. This causes issues for us,
+// as we are then asked to free memory we didn't allocate.
+//
+// This only happened in glibc to allocate TLS storage metadata, and there are
+// no other callers of __libc_memalign() there as of September 2020. To work
+// around this issue, intercept this internal libc symbol to make sure that both
+// the allocation and the free() are caught by the shim.
+//
+// This seems fragile, and is, but there is ample precedent for it, making it
+// quite likely to keep working in the future. For instance, LLVM for LSAN uses
+// this mechanism.
+
+#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_glibc_weak_symbols.h"
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+namespace allocator_shim {
+
+void InitializeAllocatorShim() {
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  // Prepares the default dispatch. After the intercepted malloc calls have
+  // traversed the shim, this will route them to the default malloc zone.
+  InitializeDefaultDispatchToMacAllocator();
+
+  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
+
+  // This replaces the default malloc zone, causing calls to malloc & friends
+  // from the codebase to be routed to ShimMalloc() above.
+  ReplaceFunctionsForStoredZones(&functions);
+#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+}
+
+}  // namespace allocator_shim
+#endif
+
+// Cross-checks.
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#error The allocator shim should not be compiled when building for memory tools.
+#endif
+
+#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+    (defined(_MSC_VER) && defined(_CPPUNWIND))
+#error This code cannot be used when exceptions are turned on.
+#endif
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h
new file mode 100644
index 0000000..838aa34
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h
@@ -0,0 +1,240 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/types/strong_alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+
+namespace allocator_shim {
+
+// Allocator Shim API. It allows one to:
+//  - Configure the behavior of the allocator (what to do on OOM failures).
+//  - Install new hooks (AllocatorDispatch) in the allocator chain.
+
+// When this shim layer is enabled, the route of an allocation is as follows:
+//
+// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
+//   The override_* headers define the symbols required to intercept calls to
+//   malloc() and operator new (if not overridden by specific C++ classes).
+//
+// [allocator_shim.cc] Routing allocation calls to the shim:
+//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
+//   ShimCppNew() etc. methods defined in allocator_shim.cc.
+//   These methods will (1) forward the allocation call to the front of the
+//   AllocatorDispatch chain and (2) perform security hardening (e.g., they
+//   might call the std::new_handler on OOM failure).
+//
+// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
+//   It is a singly linked list where each element is a struct with function
+//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
+//   consists of a single AllocatorDispatch element, herein called
+//   the "default dispatch", which is statically defined at build time and
+//   ultimately routes the calls to the actual allocator defined by the build
+//   config (glibc, ...).
+//
+// It is possible to dynamically insert further AllocatorDispatch stages
+// to the front of the chain, for debugging / profiling purposes.
+//
+// All the functions must be thread-safe. The shim does not enforce any
+// serialization. This is to allow routing to thread-aware allocators without
+// introducing unnecessary perf hits.
+
+struct AllocatorDispatch {
+  using AllocFn = void*(const AllocatorDispatch* self,
+                        size_t size,
+                        void* context);
+  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
+                                 size_t size,
+                                 void* context);
+  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
+                                       size_t n,
+                                       size_t size,
+                                       void* context);
+  using AllocAlignedFn = void*(const AllocatorDispatch* self,
+                               size_t alignment,
+                               size_t size,
+                               void* context);
+  using ReallocFn = void*(const AllocatorDispatch* self,
+                          void* address,
+                          size_t size,
+                          void* context);
+  using FreeFn = void(const AllocatorDispatch* self,
+                      void* address,
+                      void* context);
+  // Returns the allocated size of user data (not including heap overhead).
+  // Can be larger than the requested size.
+  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
+                                   void* address,
+                                   void* context);
+  using ClaimedAddressFn = bool(const AllocatorDispatch* self,
+                                void* address,
+                                void* context);
+  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
+                                 size_t size,
+                                 void** results,
+                                 unsigned num_requested,
+                                 void* context);
+  using BatchFreeFn = void(const AllocatorDispatch* self,
+                           void** to_be_freed,
+                           unsigned num_to_be_freed,
+                           void* context);
+  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
+                                  void* ptr,
+                                  size_t size,
+                                  void* context);
+  using TryFreeDefaultFn = void(const AllocatorDispatch* self,
+                                void* ptr,
+                                void* context);
+  using AlignedMallocFn = void*(const AllocatorDispatch* self,
+                                size_t size,
+                                size_t alignment,
+                                void* context);
+  using AlignedReallocFn = void*(const AllocatorDispatch* self,
+                                 void* address,
+                                 size_t size,
+                                 size_t alignment,
+                                 void* context);
+  using AlignedFreeFn = void(const AllocatorDispatch* self,
+                             void* address,
+                             void* context);
+
+  AllocFn* const alloc_function;
+  AllocUncheckedFn* const alloc_unchecked_function;
+  AllocZeroInitializedFn* const alloc_zero_initialized_function;
+  AllocAlignedFn* const alloc_aligned_function;
+  ReallocFn* const realloc_function;
+  FreeFn* const free_function;
+  GetSizeEstimateFn* const get_size_estimate_function;
+  // claimed_address, batch_malloc, batch_free, free_definite_size and
+  // try_free_default are specific to the OSX and iOS allocators.
+  ClaimedAddressFn* const claimed_address_function;
+  BatchMallocFn* const batch_malloc_function;
+  BatchFreeFn* const batch_free_function;
+  FreeDefiniteSizeFn* const free_definite_size_function;
+  TryFreeDefaultFn* const try_free_default_function;
+  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
+  // Windows allocator.
+  AlignedMallocFn* const aligned_malloc_function;
+  AlignedReallocFn* const aligned_realloc_function;
+  AlignedFreeFn* const aligned_free_function;
+
+  const AllocatorDispatch* next;
+
+  // |default_dispatch| is statically defined by one (and only one) of the
+  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
+  // configuration.
+  static const AllocatorDispatch default_dispatch;
+};
+
+// When true, makes malloc behave like new w.r.t. calling the new_handler if
+// the allocation fails (see set_new_mode() on Windows).
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void SetCallNewHandlerOnMallocFailure(bool value);
+
+// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
+// regardless of SetCallNewHandlerOnMallocFailure().
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* UncheckedAlloc(size_t size);
+
+// Frees memory allocated with UncheckedAlloc().
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void UncheckedFree(void* ptr);
+
+// Inserts |dispatch| in front of the allocator chain. This method is
+// thread-safe w.r.t. concurrent invocations of InsertAllocatorDispatch().
+// Callers are responsible for not inserting the same dispatch more than once.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
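+
+// Example (sketch, for illustration only; the names below are hypothetical):
+// a pass-through dispatch that counts allocations. Every function pointer in
+// the struct must be filled in; the remaining entries (not shown) would
+// forward to |self->next| in the same way.
+//
+//   void* CountingAlloc(const AllocatorDispatch* self, size_t size,
+//                       void* context) {
+//     ++g_allocation_count;  // hypothetical counter
+//     return self->next->alloc_function(self->next, size, context);
+//   }
+//
+//   InsertAllocatorDispatch(&g_counting_dispatch);  // g_counting_dispatch
+//                                                   // uses CountingAlloc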
+
+// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
+// removal of arbitrary elements from a singly linked list would require a lock
+// in malloc(), which we really don't want.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
+
+#if BUILDFLAG(IS_APPLE)
+// The fallback function to be called when try_free_default_function receives a
+// pointer which doesn't belong to the allocator.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void TryFreeDefaultFallbackToFindZoneAndFree(void* ptr);
+#endif  // BUILDFLAG(IS_APPLE)
+
+#if BUILDFLAG(IS_APPLE)
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void InitializeDefaultAllocatorPartitionRoot();
+bool IsDefaultAllocatorPartitionRootInitialized();
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+// On macOS, the allocator shim needs to be turned on at runtime.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void InitializeAllocatorShim();
+#endif  // BUILDFLAG(IS_APPLE)
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void EnablePartitionAllocMemoryReclaimer();
+
+using EnableBrp =
+    partition_alloc::internal::base::StrongAlias<class EnableBrpTag, bool>;
+using EnableMemoryTagging =
+    partition_alloc::internal::base::StrongAlias<class EnableMemoryTaggingTag,
+                                                 bool>;
+using SplitMainPartition =
+    partition_alloc::internal::base::StrongAlias<class SplitMainPartitionTag,
+                                                 bool>;
+using UseDedicatedAlignedPartition = partition_alloc::internal::base::
+    StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
+enum class BucketDistribution : uint8_t { kNeutral, kDenser };
+
+// If |thread_cache_on_non_quarantinable_partition| is specified, the
+// thread-cache will be enabled on the non-quarantinable partition. The
+// thread-cache on the main (malloc) partition will be disabled.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void ConfigurePartitions(
+    EnableBrp enable_brp,
+    EnableMemoryTagging enable_memory_tagging,
+    partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode,
+    SplitMainPartition split_main_partition,
+    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
+    size_t ref_count_size,
+    BucketDistribution distribution,
+    size_t scheduler_loop_quarantine_capacity_in_bytes);
+
+// If |thread_cache_on_non_quarantinable_partition| is specified, the
+// thread-cache will be enabled on the non-quarantinable partition. The
+// thread-cache on the main (malloc) partition will be disabled.
+// This is the deprecated version of ConfigurePartitions, kept for compatibility
+// with pdfium's test setup, see
+// third_party/pdfium/testing/allocator_shim_config.cpp.
+// TODO(crbug.com/1137393): Remove this function once pdfium has switched to
+// the new version.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void ConfigurePartitions(
+    EnableBrp enable_brp,
+    EnableMemoryTagging enable_memory_tagging,
+    SplitMainPartition split_main_partition,
+    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
+    size_t ref_count_size,
+    BucketDistribution distribution);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetMainPartitionRootExtrasSize();
+
+#if BUILDFLAG(USE_STARSCAN)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
+#endif
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_apple_zoned_malloc.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_apple_zoned_malloc.cc
new file mode 100644
index 0000000..27cf53e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_apple_zoned_malloc.cc
@@ -0,0 +1,130 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h"
+
+namespace allocator_shim {
+namespace {
+
+void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
+                          size);
+}
+
+void* CallocImpl(const AllocatorDispatch*,
+                 size_t n,
+                 size_t size,
+                 void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
+                          size);
+}
+
+void* MemalignImpl(const AllocatorDispatch*,
+                   size_t alignment,
+                   size_t size,
+                   void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
+                            alignment, size);
+}
+
+void* ReallocImpl(const AllocatorDispatch*,
+                  void* ptr,
+                  size_t size,
+                  void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
+                           ptr, size);
+}
+
+void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+}
+
+size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+}
+
+bool ClaimedAddressImpl(const AllocatorDispatch*, void* ptr, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  if (functions.claimed_address) {
+    return functions.claimed_address(
+        reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+  }
+  // If the fast API 'claimed_address' is not implemented in the specified
+  // zone, fall back to the 'size' function, which also tells whether the given
+  // address belongs to the zone, although it is slower.
+  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+}
+
+unsigned BatchMallocImpl(const AllocatorDispatch* self,
+                         size_t size,
+                         void** results,
+                         unsigned num_requested,
+                         void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.batch_malloc(
+      reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
+      num_requested);
+}
+
+void BatchFreeImpl(const AllocatorDispatch* self,
+                   void** to_be_freed,
+                   unsigned num_to_be_freed,
+                   void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
+                       to_be_freed, num_to_be_freed);
+}
+
+void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
+                          void* ptr,
+                          size_t size,
+                          void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  functions.free_definite_size(
+      reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
+}
+
+void TryFreeDefaultImpl(const AllocatorDispatch* self,
+                        void* ptr,
+                        void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  if (functions.try_free_default) {
+    return functions.try_free_default(
+        reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+  }
+  allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(ptr);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &MallocImpl,           /* alloc_function */
+    &MallocImpl,           /* alloc_unchecked_function */
+    &CallocImpl,           /* alloc_zero_initialized_function */
+    &MemalignImpl,         /* alloc_aligned_function */
+    &ReallocImpl,          /* realloc_function */
+    &FreeImpl,             /* free_function */
+    &GetSizeEstimateImpl,  /* get_size_estimate_function */
+    &ClaimedAddressImpl,   /* claimed_address_function */
+    &BatchMallocImpl,      /* batch_malloc_function */
+    &BatchFreeImpl,        /* batch_free_function */
+    &FreeDefiniteSizeImpl, /* free_definite_size_function */
+    &TryFreeDefaultImpl,   /* try_free_default_function */
+    nullptr,               /* aligned_malloc_function */
+    nullptr,               /* aligned_realloc_function */
+    nullptr,               /* aligned_free_function */
+    nullptr,               /* next */
+};
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_glibc.cc
new file mode 100644
index 0000000..9a7501d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_glibc.cc
@@ -0,0 +1,129 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+
+#include <dlfcn.h>
+#include <malloc.h>
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to libc functions.
+// The code here is strongly inspired by tcmalloc's libc_override_glibc.h.
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_calloc(size_t n, size_t size);
+void* __libc_realloc(void* address, size_t size);
+void* __libc_memalign(size_t alignment, size_t size);
+void __libc_free(void* ptr);
+}  // extern "C"
+
+namespace {
+
+using allocator_shim::AllocatorDispatch;
+
+// Strictly speaking, it would make more sense not to subtract anything, but
+// other shims limit allocations to something lower than INT_MAX (which is
+// 0x7FFFFFFF on most platforms), and tests expect that.
+constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
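+// With a 32-bit int this is 0x7FFFFFFF - 0x1000 = 0x7FFFEFFF bytes, i.e. just
+// under 2 GiB.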
+
+void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
+  // We cannot force glibc's malloc() to crash when a large size is requested,
+  // so do it in the shim instead.
+  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+
+  return __libc_malloc(size);
+}
+
+void* GlibcUncheckedMalloc(const AllocatorDispatch*,
+                           size_t size,
+                           void* context) {
+  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
+    return nullptr;
+  }
+
+  return __libc_malloc(size);
+}
+
+void* GlibcCalloc(const AllocatorDispatch*,
+                  size_t n,
+                  size_t size,
+                  void* context) {
+  const auto total = partition_alloc::internal::base::CheckMul(n, size);
+  if (PA_UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize)) {
+    partition_alloc::TerminateBecauseOutOfMemory(size * n);
+  }
+
+  return __libc_calloc(n, size);
+}
+
+void* GlibcRealloc(const AllocatorDispatch*,
+                   void* address,
+                   size_t size,
+                   void* context) {
+  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+
+  return __libc_realloc(address, size);
+}
+
+void* GlibcMemalign(const AllocatorDispatch*,
+                    size_t alignment,
+                    size_t size,
+                    void* context) {
+  if (PA_UNLIKELY(size >= kMaxAllowedSize)) {
+    partition_alloc::TerminateBecauseOutOfMemory(size);
+  }
+
+  return __libc_memalign(alignment, size);
+}
+
+void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
+  __libc_free(address);
+}
+
+PA_NO_SANITIZE("cfi-icall")
+size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
+                            void* address,
+                            void* context) {
+  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
+  // resolve it instead. This should be safe because glibc (and hence dlfcn)
+  // does not use malloc_size internally and so there should not be a risk of
+  // recursion.
+  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
+  static MallocUsableSizeFunction fn_ptr =
+      reinterpret_cast<MallocUsableSizeFunction>(
+          dlsym(RTLD_NEXT, "malloc_usable_size"));
+
+  return fn_ptr(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &GlibcMalloc,          /* alloc_function */
+    &GlibcUncheckedMalloc, /* alloc_unchecked_function */
+    &GlibcCalloc,          /* alloc_zero_initialized_function */
+    &GlibcMemalign,        /* alloc_aligned_function */
+    &GlibcRealloc,         /* realloc_function */
+    &GlibcFree,            /* free_function */
+    &GlibcGetSizeEstimate, /* get_size_estimate_function */
+    nullptr,               /* claimed_address */
+    nullptr,               /* batch_malloc_function */
+    nullptr,               /* batch_free_function */
+    nullptr,               /* free_definite_size_function */
+    nullptr,               /* try_free_default_function */
+    nullptr,               /* aligned_malloc_function */
+    nullptr,               /* aligned_realloc_function */
+    nullptr,               /* aligned_free_function */
+    nullptr,               /* next */
+};
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
new file mode 100644
index 0000000..950b932
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -0,0 +1,86 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <malloc.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#include "build/build_config.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to the original libc functions when using the link-time
+// -Wl,-wrap,malloc approach (see README.md).
+// The __real_X functions here are special symbols that the linker will relocate
+// against the real "X" undefined symbol, so that __real_malloc becomes the
+// equivalent of what an undefined malloc symbol reference would have been.
+// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
+// which routes the __wrap_X functions into the shim.
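+//
+// A minimal sketch of the mechanism (illustrative only): when the target is
+// linked with
+//
+//   -Wl,-wrap,malloc
+//
+// a call site such as
+//
+//   void* p = malloc(16);  // The linker resolves this to __wrap_malloc(16).
+//
+// is routed into the shim, while the shim itself can still reach the libc
+// implementation through __real_malloc(16).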
+
+extern "C" {
+void* __real_malloc(size_t);
+void* __real_calloc(size_t, size_t);
+void* __real_realloc(void*, size_t);
+void* __real_memalign(size_t, size_t);
+void __real_free(void*);
+size_t __real_malloc_usable_size(void*);
+}  // extern "C"
+
+namespace {
+
+using allocator_shim::AllocatorDispatch;
+
+void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
+  return __real_malloc(size);
+}
+
+void* RealCalloc(const AllocatorDispatch*,
+                 size_t n,
+                 size_t size,
+                 void* context) {
+  return __real_calloc(n, size);
+}
+
+void* RealRealloc(const AllocatorDispatch*,
+                  void* address,
+                  size_t size,
+                  void* context) {
+  return __real_realloc(address, size);
+}
+
+void* RealMemalign(const AllocatorDispatch*,
+                   size_t alignment,
+                   size_t size,
+                   void* context) {
+  return __real_memalign(alignment, size);
+}
+
+void RealFree(const AllocatorDispatch*, void* address, void* context) {
+  __real_free(address);
+}
+
+size_t RealSizeEstimate(const AllocatorDispatch*,
+                        void* address,
+                        void* context) {
+  return __real_malloc_usable_size(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &RealMalloc,       /* alloc_function */
+    &RealMalloc,       /* alloc_unchecked_function */
+    &RealCalloc,       /* alloc_zero_initialized_function */
+    &RealMemalign,     /* alloc_aligned_function */
+    &RealRealloc,      /* realloc_function */
+    &RealFree,         /* free_function */
+    &RealSizeEstimate, /* get_size_estimate_function */
+    nullptr,           /* claimed_address */
+    nullptr,           /* batch_malloc_function */
+    nullptr,           /* batch_free_function */
+    nullptr,           /* free_definite_size_function */
+    nullptr,           /* try_free_default_function */
+    nullptr,           /* aligned_malloc_function */
+    nullptr,           /* aligned_realloc_function */
+    nullptr,           /* aligned_free_function */
+    nullptr,           /* next */
+};
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.cc
new file mode 100644
index 0000000..a41096c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.cc
@@ -0,0 +1,829 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+
+#include <atomic>
+#include <cstddef>
+#include <map>
+#include <string>
+#include <tuple>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/chromecast_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/memory_reclaimer.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#include <malloc.h>
+#endif
+
+using allocator_shim::AllocatorDispatch;
+
+namespace {
+
+class SimpleScopedSpinLocker {
+ public:
+  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
+    // Lock. Semantically equivalent to base::Lock::Acquire().
+    bool expected = false;
+    // Weak CAS since we are in a retry loop, relaxed ordering for failure since
+    // in this case we don't imply any ordering.
+    //
+    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
+    while (!lock_.compare_exchange_weak(
+        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
+      expected = false;
+    }
+  }
+
+  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }
+
+ private:
+  std::atomic<bool>& lock_;
+};
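+
+// Usage sketch for the lock above (illustrative only; |g_example_lock| and
+// ExampleCriticalSection() are hypothetical names, not part of the shim):
+//
+//   std::atomic<bool> g_example_lock{false};
+//
+//   void ExampleCriticalSection() {
+//     SimpleScopedSpinLocker locker(g_example_lock);  // Spins until acquired.
+//     // ... touch the state guarded by |g_example_lock| ...
+//   }  // Released by the destructor with a release store.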
+
+// We can't use a "static local" or a base::LazyInstance, as:
+// - static local variables call into the runtime on Windows, which is not
+//   prepared to handle it, as the first allocation happens during CRT init.
+// - We don't want to depend on base::LazyInstance, which may be converted to
+//   static locals one day.
+//
+// Nevertheless, this provides essentially the same thing.
+template <typename T, typename Constructor>
+class LeakySingleton {
+ public:
+  constexpr LeakySingleton() = default;
+
+  PA_ALWAYS_INLINE T* Get() {
+    auto* instance = instance_.load(std::memory_order_acquire);
+    if (PA_LIKELY(instance)) {
+      return instance;
+    }
+
+    return GetSlowPath();
+  }
+
+  // Replaces the instance pointer with a new one.
+  void Replace(T* new_instance) {
+    SimpleScopedSpinLocker scoped_lock{initialization_lock_};
+
+    // Modify under the lock to avoid race between |if (instance)| and
+    // |instance_.store()| in GetSlowPath().
+    instance_.store(new_instance, std::memory_order_release);
+  }
+
+ private:
+  T* GetSlowPath();
+
+  std::atomic<T*> instance_;
+  // Before C++20, having an initializer here causes a "variable does not have a
+  // constant initializer" error.  In C++20, omitting it causes a similar error.
+  // Presumably this is due to the C++20 changes to make atomic initialization
+  // (of the other members of this class) sane, so guarding under that
+  // feature-test.
+#if !defined(__cpp_lib_atomic_value_initialization) || \
+    __cpp_lib_atomic_value_initialization < 201911L
+  alignas(T) uint8_t instance_buffer_[sizeof(T)];
+#else
+  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
+#endif
+  std::atomic<bool> initialization_lock_;
+};
+
+template <typename T, typename Constructor>
+T* LeakySingleton<T, Constructor>::GetSlowPath() {
+  // The instance has not been set; the proper way to proceed (correct
+  // double-checked locking) is:
+  //
+  // auto* instance = instance_.load(std::memory_order_acquire);
+  // if (!instance) {
+  //   ScopedLock initialization_lock;
+  //   root = instance_.load(std::memory_order_relaxed);
+  //   if (root)
+  //     return root;
+  //   instance = Create new root;
+  //   instance_.store(instance, std::memory_order_release);
+  //   return instance;
+  // }
+  //
+  // However, we don't want to use a base::Lock here, so instead we use
+  // compare-and-exchange on a lock variable, which provides the same
+  // guarantees.
+  SimpleScopedSpinLocker scoped_lock{initialization_lock_};
+
+  T* instance = instance_.load(std::memory_order_relaxed);
+  // Someone beat us.
+  if (instance) {
+    return instance;
+  }
+
+  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
+  instance_.store(instance, std::memory_order_release);
+
+  return instance;
+}
+
+class MainPartitionConstructor {
+ public:
+  static partition_alloc::PartitionRoot* New(void* buffer) {
+    constexpr partition_alloc::PartitionOptions::EnableToggle thread_cache =
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+        // Additional partitions may be created in ConfigurePartitions(). Since
+        // only one partition can have thread cache enabled, postpone the
+        // decision to turn the thread cache on until after that call.
+        // TODO(bartekn): Enable it here by default, once the "split-only" mode
+        // is no longer needed.
+        partition_alloc::PartitionOptions::kDisabled;
+#else   // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+        // Other tests, such as the ThreadCache tests create a thread cache,
+        // and only one is supported at a time.
+        partition_alloc::PartitionOptions::kDisabled;
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+    auto* new_root = new (buffer)
+        partition_alloc::PartitionRoot(partition_alloc::PartitionOptions{
+            .aligned_alloc = partition_alloc::PartitionOptions::kAllowed,
+            .thread_cache = thread_cache,
+            .star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed,
+            .backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled,
+        });
+
+    return new_root;
+  }
+};
+
+LeakySingleton<partition_alloc::PartitionRoot, MainPartitionConstructor> g_root
+    PA_CONSTINIT = {};
+partition_alloc::PartitionRoot* Allocator() {
+  return g_root.Get();
+}
+
+// Original g_root_ if it was replaced by ConfigurePartitions().
+std::atomic<partition_alloc::PartitionRoot*> g_original_root(nullptr);
+
+std::atomic<bool> g_roots_finalized = false;
+
+class AlignedPartitionConstructor {
+ public:
+  static partition_alloc::PartitionRoot* New(void* buffer) {
+    return g_root.Get();
+  }
+};
+
+LeakySingleton<partition_alloc::PartitionRoot, AlignedPartitionConstructor>
+    g_aligned_root PA_CONSTINIT = {};
+
+partition_alloc::PartitionRoot* OriginalAllocator() {
+  return g_original_root.load(std::memory_order_relaxed);
+}
+
+partition_alloc::PartitionRoot* AlignedAllocator() {
+  return g_aligned_root.Get();
+}
+
+bool AllocatorConfigurationFinalized() {
+  return g_roots_finalized.load();
+}
+
+void* AllocateAlignedMemory(size_t alignment, size_t size) {
+  // Memory returned by the regular allocator *always* respects |kAlignment|,
+  // which is a power of two, and any valid alignment is also a power of two. So
+  // we can directly fulfill these requests with the main allocator.
+  //
+  // This has several advantages:
+  // - The thread cache is supported on the main partition
+  // - Reduced fragmentation
+  // - Better coverage for MiraclePtr variants requiring extras
+  //
+  // There are several call sites in Chromium where base::AlignedAlloc is called
+  // with a small alignment. Some may be due to overly-careful code, some are
+  // because the client code doesn't know the required alignment at compile
+  // time.
+  //
+  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
+  // instance) directly call PartitionFree(), so there is no risk of
+  // mismatch. (see below the default_dispatch definition).
+  if (alignment <= partition_alloc::internal::kAlignment) {
+    // This is mandated by |posix_memalign()| and friends, so should never fire.
+    PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
+    // TODO(bartekn): See if the compiler optimizes branches down the stack on
+    // Mac, where PartitionPageSize() isn't constexpr.
+    return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(
+        size);
+  }
+
+  return AlignedAllocator()
+      ->AlignedAllocInline<partition_alloc::AllocFlags::kNoHooks>(alignment,
+                                                                  size);
+}
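+
+// Illustrative effect of the branch above, assuming
+// partition_alloc::internal::kAlignment == 16 (typical on 64-bit builds, but
+// an assumption here):
+//
+//   AllocateAlignedMemory(8, 100);   // <= kAlignment: served by Allocator().
+//   AllocateAlignedMemory(16, 100);  // <= kAlignment: served by Allocator().
+//   AllocateAlignedMemory(64, 100);  // >  kAlignment: AlignedAllocator().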
+
+}  // namespace
+
+namespace allocator_shim::internal {
+
+void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
+}
+
+void* PartitionMallocUnchecked(const AllocatorDispatch*,
+                               size_t size,
+                               void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  return Allocator()
+      ->AllocInline<partition_alloc::AllocFlags::kReturnNull |
+                    partition_alloc::AllocFlags::kNoHooks>(size);
+}
+
+void* PartitionCalloc(const AllocatorDispatch*,
+                      size_t n,
+                      size_t size,
+                      void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  const size_t total =
+      partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
+  return Allocator()
+      ->AllocInline<partition_alloc::AllocFlags::kZeroFill |
+                    partition_alloc::AllocFlags::kNoHooks>(total);
+}
+
+void* PartitionMemalign(const AllocatorDispatch*,
+                        size_t alignment,
+                        size_t size,
+                        void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  return AllocateAlignedMemory(alignment, size);
+}
+
+void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
+                            size_t size,
+                            size_t alignment,
+                            void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  return AllocateAlignedMemory(alignment, size);
+}
+
+// aligned_realloc documentation is
+// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
+// TODO(tasak): Expand the given memory block to the given size if possible.
+// This realloc always frees the original memory block and allocates a new
+// memory block.
+// TODO(tasak): Implement PartitionRoot::AlignedRealloc and use it.
+void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
+                              void* address,
+                              size_t size,
+                              size_t alignment,
+                              void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  void* new_ptr = nullptr;
+  if (size > 0) {
+    new_ptr = AllocateAlignedMemory(alignment, size);
+  } else {
+    // size == 0 and address != null means just "free(address)".
+    if (address) {
+      partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+          partition_alloc::FreeFlags::kNoHooks>(address);
+    }
+  }
+  // The original memory block (specified by address) is unchanged if ENOMEM.
+  if (!new_ptr) {
+    return nullptr;
+  }
+  // TODO(tasak): Need to compare the new alignment with the address' alignment.
+  // If the two alignments are not the same, need to return nullptr with EINVAL.
+  if (address) {
+    size_t usage = partition_alloc::PartitionRoot::GetUsableSize(address);
+    size_t copy_size = usage > size ? size : usage;
+    memcpy(new_ptr, address, copy_size);
+
+    partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+        partition_alloc::FreeFlags::kNoHooks>(address);
+  }
+  return new_ptr;
+}
+
+void* PartitionRealloc(const AllocatorDispatch*,
+                       void* address,
+                       size_t size,
+                       void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+#if BUILDFLAG(IS_APPLE)
+  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
+                      reinterpret_cast<uintptr_t>(address)) &&
+                  address)) {
+    // A memory region allocated by the system allocator is passed in this
+    // function.  Forward the request to `realloc` which supports zone-
+    // dispatching so that it appropriately selects the right zone.
+    return realloc(address, size);
+  }
+#endif  // BUILDFLAG(IS_APPLE)
+
+  return Allocator()->Realloc<partition_alloc::AllocFlags::kNoHooks>(address,
+                                                                     size, "");
+}
+
+#if BUILDFLAG(PA_IS_CAST_ANDROID)
+extern "C" {
+void __real_free(void*);
+}       // extern "C"
+#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)
+
+void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+#if BUILDFLAG(IS_APPLE)
+  // TODO(bartekn): Add MTE unmasking here (and below).
+  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
+                      reinterpret_cast<uintptr_t>(object)) &&
+                  object)) {
+    // A memory region allocated by the system allocator is passed in this
+    // function.  Forward the request to `free` which supports zone-
+    // dispatching so that it appropriately selects the right zone.
+    return free(object);
+  }
+#endif  // BUILDFLAG(IS_APPLE)
+
+  // On Android Chromecast devices, there is at least one case where a system
+  // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
+  // the pointer, pass it along. This should not have a runtime cost vs regular
+  // Android, since on Android we have a PA_CHECK() rather than the branch here.
+#if BUILDFLAG(PA_IS_CAST_ANDROID)
+  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
+                      reinterpret_cast<uintptr_t>(object)) &&
+                  object)) {
+    // A memory region allocated by the system allocator is passed in this
+    // function.  Forward the request to `free()`, which is `__real_free()`
+    // here.
+    return __real_free(object);
+  }
+#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)
+
+  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+      partition_alloc::FreeFlags::kNoHooks>(object);
+}
+
+#if BUILDFLAG(IS_APPLE)
+// Normal free() path on Apple OSes:
+// 1. size = GetSizeEstimate(ptr);
+// 2. if (size) FreeDefiniteSize(ptr, size)
+//
+// So we don't need to re-check that the pointer is owned in Free(), and we
+// can use the size.
+void PartitionFreeDefiniteSize(const AllocatorDispatch*,
+                               void* address,
+                               size_t size,
+                               void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
+  // still useful though, as we avoid double-checking that the address is owned.
+  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+      partition_alloc::FreeFlags::kNoHooks>(address);
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
+                                void* address,
+                                void* context) {
+  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
+  // is NULL, 0 is returned".
+  if (!address) {
+    return 0;
+  }
+
+#if BUILDFLAG(IS_APPLE)
+  if (!partition_alloc::IsManagedByPartitionAlloc(
+          reinterpret_cast<uintptr_t>(address))) {
+    // The object pointed to by `address` is not allocated by PartitionAlloc.
+    // The return value `0` means that the pointer does not
+    // belong to this malloc zone.
+    return 0;
+  }
+#endif  // BUILDFLAG(IS_APPLE)
+
+  // TODO(lizeb): Returns incorrect values for aligned allocations.
+  const size_t size =
+      partition_alloc::PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(
+          address);
+#if BUILDFLAG(IS_APPLE)
+  // The object pointed to by `address` is allocated by PartitionAlloc, so this
+  // function must not return zero; otherwise the malloc zone dispatcher would
+  // not find the appropriate malloc zone.
+  PA_DCHECK(size);
+#endif  // BUILDFLAG(IS_APPLE)
+  return size;
+}
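+
+// Illustrative contract of the function above when reached through
+// malloc_usable_size(3) (a sketch, not additional API):
+//
+//   void* p = malloc(10);
+//   size_t n = malloc_usable_size(p);        // n >= 10 (slot's usable size).
+//   size_t z = malloc_usable_size(nullptr);  // z == 0, per the man page.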
+
+#if BUILDFLAG(IS_APPLE)
+bool PartitionClaimedAddress(const AllocatorDispatch*,
+                             void* address,
+                             void* context) {
+  return partition_alloc::IsManagedByPartitionAlloc(
+      reinterpret_cast<uintptr_t>(address));
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+unsigned PartitionBatchMalloc(const AllocatorDispatch*,
+                              size_t size,
+                              void** results,
+                              unsigned num_requested,
+                              void* context) {
+  // No real batching: we could, for instance, acquire the lock only once.
+  // Keep it simple for now.
+  for (unsigned i = 0; i < num_requested; i++) {
+    // No need to check the results, we crash if it fails.
+    results[i] = PartitionMalloc(nullptr, size, nullptr);
+  }
+
+  // Either all succeeded, or we crashed.
+  return num_requested;
+}
+
+void PartitionBatchFree(const AllocatorDispatch*,
+                        void** to_be_freed,
+                        unsigned num_to_be_freed,
+                        void* context) {
+  // No real batching: we could, for instance, acquire the lock only once.
+  // Keep it simple for now.
+  for (unsigned i = 0; i < num_to_be_freed; i++) {
+    PartitionFree(nullptr, to_be_freed[i], nullptr);
+  }
+}
+
+#if BUILDFLAG(IS_APPLE)
+void PartitionTryFreeDefault(const AllocatorDispatch*,
+                             void* address,
+                             void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+
+  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
+          reinterpret_cast<uintptr_t>(address)))) {
+    // The object pointed to by `address` is not allocated by PartitionAlloc.
+    // Call find_zone_and_free.
+    return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
+  }
+
+  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+      partition_alloc::FreeFlags::kNoHooks>(address);
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+// static
+bool PartitionAllocMalloc::AllocatorConfigurationFinalized() {
+  return ::AllocatorConfigurationFinalized();
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::Allocator() {
+  return ::Allocator();
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
+  return ::OriginalAllocator();
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
+  return ::AlignedAllocator();
+}
+
+}  // namespace allocator_shim::internal
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace allocator_shim {
+
+void EnablePartitionAllocMemoryReclaimer() {
+  // Unlike other partitions, Allocator() does not register its PartitionRoot to
+  // the memory reclaimer, because doing so may allocate memory. Thus, the
+  // registration to the memory reclaimer has to be done some time later, when
+  // the main root is fully configured.
+  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
+      Allocator());
+
+  // There is only one PartitionAlloc-Everywhere partition at the moment. Any
+  // additional partitions will be created in ConfigurePartitions() and
+  // registered for memory reclaimer there.
+  PA_DCHECK(!AllocatorConfigurationFinalized());
+  PA_DCHECK(OriginalAllocator() == nullptr);
+  PA_DCHECK(AlignedAllocator() == Allocator());
+}
+
+void ConfigurePartitions(
+    EnableBrp enable_brp,
+    EnableMemoryTagging enable_memory_tagging,
+    partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode,
+    SplitMainPartition split_main_partition,
+    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
+    size_t ref_count_size,
+    BucketDistribution distribution,
+    size_t scheduler_loop_quarantine_capacity_in_bytes) {
+  // BRP cannot be enabled without splitting the main partition. Furthermore, in
+  // the "before allocation" mode, it can't be enabled without further splitting
+  // out the aligned partition.
+  PA_CHECK(!enable_brp || split_main_partition);
+#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+  PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
+#endif
+  // Can't split out the aligned partition, without splitting the main one.
+  PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);
+
+  // Calling Get() is important even if the return values aren't used, because
+  // it has the side effect of initializing the variables if they weren't
+  // already.
+  auto* current_root = g_root.Get();
+  auto* current_aligned_root = g_aligned_root.Get();
+  PA_DCHECK(current_root == current_aligned_root);
+
+  if (!split_main_partition) {
+    switch (distribution) {
+      case BucketDistribution::kNeutral:
+        // We start in the 'default' case.
+        break;
+      case BucketDistribution::kDenser:
+        current_root->SwitchToDenserBucketDistribution();
+        break;
+    }
+    PA_DCHECK(!enable_brp);
+    PA_DCHECK(!use_dedicated_aligned_partition);
+    PA_DCHECK(!current_root->settings.with_thread_cache);
+    PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
+    return;
+  }
+
+  // We've been bitten before by using a static local when initializing a
+  // partition. For synchronization, static local variables call into the
+  // runtime on Windows, which may not be ready to handle it, if the path is
+  // invoked on an allocation during the runtime initialization.
+  // ConfigurePartitions() is invoked explicitly from Chromium code, so this
+  // shouldn't bite us here. Mentioning just in case we move this code earlier.
+  static partition_alloc::internal::base::NoDestructor<
+      partition_alloc::PartitionAllocator>
+      new_main_allocator(partition_alloc::PartitionOptions{
+          .aligned_alloc = !use_dedicated_aligned_partition
+                               ? partition_alloc::PartitionOptions::kAllowed
+                               : partition_alloc::PartitionOptions::kDisallowed,
+          .thread_cache = partition_alloc::PartitionOptions::kDisabled,
+          .star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed,
+          .backup_ref_ptr = enable_brp
+                                ? partition_alloc::PartitionOptions::kEnabled
+                                : partition_alloc::PartitionOptions::kDisabled,
+          .ref_count_size = ref_count_size,
+          .scheduler_loop_quarantine_capacity_in_bytes =
+              scheduler_loop_quarantine_capacity_in_bytes,
+          .memory_tagging = {
+              .enabled = enable_memory_tagging
+                             ? partition_alloc::PartitionOptions::kEnabled
+                             : partition_alloc::PartitionOptions::kDisabled,
+              .reporting_mode = memory_tagging_reporting_mode}});
+  partition_alloc::PartitionRoot* new_root = new_main_allocator->root();
+
+  partition_alloc::PartitionRoot* new_aligned_root;
+  if (use_dedicated_aligned_partition) {
+    // TODO(bartekn): Use the original root instead of creating a new one. It'd
+    // result in one less partition, but come at a cost of commingling types.
+    static partition_alloc::internal::base::NoDestructor<
+        partition_alloc::PartitionAllocator>
+        new_aligned_allocator(partition_alloc::PartitionOptions{
+            .aligned_alloc = partition_alloc::PartitionOptions::kAllowed,
+            .thread_cache = partition_alloc::PartitionOptions::kDisabled,
+            .star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed,
+            .backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled,
+        });
+    new_aligned_root = new_aligned_allocator->root();
+  } else {
+    // The new main root can also support AlignedAlloc.
+    new_aligned_root = new_root;
+  }
+
+  // Now switch traffic to the new partitions.
+  g_original_root = current_root;
+  g_aligned_root.Replace(new_aligned_root);
+  g_root.Replace(new_root);
+
+  // No need for g_original_aligned_root, because in cases where g_aligned_root
+  // is replaced, it must've been g_original_root.
+  PA_CHECK(current_aligned_root == g_original_root);
+
+  // Purge memory, now that the traffic to the original partition is cut off.
+  current_root->PurgeMemory(
+      partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
+      partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);
+
+  switch (distribution) {
+    case BucketDistribution::kNeutral:
+      // We start in the 'default' case.
+      break;
+    case BucketDistribution::kDenser:
+      new_root->SwitchToDenserBucketDistribution();
+      if (new_aligned_root != new_root) {
+        new_aligned_root->SwitchToDenserBucketDistribution();
+      }
+      break;
+  }
+
+  PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
+}
+
+// TODO(crbug.com/1137393): Remove this function once pdfium has switched to
+// the new version.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void ConfigurePartitions(
+    EnableBrp enable_brp,
+    EnableMemoryTagging enable_memory_tagging,
+    SplitMainPartition split_main_partition,
+    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
+    size_t ref_count_size,
+    BucketDistribution distribution) {
+  // Since the only user of this function is a test function, we use synchronous
+  // testing mode.
+  const partition_alloc::TagViolationReportingMode
+      memory_tagging_reporting_mode =
+          enable_memory_tagging
+              ? partition_alloc::TagViolationReportingMode::kSynchronous
+              : partition_alloc::TagViolationReportingMode::kDisabled;
+
+  // We don't use this feature in PDFium.
+  size_t scheduler_loop_quarantine_capacity_in_bytes = 0;
+
+  ConfigurePartitions(
+      enable_brp, enable_memory_tagging, memory_tagging_reporting_mode,
+      split_main_partition, use_dedicated_aligned_partition, ref_count_size,
+      distribution, scheduler_loop_quarantine_capacity_in_bytes);
+}
+
+// No synchronization provided: `PartitionRoot.flags` is only written
+// to in `PartitionRoot::Init()`.
+uint32_t GetMainPartitionRootExtrasSize() {
+#if PA_CONFIG(EXTRAS_REQUIRED)
+  return g_root.Get()->settings.extras_size;
+#else
+  return 0;
+#endif  // PA_CONFIG(EXTRAS_REQUIRED)
+}
+
+#if BUILDFLAG(USE_STARSCAN)
+void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
+  partition_alloc::internal::PCScan::Initialize(config);
+
+  PA_CHECK(AllocatorConfigurationFinalized());
+  partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
+  if (OriginalAllocator() != nullptr) {
+    partition_alloc::internal::PCScan::RegisterScannableRoot(
+        OriginalAllocator());
+  }
+  if (Allocator() != AlignedAllocator()) {
+    partition_alloc::internal::PCScan::RegisterScannableRoot(
+        AlignedAllocator());
+  }
+
+  allocator_shim::NonScannableAllocator::Instance().NotifyPCScanEnabled();
+  allocator_shim::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
+}
+#endif  // BUILDFLAG(USE_STARSCAN)
+}  // namespace allocator_shim
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &allocator_shim::internal::PartitionMalloc,  // alloc_function
+    &allocator_shim::internal::
+        PartitionMallocUnchecked,  // alloc_unchecked_function
+    &allocator_shim::internal::
+        PartitionCalloc,  // alloc_zero_initialized_function
+    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
+    &allocator_shim::internal::PartitionRealloc,   // realloc_function
+    &allocator_shim::internal::PartitionFree,      // free_function
+    &allocator_shim::internal::
+        PartitionGetSizeEstimate,  // get_size_estimate_function
+#if BUILDFLAG(IS_APPLE)
+    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
+#else
+    nullptr,  // claimed_address
+#endif
+    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
+    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
+#if BUILDFLAG(IS_APPLE)
+    // On Apple OSes, free_definite_size() is always called from free(), since
+    // get_size_estimate() is used to determine whether an allocation belongs to
+    // the current zone. It makes sense to optimize for it.
+    &allocator_shim::internal::PartitionFreeDefiniteSize,
+    // On Apple OSes, try_free_default() is sometimes called as an optimization
+    // of free().
+    &allocator_shim::internal::PartitionTryFreeDefault,
+#else
+    nullptr,  // free_definite_size_function
+    nullptr,  // try_free_default_function
+#endif
+    &allocator_shim::internal::
+        PartitionAlignedAlloc,  // aligned_malloc_function
+    &allocator_shim::internal::
+        PartitionAlignedRealloc,               // aligned_realloc_function
+    &allocator_shim::internal::PartitionFree,  // aligned_free_function
+    nullptr,                                   // next
+};
+
+// Intercept diagnostics symbols as well, even though they are not part of the
+// unified shim layer.
+//
+// TODO(lizeb): Implement the ones that are doable.
+
+extern "C" {
+
+#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
+
+SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
+
+SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
+  return 0;
+}
+
+#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
+  partition_alloc::SimplePartitionStatsDumper allocator_dumper;
+  Allocator()->DumpStats("malloc", true, &allocator_dumper);
+  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
+
+  partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
+  if (AlignedAllocator() != Allocator()) {
+    AlignedAllocator()->DumpStats("posix_memalign", true,
+                                  &aligned_allocator_dumper);
+  }
+
+  // Dump stats for nonscannable and nonquarantinable allocators.
+  auto& nonscannable_allocator =
+      allocator_shim::NonScannableAllocator::Instance();
+  partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
+  if (auto* nonscannable_root = nonscannable_allocator.root()) {
+    nonscannable_root->DumpStats("malloc", true,
+                                 &nonscannable_allocator_dumper);
+  }
+  auto& nonquarantinable_allocator =
+      allocator_shim::NonQuarantinableAllocator::Instance();
+  partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
+  if (auto* nonquarantinable_root = nonquarantinable_allocator.root()) {
+    nonquarantinable_root->DumpStats("malloc", true,
+                                     &nonquarantinable_allocator_dumper);
+  }
+
+  struct mallinfo info = {0};
+  info.arena = 0;  // Memory *not* allocated with mmap().
+
+  // Memory allocated with mmap(), aka virtual size.
+  info.hblks =
+      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
+          allocator_dumper.stats().total_mmapped_bytes +
+          aligned_allocator_dumper.stats().total_mmapped_bytes +
+          nonscannable_allocator_dumper.stats().total_mmapped_bytes +
+          nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
+  // Resident bytes.
+  info.hblkhd =
+      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
+          allocator_dumper.stats().total_resident_bytes +
+          aligned_allocator_dumper.stats().total_resident_bytes +
+          nonscannable_allocator_dumper.stats().total_resident_bytes +
+          nonquarantinable_allocator_dumper.stats().total_resident_bytes);
+  // Allocated bytes.
+  info.uordblks =
+      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
+          allocator_dumper.stats().total_active_bytes +
+          aligned_allocator_dumper.stats().total_active_bytes +
+          nonscannable_allocator_dumper.stats().total_active_bytes +
+          nonquarantinable_allocator_dumper.stats().total_active_bytes);
+
+  return info;
+}
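+
+// Illustrative reading of the fields populated above (a sketch; the field
+// names come from <malloc.h>, but their meaning here is specific to this
+// shim):
+//
+//   struct mallinfo mi = mallinfo();
+//   // mi.arena    == 0: nothing is reported as non-mmapped memory.
+//   // mi.hblks    -> total mmapped (virtual) bytes across the dumped roots.
+//   // mi.hblkhd   -> total resident bytes across the dumped roots.
+//   // mi.uordblks -> total active (allocated) bytes across the dumped roots.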
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+}  // extern "C"
+
+#if BUILDFLAG(IS_APPLE)
+
+namespace allocator_shim {
+
+void InitializeDefaultAllocatorPartitionRoot() {
+  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
+  // internally, e.g. __builtin_available, and it's not easy to avoid it.
+  // Thus, we initialize the PartitionRoot using the system default allocator
+  // before we intercept the system default allocator.
+  std::ignore = Allocator();
+}
+
+}  // namespace allocator_shim
+
+#endif  // BUILDFLAG(IS_APPLE)
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h
new file mode 100644
index 0000000..99b88fd
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h
@@ -0,0 +1,76 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+
+namespace allocator_shim::internal {
+
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocMalloc {
+ public:
+  // Returns true if ConfigurePartitions() has completed, meaning that the
+  // allocators are effectively set in stone.
+  static bool AllocatorConfigurationFinalized();
+
+  static partition_alloc::PartitionRoot* Allocator();
+  // May return |nullptr|, will never return the same pointer as |Allocator()|.
+  static partition_alloc::PartitionRoot* OriginalAllocator();
+  // May return the same pointer as |Allocator()|.
+  static partition_alloc::PartitionRoot* AlignedAllocator();
+};
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionMallocUnchecked(const AllocatorDispatch*,
+                               size_t size,
+                               void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionCalloc(const AllocatorDispatch*,
+                      size_t n,
+                      size_t size,
+                      void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionMemalign(const AllocatorDispatch*,
+                        size_t alignment,
+                        size_t size,
+                        void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
+                            size_t size,
+                            size_t alignment,
+                            void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
+                              void* address,
+                              size_t size,
+                              size_t alignment,
+                              void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* PartitionRealloc(const AllocatorDispatch*,
+                       void* address,
+                       size_t size,
+                       void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void PartitionFree(const AllocatorDispatch*, void* object, void* context);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
+                                void* address,
+                                void* context);
+
+}  // namespace allocator_shim::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc
new file mode 100644
index 0000000..0e75848
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc
@@ -0,0 +1,190 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+
+#include <cstdlib>
+#include <cstring>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#include <malloc.h>
+#endif
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && BUILDFLAG(USE_PARTITION_ALLOC)
+namespace allocator_shim::internal {
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+// Platforms on which we override weak libc symbols.
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+PA_NOINLINE void FreeForTest(void* data) {
+  free(data);
+}
+
+TEST(PartitionAllocAsMalloc, Mallinfo) {
+  // mallinfo was deprecated in glibc 2.33. The Chrome OS device sysroot has
+  // a new-enough glibc, but the Linux one doesn't yet, so we can't switch to
+  // the replacement mallinfo2 yet.
+  // Once we update the Linux sysroot to be new enough, this warning will
+  // start firing on Linux too. At that point, s/mallinfo/mallinfo2/ in this
+  // file and remove the pragma here and at the end of this function.
+#if BUILDFLAG(IS_CHROMEOS)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+  constexpr int kLargeAllocSize = 10 * 1024 * 1024;
+  struct mallinfo before = mallinfo();
+  void* data = malloc(1000);
+  ASSERT_TRUE(data);
+  void* aligned_data;
+  ASSERT_EQ(0, posix_memalign(&aligned_data, 1024, 1000));
+  ASSERT_TRUE(aligned_data);
+  void* direct_mapped_data = malloc(kLargeAllocSize);
+  ASSERT_TRUE(direct_mapped_data);
+  struct mallinfo after_alloc = mallinfo();
+
+  // Something is reported.
+  EXPECT_GT(after_alloc.hblks, 0);
+  EXPECT_GT(after_alloc.hblkhd, 0);
+  EXPECT_GT(after_alloc.uordblks, 0);
+
+  EXPECT_GT(after_alloc.hblks, kLargeAllocSize);
+
+  // malloc() can reuse memory, so the sizes do not necessarily change, which
+  // would mean that we would need EXPECT_G*E*() rather than EXPECT_GT().
+  //
+  // However, since we allocate direct-mapped memory, this increases the total.
+  EXPECT_GT(after_alloc.hblks, before.hblks);
+  EXPECT_GT(after_alloc.hblkhd, before.hblkhd);
+  EXPECT_GT(after_alloc.uordblks, before.uordblks);
+
+  // A simple malloc() / free() pair can be discarded by the compiler (and is),
+  // making the test fail. Making |FreeForTest()| a PA_NOINLINE function is
+  // both sufficient and required to keep these calls from being eliminated.
+  FreeForTest(data);
+  FreeForTest(aligned_data);
+  FreeForTest(direct_mapped_data);
+  struct mallinfo after_free = mallinfo();
+
+  EXPECT_LT(after_free.hblks, after_alloc.hblks);
+  EXPECT_LT(after_free.hblkhd, after_alloc.hblkhd);
+  EXPECT_LT(after_free.uordblks, after_alloc.uordblks);
+#if BUILDFLAG(IS_CHROMEOS)
+#pragma clang diagnostic pop
+#endif
+}
+
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+// Note: the tests below are deliberately simple; they serve as smoke tests
+// for PartitionAlloc-Everywhere. Most of these directly dispatch to
+// PartitionAlloc, which has much more extensive tests.
+TEST(PartitionAllocAsMalloc, Simple) {
+  void* data = PartitionMalloc(nullptr, 10, nullptr);
+  EXPECT_TRUE(data);
+  PartitionFree(nullptr, data, nullptr);
+}
+
+TEST(PartitionAllocAsMalloc, MallocUnchecked) {
+  void* data = PartitionMallocUnchecked(nullptr, 10, nullptr);
+  EXPECT_TRUE(data);
+  PartitionFree(nullptr, data, nullptr);
+
+  void* too_large = PartitionMallocUnchecked(nullptr, 4e9, nullptr);
+  EXPECT_FALSE(too_large);  // No crash.
+}
+
+TEST(PartitionAllocAsMalloc, Calloc) {
+  constexpr size_t alloc_size = 100;
+  void* data = PartitionCalloc(nullptr, 1, alloc_size, nullptr);
+  EXPECT_TRUE(data);
+
+  char zeroes[alloc_size];
+  memset(zeroes, 0, alloc_size);
+
+  EXPECT_EQ(0, memcmp(zeroes, data, alloc_size));
+  PartitionFree(nullptr, data, nullptr);
+}
+
+TEST(PartitionAllocAsMalloc, Memalign) {
+  constexpr size_t alloc_size = 100;
+  constexpr size_t alignment = 1024;
+  void* data = PartitionMemalign(nullptr, alignment, alloc_size, nullptr);
+  EXPECT_TRUE(data);
+  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(data) % alignment);
+  PartitionFree(nullptr, data, nullptr);
+}
+
+TEST(PartitionAllocAsMalloc, AlignedAlloc) {
+  for (size_t alloc_size : {100, 100000, 10000000}) {
+    for (size_t alignment = 1;
+         alignment <= partition_alloc::kMaxSupportedAlignment;
+         alignment <<= 1) {
+      void* data =
+          PartitionAlignedAlloc(nullptr, alloc_size, alignment, nullptr);
+      EXPECT_TRUE(data);
+      EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(data) % alignment);
+      PartitionFree(nullptr, data, nullptr);
+    }
+  }
+}
+
+TEST(PartitionAllocAsMalloc, AlignedRealloc) {
+  for (size_t alloc_size : {100, 100000, 10000000}) {
+    for (size_t alignment = 1;
+         alignment <= partition_alloc::kMaxSupportedAlignment;
+         alignment <<= 1) {
+      void* data =
+          PartitionAlignedAlloc(nullptr, alloc_size, alignment, nullptr);
+      EXPECT_TRUE(data);
+
+      void* data2 = PartitionAlignedRealloc(nullptr, data, alloc_size,
+                                            alignment, nullptr);
+      EXPECT_TRUE(data2);
+
+      // Aligned realloc always relocates.
+      EXPECT_NE(reinterpret_cast<uintptr_t>(data),
+                reinterpret_cast<uintptr_t>(data2));
+      PartitionFree(nullptr, data2, nullptr);
+    }
+  }
+}
+
+TEST(PartitionAllocAsMalloc, Realloc) {
+  constexpr size_t alloc_size = 100;
+  void* data = PartitionMalloc(nullptr, alloc_size, nullptr);
+  EXPECT_TRUE(data);
+  void* data2 = PartitionRealloc(nullptr, data, 2 * alloc_size, nullptr);
+  EXPECT_TRUE(data2);
+  EXPECT_NE(data2, data);
+  PartitionFree(nullptr, data2, nullptr);
+}
+
+// crbug.com/1141752
+TEST(PartitionAllocAsMalloc, Alignment) {
+  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(PartitionAllocMalloc::Allocator()) %
+                    alignof(partition_alloc::PartitionRoot));
+  // This works fine even if nullptr is returned.
+  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
+                    PartitionAllocMalloc::OriginalAllocator()) %
+                    alignof(partition_alloc::PartitionRoot));
+  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
+                    PartitionAllocMalloc::AlignedAllocator()) %
+                    alignof(partition_alloc::PartitionRoot));
+}
+
+}  // namespace allocator_shim::internal
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
+        // BUILDFLAG(USE_PARTITION_ALLOC)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_winheap.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_winheap.cc
new file mode 100644
index 0000000..8c923cc
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_winheap.cc
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+
+#include <ostream>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h"
+
+namespace {
+
+using allocator_shim::AllocatorDispatch;
+
+void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
+                               size_t size,
+                               void* context) {
+  return allocator_shim::WinHeapMalloc(size);
+}
+
+void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
+                               size_t n,
+                               size_t elem_size,
+                               void* context) {
+  // Overflow check.
+  const size_t size = n * elem_size;
+  if (elem_size != 0 && size / elem_size != n) {
+    return nullptr;
+  }
+
+  void* result = DefaultWinHeapMallocImpl(self, size, context);
+  if (result) {
+    memset(result, 0, size);
+  }
+  return result;
+}
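+
+// Illustrative overflow caught by the check above (a sketch): on a 64-bit
+// build, n = (1ULL << 33) and elem_size = (1ULL << 32) wrap to size == 0, so
+// size / elem_size == 0 != n and nullptr is returned instead of a too-small
+// allocation.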
+
+void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
+                                 size_t alignment,
+                                 size_t size,
+                                 void* context) {
+  PA_CHECK(false) << "The Windows heap does not support memalign.";
+  return nullptr;
+}
+
+void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
+                                void* address,
+                                size_t size,
+                                void* context) {
+  return allocator_shim::WinHeapRealloc(address, size);
+}
+
+void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
+                            void* address,
+                            void* context) {
+  allocator_shim::WinHeapFree(address);
+}
+
+size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
+                                         void* address,
+                                         void* context) {
+  return allocator_shim::WinHeapGetSizeEstimate(address);
+}
+
+void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
+                                      size_t size,
+                                      size_t alignment,
+                                      void* context) {
+  return allocator_shim::WinHeapAlignedMalloc(size, alignment);
+}
+
+void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
+                                       void* ptr,
+                                       size_t size,
+                                       size_t alignment,
+                                       void* context) {
+  return allocator_shim::WinHeapAlignedRealloc(ptr, size, alignment);
+}
+
+void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
+                                   void* ptr,
+                                   void* context) {
+  allocator_shim::WinHeapAlignedFree(ptr);
+}
+
+}  // namespace
+
+// Guarantee that default_dispatch is compile-time initialized to avoid using
+// it before initialization (allocations before main in release builds with
+// optimizations disabled).
+constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &DefaultWinHeapMallocImpl,
+    &DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
+    &DefaultWinHeapCallocImpl,
+    &DefaultWinHeapMemalignImpl,
+    &DefaultWinHeapReallocImpl,
+    &DefaultWinHeapFreeImpl,
+    &DefaultWinHeapGetSizeEstimateImpl,
+    nullptr, /* claimed_address */
+    nullptr, /* batch_malloc_function */
+    nullptr, /* batch_free_function */
+    nullptr, /* free_definite_size_function */
+    nullptr, /* try_free_default_function */
+    &DefaultWinHeapAlignedMallocImpl,
+    &DefaultWinHeapAlignedReallocImpl,
+    &DefaultWinHeapAlignedFreeImpl,
+    nullptr, /* next */
+};
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h
new file mode 100644
index 0000000..2c1f2c6
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
+
+#include "build/build_config.h"
+
+#if defined(__GNUC__)
+
+#if BUILDFLAG(IS_POSIX)
+#include <sys/cdefs.h>  // for __THROW
+#endif
+
+#ifndef __THROW   // Not a glibc system
+#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
+#define __THROW _NOEXCEPT
+#else
+#define __THROW
+#endif  // !_NOEXCEPT
+#endif
+
+// Shim layer symbols need to be ALWAYS exported, regardless of component build.
+//
+// If an exported symbol is linked into a DSO, it may be preempted by a
+// definition in the main executable. If this happens to an allocator symbol, it
+// will mean that the DSO will use the main executable's allocator. This is
+// normally relatively harmless -- regular allocations should all use the same
+// allocator, but if the DSO tries to hook the allocator it will not see any
+// allocations.
+//
+// However, if LLVM LTO is enabled, the compiler may inline the shim layer
+// symbols into callers. The end result is that allocator calls in DSOs may use
+// either the main executable's allocator or the DSO's allocator, depending on
+// whether the call was inlined. This is arguably a bug in LLVM caused by its
+// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
+// To work around the bug we use noinline to prevent the symbols from being
+// inlined.
+//
+// In the long run we probably want to avoid linking the allocator bits into
+// DSOs altogether. This will save a little space and stop giving DSOs the false
+// impression that they can hook the allocator.
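+//
+// Illustrative use (a sketch; the real overrides live in the
+// allocator_shim_override_*.h headers):
+//
+//   SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW;
+//
+// which keeps default visibility in component builds and prevents LTO from
+// inlining the definition into DSO callers.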
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
+
+#elif BUILDFLAG(IS_WIN)  // __GNUC__
+
+#define __THROW
+#define SHIM_ALWAYS_EXPORT __declspec(noinline)
+
+#endif  // __GNUC__
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_INTERNALS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_default_zone.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_default_zone.h
new file mode 100644
index 0000000..f0676a4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_default_zone.h
@@ -0,0 +1,412 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
+
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#error This header must be included iff PartitionAlloc-Everywhere is enabled.
+#endif
+
+#include <string.h>
+
+#include <atomic>
+#include <tuple>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/apple/mach_logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h"
+
+namespace partition_alloc {
+
+// Defined in
+// base/allocator/partition_allocator/src/partition_alloc/partition_root.cc
+void PartitionAllocMallocHookOnBeforeForkInParent();
+void PartitionAllocMallocHookOnAfterForkInParent();
+void PartitionAllocMallocHookOnAfterForkInChild();
+
+}  // namespace partition_alloc
+
+namespace allocator_shim {
+
+namespace {
+
+// malloc_introspection_t's callback functions for our own zone
+
+kern_return_t MallocIntrospectionEnumerator(task_t task,
+                                            void*,
+                                            unsigned type_mask,
+                                            vm_address_t zone_address,
+                                            memory_reader_t reader,
+                                            vm_range_recorder_t recorder) {
+  // Should enumerate all memory regions allocated by this allocator; not
+  // implemented because there is no use case for it yet.
+  return KERN_FAILURE;
+}
+
+size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
+  return partition_alloc::internal::base::bits::AlignUp(
+      size, partition_alloc::internal::kAlignment);
+}
+
+boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
+  // Should check the consistency of the allocator implementing this malloc
+  // zone; not implemented because there is no use case for it yet.
+  return true;
+}
+
+void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
+  // Should print the current state of the zone for debugging / investigation
+  // purposes; not implemented because there is no use case for it yet.
+}
+
+void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
+  // Should enable logging of activity on the given `address`; not implemented
+  // because there is no use case for it yet.
+}
+
+void MallocIntrospectionForceLock(malloc_zone_t* zone) {
+  // Called before fork(2) to acquire the lock.
+  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
+}
+
+void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
+  // Called in the parent process after fork(2) to release the lock.
+  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
+}
+
+void MallocIntrospectionStatistics(malloc_zone_t* zone,
+                                   malloc_statistics_t* stats) {
+  // Should report memory usage correctly; not implemented because there is no
+  // use case for it yet.
+  stats->blocks_in_use = 0;
+  stats->size_in_use = 0;
+  stats->max_size_in_use = 0;  // High water mark of touched memory
+  stats->size_allocated = 0;   // Reserved in memory
+}
+
+boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
+  // Should return true if the underlying PartitionRoot is locked; not
+  // implemented because this function does not appear to be used in practice.
+  return false;
+}
+
+boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
+  // 'discharge' is not supported.
+  return false;
+}
+
+void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
+  // 'discharge' is not supported.
+}
+
+void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
+  // 'discharge' is not supported.
+}
+
+void MallocIntrospectionEnumerateDischargedPointers(
+    malloc_zone_t* zone,
+    void (^report_discharged)(void* memory, void* info)) {
+  // 'discharge' is not supported.
+}
+
+void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
+  // Called in a child process after fork(2) to re-initialize the lock.
+  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
+}
+
+void MallocIntrospectionPrintTask(task_t task,
+                                  unsigned level,
+                                  vm_address_t zone_address,
+                                  memory_reader_t reader,
+                                  print_task_printer_t printer) {
+  // Should print the current state of another process's zone for debugging /
+  // investigation purposes; not implemented because there is no use case for
+  // it yet.
+}
+
+void MallocIntrospectionTaskStatistics(task_t task,
+                                       vm_address_t zone_address,
+                                       memory_reader_t reader,
+                                       malloc_statistics_t* stats) {
+  // Should report memory usage in another process's zone; not implemented
+  // because there is no use case for it yet.
+  stats->blocks_in_use = 0;
+  stats->size_in_use = 0;
+  stats->max_size_in_use = 0;  // High water mark of touched memory
+  stats->size_allocated = 0;   // Reserved in memory
+}
+
+// malloc_zone_t's callback functions for our own zone
+
+size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
+  return ShimGetSizeEstimate(ptr, nullptr);
+}
+
+void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
+  return ShimValloc(size, nullptr);
+}
+
+void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
+  return ShimFree(ptr, nullptr);
+}
+
+void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+void MallocZoneDestroy(malloc_zone_t* zone) {
+  // No support to destroy the zone for now.
+}
+
+void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
+  return ShimMemalign(alignment, size, nullptr);
+}
+
+void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
+  return ShimFreeDefiniteSize(ptr, size, nullptr);
+}
+
+unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
+                               size_t size,
+                               void** results,
+                               unsigned num_requested) {
+  return ShimBatchMalloc(size, results, num_requested, nullptr);
+}
+
+void MallocZoneBatchFree(malloc_zone_t* zone,
+                         void** to_be_freed,
+                         unsigned num) {
+  return ShimBatchFree(to_be_freed, num, nullptr);
+}
+
+boolean_t MallocZoneClaimedAddress(malloc_zone_t* zone, void* ptr) {
+  return static_cast<boolean_t>(ShimClaimedAddress(ptr, nullptr));
+}
+
+#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
+void MallocZoneTryFreeDefault(malloc_zone_t* zone, void* ptr) {
+  return ShimTryFreeDefault(ptr, nullptr);
+}
+#endif
+
+malloc_introspection_t g_mac_malloc_introspection{};
+malloc_zone_t g_mac_malloc_zone{};
+
+malloc_zone_t* GetDefaultMallocZone() {
+  // malloc_default_zone() does not return the default zone but the initial
+  // one. The default zone is the first element of the default zone array.
+  unsigned int zone_count = 0;
+  vm_address_t* zones = nullptr;
+  kern_return_t result =
+      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
+  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
+  return reinterpret_cast<malloc_zone_t*>(zones[0]);
+}
+
+bool IsAlreadyRegistered() {
+  // HACK: This should really only be called once, but it is not.
+  //
+  // This function is a static constructor of its binary. If it is included in a
+  // dynamic library, then the same process may end up executing this code
+  // multiple times, once per library. As a consequence, each new library will
+  // add its own allocator as the default zone. Aside from splitting the heap
+  // further, the main issue arises if/when the last library to be loaded
+  // (dlopen()-ed) gets dlclose()-ed.
+  //
+  // See crbug.com/1271139 for details.
+  //
+  // When that happens, subsequent free() calls are routed by libmalloc to the
+  // deleted zone (since its code has been unloaded from memory), and crash
+  // inside libsystem's free(). In practice this happens as soon as dlclose()
+  // is called, inside the dynamic linker (dyld).
+  //
+  // Since we are talking about different libraries, and about issues inside
+  // the dynamic linker, we cannot use a global static variable (which would be
+  // per-library), or anything from pthread.
+  //
+  // The solution used here is to check whether the current default zone is
+  // already ours, in which case we are not the first dynamic library here, and
+  // should do nothing. This is racy, and hacky.
+  vm_address_t* zones = nullptr;
+  unsigned int zone_count = 0;
+  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
+  // something other than the default zone. See the difference between
+  // malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
+  // (in libmalloc).
+  kern_return_t result =
+      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
+  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
+  // Checking all the zones, in case someone registered their own zone on top of
+  // our own.
+  for (unsigned int i = 0; i < zone_count; i++) {
+    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
+
+    // strcmp() and not a pointer comparison: the zone was registered from
+    // another library, so the pointers don't match.
+    if (zone->zone_name &&
+        (strcmp(zone->zone_name, kPartitionAllocZoneName) == 0)) {
+      // This zone is provided by PartitionAlloc, so this function has been
+      // called from another library (or the main executable), nothing to do.
+      //
+      // Ideally this would be a crash, but existing callers do this, so only
+      // warn for now.
+      PA_RAW_LOG(ERROR,
+                 "Trying to load the allocator multiple times. This is *not* "
+                 "supported.");
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void InitializeZone() {
+  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
+  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
+  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
+  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
+  g_mac_malloc_introspection.log = MallocIntrospectionLog;
+  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
+  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
+  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
+  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
+  g_mac_malloc_introspection.enable_discharge_checking =
+      MallocIntrospectionEnableDischargeChecking;
+  g_mac_malloc_introspection.disable_discharge_checking =
+      MallocIntrospectionDisableDischargeChecking;
+  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
+  g_mac_malloc_introspection.enumerate_discharged_pointers =
+      MallocIntrospectionEnumerateDischargedPointers;
+  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
+  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
+  g_mac_malloc_introspection.task_statistics =
+      MallocIntrospectionTaskStatistics;
+  // The `version` member indicates which APIs are supported in this zone.
+  //   version >= 5: memalign is supported
+  //   version >= 6: free_definite_size is supported
+  //   version >= 7: introspect's discharge family is supported
+  //   version >= 8: pressure_relief is supported
+  //   version >= 9: introspect.reinit_lock is supported
+  //   version >= 10: claimed_address is supported
+  //   version >= 11: introspect.print_task is supported
+  //   version >= 12: introspect.task_statistics is supported
+  //   version >= 13: try_free_default is supported
+  g_mac_malloc_zone.version = kZoneVersion;
+  g_mac_malloc_zone.zone_name = kPartitionAllocZoneName;
+  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
+  g_mac_malloc_zone.size = MallocZoneSize;
+  g_mac_malloc_zone.malloc = MallocZoneMalloc;
+  g_mac_malloc_zone.calloc = MallocZoneCalloc;
+  g_mac_malloc_zone.valloc = MallocZoneValloc;
+  g_mac_malloc_zone.free = MallocZoneFree;
+  g_mac_malloc_zone.realloc = MallocZoneRealloc;
+  g_mac_malloc_zone.destroy = MallocZoneDestroy;
+  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
+  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
+  g_mac_malloc_zone.memalign = MallocZoneMemalign;
+  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
+  g_mac_malloc_zone.pressure_relief = nullptr;
+  g_mac_malloc_zone.claimed_address = MallocZoneClaimedAddress;
+#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
+  g_mac_malloc_zone.try_free_default = MallocZoneTryFreeDefault;
+#endif
+}
+
+namespace {
+static std::atomic<bool> g_initialization_is_done;
+}
+
+// Replaces the default malloc zone with our own malloc zone backed by
+// PartitionAlloc.  Since we'd like as much code as possible to use our own
+// memory allocator (and to reduce bugs caused by mixed use of the system
+// allocator and our own allocator), the following function, which calls
+// `InitializeDefaultAllocatorPartitionRoot`, runs with the highest priority.
+//
+// Note that, despite the highest priority in the initialization order,
+// [NSThread init] unfortunately runs before
+// InitializeDefaultMallocZoneWithPartitionAlloc and allocates memory with the
+// system allocator.  Moreover, that memory is freed with the default zone's
+// `free` at that point, without going through a zone dispatcher.  Hence, our
+// own `free` function receives an address allocated by the system allocator.
+__attribute__((constructor(0))) void
+InitializeDefaultMallocZoneWithPartitionAlloc() {
+  if (IsAlreadyRegistered()) {
+    return;
+  }
+
+  // Instantiate the existing regular and purgeable zones in order to make the
+  // existing purgeable zone use the existing regular zone since PartitionAlloc
+  // doesn't support a purgeable zone.
+  std::ignore = malloc_default_zone();
+  std::ignore = malloc_default_purgeable_zone();
+
+  // Initialize the default allocator's PartitionRoot with the existing zone.
+  InitializeDefaultAllocatorPartitionRoot();
+
+  // Create our own malloc zone.
+  InitializeZone();
+
+  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
+  if (strcmp(system_default_zone->zone_name, kDelegatingZoneName) == 0) {
+    // The first zone is our zone, we can unregister it, replacing it with the
+    // new one. This relies on a precise zone setup, done in
+    // |EarlyMallocZoneRegistration()|.
+    malloc_zone_register(&g_mac_malloc_zone);
+    malloc_zone_unregister(system_default_zone);
+    g_initialization_is_done.store(true, std::memory_order_release);
+    return;
+  }
+
+  // Not in the path where the zone was registered early. This is either racy,
+  // or fine if the current process is not hosting multiple threads.
+  //
+  // This path is fine for e.g. most unit tests.
+  //
+  // Make our own zone the default zone.
+  //
+  // Our own zone is put at the last position, so that it gets promoted to the
+  // default zone.  The implementation logic of malloc_zone_unregister is:
+  //   zone_table.swap(unregistered_zone, last_zone);
+  //   zone_table.shrink_size_by_1();
+  malloc_zone_register(&g_mac_malloc_zone);
+  malloc_zone_unregister(system_default_zone);
+  // Between malloc_zone_unregister(system_default_zone) (above) and
+  // malloc_zone_register(system_default_zone) (below), i.e. while
+  // system_default_zone is absent, another thread may call free(ptr), hit a
+  // "no zone found" error, and crash the process.
+  malloc_zone_register(system_default_zone);
+
+  // Confirm that our own zone is now the default zone.
+  PA_CHECK(GetDefaultMallocZone() == &g_mac_malloc_zone);
+  g_initialization_is_done.store(true, std::memory_order_release);
+}
+
+}  // namespace
+
+bool IsDefaultAllocatorPartitionRootInitialized() {
+  // Even though zone registration is not thread-safe, let's not make it worse,
+  // and use acquire/release ordering.
+  return g_initialization_is_done.load(std::memory_order_acquire);
+}
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
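To make the registration dance in InitializeDefaultMallocZoneWithPartitionAlloc concrete, here is a small standalone sketch (illustration only; it merely simulates the swap-with-last behavior of malloc_zone_unregister described in the comments above) of how registering our zone last and unregistering the previous default promotes our zone to index 0:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  // Simulated zone table; index 0 is the default zone that libmalloc uses.
  std::vector<std::string> zones = {"system_default", "purgeable"};

  // malloc_zone_register(&g_mac_malloc_zone): appended at the end.
  zones.push_back("PartitionAlloc");

  // malloc_zone_unregister(system_default_zone): swap with the last entry,
  // then shrink the table by one.
  auto it = std::find(zones.begin(), zones.end(), "system_default");
  std::iter_swap(it, zones.end() - 1);
  zones.pop_back();

  // malloc_zone_register(system_default_zone): re-registered at the end so
  // that memory it already handed out can still be freed.
  zones.push_back("system_default");

  // Our zone is now at index 0, i.e. it is the default zone.
  assert(zones[0] == "PartitionAlloc");
  return 0;
}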
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_symbols.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_symbols.h
new file mode 100644
index 0000000..42ad8be
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_apple_symbols.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_SYMBOLS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_SYMBOLS_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/third_party/apple_apsl/malloc.h"
+
+namespace allocator_shim {
+
+MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
+  MallocZoneFunctions new_functions;
+  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
+  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
+    return ShimGetSizeEstimate(ptr, zone);
+  };
+  new_functions.claimed_address = [](malloc_zone_t* zone,
+                                     void* ptr) -> boolean_t {
+    return ShimClaimedAddress(ptr, zone);
+  };
+  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
+    return ShimMalloc(size, zone);
+  };
+  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
+                            size_t size) -> void* {
+    return ShimCalloc(n, size, zone);
+  };
+  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
+    return ShimValloc(size, zone);
+  };
+  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
+    ShimFree(ptr, zone);
+  };
+  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
+                             size_t size) -> void* {
+    return ShimRealloc(ptr, size, zone);
+  };
+  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
+                                  void** results,
+                                  unsigned num_requested) -> unsigned {
+    return ShimBatchMalloc(size, results, num_requested, zone);
+  };
+  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
+                                unsigned num_to_be_freed) -> void {
+    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
+  };
+  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
+                              size_t size) -> void* {
+    return ShimMemalign(alignment, size, zone);
+  };
+  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
+                                        size_t size) {
+    ShimFreeDefiniteSize(ptr, size, zone);
+  };
+  new_functions.try_free_default = [](malloc_zone_t* zone, void* ptr) {
+    ShimTryFreeDefault(ptr, zone);
+  };
+  return new_functions;
+}
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_SYMBOLS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_cpp_symbols.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_cpp_symbols.h
new file mode 100644
index 0000000..5ec1625
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_cpp_symbols.h
@@ -0,0 +1,134 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+
+// Preempt the default new/delete C++ symbols so they call the shim entry
+// points. This file is strongly inspired by tcmalloc's
+// libc_override_redefine.h.
+
+#include <new>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(IS_APPLE)
+#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
+#else
+// On Apple OSes, prefer not to export these symbols (this reverts to the
+// default behavior; they are still exported in e.g. component builds). This is
+// partly due to intentional limits on exported symbols in the main library,
+// but exporting is also unnecessary: no library used on macOS imports these.
+//
+// TODO(lizeb): It may not be necessary anywhere to export these.
+#define SHIM_CPP_SYMBOLS_EXPORT PA_NOINLINE
+#endif
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {
+  return ShimCppNew(size);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size) {
+  return ShimCppNew(size);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size,
+                                           const std::nothrow_t&) __THROW {
+  return ShimCppNewNoThrow(size);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size,
+                                             const std::nothrow_t&) __THROW {
+  return ShimCppNewNoThrow(size);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
+                                             const std::nothrow_t&) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
+                                               const std::nothrow_t&) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p, size_t) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
+                                           std::align_val_t alignment) {
+  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
+                                           std::align_val_t alignment,
+                                           const std::nothrow_t&) __THROW {
+  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
+                                             std::align_val_t) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
+                                             std::size_t size,
+                                             std::align_val_t) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
+                                             std::align_val_t,
+                                             const std::nothrow_t&) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
+                                             std::align_val_t alignment) {
+  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
+                                             std::align_val_t alignment,
+                                             const std::nothrow_t&) __THROW {
+  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
+                                               std::align_val_t) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
+                                               std::size_t size,
+                                               std::align_val_t) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
+                                               std::align_val_t,
+                                               const std::nothrow_t&) __THROW {
+  ShimCppDelete(p);
+}
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_glibc_weak_symbols.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_glibc_weak_symbols.h
new file mode 100644
index 0000000..b1564c7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_glibc_weak_symbols.h
@@ -0,0 +1,123 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
+
+// Alias the internal Glibc symbols to the shim entry points.
+// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
+// Effectively this file does two things:
+//  1) Re-define the __malloc_hook & co. symbols. Those symbols are defined as
+//     weak in glibc and are meant to be defined strongly by client processes
+//     to hook calls initiated from within glibc.
+//  2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
+//     is that in the past (in RedHat 9) we had instances of libraries that were
+//     allocating via malloc() and freeing using __libc_free().
+//     See tcmalloc's libc_override_glibc.h for more context.
+
+#include <features.h>  // for __GLIBC__
+#include <malloc.h>
+#include <unistd.h>
+
+#include <new>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h"
+
+// __MALLOC_HOOK_VOLATILE not defined in all Glibc headers.
+#if !defined(__MALLOC_HOOK_VOLATILE)
+#define MALLOC_HOOK_MAYBE_VOLATILE /**/
+#else
+#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
+#endif
+
+extern "C" {
+
+// 1) Re-define malloc_hook weak symbols.
+namespace {
+
+void* GlibcMallocHook(size_t size, const void* caller) {
+  return ShimMalloc(size, nullptr);
+}
+
+void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+void GlibcFreeHook(void* ptr, const void* caller) {
+  return ShimFree(ptr, nullptr);
+}
+
+void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
+  return ShimMemalign(align, size, nullptr);
+}
+
+}  // namespace
+
+__attribute__((visibility("default"))) void* (
+    *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
+                                               const void*) = &GlibcMallocHook;
+
+__attribute__((visibility("default"))) void* (
+    *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
+    &GlibcReallocHook;
+
+__attribute__((visibility("default"))) void (
+    *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
+                                             const void*) = &GlibcFreeHook;
+
+__attribute__((visibility("default"))) void* (
+    *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
+    &GlibcMemalignHook;
+
+// 2) Redefine libc symbols themselves.
+
+SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
+  return ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
+  return ShimMemalign(align, s, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
+  return ShimValloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
+  return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
+  return ShimPosixMemalign(r, a, s);
+}
+
+}  // extern "C"
+
+// Safety check.
+#if !defined(__GLIBC__)
+#error The target platform does not seem to use Glibc. Disable the allocator \
+shim by setting use_allocator_shim=false in GN args.
+#endif
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_libc_symbols.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_libc_symbols.h
new file mode 100644
index 0000000..624a446
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_libc_symbols.h
@@ -0,0 +1,92 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header's purpose is to preempt the Libc symbols for malloc/new so that
+// they call the shim layer entry points.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE)
+#include <malloc/malloc.h>
+#else
+#include <malloc.h>
+#endif
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h"
+
+extern "C" {
+
+// WARNING: Whenever a new function is added to this file (which, surprisingly
+// enough, happens: for instance glibc 2.33 introduced mallinfo2(), which we
+// don't support... yet?), it MUST be added to build/linux/chrome.map.
+//
+// Otherwise the new symbol is not exported from Chromium's main binary, which
+// is necessary to override libc's weak symbol, which in turn is necessary to
+// intercept calls made by dynamic libraries. See crbug.com/1292206 for such
+// an example.
+
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
+  return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
+  return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
+  return ShimMemalign(align, s, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
+  return ShimMemalign(align, s, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
+  return ShimValloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
+  return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
+  return ShimPosixMemalign(r, a, s);
+}
+
+SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
+  return ShimGetSizeEstimate(address, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
+  return ShimGetSizeEstimate(address, nullptr);
+}
+
+// The default dispatch translation unit also has to define the following
+// symbols (unless they are ultimately routed to the system symbols):
+//   void malloc_stats(void);
+//   int mallopt(int, int);
+//   struct mallinfo mallinfo(void);
+
+}  // extern "C"
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_linker_wrapped_symbols.h
new file mode 100644
index 0000000..a2413f8
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_linker_wrapped_symbols.h
@@ -0,0 +1,178 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+
+// This header overrides the __wrap_X symbols when using the link-time
+// -Wl,-wrap,malloc shim-layer approach (see README.md).
+// All references to malloc, free, etc. within the linker unit that gets the
+// -wrap linker flags (e.g., libchrome.so) will be rewritten by the linker as
+// references to __wrap_malloc, __wrap_free, etc., which are defined here.
+
+#include <algorithm>
+#include <cstring>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
+  return ShimMemalign(align, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
+                                             size_t align,
+                                             size_t size) {
+  return ShimPosixMemalign(res, align, size);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
+  return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
+  return ShimRealloc(address, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
+  return ShimValloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT size_t __wrap_malloc_usable_size(void* address) {
+  return ShimGetSizeEstimate(address, nullptr);
+}
+
+const size_t kPathMaxSize = 8192;
+static_assert(kPathMaxSize >= PATH_MAX, "");
+
+extern char* __wrap_strdup(const char* str);
+
+// Override <stdlib.h>
+
+extern char* __real_realpath(const char* path, char* resolved_path);
+
+SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
+                                         char* resolved_path) {
+  if (resolved_path) {
+    return __real_realpath(path, resolved_path);
+  }
+
+  char buffer[kPathMaxSize];
+  if (!__real_realpath(path, buffer)) {
+    return nullptr;
+  }
+  return __wrap_strdup(buffer);
+}
+
+// Override <string.h> functions
+
+SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
+  std::size_t length = std::strlen(str) + 1;
+  void* buffer = ShimMalloc(length, nullptr);
+  if (!buffer) {
+    return nullptr;
+  }
+  return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
+}
+
+SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
+  std::size_t length = std::min(std::strlen(str), n);
+  char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
+  if (!buffer) {
+    return nullptr;
+  }
+  std::memcpy(buffer, str, length);
+  buffer[length] = '\0';
+  return buffer;
+}
+
+// Override <unistd.h>
+
+extern char* __real_getcwd(char* buffer, size_t size);
+
+SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
+  if (buffer) {
+    return __real_getcwd(buffer, size);
+  }
+
+  if (!size) {
+    size = kPathMaxSize;
+  }
+  char local_buffer[size];
+  if (!__real_getcwd(local_buffer, size)) {
+    return nullptr;
+  }
+  return __wrap_strdup(local_buffer);
+}
+
+// Override stdio.h
+
+// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
+// Android, and used by libc++.
+SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
+                                        const char* fmt,
+                                        va_list va_args) {
+  // There are cases where we need to use the list of arguments twice, namely
+  // when the original buffer is too small. It is not allowed to walk the list
+  // twice, so make a copy for the second invocation of vsnprintf().
+  va_list va_args_copy;
+  va_copy(va_args_copy, va_args);
+
+  constexpr int kInitialSize = 128;
+  *strp = static_cast<char*>(
+      malloc(kInitialSize));  // Our malloc() doesn't return nullptr.
+
+  int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
+  if (actual_size < 0) {
+    va_end(va_args_copy);
+    return actual_size;
+  }
+  *strp =
+      static_cast<char*>(realloc(*strp, static_cast<size_t>(actual_size + 1)));
+
+  // Now we know the size. This is not very efficient, but we cannot really do
+  // better without accessing internal libc functions, or reimplementing
+  // *printf().
+  //
+  // This is very lightly used in Chromium in practice; see crbug.com/116558
+  // for details.
+  if (actual_size >= kInitialSize) {
+    int ret = vsnprintf(*strp, static_cast<size_t>(actual_size + 1), fmt,
+                        va_args_copy);
+    va_end(va_args_copy);
+    return ret;
+  }
+
+  va_end(va_args_copy);
+  return actual_size;
+}
+
+SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
+  va_list va_args;
+  va_start(va_args, fmt);
+  int retval = vasprintf(strp, fmt, va_args);
+  va_end(va_args);
+  return retval;
+}
+
+}  // extern "C"
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
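For readers unfamiliar with symbol wrapping, the following standalone example (hypothetical; not part of this patch or of Chromium's build) illustrates the -Wl,-wrap mechanism that the __wrap_* definitions above rely on. With -Wl,-wrap,malloc, the linker resolves references to malloc to __wrap_malloc, and references to __real_malloc to the original malloc:

// wrap_demo.cc -- build with:  clang++ wrap_demo.cc -Wl,-wrap,malloc
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static int g_intercepted_calls = 0;

extern "C" void* __real_malloc(std::size_t size);  // the original libc malloc

extern "C" void* __wrap_malloc(std::size_t size) {
  ++g_intercepted_calls;        // record the interception
  return __real_malloc(size);   // forward to the real allocator
}

int main() {
  void* p = std::malloc(32);  // the linker rewrites this to __wrap_malloc(32)
  std::free(p);
  std::printf("intercepted %d malloc call(s)\n", g_intercepted_calls);
  return 0;
}

In the shim, __wrap_malloc forwards to ShimMalloc() instead of __real_malloc(), so wrapped calls enter the same dispatch chain as the other override mechanisms.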
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_ucrt_symbols_win.h b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_ucrt_symbols_win.h
new file mode 100644
index 0000000..677899f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_override_ucrt_symbols_win.h
@@ -0,0 +1,183 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header defines symbols to override the same functions in the Visual C++
+// CRT implementation.
+
+#ifdef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
+
+#include <malloc.h>
+
+#include <windows.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/checked_math.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_internals.h"
+
+// Even though most C++ allocation operators can be left alone, since the
+// interception works at a lower level, these two should be overridden.
+// Otherwise they redirect to malloc(), which is configured to crash with an
+// OOM in failure cases, such as allocation requests that are too large.
+SHIM_ALWAYS_EXPORT void* operator new(size_t size,
+                                      const std::nothrow_t&) noexcept {
+  return ShimCppNewNoThrow(size);
+}
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
+                                        const std::nothrow_t&) noexcept {
+  return ShimCppNewNoThrow(size);
+}
+
+extern "C" {
+
+namespace {
+
+int win_new_mode = 0;
+
+}  // namespace
+
+// This function behaves similarly to MSVC's _set_new_mode.
+// If flag is 0 (default), calls to malloc will behave normally.
+// If flag is 1, calls to malloc will behave like calls to new,
+// and the std::new_handler will be invoked on failure.
+// Returns the previous mode.
+//
+// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
+int _set_new_mode(int flag) {
+  // The MS CRT calls this function early on in startup, so this serves as a low
+  // overhead proof that the allocator shim is in place for this process.
+  allocator_shim::g_is_win_shim_layer_initialized = true;
+  int old_mode = win_new_mode;
+  win_new_mode = flag;
+
+  allocator_shim::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);
+
+  return old_mode;
+}
+
+// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
+int _query_new_mode() {
+  return win_new_mode;
+}
+
+// These symbols override the CRT's implementation of the same functions.
+__declspec(restrict) void* malloc(size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+void free(void* ptr) {
+  ShimFree(ptr, nullptr);
+}
+
+__declspec(restrict) void* realloc(void* ptr, size_t size) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+__declspec(restrict) void* calloc(size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+// _msize() is the Windows equivalent of malloc_size().
+size_t _msize(void* memblock) {
+  return ShimGetSizeEstimate(memblock, nullptr);
+}
+
+__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
+  return ShimAlignedMalloc(size, alignment, nullptr);
+}
+
+__declspec(restrict) void* _aligned_realloc(void* address,
+                                            size_t size,
+                                            size_t alignment) {
+  return ShimAlignedRealloc(address, size, alignment, nullptr);
+}
+
+void _aligned_free(void* address) {
+  ShimAlignedFree(address, nullptr);
+}
+
+// _recalloc_base is called by CRT internally.
+__declspec(restrict) void* _recalloc_base(void* block,
+                                          size_t count,
+                                          size_t size) {
+  const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
+  partition_alloc::internal::base::CheckedNumeric<size_t>
+      new_block_size_checked = count;
+  new_block_size_checked *= size;
+  const size_t new_block_size = new_block_size_checked.ValueOrDie();
+
+  void* const new_block = realloc(block, new_block_size);
+
+  if (new_block != nullptr && old_block_size < new_block_size) {
+    memset(static_cast<char*>(new_block) + old_block_size, 0,
+           new_block_size - old_block_size);
+  }
+
+  return new_block;
+}
+
+__declspec(restrict) void* _malloc_base(size_t size) {
+  return malloc(size);
+}
+
+__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
+  return calloc(n, size);
+}
+
+void _free_base(void* block) {
+  free(block);
+}
+
+__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
+  return _recalloc_base(block, count, size);
+}
+
+// The following uncommon _aligned_* routines are not used in Chromium and have
+// been shimmed to immediately crash to ensure that implementations are added if
+// uses are introduced.
+__declspec(restrict) void* _aligned_recalloc(void* address,
+                                             size_t num,
+                                             size_t size,
+                                             size_t alignment) {
+  PA_CHECK(false) << "This routine has not been implemented";
+  __builtin_unreachable();
+}
+
+size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
+  PA_CHECK(false) << "This routine has not been implemented";
+  __builtin_unreachable();
+}
+
+__declspec(restrict) void* _aligned_offset_malloc(size_t size,
+                                                  size_t alignment,
+                                                  size_t offset) {
+  PA_CHECK(false) << "This routine has not been implemented";
+  __builtin_unreachable();
+}
+
+__declspec(restrict) void* _aligned_offset_realloc(void* address,
+                                                   size_t size,
+                                                   size_t alignment,
+                                                   size_t offset) {
+  PA_CHECK(false) << "This routine has not been implemented";
+  __builtin_unreachable();
+}
+
+__declspec(restrict) void* _aligned_offset_recalloc(void* address,
+                                                    size_t num,
+                                                    size_t size,
+                                                    size_t alignment,
+                                                    size_t offset) {
+  PA_CHECK(false) << "This routine has not been implemented";
+  __builtin_unreachable();
+}
+
+}  // extern "C"
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
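As a usage sketch of the new-mode plumbing above (hypothetical application code, Windows/MSVC only): once _set_new_mode(1) has run, a failing malloc() is routed through the std::new_handler path rather than simply returning nullptr to the caller:

#include <cstdlib>
#include <new>
#include <new.h>  // _set_new_mode (MSVC CRT)

int main() {
  // Handler installed by the application; with new-mode 1 the shim invokes it
  // when an allocation fails (see SetCallNewHandlerOnMallocFailure above).
  std::set_new_handler([] { std::abort(); });
  _set_new_mode(1);

  void* p = std::malloc(128);  // on failure, the handler runs
  std::free(p);
  return 0;
}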
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_unittest.cc
new file mode 100644
index 0000000..9bc4056
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_unittest.cc
@@ -0,0 +1,791 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <atomic>
+#include <iomanip>
+#include <memory>
+#include <new>
+#include <sstream>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/page_size.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <malloc.h>
+#include <windows.h>
+#elif BUILDFLAG(IS_APPLE)
+#include <malloc/malloc.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/third_party/apple_apsl/malloc.h"
+#else
+#include <malloc.h>
+#endif
+
+#if !BUILDFLAG(IS_WIN)
+#include <unistd.h>
+#endif
+
+#if defined(LIBC_GLIBC)
+extern "C" void* __libc_memalign(size_t align, size_t s);
+#endif
+
+namespace allocator_shim {
+namespace {
+
+using testing::_;
+using testing::MockFunction;
+
+// Special sentinel values used for testing GetSizeEstimate() interception.
+const char kTestSizeEstimateData[] = "test_value";
+constexpr void* kTestSizeEstimateAddress = (void*)kTestSizeEstimateData;
+constexpr size_t kTestSizeEstimate = 1234;
+
+class AllocatorShimTest : public testing::Test {
+ public:
+  AllocatorShimTest() : testing::Test() {}
+
+  static size_t Hash(const void* ptr) {
+    return reinterpret_cast<uintptr_t>(ptr) % MaxSizeTracked();
+  }
+
+  static void* MockAlloc(const AllocatorDispatch* self,
+                         size_t size,
+                         void* context) {
+    if (instance_ && size < MaxSizeTracked()) {
+      ++(instance_->allocs_intercepted_by_size[size]);
+    }
+    return self->next->alloc_function(self->next, size, context);
+  }
+
+  static void* MockAllocUnchecked(const AllocatorDispatch* self,
+                                  size_t size,
+                                  void* context) {
+    if (instance_ && size < MaxSizeTracked()) {
+      ++(instance_->allocs_intercepted_by_size[size]);
+    }
+    return self->next->alloc_unchecked_function(self->next, size, context);
+  }
+
+  static void* MockAllocZeroInit(const AllocatorDispatch* self,
+                                 size_t n,
+                                 size_t size,
+                                 void* context) {
+    const size_t real_size = n * size;
+    if (instance_ && real_size < MaxSizeTracked()) {
+      ++(instance_->zero_allocs_intercepted_by_size[real_size]);
+    }
+    return self->next->alloc_zero_initialized_function(self->next, n, size,
+                                                       context);
+  }
+
+  static void* MockAllocAligned(const AllocatorDispatch* self,
+                                size_t alignment,
+                                size_t size,
+                                void* context) {
+    if (instance_) {
+      if (size < MaxSizeTracked()) {
+        ++(instance_->aligned_allocs_intercepted_by_size[size]);
+      }
+      if (alignment < MaxSizeTracked()) {
+        ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
+      }
+    }
+    return self->next->alloc_aligned_function(self->next, alignment, size,
+                                              context);
+  }
+
+  static void* MockRealloc(const AllocatorDispatch* self,
+                           void* address,
+                           size_t size,
+                           void* context) {
+    if (instance_) {
+      // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
+      // Hitting it for the first time fails the reallocation, which triggers
+      // the invocation of the std::new_handler.
+      if (size == 0xFEED) {
+        thread_local bool did_fail_realloc_0xfeed_once = false;
+        if (!did_fail_realloc_0xfeed_once) {
+          did_fail_realloc_0xfeed_once = true;
+          return nullptr;
+        }
+        return address;
+      }
+
+      if (size < MaxSizeTracked()) {
+        ++(instance_->reallocs_intercepted_by_size[size]);
+      }
+      ++instance_->reallocs_intercepted_by_addr[Hash(address)];
+    }
+    return self->next->realloc_function(self->next, address, size, context);
+  }
+
+  static void MockFree(const AllocatorDispatch* self,
+                       void* address,
+                       void* context) {
+    if (instance_) {
+      ++instance_->frees_intercepted_by_addr[Hash(address)];
+    }
+    self->next->free_function(self->next, address, context);
+  }
+
+  static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
+                                    void* address,
+                                    void* context) {
+    // Special testing values for GetSizeEstimate() interception.
+    if (address == kTestSizeEstimateAddress) {
+      return kTestSizeEstimate;
+    }
+    return self->next->get_size_estimate_function(self->next, address, context);
+  }
+
+  static bool MockClaimedAddress(const AllocatorDispatch* self,
+                                 void* address,
+                                 void* context) {
+    // The same as MockGetSizeEstimate.
+    if (address == kTestSizeEstimateAddress) {
+      return true;
+    }
+    return self->next->claimed_address_function(self->next, address, context);
+  }
+
+  static unsigned MockBatchMalloc(const AllocatorDispatch* self,
+                                  size_t size,
+                                  void** results,
+                                  unsigned num_requested,
+                                  void* context) {
+    if (instance_) {
+      instance_->batch_mallocs_intercepted_by_size[size] =
+          instance_->batch_mallocs_intercepted_by_size[size] + num_requested;
+    }
+    return self->next->batch_malloc_function(self->next, size, results,
+                                             num_requested, context);
+  }
+
+  static void MockBatchFree(const AllocatorDispatch* self,
+                            void** to_be_freed,
+                            unsigned num_to_be_freed,
+                            void* context) {
+    if (instance_) {
+      for (unsigned i = 0; i < num_to_be_freed; ++i) {
+        ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
+      }
+    }
+    self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
+                                    context);
+  }
+
+  static void MockFreeDefiniteSize(const AllocatorDispatch* self,
+                                   void* ptr,
+                                   size_t size,
+                                   void* context) {
+    if (instance_) {
+      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
+      ++instance_->free_definite_sizes_intercepted_by_size[size];
+    }
+    self->next->free_definite_size_function(self->next, ptr, size, context);
+  }
+
+  static void MockTryFreeDefault(const AllocatorDispatch* self,
+                                 void* ptr,
+                                 void* context) {
+    if (instance_) {
+      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
+    }
+    self->next->try_free_default_function(self->next, ptr, context);
+  }
+
+  static void* MockAlignedMalloc(const AllocatorDispatch* self,
+                                 size_t size,
+                                 size_t alignment,
+                                 void* context) {
+    if (instance_ && size < MaxSizeTracked()) {
+      ++instance_->aligned_mallocs_intercepted_by_size[size];
+    }
+    return self->next->aligned_malloc_function(self->next, size, alignment,
+                                               context);
+  }
+
+  static void* MockAlignedRealloc(const AllocatorDispatch* self,
+                                  void* address,
+                                  size_t size,
+                                  size_t alignment,
+                                  void* context) {
+    if (instance_) {
+      if (size < MaxSizeTracked()) {
+        ++instance_->aligned_reallocs_intercepted_by_size[size];
+      }
+      ++instance_->aligned_reallocs_intercepted_by_addr[Hash(address)];
+    }
+    return self->next->aligned_realloc_function(self->next, address, size,
+                                                alignment, context);
+  }
+
+  static void MockAlignedFree(const AllocatorDispatch* self,
+                              void* address,
+                              void* context) {
+    if (instance_) {
+      ++instance_->aligned_frees_intercepted_by_addr[Hash(address)];
+    }
+    self->next->aligned_free_function(self->next, address, context);
+  }
+
+  static void NewHandler() {
+    if (!instance_) {
+      return;
+    }
+    instance_->num_new_handler_calls.fetch_add(1, std::memory_order_relaxed);
+  }
+
+  int32_t GetNumberOfNewHandlerCalls() {
+    return instance_->num_new_handler_calls.load(std::memory_order_acquire);
+  }
+
+  void SetUp() override {
+    allocs_intercepted_by_size.resize(MaxSizeTracked());
+    zero_allocs_intercepted_by_size.resize(MaxSizeTracked());
+    aligned_allocs_intercepted_by_size.resize(MaxSizeTracked());
+    aligned_allocs_intercepted_by_alignment.resize(MaxSizeTracked());
+    reallocs_intercepted_by_size.resize(MaxSizeTracked());
+    reallocs_intercepted_by_addr.resize(MaxSizeTracked());
+    frees_intercepted_by_addr.resize(MaxSizeTracked());
+    batch_mallocs_intercepted_by_size.resize(MaxSizeTracked());
+    batch_frees_intercepted_by_addr.resize(MaxSizeTracked());
+    free_definite_sizes_intercepted_by_size.resize(MaxSizeTracked());
+    aligned_mallocs_intercepted_by_size.resize(MaxSizeTracked());
+    aligned_reallocs_intercepted_by_size.resize(MaxSizeTracked());
+    aligned_reallocs_intercepted_by_addr.resize(MaxSizeTracked());
+    aligned_frees_intercepted_by_addr.resize(MaxSizeTracked());
+    num_new_handler_calls.store(0, std::memory_order_release);
+    instance_ = this;
+
+#if BUILDFLAG(IS_APPLE)
+    InitializeAllocatorShim();
+#endif
+  }
+
+  void TearDown() override {
+    instance_ = nullptr;
+#if BUILDFLAG(IS_APPLE)
+    UninterceptMallocZonesForTesting();
+#endif
+  }
+
+  static size_t MaxSizeTracked() {
+#if BUILDFLAG(IS_IOS)
+    // TODO(crbug.com/1077271): 64-bit iOS uses a page size that is larger than
+    // SystemPageSize(), causing this test to make larger allocations, relative
+    // to SystemPageSize().
+    return 6 * partition_alloc::internal::SystemPageSize();
+#else
+    return 2 * partition_alloc::internal::SystemPageSize();
+#endif
+  }
+
+ protected:
+  std::vector<size_t> allocs_intercepted_by_size;
+  std::vector<size_t> zero_allocs_intercepted_by_size;
+  std::vector<size_t> aligned_allocs_intercepted_by_size;
+  std::vector<size_t> aligned_allocs_intercepted_by_alignment;
+  std::vector<size_t> reallocs_intercepted_by_size;
+  std::vector<size_t> reallocs_intercepted_by_addr;
+  std::vector<size_t> frees_intercepted_by_addr;
+  std::vector<size_t> batch_mallocs_intercepted_by_size;
+  std::vector<size_t> batch_frees_intercepted_by_addr;
+  std::vector<size_t> free_definite_sizes_intercepted_by_size;
+  std::vector<size_t> aligned_mallocs_intercepted_by_size;
+  std::vector<size_t> aligned_reallocs_intercepted_by_size;
+  std::vector<size_t> aligned_reallocs_intercepted_by_addr;
+  std::vector<size_t> aligned_frees_intercepted_by_addr;
+  std::atomic<uint32_t> num_new_handler_calls;
+
+ private:
+  static AllocatorShimTest* instance_;
+};
+
+struct TestStruct1 {
+  uint32_t ignored;
+  uint8_t ignored_2;
+};
+
+struct TestStruct2 {
+  uint64_t ignored;
+  uint8_t ignored_3;
+};
+
+class ThreadDelegateForNewHandlerTest : public base::PlatformThread::Delegate {
+ public:
+  explicit ThreadDelegateForNewHandlerTest(base::WaitableEvent* event)
+      : event_(event) {}
+
+  void ThreadMain() override {
+    event_->Wait();
+    void* temp = malloc(1);
+    void* res = realloc(temp, 0xFEED);
+    EXPECT_EQ(temp, res);
+  }
+
+ private:
+  base::WaitableEvent* event_;
+};
+
+AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;
+
+AllocatorDispatch g_mock_dispatch = {
+    &AllocatorShimTest::MockAlloc,          /* alloc_function */
+    &AllocatorShimTest::MockAllocUnchecked, /* alloc_unchecked_function */
+    &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
+    &AllocatorShimTest::MockAllocAligned,  /* alloc_aligned_function */
+    &AllocatorShimTest::MockRealloc,       /* realloc_function */
+    &AllocatorShimTest::MockFree,          /* free_function */
+    &AllocatorShimTest::MockGetSizeEstimate,  /* get_size_estimate_function */
+    &AllocatorShimTest::MockClaimedAddress,   /* claimed_address_function */
+    &AllocatorShimTest::MockBatchMalloc,      /* batch_malloc_function */
+    &AllocatorShimTest::MockBatchFree,        /* batch_free_function */
+    &AllocatorShimTest::MockFreeDefiniteSize, /* free_definite_size_function */
+    &AllocatorShimTest::MockTryFreeDefault,   /* try_free_default_function */
+    &AllocatorShimTest::MockAlignedMalloc,    /* aligned_malloc_function */
+    &AllocatorShimTest::MockAlignedRealloc,   /* aligned_realloc_function */
+    &AllocatorShimTest::MockAlignedFree,      /* aligned_free_function */
+    nullptr,                                  /* next */
+};
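+// Note: InsertAllocatorDispatch() (called in the tests below) links
+// g_mock_dispatch at the head of the shim chain and fills in |next|, so every
+// Mock* hook above records the call and then forwards to the previously
+// installed dispatch via self->next.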
+
+TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  void* alloc_ptr = malloc(19);
+  ASSERT_NE(nullptr, alloc_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[19], 1u);
+
+  void* zero_alloc_ptr = calloc(2, 23);
+  ASSERT_NE(nullptr, zero_alloc_ptr);
+  ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);
+
+#if !BUILDFLAG(IS_WIN)
+  void* posix_memalign_ptr = nullptr;
+  int res = posix_memalign(&posix_memalign_ptr, 256, 59);
+  ASSERT_EQ(0, res);
+  ASSERT_NE(nullptr, posix_memalign_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);
+
+  // (p)valloc() are not defined on Android. pvalloc() is a GNU extension,
+  // valloc() is not in POSIX.
+#if !BUILDFLAG(IS_ANDROID)
+  const size_t kPageSize = partition_alloc::internal::base::GetPageSize();
+  void* valloc_ptr = valloc(61);
+  ASSERT_NE(nullptr, valloc_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);
+#endif  // !BUILDFLAG(IS_ANDROID)
+
+#endif  // !BUILDFLAG(IS_WIN)
+
+#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
+  void* memalign_ptr = memalign(128, 53);
+  ASSERT_NE(nullptr, memalign_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
+
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
+  void* pvalloc_ptr = pvalloc(67);
+  ASSERT_NE(nullptr, pvalloc_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
+  // pvalloc rounds the size up to the next page.
+  ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);
+#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
+
+#endif  // !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
+
+// See allocator_shim_override_glibc_weak_symbols.h for why we intercept
+// internal libc symbols.
+#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  void* libc_memalign_ptr = __libc_memalign(512, 56);
+  ASSERT_NE(nullptr, libc_memalign_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(libc_memalign_ptr) % 512);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[512], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[56], 1u);
+#endif
+
+  char* realloc_ptr = static_cast<char*>(malloc(10));
+  strcpy(realloc_ptr, "foobar");
+  void* old_realloc_ptr = realloc_ptr;
+  realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
+  ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
+  ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u);
+  ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));
+
+  free(alloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);
+
+  free(zero_alloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);
+
+#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
+  free(memalign_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);
+
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
+  free(pvalloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
+#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
+
+#endif  // !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
+
+#if !BUILDFLAG(IS_WIN)
+  free(posix_memalign_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);
+
+#if !BUILDFLAG(IS_ANDROID)
+  free(valloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);
+#endif  // !BUILDFLAG(IS_ANDROID)
+
+#endif  // !BUILDFLAG(IS_WIN)
+
+#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  free(libc_memalign_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(libc_memalign_ptr)], 1u);
+#endif
+
+  free(realloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);
+
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+
+  void* non_hooked_ptr = malloc(4095);
+  ASSERT_NE(nullptr, non_hooked_ptr);
+  ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
+  free(non_hooked_ptr);
+}
+
+// PartitionAlloc-Everywhere does not support batch_malloc / batch_free.
+#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  unsigned count = 13;
+  std::vector<void*> results;
+  results.resize(count);
+  unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
+                                                   results.data(), count);
+  ASSERT_EQ(count, result_count);
+
+  // TODO(erikchen): On macOS 10.12+, batch_malloc in the default zone may
+  // forward to another zone, which we've also shimmed, resulting in
+  // MockBatchMalloc getting called twice as often as we'd expect. This
+  // re-entrancy into the allocator shim is a bug that needs to be fixed.
+  // https://crbug.com/693237.
+  // ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
+
+  std::vector<void*> results_copy(results);
+  malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
+  for (void* result : results_copy) {
+    ASSERT_GE(batch_frees_intercepted_by_addr[Hash(result)], 1u);
+  }
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+TEST_F(AllocatorShimTest, InterceptLibcSymbolsFreeDefiniteSize) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  void* alloc_ptr = malloc(19);
+  ASSERT_NE(nullptr, alloc_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[19], 1u);
+
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  default_zone->free_definite_size(malloc_default_zone(), alloc_ptr, 19);
+  ASSERT_GE(free_definite_sizes_intercepted_by_size[19], 1u);
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+#endif  // BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+#if BUILDFLAG(IS_WIN)
+TEST_F(AllocatorShimTest, InterceptUcrtAlignedAllocationSymbols) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  constexpr size_t kAlignment = 32;
+  void* alloc_ptr = _aligned_malloc(123, kAlignment);
+  EXPECT_GE(aligned_mallocs_intercepted_by_size[123], 1u);
+
+  void* new_alloc_ptr = _aligned_realloc(alloc_ptr, 1234, kAlignment);
+  EXPECT_GE(aligned_reallocs_intercepted_by_size[1234], 1u);
+  EXPECT_GE(aligned_reallocs_intercepted_by_addr[Hash(alloc_ptr)], 1u);
+
+  _aligned_free(new_alloc_ptr);
+  EXPECT_GE(aligned_frees_intercepted_by_addr[Hash(new_alloc_ptr)], 1u);
+
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+TEST_F(AllocatorShimTest, AlignedReallocSizeZeroFrees) {
+  void* alloc_ptr = _aligned_malloc(123, 16);
+  ASSERT_TRUE(alloc_ptr);
+  alloc_ptr = _aligned_realloc(alloc_ptr, 0, 16);
+  ASSERT_TRUE(!alloc_ptr);
+}
+#endif  // BUILDFLAG(IS_WIN)
+
+TEST_F(AllocatorShimTest, InterceptCppSymbols) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  TestStruct1* new_ptr = new TestStruct1;
+  ASSERT_NE(nullptr, new_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);
+
+  TestStruct1* new_array_ptr = new TestStruct1[3];
+  ASSERT_NE(nullptr, new_array_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);
+
+  TestStruct2* new_nt_ptr = new (std::nothrow) TestStruct2;
+  ASSERT_NE(nullptr, new_nt_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2)], 1u);
+
+  TestStruct2* new_array_nt_ptr = new TestStruct2[3];
+  ASSERT_NE(nullptr, new_array_nt_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2) * 3], 1u);
+
+  delete new_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_ptr)], 1u);
+
+  delete[] new_array_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_ptr)], 1u);
+
+  delete new_nt_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_nt_ptr)], 1u);
+
+  delete[] new_array_nt_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);
+
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+// PartitionAlloc disallows large allocations to avoid errors with int
+// overflows.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+struct TooLarge {
+  char padding1[1UL << 31];
+  int padding2;
+};
+
+TEST_F(AllocatorShimTest, NewNoThrowTooLarge) {
+  char* too_large_array = new (std::nothrow) char[(1UL << 31) + 100];
+  EXPECT_EQ(nullptr, too_large_array);
+
+  TooLarge* too_large_struct = new (std::nothrow) TooLarge;
+  EXPECT_EQ(nullptr, too_large_struct);
+}
+#endif
+
+// This test exercises the case of concurrent OOM failure, which would end up
+// invoking std::new_handler concurrently. This is to cover the CallNewHandler()
+// paths of allocator_shim.cc and smoke-test its thread safety.
+// The test creates kNumThreads threads. Each of them mallocs some memory, and
+// then does a realloc(<new memory>, 0xFEED).
+// The shim intercepts such realloc and makes it fail only once on each thread.
+// We expect to see exactly kNumThreads invocations of the new_handler.
+TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
+  const int kNumThreads = 32;
+  base::PlatformThreadHandle threads[kNumThreads];
+
+  // The WaitableEvent here is used to attempt to trigger all the threads at
+  // the same time, after they have been initialized.
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+  ThreadDelegateForNewHandlerTest mock_thread_main(&event);
+
+  for (auto& thread : threads) {
+    base::PlatformThread::Create(0, &mock_thread_main, &thread);
+  }
+
+  std::set_new_handler(&AllocatorShimTest::NewHandler);
+  SetCallNewHandlerOnMallocFailure(true);  // It's going to fail on realloc().
+  InsertAllocatorDispatch(&g_mock_dispatch);
+  event.Signal();
+  for (auto& thread : threads) {
+    base::PlatformThread::Join(thread);
+  }
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+  ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
+}
+
+#if BUILDFLAG(IS_WIN)
+TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
+  ASSERT_EQ(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
+}
+#endif  // BUILDFLAG(IS_WIN)
+
+#if BUILDFLAG(IS_WIN)
+static size_t GetUsableSize(void* ptr) {
+  return _msize(ptr);
+}
+#elif BUILDFLAG(IS_APPLE)
+static size_t GetUsableSize(void* ptr) {
+  return malloc_size(ptr);
+}
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+static size_t GetUsableSize(void* ptr) {
+  return malloc_usable_size(ptr);
+}
+#else
+#define NO_MALLOC_SIZE
+#endif
+
+#if !defined(NO_MALLOC_SIZE)
+TEST_F(AllocatorShimTest, ShimReplacesMallocSizeWhenEnabled) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+  EXPECT_EQ(GetUsableSize(kTestSizeEstimateAddress), kTestSizeEstimate);
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+TEST_F(AllocatorShimTest, ShimDoesntChangeMallocSizeWhenEnabled) {
+  void* alloc = malloc(16);
+  size_t sz = GetUsableSize(alloc);
+  EXPECT_GE(sz, 16U);
+
+  InsertAllocatorDispatch(&g_mock_dispatch);
+  EXPECT_EQ(GetUsableSize(alloc), sz);
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+
+  free(alloc);
+}
+#endif  // !defined(NO_MALLOC_SIZE)
+
+#if BUILDFLAG(IS_ANDROID)
+TEST_F(AllocatorShimTest, InterceptCLibraryFunctions) {
+  auto total_counts = [](const std::vector<size_t>& counts) {
+    size_t total = 0;
+    for (const auto count : counts) {
+      total += count;
+    }
+    return total;
+  };
+  size_t counts_before;
+  size_t counts_after = total_counts(allocs_intercepted_by_size);
+  void* ptr;
+
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  // <stdlib.h>
+  counts_before = counts_after;
+  ptr = realpath(".", nullptr);
+  EXPECT_NE(nullptr, ptr);
+  free(ptr);
+  counts_after = total_counts(allocs_intercepted_by_size);
+  EXPECT_GT(counts_after, counts_before);
+
+  // <string.h>
+  counts_before = counts_after;
+  ptr = strdup("hello, world");
+  EXPECT_NE(nullptr, ptr);
+  free(ptr);
+  counts_after = total_counts(allocs_intercepted_by_size);
+  EXPECT_GT(counts_after, counts_before);
+
+  counts_before = counts_after;
+  ptr = strndup("hello, world", 5);
+  EXPECT_NE(nullptr, ptr);
+  free(ptr);
+  counts_after = total_counts(allocs_intercepted_by_size);
+  EXPECT_GT(counts_after, counts_before);
+
+  // <unistd.h>
+  counts_before = counts_after;
+  ptr = getcwd(nullptr, 0);
+  EXPECT_NE(nullptr, ptr);
+  free(ptr);
+  counts_after = total_counts(allocs_intercepted_by_size);
+  EXPECT_GT(counts_after, counts_before);
+
+  // With component builds on Android, we cannot intercept calls to functions
+  // inside another component, in this instance the call to vasprintf() inside
+  // libc++. This is not necessarily an issue for allocator shims, as long as we
+  // accept that allocations and deallocations will not be matched at all times.
+  // It is however essential for PartitionAlloc, which is exercised in the test
+  // below.
+#ifndef COMPONENT_BUILD
+  // Calls vasprintf() indirectly, see below.
+  counts_before = counts_after;
+  std::stringstream stream;
+  stream << std::setprecision(1) << std::showpoint << std::fixed << 1.e38;
+  EXPECT_GT(stream.str().size(), 30u);
+  counts_after = total_counts(allocs_intercepted_by_size);
+  EXPECT_GT(counts_after, counts_before);
+#endif  // COMPONENT_BUILD
+
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+// Non-regression test for crbug.com/1166558.
+TEST_F(AllocatorShimTest, InterceptVasprintf) {
+  // Printing a float which expands to >=30 characters calls vasprintf() in
+  // libc, which we should intercept.
+  std::stringstream stream;
+  stream << std::setprecision(1) << std::showpoint << std::fixed << 1.e38;
+  EXPECT_GT(stream.str().size(), 30u);
+  // Should not crash.
+}
+
+TEST_F(AllocatorShimTest, InterceptLongVasprintf) {
+  char* str = nullptr;
+  const char* lorem_ipsum =
+      "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed non risus. "
+      "Suspendisse lectus tortor, dignissim sit amet, adipiscing nec, "
+      "ultricies sed, dolor. Cras elementum ultrices diam. Maecenas ligula "
+      "massa, varius a, semper congue, euismod non, mi. Proin porttitor, orci "
+      "nec nonummy molestie, enim est eleifend mi, non fermentum diam nisl sit "
+      "amet erat. Duis semper. Duis arcu massa, scelerisque vitae, consequat "
+      "in, pretium a, enim. Pellentesque congue. Ut in risus volutpat libero "
+      "pharetra tempor. Cras vestibulum bibendum augue. Praesent egestas leo "
+      "in pede. Praesent blandit odio eu enim. Pellentesque sed dui ut augue "
+      "blandit sodales. Vestibulum ante ipsum primis in faucibus orci luctus "
+      "et ultrices posuere cubilia Curae; Aliquam nibh. Mauris ac mauris sed "
+      "pede pellentesque fermentum. Maecenas adipiscing ante non diam sodales "
+      "hendrerit.";
+  int err = asprintf(&str, "%s", lorem_ipsum);
+  EXPECT_EQ(err, static_cast<int>(strlen(lorem_ipsum)));
+  EXPECT_TRUE(str);
+  free(str);
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+#endif  // BUILDFLAG(IS_ANDROID)
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
+
+// Non-regression test for crbug.com/1291885.
+TEST_F(AllocatorShimTest, BatchMalloc) {
+  constexpr unsigned kNumToAllocate = 20;
+  void* pointers[kNumToAllocate];
+
+  EXPECT_EQ(kNumToAllocate, malloc_zone_batch_malloc(malloc_default_zone(), 10,
+                                                     pointers, kNumToAllocate));
+  malloc_zone_batch_free(malloc_default_zone(), pointers, kNumToAllocate);
+  // Should not crash.
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
+
+}  // namespace
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h b/base/allocator/partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h
new file mode 100644
index 0000000..a296eaa
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h
@@ -0,0 +1,33 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_EARLY_ZONE_REGISTRATION_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_EARLY_ZONE_REGISTRATION_CONSTANTS_H_
+
+// This is an Apple-only file, used to register PartitionAlloc's zone *before*
+// the process becomes multi-threaded. These constants are shared between the
+// allocator shim, which installs PartitionAlloc's malloc zone, and the
+// application which installs the "early malloc zone" to reserve the zone slot.
+
+namespace allocator_shim {
+
+static constexpr char kDelegatingZoneName[] =
+    "DelegatingDefaultZoneForPartitionAlloc";
+static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";
+
+// Zone version. Determines which callbacks are set in the various malloc_zone_t
+// structs.
+#if (__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
+    (__IPHONE_OS_VERSION_MAX_ALLOWED >= 160100)
+#define PA_TRY_FREE_DEFAULT_IS_AVAILABLE 1
+#endif
+#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
+constexpr int kZoneVersion = 13;
+#else
+constexpr int kZoneVersion = 9;
+#endif
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_EARLY_ZONE_REGISTRATION_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.cc
new file mode 100644
index 0000000..1c786b1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.cc
@@ -0,0 +1,125 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h"
+
+#include <atomic>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace allocator_shim {
+
+MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
+static_assert(std::is_pod_v<MallocZoneFunctions>,
+              "MallocZoneFunctions must be POD");
+
+void StoreZoneFunctions(const ChromeMallocZone* zone,
+                        MallocZoneFunctions* functions) {
+  memset(functions, 0, sizeof(MallocZoneFunctions));
+  functions->malloc = zone->malloc;
+  functions->calloc = zone->calloc;
+  functions->valloc = zone->valloc;
+  functions->free = zone->free;
+  functions->realloc = zone->realloc;
+  functions->size = zone->size;
+  PA_BASE_CHECK(functions->malloc && functions->calloc && functions->valloc &&
+                functions->free && functions->realloc && functions->size);
+
+  // These functions might be nullptr.
+  functions->batch_malloc = zone->batch_malloc;
+  functions->batch_free = zone->batch_free;
+
+  if (zone->version >= 5) {
+    // Not all custom malloc zones have a memalign.
+    functions->memalign = zone->memalign;
+  }
+  if (zone->version >= 6) {
+    // This may be nullptr.
+    functions->free_definite_size = zone->free_definite_size;
+  }
+  if (zone->version >= 10) {
+    functions->claimed_address = zone->claimed_address;
+  }
+  if (zone->version >= 13) {
+    functions->try_free_default = zone->try_free_default;
+  }
+
+  // Note that zone version 8 introduced a pressure relief callback, and version
+  // 10 introduced a claimed address callback, but neither is an allocation or
+  // deallocation callback, so they aren't important to intercept.
+
+  functions->context = zone;
+}
+
+namespace {
+
+// All modifications to g_malloc_zones are gated behind this lock.
+// Dispatch to a malloc zone does not need to acquire this lock.
+partition_alloc::internal::Lock& GetLock() {
+  static partition_alloc::internal::Lock s_lock;
+  return s_lock;
+}
+
+void EnsureMallocZonesInitializedLocked() {
+  GetLock().AssertAcquired();
+}
+
+int g_zone_count = 0;
+
+bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
+  EnsureMallocZonesInitializedLocked();
+  for (int i = 0; i < g_zone_count; ++i) {
+    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace
+
+bool StoreMallocZone(ChromeMallocZone* zone) {
+  partition_alloc::internal::ScopedGuard guard(GetLock());
+  if (IsMallocZoneAlreadyStoredLocked(zone)) {
+    return false;
+  }
+
+  if (g_zone_count == kMaxZoneCount) {
+    return false;
+  }
+
+  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
+  ++g_zone_count;
+
+  // No other thread can possibly see these stores at this point. The code that
+  // reads these values is triggered after this function returns, so we want to
+  // guarantee that they are committed at this stage.
+  std::atomic_thread_fence(std::memory_order_seq_cst);
+  return true;
+}
+
+bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
+  partition_alloc::internal::ScopedGuard guard(GetLock());
+  return IsMallocZoneAlreadyStoredLocked(zone);
+}
+
+bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
+                                 const MallocZoneFunctions* functions) {
+  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
+}
+
+int GetMallocZoneCountForTesting() {
+  partition_alloc::internal::ScopedGuard guard(GetLock());
+  return g_zone_count;
+}
+
+void ClearAllMallocZonesForTesting() {
+  partition_alloc::internal::ScopedGuard guard(GetLock());
+  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
+  g_zone_count = 0;
+}
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h b/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h
new file mode 100644
index 0000000..9232554
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h
@@ -0,0 +1,111 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_MALLOC_ZONE_FUNCTIONS_APPLE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_MALLOC_ZONE_FUNCTIONS_APPLE_H_
+
+#include <malloc/malloc.h>
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/third_party/apple_apsl/malloc.h"
+
+namespace allocator_shim {
+
+typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
+typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
+                             size_t num_items,
+                             size_t size);
+typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
+typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
+typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
+                              void* ptr,
+                              size_t size);
+typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
+                               size_t alignment,
+                               size_t size);
+typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
+                                      size_t size,
+                                      void** results,
+                                      unsigned num_requested);
+typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
+                                void** to_be_freed,
+                                unsigned num_to_be_freed);
+typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
+                                        void* ptr,
+                                        size_t size);
+typedef void (*try_free_default_type)(struct _malloc_zone_t* zone, void* ptr);
+typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);
+typedef boolean_t (*claimed_address_type)(struct _malloc_zone_t* zone,
+                                          void* ptr);
+
+struct MallocZoneFunctions {
+  malloc_type malloc;
+  calloc_type calloc;
+  valloc_type valloc;
+  free_type free;
+  realloc_type realloc;
+  memalign_type memalign;
+  batch_malloc_type batch_malloc;
+  batch_free_type batch_free;
+  free_definite_size_type free_definite_size;
+  try_free_default_type try_free_default;
+  size_fn_type size;
+  claimed_address_type claimed_address;
+  const ChromeMallocZone* context;
+};
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void StoreZoneFunctions(const ChromeMallocZone* zone,
+                        MallocZoneFunctions* functions);
+static constexpr int kMaxZoneCount = 30;
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
+
+// The array g_malloc_zones stores all information about malloc zones before
+// they are shimmed. This information needs to be accessed during dispatch back
+// into the zone, and additional zones may be added later in the execution of
+// the program, so the array needs to be both thread-safe and high-performance.
+//
+// We begin by creating an array of MallocZoneFunctions of fixed size. We will
+// never modify the container, which provides thread-safety to iterators.  When
+// we want to add a MallocZoneFunctions to the container, we:
+//   1. Fill in all the fields.
+//   2. Update the total zone count.
+//   3. Insert a memory barrier.
+//   4. Insert our shim.
+//
+// Each MallocZoneFunctions is uniquely identified by |context|, which is a
+// pointer to the original malloc zone. When we wish to dispatch back to the
+// original malloc zones, we iterate through the array, looking for a matching
+// |context|.
+//
+// Most allocations go through the default allocator. We will ensure that the
+// default allocator is stored as the first MallocZoneFunctions.
+//
+// Returns whether the zone was successfully stored.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool StoreMallocZone(ChromeMallocZone* zone);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
+                                 const MallocZoneFunctions* functions);
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) int GetMallocZoneCountForTesting();
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void ClearAllMallocZonesForTesting();
+
+inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
+  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
+    if (g_malloc_zones[i].context == zone) {
+      return g_malloc_zones[i];
+    }
+  }
+  PA_IMMEDIATE_CRASH();
+}
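+
+// Example (illustrative sketch, not part of the shim itself): a replacement
+// zone callback that needs to dispatch back to the original implementation
+// looks up the saved functions by |zone| and forwards the call:
+//   MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+//   return functions.malloc(zone, size);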
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_MALLOC_ZONE_FUNCTIONS_APPLE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple_unittest.cc
new file mode 100644
index 0000000..3656393
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple_unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/malloc_zone_functions_apple.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace allocator_shim {
+
+class MallocZoneFunctionsTest : public testing::Test {
+ protected:
+  void TearDown() override { ClearAllMallocZonesForTesting(); }
+};
+
+TEST_F(MallocZoneFunctionsTest, TestDefaultZoneMallocFree) {
+  ChromeMallocZone* malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  StoreMallocZone(malloc_zone);
+  int* test = reinterpret_cast<int*>(
+      g_malloc_zones[0].malloc(malloc_default_zone(), 33));
+  test[0] = 1;
+  test[1] = 2;
+  g_malloc_zones[0].free(malloc_default_zone(), test);
+}
+
+TEST_F(MallocZoneFunctionsTest, IsZoneAlreadyStored) {
+  ChromeMallocZone* malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  EXPECT_FALSE(IsMallocZoneAlreadyStored(malloc_zone));
+  StoreMallocZone(malloc_zone);
+  EXPECT_TRUE(IsMallocZoneAlreadyStored(malloc_zone));
+}
+
+TEST_F(MallocZoneFunctionsTest, CannotDoubleStoreZone) {
+  ChromeMallocZone* malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  StoreMallocZone(malloc_zone);
+  StoreMallocZone(malloc_zone);
+  EXPECT_EQ(1, GetMallocZoneCountForTesting());
+}
+
+TEST_F(MallocZoneFunctionsTest, CannotStoreMoreThanMaxZones) {
+  std::vector<ChromeMallocZone> zones;
+  zones.resize(kMaxZoneCount * 2);
+  for (int i = 0; i < kMaxZoneCount * 2; ++i) {
+    ChromeMallocZone& zone = zones[i];
+    memcpy(&zone, malloc_default_zone(), sizeof(ChromeMallocZone));
+    StoreMallocZone(&zone);
+  }
+
+  int max_zone_count = kMaxZoneCount;
+  EXPECT_EQ(max_zone_count, GetMallocZoneCountForTesting());
+}
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.cc
new file mode 100644
index 0000000..1374476
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.cc
@@ -0,0 +1,87 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#endif
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace allocator_shim::internal {
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+template <bool quarantinable>
+NonScannableAllocatorImpl<quarantinable>::NonScannableAllocatorImpl() = default;
+template <bool quarantinable>
+NonScannableAllocatorImpl<quarantinable>::~NonScannableAllocatorImpl() =
+    default;
+
+template <bool quarantinable>
+NonScannableAllocatorImpl<quarantinable>&
+NonScannableAllocatorImpl<quarantinable>::Instance() {
+  static partition_alloc::internal::base::NoDestructor<
+      NonScannableAllocatorImpl>
+      instance;
+  return *instance;
+}
+
+template <bool quarantinable>
+void* NonScannableAllocatorImpl<quarantinable>::Alloc(size_t size) {
+#if BUILDFLAG(USE_STARSCAN)
+  // TODO(bikineev): Change to LIKELY once PCScan is enabled by default.
+  if (PA_UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
+    PA_DCHECK(allocator_.get());
+    return allocator_->root()
+        ->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+  // Otherwise, dispatch to default partition.
+  return allocator_shim::internal::PartitionAllocMalloc::Allocator()
+      ->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
+}
+
+template <bool quarantinable>
+void NonScannableAllocatorImpl<quarantinable>::Free(void* ptr) {
+#if BUILDFLAG(USE_STARSCAN)
+  if (PA_UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
+    allocator_->root()->FreeInline<partition_alloc::FreeFlags::kNoHooks>(ptr);
+    return;
+  }
+#endif  // BUILDFLAG(USE_STARSCAN)
+  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+      partition_alloc::FreeFlags::kNoHooks>(ptr);
+}
+
+template <bool quarantinable>
+void NonScannableAllocatorImpl<quarantinable>::NotifyPCScanEnabled() {
+#if BUILDFLAG(USE_STARSCAN)
+  allocator_.reset(partition_alloc::internal::MakePCScanMetadata<
+                   partition_alloc::PartitionAllocator>(
+      partition_alloc::PartitionOptions{
+          .star_scan_quarantine =
+              quarantinable ? partition_alloc::PartitionOptions::kAllowed
+                            : partition_alloc::PartitionOptions::kDisallowed,
+          .backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled,
+      }));
+  if constexpr (quarantinable) {
+    partition_alloc::internal::PCScan::RegisterNonScannableRoot(
+        allocator_->root());
+  }
+  pcscan_enabled_.store(true, std::memory_order_release);
+#endif  // BUILDFLAG(USE_STARSCAN)
+}
+
+template class NonScannableAllocatorImpl<true>;
+template class NonScannableAllocatorImpl<false>;
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+}  // namespace allocator_shim::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h b/base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h
new file mode 100644
index 0000000..665143c
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h
@@ -0,0 +1,87 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_NONSCANNABLE_ALLOCATOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_NONSCANNABLE_ALLOCATOR_H_
+
+#include <atomic>
+#include <cstddef>
+#include <memory>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+#endif
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace allocator_shim {
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+namespace internal {
+
+// Represents allocator that contains memory for data-like objects (that don't
+// contain pointers/references) and therefore doesn't require scanning by
+// PCScan. An example would be strings or socket/IPC/file buffers. Use with
+// caution.
+template <bool quarantinable>
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) NonScannableAllocatorImpl final {
+ public:
+  static NonScannableAllocatorImpl& Instance();
+
+  NonScannableAllocatorImpl(const NonScannableAllocatorImpl&) = delete;
+  NonScannableAllocatorImpl& operator=(const NonScannableAllocatorImpl&) =
+      delete;
+
+  void* Alloc(size_t size);
+  void Free(void*);
+
+  // Returns PartitionRoot corresponding to the allocator, or nullptr if the
+  // allocator is not enabled.
+  partition_alloc::PartitionRoot* root() {
+#if BUILDFLAG(USE_STARSCAN)
+    if (!allocator_.get()) {
+      return nullptr;
+    }
+    return allocator_->root();
+#else
+    return nullptr;
+#endif  // BUILDFLAG(USE_STARSCAN)
+  }
+
+  void NotifyPCScanEnabled();
+
+ private:
+  template <typename>
+  friend class partition_alloc::internal::base::NoDestructor;
+
+  NonScannableAllocatorImpl();
+  ~NonScannableAllocatorImpl();
+
+#if BUILDFLAG(USE_STARSCAN)
+  std::unique_ptr<partition_alloc::PartitionAllocator,
+                  partition_alloc::internal::PCScanMetadataDeleter>
+      allocator_;
+  std::atomic_bool pcscan_enabled_{false};
+#endif  // BUILDFLAG(USE_STARSCAN)
+};
+
+extern template class NonScannableAllocatorImpl<true>;
+extern template class NonScannableAllocatorImpl<false>;
+
+}  // namespace internal
+
+using NonScannableAllocator = internal::NonScannableAllocatorImpl<true>;
+using NonQuarantinableAllocator = internal::NonScannableAllocatorImpl<false>;
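+
+// Illustrative use (sketch): a caller holding pointer-free buffers that should
+// stay out of PCScan's scanning allocates and releases through the singleton:
+//   void* buffer = NonScannableAllocator::Instance().Alloc(4096);
+//   ...
+//   NonScannableAllocator::Instance().Free(buffer);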
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_NONSCANNABLE_ALLOCATOR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.cc
new file mode 100644
index 0000000..d4a36d4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.cc
@@ -0,0 +1,221 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This code should move into the default Windows shim once the win-specific
+// allocation shim has been removed, and the generic shim has become the
+// default.
+
+#include "winheap_stubs_win.h"
+
+#include <limits.h>
+#include <malloc.h>
+#include <new.h>
+#include <windows.h>
+#include <algorithm>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace allocator_shim {
+
+bool g_is_win_shim_layer_initialized = false;
+
+namespace {
+
+const size_t kWindowsPageSize = 4096;
+const size_t kMaxWindowsAllocation = INT_MAX - kWindowsPageSize;
+
+inline HANDLE get_heap_handle() {
+  return reinterpret_cast<HANDLE>(_get_heap_handle());
+}
+
+}  // namespace
+
+void* WinHeapMalloc(size_t size) {
+  if (size < kMaxWindowsAllocation) {
+    return HeapAlloc(get_heap_handle(), 0, size);
+  }
+  return nullptr;
+}
+
+void WinHeapFree(void* ptr) {
+  if (!ptr) {
+    return;
+  }
+
+  HeapFree(get_heap_handle(), 0, ptr);
+}
+
+void* WinHeapRealloc(void* ptr, size_t size) {
+  if (!ptr) {
+    return WinHeapMalloc(size);
+  }
+  if (!size) {
+    WinHeapFree(ptr);
+    return nullptr;
+  }
+  if (size < kMaxWindowsAllocation) {
+    return HeapReAlloc(get_heap_handle(), 0, ptr, size);
+  }
+  return nullptr;
+}
+
+size_t WinHeapGetSizeEstimate(void* ptr) {
+  if (!ptr) {
+    return 0;
+  }
+
+  return HeapSize(get_heap_handle(), 0, ptr);
+}
+
+// Call the new handler, if one has been set.
+// Returns true on successfully calling the handler, false otherwise.
+bool WinCallNewHandler(size_t size) {
+#ifdef _CPPUNWIND
+#error "Exceptions in allocator shim are not supported!"
+#endif  // _CPPUNWIND
+  // Get the current new handler.
+  _PNH nh = _query_new_handler();
+  if (!nh) {
+    return false;
+  }
+  // Since exceptions are disabled, we don't really know if new_handler
+  // failed.  Assume it will abort if it fails.
+  return nh(size) ? true : false;
+}
+
+// The Windows _aligned_* functions are implemented by creating an allocation
+// with enough space to create an aligned allocation internally. The offset to
+// the original allocation is prefixed to the aligned allocation so that it can
+// be correctly freed.
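+//
+// Worked example (assuming a build where sizeof(AlignedPrefix) is 4 bytes,
+// i.e. DCHECKs are off): for _aligned_malloc(100, 64), AdjustedSize() below
+// requests 100 + 4 + 63 = 167 bytes from the heap, AlignAllocation() rounds
+// the address just past the prefix up to the next 64-byte boundary, and the
+// prefix stored immediately before that boundary records the offset back to
+// the start of the HeapAlloc() block so _aligned_free() can recover it.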
+
+namespace {
+
+struct AlignedPrefix {
+  // Offset to the original allocation point.
+  unsigned int original_allocation_offset;
+  // Make sure an unsigned int is enough to store the offset
+  static_assert(
+      kMaxWindowsAllocation < std::numeric_limits<unsigned int>::max(),
+      "original_allocation_offset must be able to fit into an unsigned int");
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // Magic value used to check that _aligned_free() and _aligned_realloc() are
+  // only ever called on an aligned allocated chunk.
+  static constexpr unsigned int kMagic = 0x12003400;
+  unsigned int magic;
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+};
+
+// Compute how large an allocation we need to fit an allocation with the given
+// size and alignment and space for a prefix pointer.
+size_t AdjustedSize(size_t size, size_t alignment) {
+  // Minimal alignment is the prefix size so the prefix is properly aligned.
+  alignment = std::max(alignment, alignof(AlignedPrefix));
+  return size + sizeof(AlignedPrefix) + alignment - 1;
+}
+
+// Align the allocation and write the prefix.
+void* AlignAllocation(void* ptr, size_t alignment) {
+  // Minimal alignment is the prefix size so the prefix is properly aligned.
+  alignment = std::max(alignment, alignof(AlignedPrefix));
+
+  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+  address = partition_alloc::internal::base::bits::AlignUp(
+      address + sizeof(AlignedPrefix), alignment);
+
+  // Write the prefix.
+  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
+  prefix->original_allocation_offset =
+      partition_alloc::internal::base::checked_cast<unsigned int>(
+          address - reinterpret_cast<uintptr_t>(ptr));
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  prefix->magic = AlignedPrefix::kMagic;
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+  return reinterpret_cast<void*>(address);
+}
+
+// Return the original allocation from an aligned allocation.
+void* UnalignAllocation(void* ptr) {
+  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(ptr) - 1;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  PA_DCHECK(prefix->magic == AlignedPrefix::kMagic);
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+  void* unaligned =
+      static_cast<uint8_t*>(ptr) - prefix->original_allocation_offset;
+  PA_CHECK(unaligned < ptr);
+  PA_CHECK(reinterpret_cast<uintptr_t>(ptr) -
+               reinterpret_cast<uintptr_t>(unaligned) <=
+           kMaxWindowsAllocation);
+  return unaligned;
+}
+
+}  // namespace
+
+void* WinHeapAlignedMalloc(size_t size, size_t alignment) {
+  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
+
+  size_t adjusted = AdjustedSize(size, alignment);
+  if (adjusted >= kMaxWindowsAllocation) {
+    return nullptr;
+  }
+
+  void* ptr = WinHeapMalloc(adjusted);
+  if (!ptr) {
+    return nullptr;
+  }
+
+  return AlignAllocation(ptr, alignment);
+}
+
+void* WinHeapAlignedRealloc(void* ptr, size_t size, size_t alignment) {
+  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
+
+  if (!ptr) {
+    return WinHeapAlignedMalloc(size, alignment);
+  }
+  if (!size) {
+    WinHeapAlignedFree(ptr);
+    return nullptr;
+  }
+
+  size_t adjusted = AdjustedSize(size, alignment);
+  if (adjusted >= kMaxWindowsAllocation) {
+    return nullptr;
+  }
+
+  // Try to resize the allocation in place first.
+  void* unaligned = UnalignAllocation(ptr);
+  if (HeapReAlloc(get_heap_handle(), HEAP_REALLOC_IN_PLACE_ONLY, unaligned,
+                  adjusted)) {
+    return ptr;
+  }
+
+  // Otherwise manually perform an _aligned_malloc() and copy since an
+  // unaligned allocation from HeapReAlloc() would force us to copy the
+  // allocation twice.
+  void* new_ptr = WinHeapAlignedMalloc(size, alignment);
+  if (!new_ptr) {
+    return nullptr;
+  }
+
+  size_t gap =
+      reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(unaligned);
+  size_t old_size = WinHeapGetSizeEstimate(unaligned) - gap;
+  memcpy(new_ptr, ptr, std::min(size, old_size));
+  WinHeapAlignedFree(ptr);
+  return new_ptr;
+}
+
+void WinHeapAlignedFree(void* ptr) {
+  if (!ptr) {
+    return;
+  }
+
+  void* original_allocation = UnalignAllocation(ptr);
+  WinHeapFree(original_allocation);
+}
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h b/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h
new file mode 100644
index 0000000..dba03a3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Thin allocation wrappers for the Windows heap. This file should be deleted
+// once the win-specific allocation shim has been removed, and the generic shim
+// has become the default.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_WINHEAP_STUBS_WIN_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_WINHEAP_STUBS_WIN_H_
+
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace allocator_shim {
+
+// Set to true if the link-time magic has successfully hooked into the CRT's
+// heap initialization.
+extern bool g_is_win_shim_layer_initialized;
+
+// Thin wrappers to implement the standard C allocation semantics on the
+// CRT's Windows heap.
+void* WinHeapMalloc(size_t size);
+void WinHeapFree(void* ptr);
+void* WinHeapRealloc(void* ptr, size_t size);
+
+// Returns a lower-bound estimate for the full amount of memory consumed by
+// the allocation |ptr|.
+size_t WinHeapGetSizeEstimate(void* ptr);
+
+// Call the new handler, if one has been set.
+// Returns true on successfully calling the handler, false otherwise.
+bool WinCallNewHandler(size_t size);
+
+// Wrappers to implement the interface for the _aligned_* functions on top of
+// the CRT's Windows heap. Exported for tests.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* WinHeapAlignedMalloc(size_t size, size_t alignment);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* WinHeapAlignedRealloc(void* ptr, size_t size, size_t alignment);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void WinHeapAlignedFree(void* ptr);
+
+}  // namespace allocator_shim
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SHIM_WINHEAP_STUBS_WIN_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win_unittest.cc
new file mode 100644
index 0000000..bb37798
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win_unittest.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/winheap_stubs_win.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace allocator_shim {
+namespace {
+
+bool IsPtrAligned(void* ptr, size_t alignment) {
+  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
+  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+  return partition_alloc::internal::base::bits::AlignUp(address, alignment) ==
+         address;
+}
+
+}  // namespace
+
+TEST(WinHeapStubs, AlignedAllocationAreAligned) {
+  for (size_t alignment = 1; alignment < 65536; alignment *= 2) {
+    SCOPED_TRACE(alignment);
+
+    void* ptr = WinHeapAlignedMalloc(10, alignment);
+    ASSERT_NE(ptr, nullptr);
+    EXPECT_TRUE(IsPtrAligned(ptr, alignment));
+
+    ptr = WinHeapAlignedRealloc(ptr, 1000, alignment);
+    ASSERT_NE(ptr, nullptr);
+    EXPECT_TRUE(IsPtrAligned(ptr, alignment));
+
+    WinHeapAlignedFree(ptr);
+  }
+}
+
+TEST(WinHeapStubs, AlignedReallocationsCorrectlyCopyData) {
+  constexpr size_t kAlignment = 64;
+  constexpr uint8_t kMagicByte = 0xab;
+
+  size_t old_size = 8;
+  void* ptr = WinHeapAlignedMalloc(old_size, kAlignment);
+  ASSERT_NE(ptr, nullptr);
+
+  // Cause allocations to grow and shrink and confirm allocation contents are
+  // copied regardless.
+  constexpr size_t kSizes[] = {10, 1000, 50, 3000, 30, 9000};
+
+  for (size_t size : kSizes) {
+    SCOPED_TRACE(size);
+
+    memset(ptr, kMagicByte, old_size);
+    ptr = WinHeapAlignedRealloc(ptr, size, kAlignment);
+    ASSERT_NE(ptr, nullptr);
+
+    for (size_t i = 0; i < std::min(size, old_size); i++) {
+      SCOPED_TRACE(i);
+      ASSERT_EQ(reinterpret_cast<uint8_t*>(ptr)[i], kMagicByte);
+    }
+
+    old_size = size;
+  }
+
+  WinHeapAlignedFree(ptr);
+}
+
+}  // namespace allocator_shim
diff --git a/base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.cc b/base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.cc
new file mode 100644
index 0000000..7780111
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.cc
@@ -0,0 +1,186 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#endif
+
+#if BUILDFLAG(IS_POSIX)
+#include <pthread.h>
+#endif
+
+#if PA_CONFIG(HAS_LINUX_KERNEL)
+#include <errno.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif  // PA_CONFIG(HAS_LINUX_KERNEL)
+
+#if !PA_CONFIG(HAS_FAST_MUTEX)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+
+#if BUILDFLAG(IS_POSIX)
+#include <sched.h>
+
+#define PA_YIELD_THREAD sched_yield()
+
+#else  // Other OS
+
+#warning "Thread yield not supported on this OS."
+#define PA_YIELD_THREAD ((void)0)
+#endif
+
+#endif  // !PA_CONFIG(HAS_FAST_MUTEX)
+
+namespace partition_alloc::internal {
+
+void SpinningMutex::Reinit() {
+#if !BUILDFLAG(IS_APPLE)
+  // On most platforms, no need to re-init the lock, can just unlock it.
+  Release();
+#else
+  unfair_lock_ = OS_UNFAIR_LOCK_INIT;
+#endif  // BUILDFLAG(IS_APPLE)
+}
+
+void SpinningMutex::AcquireSpinThenBlock() {
+  int tries = 0;
+  int backoff = 1;
+  do {
+    if (PA_LIKELY(Try())) {
+      return;
+    }
+    // Note: Per the intel optimization manual
+    // (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf),
+    // the "pause" instruction is more costly on Skylake Client than on previous
+    // architectures. The latency there is found to be 141 cycles
+    // (up from ~10 on previous ones, roughly a 14x increase).
+    //
+    // According to Agner Fog's instruction tables, the latency is still >100
+    // cycles on Ice Lake, and from other sources, seems to be high as well on
+    // Alder Lake. Separately, it is (from
+    // https://agner.org/optimize/instruction_tables.pdf) also high on AMD Zen 3
+    // (~65). So just assume that it's this way for most x86_64 architectures.
+    //
+    // Also, loop several times here, following the guidelines in section 2.3.4
+    // of the manual, "Pause latency in Skylake Client Microarchitecture".
+    for (int yields = 0; yields < backoff; yields++) {
+      PA_YIELD_PROCESSOR;
+      tries++;
+    }
+    constexpr int kMaxBackoff = 16;
+    backoff = std::min(kMaxBackoff, backoff << 1);
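+    // As a result, the number of PA_YIELD_PROCESSOR calls per iteration grows
+    // 1, 2, 4, 8, 16, 16, ... until |tries| reaches kSpinCount, at which point
+    // we fall back to the blocking LockSlow() below.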
+  } while (tries < kSpinCount);
+
+  LockSlow();
+}
+
+#if PA_CONFIG(HAS_FAST_MUTEX)
+
+#if PA_CONFIG(HAS_LINUX_KERNEL)
+
+void SpinningMutex::FutexWait() {
+  // Save and restore errno.
+  int saved_errno = errno;
+  // Don't check the return value, as we will not be awakened by a timeout,
+  // since none is specified.
+  // none is specified.
+  //
+  // Ignoring the return value doesn't impact correctness, as this acts as an
+  // immediate wakeup. For completeness, the possible errors for FUTEX_WAIT are:
+  // - EACCES: state_ is not readable. Should not happen.
+  // - EAGAIN: the value is not as expected, that is not |kLockedContended|, in
+  //           which case retrying the loop is the right behavior.
+  // - EINTR: signal, looping is the right behavior.
+  // - EINVAL: invalid argument.
+  //
+  // Note: not checking the return value is the approach used in bionic and
+  // glibc as well.
+  //
+  // Will return immediately if |state_| is no longer equal to
+  // |kLockedContended|. Otherwise, sleeps and wakes up when |state_| may not be
+  // |kLockedContended| anymore. Note that even without spurious wakeups, the
+  // value of |state_| is not guaranteed when this returns, as another thread
+  // may get the lock before we get to run.
+  int err = syscall(SYS_futex, &state_, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
+                    kLockedContended, nullptr, nullptr, 0);
+
+  if (err) {
+    // These are programming errors; check for them.
+    PA_DCHECK(errno != EACCES);
+    PA_DCHECK(errno != EINVAL);
+  }
+  errno = saved_errno;
+}
+
+void SpinningMutex::FutexWake() {
+  int saved_errno = errno;
+  long retval = syscall(SYS_futex, &state_, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
+                        1 /* wake up a single waiter */, nullptr, nullptr, 0);
+  PA_CHECK(retval != -1);
+  errno = saved_errno;
+}
+
+void SpinningMutex::LockSlow() {
+  // If this thread gets woken up but another one got the lock first, then go
+  // back to sleep. See the comments in |FutexWait()| for why a loop is required.
+  while (state_.exchange(kLockedContended, std::memory_order_acquire) !=
+         kUnlocked) {
+    FutexWait();
+  }
+}
+
+#elif BUILDFLAG(IS_WIN)
+
+void SpinningMutex::LockSlow() {
+  ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
+}
+
+#elif BUILDFLAG(IS_APPLE)
+
+void SpinningMutex::LockSlow() {
+  return os_unfair_lock_lock(&unfair_lock_);
+}
+
+#elif BUILDFLAG(IS_POSIX)
+
+void SpinningMutex::LockSlow() {
+  int retval = pthread_mutex_lock(&lock_);
+  PA_DCHECK(retval == 0);
+}
+
+#elif BUILDFLAG(IS_FUCHSIA)
+
+void SpinningMutex::LockSlow() {
+  sync_mutex_lock(&lock_);
+}
+
+#endif
+
+#else  // PA_CONFIG(HAS_FAST_MUTEX)
+
+void SpinningMutex::LockSlowSpinLock() {
+  int yield_thread_count = 0;
+  do {
+    if (yield_thread_count < 10) {
+      PA_YIELD_THREAD;
+      yield_thread_count++;
+    } else {
+      // At this point, it's likely that the lock is held by a lower priority
+      // thread that is unable to finish its work because of higher priority
+      // threads spinning here. Sleeping should ensure that it makes progress.
+      base::PlatformThread::Sleep(base::Milliseconds(1));
+    }
+  } while (!TrySpinLock());
+}
+
+#endif  // PA_CONFIG(HAS_FAST_MUTEX)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.h b/base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.h
new file mode 100644
index 0000000..63d8bbe
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.h
@@ -0,0 +1,240 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SPINNING_MUTEX_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SPINNING_MUTEX_H_
+
+#include <algorithm>
+#include <atomic>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/yield_processor.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/win/windows_types.h"
+#endif
+
+#if BUILDFLAG(IS_POSIX)
+#include <errno.h>
+#include <pthread.h>
+#endif
+
+#if BUILDFLAG(IS_APPLE)
+#include <os/lock.h>
+#endif  // BUILDFLAG(IS_APPLE)
+
+#if BUILDFLAG(IS_FUCHSIA)
+#include <lib/sync/mutex.h>
+#endif
+
+namespace partition_alloc::internal {
+
+// The behavior of this class depends on whether PA_CONFIG(HAS_FAST_MUTEX) is set.
+// 1. When it is defined:
+//
+// Simple spinning lock. It will spin in user space a set number of times before
+// going into the kernel to sleep.
+//
+// This is intended to give "the best of both worlds" between a SpinLock and
+// base::Lock:
+// - SpinLock: Inlined fast path, no external function calls, just
+//   compare-and-swap. Short waits do not go into the kernel. Good behavior in
+//   low contention cases.
+// - base::Lock: Good behavior in case of contention.
+//
+// We don't rely on base::Lock, which we could make spin (by calling Try() in a
+// loop), as its performance is below that of a custom spinlock, as seen in
+// high-level benchmarks. Instead this implements a simple non-recursive mutex
+// on top of the futex() syscall on Linux, SRWLock on Windows, os_unfair_lock
+// on macOS, and pthread_mutex on other POSIX systems. The main difference
+// between this and a libc implementation is that it only supports the simplest
+// path: private (to a process), non-recursive mutexes with no priority
+// inheritance and no timed waits.
+//
+// As an interesting side-effect, useful inside the allocator, this code does
+// not make any allocations; locks are small, with a constexpr constructor and
+// no destructor.
+//
+// 2. Otherwise: This is a simple SpinLock, in the sense that it does not have
+// any awareness of other threads' behavior.
+class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
+ public:
+  inline constexpr SpinningMutex();
+  PA_ALWAYS_INLINE void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION();
+  PA_ALWAYS_INLINE void Release() PA_UNLOCK_FUNCTION();
+  PA_ALWAYS_INLINE bool Try() PA_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+  void AssertAcquired() const {}  // Not supported.
+  void Reinit() PA_UNLOCK_FUNCTION();
+
+ private:
+  PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION();
+#if PA_CONFIG(HAS_FAST_MUTEX)
+  void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
+#else
+  PA_ALWAYS_INLINE void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
+#endif
+
+  // See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150
+  // cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz would
+  // cost 150 * 64 / 3e9 ~= 3.2us.
+  //
+  // This applies to Linux kernels, on x86_64. On ARM we might want to spin
+  // more.
+  static constexpr int kSpinCount = 64;
+
+#if PA_CONFIG(HAS_FAST_MUTEX)
+
+#if PA_CONFIG(HAS_LINUX_KERNEL)
+  void FutexWait();
+  void FutexWake();
+
+  static constexpr int kUnlocked = 0;
+  static constexpr int kLockedUncontended = 1;
+  static constexpr int kLockedContended = 2;
+
+  std::atomic<int32_t> state_{kUnlocked};
+#elif BUILDFLAG(IS_WIN)
+  PA_CHROME_SRWLOCK lock_ = SRWLOCK_INIT;
+#elif BUILDFLAG(IS_APPLE)
+  os_unfair_lock unfair_lock_ = OS_UNFAIR_LOCK_INIT;
+#elif BUILDFLAG(IS_POSIX)
+  pthread_mutex_t lock_ = PTHREAD_MUTEX_INITIALIZER;
+#elif BUILDFLAG(IS_FUCHSIA)
+  sync_mutex lock_;
+#endif
+
+#else   // PA_CONFIG(HAS_FAST_MUTEX)
+  std::atomic<bool> lock_{false};
+
+  // Spinlock-like, fallback.
+  PA_ALWAYS_INLINE bool TrySpinLock();
+  PA_ALWAYS_INLINE void ReleaseSpinLock();
+  void LockSlowSpinLock();
+#endif  // PA_CONFIG(HAS_FAST_MUTEX)
+};
+
+PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
+  // Not marked PA_LIKELY(), as:
+  // 1. We don't know how much contention the lock would experience
+  // 2. This may lead to weird-looking code layout when inlined into a caller
+  // with PA_(UN)LIKELY() annotations.
+  if (Try()) {
+    return;
+  }
+
+  return AcquireSpinThenBlock();
+}
+
+inline constexpr SpinningMutex::SpinningMutex() = default;
+
+#if PA_CONFIG(HAS_FAST_MUTEX)
+
+#if PA_CONFIG(HAS_LINUX_KERNEL)
+
+PA_ALWAYS_INLINE bool SpinningMutex::Try() {
+  // Using the weak variant of compare_exchange(), which may fail spuriously. On
+  // some architectures such as ARM, CAS is typically performed as a LDREX/STREX
+  // pair, where the store may fail. In the strong version, there is a loop
+  // inserted by the compiler to retry in these cases.
+  //
+  // Since we are retrying in Lock() anyway, there is no point having two nested
+  // loops.
+  int expected = kUnlocked;
+  return (state_.load(std::memory_order_relaxed) == expected) &&
+         state_.compare_exchange_weak(expected, kLockedUncontended,
+                                      std::memory_order_acquire,
+                                      std::memory_order_relaxed);
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::Release() {
+  if (PA_UNLIKELY(state_.exchange(kUnlocked, std::memory_order_release) ==
+                  kLockedContended)) {
+    // |kLockedContended|: there is a waiter to wake up.
+    //
+    // Here there is a window where the lock is unlocked, since we just set it
+    // to |kUnlocked| above, meaning that another thread can grab the lock
+    // in-between now and |FutexWake()| waking up a waiter. Aside from
+    // potential fairness concerns, this is not an issue, as the newly-woken
+    // thread will check that the lock is still free.
+    //
+    // There is a small pessimization here though: if we have a single waiter,
+    // then when it wakes up, the lock will be set to |kLockedContended|, so
+    // when this waiter releases the lock, it will needlessly call
+    // |FutexWake()|, even though there are no waiters. This is supported by the
+    // kernel, and is what bionic (Android's libc) also does.
+    FutexWake();
+  }
+}
+
+#elif BUILDFLAG(IS_WIN)
+
+PA_ALWAYS_INLINE bool SpinningMutex::Try() {
+  return !!::TryAcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::Release() {
+  ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
+}
+
+#elif BUILDFLAG(IS_APPLE)
+
+PA_ALWAYS_INLINE bool SpinningMutex::Try() {
+  return os_unfair_lock_trylock(&unfair_lock_);
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::Release() {
+  return os_unfair_lock_unlock(&unfair_lock_);
+}
+
+#elif BUILDFLAG(IS_POSIX)
+
+PA_ALWAYS_INLINE bool SpinningMutex::Try() {
+  int retval = pthread_mutex_trylock(&lock_);
+  PA_DCHECK(retval == 0 || retval == EBUSY);
+  return retval == 0;
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::Release() {
+  int retval = pthread_mutex_unlock(&lock_);
+  PA_DCHECK(retval == 0);
+}
+
+#elif BUILDFLAG(IS_FUCHSIA)
+
+PA_ALWAYS_INLINE bool SpinningMutex::Try() {
+  return sync_mutex_trylock(&lock_) == ZX_OK;
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::Release() {
+  sync_mutex_unlock(&lock_);
+}
+
+#endif
+
+#else  // PA_CONFIG(HAS_FAST_MUTEX)
+
+PA_ALWAYS_INLINE bool SpinningMutex::Try() {
+  // Possibly faster than CAS. The theory is that if the cacheline is shared,
+  // then it can stay shared in the contended case.
+  return !lock_.load(std::memory_order_relaxed) &&
+         !lock_.exchange(true, std::memory_order_acquire);
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::Release() {
+  lock_.store(false, std::memory_order_release);
+}
+
+PA_ALWAYS_INLINE void SpinningMutex::LockSlow() {
+  return LockSlowSpinLock();
+}
+
+#endif  // PA_CONFIG(HAS_FAST_MUTEX)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_SPINNING_MUTEX_H_
diff --git a/base/allocator/partition_allocator/starscan/README.md b/base/allocator/partition_allocator/src/partition_alloc/starscan/README.md
similarity index 100%
rename from base/allocator/partition_allocator/starscan/README.md
rename to base/allocator/partition_allocator/src/partition_alloc/starscan/README.md
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/logging.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/logging.h
new file mode 100644
index 0000000..5c8169d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/logging.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_LOGGING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_LOGGING_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+
+namespace partition_alloc::internal {
+
+// Logging requires allocations. This logger allows reentrant allocations to
+// happen within the allocator context.
+struct LoggerWithAllowedAllocations : ScopedAllowAllocations,
+                                      logging::LogMessage {
+  using logging::LogMessage::LogMessage;
+};
+
+#define PA_PCSCAN_VLOG_STREAM(verbose_level)                 \
+  ::partition_alloc::internal::LoggerWithAllowedAllocations( \
+      __FILE__, __LINE__, -(verbose_level))                  \
+      .stream()
+
+// Logging macro that is meant to be used inside *Scan. Generally, reentrancy
+// may be an issue if the macro is called from malloc()/free(). Currently, it's
+// only called at the end of *Scan and when scheduling a new *Scan task.
+// Allocating from these paths should not be an issue, since we make sure that
+// no infinite recursion can occur (e.g. we can't schedule two *Scan tasks and
+// the inner free() call must be non-reentrant). However, these sorts of things
+// are tricky to enforce and easy to get wrong. Since verbose *Scan logging
+// is essential for debugging, we choose to provide support for it inside *Scan.
+#define PA_PCSCAN_VLOG(verbose_level)                  \
+  PA_LAZY_STREAM(PA_PCSCAN_VLOG_STREAM(verbose_level), \
+                 PA_VLOG_IS_ON(verbose_level))
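+
+// Usage sketch (illustrative only): PA_PCSCAN_VLOG behaves like a regular
+// verbose-logging macro but is safe to use from within the allocator, e.g.:
+//
+//   PA_PCSCAN_VLOG(2) << "quarantine size: " << quarantine_size;
+//
+// where |quarantine_size| is a hypothetical local variable at the call site.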
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_LOGGING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.cc
new file mode 100644
index 0000000..6e2d6cf
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.cc
@@ -0,0 +1,35 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+
+#include <cstring>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+constexpr PartitionOptions kConfig{};
+}  // namespace
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+PartitionRoot& PCScanMetadataAllocator() {
+  static internal::base::NoDestructor<PartitionRoot> allocator(kConfig);
+  return *allocator;
+}
+
+// TODO(tasak): investigate whether PartitionAlloc tests really need this
+// function or not. If no tests need it, remove it.
+void ReinitPCScanMetadataAllocatorForTesting() {
+  // First, purge memory owned by PCScanMetadataAllocator.
+  PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+                                        PurgeFlags::kDiscardUnusedSystemPages);
+  // Then, reinit the allocator.
+  PCScanMetadataAllocator().ResetForTesting(true);  // IN-TEST
+  PCScanMetadataAllocator().Init(kConfig);
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h
new file mode 100644
index 0000000..93b04b4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_METADATA_ALLOCATOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_METADATA_ALLOCATOR_H_
+
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+
+namespace partition_alloc::internal {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+PartitionRoot& PCScanMetadataAllocator();
+void ReinitPCScanMetadataAllocatorForTesting();
+
+// STL allocator used to keep the internal data structures required by
+// PCScan.
+template <typename T>
+class MetadataAllocator {
+ public:
+  using value_type = T;
+
+  MetadataAllocator() = default;
+
+  template <typename U>
+  MetadataAllocator(const MetadataAllocator<U>&) {}  // NOLINT
+
+  template <typename U>
+  MetadataAllocator& operator=(const MetadataAllocator<U>&) {
+    return *this;
+  }
+
+  template <typename U>
+  bool operator==(const MetadataAllocator<U>&) {
+    return true;
+  }
+
+  template <typename U>
+  bool operator!=(const MetadataAllocator<U>& o) {
+    return !operator==(o);
+  }
+
+  value_type* allocate(size_t size) {
+    return static_cast<value_type*>(
+        PCScanMetadataAllocator()
+            .AllocInline<partition_alloc::AllocFlags::kNoHooks>(
+                size * sizeof(value_type)));
+  }
+
+  void deallocate(value_type* ptr, size_t size) {
+    PCScanMetadataAllocator().FreeInline<FreeFlags::kNoHooks>(ptr);
+  }
+};
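+
+// Usage sketch (illustrative; pcscan_internal.cc defines similar aliases such
+// as MetadataVector/MetadataSet on top of this allocator):
+//
+//   std::vector<uintptr_t, MetadataAllocator<uintptr_t>> worklist;
+//   worklist.push_back(slot_start);  // Backed by the metadata partition.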
+
+// Inherit from it to make a class allocated on the metadata partition.
+struct AllocatedOnPCScanMetadataPartition {
+  static void* operator new(size_t size) {
+    return PCScanMetadataAllocator()
+        .AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
+  }
+  static void operator delete(void* ptr) {
+    PCScanMetadataAllocator().FreeInline<FreeFlags::kNoHooks>(ptr);
+  }
+};
+
+template <typename T, typename... Args>
+T* MakePCScanMetadata(Args&&... args) {
+  auto* memory = static_cast<T*>(
+      PCScanMetadataAllocator()
+          .AllocInline<partition_alloc::AllocFlags::kNoHooks>(sizeof(T)));
+  return new (memory) T(std::forward<Args>(args)...);
+}
+
+struct PCScanMetadataDeleter final {
+  inline void operator()(void* ptr) const {
+    PCScanMetadataAllocator().FreeInline<FreeFlags::kNoHooks>(ptr);
+  }
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_METADATA_ALLOCATOR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.cc
new file mode 100644
index 0000000..504658f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.cc
@@ -0,0 +1,113 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h"
+
+namespace partition_alloc::internal {
+
+void PCScan::Initialize(InitConfig config) {
+  PCScanInternal::Instance().Initialize(config);
+}
+
+bool PCScan::IsInitialized() {
+  return PCScanInternal::Instance().is_initialized();
+}
+
+void PCScan::Disable() {
+  auto& instance = PCScan::Instance();
+  instance.scheduler().scheduling_backend().DisableScheduling();
+}
+
+bool PCScan::IsEnabled() {
+  auto& instance = PCScan::Instance();
+  return instance.scheduler().scheduling_backend().is_scheduling_enabled();
+}
+
+void PCScan::Reenable() {
+  auto& instance = PCScan::Instance();
+  instance.scheduler().scheduling_backend().EnableScheduling();
+}
+
+void PCScan::RegisterScannableRoot(Root* root) {
+  PCScanInternal::Instance().RegisterScannableRoot(root);
+}
+
+void PCScan::RegisterNonScannableRoot(Root* root) {
+  PCScanInternal::Instance().RegisterNonScannableRoot(root);
+}
+
+void PCScan::RegisterNewSuperPage(Root* root, uintptr_t super_page_base) {
+  PCScanInternal::Instance().RegisterNewSuperPage(root, super_page_base);
+}
+
+void PCScan::PerformScan(InvocationMode invocation_mode) {
+  PCScanInternal::Instance().PerformScan(invocation_mode);
+}
+
+void PCScan::PerformScanIfNeeded(InvocationMode invocation_mode) {
+  PCScanInternal::Instance().PerformScanIfNeeded(invocation_mode);
+}
+
+void PCScan::PerformDelayedScan(int64_t delay_in_microseconds) {
+  PCScanInternal::Instance().PerformDelayedScan(
+      base::Microseconds(delay_in_microseconds));
+}
+
+void PCScan::JoinScan() {
+  PCScanInternal::Instance().JoinScan();
+}
+
+void PCScan::SetProcessName(const char* process_name) {
+  PCScanInternal::Instance().SetProcessName(process_name);
+}
+
+void PCScan::EnableStackScanning() {
+  PCScanInternal::Instance().EnableStackScanning();
+}
+void PCScan::DisableStackScanning() {
+  PCScanInternal::Instance().DisableStackScanning();
+}
+bool PCScan::IsStackScanningEnabled() {
+  return PCScanInternal::Instance().IsStackScanningEnabled();
+}
+
+void PCScan::EnableImmediateFreeing() {
+  PCScanInternal::Instance().EnableImmediateFreeing();
+}
+
+void PCScan::NotifyThreadCreated(void* stack_top) {
+  PCScanInternal::Instance().NotifyThreadCreated(stack_top);
+}
+void PCScan::NotifyThreadDestroyed() {
+  PCScanInternal::Instance().NotifyThreadDestroyed();
+}
+
+void PCScan::SetClearType(ClearType clear_type) {
+  PCScan& instance = Instance();
+  instance.clear_type_ = clear_type;
+}
+
+void PCScan::UninitForTesting() {
+  PCScanInternal::Instance().ClearRootsForTesting();  // IN-TEST
+  ReinitPCScanMetadataAllocatorForTesting();          // IN-TEST
+}
+
+void PCScan::ReinitForTesting(InitConfig config) {
+  PCScanInternal::Instance().ReinitForTesting(config);  // IN-TEST
+}
+
+void PCScan::FinishScanForTesting() {
+  PCScanInternal::Instance().FinishScanForTesting();  // IN-TEST
+}
+
+void PCScan::RegisterStatsReporter(partition_alloc::StatsReporter* reporter) {
+  PCScanInternal::Instance().RegisterStatsReporter(reporter);
+}
+
+PCScan PCScan::instance_ PA_CONSTINIT;
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h
new file mode 100644
index 0000000..0bb7ad3
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h
@@ -0,0 +1,281 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_H_
+
+#include <atomic>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+
+namespace partition_alloc {
+
+class StatsReporter;
+
+namespace internal {
+
+[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED
+    PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DoubleFreeAttempt();
+
+// PCScan (Probabilistic Conservative Scanning) is the algorithm that eliminates
+// use-after-free bugs by verifying that there are no pointers in memory which
+// point to explicitly freed objects before actually releasing their memory. If
+// PCScan is enabled for a partition, freed objects are not immediately returned
+// to the allocator, but are stored in a quarantine. When the quarantine reaches
+// a certain threshold, a concurrent PCScan task gets posted. The task scans the
+// entire heap, looking for dangling pointers (those that point to the
+// quarantine entries). After scanning, the unvisited quarantine entries are
+// unreachable and therefore can be safely reclaimed.
+//
+// The driver class encapsulates the entire PCScan infrastructure.
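+//
+// A typical embedder flow looks roughly like this (illustrative sketch; the
+// exact wiring lives in the PartitionAlloc glue code):
+//
+//   PCScan::Initialize({});                    // Default InitConfig.
+//   PCScan::RegisterScannableRoot(root);       // |root| is a PartitionRoot*.
+//   ...
+//   // On free(), slots are moved to the quarantine instead of being released:
+//   PCScan::MoveToQuarantine(object, usable_size, slot_start, slot_size);
+//   // Once the quarantine limit is reached, a scan is triggered:
+//   PCScan::PerformScanIfNeeded(PCScan::InvocationMode::kNonBlocking);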
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
+ public:
+  using Root = PartitionRoot;
+  using SlotSpan = SlotSpanMetadata;
+
+  enum class InvocationMode {
+    kBlocking,
+    kNonBlocking,
+    kForcedBlocking,
+    kScheduleOnlyForTesting,
+  };
+
+  enum class ClearType : uint8_t {
+    // Clear in the scanning task.
+    kLazy,
+    // Eagerly clear quarantined objects on MoveToQuarantine().
+    kEager,
+  };
+
+  // Parameters used to initialize *Scan.
+  struct InitConfig {
+    // Based on the provided mode, PCScan will try to use a certain
+    // WriteProtector, if supported by the system.
+    enum class WantedWriteProtectionMode : uint8_t {
+      kDisabled,
+      kEnabled,
+    } write_protection = WantedWriteProtectionMode::kDisabled;
+
+    // Flag that enables safepoints that stop mutator execution and help
+    // scanning.
+    enum class SafepointMode : uint8_t {
+      kDisabled,
+      kEnabled,
+    } safepoint = SafepointMode::kDisabled;
+  };
+
+  PCScan(const PCScan&) = delete;
+  PCScan& operator=(const PCScan&) = delete;
+
+  // Initializes PCScan and prepares internal data structures.
+  static void Initialize(InitConfig);
+  static bool IsInitialized();
+
+  // Disable/reenable PCScan. Temporarily disabling PCScan can be useful in
+  // CPU-demanding contexts.
+  static void Disable();
+  static void Reenable();
+  // Query if PCScan is enabled.
+  static bool IsEnabled();
+
+  // Registers a root for scanning.
+  static void RegisterScannableRoot(Root* root);
+  // Registers a root that doesn't need to be scanned but still contains
+  // quarantined objects.
+  static void RegisterNonScannableRoot(Root* root);
+
+  // Registers a newly allocated super page for |root|.
+  static void RegisterNewSuperPage(Root* root, uintptr_t super_page_base);
+
+  PA_ALWAYS_INLINE static void MoveToQuarantine(void* object,
+                                                size_t usable_size,
+                                                uintptr_t slot_start,
+                                                size_t slot_size);
+
+  // Performs scanning unconditionally.
+  static void PerformScan(InvocationMode invocation_mode);
+  // Performs scanning only if a certain quarantine threshold was reached.
+  static void PerformScanIfNeeded(InvocationMode invocation_mode);
+  // Performs scanning with specified delay.
+  static void PerformDelayedScan(int64_t delay_in_microseconds);
+
+  // Enables safepoints in mutator threads.
+  PA_ALWAYS_INLINE static void EnableSafepoints();
+  // Join scan from safepoint in mutator thread. As soon as PCScan is scheduled,
+  // mutators can join PCScan helping out with clearing and scanning.
+  PA_ALWAYS_INLINE static void JoinScanIfNeeded();
+
+  // Checks if there is a PCScan task currently in progress.
+  PA_ALWAYS_INLINE static bool IsInProgress();
+
+  // Sets process name (used for histograms). |name| must be a string literal.
+  static void SetProcessName(const char* name);
+
+  static void EnableStackScanning();
+  static void DisableStackScanning();
+  static bool IsStackScanningEnabled();
+
+  static void EnableImmediateFreeing();
+
+  // Notify PCScan that a new thread was created/destroyed. Can be called for
+  // uninitialized PCScan (before Initialize()).
+  static void NotifyThreadCreated(void* stack_top);
+  static void NotifyThreadDestroyed();
+
+  // Define when clearing should happen (on free() or in scanning task).
+  static void SetClearType(ClearType);
+
+  static void UninitForTesting();
+
+  static inline PCScanScheduler& scheduler();
+
+  // Registers reporting class.
+  static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
+
+ private:
+  class PCScanThread;
+  friend class PCScanTask;
+  friend class PartitionAllocPCScanTestBase;
+  friend class PCScanInternal;
+
+  enum class State : uint8_t {
+    // PCScan task is not scheduled.
+    kNotRunning,
+    // PCScan task is being started and about to be scheduled.
+    kScheduled,
+    // PCScan task is scheduled and can be scanning (or clearing).
+    kScanning,
+    // PCScan task is sweeping or finalizing.
+    kSweepingAndFinishing
+  };
+
+  PA_ALWAYS_INLINE static PCScan& Instance();
+
+  PA_ALWAYS_INLINE bool IsJoinable() const;
+  PA_ALWAYS_INLINE void SetJoinableIfSafepointEnabled(bool);
+
+  inline constexpr PCScan();
+
+  // Joins scan unconditionally.
+  static void JoinScan();
+
+  // Finish scan as scanner thread.
+  static void FinishScanForTesting();
+
+  // Reinitialize internal structures (e.g. card table).
+  static void ReinitForTesting(InitConfig);
+
+  size_t epoch() const { return scheduler_.epoch(); }
+
+  // PA_CONSTINIT for fast access (avoiding static thread-safe initialization).
+  static PCScan instance_ PA_CONSTINIT;
+
+  PCScanScheduler scheduler_{};
+  std::atomic<State> state_{State::kNotRunning};
+  std::atomic<bool> is_joinable_{false};
+  bool is_safepoint_enabled_{false};
+  ClearType clear_type_{ClearType::kLazy};
+};
+
+// To please Chromium's clang plugin.
+constexpr PCScan::PCScan() = default;
+
+PA_ALWAYS_INLINE PCScan& PCScan::Instance() {
+  // The instance is declared as a static member, not static local. The reason
+  // is that we want to use the require_constant_initialization attribute to
+  // avoid double-checked locking, which would otherwise have been introduced
+  // by the compiler for thread-safe dynamic initialization (see constinit
+  // from C++20).
+  return instance_;
+}
+
+PA_ALWAYS_INLINE bool PCScan::IsInProgress() {
+  const PCScan& instance = Instance();
+  return instance.state_.load(std::memory_order_relaxed) != State::kNotRunning;
+}
+
+PA_ALWAYS_INLINE bool PCScan::IsJoinable() const {
+  // This has acquire semantics since a mutator relies on the task being set up.
+  return is_joinable_.load(std::memory_order_acquire);
+}
+
+PA_ALWAYS_INLINE void PCScan::SetJoinableIfSafepointEnabled(bool value) {
+  if (!is_safepoint_enabled_) {
+    PA_DCHECK(!is_joinable_.load(std::memory_order_relaxed));
+    return;
+  }
+  // Release semantics is required to "publish" the change of the state so that
+  // the mutators can join scanning and expect the consistent state.
+  is_joinable_.store(value, std::memory_order_release);
+}
+
+PA_ALWAYS_INLINE void PCScan::EnableSafepoints() {
+  PCScan& instance = Instance();
+  instance.is_safepoint_enabled_ = true;
+}
+
+PA_ALWAYS_INLINE void PCScan::JoinScanIfNeeded() {
+  PCScan& instance = Instance();
+  if (PA_UNLIKELY(instance.IsJoinable())) {
+    instance.JoinScan();
+  }
+}
+
+PA_ALWAYS_INLINE void PCScan::MoveToQuarantine(void* object,
+                                               size_t usable_size,
+                                               uintptr_t slot_start,
+                                               size_t slot_size) {
+  PCScan& instance = Instance();
+  if (instance.clear_type_ == ClearType::kEager) {
+    // We need to distinguish between usable_size and slot_size in this context:
+    // - for large buckets usable_size can be noticeably smaller than slot_size;
+    // - usable_size is safe as, unlike slot_size, it doesn't cover extras.
+    // TODO(bikineev): If we start protecting quarantine memory, we can lose
+    // double-free coverage (the check below). Consider performing the
+    // double-free check before protecting if eager clearing becomes default.
+    SecureMemset(object, 0, usable_size);
+  }
+
+  auto* state_bitmap = StateBitmapFromAddr(slot_start);
+
+  // Mark the state in the state bitmap as quarantined. Make sure to do it after
+  // the clearing to avoid racing with *Scan Sweeper.
+  [[maybe_unused]] const bool succeeded =
+      state_bitmap->Quarantine(slot_start, instance.epoch());
+#if PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
+  if (PA_UNLIKELY(!succeeded)) {
+    DoubleFreeAttempt();
+  }
+#else
+  // The compiler is able to optimize cmpxchg to a lock-prefixed and.
+#endif  // PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
+
+  const bool is_limit_reached = instance.scheduler_.AccountFreed(slot_size);
+  if (PA_UNLIKELY(is_limit_reached)) {
+    // Perform a quick check if another scan is already in progress.
+    if (instance.IsInProgress()) {
+      return;
+    }
+    // Avoid blocking the current thread for regular scans.
+    instance.PerformScan(InvocationMode::kNonBlocking);
+  }
+}
+
+inline PCScanScheduler& PCScan::scheduler() {
+  PCScan& instance = Instance();
+  return instance.scheduler_;
+}
+
+}  // namespace internal
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.cc
new file mode 100644
index 0000000..8b25637
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.cc
@@ -0,0 +1,1642 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h"
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <condition_variable>
+#include <cstdint>
+#include <mutex>
+#include <numeric>
+#include <set>
+#include <thread>
+#include <type_traits>
+#include <unordered_map>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/allocation_guard.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/alias.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/ref_counted.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_refptr.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_page.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/raceful_worklist.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager_bitmap.h"
+#endif
+
+#if PA_CONFIG(STARSCAN_NOINLINE_SCAN_FUNCTIONS)
+#define PA_SCAN_INLINE PA_NOINLINE
+#else
+#define PA_SCAN_INLINE PA_ALWAYS_INLINE
+#endif
+
+namespace partition_alloc::internal {
+
+[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void DoubleFreeAttempt() {
+  PA_NO_CODE_FOLDING();
+  PA_IMMEDIATE_CRASH();
+}
+
+namespace {
+
+#if PA_CONFIG(HAS_ALLOCATION_GUARD)
+// Currently, check reentrancy only on Linux. On Android, TLS is emulated by the
+// runtime lib, which can allocate and therefore cause reentrancy.
+struct ReentrantScannerGuard final {
+ public:
+  ReentrantScannerGuard() {
+    PA_CHECK(!guard_);
+    guard_ = true;
+  }
+  ~ReentrantScannerGuard() { guard_ = false; }
+
+ private:
+  // Since this variable has hidden visibility (not referenced by other DSOs),
+  // assume that thread_local works on all supported architectures.
+  static thread_local size_t guard_;
+};
+thread_local size_t ReentrantScannerGuard::guard_ = 0;
+#else
+struct [[maybe_unused]] ReentrantScannerGuard final {};
+#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
+
+// Scope that disables MTE checks. Only used inside scanning to avoid the race:
+// a slot tag is changed by the mutator, while the scanner sees an old value.
+struct DisableMTEScope final {
+  DisableMTEScope() {
+    ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
+        ::partition_alloc::TagViolationReportingMode::kDisabled);
+  }
+  ~DisableMTEScope() {
+    ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
+        parent_tagging_mode);
+  }
+
+ private:
+  ::partition_alloc::TagViolationReportingMode parent_tagging_mode =
+      ::partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
+};
+
+#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+// Bytemap that represent regions (cards) that contain quarantined slots.
+// A single PCScan cycle consists of the following steps:
+// 1) clearing (memset quarantine + marking cards that contain quarantine);
+// 2) scanning;
+// 3) sweeping (freeing + unmarking cards that contain freed slots).
+// Marking cards in step 1) ensures that the card table stays in a consistent
+// state while scanning. Unmarking in step 3) ensures that unmarking
+// actually happens (and we don't hit too many false positives).
+//
+// The code here relies on the fact that |address| is in the regular pool and
+// that the card table (this object) is allocated at the very beginning of that
+// pool.
+class QuarantineCardTable final {
+ public:
+  // Avoid the load of the base of the regular pool.
+  PA_ALWAYS_INLINE static QuarantineCardTable& GetFrom(uintptr_t address) {
+    PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(address));
+    return *reinterpret_cast<QuarantineCardTable*>(
+        address & PartitionAddressSpace::RegularPoolBaseMask());
+  }
+
+  PA_ALWAYS_INLINE void Quarantine(uintptr_t begin, size_t size) {
+    return SetImpl(begin, size, true);
+  }
+
+  PA_ALWAYS_INLINE void Unquarantine(uintptr_t begin, size_t size) {
+    return SetImpl(begin, size, false);
+  }
+
+  // Returns whether the card to which |address| points contains quarantined
+  // slots. May return false positives but should never return false
+  // negatives, as otherwise this breaks security.
+  PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
+    const size_t byte = Byte(address);
+    PA_SCAN_DCHECK(byte < bytes_.size());
+    return bytes_[byte];
+  }
+
+ private:
+  static constexpr size_t kCardSize = kPoolMaxSize / kSuperPageSize;
+  static constexpr size_t kBytes = kPoolMaxSize / kCardSize;
+
+  QuarantineCardTable() = default;
+
+  PA_ALWAYS_INLINE static size_t Byte(uintptr_t address) {
+    return (address & ~PartitionAddressSpace::RegularPoolBaseMask()) /
+           kCardSize;
+  }
+
+  PA_ALWAYS_INLINE void SetImpl(uintptr_t begin, size_t size, bool value) {
+    const size_t byte = Byte(begin);
+    const size_t need_bytes = (size + (kCardSize - 1)) / kCardSize;
+    PA_SCAN_DCHECK(bytes_.size() >= byte + need_bytes);
+    PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(begin));
+    for (size_t i = byte; i < byte + need_bytes; ++i) {
+      bytes_[i] = value;
+    }
+  }
+
+  std::array<bool, kBytes> bytes_;
+};
+static_assert(kSuperPageSize >= sizeof(QuarantineCardTable),
+              "Card table size must be less than kSuperPageSize, since this is "
+              "what is committed");
+#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+
+template <typename T>
+using MetadataVector = std::vector<T, MetadataAllocator<T>>;
+template <typename T>
+using MetadataSet = std::set<T, std::less<>, MetadataAllocator<T>>;
+template <typename K, typename V>
+using MetadataHashMap =
+    std::unordered_map<K,
+                       V,
+                       std::hash<K>,
+                       std::equal_to<>,
+                       MetadataAllocator<std::pair<const K, V>>>;
+
+struct GetSlotStartResult final {
+  PA_ALWAYS_INLINE bool is_found() const {
+    PA_SCAN_DCHECK(!slot_start || slot_size);
+    return slot_start;
+  }
+
+  uintptr_t slot_start = 0;
+  size_t slot_size = 0;
+};
+
+// Returns the start of a slot, or 0 if |maybe_inner_address| is not inside of
+// an existing slot span. The function may return a non-0 address even inside a
+// decommitted or free slot span; it's the caller's responsibility to check
+// whether the memory is actually allocated.
+//
+// |maybe_inner_address| must be within a normal-bucket super page and can also
+// point to guard pages or slot-span metadata.
+PA_SCAN_INLINE GetSlotStartResult
+GetSlotStartInSuperPage(uintptr_t maybe_inner_address) {
+  PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_inner_address));
+  // Don't use SlotSpanMetadata/PartitionPage::FromAddr() and family, because
+  // they expect an address within a super page payload area, which we don't
+  // know yet if |maybe_inner_address| is.
+  const uintptr_t super_page = maybe_inner_address & kSuperPageBaseMask;
+
+  const uintptr_t partition_page_index =
+      (maybe_inner_address & kSuperPageOffsetMask) >> PartitionPageShift();
+  auto* page =
+      PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
+  // Check if page is valid. The check also works for the guard pages and the
+  // metadata page.
+  if (!page->is_valid) {
+    return {};
+  }
+
+  page -= page->slot_span_metadata_offset;
+  PA_SCAN_DCHECK(page->is_valid);
+  PA_SCAN_DCHECK(!page->slot_span_metadata_offset);
+  auto* slot_span = &page->slot_span_metadata;
+  // Check if the slot span is actually used and valid.
+  if (!slot_span->bucket) {
+    return {};
+  }
+#if PA_SCAN_DCHECK_IS_ON()
+  DCheckIsValidSlotSpan(slot_span);
+#endif
+  const uintptr_t slot_span_start =
+      SlotSpanMetadata::ToSlotSpanStart(slot_span);
+  const ptrdiff_t ptr_offset = maybe_inner_address - slot_span_start;
+  PA_SCAN_DCHECK(0 <= ptr_offset &&
+                 ptr_offset < static_cast<ptrdiff_t>(
+                                  slot_span->bucket->get_pages_per_slot_span() *
+                                  PartitionPageSize()));
+  // The slot span size in bytes is not necessarily a multiple of the partition
+  // page. Don't check whether the pointer points outside of the usable area,
+  // since checking the quarantine bit will return false in this case anyway.
+  const size_t slot_size = slot_span->bucket->slot_size;
+  const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
+  const uintptr_t slot_start = slot_span_start + (slot_number * slot_size);
+  PA_SCAN_DCHECK(slot_start <= maybe_inner_address &&
+                 maybe_inner_address < slot_start + slot_size);
+  return {.slot_start = slot_start, .slot_size = slot_size};
+}
+
+#if PA_SCAN_DCHECK_IS_ON()
+bool IsQuarantineEmptyOnSuperPage(uintptr_t super_page) {
+  auto* bitmap = SuperPageStateBitmap(super_page);
+  size_t visited = 0;
+  bitmap->IterateQuarantined([&visited](auto) { ++visited; });
+  return !visited;
+}
+#endif
+
+SimdSupport DetectSimdSupport() {
+#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+  return SimdSupport::kNEON;
+#else
+  const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
+  if (cpu.has_avx2()) {
+    return SimdSupport::kAVX2;
+  }
+  if (cpu.has_sse41()) {
+    return SimdSupport::kSSE41;
+  }
+  return SimdSupport::kUnvectorized;
+#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+}
+
+void CommitCardTable() {
+#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
+                      sizeof(QuarantineCardTable),
+                      PageAccessibilityConfiguration(
+                          PageAccessibilityConfiguration::kReadWrite),
+                      PageAccessibilityDisposition::kRequireUpdate);
+#endif
+}
+
+template <class Function>
+void IterateNonEmptySlotSpans(uintptr_t super_page,
+                              size_t nonempty_slot_spans,
+                              Function function) {
+  PA_SCAN_DCHECK(!(super_page % kSuperPageAlignment));
+  PA_SCAN_DCHECK(nonempty_slot_spans);
+
+  size_t slot_spans_to_visit = nonempty_slot_spans;
+#if PA_SCAN_DCHECK_IS_ON()
+  size_t visited = 0;
+#endif
+
+  IterateSlotSpans(super_page, true /*with_quarantine*/,
+                   [&function, &slot_spans_to_visit
+#if PA_SCAN_DCHECK_IS_ON()
+                    ,
+                    &visited
+#endif
+  ](SlotSpanMetadata* slot_span) {
+                     if (slot_span->is_empty() || slot_span->is_decommitted()) {
+                       // Skip empty/decommitted slot spans.
+                       return false;
+                     }
+                     function(slot_span);
+                     --slot_spans_to_visit;
+#if PA_SCAN_DCHECK_IS_ON()
+                     // In debug builds, scan all the slot spans to check that
+                     // number of visited slot spans is equal to the number of
+                     // nonempty_slot_spans.
+                     ++visited;
+                     return false;
+#else
+        return slot_spans_to_visit == 0;
+#endif
+                   });
+#if PA_SCAN_DCHECK_IS_ON()
+  // Check that exactly all non-empty slot spans have been visited.
+  PA_DCHECK(nonempty_slot_spans == visited);
+#endif
+}
+
+// SuperPageSnapshot is used to record all slot spans that contain live slots.
+// The class avoids dynamic allocations and is designed to be instantiated on
+// the stack. To avoid stack overflow, internal data structures are kept packed.
+class SuperPageSnapshot final {
+  // The following constants are used to define a conservative estimate for
+  // the maximum number of slot spans in a super page.
+  //
+  // For systems with runtime-defined page size, assume partition page size is
+  // at least 16kiB.
+  static constexpr size_t kMinPartitionPageSize =
+      __builtin_constant_p(PartitionPageSize()) ? PartitionPageSize() : 1 << 14;
+  static constexpr size_t kStateBitmapMinReservedSize =
+      __builtin_constant_p(ReservedStateBitmapSize())
+          ? ReservedStateBitmapSize()
+          : partition_alloc::internal::base::bits::AlignUp(
+                sizeof(AllocationStateMap),
+                kMinPartitionPageSize);
+  // Take into account guard partition page at the end of super-page.
+  static constexpr size_t kGuardPagesSize = 2 * kMinPartitionPageSize;
+
+  static constexpr size_t kPayloadMaxSize =
+      kSuperPageSize - kStateBitmapMinReservedSize - kGuardPagesSize;
+  static_assert(kPayloadMaxSize % kMinPartitionPageSize == 0,
+                "kPayloadMaxSize must be multiple of kMinPartitionPageSize");
+
+  static constexpr size_t kMaxSlotSpansInSuperPage =
+      kPayloadMaxSize / kMinPartitionPageSize;
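+
+  // For illustration, assuming a 2 MiB super page and the 16 kiB lower bound
+  // above: kGuardPagesSize = 32 kiB and kStateBitmapMinReservedSize >= 16 kiB,
+  // so kPayloadMaxSize <= 2048 kiB - 48 kiB = 2000 kiB, i.e.
+  // kMaxSlotSpansInSuperPage <= 2000 kiB / 16 kiB = 125 scan areas.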
+
+ public:
+  struct ScanArea {
+    // Use packed integer types to save stack space. In theory, kAlignment could
+    // be used instead of words, but it doesn't seem to bring savings.
+    uint32_t offset_within_page_in_words;
+    uint32_t size_in_words;
+    uint32_t slot_size_in_words;
+  };
+
+  class ScanAreas : private std::array<ScanArea, kMaxSlotSpansInSuperPage> {
+    using Base = std::array<ScanArea, kMaxSlotSpansInSuperPage>;
+
+   public:
+    using iterator = Base::iterator;
+    using const_iterator = Base::const_iterator;
+    using Base::operator[];
+
+    iterator begin() { return Base::begin(); }
+    const_iterator begin() const { return Base::begin(); }
+
+    iterator end() { return std::next(begin(), size_); }
+    const_iterator end() const { return std::next(begin(), size_); }
+
+    void set_size(size_t new_size) { size_ = new_size; }
+
+   private:
+    size_t size_;
+  };
+
+  static_assert(std::is_trivially_default_constructible_v<ScanAreas>,
+                "ScanAreas must be trivially default constructible to ensure "
+                "that no memsets are generated by the compiler as a "
+                "result of value-initialization (or zero-initialization)");
+
+  void* operator new(size_t) = delete;
+  void operator delete(void*) = delete;
+
+  // Creates a snapshot for a single super page. In theory, we could simply
+  // iterate over slot spans without taking a snapshot. However, we do this to
+  // minimize the mutex locking time. The mutex must be acquired to make sure
+  // that no mutator is concurrently changing any of the slot spans.
+  explicit SuperPageSnapshot(uintptr_t super_page_base);
+
+  const ScanAreas& scan_areas() const { return scan_areas_; }
+
+ private:
+  ScanAreas scan_areas_;
+};
+
+static_assert(
+    sizeof(SuperPageSnapshot) <= 2048,
+    "SuperPageSnapshot must stay relatively small to be allocated on stack");
+
+SuperPageSnapshot::SuperPageSnapshot(uintptr_t super_page) {
+  using SlotSpan = SlotSpanMetadata;
+
+  auto* extent_entry = PartitionSuperPageToExtent(super_page);
+
+  ::partition_alloc::internal::ScopedGuard lock(
+      ::partition_alloc::internal::PartitionRootLock(extent_entry->root));
+
+  const size_t nonempty_slot_spans =
+      extent_entry->number_of_nonempty_slot_spans;
+  if (!nonempty_slot_spans) {
+#if PA_SCAN_DCHECK_IS_ON()
+    // Check that quarantine bitmap is empty for super-pages that contain
+    // only empty/decommitted slot-spans.
+    PA_CHECK(IsQuarantineEmptyOnSuperPage(super_page));
+#endif
+    scan_areas_.set_size(0);
+    return;
+  }
+
+  size_t current = 0;
+
+  IterateNonEmptySlotSpans(
+      super_page, nonempty_slot_spans, [this, &current](SlotSpan* slot_span) {
+        const uintptr_t payload_begin = SlotSpan::ToSlotSpanStart(slot_span);
+        // For single-slot slot-spans, scan only utilized slot part.
+        const size_t provisioned_size =
+            PA_UNLIKELY(slot_span->CanStoreRawSize())
+                ? slot_span->GetRawSize()
+                : slot_span->GetProvisionedSize();
+        // Free & decommitted slot spans are skipped.
+        PA_SCAN_DCHECK(provisioned_size > 0);
+        const uintptr_t payload_end = payload_begin + provisioned_size;
+        auto& area = scan_areas_[current];
+
+        const size_t offset_in_words =
+            (payload_begin & kSuperPageOffsetMask) / sizeof(uintptr_t);
+        const size_t size_in_words =
+            (payload_end - payload_begin) / sizeof(uintptr_t);
+        const size_t slot_size_in_words =
+            slot_span->bucket->slot_size / sizeof(uintptr_t);
+
+#if PA_SCAN_DCHECK_IS_ON()
+        PA_DCHECK(offset_in_words <=
+                  std::numeric_limits<
+                      decltype(area.offset_within_page_in_words)>::max());
+        PA_DCHECK(size_in_words <=
+                  std::numeric_limits<decltype(area.size_in_words)>::max());
+        PA_DCHECK(
+            slot_size_in_words <=
+            std::numeric_limits<decltype(area.slot_size_in_words)>::max());
+#endif
+
+        area.offset_within_page_in_words = offset_in_words;
+        area.size_in_words = size_in_words;
+        area.slot_size_in_words = slot_size_in_words;
+
+        ++current;
+      });
+
+  PA_SCAN_DCHECK(kMaxSlotSpansInSuperPage >= current);
+  scan_areas_.set_size(current);
+}
+
+}  // namespace
+
+class PCScanScanLoop;
+
+// This class is responsible for performing the entire PCScan task.
+// TODO(bikineev): Move PCScan algorithm out of PCScanTask.
+class PCScanTask final : public base::RefCountedThreadSafe<PCScanTask>,
+                         public AllocatedOnPCScanMetadataPartition {
+ public:
+  // Creates and initializes a PCScan state.
+  PCScanTask(PCScan& pcscan, size_t quarantine_last_size);
+
+  PCScanTask(PCScanTask&&) noexcept = delete;
+  PCScanTask& operator=(PCScanTask&&) noexcept = delete;
+
+  // Execute PCScan from mutator inside safepoint.
+  void RunFromMutator();
+
+  // Execute PCScan from the scanner thread. Must be called only once from the
+  // scanner thread.
+  void RunFromScanner();
+
+  PCScanScheduler& scheduler() const { return pcscan_.scheduler(); }
+
+ private:
+  class StackVisitor;
+  friend class PCScanScanLoop;
+
+  using Root = PCScan::Root;
+  using SlotSpan = SlotSpanMetadata;
+
+  // This is used:
+  // - to synchronize all scanning threads (mutators and the scanner);
+  // - for the scanner, to transition through the state machine
+  //   (kScheduled -> kScanning (ctor) -> kSweepingAndFinishing (dtor)).
+  template <Context context>
+  class SyncScope final {
+   public:
+    explicit SyncScope(PCScanTask& task) : task_(task) {
+      task_.number_of_scanning_threads_.fetch_add(1, std::memory_order_relaxed);
+      if (context == Context::kScanner) {
+        task_.pcscan_.state_.store(PCScan::State::kScanning,
+                                   std::memory_order_relaxed);
+        task_.pcscan_.SetJoinableIfSafepointEnabled(true);
+      }
+    }
+    ~SyncScope() {
+      // First, notify the scanning thread that this thread is done.
+      NotifyThreads();
+      if (context == Context::kScanner) {
+        // The scanner thread must wait here until all safepoints leave.
+        // Otherwise, sweeping may free a page that can later be accessed by a
+        // descheduled mutator.
+        WaitForOtherThreads();
+        task_.pcscan_.state_.store(PCScan::State::kSweepingAndFinishing,
+                                   std::memory_order_relaxed);
+      }
+    }
+
+   private:
+    void NotifyThreads() {
+      {
+        // The lock is required as otherwise there is a race between
+        // fetch_sub/notify in the mutator and checking
+        // number_of_scanning_threads_/waiting in the scanner.
+        std::lock_guard<std::mutex> lock(task_.mutex_);
+        task_.number_of_scanning_threads_.fetch_sub(1,
+                                                    std::memory_order_relaxed);
+        {
+          // Notify that the scan is done and there is no need to enter
+          // the safepoint. This also helps a mutator avoid entering it
+          // repeatedly. Since the scanner thread waits for all threads to
+          // finish, there is no ABA problem here.
+          task_.pcscan_.SetJoinableIfSafepointEnabled(false);
+        }
+      }
+      task_.condvar_.notify_all();
+    }
+
+    void WaitForOtherThreads() {
+      std::unique_lock<std::mutex> lock(task_.mutex_);
+      task_.condvar_.wait(lock, [this] {
+        return !task_.number_of_scanning_threads_.load(
+            std::memory_order_relaxed);
+      });
+    }
+
+    PCScanTask& task_;
+  };
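+
+  // Illustrative usage (see RunFromScanner() and RunFromMutator() below): the
+  // scanner wraps its scanning phase in
+  //   SyncScope<Context::kScanner> sync_scope(*this);
+  // which moves the state to kScanning on construction and, on destruction,
+  // waits for all mutators to leave their safepoints before the state moves
+  // on to kSweepingAndFinishing.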
+
+  friend class base::RefCountedThreadSafe<PCScanTask>;
+  ~PCScanTask() = default;
+
+  PA_SCAN_INLINE AllocationStateMap* TryFindScannerBitmapForPointer(
+      uintptr_t maybe_ptr) const;
+
+  // Lookup and marking functions. Return size of the slot if marked, or zero
+  // otherwise.
+  PA_SCAN_INLINE size_t TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const;
+
+  // Scans stack, only called from safepoints.
+  void ScanStack();
+
+  // Scan individual areas.
+  void ScanNormalArea(PCScanInternal& pcscan,
+                      PCScanScanLoop& scan_loop,
+                      uintptr_t begin,
+                      uintptr_t end);
+  void ScanLargeArea(PCScanInternal& pcscan,
+                     PCScanScanLoop& scan_loop,
+                     uintptr_t begin,
+                     uintptr_t end,
+                     size_t slot_size);
+
+  // Scans all registered partitions and marks reachable quarantined slots.
+  void ScanPartitions();
+
+  // Clear quarantined slots and prepare the card table for fast lookup.
+  void ClearQuarantinedSlotsAndPrepareCardTable();
+
+  // Unprotect all slot spans from all partitions.
+  void UnprotectPartitions();
+
+  // Sweeps (frees) unreachable quarantined entries.
+  void SweepQuarantine();
+
+  // Finishes the scanner (updates limits, UMA, etc).
+  void FinishScanner();
+
+  // Cache the pcscan epoch to avoid the compiler loading the atomic
+  // QuarantineData::epoch_ on each access.
+  const size_t pcscan_epoch_;
+  std::unique_ptr<StarScanSnapshot> snapshot_;
+  StatsCollector stats_;
+  // Mutex and condvar used to synchronize scanning threads.
+  std::mutex mutex_;
+  std::condition_variable condvar_;
+  std::atomic<size_t> number_of_scanning_threads_{0u};
+  // We can unprotect only once to reduce context-switches.
+  std::once_flag unprotect_once_flag_;
+  bool immediatelly_free_slots_{false};
+  PCScan& pcscan_;
+};
+
+PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
+    uintptr_t maybe_ptr) const {
+  PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
+  // First, check if |maybe_ptr| points to a valid super page or a quarantined
+  // card.
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  // Check if |maybe_ptr| points to a quarantined card.
+  if (PA_LIKELY(
+          !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
+    return nullptr;
+  }
+#else   // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  // Without the card table, use the reservation offset table to check if
+  // |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
+  // we may hit the slow path more frequently), but it reduces the memory
+  // overhead. Since we are certain here that |maybe_ptr| refers to the
+  // regular pool, it's okay to use the non-checking version of
+  // ReservationOffsetPointer().
+  const uintptr_t offset =
+      maybe_ptr & ~PartitionAddressSpace::RegularPoolBaseMask();
+  if (PA_LIKELY(*ReservationOffsetPointer(kRegularPoolHandle, offset) !=
+                kOffsetTagNormalBuckets)) {
+    return nullptr;
+  }
+#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
+  if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
+    return nullptr;
+  }
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  // We are certain here that |maybe_ptr| points to an allocated super-page.
+  return StateBitmapFromAddr(maybe_ptr);
+}
+
+// Looks up and marks a potential dangling pointer. Returns the size of the slot
+// (which is then accounted as quarantined), or zero if no slot is found.
+// For normal bucket super pages, PCScan uses two quarantine bitmaps, the
+// mutator and the scanner one. The former is used by mutators when slots are
+// freed, while the latter is used concurrently by the PCScan thread. The
+// bitmaps are swapped as soon as PCScan is triggered. Once a dangling pointer
+// (which points to a slot in the scanner bitmap) is found,
+// TryMarkSlotInNormalBuckets() marks it again in the mutator bitmap and clears
+// it from the scanner bitmap. This way, when scanning is done, all uncleared
+// entries in the scanner bitmap correspond to unreachable slots.
+PA_SCAN_INLINE size_t
+PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
+  // Check if |maybe_ptr| points somewhere to the heap.
+  // The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
+  auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
+  if (!state_map) {
+    return 0;
+  }
+
+  // Beyond this point, we know that |maybe_ptr| is a pointer within a
+  // normal-bucket super page.
+  PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_ptr));
+
+#if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  // A pointer from a normal bucket is always in the first superpage.
+  auto* root = Root::FromAddrInFirstSuperpage(maybe_ptr);
+  // Without the card table, we must make sure that |maybe_ptr| doesn't point to
+  // metadata partition.
+  // TODO(bikineev): To speed things up, consider removing the check and
+  // committing quarantine bitmaps for metadata partition.
+  // TODO(bikineev): Marking an entry in the reservation-table is not a
+  // publishing operation, meaning that the |root| pointer may not be assigned
+  // yet. This can happen as arbitrary pointers may point into a super-page
+  // during its setup. Make sure to check that |root| is not null before
+  // dereferencing it.
+  if (PA_UNLIKELY(!root || !root->IsQuarantineEnabled())) {
+    return 0;
+  }
+#endif  // !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+
+  // Check if pointer was in the quarantine bitmap.
+  const GetSlotStartResult slot_start_result =
+      GetSlotStartInSuperPage(maybe_ptr);
+  if (!slot_start_result.is_found()) {
+    return 0;
+  }
+
+  const uintptr_t slot_start = slot_start_result.slot_start;
+  if (PA_LIKELY(!state_map->IsQuarantined(slot_start))) {
+    return 0;
+  }
+
+  PA_SCAN_DCHECK((maybe_ptr & kSuperPageBaseMask) ==
+                 (slot_start & kSuperPageBaseMask));
+
+  if (PA_UNLIKELY(immediatelly_free_slots_)) {
+    return 0;
+  }
+
+  // Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
+  // the mutator bitmap and clear it from the scanner bitmap. Note that since
+  // PCScan has exclusive access to the scanner bitmap, we can avoid atomic rmw
+  // operation for it.
+  if (PA_LIKELY(
+          state_map->MarkQuarantinedAsReachable(slot_start, pcscan_epoch_))) {
+    return slot_start_result.slot_size;
+  }
+
+  return 0;
+}
+
+void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
+  const PCScan::ClearType clear_type = pcscan_.clear_type_;
+
+#if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  if (clear_type == PCScan::ClearType::kEager) {
+    return;
+  }
+#endif
+
+  StarScanSnapshot::ClearingView view(*snapshot_);
+  view.VisitConcurrently([clear_type](uintptr_t super_page) {
+    auto* bitmap = StateBitmapFromAddr(super_page);
+    auto* root = Root::FromFirstSuperPage(super_page);
+    bitmap->IterateQuarantined([root, clear_type](uintptr_t slot_start) {
+      auto* slot_span = SlotSpan::FromSlotStart(slot_start);
+      // Use zero as a zapping value to speed up the fast bailout check in
+      // ScanPartitions.
+      const size_t size = root->GetSlotUsableSize(slot_span);
+      if (clear_type == PCScan::ClearType::kLazy) {
+        void* object = root->SlotStartToObject(slot_start);
+        memset(object, 0, size);
+      }
+#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+      // Set card(s) for this quarantined slot.
+      QuarantineCardTable::GetFrom(slot_start).Quarantine(slot_start, size);
+#endif
+    });
+  });
+}
+
+void PCScanTask::UnprotectPartitions() {
+  auto& pcscan = PCScanInternal::Instance();
+  if (!pcscan.WriteProtectionEnabled()) {
+    return;
+  }
+
+  StarScanSnapshot::UnprotectingView unprotect_view(*snapshot_);
+  unprotect_view.VisitConcurrently([&pcscan](uintptr_t super_page) {
+    SuperPageSnapshot super_page_snapshot(super_page);
+
+    for (const auto& scan_area : super_page_snapshot.scan_areas()) {
+      const uintptr_t begin =
+          super_page |
+          (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
+      const uintptr_t end =
+          begin + (scan_area.size_in_words * sizeof(uintptr_t));
+
+      pcscan.UnprotectPages(begin, end - begin);
+    }
+  });
+}
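+
+// For illustration, assuming a 64-bit build (sizeof(uintptr_t) == 8): a scan
+// area recorded with offset_within_page_in_words == 512 and size_in_words ==
+// 128 maps back to the byte range [super_page + 4096, super_page + 5120) that
+// is unprotected above.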
+
+class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
+  friend class ScanLoop<PCScanScanLoop>;
+
+ public:
+  explicit PCScanScanLoop(const PCScanTask& task)
+      : ScanLoop(PCScanInternal::Instance().simd_support()), task_(task) {}
+
+  size_t quarantine_size() const { return quarantine_size_; }
+
+ private:
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
+    return PartitionAddressSpace::RegularPoolBase();
+  }
+  PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
+    return PartitionAddressSpace::RegularPoolBaseMask();
+  }
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
+    // |maybe_ptr| may have an MTE tag, so remove it first.
+    quarantine_size_ +=
+        task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
+  }
+
+  const PCScanTask& task_;
+  DisableMTEScope disable_mte_;
+  size_t quarantine_size_ = 0;
+};
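+
+// Note: ScanLoop<PCScanScanLoop> (the CRTP base) drives the word-by-word walk
+// of each range passed to Run(); CheckPointer() above is the per-word
+// callback, so quarantine_size_ accumulates the sizes of quarantined slots
+// that are reachable from the scanned range. RegularPoolBase() and
+// RegularPoolMask() presumably let the base class cheaply reject words that
+// cannot point into the regular pool.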
+
+class PCScanTask::StackVisitor final : public internal::StackVisitor {
+ public:
+  explicit StackVisitor(const PCScanTask& task) : task_(task) {}
+
+  void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) override {
+    static constexpr size_t kMinimalAlignment = 32;
+    uintptr_t begin =
+        reinterpret_cast<uintptr_t>(stack_ptr) & ~(kMinimalAlignment - 1);
+    uintptr_t end =
+        (reinterpret_cast<uintptr_t>(stack_top) + kMinimalAlignment - 1) &
+        ~(kMinimalAlignment - 1);
+    PA_CHECK(begin < end);
+    PCScanScanLoop loop(task_);
+    loop.Run(begin, end);
+    quarantine_size_ += loop.quarantine_size();
+  }
+
+  // Returns size of quarantined slots that are reachable from the current
+  // stack.
+  size_t quarantine_size() const { return quarantine_size_; }
+
+ private:
+  const PCScanTask& task_;
+  size_t quarantine_size_ = 0;
+};
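+
+// For illustration: with kMinimalAlignment == 32, a stack_ptr of 0x1005 and a
+// stack_top of 0x1100 are widened to the 32-byte-aligned range
+// [0x1000, 0x1100), which is then scanned with the same PCScanScanLoop as any
+// heap area.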
+
+PCScanTask::PCScanTask(PCScan& pcscan, size_t quarantine_last_size)
+    : pcscan_epoch_(pcscan.epoch() - 1),
+      snapshot_(StarScanSnapshot::Create(PCScanInternal::Instance())),
+      stats_(PCScanInternal::Instance().process_name(), quarantine_last_size),
+      immediatelly_free_slots_(
+          PCScanInternal::Instance().IsImmediateFreeingEnabled()),
+      pcscan_(pcscan) {}
+
+void PCScanTask::ScanStack() {
+  const auto& pcscan = PCScanInternal::Instance();
+  if (!pcscan.IsStackScanningEnabled()) {
+    return;
+  }
+  // Check if the stack top was registered. It may not be registered if the
+  // current allocation happens from pthread trampolines.
+  void* stack_top = pcscan.GetCurrentThreadStackTop();
+  if (PA_UNLIKELY(!stack_top)) {
+    return;
+  }
+
+  Stack stack_scanner(stack_top);
+  StackVisitor visitor(*this);
+  stack_scanner.IteratePointers(&visitor);
+  stats_.IncreaseSurvivedQuarantineSize(visitor.quarantine_size());
+}
+
+void PCScanTask::ScanNormalArea(PCScanInternal& pcscan,
+                                PCScanScanLoop& scan_loop,
+                                uintptr_t begin,
+                                uintptr_t end) {
+  // Protect slot span before scanning it.
+  pcscan.ProtectPages(begin, end - begin);
+  scan_loop.Run(begin, end);
+}
+
+void PCScanTask::ScanLargeArea(PCScanInternal& pcscan,
+                               PCScanScanLoop& scan_loop,
+                               uintptr_t begin,
+                               uintptr_t end,
+                               size_t slot_size) {
+  // For scanning large areas, it's worthwhile checking whether the range that
+  // is scanned contains allocated slots. It also helps to skip discarded
+  // freed slots.
+  // Protect slot span before scanning it.
+  pcscan.ProtectPages(begin, end - begin);
+
+  auto* bitmap = StateBitmapFromAddr(begin);
+
+  for (uintptr_t current_slot = begin; current_slot < end;
+       current_slot += slot_size) {
+    // It is okay to skip slots, as the objects they hold have been zapped at
+    // this point, which means that their pointers no longer retain other slots.
+    if (!bitmap->IsAllocated(current_slot)) {
+      continue;
+    }
+    uintptr_t current_slot_end = current_slot + slot_size;
+    // |slot_size| may be larger than |raw_size| for single-slot slot spans.
+    scan_loop.Run(current_slot, std::min(current_slot_end, end));
+  }
+}
+
+void PCScanTask::ScanPartitions() {
+  // Threshold of bucket size above which it is worthwhile to check whether a
+  // slot is allocated and needs to be scanned. PartitionPurgeSlotSpan()
+  // purges only slots >= page-size; this helps us to avoid faulting in
+  // discarded pages. We actually lower it further to 1024 to take advantage of
+  // skipping unallocated slots, but don't want to go any lower, as this comes
+  // at the cost of expensive bitmap checking.
+  static constexpr size_t kLargeScanAreaThresholdInWords =
+      1024 / sizeof(uintptr_t);
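+  // On a 64-bit build this is 1024 / 8 == 128 words, i.e. slot spans whose
+  // slots are 1024 bytes or larger take the ScanLargeArea() path below, which
+  // consults the state bitmap to skip unallocated slots.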
+
+  PCScanScanLoop scan_loop(*this);
+  auto& pcscan = PCScanInternal::Instance();
+
+  StarScanSnapshot::ScanningView snapshot_view(*snapshot_);
+  snapshot_view.VisitConcurrently([this, &pcscan,
+                                   &scan_loop](uintptr_t super_page) {
+    SuperPageSnapshot super_page_snapshot(super_page);
+
+    for (const auto& scan_area : super_page_snapshot.scan_areas()) {
+      const uintptr_t begin =
+          super_page |
+          (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
+      PA_SCAN_DCHECK(begin ==
+                     super_page + (scan_area.offset_within_page_in_words *
+                                   sizeof(uintptr_t)));
+      const uintptr_t end = begin + scan_area.size_in_words * sizeof(uintptr_t);
+
+      if (PA_UNLIKELY(scan_area.slot_size_in_words >=
+                      kLargeScanAreaThresholdInWords)) {
+        ScanLargeArea(pcscan, scan_loop, begin, end,
+                      scan_area.slot_size_in_words * sizeof(uintptr_t));
+      } else {
+        ScanNormalArea(pcscan, scan_loop, begin, end);
+      }
+    }
+  });
+
+  stats_.IncreaseSurvivedQuarantineSize(scan_loop.quarantine_size());
+}
+
+namespace {
+
+struct SweepStat {
+  // Bytes that were really swept (by calling free()).
+  size_t swept_bytes = 0;
+  // Bytes of marked quarantine memory that were discarded (by calling
+  // madvise(MADV_DONTNEED)).
+  size_t discarded_bytes = 0;
+};
+
+void UnmarkInCardTable(uintptr_t slot_start, SlotSpanMetadata* slot_span) {
+#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+  // Reset card(s) for this quarantined slot. Please note that the cards may
+  // still contain quarantined slots (which were promoted in this scan cycle),
+  // but ClearQuarantinedSlotsAndPrepareCardTable() will set them again in the
+  // next PCScan cycle.
+  QuarantineCardTable::GetFrom(slot_start)
+      .Unquarantine(slot_start, slot_span->GetUtilizedSlotSize());
+#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
+}
+
+[[maybe_unused]] size_t FreeAndUnmarkInCardTable(PartitionRoot* root,
+                                                 SlotSpanMetadata* slot_span,
+                                                 uintptr_t slot_start) {
+  void* object = root->SlotStartToObject(slot_start);
+  root->FreeNoHooksImmediate(object, slot_span, slot_start);
+  UnmarkInCardTable(slot_start, slot_span);
+  return slot_span->bucket->slot_size;
+}
+
+[[maybe_unused]] void SweepSuperPage(PartitionRoot* root,
+                                     uintptr_t super_page,
+                                     size_t epoch,
+                                     SweepStat& stat) {
+  auto* bitmap = StateBitmapFromAddr(super_page);
+  PartitionRoot::FromFirstSuperPage(super_page);
+  bitmap->IterateUnmarkedQuarantined(epoch, [root,
+                                             &stat](uintptr_t slot_start) {
+    auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
+    stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
+  });
+}
+
+[[maybe_unused]] void SweepSuperPageAndDiscardMarkedQuarantine(
+    PartitionRoot* root,
+    uintptr_t super_page,
+    size_t epoch,
+    SweepStat& stat) {
+  auto* bitmap = StateBitmapFromAddr(super_page);
+  bitmap->IterateQuarantined(epoch, [root, &stat](uintptr_t slot_start,
+                                                  bool is_marked) {
+    auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
+    if (PA_LIKELY(!is_marked)) {
+      stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
+      return;
+    }
+    // Otherwise, try to discard pages for the marked quarantine. Since no data
+    // (e.g. a |next| pointer) is stored in quarantined slots, this can be done
+    // freely.
+    const size_t slot_size = slot_span->bucket->slot_size;
+    if (slot_size >= SystemPageSize()) {
+      const uintptr_t discard_end =
+          base::bits::AlignDown(slot_start + slot_size, SystemPageSize());
+      const uintptr_t discard_begin =
+          base::bits::AlignUp(slot_start, SystemPageSize());
+      const intptr_t discard_size = discard_end - discard_begin;
+      if (discard_size > 0) {
+        DiscardSystemPages(discard_begin, discard_size);
+        stat.discarded_bytes += discard_size;
+      }
+    }
+  });
+}
+
+[[maybe_unused]] void SweepSuperPageWithBatchedFree(PartitionRoot* root,
+                                                    uintptr_t super_page,
+                                                    size_t epoch,
+                                                    SweepStat& stat) {
+  using SlotSpan = SlotSpanMetadata;
+
+  auto* bitmap = StateBitmapFromAddr(super_page);
+  SlotSpan* previous_slot_span = nullptr;
+  internal::EncodedNextFreelistEntry* freelist_tail = nullptr;
+  internal::EncodedNextFreelistEntry* freelist_head = nullptr;
+  size_t freelist_entries = 0;
+
+  const auto bitmap_iterator = [&](uintptr_t slot_start) {
+    SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start);
+    auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(slot_start);
+
+    if (current_slot_span != previous_slot_span) {
+      // We started scanning a new slot span. Flush the accumulated freelist to
+      // the slot-span's freelist. This is a single lock acquired per slot span.
+      if (previous_slot_span && freelist_entries) {
+        root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
+                           previous_slot_span);
+      }
+      freelist_head = entry;
+      freelist_tail = nullptr;
+      freelist_entries = 0;
+      previous_slot_span = current_slot_span;
+    }
+
+    if (freelist_tail) {
+      freelist_tail->SetNext(entry);
+    }
+    freelist_tail = entry;
+    ++freelist_entries;
+
+    UnmarkInCardTable(slot_start, current_slot_span);
+
+    stat.swept_bytes += current_slot_span->bucket->slot_size;
+  };
+
+  bitmap->IterateUnmarkedQuarantinedAndFree(epoch, bitmap_iterator);
+
+  if (previous_slot_span && freelist_entries) {
+    root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
+                       previous_slot_span);
+  }
+}
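+
+// Design note: the batched variant above accumulates freelist entries per slot
+// span and hands them to RawFreeBatch() in one call, so the lock is taken once
+// per slot span rather than once per freed slot as in the
+// FreeNoHooksImmediate() path used by SweepSuperPage().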
+
+}  // namespace
+
+void PCScanTask::SweepQuarantine() {
+  // Check that the scan is unjoinable by this time.
+  PA_DCHECK(!pcscan_.IsJoinable());
+  // Discard marked quarantine memory on every Nth scan.
+  // TODO(bikineev): Find a better signal (e.g. memory pressure, high
+  // survival rate, etc).
+  static constexpr size_t kDiscardMarkedQuarantineFrequency = 16;
+  const bool should_discard =
+      (pcscan_epoch_ % kDiscardMarkedQuarantineFrequency == 0) &&
+      (pcscan_.clear_type_ == PCScan::ClearType::kEager);
+
+  SweepStat stat;
+  StarScanSnapshot::SweepingView sweeping_view(*snapshot_);
+  sweeping_view.VisitNonConcurrently(
+      [this, &stat, should_discard](uintptr_t super_page) {
+        auto* root = PartitionRoot::FromFirstSuperPage(super_page);
+
+#if PA_CONFIG(STARSCAN_BATCHED_FREE)
+        SweepSuperPageWithBatchedFree(root, super_page, pcscan_epoch_, stat);
+        (void)should_discard;
+#else
+        if (PA_UNLIKELY(should_discard && !root->settings.use_cookie)) {
+          SweepSuperPageAndDiscardMarkedQuarantine(root, super_page,
+                                                   pcscan_epoch_, stat);
+        } else {
+          SweepSuperPage(root, super_page, pcscan_epoch_, stat);
+        }
+#endif  // PA_CONFIG(STARSCAN_BATCHED_FREE)
+      });
+
+  stats_.IncreaseSweptSize(stat.swept_bytes);
+  stats_.IncreaseDiscardedQuarantineSize(stat.discarded_bytes);
+
+#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
+  // Sweeping potentially frees into the current thread's thread cache. Purge
+  // releases the cache back to the global allocator.
+  auto* current_thread_tcache = ThreadCache::Get();
+  if (ThreadCache::IsValid(current_thread_tcache)) {
+    current_thread_tcache->Purge();
+  }
+#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
+}
+
+void PCScanTask::FinishScanner() {
+  stats_.ReportTracesAndHists(PCScanInternal::Instance().GetReporter());
+
+  pcscan_.scheduler_.scheduling_backend().UpdateScheduleAfterScan(
+      stats_.survived_quarantine_size(), stats_.GetOverallTime(),
+      PCScanInternal::Instance().CalculateTotalHeapSize());
+
+  PCScanInternal::Instance().ResetCurrentPCScanTask();
+  // Change the state and check that a concurrent task can't be scheduled twice.
+  PA_CHECK(pcscan_.state_.exchange(PCScan::State::kNotRunning,
+                                   std::memory_order_acq_rel) ==
+           PCScan::State::kSweepingAndFinishing);
+}
+
+void PCScanTask::RunFromMutator() {
+  ReentrantScannerGuard reentrancy_guard;
+  StatsCollector::MutatorScope overall_scope(
+      stats_, StatsCollector::MutatorId::kOverall);
+  {
+    SyncScope<Context::kMutator> sync_scope(*this);
+    // The mutator might start entering the safepoint while scanning has
+    // already finished.
+    if (!pcscan_.IsJoinable()) {
+      return;
+    }
+    {
+      // Clear all quarantined slots and prepare card table.
+      StatsCollector::MutatorScope clear_scope(
+          stats_, StatsCollector::MutatorId::kClear);
+      ClearQuarantinedSlotsAndPrepareCardTable();
+    }
+    {
+      // Scan the thread's stack to find dangling references.
+      StatsCollector::MutatorScope scan_scope(
+          stats_, StatsCollector::MutatorId::kScanStack);
+      ScanStack();
+    }
+    {
+      // Unprotect all scanned pages, if needed.
+      UnprotectPartitions();
+    }
+    {
+      // Scan heap for dangling references.
+      StatsCollector::MutatorScope scan_scope(stats_,
+                                              StatsCollector::MutatorId::kScan);
+      ScanPartitions();
+    }
+  }
+}
+
+void PCScanTask::RunFromScanner() {
+  ReentrantScannerGuard reentrancy_guard;
+  {
+    StatsCollector::ScannerScope overall_scope(
+        stats_, StatsCollector::ScannerId::kOverall);
+    {
+      SyncScope<Context::kScanner> sync_scope(*this);
+      {
+        // Clear all quarantined slots and prepare the card table.
+        StatsCollector::ScannerScope clear_scope(
+            stats_, StatsCollector::ScannerId::kClear);
+        ClearQuarantinedSlotsAndPrepareCardTable();
+      }
+      {
+        // Scan heap for dangling references.
+        StatsCollector::ScannerScope scan_scope(
+            stats_, StatsCollector::ScannerId::kScan);
+        ScanPartitions();
+      }
+      {
+        // Unprotect all scanned pages, if needed.
+        UnprotectPartitions();
+      }
+    }
+    {
+      // Sweep unreachable quarantined slots.
+      StatsCollector::ScannerScope sweep_scope(
+          stats_, StatsCollector::ScannerId::kSweep);
+      SweepQuarantine();
+    }
+  }
+  FinishScanner();
+}
+
+class PCScan::PCScanThread final {
+ public:
+  using TaskHandle = PCScanInternal::TaskHandle;
+
+  static PCScanThread& Instance() {
+    // Lazily instantiate the scanning thread.
+    static internal::base::NoDestructor<PCScanThread> instance;
+    return *instance;
+  }
+
+  void PostTask(TaskHandle task) {
+    {
+      std::lock_guard<std::mutex> lock(mutex_);
+      PA_DCHECK(!posted_task_.get());
+      posted_task_ = std::move(task);
+      wanted_delay_ = base::TimeDelta();
+    }
+    condvar_.notify_one();
+  }
+
+  void PostDelayedTask(base::TimeDelta delay) {
+    {
+      std::lock_guard<std::mutex> lock(mutex_);
+      if (posted_task_.get()) {
+        return;
+      }
+      wanted_delay_ = delay;
+    }
+    condvar_.notify_one();
+  }
+
+ private:
+  friend class internal::base::NoDestructor<PCScanThread>;
+
+  PCScanThread() {
+    ScopedAllowAllocations allow_allocations_within_std_thread;
+    std::thread{[](PCScanThread* instance) {
+                  static constexpr const char* kThreadName = "PCScan";
+                  // Ideally we should avoid mixing base:: and std:: API for
+                  // threading, but this is useful for visualizing the pcscan
+                  // thread in chrome://tracing.
+                  internal::base::PlatformThread::SetName(kThreadName);
+                  instance->TaskLoop();
+                },
+                this}
+        .detach();
+  }
+
+  // Waits and returns whether the delay should be recomputed.
+  bool Wait(std::unique_lock<std::mutex>& lock) {
+    PA_DCHECK(lock.owns_lock());
+    if (wanted_delay_.is_zero()) {
+      condvar_.wait(lock, [this] {
+        // Re-evaluate if either delay changed, or a task was
+        // enqueued.
+        return !wanted_delay_.is_zero() || posted_task_.get();
+      });
+      // The delay has already been set up and should not be queried again.
+      return false;
+    }
+    condvar_.wait_for(
+        lock, std::chrono::microseconds(wanted_delay_.InMicroseconds()));
+    // If no task has been posted, the delay should be recomputed at this point.
+    return !posted_task_.get();
+  }
+
+  void TaskLoop() {
+    while (true) {
+      TaskHandle current_task;
+      {
+        std::unique_lock<std::mutex> lock(mutex_);
+        // Scheduling.
+        while (!posted_task_.get()) {
+          if (Wait(lock)) {
+            wanted_delay_ =
+                scheduler().scheduling_backend().UpdateDelayedSchedule();
+            if (wanted_delay_.is_zero()) {
+              break;
+            }
+          }
+        }
+        // Differentiate between a posted task and a delayed task schedule.
+        if (posted_task_.get()) {
+          std::swap(current_task, posted_task_);
+          wanted_delay_ = base::TimeDelta();
+        } else {
+          PA_DCHECK(wanted_delay_.is_zero());
+        }
+      }
+      // Differentiate between a posted task and a delayed task schedule.
+      if (current_task.get()) {
+        current_task->RunFromScanner();
+      } else {
+        PCScan::Instance().PerformScan(PCScan::InvocationMode::kNonBlocking);
+      }
+    }
+  }
+
+  PCScanScheduler& scheduler() const { return PCScan::Instance().scheduler(); }
+
+  std::mutex mutex_;
+  std::condition_variable condvar_;
+  TaskHandle posted_task_;
+  base::TimeDelta wanted_delay_;
+};
+
+PCScanInternal::PCScanInternal() : simd_support_(DetectSimdSupport()) {}
+
+PCScanInternal::~PCScanInternal() = default;
+
+void PCScanInternal::Initialize(PCScan::InitConfig config) {
+  PA_DCHECK(!is_initialized_);
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  // Make sure that pools are initialized.
+  PartitionAddressSpace::Init();
+#endif
+  CommitCardTable();
+#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+  if (config.write_protection ==
+      PCScan::InitConfig::WantedWriteProtectionMode::kEnabled) {
+    write_protector_ = std::make_unique<UserFaultFDWriteProtector>();
+  } else {
+    write_protector_ = std::make_unique<NoWriteProtector>();
+  }
+#else
+  write_protector_ = std::make_unique<NoWriteProtector>();
+#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+  PCScan::SetClearType(write_protector_->SupportedClearType());
+
+  if (config.safepoint == PCScan::InitConfig::SafepointMode::kEnabled) {
+    PCScan::Instance().EnableSafepoints();
+  }
+  scannable_roots_ = RootsMap();
+  nonscannable_roots_ = RootsMap();
+
+  static partition_alloc::StatsReporter s_no_op_reporter;
+  PCScan::Instance().RegisterStatsReporter(&s_no_op_reporter);
+
+  // Don't initialize PCScanThread::Instance() as otherwise the sandbox
+  // complains about multiple threads running during sandbox initialization.
+  is_initialized_ = true;
+}
+
+void PCScanInternal::PerformScan(PCScan::InvocationMode invocation_mode) {
+#if PA_SCAN_DCHECK_IS_ON()
+  PA_DCHECK(is_initialized());
+  PA_DCHECK(scannable_roots().size() > 0);
+  PA_DCHECK(std::all_of(
+      scannable_roots().begin(), scannable_roots().end(),
+      [](const auto& pair) { return pair.first->IsScanEnabled(); }));
+  PA_DCHECK(std::all_of(
+      nonscannable_roots().begin(), nonscannable_roots().end(),
+      [](const auto& pair) { return pair.first->IsQuarantineEnabled(); }));
+#endif
+
+  PCScan& frontend = PCScan::Instance();
+  {
+    // If scanning is already in progress, bail out.
+    PCScan::State expected = PCScan::State::kNotRunning;
+    if (!frontend.state_.compare_exchange_strong(
+            expected, PCScan::State::kScheduled, std::memory_order_acq_rel,
+            std::memory_order_relaxed)) {
+      return;
+    }
+  }
+
+  const size_t last_quarantine_size =
+      frontend.scheduler_.scheduling_backend().ScanStarted();
+
+  // Create PCScan task and set it as current.
+  auto task = base::MakeRefCounted<PCScanTask>(frontend, last_quarantine_size);
+  PCScanInternal::Instance().SetCurrentPCScanTask(task);
+
+  if (PA_UNLIKELY(invocation_mode ==
+                  PCScan::InvocationMode::kScheduleOnlyForTesting)) {
+    // Immediately change the state to enable safepoint testing.
+    frontend.state_.store(PCScan::State::kScanning, std::memory_order_release);
+    frontend.SetJoinableIfSafepointEnabled(true);
+    return;
+  }
+
+  // Post PCScan task.
+  if (PA_LIKELY(invocation_mode == PCScan::InvocationMode::kNonBlocking)) {
+    PCScan::PCScanThread::Instance().PostTask(std::move(task));
+  } else {
+    PA_SCAN_DCHECK(PCScan::InvocationMode::kBlocking == invocation_mode ||
+                   PCScan::InvocationMode::kForcedBlocking == invocation_mode);
+    std::move(*task).RunFromScanner();
+  }
+}
+
+void PCScanInternal::PerformScanIfNeeded(
+    PCScan::InvocationMode invocation_mode) {
+  if (!scannable_roots().size()) {
+    return;
+  }
+  PCScan& frontend = PCScan::Instance();
+  if (invocation_mode == PCScan::InvocationMode::kForcedBlocking ||
+      frontend.scheduler_.scheduling_backend()
+          .GetQuarantineData()
+          .MinimumScanningThresholdReached()) {
+    PerformScan(invocation_mode);
+  }
+}
+
+void PCScanInternal::PerformDelayedScan(base::TimeDelta delay) {
+  PCScan::PCScanThread::Instance().PostDelayedTask(delay);
+}
+
+void PCScanInternal::JoinScan() {
+  // Current task can be destroyed by the scanner. Check that it's valid.
+  if (auto current_task = CurrentPCScanTask()) {
+    current_task->RunFromMutator();
+  }
+}
+
+PCScanInternal::TaskHandle PCScanInternal::CurrentPCScanTask() const {
+  std::lock_guard<std::mutex> lock(current_task_mutex_);
+  return current_task_;
+}
+
+void PCScanInternal::SetCurrentPCScanTask(TaskHandle task) {
+  std::lock_guard<std::mutex> lock(current_task_mutex_);
+  current_task_ = std::move(task);
+}
+
+void PCScanInternal::ResetCurrentPCScanTask() {
+  std::lock_guard<std::mutex> lock(current_task_mutex_);
+  current_task_.reset();
+}
+
+namespace {
+PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
+    PCScan::Root& root) {
+  const size_t state_bitmap_size_to_commit = CommittedStateBitmapSize();
+  PCScanInternal::SuperPages super_pages;
+  for (auto* super_page_extent = root.first_extent; super_page_extent;
+       super_page_extent = super_page_extent->next) {
+    for (uintptr_t super_page = SuperPagesBeginFromExtent(super_page_extent),
+                   super_page_end = SuperPagesEndFromExtent(super_page_extent);
+         super_page != super_page_end; super_page += kSuperPageSize) {
+      // Make sure the metadata is committed.
+      // TODO(bikineev): Remove once this is known to work.
+      const volatile char* metadata =
+          reinterpret_cast<char*>(PartitionSuperPageToMetadataArea(super_page));
+      *metadata;
+      RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
+                          state_bitmap_size_to_commit,
+                          PageAccessibilityConfiguration(
+                              PageAccessibilityConfiguration::kReadWrite),
+                          PageAccessibilityDisposition::kRequireUpdate);
+      super_pages.push_back(super_page);
+    }
+  }
+  return super_pages;
+}
+}  // namespace
+
+void PCScanInternal::RegisterScannableRoot(Root* root) {
+  PA_DCHECK(is_initialized());
+  PA_DCHECK(root);
+  // Avoid nesting locks and store super_pages in a temporary vector.
+  SuperPages super_pages;
+  {
+    ::partition_alloc::internal::ScopedGuard guard(
+        ::partition_alloc::internal::PartitionRootLock(root));
+    PA_CHECK(root->IsQuarantineAllowed());
+    if (root->IsScanEnabled()) {
+      return;
+    }
+    PA_CHECK(!root->IsQuarantineEnabled());
+    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
+    root->settings.scan_mode = Root::ScanMode::kEnabled;
+    root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
+  }
+  std::lock_guard<std::mutex> lock(roots_mutex_);
+  PA_DCHECK(!scannable_roots_.count(root));
+  auto& root_super_pages = scannable_roots_[root];
+  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
+                          super_pages.end());
+}
+
+void PCScanInternal::RegisterNonScannableRoot(Root* root) {
+  PA_DCHECK(is_initialized());
+  PA_DCHECK(root);
+  // Avoid nesting locks and store super_pages in a temporary vector.
+  SuperPages super_pages;
+  {
+    ::partition_alloc::internal::ScopedGuard guard(
+        ::partition_alloc::internal::PartitionRootLock(root));
+    PA_CHECK(root->IsQuarantineAllowed());
+    PA_CHECK(!root->IsScanEnabled());
+    if (root->IsQuarantineEnabled()) {
+      return;
+    }
+    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
+    root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
+  }
+  std::lock_guard<std::mutex> lock(roots_mutex_);
+  PA_DCHECK(!nonscannable_roots_.count(root));
+  auto& root_super_pages = nonscannable_roots_[root];
+  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
+                          super_pages.end());
+}
+
+void PCScanInternal::RegisterNewSuperPage(Root* root,
+                                          uintptr_t super_page_base) {
+  PA_DCHECK(is_initialized());
+  PA_DCHECK(root);
+  PA_CHECK(root->IsQuarantineAllowed());
+  PA_DCHECK(!(super_page_base % kSuperPageAlignment));
+  // Make sure the metadata is committed.
+  // TODO(bikineev): Remove once this is known to work.
+  const volatile char* metadata = reinterpret_cast<char*>(
+      PartitionSuperPageToMetadataArea(super_page_base));
+  *metadata;
+
+  std::lock_guard<std::mutex> lock(roots_mutex_);
+
+  // Dispatch based on whether root is scannable or not.
+  if (root->IsScanEnabled()) {
+    PA_DCHECK(scannable_roots_.count(root));
+    auto& super_pages = scannable_roots_[root];
+    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
+                        super_page_base) == super_pages.end());
+    super_pages.push_back(super_page_base);
+  } else {
+    PA_DCHECK(root->IsQuarantineEnabled());
+    PA_DCHECK(nonscannable_roots_.count(root));
+    auto& super_pages = nonscannable_roots_[root];
+    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
+                        super_page_base) == super_pages.end());
+    super_pages.push_back(super_page_base);
+  }
+}
+
+void PCScanInternal::SetProcessName(const char* process_name) {
+  PA_DCHECK(is_initialized());
+  PA_DCHECK(process_name);
+  PA_DCHECK(!process_name_);
+  process_name_ = process_name;
+}
+
+size_t PCScanInternal::CalculateTotalHeapSize() const {
+  PA_DCHECK(is_initialized());
+  std::lock_guard<std::mutex> lock(roots_mutex_);
+  const auto acc = [](size_t size, const auto& pair) {
+    return size + pair.first->get_total_size_of_committed_pages();
+  };
+  return std::accumulate(scannable_roots_.begin(), scannable_roots_.end(), 0u,
+                         acc) +
+         std::accumulate(nonscannable_roots_.begin(), nonscannable_roots_.end(),
+                         0u, acc);
+}
+
+void PCScanInternal::EnableStackScanning() {
+  PA_DCHECK(!stack_scanning_enabled_);
+  stack_scanning_enabled_ = true;
+}
+void PCScanInternal::DisableStackScanning() {
+  PA_DCHECK(stack_scanning_enabled_);
+  stack_scanning_enabled_ = false;
+}
+bool PCScanInternal::IsStackScanningEnabled() const {
+  return stack_scanning_enabled_;
+}
+
+void PCScanInternal::NotifyThreadCreated(void* stack_top) {
+  const auto tid = base::PlatformThread::CurrentId();
+  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
+  const auto res = stack_tops_.insert({tid, stack_top});
+  PA_DCHECK(res.second);
+}
+
+void PCScanInternal::NotifyThreadDestroyed() {
+  const auto tid = base::PlatformThread::CurrentId();
+  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
+  PA_DCHECK(1 == stack_tops_.count(tid));
+  stack_tops_.erase(tid);
+}
+
+void* PCScanInternal::GetCurrentThreadStackTop() const {
+  const auto tid = base::PlatformThread::CurrentId();
+  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
+  auto it = stack_tops_.find(tid);
+  return it != stack_tops_.end() ? it->second : nullptr;
+}
+
+bool PCScanInternal::WriteProtectionEnabled() const {
+  return write_protector_->IsEnabled();
+}
+
+void PCScanInternal::ProtectPages(uintptr_t begin, size_t size) {
+  // Slot-span sizes are a multiple of the system page size. However, the
+  // ranges that are recorded are not, since in the snapshot we only record the
+  // used payload. Therefore we align up the incoming range to the system page
+  // size. The unused part of
+  // slot-spans doesn't need to be protected (the allocator will enter the
+  // safepoint before trying to allocate from it).
+  PA_SCAN_DCHECK(write_protector_.get());
+  write_protector_->ProtectPages(
+      begin,
+      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
+}
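+
+// For illustration, assuming SystemPageSize() == 4096: a recorded payload of
+// 5000 bytes is protected as AlignUp(5000, 4096) == 8192 bytes, i.e. two full
+// system pages.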
+
+void PCScanInternal::UnprotectPages(uintptr_t begin, size_t size) {
+  PA_SCAN_DCHECK(write_protector_.get());
+  write_protector_->UnprotectPages(
+      begin,
+      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
+}
+
+void PCScanInternal::ClearRootsForTesting() {
+  std::lock_guard<std::mutex> lock(roots_mutex_);
+  // Set all roots as non-scannable and non-quarantinable.
+  for (auto& pair : scannable_roots_) {
+    Root* root = pair.first;
+    root->settings.scan_mode = Root::ScanMode::kDisabled;
+    root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
+  }
+  for (auto& pair : nonscannable_roots_) {
+    Root* root = pair.first;
+    root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
+  }
+  // Make sure to destroy maps so that on the following ReinitForTesting() call
+  // the maps don't attempt to destroy the backing.
+  scannable_roots_.clear();
+  scannable_roots_.~RootsMap();
+  nonscannable_roots_.clear();
+  nonscannable_roots_.~RootsMap();
+  // Destroy the write protector object, so that there is no double free on the
+  // next call to ReinitForTesting().
+  write_protector_.reset();
+}
+
+void PCScanInternal::ReinitForTesting(PCScan::InitConfig config) {
+  is_initialized_ = false;
+  auto* new_this = new (this) PCScanInternal;
+  new_this->Initialize(config);
+}
+
+void PCScanInternal::FinishScanForTesting() {
+  auto current_task = CurrentPCScanTask();
+  PA_CHECK(current_task.get());
+  current_task->RunFromScanner();
+}
+
+void PCScanInternal::RegisterStatsReporter(
+    partition_alloc::StatsReporter* reporter) {
+  PA_DCHECK(reporter);
+  stats_reporter_ = reporter;
+}
+
+partition_alloc::StatsReporter& PCScanInternal::GetReporter() {
+  PA_DCHECK(stats_reporter_);
+  return *stats_reporter_;
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h
new file mode 100644
index 0000000..fd7f16b
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h
@@ -0,0 +1,149 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_INTERNAL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_INTERNAL_H_
+
+#include <array>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/memory/scoped_refptr.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/starscan_fwd.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.h"
+
+namespace partition_alloc::internal {
+
+class PCScanTask;
+
+// Internal PCScan singleton. The separation between frontend and backend is
+// needed to keep access to the hot data (quarantine) in the frontend fast,
+// whereas the backend can hold cold data.
+class PCScanInternal final {
+ public:
+  using Root = PCScan::Root;
+  using TaskHandle = scoped_refptr<PCScanTask>;
+
+  using SuperPages = std::vector<uintptr_t, MetadataAllocator<uintptr_t>>;
+  using RootsMap =
+      std::unordered_map<Root*,
+                         SuperPages,
+                         std::hash<Root*>,
+                         std::equal_to<>,
+                         MetadataAllocator<std::pair<Root* const, SuperPages>>>;
+
+  static PCScanInternal& Instance() {
+    // Since the data that PCScanInternal holds is cold, it's fine to have the
+    // runtime check for thread-safe local static initialization.
+    static internal::base::NoDestructor<PCScanInternal> instance;
+    return *instance;
+  }
+
+  PCScanInternal(const PCScanInternal&) = delete;
+  PCScanInternal& operator=(const PCScanInternal&) = delete;
+
+  ~PCScanInternal();
+
+  void Initialize(PCScan::InitConfig);
+  bool is_initialized() const { return is_initialized_; }
+
+  void PerformScan(PCScan::InvocationMode);
+  void PerformScanIfNeeded(PCScan::InvocationMode);
+  void PerformDelayedScan(base::TimeDelta delay);
+  void JoinScan();
+
+  TaskHandle CurrentPCScanTask() const;
+  void SetCurrentPCScanTask(TaskHandle task);
+  void ResetCurrentPCScanTask();
+
+  void RegisterScannableRoot(Root*);
+  void RegisterNonScannableRoot(Root*);
+
+  RootsMap& scannable_roots() { return scannable_roots_; }
+  const RootsMap& scannable_roots() const { return scannable_roots_; }
+
+  RootsMap& nonscannable_roots() { return nonscannable_roots_; }
+  const RootsMap& nonscannable_roots() const { return nonscannable_roots_; }
+
+  void RegisterNewSuperPage(Root* root, uintptr_t super_page_base);
+
+  void SetProcessName(const char* name);
+  const char* process_name() const { return process_name_; }
+
+  // Get size of all committed pages from scannable and nonscannable roots.
+  size_t CalculateTotalHeapSize() const;
+
+  SimdSupport simd_support() const { return simd_support_; }
+
+  void EnableStackScanning();
+  void DisableStackScanning();
+  bool IsStackScanningEnabled() const;
+
+  void EnableImmediateFreeing() { immediate_freeing_enabled_ = true; }
+  bool IsImmediateFreeingEnabled() const { return immediate_freeing_enabled_; }
+
+  void NotifyThreadCreated(void* stack_top);
+  void NotifyThreadDestroyed();
+
+  void* GetCurrentThreadStackTop() const;
+
+  bool WriteProtectionEnabled() const;
+  void ProtectPages(uintptr_t begin, size_t size);
+  void UnprotectPages(uintptr_t begin, size_t size);
+
+  void ClearRootsForTesting();                // IN-TEST
+  void ReinitForTesting(PCScan::InitConfig);  // IN-TEST
+  void FinishScanForTesting();                // IN-TEST
+
+  void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
+  partition_alloc::StatsReporter& GetReporter();
+
+ private:
+  friend internal::base::NoDestructor<PCScanInternal>;
+  friend class StarScanSnapshot;
+
+  using StackTops = std::unordered_map<
+      internal::base::PlatformThreadId,
+      void*,
+      std::hash<internal::base::PlatformThreadId>,
+      std::equal_to<>,
+      MetadataAllocator<
+          std::pair<const internal::base::PlatformThreadId, void*>>>;
+
+  PCScanInternal();
+
+  TaskHandle current_task_;
+  mutable std::mutex current_task_mutex_;
+
+  RootsMap scannable_roots_;
+  RootsMap nonscannable_roots_;
+  mutable std::mutex roots_mutex_;
+
+  bool stack_scanning_enabled_{false};
+  // TLS emulation of stack tops. Since this is guaranteed to go through
+  // the non-quarantinable partition, using it from safepoints is safe.
+  StackTops stack_tops_;
+  mutable std::mutex stack_tops_mutex_;
+
+  bool immediate_freeing_enabled_{false};
+
+  const char* process_name_ = nullptr;
+  const SimdSupport simd_support_;
+
+  std::unique_ptr<WriteProtector> write_protector_;
+  partition_alloc::StatsReporter* stats_reporter_ = nullptr;
+
+  bool is_initialized_ = false;
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_INTERNAL_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.cc
new file mode 100644
index 0000000..319e016
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.cc
@@ -0,0 +1,218 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h"
+
+#include <algorithm>
+#include <atomic>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_hooks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+
+namespace partition_alloc::internal {
+
+// static
+constexpr size_t QuarantineData::kQuarantineSizeMinLimit;
+
+void PCScanScheduler::SetNewSchedulingBackend(
+    PCScanSchedulingBackend& backend) {
+  backend_ = &backend;
+}
+
+void PCScanSchedulingBackend::DisableScheduling() {
+  scheduling_enabled_.store(false, std::memory_order_relaxed);
+}
+
+void PCScanSchedulingBackend::EnableScheduling() {
+  scheduling_enabled_.store(true, std::memory_order_relaxed);
+  // Check if *Scan needs to be run immediately.
+  if (NeedsToImmediatelyScan()) {
+    PCScan::PerformScan(PCScan::InvocationMode::kNonBlocking);
+  }
+}
+
+size_t PCScanSchedulingBackend::ScanStarted() {
+  auto& data = GetQuarantineData();
+  data.epoch.fetch_add(1, std::memory_order_relaxed);
+  return data.current_size.exchange(0, std::memory_order_relaxed);
+}
+
+base::TimeDelta PCScanSchedulingBackend::UpdateDelayedSchedule() {
+  return base::TimeDelta();
+}
+
+// static
+constexpr double LimitBackend::kQuarantineSizeFraction;
+
+bool LimitBackend::LimitReached() {
+  return is_scheduling_enabled();
+}
+
+void LimitBackend::UpdateScheduleAfterScan(size_t survived_bytes,
+                                           base::TimeDelta,
+                                           size_t heap_size) {
+  scheduler_.AccountFreed(survived_bytes);
+  // |heap_size| includes the current quarantine size; we intentionally leave
+  // some slack until hitting the limit.
+  auto& data = GetQuarantineData();
+  data.size_limit.store(
+      std::max(QuarantineData::kQuarantineSizeMinLimit,
+               static_cast<size_t>(kQuarantineSizeFraction * heap_size)),
+      std::memory_order_relaxed);
+}
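+
+// For illustration, with kQuarantineSizeFraction == 0.1: a 64 MiB heap yields
+// a new quarantine limit of max(kQuarantineSizeMinLimit, 6.4 MiB) == 6.4 MiB,
+// whereas a very small heap is clamped to the 1 MiB minimum.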
+
+bool LimitBackend::NeedsToImmediatelyScan() {
+  return false;
+}
+
+// static
+constexpr double MUAwareTaskBasedBackend::kSoftLimitQuarantineSizePercent;
+// static
+constexpr double MUAwareTaskBasedBackend::kHardLimitQuarantineSizePercent;
+// static
+constexpr double MUAwareTaskBasedBackend::kTargetMutatorUtilizationPercent;
+
+MUAwareTaskBasedBackend::MUAwareTaskBasedBackend(
+    PCScanScheduler& scheduler,
+    ScheduleDelayedScanFunc schedule_delayed_scan)
+    : PCScanSchedulingBackend(scheduler),
+      schedule_delayed_scan_(schedule_delayed_scan) {
+  PA_DCHECK(schedule_delayed_scan_);
+}
+
+MUAwareTaskBasedBackend::~MUAwareTaskBasedBackend() = default;
+
+bool MUAwareTaskBasedBackend::LimitReached() {
+  bool should_reschedule = false;
+  base::TimeDelta reschedule_delay;
+  {
+    ScopedGuard guard(scheduler_lock_);
+    // At this point we reached a limit where the schedule generally wants to
+    // trigger a scan.
+    if (hard_limit_) {
+      // The hard limit is not reset, indicating that the scheduler only hit the
+      // soft limit. See inlined comments for the algorithm.
+      auto& data = GetQuarantineData();
+      PA_DCHECK(hard_limit_ >= QuarantineData::kQuarantineSizeMinLimit);
+      // 1. Update the limit to the hard limit which will always immediately
+      // trigger a scan.
+      data.size_limit.store(hard_limit_, std::memory_order_relaxed);
+      hard_limit_ = 0;
+
+      // 2. Unlikely case: If also above hard limit, start scan right away. This
+      // ignores explicit PCScan disabling.
+      if (PA_UNLIKELY(data.current_size.load(std::memory_order_relaxed) >
+                      data.size_limit.load(std::memory_order_relaxed))) {
+        return true;
+      }
+
+      // 3. Check if PCScan was explicitly disabled.
+      if (PA_UNLIKELY(!is_scheduling_enabled())) {
+        return false;
+      }
+
+      // 4. Otherwise, the soft limit would trigger a scan immediately if the
+      // mutator utilization requirement is satisfied.
+      reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
+      if (reschedule_delay <= base::TimeDelta()) {
+        // May invoke scan immediately.
+        return true;
+      }
+
+      PA_PCSCAN_VLOG(3) << "Rescheduling scan with delay: "
+                        << reschedule_delay.InMillisecondsF() << " ms";
+      // 5. If the MU requirement is not satisfied, schedule a delayed scan to
+      // the time instance when MU is satisfied.
+      should_reschedule = true;
+    }
+  }
+  // Don't reschedule under the lock as the callback can call free() and
+  // recursively enter the lock.
+  if (should_reschedule) {
+    schedule_delayed_scan_(reschedule_delay.InMicroseconds());
+    return false;
+  }
+  return true;
+}
+
+size_t MUAwareTaskBasedBackend::ScanStarted() {
+  ScopedGuard guard(scheduler_lock_);
+
+  return PCScanSchedulingBackend::ScanStarted();
+}
+
+void MUAwareTaskBasedBackend::UpdateScheduleAfterScan(
+    size_t survived_bytes,
+    base::TimeDelta time_spent_in_scan,
+    size_t heap_size) {
+  scheduler_.AccountFreed(survived_bytes);
+
+  ScopedGuard guard(scheduler_lock_);
+
+  // |heap_size| includes the current quarantine size; we intentionally leave
+  // some slack until hitting the limit.
+  auto& data = GetQuarantineData();
+  data.size_limit.store(
+      std::max(
+          QuarantineData::kQuarantineSizeMinLimit,
+          static_cast<size_t>(kSoftLimitQuarantineSizePercent * heap_size)),
+      std::memory_order_relaxed);
+  hard_limit_ = std::max(
+      QuarantineData::kQuarantineSizeMinLimit,
+      static_cast<size_t>(kHardLimitQuarantineSizePercent * heap_size));
+
+  // This computes the time window that the scheduler will reserve for the
+  // mutator. Scanning, unless reaching the hard limit, will generally be
+  // delayed until this time has passed.
+  const auto time_required_on_mutator =
+      time_spent_in_scan * kTargetMutatorUtilizationPercent /
+      (1.0 - kTargetMutatorUtilizationPercent);
+  earliest_next_scan_time_ = base::TimeTicks::Now() + time_required_on_mutator;
+}
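+
+// For illustration, if kTargetMutatorUtilizationPercent were 0.9 and a scan
+// took 10 ms, the reserved mutator window would be 10 ms * 0.9 / (1.0 - 0.9)
+// == 90 ms, so the next scan is not started before Now() + 90 ms unless the
+// hard limit is hit first.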
+
+bool MUAwareTaskBasedBackend::NeedsToImmediatelyScan() {
+  bool should_reschedule = false;
+  base::TimeDelta reschedule_delay;
+  {
+    ScopedGuard guard(scheduler_lock_);
+    // If |hard_limit_| was set to zero, the soft limit was reached. Bail out
+    // if it wasn't (i.e. |hard_limit_| is still set).
+    if (hard_limit_) {
+      return false;
+    }
+
+    // Check if the mutator utilization requirement is satisfied.
+    reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
+    if (reschedule_delay <= base::TimeDelta()) {
+      // May invoke scan immediately.
+      return true;
+    }
+
+    PA_PCSCAN_VLOG(3) << "Rescheduling scan with delay: "
+                      << reschedule_delay.InMillisecondsF() << " ms";
+    // Schedule a delayed scan to the time instance when MU is satisfied.
+    should_reschedule = true;
+  }
+  // Don't reschedule under the lock as the callback can call free() and
+  // recursively enter the lock.
+  if (should_reschedule) {
+    schedule_delayed_scan_(reschedule_delay.InMicroseconds());
+  }
+  return false;
+}
+
+base::TimeDelta MUAwareTaskBasedBackend::UpdateDelayedSchedule() {
+  ScopedGuard guard(scheduler_lock_);
+  // TODO(1197479): Adjust schedule to current heap sizing.
+  const auto delay = earliest_next_scan_time_ - base::TimeTicks::Now();
+  PA_PCSCAN_VLOG(3) << "Schedule is off by " << delay.InMillisecondsF() << "ms";
+  return delay >= base::TimeDelta() ? delay : base::TimeDelta();
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h
new file mode 100644
index 0000000..f795535
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h
@@ -0,0 +1,209 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_SCHEDULING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_SCHEDULING_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+
+namespace partition_alloc::internal {
+
+class PCScanScheduler;
+
+struct QuarantineData final {
+  static constexpr size_t kQuarantineSizeMinLimit = 1 * 1024 * 1024;
+
+  inline constexpr QuarantineData();
+
+  bool MinimumScanningThresholdReached() const {
+    return current_size.load(std::memory_order_relaxed) >
+           kQuarantineSizeMinLimit;
+  }
+
+  std::atomic<size_t> current_size{0u};
+  std::atomic<size_t> size_limit{kQuarantineSizeMinLimit};
+  std::atomic<size_t> epoch{0u};
+};
+
+// No virtual destructor, to allow constant initialization of PCScan as a
+// static global that directly embeds LimitBackend as the default backend.
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanSchedulingBackend {
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+ public:
+  inline constexpr explicit PCScanSchedulingBackend(PCScanScheduler&);
+
+  PCScanSchedulingBackend(const PCScanSchedulingBackend&) = delete;
+  PCScanSchedulingBackend& operator=(const PCScanSchedulingBackend&) = delete;
+
+  void DisableScheduling();
+  void EnableScheduling();
+
+  bool is_scheduling_enabled() const {
+    return scheduling_enabled_.load(std::memory_order_relaxed);
+  }
+
+  inline QuarantineData& GetQuarantineData();
+
+  // Invoked when the limit in PCScanScheduler is reached. Returning true
+  // signals the caller to invoke a scan.
+  virtual bool LimitReached() = 0;
+
+  // Invoked on starting a scan. Returns current quarantine size.
+  virtual size_t ScanStarted();
+
+  // Invoked at the end of a scan to compute a new limit.
+  virtual void UpdateScheduleAfterScan(size_t survived_bytes,
+                                       base::TimeDelta time_spent_in_scan,
+                                       size_t heap_size) = 0;
+
+  // Invoked by PCScan to ask for a new timeout for a scheduled PCScan task.
+  // Only invoked if the scheduler requested a delayed scan at some point.
+  virtual base::TimeDelta UpdateDelayedSchedule();
+
+ protected:
+  inline bool SchedulingDisabled() const;
+
+  virtual bool NeedsToImmediatelyScan() = 0;
+
+  PCScanScheduler& scheduler_;
+  std::atomic<bool> scheduling_enabled_{true};
+};
+
+// Scheduling backend that just considers a single hard limit.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LimitBackend final
+    : public PCScanSchedulingBackend {
+ public:
+  static constexpr double kQuarantineSizeFraction = 0.1;
+
+  inline constexpr explicit LimitBackend(PCScanScheduler&);
+
+  bool LimitReached() final;
+  void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
+
+ private:
+  bool NeedsToImmediatelyScan() final;
+};
+
+// Task-based backend that is aware of a target mutator utilization (MU),
+// which specifies what percentage of the execution should be reserved for
+// the mutator. I.e., the MU-aware scheduler ensures that scans are limited
+// so that there is enough time left for the mutator to execute the actual
+// application workload.
+//
+// See constants below for trigger mechanisms.
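+//
+// For example, freed bytes crossing `kSoftLimitQuarantineSizePercent` of the
+// heap size only trigger an immediate scan if the mutator-utilization window
+// has already elapsed; otherwise a delayed scan is scheduled through the
+// `ScheduleDelayedScanFunc` callback. Crossing
+// `kHardLimitQuarantineSizePercent` triggers an immediate scan in any case.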
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MUAwareTaskBasedBackend final
+    : public PCScanSchedulingBackend {
+ public:
+  using ScheduleDelayedScanFunc = void (*)(int64_t delay_in_microseconds);
+
+  MUAwareTaskBasedBackend(PCScanScheduler&, ScheduleDelayedScanFunc);
+  ~MUAwareTaskBasedBackend();
+
+  bool LimitReached() final;
+  size_t ScanStarted() final;
+  void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
+  base::TimeDelta UpdateDelayedSchedule() final;
+
+ private:
+  // Limit triggering the scheduler. If `kTargetMutatorUtilizationPercent` is
+  // satisfied at this point then a scan is triggered immediately.
+  static constexpr double kSoftLimitQuarantineSizePercent = 0.1;
+  // Hard limit at which a scan is triggered in any case. Avoids blowing up the
+  // heap completely.
+  static constexpr double kHardLimitQuarantineSizePercent = 0.5;
+  // Target mutator utilization that is respected when invoking a scan.
+  // Specifies what percentage of walltime should be spent in the mutator.
+  // Inversely, it bounds how much walltime (and indirectly CPU) is spent on
+  // memory management in scans.
+  static constexpr double kTargetMutatorUtilizationPercent = 0.90;
+
+  bool NeedsToImmediatelyScan() final;
+
+  // Callback to schedule a delayed scan.
+  const ScheduleDelayedScanFunc schedule_delayed_scan_;
+
+  Lock scheduler_lock_;
+  size_t hard_limit_ PA_GUARDED_BY(scheduler_lock_){0};
+  base::TimeTicks earliest_next_scan_time_ PA_GUARDED_BY(scheduler_lock_);
+
+  friend class PartitionAllocPCScanMUAwareTaskBasedBackendTest;
+};
+
+// The scheduler that is embedded in the PCScan frontend, which requires a
+// fast path for freeing objects. The scheduler holds data needed to invoke a
+// `PCScanSchedulingBackend` upon hitting a limit. The backend implements
+// the actual scheduling strategy and is in charge of maintaining limits.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanScheduler final {
+ public:
+  inline constexpr PCScanScheduler();
+
+  PCScanScheduler(const PCScanScheduler&) = delete;
+  PCScanScheduler& operator=(const PCScanScheduler&) = delete;
+
+  // Accounts freed `bytes`. Returns true if a scan should be triggered
+  // immediately, and false otherwise.
+  PA_ALWAYS_INLINE bool AccountFreed(size_t bytes);
+
+  size_t epoch() const {
+    return quarantine_data_.epoch.load(std::memory_order_relaxed);
+  }
+
+  // Sets a new scheduling backend that should be used by the scanner.
+  void SetNewSchedulingBackend(PCScanSchedulingBackend&);
+
+  PCScanSchedulingBackend& scheduling_backend() { return *backend_; }
+  const PCScanSchedulingBackend& scheduling_backend() const {
+    return *backend_;
+  }
+
+ private:
+  QuarantineData quarantine_data_{};
+  // The default backend used is a simple LimitBackend that just triggers a
+  // scan on reaching a hard limit.
+  LimitBackend default_scheduling_backend_{*this};
+  PCScanSchedulingBackend* backend_ = &default_scheduling_backend_;
+
+  friend PCScanSchedulingBackend;
+};
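+
+// A minimal usage sketch; `ScheduleDelayedScan` and `slot_size` are
+// placeholders for the embedder's delayed-scan callback and the size of the
+// freed slot:
+//
+//   PCScanScheduler scheduler;
+//   MUAwareTaskBasedBackend backend(scheduler, &ScheduleDelayedScan);
+//   scheduler.SetNewSchedulingBackend(backend);
+//   // On every quarantined free():
+//   if (scheduler.AccountFreed(slot_size)) {
+//     // Limit reached: trigger a scan.
+//   }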
+
+// To please Chromium's clang plugin.
+constexpr PCScanScheduler::PCScanScheduler() = default;
+constexpr QuarantineData::QuarantineData() = default;
+
+constexpr PCScanSchedulingBackend::PCScanSchedulingBackend(
+    PCScanScheduler& scheduler)
+    : scheduler_(scheduler) {}
+
+QuarantineData& PCScanSchedulingBackend::GetQuarantineData() {
+  return scheduler_.quarantine_data_;
+}
+
+constexpr LimitBackend::LimitBackend(PCScanScheduler& scheduler)
+    : PCScanSchedulingBackend(scheduler) {}
+
+PA_ALWAYS_INLINE bool PCScanScheduler::AccountFreed(size_t size) {
+  const size_t size_before =
+      quarantine_data_.current_size.fetch_add(size, std::memory_order_relaxed);
+  return (size_before + size >
+          quarantine_data_.size_limit.load(std::memory_order_relaxed)) &&
+         backend_->LimitReached();
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_PCSCAN_SCHEDULING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling_unittest.cc
new file mode 100644
index 0000000..768080e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_scheduling.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time_override.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+constexpr size_t kMB = 1024 * 1024;
+}  // namespace
+
+TEST(PartitionAllocPCScanSchedulerLimitBackendTest,
+     NoScanBelowMinimumScanningThreshold) {
+  PCScanScheduler scheduler;
+  LimitBackend limit_backend(scheduler);
+  scheduler.SetNewSchedulingBackend(limit_backend);
+  constexpr size_t kMinimumScanningThreshold =
+      QuarantineData::kQuarantineSizeMinLimit;
+  EXPECT_FALSE(scheduler.AccountFreed(kMinimumScanningThreshold / 2));
+  EXPECT_FALSE(scheduler.AccountFreed(kMinimumScanningThreshold -
+                                      kMinimumScanningThreshold / 2));
+  EXPECT_TRUE(scheduler.AccountFreed(1));
+}
+
+TEST(PartitionAllocPCScanSchedulerLimitBackendTest,
+     ScanAtQuarantineSizeFraction) {
+  PCScanScheduler scheduler;
+  LimitBackend limit_backend(scheduler);
+  scheduler.SetNewSchedulingBackend(limit_backend);
+  constexpr size_t kHeapSize = 100 * kMB;
+  constexpr size_t kNoSurvivedBytes = 0;
+  limit_backend.UpdateScheduleAfterScan(kNoSurvivedBytes, base::TimeDelta(),
+                                        kHeapSize);
+  constexpr size_t kExpectedTriggerSize = static_cast<size_t>(
+      static_cast<double>(kHeapSize) * LimitBackend::kQuarantineSizeFraction);
+  EXPECT_FALSE(scheduler.AccountFreed(kExpectedTriggerSize / 2));
+  EXPECT_FALSE(
+      scheduler.AccountFreed(kExpectedTriggerSize - kExpectedTriggerSize / 2));
+  EXPECT_TRUE(scheduler.AccountFreed(1));
+}
+
+class PartitionAllocPCScanMUAwareTaskBasedBackendTest : public ::testing::Test {
+ public:
+  static constexpr size_t kHeapSize = 100 * kMB;
+
+  static constexpr size_t HardLimitSize(size_t heap_size) {
+    return static_cast<size_t>(
+               static_cast<double>(heap_size) *
+               MUAwareTaskBasedBackend::kHardLimitQuarantineSizePercent) +
+           1;
+  }
+
+  static constexpr size_t SoftLimitSize(size_t heap_size) {
+    return static_cast<size_t>(
+               static_cast<double>(heap_size) *
+               MUAwareTaskBasedBackend::kSoftLimitQuarantineSizePercent) +
+           1;
+  }
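+
+  // For reference, with kHeapSize = 100 * kMB these helpers yield
+  // HardLimitSize(kHeapSize) = 50 MB + 1 and
+  // SoftLimitSize(kHeapSize) = 10 MB + 1.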
+
+  PartitionAllocPCScanMUAwareTaskBasedBackendTest()
+      : backend_(scheduler_, &IncrementDelayedScanScheduledCount) {
+    scheduler_.SetNewSchedulingBackend(backend_);
+    constexpr size_t kNoSurvivedBytes = 0;
+    constexpr base::TimeDelta kZeroTimeForScan;
+    backend_.UpdateScheduleAfterScan(kNoSurvivedBytes, kZeroTimeForScan,
+                                     kHeapSize);
+  }
+
+  void SetUp() override { delayed_scan_scheduled_count_ = 0; }
+
+  PCScanScheduler& scheduler() { return scheduler_; }
+  MUAwareTaskBasedBackend& backend() { return backend_; }
+  size_t delayed_scan_scheduled_count() const {
+    return delayed_scan_scheduled_count_;
+  }
+
+ private:
+  static void IncrementDelayedScanScheduledCount(
+      int64_t delay_in_microseconds) {
+    ++delayed_scan_scheduled_count_;
+  }
+
+  static size_t delayed_scan_scheduled_count_;
+  PCScanScheduler scheduler_;
+  MUAwareTaskBasedBackend backend_;
+};
+
+size_t PartitionAllocPCScanMUAwareTaskBasedBackendTest::
+    delayed_scan_scheduled_count_ = 0;
+
+namespace {
+
+class ScopedTimeTicksOverride final {
+ public:
+  ScopedTimeTicksOverride()
+      : ScopedTimeTicksOverride(InitializeTimeAndReturnTimeTicksNow()) {}
+
+  void AddTicksToNow(base::TimeDelta ticks) { now_ticks_ += ticks; }
+
+ private:
+  static base::TimeTicks Now() { return now_ticks_; }
+
+  static base::TimeTicksNowFunction InitializeTimeAndReturnTimeTicksNow() {
+    now_ticks_ = base::TimeTicks::Now();
+    return &Now;
+  }
+
+  explicit ScopedTimeTicksOverride(
+      base::TimeTicksNowFunction time_ticks_function)
+      : overrides_(nullptr, time_ticks_function, nullptr) {}
+
+  static base::TimeTicks now_ticks_;
+
+  base::subtle::ScopedTimeClockOverrides overrides_;
+};
+
+// static
+base::TimeTicks ScopedTimeTicksOverride::now_ticks_;
+
+}  // namespace
+
+TEST_F(PartitionAllocPCScanMUAwareTaskBasedBackendTest,
+       SoftLimitSchedulesScanIfMUNotSatisfied) {
+  // Stop the time.
+  ScopedTimeTicksOverride now_ticks_override;
+  // Simulate a PCScan that processed kHeapSize in 1s. Since time is stopped,
+  // that schedule is not reachable.
+  backend().UpdateScheduleAfterScan(0, base::Seconds(1), kHeapSize);
+
+  EXPECT_EQ(0u, delayed_scan_scheduled_count());
+  EXPECT_FALSE(scheduler().AccountFreed(SoftLimitSize(kHeapSize)));
+  EXPECT_EQ(1u, delayed_scan_scheduled_count());
+}
+
+TEST_F(PartitionAllocPCScanMUAwareTaskBasedBackendTest,
+       SoftLimitInvokesScanIfMUSatisfied) {
+  // Stop the time.
+  ScopedTimeTicksOverride now_ticks_override;
+  // Simulate a PCScan that processed kHeapSize in 0s. The next scan should
+  // thus happen immediately.
+  backend().UpdateScheduleAfterScan(0, base::Seconds(0), kHeapSize);
+
+  EXPECT_EQ(0u, delayed_scan_scheduled_count());
+  EXPECT_TRUE(scheduler().AccountFreed(SoftLimitSize(kHeapSize)));
+  EXPECT_EQ(0u, delayed_scan_scheduled_count());
+}
+
+TEST_F(PartitionAllocPCScanMUAwareTaskBasedBackendTest,
+       HardLimitSchedulesScanImmediately) {
+  // Stop the time.
+  ScopedTimeTicksOverride now_ticks_override;
+  // Simulate a PCScan that processed kHeapSize in 1s. Since time is stopped,
+  // that schedule is not reachable.
+  backend().UpdateScheduleAfterScan(0, base::Seconds(1), kHeapSize);
+
+  EXPECT_EQ(0u, delayed_scan_scheduled_count());
+  // Triggering the hard limit should immediately require a scan and not
+  // schedule anything.
+  EXPECT_TRUE(scheduler().AccountFreed(HardLimitSize(kHeapSize)));
+  EXPECT_EQ(0u, delayed_scan_scheduled_count());
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_unittest.cc
new file mode 100644
index 0000000..5e8ad61
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_unittest.cc
@@ -0,0 +1,838 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdint>
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+
+namespace partition_alloc::internal {
+
+namespace {
+
+struct DisableStackScanningScope final {
+  DisableStackScanningScope() {
+    if (PCScan::IsStackScanningEnabled()) {
+      PCScan::DisableStackScanning();
+      changed_ = true;
+    }
+  }
+  ~DisableStackScanningScope() {
+    if (changed_) {
+      PCScan::EnableStackScanning();
+    }
+  }
+
+ private:
+  bool changed_ = false;
+};
+
+}  // namespace
+
+class PartitionAllocPCScanTestBase : public testing::Test {
+ public:
+  PartitionAllocPCScanTestBase()
+      : allocator_(PartitionOptions{
+            .aligned_alloc = PartitionOptions::kAllowed,
+            .star_scan_quarantine = PartitionOptions::kAllowed,
+            .memory_tagging = {
+                .enabled =
+                    base::CPU::GetInstanceNoAllocation().has_mte()
+                        ? partition_alloc::PartitionOptions::kEnabled
+                        : partition_alloc::PartitionOptions::kDisabled}}) {
+    PartitionAllocGlobalInit([](size_t) { PA_LOG(FATAL) << "Out of memory"; });
+    // Previous test runs within the same process decommit pools, therefore
+    // we need to make sure that the card table is recommitted for each run.
+    PCScan::ReinitForTesting(
+        {PCScan::InitConfig::WantedWriteProtectionMode::kDisabled,
+         PCScan::InitConfig::SafepointMode::kEnabled});
+    allocator_.root()->UncapEmptySlotSpanMemoryForTesting();
+    allocator_.root()->SwitchToDenserBucketDistribution();
+
+    PCScan::RegisterScannableRoot(allocator_.root());
+  }
+
+  ~PartitionAllocPCScanTestBase() override {
+    allocator_.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+                                   PurgeFlags::kDiscardUnusedSystemPages);
+    PartitionAllocGlobalUninitForTesting();
+  }
+
+  void RunPCScan() {
+    PCScan::Instance().PerformScan(PCScan::InvocationMode::kBlocking);
+  }
+
+  void SchedulePCScan() {
+    PCScan::Instance().PerformScan(
+        PCScan::InvocationMode::kScheduleOnlyForTesting);
+  }
+
+  void JoinPCScanAsMutator() {
+    auto& instance = PCScan::Instance();
+    PA_CHECK(instance.IsJoinable());
+    instance.JoinScan();
+  }
+
+  void FinishPCScanAsScanner() { PCScan::FinishScanForTesting(); }
+
+  bool IsInQuarantine(void* object) const {
+    uintptr_t slot_start = root().ObjectToSlotStart(object);
+    return StateBitmapFromAddr(slot_start)->IsQuarantined(slot_start);
+  }
+
+  PartitionRoot& root() { return *allocator_.root(); }
+  const PartitionRoot& root() const { return *allocator_.root(); }
+
+ private:
+  // Leverage the already-templated version outside `internal::`.
+  partition_alloc::PartitionAllocatorAllowLeaksForTesting allocator_;
+};
+
+namespace {
+
+// The test that expects free() to be quarantined only when tag overflow occurs.
+using PartitionAllocPCScanWithMTETest = PartitionAllocPCScanTestBase;
+
+// The test that expects every free() to be quarantined.
+class PartitionAllocPCScanTest : public PartitionAllocPCScanTestBase {
+ public:
+  PartitionAllocPCScanTest() { root().SetQuarantineAlwaysForTesting(true); }
+  ~PartitionAllocPCScanTest() override {
+    root().SetQuarantineAlwaysForTesting(false);
+  }
+};
+
+using SlotSpan = PartitionRoot::SlotSpan;
+
+struct FullSlotSpanAllocation {
+  SlotSpan* slot_span;
+  void* first;
+  void* last;
+};
+
+// Assumes heap is purged.
+FullSlotSpanAllocation GetFullSlotSpan(PartitionRoot& root,
+                                       size_t object_size) {
+  PA_CHECK(0u == root.get_total_size_of_committed_pages());
+
+  const size_t raw_size = root.AdjustSizeForExtrasAdd(object_size);
+  const size_t bucket_index =
+      root.SizeToBucketIndex(raw_size, root.GetBucketDistribution());
+  PartitionRoot::Bucket& bucket = root.buckets[bucket_index];
+  const size_t num_slots = (bucket.get_bytes_per_span()) / bucket.slot_size;
+
+  uintptr_t first = 0;
+  uintptr_t last = 0;
+  for (size_t i = 0; i < num_slots; ++i) {
+    void* ptr = root.Alloc<partition_alloc::AllocFlags::kNoHooks>(object_size);
+    EXPECT_TRUE(ptr);
+    if (i == 0) {
+      first = root.ObjectToSlotStart(ptr);
+    } else if (i == num_slots - 1) {
+      last = root.ObjectToSlotStart(ptr);
+    }
+  }
+
+  EXPECT_EQ(SlotSpan::FromSlotStart(first), SlotSpan::FromSlotStart(last));
+  if (bucket.num_system_pages_per_slot_span ==
+      NumSystemPagesPerPartitionPage()) {
+    // Pointers are expected to be in the same partition page, but have a
+    // different MTE-tag.
+    EXPECT_EQ(UntagAddr(first & PartitionPageBaseMask()),
+              UntagAddr(last & PartitionPageBaseMask()));
+  }
+  EXPECT_EQ(num_slots, bucket.active_slot_spans_head->num_allocated_slots);
+  EXPECT_EQ(nullptr, bucket.active_slot_spans_head->get_freelist_head());
+  EXPECT_TRUE(bucket.is_valid());
+  EXPECT_TRUE(bucket.active_slot_spans_head !=
+              SlotSpan::get_sentinel_slot_span());
+
+  return {bucket.active_slot_spans_head, root.SlotStartToObject(first),
+          root.SlotStartToObject(last)};
+}
+
+bool IsInFreeList(uintptr_t slot_start) {
+  // slot_start isn't MTE-tagged, whereas pointers in the freelist are.
+  void* slot_start_tagged = SlotStartAddr2Ptr(slot_start);
+  auto* slot_span = SlotSpan::FromSlotStart(slot_start);
+  for (auto* entry = slot_span->get_freelist_head(); entry;
+       entry = entry->GetNext(slot_span->bucket->slot_size)) {
+    if (entry == slot_start_tagged) {
+      return true;
+    }
+  }
+  return false;
+}
+
+struct ListBase {
+  // Volatile to prevent the compiler from doing dead store elimination.
+  ListBase* volatile next = nullptr;
+};
+
+template <size_t Size, size_t Alignment = 0>
+struct List final : ListBase {
+  char buffer[Size];
+
+  static List* Create(PartitionRoot& root, ListBase* next = nullptr) {
+    List* list;
+    if (Alignment) {
+      list = static_cast<List*>(root.AlignedAlloc(Alignment, sizeof(List)));
+    } else {
+      list = static_cast<List*>(root.Alloc(sizeof(List), nullptr));
+    }
+    list->next = next;
+    return list;
+  }
+
+  static void Destroy(PartitionRoot& root, List* list) { root.Free(list); }
+};
+
+TEST_F(PartitionAllocPCScanTest, ArbitraryObjectInQuarantine) {
+  using ListType = List<8>;
+
+  auto* obj1 = ListType::Create(root());
+  auto* obj2 = ListType::Create(root());
+  EXPECT_FALSE(IsInQuarantine(obj1));
+  EXPECT_FALSE(IsInQuarantine(obj2));
+
+  ListType::Destroy(root(), obj2);
+  EXPECT_FALSE(IsInQuarantine(obj1));
+  EXPECT_TRUE(IsInQuarantine(obj2));
+}
+
+TEST_F(PartitionAllocPCScanTest, FirstObjectInQuarantine) {
+  static constexpr size_t kAllocationSize = 16;
+
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), kAllocationSize);
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.first));
+
+  root().Free<FreeFlags::kNoHooks>(full_slot_span.first);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
+}
+
+TEST_F(PartitionAllocPCScanTest, LastObjectInQuarantine) {
+  static constexpr size_t kAllocationSize = 16;
+
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), kAllocationSize);
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
+
+  root().Free<FreeFlags::kNoHooks>(full_slot_span.last);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
+}
+
+template <typename SourceList, typename ValueList>
+void TestDanglingReference(PartitionAllocPCScanTest& test,
+                           SourceList* source,
+                           ValueList* value,
+                           PartitionRoot& value_root) {
+  {
+    // Free |value| and leave the dangling reference in |source|.
+    ValueList::Destroy(value_root, value);
+    // Check that |value| is in the quarantine now.
+    EXPECT_TRUE(test.IsInQuarantine(value));
+    // Run PCScan.
+    test.RunPCScan();
+    // Check that the object is still quarantined since it's referenced by
+    // |source|.
+    EXPECT_TRUE(test.IsInQuarantine(value));
+  }
+  {
+    // Get rid of the dangling reference.
+    source->next = nullptr;
+    // Run PCScan again.
+    test.RunPCScan();
+    // Check that the object is no longer in the quarantine.
+    EXPECT_FALSE(test.IsInQuarantine(value));
+    // Check that the object is in the freelist now.
+    EXPECT_TRUE(IsInFreeList(value_root.ObjectToSlotStart(value)));
+  }
+}
+
+void TestDanglingReferenceNotVisited(PartitionAllocPCScanTest& test,
+                                     void* value,
+                                     PartitionRoot& value_root) {
+  value_root.Free(value);
+  // Check that |value| is in the quarantine now.
+  EXPECT_TRUE(test.IsInQuarantine(value));
+  // Run PCScan.
+  test.RunPCScan();
+  // Check that the object is no longer in the quarantine since the pointer to
+  // it was not scanned from the non-scannable partition.
+  EXPECT_FALSE(test.IsInQuarantine(value));
+  // Check that the object is in the freelist now.
+  EXPECT_TRUE(IsInFreeList(value_root.ObjectToSlotStart(value)));
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceSameBucket) {
+  using SourceList = List<8>;
+  using ValueList = SourceList;
+
+  // Create two objects, where |source| references |value|.
+  auto* value = ValueList::Create(root(), nullptr);
+  auto* source = SourceList::Create(root(), value);
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceDifferentBuckets) {
+  using SourceList = List<8>;
+  using ValueList = List<128>;
+
+  // Create two objects, where |source| references |value|.
+  auto* value = ValueList::Create(root(), nullptr);
+  auto* source = SourceList::Create(root(), value);
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceDifferentBucketsAligned) {
+  // Choose a high alignment that almost certainly will cause a gap between slot
+  // spans. But make it less than kMaxSupportedAlignment, or else two
+  // allocations will end up on different super pages.
+  constexpr size_t alignment = kMaxSupportedAlignment / 2;
+  using SourceList = List<8, alignment>;
+  using ValueList = List<128, alignment>;
+
+  // Create two objects, where |source| references |value|.
+  auto* value = ValueList::Create(root(), nullptr);
+  auto* source = SourceList::Create(root(), value);
+
+  // Double check the setup -- make sure that exactly two slot spans were
+  // allocated, within the same super page, with a gap in between.
+  {
+    ::partition_alloc::internal::ScopedGuard guard{root().lock_};
+
+    uintptr_t value_slot_start = root().ObjectToSlotStart(value);
+    uintptr_t source_slot_start = root().ObjectToSlotStart(source);
+    auto super_page = value_slot_start & kSuperPageBaseMask;
+    ASSERT_EQ(super_page, source_slot_start & kSuperPageBaseMask);
+    size_t i = 0;
+    uintptr_t first_slot_span_end = 0;
+    uintptr_t second_slot_span_start = 0;
+    IterateSlotSpans(super_page, true, [&](SlotSpan* slot_span) -> bool {
+      if (i == 0) {
+        first_slot_span_end =
+            SlotSpan::ToSlotSpanStart(slot_span) +
+            slot_span->bucket->get_pages_per_slot_span() * PartitionPageSize();
+      } else {
+        second_slot_span_start = SlotSpan::ToSlotSpanStart(slot_span);
+      }
+      ++i;
+      return false;
+    });
+    ASSERT_EQ(i, 2u);
+    ASSERT_GT(second_slot_span_start, first_slot_span_end);
+  }
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest,
+       DanglingReferenceSameSlotSpanButDifferentPages) {
+  using SourceList = List<8>;
+  using ValueList = SourceList;
+
+  static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
+      static_cast<size_t>(PartitionPageSize() * 0.75);
+
+  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
+      root(), root().AdjustSizeForExtrasSubtract(
+                  kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
+
+  // Assert that the first and the last objects are in the same slot span but on
+  // different partition pages.
+  // Converting to slot start also takes care of the MTE-tag difference.
+  ASSERT_EQ(SlotSpan::FromObject(full_slot_span.first),
+            SlotSpan::FromObject(full_slot_span.last));
+  uintptr_t first_slot_start = root().ObjectToSlotStart(full_slot_span.first);
+  uintptr_t last_slot_start = root().ObjectToSlotStart(full_slot_span.last);
+  ASSERT_NE(first_slot_start & PartitionPageBaseMask(),
+            last_slot_start & PartitionPageBaseMask());
+
+  // Create two objects, on different partition pages.
+  auto* value = new (full_slot_span.first) ValueList;
+  auto* source = new (full_slot_span.last) SourceList;
+  source->next = value;
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromFullPage) {
+  using SourceList = List<64>;
+  using ValueList = SourceList;
+
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), sizeof(SourceList));
+  void* source_buffer = full_slot_span.first;
+  // This allocation must go through the slow path and call SetNewActivePage(),
+  // which will flush the full page from the active page list.
+  void* value_buffer =
+      root().Alloc<partition_alloc::AllocFlags::kNoHooks>(sizeof(ValueList));
+
+  // Assert that the first and the last objects are in different slot spans but
+  // in the same bucket.
+  SlotSpan* source_slot_span =
+      PartitionRoot::SlotSpan::FromObject(source_buffer);
+  SlotSpan* value_slot_span = PartitionRoot::SlotSpan::FromObject(value_buffer);
+  ASSERT_NE(source_slot_span, value_slot_span);
+  ASSERT_EQ(source_slot_span->bucket, value_slot_span->bucket);
+
+  // Create two objects, where |source| is in a full detached page.
+  auto* value = new (value_buffer) ValueList;
+  auto* source = new (source_buffer) SourceList;
+  source->next = value;
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+template <size_t Size>
+struct ListWithInnerReference {
+  char buffer1[Size];
+  // Volatile to prevent the compiler from doing dead store elimination.
+  char* volatile next = nullptr;
+  char buffer2[Size];
+
+  static ListWithInnerReference* Create(PartitionRoot& root) {
+    auto* list = static_cast<ListWithInnerReference*>(
+        root.Alloc(sizeof(ListWithInnerReference), nullptr));
+    return list;
+  }
+
+  static void Destroy(PartitionRoot& root, ListWithInnerReference* list) {
+    root.Free(list);
+  }
+};
+
+// Disabled due to consistent failure http://crbug.com/1242407
+#if BUILDFLAG(IS_ANDROID)
+#define MAYBE_DanglingInnerReference DISABLED_DanglingInnerReference
+#else
+#define MAYBE_DanglingInnerReference DanglingInnerReference
+#endif
+TEST_F(PartitionAllocPCScanTest, MAYBE_DanglingInnerReference) {
+  using SourceList = ListWithInnerReference<64>;
+  using ValueList = SourceList;
+
+  auto* source = SourceList::Create(root());
+  auto* value = ValueList::Create(root());
+  source->next = value->buffer2;
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromSingleSlotSlotSpan) {
+  using SourceList = List<kMaxBucketed - 4096>;
+  using ValueList = SourceList;
+
+  auto* source = SourceList::Create(root());
+  auto* slot_span = SlotSpanMetadata::FromObject(source);
+  ASSERT_TRUE(slot_span->CanStoreRawSize());
+
+  auto* value = ValueList::Create(root());
+  source->next = value;
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingInterPartitionReference) {
+  using SourceList = List<64>;
+  using ValueList = SourceList;
+
+  PartitionRoot source_root(PartitionOptions{
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+  source_root.UncapEmptySlotSpanMemoryForTesting();
+  PartitionRoot value_root(PartitionOptions{
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+  value_root.UncapEmptySlotSpanMemoryForTesting();
+
+  PCScan::RegisterScannableRoot(&source_root);
+  source_root.SetQuarantineAlwaysForTesting(true);
+  PCScan::RegisterScannableRoot(&value_root);
+  value_root.SetQuarantineAlwaysForTesting(true);
+
+  auto* source = SourceList::Create(source_root);
+  auto* value = ValueList::Create(value_root);
+  source->next = value;
+
+  TestDanglingReference(*this, source, value, value_root);
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceToNonScannablePartition) {
+  using SourceList = List<64>;
+  using ValueList = SourceList;
+
+  PartitionRoot source_root(PartitionOptions{
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+  source_root.UncapEmptySlotSpanMemoryForTesting();
+  PartitionRoot value_root(PartitionOptions{
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+  value_root.UncapEmptySlotSpanMemoryForTesting();
+
+  PCScan::RegisterScannableRoot(&source_root);
+  source_root.SetQuarantineAlwaysForTesting(true);
+  PCScan::RegisterNonScannableRoot(&value_root);
+  value_root.SetQuarantineAlwaysForTesting(true);
+
+  auto* source = SourceList::Create(source_root);
+  auto* value = ValueList::Create(value_root);
+  source->next = value;
+
+  TestDanglingReference(*this, source, value, value_root);
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromNonScannablePartition) {
+  using SourceList = List<64>;
+  using ValueList = SourceList;
+
+  PartitionRoot source_root(PartitionOptions{
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+  source_root.UncapEmptySlotSpanMemoryForTesting();
+  PartitionRoot value_root(PartitionOptions{
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+  value_root.UncapEmptySlotSpanMemoryForTesting();
+
+  PCScan::RegisterNonScannableRoot(&source_root);
+  value_root.SetQuarantineAlwaysForTesting(true);
+  PCScan::RegisterScannableRoot(&value_root);
+  source_root.SetQuarantineAlwaysForTesting(true);
+
+  auto* source = SourceList::Create(source_root);
+  auto* value = ValueList::Create(value_root);
+  source->next = value;
+
+  TestDanglingReferenceNotVisited(*this, value, value_root);
+}
+
+// Death tests misbehave on Android, http://crbug.com/643760.
+#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
+#if PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
+TEST_F(PartitionAllocPCScanTest, DoubleFree) {
+  auto* list = List<1>::Create(root());
+  List<1>::Destroy(root(), list);
+  EXPECT_DEATH(List<1>::Destroy(root(), list), "");
+}
+#endif  // PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
+
+template <typename SourceList, typename ValueList>
+void TestDanglingReferenceWithSafepoint(PartitionAllocPCScanTest& test,
+                                        SourceList* source,
+                                        ValueList* value,
+                                        PartitionRoot& value_root) {
+  {
+    // Free |value| and leave the dangling reference in |source|.
+    ValueList::Destroy(value_root, value);
+    // Check that |value| is in the quarantine now.
+    EXPECT_TRUE(test.IsInQuarantine(value));
+    // Schedule PCScan but don't scan.
+    test.SchedulePCScan();
+    // Enter safepoint and scan from mutator.
+    test.JoinPCScanAsMutator();
+    // Check that the object is still quarantined since it's referenced by
+    // |source|.
+    EXPECT_TRUE(test.IsInQuarantine(value));
+    // Check that |value| is not in the freelist.
+    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
+    // Run sweeper.
+    test.FinishPCScanAsScanner();
+    // Check that |value| still exists.
+    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
+  }
+  {
+    // Get rid of the dangling reference.
+    source->next = nullptr;
+    // Schedule PCScan but don't scan.
+    test.SchedulePCScan();
+    // Enter safepoint and scan from mutator.
+    test.JoinPCScanAsMutator();
+    // Check that |value| is not in the freelist yet, since sweeper didn't run.
+    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
+    test.FinishPCScanAsScanner();
+    // Check that the object is no longer in the quarantine.
+    EXPECT_FALSE(test.IsInQuarantine(value));
+    // Check that |value| is in the freelist now.
+    EXPECT_TRUE(IsInFreeList(test.root().ObjectToSlotStart(value)));
+  }
+}
+
+TEST_F(PartitionAllocPCScanTest, Safepoint) {
+  using SourceList = List<64>;
+  using ValueList = SourceList;
+
+  DisableStackScanningScope no_stack_scanning;
+
+  auto* source = SourceList::Create(root());
+  auto* value = ValueList::Create(root());
+  source->next = value;
+
+  TestDanglingReferenceWithSafepoint(*this, source, value, root());
+}
+
+class PartitionAllocPCScanStackScanningTest : public PartitionAllocPCScanTest {
+ protected:
+  // Creates and sets a dangling reference in `dangling_reference_`.
+  PA_NOINLINE void CreateDanglingReference() {
+    using ValueList = List<8>;
+    auto* value = ValueList::Create(root(), nullptr);
+    ValueList::Destroy(root(), value);
+    dangling_reference_ = value;
+  }
+
+  PA_NOINLINE void SetupAndRunTest() {
+    // Register the top of the stack to be the current pointer.
+    PCScan::NotifyThreadCreated(GetStackPointer());
+    RunTest();
+  }
+
+  PA_NOINLINE void RunTest() {
+    // This writes the pointer to the stack.
+    [[maybe_unused]] auto* volatile stack_ref = dangling_reference_;
+    // Call the non-inline function that would scan the stack. Don't execute
+    // the rest of the actions inside the function, since otherwise it would
+    // be tail-call optimized and the parent frame's stack with the dangling
+    // pointer would be missed.
+    ScanStack();
+    // Check that the object is still quarantined since it's referenced by
+    // |dangling_reference_|.
+    EXPECT_TRUE(IsInQuarantine(dangling_reference_));
+    // Check that value is not in the freelist.
+    EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(dangling_reference_)));
+    // Run sweeper.
+    FinishPCScanAsScanner();
+    // Check that |dangling_reference_| still exists.
+    EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(dangling_reference_)));
+  }
+
+  PA_NOINLINE void ScanStack() {
+    // Schedule PCScan but don't scan.
+    SchedulePCScan();
+    // Enter safepoint and scan from mutator. This will scan the stack.
+    JoinPCScanAsMutator();
+  }
+
+  static void* dangling_reference_;
+};
+
+// static
+void* PartitionAllocPCScanStackScanningTest::dangling_reference_ = nullptr;
+
+// The test currently fails on some platforms due to the dangling reference on
+// the stack not being found.
+TEST_F(PartitionAllocPCScanStackScanningTest, DISABLED_StackScanning) {
+  PCScan::EnableStackScanning();
+
+  // Set to nullptr if the test is retried.
+  dangling_reference_ = nullptr;
+
+  CreateDanglingReference();
+
+  SetupAndRunTest();
+}
+
+TEST_F(PartitionAllocPCScanTest, DontScanUnusedRawSize) {
+  using ValueList = List<8>;
+
+  // Make sure to commit more memory than requested to have slack for storing
+  // a dangling reference outside of the raw size.
+  const size_t big_size = kMaxBucketed - SystemPageSize() + 1;
+  void* ptr = root().Alloc(big_size);
+
+  uintptr_t slot_start = root().ObjectToSlotStart(ptr);
+  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
+  ASSERT_TRUE(big_size + sizeof(void*) <=
+              root().AllocationCapacityFromSlotStart(slot_start));
+  ASSERT_TRUE(slot_span->CanStoreRawSize());
+
+  auto* value = ValueList::Create(root());
+
+  // This not only points past the object, but past all extras around it.
+  // However, there should be enough space between this and the end of the
+  // slot to store some data.
+  uintptr_t source_end =
+      slot_start +
+      base::bits::AlignUp(slot_span->GetRawSize(), alignof(ValueList*));
+  // Write the pointer.
+  // Since we stripped the MTE-tag to get |slot_start|, we need to retag it.
+  *static_cast<ValueList**>(TagAddr(source_end)) = value;
+
+  TestDanglingReferenceNotVisited(*this, value, root());
+}
+
+TEST_F(PartitionAllocPCScanTest, PointersToGuardPages) {
+  struct Pointers {
+    void* super_page;
+    void* metadata_page;
+    void* guard_page1;
+    void* scan_bitmap;
+    void* guard_page2;
+  };
+  auto* const pointers = static_cast<Pointers*>(
+      root().Alloc<partition_alloc::AllocFlags::kNoHooks>(sizeof(Pointers)));
+
+  // Converting to slot start strips MTE tag.
+  const uintptr_t super_page =
+      root().ObjectToSlotStart(pointers) & kSuperPageBaseMask;
+
+  // Initialize scannable pointers with addresses of guard pages and metadata.
+  // None of these point to an MTE-tagged area, so no need for retagging.
+  pointers->super_page = reinterpret_cast<void*>(super_page);
+  pointers->metadata_page = PartitionSuperPageToMetadataArea(super_page);
+  pointers->guard_page1 =
+      static_cast<char*>(pointers->metadata_page) + SystemPageSize();
+  pointers->scan_bitmap = SuperPageStateBitmap(super_page);
+  pointers->guard_page2 = reinterpret_cast<void*>(super_page + kSuperPageSize -
+                                                  PartitionPageSize());
+
+  // Simply run PCScan and expect no crashes.
+  RunPCScan();
+}
+
+TEST_F(PartitionAllocPCScanTest, TwoDanglingPointersToSameObject) {
+  using SourceList = List<8>;
+  using ValueList = List<128>;
+
+  auto* value = ValueList::Create(root(), nullptr);
+  // Create two source objects referring to |value|.
+  SourceList::Create(root(), value);
+  SourceList::Create(root(), value);
+
+  // Destroy |value| and run PCScan.
+  ValueList::Destroy(root(), value);
+  RunPCScan();
+  EXPECT_TRUE(IsInQuarantine(value));
+
+  // Check that the accounted size after the cycle is only one ValueList slot.
+  auto* slot_span_metadata = SlotSpan::FromObject(value);
+  const auto& quarantine =
+      PCScan::scheduler().scheduling_backend().GetQuarantineData();
+  EXPECT_EQ(slot_span_metadata->bucket->slot_size, quarantine.current_size);
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingPointerToInaccessibleArea) {
+  static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
+      static_cast<size_t>(PartitionPageSize() * 1.25);
+
+  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
+      root(), root().AdjustSizeForExtrasSubtract(
+                  kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
+
+  // Assert that the number of allocatable bytes for this bucket is smaller
+  // than or equal to the size of all allocated partition pages.
+  auto* bucket = full_slot_span.slot_span->bucket;
+  ASSERT_LE(bucket->get_bytes_per_span(),
+            bucket->get_pages_per_slot_span() * PartitionPageSize());
+
+  // Let the first object point past the end of the last one + some random
+  // offset.
+  // It should fall within the same slot, so no need for MTE-retagging.
+  static constexpr size_t kOffsetPastEnd = 7;
+  *reinterpret_cast<uint8_t**>(full_slot_span.first) =
+      reinterpret_cast<uint8_t*>(full_slot_span.last) +
+      kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages + kOffsetPastEnd;
+
+  // Destroy the last object and put it in quarantine.
+  root().Free(full_slot_span.last);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
+
+  // Run PCScan. After it, the quarantined object should not be promoted.
+  RunPCScan();
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
+}
+
+TEST_F(PartitionAllocPCScanTest, DanglingPointerOutsideUsablePart) {
+  using ValueList = List<kMaxBucketed - 4096>;
+  using SourceList = List<64>;
+
+  auto* value = ValueList::Create(root());
+  auto* slot_span = SlotSpanMetadata::FromObject(value);
+  ASSERT_TRUE(slot_span->CanStoreRawSize());
+
+  auto* source = SourceList::Create(root());
+
+  // Let the |source| object point to the unused area of |value| and expect
+  // |value| to be nevertheless marked during scanning.
+  // It should fall within the same slot, so no need for MTE-retagging.
+  static constexpr size_t kOffsetPastEnd = 7;
+  source->next = reinterpret_cast<ListBase*>(
+      reinterpret_cast<uint8_t*>(value + 1) + kOffsetPastEnd);
+
+  TestDanglingReference(*this, source, value, root());
+}
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+TEST_F(PartitionAllocPCScanWithMTETest, QuarantineOnlyOnTagOverflow) {
+  using ListType = List<64>;
+
+  if (!base::CPU::GetInstanceNoAllocation().has_mte()) {
+    return;
+  }
+
+  {
+    auto* obj1 = ListType::Create(root());
+    ListType::Destroy(root(), obj1);
+    auto* obj2 = ListType::Create(root());
+    // The test relies on an unrandomized freelist! If the slot was not moved
+    // to quarantine, assert that obj2 is the same as obj1 and the tags are
+    // different.
+    // MTE-retag |obj1|, as the tag changed when freeing it.
+    if (!HasOverflowTag(TagPtr(obj1))) {
+      // Assert that the pointer is the same.
+      ASSERT_EQ(UntagPtr(obj1), UntagPtr(obj2));
+      // Assert that the tag is different.
+      ASSERT_NE(obj1, obj2);
+    }
+  }
+
+  for (size_t i = 0; i < 16; ++i) {
+    auto* obj = ListType::Create(root());
+    ListType::Destroy(root(), obj);
+    // MTE-retag |obj|, as the tag changed when freeing it.
+    obj = TagPtr(obj);
+    // Check if the tag overflows. If so, the object must be in quarantine.
+    if (HasOverflowTag(obj)) {
+      EXPECT_TRUE(IsInQuarantine(obj));
+      EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(obj)));
+      return;
+    } else {
+      EXPECT_FALSE(IsInQuarantine(obj));
+      EXPECT_TRUE(IsInFreeList(root().ObjectToSlotStart(obj)));
+    }
+  }
+
+  EXPECT_FALSE(true && "Should never be reached");
+}
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+}  // namespace
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(USE_STARSCAN)
+#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/raceful_worklist.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/raceful_worklist.h
new file mode 100644
index 0000000..baa3e05
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/raceful_worklist.h
@@ -0,0 +1,148 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_RACEFUL_WORKLIST_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_RACEFUL_WORKLIST_H_
+
+#include <algorithm>
+#include <atomic>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+
+namespace partition_alloc::internal {
+
+template <typename T>
+class RacefulWorklist {
+  struct Node {
+    explicit Node(const T& value) : value(value) {}
+    Node(const Node& other)
+        : value(other.value),
+          is_being_visited(
+              other.is_being_visited.load(std::memory_order_relaxed)),
+          is_visited(other.is_visited.load(std::memory_order_relaxed)) {}
+
+    T value;
+    std::atomic<bool> is_being_visited{false};
+    std::atomic<bool> is_visited{false};
+  };
+  using Underlying = std::vector<Node, MetadataAllocator<Node>>;
+
+ public:
+  class RandomizedView {
+   public:
+    explicit RandomizedView(RacefulWorklist& worklist)
+        : worklist_(worklist), offset_(0) {
+      if (worklist.data_.size() > 0) {
+        offset_ = static_cast<size_t>(
+            internal::base::RandGenerator(worklist.data_.size()));
+      }
+    }
+
+    RandomizedView(const RandomizedView&) = delete;
+    const RandomizedView& operator=(const RandomizedView&) = delete;
+
+    template <typename Function>
+    void Visit(Function f);
+
+   private:
+    RacefulWorklist& worklist_;
+    size_t offset_;
+  };
+
+  RacefulWorklist() = default;
+
+  RacefulWorklist(const RacefulWorklist&) = delete;
+  RacefulWorklist& operator=(const RacefulWorklist&) = delete;
+
+  void Push(const T& t) { data_.push_back(Node(t)); }
+
+  template <typename It>
+  void Push(It begin, It end) {
+    std::transform(begin, end, std::back_inserter(data_),
+                   [](const T& t) { return Node(t); });
+  }
+
+  template <typename Function>
+  void VisitNonConcurrently(Function) const;
+
+ private:
+  Underlying data_;
+  std::atomic<bool> fully_visited_{false};
+};
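+
+// A minimal usage sketch; the item type, `pages`, and `ScanPage` are
+// placeholders. Producers fill the worklist up front, then each scanning
+// thread walks it through its own RandomizedView, so concurrent visitors
+// start at different offsets and rarely contend on the same items:
+//
+//   RacefulWorklist<uintptr_t> worklist;
+//   worklist.Push(pages.begin(), pages.end());
+//   // On each scanning thread:
+//   RacefulWorklist<uintptr_t>::RandomizedView view(worklist);
+//   view.Visit([](uintptr_t page) { ScanPage(page); });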
+
+template <typename T>
+template <typename Function>
+void RacefulWorklist<T>::VisitNonConcurrently(Function f) const {
+  for (const auto& t : data_) {
+    f(t.value);
+  }
+}
+
+template <typename T>
+template <typename Function>
+void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
+  auto& data = worklist_.data_;
+  std::vector<typename Underlying::iterator,
+              MetadataAllocator<typename Underlying::iterator>>
+      to_revisit;
+
+  // To avoid iterating the worklist, quickly check whether it was already
+  // fully visited.
+  if (worklist_.fully_visited_.load(std::memory_order_acquire)) {
+    return;
+  }
+
+  const auto offset_it = std::next(data.begin(), offset_);
+
+  // First, visit items starting from the offset.
+  for (auto it = offset_it; it != data.end(); ++it) {
+    if (it->is_visited.load(std::memory_order_relaxed)) {
+      continue;
+    }
+    if (it->is_being_visited.load(std::memory_order_relaxed)) {
+      to_revisit.push_back(it);
+      continue;
+    }
+    it->is_being_visited.store(true, std::memory_order_relaxed);
+    f(it->value);
+    it->is_visited.store(true, std::memory_order_relaxed);
+  }
+
+  // Then, visit items before the offset.
+  for (auto it = data.begin(); it != offset_it; ++it) {
+    if (it->is_visited.load(std::memory_order_relaxed)) {
+      continue;
+    }
+    if (it->is_being_visited.load(std::memory_order_relaxed)) {
+      to_revisit.push_back(it);
+      continue;
+    }
+    it->is_being_visited.store(true, std::memory_order_relaxed);
+    f(it->value);
+    it->is_visited.store(true, std::memory_order_relaxed);
+  }
+
+  // Finally, racefully visit items that were scanned by some other thread.
+  for (auto it : to_revisit) {
+    if (PA_LIKELY(it->is_visited.load(std::memory_order_relaxed))) {
+      continue;
+    }
+    // Don't bail out here if the item is being visited by another thread.
+    // This is helpful to guarantee forward progress if the other thread
+    // is making slow progress.
+    it->is_being_visited.store(true, std::memory_order_relaxed);
+    f(it->value);
+    it->is_visited.store(true, std::memory_order_relaxed);
+  }
+
+  worklist_.fully_visited_.store(true, std::memory_order_release);
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_RACEFUL_WORKLIST_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop.h
new file mode 100644
index 0000000..ff11624
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop.h
@@ -0,0 +1,255 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_SCAN_LOOP_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_SCAN_LOOP_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/starscan_fwd.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_X86_64)
+// Include order is important, so we disable formatting.
+// clang-format off
+// Including these headers directly should generally be avoided. For the
+// scanning loop, we check at runtime which SIMD extension we can use. Since
+// Chrome is compiled with -msse3 (the minimal requirement), we include the
+// headers directly to make the intrinsics available. Another option could be to
+// use inline assembly, but that would hinder compiler optimization for
+// vectorized instructions.
+#include <immintrin.h>
+#include <smmintrin.h>
+#include <avxintrin.h>
+#include <avx2intrin.h>
+// clang-format on
+#endif
+
+#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+#include <arm_neon.h>
+#endif
+
+namespace partition_alloc::internal {
+
+// Iterates over a range of memory using the best available SIMD extension.
+// Assumes that 64-bit platforms have pool support and that the begin pointers
+// of incoming ranges are properly aligned. The class is designed around the
+// CRTP version of the "template method" pattern (in GoF terms). CRTP is
+// needed for fast static dispatch.
+template <typename Derived>
+class ScanLoop {
+ public:
+  explicit ScanLoop(SimdSupport simd_type) : simd_type_(simd_type) {}
+
+  ScanLoop(const ScanLoop&) = delete;
+  ScanLoop& operator=(const ScanLoop&) = delete;
+
+  // Scans the input range. Assumes the range is properly aligned. Note that
+  // the function doesn't MTE-tag the input range, as it assumes that MTE is
+  // disabled when the function is called. See DisableMTEScope for details.
+  void Run(uintptr_t begin, uintptr_t end);
+
+ private:
+  const Derived& derived() const { return static_cast<const Derived&>(*this); }
+  Derived& derived() { return static_cast<Derived&>(*this); }
+
+#if defined(ARCH_CPU_X86_64)
+  __attribute__((target("avx2"))) void RunAVX2(uintptr_t, uintptr_t);
+  __attribute__((target("sse4.1"))) void RunSSE4(uintptr_t, uintptr_t);
+#endif
+#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+  void RunNEON(uintptr_t, uintptr_t);
+#endif
+
+  void RunUnvectorized(uintptr_t, uintptr_t);
+
+  SimdSupport simd_type_;
+};
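+
+// A minimal sketch of a derived scanner; the class name and method bodies are
+// placeholders, and a real implementation would answer the pool queries from
+// the PartitionAlloc address pools:
+//
+//   class MyScanLoop final : public ScanLoop<MyScanLoop> {
+//    public:
+//     explicit MyScanLoop(SimdSupport simd) : ScanLoop(simd) {}
+//     // Base and mask of the regular pool, used for the fast
+//     // "is this a PA pointer?" filter.
+//     static uintptr_t RegularPoolBase();
+//     static uintptr_t RegularPoolMask();
+//     // Invoked for every word that passed the pool filter.
+//     void CheckPointer(uintptr_t maybe_ptr);
+//   };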
+
+template <typename Derived>
+void ScanLoop<Derived>::Run(uintptr_t begin, uintptr_t end) {
+// We allow vectorization only on 64-bit platforms, since it requires support
+// for the 64-bit regular pool, and only on x86 because a special instruction
+// set is required.
+#if defined(ARCH_CPU_X86_64)
+  if (simd_type_ == SimdSupport::kAVX2) {
+    return RunAVX2(begin, end);
+  }
+  if (simd_type_ == SimdSupport::kSSE41) {
+    return RunSSE4(begin, end);
+  }
+#elif PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+  if (simd_type_ == SimdSupport::kNEON) {
+    return RunNEON(begin, end);
+  }
+#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+  return RunUnvectorized(begin, end);
+}
+
+template <typename Derived>
+void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
+  PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
+  PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+  // If the read value is a pointer into the PA region, it's likely
+  // MTE-tagged. Piggyback on |mask| to untag, for efficiency.
+  const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
+  const uintptr_t base = Derived::RegularPoolBase();
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+  for (; begin < end; begin += sizeof(uintptr_t)) {
+    // Read the region word-by-word. Everything that we read is a potential
+    // pointer to or inside an object on the heap. Such an object should be
+    // quarantined if an attempt is made to free it.
+    //
+    // Keep it MTE-untagged. See DisableMTEScope for details.
+    const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin);
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+    if (PA_LIKELY((maybe_ptr & mask) != base)) {
+      continue;
+    }
+#else
+    if (!maybe_ptr) {
+      continue;
+    }
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+    derived().CheckPointer(maybe_ptr);
+  }
+}
+
+#if defined(ARCH_CPU_X86_64)
+template <typename Derived>
+__attribute__((target("avx2"))) void ScanLoop<Derived>::RunAVX2(uintptr_t begin,
+                                                                uintptr_t end) {
+  static constexpr size_t kAlignmentRequirement = 32;
+  static constexpr size_t kWordsInVector = 4;
+  static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
+  PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
+  // Stick to integer instructions. This brings slightly better throughput.
+  // For example, according to the Intel docs, on Broadwell and Haswell the
+  // CPI of vmovdqa (_mm256_load_si256) is half (0.25) that of vmovapd
+  // (_mm256_load_pd).
+  const __m256i vbase = _mm256_set1_epi64x(derived().RegularPoolBase());
+  // If the read value is a pointer into the PA region, it's likely
+  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
+  const __m256i regular_pool_mask =
+      _mm256_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
+
+  static_assert(sizeof(__m256i) == kBytesInVector);
+  for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
+    // Keep it MTE-untagged. See DisableMTEScope for details.
+    const __m256i maybe_ptrs =
+        _mm256_load_si256(reinterpret_cast<__m256i*>(begin));
+    const __m256i vand = _mm256_and_si256(maybe_ptrs, regular_pool_mask);
+    const __m256i vcmp = _mm256_cmpeq_epi64(vand, vbase);
+    const int mask = _mm256_movemask_pd(_mm256_castsi256_pd(vcmp));
+    if (PA_LIKELY(!mask)) {
+      continue;
+    }
+    // It's important to extract pointers from the already loaded vector.
+    // Otherwise, new loads can break the in-pool assumption checked above.
+    if (mask & 0b0001) {
+      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 0));
+    }
+    if (mask & 0b0010) {
+      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 1));
+    }
+    if (mask & 0b0100) {
+      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 2));
+    }
+    if (mask & 0b1000) {
+      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 3));
+    }
+  }
+  // Run unvectorized on the remainder of the region.
+  RunUnvectorized(begin, end);
+}
+
+template <typename Derived>
+__attribute__((target("sse4.1"))) void ScanLoop<Derived>::RunSSE4(
+    uintptr_t begin,
+    uintptr_t end) {
+  static constexpr size_t kAlignmentRequirement = 16;
+  static constexpr size_t kWordsInVector = 2;
+  static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
+  PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
+  const __m128i vbase = _mm_set1_epi64x(derived().RegularPoolBase());
+  // If the read value is a pointer into the PA region, it's likely
+  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
+  const __m128i regular_pool_mask =
+      _mm_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
+
+  static_assert(sizeof(__m128i) == kBytesInVector);
+  for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
+    // Keep it MTE-untagged. See DisableMTEScope for details.
+    const __m128i maybe_ptrs =
+        _mm_loadu_si128(reinterpret_cast<__m128i*>(begin));
+    const __m128i vand = _mm_and_si128(maybe_ptrs, regular_pool_mask);
+    const __m128i vcmp = _mm_cmpeq_epi64(vand, vbase);
+    const int mask = _mm_movemask_pd(_mm_castsi128_pd(vcmp));
+    if (PA_LIKELY(!mask)) {
+      continue;
+    }
+    // It's important to extract pointers from the already loaded vector.
+    // Otherwise, new loads can break the in-pool assumption checked above.
+    if (mask & 0b01) {
+      derived().CheckPointer(_mm_cvtsi128_si64(maybe_ptrs));
+    }
+    if (mask & 0b10) {
+      // The mask is used to move the 4th and 3rd dwords into the second and
+      // first position.
+      static constexpr int kSecondWordMask = (3 << 2) | (2 << 0);
+      const __m128i shuffled = _mm_shuffle_epi32(maybe_ptrs, kSecondWordMask);
+      derived().CheckPointer(_mm_cvtsi128_si64(shuffled));
+    }
+  }
+  // Run unvectorized on the remainder of the region.
+  RunUnvectorized(begin, end);
+}
+#endif  // defined(ARCH_CPU_X86_64)
+
+#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+template <typename Derived>
+void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
+  static constexpr size_t kAlignmentRequirement = 16;
+  static constexpr size_t kWordsInVector = 2;
+  static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
+  PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
+  const uint64x2_t vbase = vdupq_n_u64(derived().RegularPoolBase());
+  // If the read value is a pointer into the PA region, it's likely
+  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
+  const uint64x2_t regular_pool_mask =
+      vdupq_n_u64(derived().RegularPoolMask() & kPtrUntagMask);
+
+  for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
+    // Keep it MTE-untagged. See DisableMTEScope for details.
+    const uint64x2_t maybe_ptrs = vld1q_u64(reinterpret_cast<uint64_t*>(begin));
+    const uint64x2_t vand = vandq_u64(maybe_ptrs, regular_pool_mask);
+    const uint64x2_t vcmp = vceqq_u64(vand, vbase);
+    const uint32_t max = vmaxvq_u32(vreinterpretq_u32_u64(vcmp));
+    if (PA_LIKELY(!max)) {
+      continue;
+    }
+    // It's important to extract pointers from the already loaded vector.
+    // Otherwise, new loads can break the in-pool assumption checked above.
+    if (vgetq_lane_u64(vcmp, 0)) {
+      derived().CheckPointer(vgetq_lane_u64(maybe_ptrs, 0));
+    }
+    if (vgetq_lane_u64(vcmp, 1)) {
+      derived().CheckPointer(vgetq_lane_u64(maybe_ptrs, 1));
+    }
+  }
+  // Run unvectorized on the remainder of the region.
+  RunUnvectorized(begin, end);
+}
+#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_SCAN_LOOP_H_
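
A minimal sketch of the pool-membership test that both the scalar and SIMD
paths above implement; the AVX2/SSE4.1/NEON variants apply the same
mask-and-compare to several words at once and use a movemask or lane check to
pick out the hits. The constants below are illustrative only (they mirror the
unit test that follows); in the real code they come from
Derived::RegularPoolBase() / Derived::RegularPoolMask(), and kPtrUntagMask
additionally strips a potential MTE tag before the comparison.

  #include <cstdint>

  // Hypothetical pool geometry, mirroring TestScanLoop in the unit test below.
  constexpr uintptr_t kPoolBase = 0x0000560000000000;
  constexpr uintptr_t kPoolMask = 0xffffff0000000000;
  constexpr uintptr_t kUntagMask = ~uintptr_t{0};  // no MTE tag in this sketch

  // True if |word| looks like a pointer into the regular pool and therefore
  // has to be handed to Derived::CheckPointer().
  bool MaybePoolPointer(uintptr_t word) {
    return (word & kPoolMask & kUntagMask) == kPoolBase;
  }

  // Example: 0x000056789abcdef0 passes the test, 0x0000aaaaaaaaaaaa does not.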
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop_unittest.cc
new file mode 100644
index 0000000..dd24490
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop_unittest.cc
@@ -0,0 +1,174 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/scan_loop.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+
+namespace partition_alloc::internal {
+
+namespace {
+
+class TestScanLoop final : public ScanLoop<TestScanLoop> {
+  friend class ScanLoop<TestScanLoop>;
+
+ public:
+  explicit TestScanLoop(SimdSupport ss) : ScanLoop(ss) {}
+
+  size_t visited() const { return visited_; }
+
+  void Reset() { visited_ = 0; }
+
+ private:
+  static constexpr uintptr_t kRegularPoolMask = 0xffffff0000000000;
+  static constexpr uintptr_t kBasePtr = 0x0000560000000000;
+
+  static uintptr_t RegularPoolBase() { return kBasePtr; }
+  static uintptr_t RegularPoolMask() { return kRegularPoolMask; }
+
+  void CheckPointer(uintptr_t maybe_ptr) { ++visited_; }
+
+  size_t visited_ = 0;
+};
+
+static constexpr uintptr_t kValidPtr = 0x000056789abcdef0;
+static constexpr uintptr_t kInvalidPtr = 0x0000aaaaaaaaaaaa;
+static constexpr uintptr_t kZeroPtr = 0x0;
+
+// Tests all possible combinations of incoming args.
+template <size_t Alignment, typename... Args>
+void TestOnRangeWithAlignment(TestScanLoop& sl,
+                              size_t expected_visited,
+                              Args... args) {
+  alignas(Alignment) uintptr_t range[] = {args...};
+  std::sort(std::begin(range), std::end(range));
+  do {
+    sl.Run(reinterpret_cast<uintptr_t>(std::begin(range)),
+           reinterpret_cast<uintptr_t>(std::end(range)));
+    EXPECT_EQ(expected_visited, sl.visited());
+    sl.Reset();
+  } while (std::next_permutation(std::begin(range), std::end(range)));
+}
+
+}  // namespace
+
+TEST(PartitionAllocScanLoopTest, UnvectorizedWithRegularPool) {
+  {
+    TestScanLoop sl(SimdSupport::kUnvectorized);
+    TestOnRangeWithAlignment<8>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kUnvectorized);
+    TestOnRangeWithAlignment<8>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kUnvectorized);
+    TestOnRangeWithAlignment<8>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr);
+  }
+  {
+    // Make sure zeros are skipped.
+    TestScanLoop sl(SimdSupport::kUnvectorized);
+    TestOnRangeWithAlignment<8>(sl, 1u, kValidPtr, kInvalidPtr, kZeroPtr);
+  }
+}
+
+#if defined(ARCH_CPU_X86_64)
+TEST(PartitionAllocScanLoopTest, VectorizedSSE4) {
+  base::CPU cpu;
+  if (!cpu.has_sse41()) {
+    return;
+  }
+  {
+    TestScanLoop sl(SimdSupport::kSSE41);
+    TestOnRangeWithAlignment<16>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kSSE41);
+    TestOnRangeWithAlignment<16>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kSSE41);
+    TestOnRangeWithAlignment<16>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kSSE41);
+    TestOnRangeWithAlignment<16>(sl, 3u, kValidPtr, kValidPtr, kValidPtr);
+  }
+}
+
+TEST(PartitionAllocScanLoopTest, VectorizedAVX2) {
+  base::CPU cpu;
+  if (!cpu.has_avx2()) {
+    return;
+  }
+  {
+    TestScanLoop sl(SimdSupport::kAVX2);
+    TestOnRangeWithAlignment<32>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr,
+                                 kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kAVX2);
+    TestOnRangeWithAlignment<32>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr,
+                                 kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kAVX2);
+    TestOnRangeWithAlignment<32>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr,
+                                 kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kAVX2);
+    TestOnRangeWithAlignment<32>(sl, 3u, kValidPtr, kValidPtr, kValidPtr,
+                                 kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kAVX2);
+    TestOnRangeWithAlignment<32>(sl, 4u, kValidPtr, kValidPtr, kValidPtr,
+                                 kValidPtr, kInvalidPtr);
+  }
+  {
+    // Check that the residual pointer is also visited.
+    TestScanLoop sl(SimdSupport::kAVX2);
+    TestOnRangeWithAlignment<32>(sl, 5u, kValidPtr, kValidPtr, kValidPtr,
+                                 kValidPtr, kValidPtr);
+  }
+}
+#endif  // defined(ARCH_CPU_X86_64)
+
+#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+TEST(PartitionAllocScanLoopTest, VectorizedNEON) {
+  {
+    TestScanLoop sl(SimdSupport::kNEON);
+    TestOnRangeWithAlignment<16>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kNEON);
+    TestOnRangeWithAlignment<16>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kNEON);
+    TestOnRangeWithAlignment<16>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr);
+  }
+  {
+    TestScanLoop sl(SimdSupport::kNEON);
+    TestOnRangeWithAlignment<16>(sl, 3u, kValidPtr, kValidPtr, kValidPtr);
+  }
+  {
+    // Don't visit zeroes.
+    TestScanLoop sl(SimdSupport::kNEON);
+    TestOnRangeWithAlignment<16>(sl, 1u, kInvalidPtr, kValidPtr, kZeroPtr);
+  }
+}
+#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.cc
new file mode 100644
index 0000000..311aee4
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.cc
@@ -0,0 +1,48 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.h"
+
+#include <memory>
+#include <mutex>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h"
+
+namespace partition_alloc::internal {
+
+std::unique_ptr<StarScanSnapshot> StarScanSnapshot::Create(
+    const PCScanInternal& pcscan) {
+  // Create unique_ptr object to avoid presubmit error.
+  std::unique_ptr<StarScanSnapshot> snapshot(new StarScanSnapshot(pcscan));
+  return snapshot;
+}
+
+StarScanSnapshot::StarScanSnapshot(const PCScanInternal& pcscan) {
+  PA_DCHECK(pcscan.is_initialized());
+  std::lock_guard<std::mutex> lock(pcscan.roots_mutex_);
+
+  for (const auto& root : pcscan.scannable_roots()) {
+    const auto& super_pages = root.second;
+    clear_worklist_.Push(super_pages.begin(), super_pages.end());
+    scan_worklist_.Push(super_pages.begin(), super_pages.end());
+    sweep_worklist_.Push(super_pages.begin(), super_pages.end());
+    if (pcscan.WriteProtectionEnabled()) {
+      unprotect_worklist_.Push(super_pages.begin(), super_pages.end());
+    }
+  }
+
+  for (const auto& root : pcscan.nonscannable_roots()) {
+    const auto& super_pages = root.second;
+    clear_worklist_.Push(super_pages.begin(), super_pages.end());
+    sweep_worklist_.Push(super_pages.begin(), super_pages.end());
+    if (pcscan.WriteProtectionEnabled()) {
+      unprotect_worklist_.Push(super_pages.begin(), super_pages.end());
+    }
+  }
+}
+
+StarScanSnapshot::~StarScanSnapshot() = default;
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.h
new file mode 100644
index 0000000..de9bcdc
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/snapshot.h
@@ -0,0 +1,94 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_SNAPSHOT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_SNAPSHOT_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan_internal.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/raceful_worklist.h"
+
+namespace partition_alloc::internal {
+
+class StarScanSnapshot final : public AllocatedOnPCScanMetadataPartition {
+ public:
+  using SuperPageBase = uintptr_t;
+  using SuperPagesWorklist = RacefulWorklist<SuperPageBase>;
+
+  class ViewBase {
+   public:
+    template <typename Function>
+    void VisitConcurrently(Function);
+
+    template <typename Function>
+    void VisitNonConcurrently(Function);
+
+   protected:
+    explicit ViewBase(SuperPagesWorklist& worklist) : worklist_(worklist) {}
+
+   private:
+    SuperPagesWorklist& worklist_;
+  };
+
+  class ClearingView : public ViewBase {
+   public:
+    inline explicit ClearingView(StarScanSnapshot& snapshot);
+  };
+  class ScanningView : public ViewBase {
+   public:
+    inline explicit ScanningView(StarScanSnapshot& snapshot);
+  };
+  class SweepingView : public ViewBase {
+   public:
+    inline explicit SweepingView(StarScanSnapshot& snapshot);
+  };
+  class UnprotectingView : public ViewBase {
+   public:
+    inline explicit UnprotectingView(StarScanSnapshot& snapshot);
+  };
+
+  static std::unique_ptr<StarScanSnapshot> Create(const PCScanInternal&);
+
+  StarScanSnapshot(const StarScanSnapshot&) = delete;
+  StarScanSnapshot& operator=(const StarScanSnapshot&) = delete;
+
+  ~StarScanSnapshot();
+
+ private:
+  explicit StarScanSnapshot(const PCScanInternal&);
+
+  SuperPagesWorklist clear_worklist_;
+  SuperPagesWorklist scan_worklist_;
+  SuperPagesWorklist unprotect_worklist_;
+  SuperPagesWorklist sweep_worklist_;
+};
+
+template <typename Function>
+void StarScanSnapshot::ViewBase::VisitConcurrently(Function f) {
+  SuperPagesWorklist::RandomizedView view(worklist_);
+  view.Visit(std::move(f));
+}
+
+template <typename Function>
+void StarScanSnapshot::ViewBase::VisitNonConcurrently(Function f) {
+  worklist_.VisitNonConcurrently(std::move(f));
+}
+
+StarScanSnapshot::ClearingView::ClearingView(StarScanSnapshot& snapshot)
+    : StarScanSnapshot::ViewBase(snapshot.clear_worklist_) {}
+
+StarScanSnapshot::ScanningView::ScanningView(StarScanSnapshot& snapshot)
+    : StarScanSnapshot::ViewBase(snapshot.scan_worklist_) {}
+
+StarScanSnapshot::SweepingView::SweepingView(StarScanSnapshot& snapshot)
+    : StarScanSnapshot::ViewBase(snapshot.sweep_worklist_) {}
+
+StarScanSnapshot::UnprotectingView::UnprotectingView(StarScanSnapshot& snapshot)
+    : StarScanSnapshot::ViewBase(snapshot.unprotect_worklist_) {}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_SNAPSHOT_H_
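
A minimal usage sketch for the snapshot/view split declared above, assuming a
PCScanInternal instance named |pcscan| is in scope (how PCScan obtains it is
outside this file) and assuming RacefulWorklist's randomized view passes each
super-page base to the callback: each phase of a scan task constructs the view
that wraps its own worklist and visits super pages either concurrently or
non-concurrently.

  // Hypothetical call site inside a scanning task.
  std::unique_ptr<StarScanSnapshot> snapshot = StarScanSnapshot::Create(pcscan);
  StarScanSnapshot::ScanningView scan_view(*snapshot);
  scan_view.VisitConcurrently([](uintptr_t super_page) {
    // Scan the quarantined objects that live in |super_page|.
  });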
diff --git a/base/allocator/partition_allocator/starscan/stack/asm/arm/push_registers_asm.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/arm/push_registers_asm.cc
similarity index 100%
rename from base/allocator/partition_allocator/starscan/stack/asm/arm/push_registers_asm.cc
rename to base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/arm/push_registers_asm.cc
diff --git a/base/allocator/partition_allocator/starscan/stack/asm/arm64/push_registers_asm.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/arm64/push_registers_asm.cc
similarity index 100%
rename from base/allocator/partition_allocator/starscan/stack/asm/arm64/push_registers_asm.cc
rename to base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/arm64/push_registers_asm.cc
diff --git a/base/allocator/partition_allocator/starscan/stack/asm/riscv64/push_registers_asm.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/riscv64/push_registers_asm.cc
similarity index 100%
rename from base/allocator/partition_allocator/starscan/stack/asm/riscv64/push_registers_asm.cc
rename to base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/riscv64/push_registers_asm.cc
diff --git a/base/allocator/partition_allocator/starscan/stack/asm/x64/push_registers_asm.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/x64/push_registers_asm.cc
similarity index 100%
rename from base/allocator/partition_allocator/starscan/stack/asm/x64/push_registers_asm.cc
rename to base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/x64/push_registers_asm.cc
diff --git a/base/allocator/partition_allocator/starscan/stack/asm/x86/push_registers_asm.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/x86/push_registers_asm.cc
similarity index 100%
rename from base/allocator/partition_allocator/starscan/stack/asm/x86/push_registers_asm.cc
rename to base/allocator/partition_allocator/src/partition_alloc/starscan/stack/asm/x86/push_registers_asm.cc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.cc
new file mode 100644
index 0000000..ec6ecbb
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.cc
@@ -0,0 +1,147 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+
+#include <cstdint>
+#include <limits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#else
+#include <pthread.h>
+#endif
+
+#if defined(LIBC_GLIBC)
+extern "C" void* __libc_stack_end;
+#endif
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(IS_WIN)
+
+void* GetStackTop() {
+#if defined(ARCH_CPU_X86_64)
+  return reinterpret_cast<void*>(
+      reinterpret_cast<NT_TIB64*>(NtCurrentTeb())->StackBase);
+#elif defined(ARCH_CPU_32_BITS)
+  return reinterpret_cast<void*>(
+      reinterpret_cast<NT_TIB*>(NtCurrentTeb())->StackBase);
+#elif defined(ARCH_CPU_ARM64)
+  // Windows 8 and later, see
+  // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getcurrentthreadstacklimits
+  ULONG_PTR lowLimit, highLimit;
+  ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
+  return reinterpret_cast<void*>(highLimit);
+#else
+#error "Unsupported GetStackStart"
+#endif
+}
+
+#elif BUILDFLAG(IS_APPLE)
+
+void* GetStackTop() {
+  return pthread_get_stackaddr_np(pthread_self());
+}
+
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+
+void* GetStackTop() {
+  pthread_attr_t attr;
+  int error = pthread_getattr_np(pthread_self(), &attr);
+  if (!error) {
+    void* base;
+    size_t size;
+    error = pthread_attr_getstack(&attr, &base, &size);
+    PA_CHECK(!error);
+    pthread_attr_destroy(&attr);
+    return reinterpret_cast<uint8_t*>(base) + size;
+  }
+
+#if defined(LIBC_GLIBC)
+  // pthread_getattr_np can fail for the main thread. In this case, just like
+  // NaCl, we rely on __libc_stack_end to give us the start of the stack.
+  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
+  return __libc_stack_end;
+#else
+  return nullptr;
+#endif  // defined(LIBC_GLIBC)
+}
+
+#else  // BUILDFLAG(IS_WIN)
+#error "Unsupported GetStackTop"
+#endif  // BUILDFLAG(IS_WIN)
+
+using IterateStackCallback = void (*)(const Stack*, StackVisitor*, uintptr_t*);
+extern "C" void PAPushAllRegistersAndIterateStack(const Stack*,
+                                                  StackVisitor*,
+                                                  IterateStackCallback);
+
+Stack::Stack(void* stack_top) : stack_top_(stack_top) {
+  PA_DCHECK(stack_top);
+}
+
+PA_NOINLINE uintptr_t* GetStackPointer() {
+  return reinterpret_cast<uintptr_t*>(__builtin_frame_address(0));
+}
+
+namespace {
+
+[[maybe_unused]] void IterateSafeStackIfNecessary(StackVisitor* visitor) {
+#if defined(__has_feature)
+#if __has_feature(safe_stack)
+  // Source:
+  // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/safestack/safestack.cpp
+  constexpr size_t kSafeStackAlignmentBytes = 16;
+  void* stack_ptr = __builtin___get_unsafe_stack_ptr();
+  void* stack_top = __builtin___get_unsafe_stack_top();
+  PA_CHECK(stack_top > stack_ptr);
+  PA_CHECK(0u == (reinterpret_cast<uintptr_t>(stack_ptr) &
+                  (kSafeStackAlignmentBytes - 1)));
+  PA_CHECK(0u == (reinterpret_cast<uintptr_t>(stack_top) &
+                  (kSafeStackAlignmentBytes - 1)));
+  visitor->VisitStack(reinterpret_cast<uintptr_t*>(stack_ptr),
+                      reinterpret_cast<uintptr_t*>(stack_top));
+#endif  // __has_feature(safe_stack)
+#endif  // defined(__has_feature)
+}
+
+// Called by the trampoline that pushes registers on the stack. This method
+// should never be inlined to ensure that a possible redzone cannot contain
+// any data that needs to be scanned.
+// No ASAN support as the method accesses redzones while walking the stack.
+[[maybe_unused]] PA_NOINLINE PA_NO_SANITIZE("address") void IteratePointersImpl(
+    const Stack* stack,
+    StackVisitor* visitor,
+    uintptr_t* stack_ptr) {
+  PA_DCHECK(stack);
+  PA_DCHECK(visitor);
+  PA_CHECK(nullptr != stack->stack_top());
+  // All supported platforms should have their stack aligned to at least
+  // sizeof(void*).
+  constexpr size_t kMinStackAlignment = sizeof(void*);
+  PA_CHECK(0u ==
+           (reinterpret_cast<uintptr_t>(stack_ptr) & (kMinStackAlignment - 1)));
+  visitor->VisitStack(stack_ptr,
+                      reinterpret_cast<uintptr_t*>(stack->stack_top()));
+}
+
+}  // namespace
+
+void Stack::IteratePointers(StackVisitor* visitor) const {
+#if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
+  PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
+  // No need to deal with callee-saved registers as they will be kept alive by
+  // the regular conservative stack iteration.
+  IterateSafeStackIfNecessary(visitor);
+#endif  // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h
new file mode 100644
index 0000000..74c195e
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STACK_STACK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STACK_STACK_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+
+namespace partition_alloc::internal {
+
+// Returns the current stack pointer.
+// TODO(bikineev,1202644): Remove this once base/stack_util.h lands.
+PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t* GetStackPointer();
+// Returns the top of the stack using system API.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* GetStackTop();
+
+// Interface for stack visitation.
+class StackVisitor {
+ public:
+  virtual void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) = 0;
+};
+
+// Abstraction over the stack. Supports handling of:
+// - native stack;
+// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) Stack final {
+ public:
+  // Sets start of the stack.
+  explicit Stack(void* stack_top);
+
+  // Word-aligned iteration of the stack. Flushes callee-saved registers and
+  // passes the range of the stack on to |visitor|.
+  void IteratePointers(StackVisitor* visitor) const;
+
+  // Returns the top of the stack.
+  void* stack_top() const { return stack_top_; }
+
+ private:
+  void* stack_top_;
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STACK_STACK_H_
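
A minimal sketch of how this interface is meant to be used, assuming the caller
captures the stack top on the current thread; the unit test that follows
exercises the same pattern with a needle pointer.

  class ExampleVisitor final : public StackVisitor {
   public:
    void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) override {
      // [stack_ptr, stack_top) is visited word by word; treat every word as a
      // potential pointer (e.g. test it against the regular pool).
      for (uintptr_t* p = stack_ptr; p != stack_top; ++p) {
        [[maybe_unused]] uintptr_t maybe_ptr = *p;
      }
    }
  };

  Stack stack(GetStackTop());       // record the top of the current stack
  ExampleVisitor visitor;
  stack.IteratePointers(&visitor);  // flushes registers, then visits the range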
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack_unittest.cc
new file mode 100644
index 0000000..ecbcd66
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack_unittest.cc
@@ -0,0 +1,350 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
+
+#include <memory>
+#include <ostream>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+#if BUILDFLAG(IS_LINUX) && (defined(ARCH_CPU_X86) || defined(ARCH_CPU_X86_64))
+#include <xmmintrin.h>
+#endif
+
+namespace partition_alloc::internal {
+
+namespace {
+
+class PartitionAllocStackTest : public ::testing::Test {
+ protected:
+  PartitionAllocStackTest() : stack_(std::make_unique<Stack>(GetStackTop())) {}
+
+  Stack* GetStack() const { return stack_.get(); }
+
+ private:
+  std::unique_ptr<Stack> stack_;
+};
+
+class StackScanner final : public StackVisitor {
+ public:
+  struct Container {
+    std::unique_ptr<int> value;
+  };
+
+  StackScanner() : container_(std::make_unique<Container>()) {
+    container_->value = std::make_unique<int>();
+  }
+
+  void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) final {
+    for (; stack_ptr != stack_top; ++stack_ptr) {
+      if (*stack_ptr == reinterpret_cast<uintptr_t>(container_->value.get())) {
+        found_ = true;
+      }
+    }
+  }
+
+  void Reset() { found_ = false; }
+  bool found() const { return found_; }
+  int* needle() const { return container_->value.get(); }
+
+ private:
+  std::unique_ptr<Container> container_;
+  bool found_ = false;
+};
+
+}  // namespace
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsOnStackValue) {
+  auto scanner = std::make_unique<StackScanner>();
+
+  // No check that the needle is initially not found as on some platforms it
+  // may be part of temporaries after setting it up through StackScanner.
+  {
+    [[maybe_unused]] int* volatile tmp = scanner->needle();
+    GetStack()->IteratePointers(scanner.get());
+    EXPECT_TRUE(scanner->found());
+  }
+}
+
+TEST_F(PartitionAllocStackTest,
+       IteratePointersFindsOnStackValuePotentiallyUnaligned) {
+  auto scanner = std::make_unique<StackScanner>();
+
+  // No check that the needle is initially not found as on some platforms it
+  // may be part of temporaries after setting it up through StackScanner.
+  {
+    [[maybe_unused]] char a = 'c';
+    [[maybe_unused]] int* volatile tmp = scanner->needle();
+    GetStack()->IteratePointers(scanner.get());
+    EXPECT_TRUE(scanner->found());
+  }
+}
+
+namespace {
+
+// Prevent inlining as that would allow the compiler to prove that the
+// parameter does not actually need to be materialized.
+//
+// Parameter positions are explicit to test various calling conventions.
+PA_NOINLINE void* RecursivelyPassOnParameterImpl(void* p1,
+                                                 void* p2,
+                                                 void* p3,
+                                                 void* p4,
+                                                 void* p5,
+                                                 void* p6,
+                                                 void* p7,
+                                                 void* p8,
+                                                 Stack* stack,
+                                                 StackVisitor* visitor) {
+  if (p1) {
+    return RecursivelyPassOnParameterImpl(nullptr, p1, nullptr, nullptr,
+                                          nullptr, nullptr, nullptr, nullptr,
+                                          stack, visitor);
+  } else if (p2) {
+    return RecursivelyPassOnParameterImpl(nullptr, nullptr, p2, nullptr,
+                                          nullptr, nullptr, nullptr, nullptr,
+                                          stack, visitor);
+  } else if (p3) {
+    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, p3,
+                                          nullptr, nullptr, nullptr, nullptr,
+                                          stack, visitor);
+  } else if (p4) {
+    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                          p4, nullptr, nullptr, nullptr, stack,
+                                          visitor);
+  } else if (p5) {
+    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                          nullptr, p5, nullptr, nullptr, stack,
+                                          visitor);
+  } else if (p6) {
+    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                          nullptr, nullptr, p6, nullptr, stack,
+                                          visitor);
+  } else if (p7) {
+    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                          nullptr, nullptr, nullptr, p7, stack,
+                                          visitor);
+  } else if (p8) {
+    stack->IteratePointers(visitor);
+    return p8;
+  }
+  return nullptr;
+}
+
+PA_NOINLINE void* RecursivelyPassOnParameter(size_t num,
+                                             void* parameter,
+                                             Stack* stack,
+                                             StackVisitor* visitor) {
+  switch (num) {
+    case 0:
+      stack->IteratePointers(visitor);
+      return parameter;
+    case 1:
+      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                            nullptr, nullptr, nullptr,
+                                            parameter, stack, visitor);
+    case 2:
+      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                            nullptr, nullptr, parameter,
+                                            nullptr, stack, visitor);
+    case 3:
+      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                            nullptr, parameter, nullptr,
+                                            nullptr, stack, visitor);
+    case 4:
+      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+                                            parameter, nullptr, nullptr,
+                                            nullptr, stack, visitor);
+    case 5:
+      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr,
+                                            parameter, nullptr, nullptr,
+                                            nullptr, nullptr, stack, visitor);
+    case 6:
+      return RecursivelyPassOnParameterImpl(nullptr, nullptr, parameter,
+                                            nullptr, nullptr, nullptr, nullptr,
+                                            nullptr, stack, visitor);
+    case 7:
+      return RecursivelyPassOnParameterImpl(nullptr, parameter, nullptr,
+                                            nullptr, nullptr, nullptr, nullptr,
+                                            nullptr, stack, visitor);
+    case 8:
+      return RecursivelyPassOnParameterImpl(parameter, nullptr, nullptr,
+                                            nullptr, nullptr, nullptr, nullptr,
+                                            nullptr, stack, visitor);
+    default:
+      __builtin_unreachable();
+  }
+  __builtin_unreachable();
+}
+
+}  // namespace
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting0) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(0, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting1) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(1, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting2) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(2, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting3) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(3, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting4) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(4, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting5) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(5, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting6) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(6, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting7) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(7, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting8) {
+  auto scanner = std::make_unique<StackScanner>();
+  void* needle = RecursivelyPassOnParameter(8, scanner->needle(), GetStack(),
+                                            scanner.get());
+  EXPECT_EQ(scanner->needle(), needle);
+  EXPECT_TRUE(scanner->found());
+}
+
+// The following test verifies that the stack-scanning trampoline pushes
+// callee-saved registers; it uses inline assembly and has been checked to work
+// with clang.
+//
+// The test uses a macro loop, as asm() can only be passed string literals.
+#if defined(__clang__) && defined(ARCH_CPU_X86_64) && !BUILDFLAG(IS_WIN)
+
+// Excluded from test: rbp
+#define FOR_ALL_CALLEE_SAVED_REGS(V) \
+  V(rbx)                             \
+  V(r12)                             \
+  V(r13)                             \
+  V(r14)                             \
+  V(r15)
+
+namespace {
+
+extern "C" void IteratePointersNoMangling(Stack* stack, StackVisitor* visitor) {
+  stack->IteratePointers(visitor);
+}
+
+#define DEFINE_MOVE_INTO(reg)                                         \
+  PA_NOINLINE void MoveInto##reg(Stack* local_stack,                  \
+                                 StackScanner* local_scanner) {       \
+    asm volatile("   mov %0, %%" #reg                                 \
+                 "\n mov %1, %%rdi"                                   \
+                 "\n mov %2, %%rsi"                                   \
+                 "\n call %P3"                                        \
+                 "\n mov $0, %%" #reg                                 \
+                 :                                                    \
+                 : "r"(local_scanner->needle()), "r"(local_stack),    \
+                   "r"(local_scanner), "i"(IteratePointersNoMangling) \
+                 : "memory", #reg, "rdi", "rsi", "cc");               \
+  }
+
+FOR_ALL_CALLEE_SAVED_REGS(DEFINE_MOVE_INTO)
+
+}  // namespace
+
+TEST_F(PartitionAllocStackTest, IteratePointersFindsCalleeSavedRegisters) {
+  auto scanner = std::make_unique<StackScanner>();
+
+  // No check that the needle is initially not found as on some platforms it
+  // may be part of temporaries after setting it up through StackScanner.
+
+// First, clear all callee-saved registers.
+#define CLEAR_REGISTER(reg) asm("mov $0, %%" #reg : : : #reg);
+
+  FOR_ALL_CALLEE_SAVED_REGS(CLEAR_REGISTER)
+#undef CLEAR_REGISTER
+
+  // Keep local raw pointers so that the instruction sequences below stay small.
+  auto* local_stack = GetStack();
+  auto* local_scanner = scanner.get();
+
+// Moves |local_scanner->needle()| into a callee-saved register, leaving the
+// callee-saved register as the only register referencing the needle.
+// (Ignoring implementation-dependent dirty registers/stack.)
+#define KEEP_ALIVE_FROM_CALLEE_SAVED(reg)                                 \
+  local_scanner->Reset();                                                 \
+  MoveInto##reg(local_stack, local_scanner);                              \
+  EXPECT_TRUE(local_scanner->found())                                     \
+      << "pointer in callee-saved register not found. register: " << #reg \
+      << std::endl;
+
+  FOR_ALL_CALLEE_SAVED_REGS(KEEP_ALIVE_FROM_CALLEE_SAVED)
+#undef KEEP_ALIVE_FROM_CALLEE_SAVED
+#undef FOR_ALL_CALLEE_SAVED_REGS
+}
+
+#endif  // defined(__clang__) && defined(ARCH_CPU_X86_64) && !BUILDFLAG(IS_WIN)
+
+#if BUILDFLAG(IS_LINUX) && (defined(ARCH_CPU_X86) || defined(ARCH_CPU_X86_64))
+class CheckStackAlignmentVisitor final : public StackVisitor {
+ public:
+  void VisitStack(uintptr_t*, uintptr_t*) final {
+    // Check that the stack doesn't get misaligned by asm trampolines.
+    float f[4] = {0.};
+    [[maybe_unused]] volatile auto xmm = ::_mm_load_ps(f);
+  }
+};
+
+TEST_F(PartitionAllocStackTest, StackAlignment) {
+  auto checker = std::make_unique<CheckStackAlignmentVisitor>();
+  GetStack()->IteratePointers(checker.get());
+}
+#endif  // BUILDFLAG(IS_LINUX) && (defined(ARCH_CPU_X86) ||
+        // defined(ARCH_CPU_X86_64))
+
+}  // namespace partition_alloc::internal
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/starscan_fwd.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/starscan_fwd.h
new file mode 100644
index 0000000..023dd08
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/starscan_fwd.h
@@ -0,0 +1,30 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STARSCAN_FWD_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STARSCAN_FWD_H_
+
+#include <cstdint>
+
+namespace partition_alloc::internal {
+
+// Defines which thread executes a StarScan task.
+enum class Context {
+  // For tasks executed from mutator threads (safepoints).
+  kMutator,
+  // For concurrent scanner tasks.
+  kScanner
+};
+
+// Defines ISA extension for scanning.
+enum class SimdSupport : uint8_t {
+  kUnvectorized,
+  kSSE41,
+  kAVX2,
+  kNEON,
+};
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STARSCAN_FWD_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap.h
new file mode 100644
index 0000000..bdefe3f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap.h
@@ -0,0 +1,491 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATE_BITMAP_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATE_BITMAP_H_
+
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <tuple>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+
+namespace partition_alloc::internal {
+
+// Bitmap which tracks allocation states. An allocation can be in one of 3
+// states:
+// - freed (00),
+// - allocated (11),
+// - quarantined (01 or 10, depending on the *Scan epoch).
+//
+// The state machine of allocation states:
+//         +-------------+                +-------------+
+//         |             |    malloc()    |             |
+//         |    Freed    +--------------->|  Allocated  |
+//         |    (00)     |    (or 11)     |    (11)     |
+//         |             |                |             |
+//         +-------------+                +------+------+
+//                ^                              |
+//                |                              |
+//    real_free() | (and 00)              free() | (and 01(10))
+//                |                              |
+//                |       +-------------+        |
+//                |       |             |        |
+//                +-------+ Quarantined |<-------+
+//                        |   (01,10)   |
+//                        |             |
+//                        +-------------+
+//                         ^           |
+//                         |  mark()   |
+//                         +-----------+
+//                           (xor 11)
+//
+// The bitmap can be safely accessed from multiple threads, but this doesn't
+// imply visibility of the data (i.e. no ordering guarantees, since relaxed
+// atomics are used underneath). The bitmap itself must be created inside a
+// page, size and alignment of which are specified as template arguments
+// |PageSize| and |PageAlignment|. |AllocationAlignment| specifies the minimal
+// alignment of objects that are allocated inside a page (serves as the
+// granularity in the bitmap).
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+class StateBitmap final {
+  enum class State : uint8_t {
+    kFreed = 0b00,
+    kQuarantined1 = 0b01,
+    kQuarantined2 = 0b10,
+    kAlloced = 0b11,
+    kNumOfStates = 4,
+  };
+
+  using CellType = uintptr_t;
+  static constexpr size_t kBitsPerCell = sizeof(CellType) * CHAR_BIT;
+  static constexpr size_t kBitsNeededForAllocation =
+      base::bits::Log2Floor(static_cast<size_t>(State::kNumOfStates));
+  static constexpr CellType kStateMask = (1 << kBitsNeededForAllocation) - 1;
+
+  static constexpr size_t kBitmapSize =
+      (PageSize + ((kBitsPerCell * AllocationAlignment) - 1)) /
+      (kBitsPerCell * AllocationAlignment) * kBitsNeededForAllocation;
+  static constexpr size_t kPageOffsetMask = PageAlignment - 1;
+  static constexpr size_t kPageBaseMask = ~kPageOffsetMask;
+
+ public:
+  using Epoch = size_t;
+
+  static constexpr size_t kPageSize = PageSize;
+  static constexpr size_t kPageAlignment = PageAlignment;
+  static constexpr size_t kAllocationAlignment = AllocationAlignment;
+  static constexpr size_t kMaxEntries =
+      (kBitmapSize / kBitsNeededForAllocation) * kBitsPerCell;
+
+  inline StateBitmap();
+
+  // Sets the bits corresponding to |address| as allocated.
+  PA_ALWAYS_INLINE void Allocate(uintptr_t address);
+
+  // Sets the bits corresponding to |address| as quarantined. Must be called
+  // only once, in which case returns |true|. Otherwise, if the object was
+  // already quarantined or freed before, returns |false|.
+  PA_ALWAYS_INLINE bool Quarantine(uintptr_t address, Epoch epoch);
+
+  // Marks ("promotes") a quarantined object. Returns |true| on success, or
+  // |false| if the object was already marked.
+  PA_ALWAYS_INLINE bool MarkQuarantinedAsReachable(uintptr_t address,
+                                                   Epoch epoch);
+
+  // Sets the bits corresponding to |address| as freed.
+  PA_ALWAYS_INLINE void Free(uintptr_t address);
+
+  // Getters that check object state.
+  PA_ALWAYS_INLINE bool IsAllocated(uintptr_t address) const;
+  PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const;
+  PA_ALWAYS_INLINE bool IsFreed(uintptr_t address) const;
+
+  // Iterate objects depending on their state.
+  //
+  // The callback is of type
+  //   void(uintptr_t object_start)
+  template <typename Callback>
+  inline void IterateAllocated(Callback) const;
+  // The callback is of type
+  //   void(uintptr_t object_start)
+  template <typename Callback, decltype(std::declval<Callback>()(0), 0) = 0>
+  inline void IterateQuarantined(Callback) const;
+  // The callback is of type
+  //   void(uintptr_t object_start, bool is_marked)
+  template <typename Callback,
+            decltype(std::declval<Callback>()(0, true), 0) = 0>
+  inline void IterateQuarantined(size_t epoch, Callback) const;
+  // The callback is of type
+  //   void(uintptr_t object_start)
+  template <typename Callback>
+  inline void IterateUnmarkedQuarantined(size_t epoch, Callback) const;
+  // The callback is of type
+  //   void(uintptr_t object_start)
+  // The function is similar to the one above, but it also frees (clears) the
+  // iterated bits.
+  template <typename Callback>
+  inline void IterateUnmarkedQuarantinedAndFree(size_t epoch, Callback);
+
+  inline void Clear();
+
+ private:
+  std::atomic<CellType>& AsAtomicCell(size_t cell_index) {
+    return reinterpret_cast<std::atomic<CellType>&>(bitmap_[cell_index]);
+  }
+  const std::atomic<CellType>& AsAtomicCell(size_t cell_index) const {
+    return reinterpret_cast<const std::atomic<CellType>&>(bitmap_[cell_index]);
+  }
+
+  PA_ALWAYS_INLINE unsigned GetBits(uintptr_t address) const;
+
+  struct FilterQuarantine {
+    PA_ALWAYS_INLINE bool operator()(CellType cell) const;
+    const size_t epoch;
+  };
+
+  struct FilterUnmarkedQuarantine {
+    PA_ALWAYS_INLINE bool operator()(CellType cell) const;
+    const size_t epoch;
+  };
+
+  struct FilterAllocated {
+    PA_ALWAYS_INLINE bool operator()(CellType cell) const;
+    const size_t epoch;
+  };
+
+  // Simply calls the callback.
+  struct SimpleCallbackForwarder {
+    PA_ALWAYS_INLINE explicit SimpleCallbackForwarder(size_t epoch) {}
+
+    template <typename Callback>
+    PA_ALWAYS_INLINE void operator()(Callback,
+                                     uintptr_t pointer,
+                                     CellType bits) const;
+  };
+
+  // Calls the callback and passes a bool argument, indicating whether a
+  // quarantine object is marked or not.
+  struct QuarantineCallbackForwarder {
+    PA_ALWAYS_INLINE explicit QuarantineCallbackForwarder(size_t epoch)
+        : is_unmarked{epoch} {}
+
+    template <typename Callback>
+    PA_ALWAYS_INLINE void operator()(Callback,
+                                     uintptr_t pointer,
+                                     CellType bits) const;
+    FilterUnmarkedQuarantine is_unmarked;
+  };
+
+  template <typename Filter,
+            typename CallbackForwarder,
+            typename Callback,
+            bool Clear>
+  inline void IterateImpl(size_t epoch, Callback);
+
+  PA_ALWAYS_INLINE CellType LoadCell(size_t cell_index) const;
+  PA_ALWAYS_INLINE static constexpr std::pair<size_t, size_t>
+      AllocationIndexAndBit(uintptr_t);
+
+  std::array<CellType, kBitmapSize> bitmap_;
+};
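
A worked trace of the 2-bit transitions implemented below, shown for a single
object and an even *Scan epoch (in an odd epoch the roles of 01 and 10 swap,
which is why Quarantine(), MarkQuarantinedAsReachable() and the quarantine
filters take the epoch into account):

  Allocate():                    00 --fetch_or(11)----> 11  (kAlloced)
  Quarantine(addr, epoch = 0):   11 --fetch_and(~10)--> 01  (unmarked quarantine)
  MarkQuarantinedAsReachable():  01 --xor 11----------> 10  (marked quarantine)
  Free():                        10 --fetch_and(~11)--> 00  (kFreed)

IsQuarantined() exploits the fact that exactly one bit is set in either
quarantine state, hence the popcount == 1 check, and FilterUnmarkedQuarantine's
"bits - (epoch & 1) == 01" selects 01 in even epochs and 10 in odd ones.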
+
+// The constructor can be omitted, but Chromium's clang plugin wrongly
+// warns that the type is not trivially constructible.
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+inline StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    StateBitmap() = default;
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Allocate(
+    uintptr_t address) {
+  PA_SCAN_DCHECK(IsFreed(address));
+  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
+  const CellType mask = static_cast<CellType>(State::kAlloced) << object_bit;
+  auto& cell = AsAtomicCell(cell_index);
+  cell.fetch_or(mask, std::memory_order_relaxed);
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE bool
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Quarantine(
+    uintptr_t address,
+    Epoch epoch) {
+  // *Scan is enabled at runtime, which means that we can quarantine an
+  // allocation that was previously not recorded in the bitmap. Hence, we can't
+  // reliably check the transition from kAlloced to kQuarantined.
+  static_assert((~static_cast<CellType>(State::kQuarantined1) & kStateMask) ==
+                    (static_cast<CellType>(State::kQuarantined2) & kStateMask),
+                "kQuarantined1 must be inverted kQuarantined2");
+  const State quarantine_state =
+      epoch & 0b1 ? State::kQuarantined1 : State::kQuarantined2;
+  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
+  const CellType mask =
+      ~(static_cast<CellType>(quarantine_state) << object_bit);
+  auto& cell = AsAtomicCell(cell_index);
+  const CellType cell_before = cell.fetch_and(mask, std::memory_order_relaxed);
+  // Check if the previous state was also quarantined.
+  return __builtin_popcount(static_cast<unsigned>((cell_before >> object_bit) &
+                                                  kStateMask)) != 1;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE bool
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    MarkQuarantinedAsReachable(uintptr_t address, Epoch epoch) {
+  static_assert((~static_cast<CellType>(State::kQuarantined1) & kStateMask) ==
+                    (static_cast<CellType>(State::kQuarantined2) & kStateMask),
+                "kQuarantined1 must be inverted kQuarantined2");
+  const State quarantine_state_old =
+      epoch & 0b1 ? State::kQuarantined2 : State::kQuarantined1;
+  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
+  const CellType clear_mask =
+      ~(static_cast<CellType>(State::kAlloced) << object_bit);
+  const CellType set_mask_old = static_cast<CellType>(quarantine_state_old)
+                                << object_bit;
+  const CellType xor_mask = static_cast<CellType>(0b11) << object_bit;
+  auto& cell = AsAtomicCell(cell_index);
+  CellType expected =
+      (cell.load(std::memory_order_relaxed) & clear_mask) | set_mask_old;
+  CellType desired = expected ^ xor_mask;
+  while (PA_UNLIKELY(!cell.compare_exchange_weak(expected, desired,
+                                                 std::memory_order_relaxed,
+                                                 std::memory_order_relaxed))) {
+    // First check if the object was already marked before or in parallel.
+    if ((expected & set_mask_old) == 0) {
+      // Check that the bits can't be in any state other than
+      // marked-quarantined.
+      PA_SCAN_DCHECK(
+          ((expected >> object_bit) & kStateMask) ==
+          (~static_cast<CellType>(quarantine_state_old) & kStateMask));
+      return false;
+    }
+    // Otherwise, some other bits in the cell were concurrently changed. Update
+    // desired and retry.
+    desired = expected ^ xor_mask;
+  }
+  return true;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Free(
+    uintptr_t address) {
+  // *Scan is enabled at runtime, which means that we can free an allocation
+  // that was previously not recorded as quarantined in the bitmap. Hence, we
+  // can't reliably check the transition from kQuarantined to kFreed.
+  static_assert((~static_cast<CellType>(State::kAlloced) & kStateMask) ==
+                    (static_cast<CellType>(State::kFreed) & kStateMask),
+                "kFreed must be inverted kAlloced");
+  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
+  const CellType mask = ~(static_cast<CellType>(State::kAlloced) << object_bit);
+  auto& cell = AsAtomicCell(cell_index);
+  cell.fetch_and(mask, std::memory_order_relaxed);
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE bool
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IsAllocated(
+    uintptr_t address) const {
+  return GetBits(address) == static_cast<unsigned>(State::kAlloced);
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE bool
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IsQuarantined(
+    uintptr_t address) const {
+  // On x86, the CPI of popcnt is the same as that of tzcnt, so we use it
+  // instead of tzcnt + inversion.
+  return __builtin_popcount(GetBits(address)) == 1;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE bool
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IsFreed(
+    uintptr_t address) const {
+  return GetBits(address) == static_cast<unsigned>(State::kFreed);
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE
+    typename StateBitmap<PageSize, PageAlignment, AllocationAlignment>::CellType
+    StateBitmap<PageSize, PageAlignment, AllocationAlignment>::LoadCell(
+        size_t cell_index) const {
+  return AsAtomicCell(cell_index).load(std::memory_order_relaxed);
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+PA_ALWAYS_INLINE constexpr std::pair<size_t, size_t>
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    AllocationIndexAndBit(uintptr_t address) {
+  const uintptr_t offset_in_page = address & kPageOffsetMask;
+  const size_t allocation_number =
+      (offset_in_page / kAllocationAlignment) * kBitsNeededForAllocation;
+  const size_t cell_index = allocation_number / kBitsPerCell;
+  PA_SCAN_DCHECK(kBitmapSize > cell_index);
+  const size_t bit = allocation_number % kBitsPerCell;
+  return {cell_index, bit};
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+unsigned StateBitmap<PageSize, PageAlignment, AllocationAlignment>::GetBits(
+    uintptr_t address) const {
+  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
+  return (LoadCell(cell_index) >> object_bit) & kStateMask;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+bool StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    FilterQuarantine::operator()(CellType bits) const {
+  return __builtin_popcount(static_cast<unsigned>(bits)) == 1;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+bool StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    FilterUnmarkedQuarantine::operator()(CellType bits) const {
+  // Truth table:
+  // epoch & 1 | bits | result
+  //     0     |  01  |   1
+  //     1     |  10  |   1
+  //     *     |  **  |   0
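+  // For instance: when the epoch is even, unmarked-quarantined objects have
+  // bits 0b01 and 0b01 - 0 == 0b01 holds; when the epoch is odd they have
+  // bits 0b10 and 0b10 - 1 == 0b01 holds. Any other bit pattern fails.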
+  return bits - (epoch & 0b01) == 0b01;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+bool StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    FilterAllocated::operator()(CellType bits) const {
+  return bits == 0b11;
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback>
+PA_ALWAYS_INLINE void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    SimpleCallbackForwarder::operator()(Callback callback,
+                                        uintptr_t pointer,
+                                        CellType bits) const {
+  callback(pointer);
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback>
+PA_ALWAYS_INLINE void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    QuarantineCallbackForwarder::operator()(Callback callback,
+                                            uintptr_t pointer,
+                                            CellType bits) const {
+  callback(pointer, !is_unmarked(bits));
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Filter,
+          typename CallbackForwarder,
+          typename Callback,
+          bool Clear>
+inline void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateImpl(
+    size_t epoch,
+    Callback callback) {
+  // The bitmap (|this|) is allocated inside a page aligned to
+  // |kPageAlignment|, so the page base can be recovered by masking below.
+  Filter filter{epoch};
+  CallbackForwarder callback_forwarder{epoch};
+  const uintptr_t base = reinterpret_cast<uintptr_t>(this) & kPageBaseMask;
+  for (size_t cell_index = 0; cell_index < kBitmapSize; ++cell_index) {
+    CellType value = LoadCell(cell_index);
+    while (value) {
+      const size_t trailing_zeroes =
+          static_cast<size_t>(base::bits::CountTrailingZeroBits(value) & ~0b1);
+      const size_t clear_value_mask =
+          ~(static_cast<CellType>(kStateMask) << trailing_zeroes);
+      const CellType bits = (value >> trailing_zeroes) & kStateMask;
+      if (!filter(bits)) {
+        // Clear this object's bits in |value| to advance the iteration.
+        value &= clear_value_mask;
+        continue;
+      }
+      const size_t object_number =
+          (cell_index * kBitsPerCell) + trailing_zeroes;
+      const uintptr_t object_address =
+          base +
+          (object_number * kAllocationAlignment / kBitsNeededForAllocation);
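+      // For example (values assumed for illustration): with 2 state bits per
+      // allocation and 16-byte alignment, object_number 6 corresponds to byte
+      // offset 6 * 16 / 2 = 48 from the page base.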
+
+      callback_forwarder(callback, object_address, bits);
+
+      if (Clear) {
+        // Clear the current bits.
+        AsAtomicCell(cell_index)
+            .fetch_and(clear_value_mask, std::memory_order_relaxed);
+      }
+
+      // Clear this object's bits in |value| to advance the iteration.
+      value &= clear_value_mask;
+    }
+  }
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback>
+inline void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateAllocated(
+    Callback callback) const {
+  const_cast<StateBitmap*>(this)
+      ->IterateImpl<FilterAllocated, SimpleCallbackForwarder, Callback, false>(
+          0, std::move(callback));
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback, decltype(std::declval<Callback>()(0), 0)>
+inline void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateQuarantined(
+    Callback callback) const {
+  const_cast<StateBitmap*>(this)
+      ->IterateImpl<FilterQuarantine, SimpleCallbackForwarder, Callback, false>(
+          0, std::move(callback));
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback, decltype(std::declval<Callback>()(0, true), 0)>
+inline void
+StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateQuarantined(
+    size_t epoch,
+    Callback callback) const {
+  const_cast<StateBitmap*>(this)
+      ->IterateImpl<FilterQuarantine, QuarantineCallbackForwarder, Callback,
+                    false>(epoch, std::move(callback));
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback>
+inline void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    IterateUnmarkedQuarantined(size_t epoch, Callback callback) const {
+  const_cast<StateBitmap*>(this)
+      ->IterateImpl<FilterUnmarkedQuarantine, SimpleCallbackForwarder, Callback,
+                    false>(epoch, std::move(callback));
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+template <typename Callback>
+inline void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
+    IterateUnmarkedQuarantinedAndFree(size_t epoch, Callback callback) {
+  IterateImpl<FilterUnmarkedQuarantine, SimpleCallbackForwarder, Callback,
+              true>(epoch, std::move(callback));
+}
+
+template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
+void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Clear() {
+  std::fill(bitmap_.begin(), bitmap_.end(), '\0');
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATE_BITMAP_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap_unittest.cc
new file mode 100644
index 0000000..3878944
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap_unittest.cc
@@ -0,0 +1,346 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/state_bitmap.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal {
+
+namespace {
+
+using TestBitmap = StateBitmap<kSuperPageSize, kSuperPageSize, kAlignment>;
+
+class PageWithBitmap final {
+ public:
+  PageWithBitmap()
+      : base_(AllocPages(kSuperPageSize,
+                         kSuperPageAlignment,
+                         PageAccessibilityConfiguration(
+                             PageAccessibilityConfiguration::kReadWrite),
+                         PageTag::kPartitionAlloc)),
+        bitmap_(new(reinterpret_cast<void*>(base_)) TestBitmap) {}
+
+  PageWithBitmap(const PageWithBitmap&) = delete;
+  PageWithBitmap& operator=(const PageWithBitmap&) = delete;
+
+  ~PageWithBitmap() { FreePages(base_, kSuperPageSize); }
+
+  TestBitmap& bitmap() const { return *bitmap_; }
+
+  void* base() const { return reinterpret_cast<void*>(base_); }
+  size_t size() const { return kSuperPageSize; }
+
+  uintptr_t base_;
+  TestBitmap* bitmap_;
+};
+
+class PartitionAllocStateBitmapTest : public ::testing::Test {
+ protected:
+  TestBitmap& bitmap() const { return page.bitmap(); }
+
+  void AllocateObject(size_t object_position) {
+    page.bitmap().Allocate(ObjectAddress(object_position));
+  }
+
+  void FreeObject(size_t object_position) {
+    page.bitmap().Free(ObjectAddress(object_position));
+  }
+
+  bool QuarantineObject(size_t object_position, size_t epoch) {
+    return page.bitmap().Quarantine(ObjectAddress(object_position), epoch);
+  }
+
+  bool MarkQuarantinedObject(size_t object_position, size_t epoch) {
+    return page.bitmap().MarkQuarantinedAsReachable(
+        ObjectAddress(object_position), epoch);
+  }
+
+  bool IsAllocated(size_t object_position) const {
+    return page.bitmap().IsAllocated(ObjectAddress(object_position));
+  }
+
+  bool IsQuarantined(size_t object_position) const {
+    return page.bitmap().IsQuarantined(ObjectAddress(object_position));
+  }
+
+  bool IsFreed(size_t object_position) const {
+    return page.bitmap().IsFreed(ObjectAddress(object_position));
+  }
+
+  void AssertAllocated(size_t object_position) const {
+    EXPECT_TRUE(IsAllocated(object_position));
+    EXPECT_FALSE(IsQuarantined(object_position));
+    EXPECT_FALSE(IsFreed(object_position));
+  }
+
+  void AssertFreed(size_t object_position) const {
+    EXPECT_FALSE(IsAllocated(object_position));
+    EXPECT_FALSE(IsQuarantined(object_position));
+    EXPECT_TRUE(IsFreed(object_position));
+  }
+
+  void AssertQuarantined(size_t object_position) const {
+    EXPECT_FALSE(IsAllocated(object_position));
+    EXPECT_TRUE(IsQuarantined(object_position));
+    EXPECT_FALSE(IsFreed(object_position));
+  }
+
+  size_t CountAllocated() const {
+    size_t count = 0;
+    bitmap().IterateAllocated([&count](uintptr_t) { count++; });
+    return count;
+  }
+
+  size_t CountQuarantined() const {
+    size_t count = 0;
+    bitmap().IterateQuarantined([&count](uintptr_t) { count++; });
+    return count;
+  }
+
+  bool IsQuarantineEmpty() const { return !CountQuarantined(); }
+
+  uintptr_t ObjectAddress(size_t pos) const {
+    return reinterpret_cast<uintptr_t>(page.base()) + sizeof(TestBitmap) +
+           pos * kAlignment;
+  }
+
+  static constexpr uintptr_t LastIndex() {
+    return TestBitmap::kMaxEntries - (sizeof(TestBitmap) / kAlignment) - 1;
+  }
+
+  static constexpr uintptr_t MiddleIndex() { return LastIndex() / 2; }
+
+ private:
+  PageWithBitmap page;
+};
+
+constexpr size_t kTestEpoch = 0;
+
+}  // namespace
+
+TEST_F(PartitionAllocStateBitmapTest, MoreThanZeroEntriesPossible) {
+  const size_t max_entries = TestBitmap::kMaxEntries;
+  EXPECT_LT(0u, max_entries);
+}
+
+TEST_F(PartitionAllocStateBitmapTest, InitialQuarantineEmpty) {
+  EXPECT_TRUE(IsQuarantineEmpty());
+}
+
+TEST_F(PartitionAllocStateBitmapTest, QuarantineImpliesNonEmpty) {
+  AllocateObject(0);
+  EXPECT_TRUE(IsQuarantineEmpty());
+  QuarantineObject(0, kTestEpoch);
+  EXPECT_FALSE(IsQuarantineEmpty());
+}
+
+TEST_F(PartitionAllocStateBitmapTest, RepetitiveQuarantine) {
+  AllocateObject(MiddleIndex());
+  EXPECT_TRUE(QuarantineObject(MiddleIndex(), kTestEpoch));
+  EXPECT_FALSE(QuarantineObject(MiddleIndex(), kTestEpoch));
+}
+
+TEST_F(PartitionAllocStateBitmapTest, CountAllocated) {
+  AllocateObject(0);
+  EXPECT_EQ(1u, CountAllocated());
+  QuarantineObject(0, kTestEpoch);
+  EXPECT_EQ(0u, CountAllocated());
+}
+
+TEST_F(PartitionAllocStateBitmapTest, StateTransitions) {
+  for (auto i : {uintptr_t{0}, uintptr_t{1}, LastIndex() - 1, LastIndex()}) {
+    AssertFreed(i);
+
+    AllocateObject(i);
+    AssertAllocated(i);
+
+    QuarantineObject(i, kTestEpoch);
+    AssertQuarantined(i);
+
+    MarkQuarantinedObject(i, kTestEpoch);
+    AssertQuarantined(i);
+
+    FreeObject(i);
+    AssertFreed(i);
+  }
+}
+
+TEST_F(PartitionAllocStateBitmapTest, MultipleMarks) {
+  AllocateObject(0);
+  QuarantineObject(0, kTestEpoch);
+
+  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch));
+
+  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch + 1));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 1));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 1));
+
+  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch + 2));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 2));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 2));
+}
+
+TEST_F(PartitionAllocStateBitmapTest, MultipleMarksAdjacent) {
+  AllocateObject(0);
+  QuarantineObject(0, kTestEpoch);
+
+  AllocateObject(1);
+  QuarantineObject(1, kTestEpoch);
+
+  AllocateObject(2);
+  QuarantineObject(2, kTestEpoch);
+
+  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch));
+  EXPECT_TRUE(MarkQuarantinedObject(1, kTestEpoch));
+  EXPECT_TRUE(MarkQuarantinedObject(2, kTestEpoch));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch));
+  EXPECT_FALSE(MarkQuarantinedObject(1, kTestEpoch));
+  EXPECT_FALSE(MarkQuarantinedObject(2, kTestEpoch));
+
+  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch + 1));
+  EXPECT_TRUE(MarkQuarantinedObject(1, kTestEpoch + 1));
+  EXPECT_TRUE(MarkQuarantinedObject(2, kTestEpoch + 1));
+  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 1));
+  EXPECT_FALSE(MarkQuarantinedObject(1, kTestEpoch + 1));
+  EXPECT_FALSE(MarkQuarantinedObject(2, kTestEpoch + 1));
+}
+
+TEST_F(PartitionAllocStateBitmapTest, QuarantineFreeMultipleObjects) {
+  static constexpr size_t kCount = 256;
+  for (size_t i = 0; i < kCount; ++i) {
+    AllocateObject(i);
+  }
+  EXPECT_EQ(kCount, CountAllocated());
+  EXPECT_EQ(0u, CountQuarantined());
+
+  for (size_t i = 0; i < kCount; ++i) {
+    QuarantineObject(i, kTestEpoch);
+  }
+  EXPECT_EQ(0u, CountAllocated());
+  EXPECT_EQ(kCount, CountQuarantined());
+
+  for (size_t i = 0; i < kCount; ++i) {
+    FreeObject(i);
+    EXPECT_EQ(kCount - i - 1, CountQuarantined());
+  }
+  EXPECT_TRUE(IsQuarantineEmpty());
+}
+
+TEST_F(PartitionAllocStateBitmapTest, AdjacentQuarantinedObjectsAtBegin) {
+  AllocateObject(0);
+  QuarantineObject(0, kTestEpoch);
+  AllocateObject(1);
+  QuarantineObject(1, kTestEpoch);
+
+  EXPECT_FALSE(IsQuarantined(2));
+  {
+    size_t count = 0;
+    this->bitmap().IterateQuarantined([&count, this](uintptr_t current) {
+      if (count == 0) {
+        EXPECT_EQ(ObjectAddress(0), current);
+      } else if (count == 1) {
+        EXPECT_EQ(ObjectAddress(1), current);
+      }
+      count++;
+    });
+
+    EXPECT_EQ(2u, count);
+  }
+  // Now mark only the first object.
+  {
+    MarkQuarantinedObject(0, kTestEpoch);
+
+    size_t count = 0;
+    this->bitmap().IterateUnmarkedQuarantined(
+        kTestEpoch, [&count, this](uintptr_t current) {
+          if (count == 0) {
+            EXPECT_EQ(ObjectAddress(1), current);
+          }
+          count++;
+        });
+
+    EXPECT_EQ(1u, count);
+  }
+}
+
+TEST_F(PartitionAllocStateBitmapTest, AdjacentQuarantinedObjectsAtMiddle) {
+  AllocateObject(MiddleIndex());
+  QuarantineObject(MiddleIndex(), kTestEpoch);
+  AllocateObject(MiddleIndex() + 1);
+  QuarantineObject(MiddleIndex() + 1, kTestEpoch);
+  {
+    size_t count = 0;
+    this->bitmap().IterateQuarantined([&count, this](uintptr_t current) {
+      if (count == 0) {
+        EXPECT_EQ(ObjectAddress(MiddleIndex()), current);
+      } else if (count == 1) {
+        EXPECT_EQ(ObjectAddress(MiddleIndex() + 1), current);
+      }
+      count++;
+    });
+
+    EXPECT_EQ(2u, count);
+  }
+  // Now mark only the first object.
+  {
+    MarkQuarantinedObject(MiddleIndex(), kTestEpoch);
+
+    size_t count = 0;
+    this->bitmap().IterateUnmarkedQuarantined(
+        kTestEpoch, [&count, this](uintptr_t current) {
+          if (count == 0) {
+            EXPECT_EQ(ObjectAddress(MiddleIndex() + 1), current);
+          }
+          count++;
+        });
+
+    EXPECT_EQ(1u, count);
+  }
+}
+
+TEST_F(PartitionAllocStateBitmapTest, AdjacentQuarantinedObjectsAtEnd) {
+  AllocateObject(LastIndex());
+  QuarantineObject(LastIndex(), kTestEpoch);
+  AllocateObject(LastIndex() - 1);
+  QuarantineObject(LastIndex() - 1, kTestEpoch);
+
+  EXPECT_FALSE(IsQuarantined(LastIndex() - 2));
+  {
+    size_t count = 0;
+    this->bitmap().IterateQuarantined([&count, this](uintptr_t current) {
+      if (count == 0) {
+        EXPECT_EQ(ObjectAddress(LastIndex() - 1), current);
+      } else if (count == 1) {
+        EXPECT_EQ(ObjectAddress(LastIndex()), current);
+      }
+      count++;
+    });
+
+    EXPECT_EQ(2u, count);
+  }
+  // Now mark only the first object.
+  {
+    MarkQuarantinedObject(LastIndex(), kTestEpoch);
+
+    size_t count = 0;
+    this->bitmap().IterateUnmarkedQuarantined(
+        kTestEpoch, [&count, this](uintptr_t current) {
+          if (count == 0) {
+            EXPECT_EQ(ObjectAddress(LastIndex() - 1), current);
+          }
+          count++;
+        });
+
+    EXPECT_EQ(1u, count);
+  }
+}
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.cc
new file mode 100644
index 0000000..af622d1
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.cc
@@ -0,0 +1,115 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h"
+
+namespace partition_alloc::internal {
+
+StatsCollector::StatsCollector(const char* process_name,
+                               size_t quarantine_last_size)
+    : process_name_(process_name),
+      quarantine_last_size_(quarantine_last_size) {}
+
+StatsCollector::~StatsCollector() = default;
+
+base::TimeDelta StatsCollector::GetOverallTime() const {
+  return GetTimeImpl<Context::kMutator>(mutator_trace_events_,
+                                        MutatorId::kOverall) +
+         GetTimeImpl<Context::kScanner>(scanner_trace_events_,
+                                        ScannerId::kOverall);
+}
+
+void StatsCollector::ReportTracesAndHists(
+    partition_alloc::StatsReporter& reporter) const {
+  ReportTracesAndHistsImpl<Context::kMutator>(reporter, mutator_trace_events_);
+  ReportTracesAndHistsImpl<Context::kScanner>(reporter, scanner_trace_events_);
+  ReportSurvivalRate(reporter);
+}
+
+template <Context context>
+base::TimeDelta StatsCollector::GetTimeImpl(
+    const DeferredTraceEventMap<context>& event_map,
+    IdType<context> id) const {
+  base::TimeDelta overall;
+  for (const auto& tid_and_events : event_map.get_underlying_map_unsafe()) {
+    const auto& events = tid_and_events.second;
+    const auto& event = events[static_cast<size_t>(id)];
+    overall += (event.end_time - event.start_time);
+  }
+  return overall;
+}
+
+template <Context context>
+void StatsCollector::ReportTracesAndHistsImpl(
+    partition_alloc::StatsReporter& reporter,
+    const DeferredTraceEventMap<context>& event_map) const {
+  std::array<base::TimeDelta, static_cast<size_t>(IdType<context>::kNumIds)>
+      accumulated_events{};
+  // First, report traces and accumulate each trace scope to report UMA hists.
+  for (const auto& tid_and_events : event_map.get_underlying_map_unsafe()) {
+    const internal::base::PlatformThreadId tid = tid_and_events.first;
+    const auto& events = tid_and_events.second;
+    PA_DCHECK(accumulated_events.size() == events.size());
+    for (size_t id = 0; id < events.size(); ++id) {
+      const auto& event = events[id];
+      if (event.start_time.is_null()) {
+        // If start_time is null, the event was never triggered, e.g. a
+        // safepoint that bailed out because it started at the end of scanning.
+        PA_DCHECK(event.end_time.is_null());
+        continue;
+      }
+      reporter.ReportTraceEvent(static_cast<IdType<context>>(id), tid,
+                                event.start_time.ToInternalValue(),
+                                event.end_time.ToInternalValue());
+      accumulated_events[id] += (event.end_time - event.start_time);
+    }
+  }
+  // Report UMA if process_name is set.
+  if (!process_name_) {
+    return;
+  }
+  for (size_t id = 0; id < accumulated_events.size(); ++id) {
+    if (accumulated_events[id].is_zero()) {
+      continue;
+    }
+    reporter.ReportStats(ToUMAString(static_cast<IdType<context>>(id)).c_str(),
+                         accumulated_events[id].InMicroseconds());
+  }
+}
+
+void StatsCollector::ReportSurvivalRate(
+    partition_alloc::StatsReporter& reporter) const {
+  const double survived_rate =
+      static_cast<double>(survived_quarantine_size()) / quarantine_last_size_;
+  reporter.ReportSurvivedQuarantineSize(survived_quarantine_size());
+  reporter.ReportSurvivedQuarantinePercent(survived_rate);
+  PA_PCSCAN_VLOG(2) << "quarantine size: " << quarantine_last_size_ << " -> "
+                    << survived_quarantine_size()
+                    << ", swept bytes: " << swept_size()
+                    << ", survival rate: " << survived_rate;
+  if (discarded_quarantine_size_) {
+    PA_PCSCAN_VLOG(2) << "discarded quarantine size: "
+                      << discarded_quarantine_size_;
+  }
+}
+
+template base::TimeDelta StatsCollector::GetTimeImpl(
+    const DeferredTraceEventMap<Context::kMutator>&,
+    IdType<Context::kMutator>) const;
+template base::TimeDelta StatsCollector::GetTimeImpl(
+    const DeferredTraceEventMap<Context::kScanner>&,
+    IdType<Context::kScanner>) const;
+
+template void StatsCollector::ReportTracesAndHistsImpl(
+    partition_alloc::StatsReporter& reporter,
+    const DeferredTraceEventMap<Context::kMutator>&) const;
+template void StatsCollector::ReportTracesAndHistsImpl(
+    partition_alloc::StatsReporter& reporter,
+    const DeferredTraceEventMap<Context::kScanner>&) const;
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h
new file mode 100644
index 0000000..af060ad
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h
@@ -0,0 +1,248 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATS_COLLECTOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATS_COLLECTOR_H_
+
+#include <array>
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <utility>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/starscan_fwd.h"
+
+namespace partition_alloc {
+
+class StatsReporter;
+
+namespace internal {
+
+#define FOR_ALL_PCSCAN_SCANNER_SCOPES(V) \
+  V(Clear)                               \
+  V(Scan)                                \
+  V(Sweep)                               \
+  V(Overall)
+
+#define FOR_ALL_PCSCAN_MUTATOR_SCOPES(V) \
+  V(Clear)                               \
+  V(ScanStack)                           \
+  V(Scan)                                \
+  V(Overall)
+
+class StatsCollector final {
+ public:
+  enum class ScannerId {
+#define DECLARE_ENUM(name) k##name,
+    FOR_ALL_PCSCAN_SCANNER_SCOPES(DECLARE_ENUM)
+#undef DECLARE_ENUM
+        kNumIds,
+  };
+
+  enum class MutatorId {
+#define DECLARE_ENUM(name) k##name,
+    FOR_ALL_PCSCAN_MUTATOR_SCOPES(DECLARE_ENUM)
+#undef DECLARE_ENUM
+        kNumIds,
+  };
+
+  template <Context context>
+  using IdType =
+      std::conditional_t<context == Context::kMutator, MutatorId, ScannerId>;
+
+  // We don't trace events immediately; instead we defer them until scanning
+  // is done. This avoids unpredictable work that traces can trigger (e.g. a
+  // recursive mutex lock).
+  struct DeferredTraceEvent {
+    base::TimeTicks start_time;
+    base::TimeTicks end_time;
+  };
+
+  // Thread-safe hash-map that maps thread id to scanner events. Doesn't
+  // accumulate events, i.e. every event can only be registered once.
+  template <Context context>
+  class DeferredTraceEventMap final {
+   public:
+    using IdType = StatsCollector::IdType<context>;
+    using PerThreadEvents =
+        std::array<DeferredTraceEvent, static_cast<size_t>(IdType::kNumIds)>;
+    using UnderlyingMap = std::unordered_map<
+        internal::base::PlatformThreadId,
+        PerThreadEvents,
+        std::hash<internal::base::PlatformThreadId>,
+        std::equal_to<>,
+        MetadataAllocator<std::pair<const internal::base::PlatformThreadId,
+                                    PerThreadEvents>>>;
+
+    inline void RegisterBeginEventFromCurrentThread(IdType id);
+    inline void RegisterEndEventFromCurrentThread(IdType id);
+
+    const UnderlyingMap& get_underlying_map_unsafe() const { return events_; }
+
+   private:
+    std::mutex mutex_;
+    UnderlyingMap events_;
+  };
+
+  template <Context context>
+  class Scope final {
+   public:
+    Scope(StatsCollector& stats, IdType<context> type)
+        : stats_(stats), type_(type) {
+      stats_.RegisterBeginEventFromCurrentThread(type);
+    }
+
+    Scope(const Scope&) = delete;
+    Scope& operator=(const Scope&) = delete;
+
+    ~Scope() { stats_.RegisterEndEventFromCurrentThread(type_); }
+
+   private:
+    StatsCollector& stats_;
+    IdType<context> type_;
+  };
+
+  using ScannerScope = Scope<Context::kScanner>;
+  using MutatorScope = Scope<Context::kMutator>;
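+
+  // Illustrative usage (sketch, not part of this CL): a scope brackets a
+  // phase so that its begin/end events are recorded automatically, e.g.
+  //   StatsCollector::ScannerScope scope(collector, ScannerId::kScan);
+  // where |collector| is a hypothetical StatsCollector instance.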
+
+  StatsCollector(const char* process_name, size_t quarantine_last_size);
+
+  StatsCollector(const StatsCollector&) = delete;
+  StatsCollector& operator=(const StatsCollector&) = delete;
+
+  ~StatsCollector();
+
+  void IncreaseSurvivedQuarantineSize(size_t size) {
+    survived_quarantine_size_.fetch_add(size, std::memory_order_relaxed);
+  }
+  size_t survived_quarantine_size() const {
+    return survived_quarantine_size_.load(std::memory_order_relaxed);
+  }
+
+  void IncreaseSweptSize(size_t size) { swept_size_ += size; }
+  size_t swept_size() const { return swept_size_; }
+
+  void IncreaseDiscardedQuarantineSize(size_t size) {
+    discarded_quarantine_size_ += size;
+  }
+
+  base::TimeDelta GetOverallTime() const;
+  void ReportTracesAndHists(partition_alloc::StatsReporter& reporter) const;
+
+ private:
+  using MetadataString =
+      std::basic_string<char, std::char_traits<char>, MetadataAllocator<char>>;
+
+  MetadataString ToUMAString(ScannerId id) const;
+  MetadataString ToUMAString(MutatorId id) const;
+
+  void RegisterBeginEventFromCurrentThread(MutatorId id) {
+    mutator_trace_events_.RegisterBeginEventFromCurrentThread(id);
+  }
+  void RegisterEndEventFromCurrentThread(MutatorId id) {
+    mutator_trace_events_.RegisterEndEventFromCurrentThread(id);
+  }
+  void RegisterBeginEventFromCurrentThread(ScannerId id) {
+    scanner_trace_events_.RegisterBeginEventFromCurrentThread(id);
+  }
+  void RegisterEndEventFromCurrentThread(ScannerId id) {
+    scanner_trace_events_.RegisterEndEventFromCurrentThread(id);
+  }
+
+  template <Context context>
+  base::TimeDelta GetTimeImpl(const DeferredTraceEventMap<context>& event_map,
+                              IdType<context> id) const;
+
+  template <Context context>
+  void ReportTracesAndHistsImpl(
+      partition_alloc::StatsReporter& reporter,
+      const DeferredTraceEventMap<context>& event_map) const;
+
+  void ReportSurvivalRate(partition_alloc::StatsReporter& reporter) const;
+
+  DeferredTraceEventMap<Context::kMutator> mutator_trace_events_;
+  DeferredTraceEventMap<Context::kScanner> scanner_trace_events_;
+
+  std::atomic<size_t> survived_quarantine_size_{0u};
+  size_t swept_size_ = 0u;
+  size_t discarded_quarantine_size_ = 0u;
+  const char* process_name_ = nullptr;
+  const size_t quarantine_last_size_ = 0u;
+};
+
+template <Context context>
+inline void StatsCollector::DeferredTraceEventMap<
+    context>::RegisterBeginEventFromCurrentThread(IdType id) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  const auto tid = base::PlatformThread::CurrentId();
+  const auto now = base::TimeTicks::Now();
+  auto& event_array = events_[tid];
+  auto& event = event_array[static_cast<size_t>(id)];
+  PA_DCHECK(event.start_time.is_null());
+  PA_DCHECK(event.end_time.is_null());
+  event.start_time = now;
+}
+
+template <Context context>
+inline void StatsCollector::DeferredTraceEventMap<
+    context>::RegisterEndEventFromCurrentThread(IdType id) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  const auto tid = base::PlatformThread::CurrentId();
+  const auto now = base::TimeTicks::Now();
+  auto& event_array = events_[tid];
+  auto& event = event_array[static_cast<size_t>(id)];
+  PA_DCHECK(!event.start_time.is_null());
+  PA_DCHECK(event.end_time.is_null());
+  event.end_time = now;
+}
+
+inline StatsCollector::MetadataString StatsCollector::ToUMAString(
+    ScannerId id) const {
+  PA_DCHECK(process_name_);
+  const MetadataString process_name = process_name_;
+  switch (id) {
+    case ScannerId::kClear:
+      return "PA.PCScan." + process_name + ".Scanner.Clear";
+    case ScannerId::kScan:
+      return "PA.PCScan." + process_name + ".Scanner.Scan";
+    case ScannerId::kSweep:
+      return "PA.PCScan." + process_name + ".Scanner.Sweep";
+    case ScannerId::kOverall:
+      return "PA.PCScan." + process_name + ".Scanner";
+    case ScannerId::kNumIds:
+      __builtin_unreachable();
+  }
+}
+
+inline StatsCollector::MetadataString StatsCollector::ToUMAString(
+    MutatorId id) const {
+  PA_DCHECK(process_name_);
+  const MetadataString process_name = process_name_;
+  switch (id) {
+    case MutatorId::kClear:
+      return "PA.PCScan." + process_name + ".Mutator.Clear";
+    case MutatorId::kScanStack:
+      return "PA.PCScan." + process_name + ".Mutator.ScanStack";
+    case MutatorId::kScan:
+      return "PA.PCScan." + process_name + ".Mutator.Scan";
+    case MutatorId::kOverall:
+      return "PA.PCScan." + process_name + ".Mutator";
+    case MutatorId::kNumIds:
+      __builtin_unreachable();
+  }
+}
+
+#undef FOR_ALL_PCSCAN_MUTATOR_SCOPES
+#undef FOR_ALL_PCSCAN_SCANNER_SCOPES
+
+}  // namespace internal
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATS_COLLECTOR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h
new file mode 100644
index 0000000..8f0f910
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/stats_reporter.h
@@ -0,0 +1,36 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATS_REPORTER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATS_REPORTER_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stats_collector.h"
+
+namespace partition_alloc {
+
+// StatsReporter is a wrapper to invoke TRACE_EVENT_BEGIN/END, TRACE_COUNTER1,
+// and UmaHistogramTimes. It exists purely to keep the trace_log and UMA
+// dependencies out of the partition allocator.
+class StatsReporter {
+ public:
+  virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id,
+                                internal::base::PlatformThreadId tid,
+                                int64_t start_time_ticks_internal_value,
+                                int64_t end_time_ticks_internal_value) {}
+  virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id,
+                                internal::base::PlatformThreadId tid,
+                                int64_t start_time_ticks_internal_value,
+                                int64_t end_time_ticks_internal_value) {}
+
+  virtual void ReportSurvivedQuarantineSize(size_t survived_size) {}
+
+  virtual void ReportSurvivedQuarantinePercent(double survived_rate) {}
+
+  virtual void ReportStats(const char* stats_name, int64_t sample_in_usec) {}
+};
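+
+// A minimal embedder-side sketch (illustrative only, not part of this CL):
+//
+//   class LoggingStatsReporter : public StatsReporter {
+//    public:
+//     void ReportStats(const char* stats_name,
+//                      int64_t sample_in_usec) override {
+//       fprintf(stderr, "%s: %lld us\n", stats_name,
+//               static_cast<long long>(sample_in_usec));
+//     }
+//   };
+//
+// StatsCollector::ReportTracesAndHists() then routes trace events and
+// UMA-style samples through whichever override the embedder supplies.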
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_STATS_REPORTER_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.cc b/base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.cc
new file mode 100644
index 0000000..cc00c74
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.cc
@@ -0,0 +1,136 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.h"
+
+#include <mutex>
+#include <thread>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/posix/eintr_wrapper.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "build/build_config.h"
+
+#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+#include <fcntl.h>
+#include <linux/userfaultfd.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+
+namespace partition_alloc::internal {
+
+PCScan::ClearType NoWriteProtector::SupportedClearType() const {
+  return PCScan::ClearType::kLazy;
+}
+
+#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+
+namespace {
+void UserFaultFDThread(int uffd) {
+  PA_DCHECK(-1 != uffd);
+
+  static constexpr char kThreadName[] = "PCScanPFHandler";
+  internal::base::PlatformThread::SetName(kThreadName);
+
+  while (true) {
+    // Poll on the uffd descriptor for page fault events.
+    pollfd pollfd{.fd = uffd, .events = POLLIN};
+    const int nready = PA_HANDLE_EINTR(poll(&pollfd, 1, -1));
+    PA_CHECK(-1 != nready);
+
+    // Get page fault info.
+    uffd_msg msg;
+    const int nread = PA_HANDLE_EINTR(read(uffd, &msg, sizeof(msg)));
+    PA_CHECK(0 != nread);
+
+    // We only expect page faults.
+    PA_DCHECK(UFFD_EVENT_PAGEFAULT == msg.event);
+    // We have subscribed only to wp-fault events.
+    PA_DCHECK(UFFD_PAGEFAULT_FLAG_WP & msg.arg.pagefault.flags);
+
+    // Enter the safepoint. Concurrent faulted writes will wait until safepoint
+    // finishes.
+    PCScan::JoinScanIfNeeded();
+  }
+}
+}  // namespace
+
+UserFaultFDWriteProtector::UserFaultFDWriteProtector()
+    : uffd_(syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK)) {
+  if (uffd_ == -1) {
+    PA_LOG(WARNING) << "userfaultfd is not supported by the current kernel";
+    return;
+  }
+
+  PA_PCHECK(-1 != uffd_);
+
+  uffdio_api uffdio_api;
+  uffdio_api.api = UFFD_API;
+  uffdio_api.features = 0;
+  PA_CHECK(-1 != ioctl(uffd_, UFFDIO_API, &uffdio_api));
+  PA_CHECK(UFFD_API == uffdio_api.api);
+
+  // Register the regular pool to listen for uffd events.
+  struct uffdio_register uffdio_register;
+  uffdio_register.range.start = PartitionAddressSpace::RegularPoolBase();
+  uffdio_register.range.len = kPoolMaxSize;
+  uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
+  PA_CHECK(-1 != ioctl(uffd_, UFFDIO_REGISTER, &uffdio_register));
+
+  // Start uffd thread.
+  std::thread(UserFaultFDThread, uffd_).detach();
+}
+
+namespace {
+enum class UserFaultFDWPMode {
+  kProtect,
+  kUnprotect,
+};
+
+void UserFaultFDWPSet(int uffd,
+                      uintptr_t begin,
+                      size_t length,
+                      UserFaultFDWPMode mode) {
+  PA_DCHECK(0 == (begin % SystemPageSize()));
+  PA_DCHECK(0 == (length % SystemPageSize()));
+
+  uffdio_writeprotect wp;
+  wp.range.start = begin;
+  wp.range.len = length;
+  wp.mode =
+      (mode == UserFaultFDWPMode::kProtect) ? UFFDIO_WRITEPROTECT_MODE_WP : 0;
+  PA_PCHECK(-1 != ioctl(uffd, UFFDIO_WRITEPROTECT, &wp));
+}
+}  // namespace
+
+void UserFaultFDWriteProtector::ProtectPages(uintptr_t begin, size_t length) {
+  if (IsSupported()) {
+    UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kProtect);
+  }
+}
+
+void UserFaultFDWriteProtector::UnprotectPages(uintptr_t begin, size_t length) {
+  if (IsSupported()) {
+    UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kUnprotect);
+  }
+}
+
+PCScan::ClearType UserFaultFDWriteProtector::SupportedClearType() const {
+  return IsSupported() ? PCScan::ClearType::kEager : PCScan::ClearType::kLazy;
+}
+
+bool UserFaultFDWriteProtector::IsSupported() const {
+  return uffd_ != -1;
+}
+
+#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.h b/base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.h
new file mode 100644
index 0000000..55a095a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/starscan/write_protector.h
@@ -0,0 +1,75 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_WRITE_PROTECTOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_WRITE_PROTECTOR_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <mutex>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/metadata_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/raceful_worklist.h"
+#include "build/build_config.h"
+
+namespace partition_alloc::internal {
+
+// Interface for page protection/unprotection. This is used in DCScan to catch
+// concurrent mutator writes. Protection is done when the scanner starts
+// scanning a range. Unprotection happens at the end of the scanning phase.
+class WriteProtector : public AllocatedOnPCScanMetadataPartition {
+ public:
+  virtual ~WriteProtector() = default;
+
+  virtual void ProtectPages(uintptr_t begin, size_t length) = 0;
+  virtual void UnprotectPages(uintptr_t begin, size_t length) = 0;
+
+  virtual bool IsEnabled() const = 0;
+
+  virtual PCScan::ClearType SupportedClearType() const = 0;
+};
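+
+// Sketch of the intended call pattern (illustrative, inferred from the comment
+// above; not part of this CL):
+//   write_protector->ProtectPages(range_begin, range_length);
+//   // ... scan the range ...
+//   write_protector->UnprotectPages(range_begin, range_length);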
+
+class NoWriteProtector final : public WriteProtector {
+ public:
+  void ProtectPages(uintptr_t, size_t) final {}
+  void UnprotectPages(uintptr_t, size_t) final {}
+  PCScan::ClearType SupportedClearType() const final;
+  inline bool IsEnabled() const override;
+};
+
+bool NoWriteProtector::IsEnabled() const {
+  return false;
+}
+
+#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+class UserFaultFDWriteProtector final : public WriteProtector {
+ public:
+  UserFaultFDWriteProtector();
+
+  UserFaultFDWriteProtector(const UserFaultFDWriteProtector&) = delete;
+  UserFaultFDWriteProtector& operator=(const UserFaultFDWriteProtector&) =
+      delete;
+
+  void ProtectPages(uintptr_t, size_t) final;
+  void UnprotectPages(uintptr_t, size_t) final;
+
+  PCScan::ClearType SupportedClearType() const final;
+
+  inline bool IsEnabled() const override;
+
+ private:
+  bool IsSupported() const;
+
+  const int uffd_ = 0;
+};
+
+bool UserFaultFDWriteProtector::IsEnabled() const {
+  return IsSupported();
+}
+#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_STARSCAN_WRITE_PROTECTOR_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/tagging.cc b/base/allocator/partition_allocator/src/partition_alloc/tagging.cc
new file mode 100644
index 0000000..9b44119
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/tagging.cc
@@ -0,0 +1,276 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+#include <arm_acle.h>
+#include <asm/hwcap.h>
+#include <sys/auxv.h>
+#include <sys/ifunc.h>
+#include <sys/prctl.h>
+#define PR_SET_TAGGED_ADDR_CTRL 55
+#define PR_GET_TAGGED_ADDR_CTRL 56
+#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+
+#if BUILDFLAG(IS_LINUX)
+#include <linux/version.h>
+
+// Linux headers already provide these since v5.10.
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+#define HAS_PR_MTE_MACROS
+#endif
+#endif
+
+#ifndef HAS_PR_MTE_MACROS
+#define PR_MTE_TCF_SHIFT 1
+#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
+#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
+#define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
+#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+#define PR_MTE_TAG_SHIFT 3
+#define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+#define HWCAP2_MTE (1 << 18)
+#endif
+#endif
+
+#if BUILDFLAG(IS_ANDROID)
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/files/file_path.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/native_library.h"
+#endif  // BUILDFLAG(IS_ANDROID)
+
+namespace partition_alloc {
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+namespace {
+void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
+  if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) {
+    int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
+    PA_CHECK(status == 0);
+  }
+}
+}  // namespace
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  if (m == TagViolationReportingMode::kSynchronous) {
+    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
+                                    (0xfffe << PR_MTE_TAG_SHIFT));
+  } else if (m == TagViolationReportingMode::kAsynchronous) {
+    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC |
+                                    (0xfffe << PR_MTE_TAG_SHIFT));
+  } else {
+    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_NONE);
+  }
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+}
+
+namespace internal {
+
+#if BUILDFLAG(IS_ANDROID)
+void ChangeMemoryTaggingModeForAllThreadsPerProcess(
+    TagViolationReportingMode m) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  // In order to support Android NDK API levels below 26, we need to call
+  // mallopt via the dynamic linker.
+  // int mallopt(int param, int value);
+  using MalloptSignature = int (*)(int, int);
+
+  static MalloptSignature mallopt_fnptr = []() {
+    base::FilePath module_path;
+    base::NativeLibraryLoadError load_error;
+    base::FilePath library_path = module_path.Append("libc.so");
+    base::NativeLibrary library =
+        base::LoadNativeLibrary(library_path, &load_error);
+    PA_CHECK(library);
+    void* func_ptr =
+        base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
+    PA_CHECK(func_ptr);
+    return reinterpret_cast<MalloptSignature>(func_ptr);
+  }();
+
+  int status = 0;
+  if (m == TagViolationReportingMode::kSynchronous) {
+    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
+                           M_HEAP_TAGGING_LEVEL_SYNC);
+  } else if (m == TagViolationReportingMode::kAsynchronous) {
+    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
+                           M_HEAP_TAGGING_LEVEL_ASYNC);
+  } else {
+    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
+                           M_HEAP_TAGGING_LEVEL_NONE);
+  }
+  PA_CHECK(status);
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+}
+#endif  // BUILDFLAG(IS_ANDROID)
+
+namespace {
+[[maybe_unused]] static bool CheckTagRegionParameters(void* ptr, size_t sz) {
+  // Check that ptr and size are correct for MTE
+  uintptr_t ptr_as_uint = reinterpret_cast<uintptr_t>(ptr);
+  bool ret = (ptr_as_uint % kMemTagGranuleSize == 0) &&
+             (sz % kMemTagGranuleSize == 0) && sz;
+  return ret;
+}
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
+  // Randomly tag a region (MTE-enabled systems only). The first 16-byte
+  // granule is randomly tagged; all other granules in the region are
+  // then assigned that initial tag via __arm_mte_set_tag.
+  if (!CheckTagRegionParameters(ptr, sz)) {
+    return nullptr;
+  }
+  // __arm_mte_create_random_tag generates a randomly tagged pointer via the
+  // hardware's random number generator, but does not apply it to the memory.
+  char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
+  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
+    // Next, tag the first and all subsequent granules with the random tag.
+    __arm_mte_set_tag(nptr +
+                      i);  // Tag is taken from the top bits of the argument.
+  }
+  return nptr;
+}
+
+void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
+  // Increment a region's tag (MTE-enabled systems only), using the tag of the
+  // first granule.
+  if (!CheckTagRegionParameters(ptr, sz)) {
+    return nullptr;
+  }
+  // Increment ptr's tag.
+  char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
+  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
+    // Apply the tag to the first granule, and all subsequent granules.
+    __arm_mte_set_tag(nptr + i);
+  }
+  return nptr;
+}
+
+void* RemaskVoidPtrForMTE(void* ptr) {
+  if (PA_LIKELY(ptr)) {
+    // Can't look up the tag for a null ptr (segfaults).
+    return __arm_mte_get_tag(ptr);
+  }
+  return nullptr;
+}
+
+void* TagRegionIncrementNoOp(void* ptr, size_t sz) {
+  // Region parameters are checked even on non-MTE systems so that the
+  // intrinsics are used correctly.
+  return ptr;
+}
+
+void* TagRegionRandomlyNoOp(void* ptr, size_t sz, uint64_t mask) {
+  // Expects a 16-byte-aligned tagging granule and a granule-multiple size (on
+  // all architectures).
+  return ptr;
+}
+
+void* RemaskVoidPtrNoOp(void* ptr) {
+  return ptr;
+}
+#endif
+
+}  // namespace
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+using RemaskPtrInternalFn = void*(void* ptr);
+using TagMemoryRangeIncrementInternalFn = void*(void* ptr, size_t size);
+
+using TagMemoryRangeRandomlyInternalFn = void*(void* ptr,
+                                               size_t size,
+                                               uint64_t mask);
+
+extern "C" TagMemoryRangeIncrementInternalFn(
+    *ResolveTagMemoryRangeIncrement(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
+  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
+    return TagRegionIncrementForMTE;
+  }
+  return TagRegionIncrementNoOp;
+}
+
+extern "C" TagMemoryRangeRandomlyInternalFn(
+    *ResolveTagMemoryRandomly(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
+  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
+    return TagRegionRandomlyForMTE;
+  }
+  return TagRegionRandomlyNoOp;
+}
+
+extern "C" RemaskPtrInternalFn(
+    *ResolveRemaskPointer(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
+  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
+    return RemaskVoidPtrForMTE;
+  }
+  return RemaskVoidPtrNoOp;
+}
+
+void* TagMemoryRangeIncrementInternal(void* ptr, size_t size)
+    __attribute__((ifunc("ResolveTagMemoryRangeIncrement")));
+void* TagMemoryRangeRandomlyInternal(void* ptr, size_t size, uint64_t mask)
+    __attribute__((ifunc("ResolveTagMemoryRandomly")));
+void* RemaskPointerInternal(void* ptr)
+    __attribute__((ifunc("ResolveRemaskPointer")));
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+TagViolationReportingMode GetMemoryTaggingModeForCurrentThread() {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  base::CPU cpu;
+  if (!cpu.has_mte()) {
+    return TagViolationReportingMode::kUndefined;
+  }
+  int status = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+  PA_CHECK(status >= 0);
+  // Check for Asynchronous first because ASYNC on Android sets both
+  // PR_MTE_TCF_ASYNC and PR_MTE_TCF_SYNC bits.
+  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_ASYNC)) {
+    return TagViolationReportingMode::kAsynchronous;
+  }
+  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_SYNC)) {
+    return TagViolationReportingMode::kSynchronous;
+  }
+  return TagViolationReportingMode::kDisabled;
+#else
+  return TagViolationReportingMode::kUndefined;
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+}
+
+}  // namespace internal
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
+bool PermissiveMte::enabled_ = false;
+
+// static
+void PermissiveMte::SetEnabled(bool enabled) {
+  PermissiveMte::enabled_ = enabled;
+}
+
+// static
+bool PermissiveMte::HandleCrash(int signo,
+                                siginfo_t* siginfo,
+                                ucontext_t* context) {
+  if (siginfo->si_signo == SIGSEGV &&
+      (siginfo->si_code == SEGV_MTESERR || siginfo->si_code == SEGV_MTEAERR) &&
+      PermissiveMte::enabled_) {
+    // In MTE permissive mode, do not crash the process. Instead, disable MTE
+    // and let the failing instruction be retried. The second time should
+    // succeed (except if there is another non-MTE fault).
+    internal::ChangeMemoryTaggingModeForAllThreadsPerProcess(
+        partition_alloc::TagViolationReportingMode::kDisabled);
+    return true;
+  }
+  return false;
+}
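+
+// Illustrative wiring (an assumption, not part of this file): an embedder's
+// SIGSEGV handler installed via sigaction() with SA_SIGINFO could forward
+// (signo, siginfo, context) to PermissiveMte::HandleCrash() and simply return
+// when it yields true, letting the faulting instruction retry with MTE
+// disabled.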
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/tagging.h b/base/allocator/partition_allocator/src/partition_alloc/tagging.h
new file mode 100644
index 0000000..12ad5e9
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/tagging.h
@@ -0,0 +1,150 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_TAGGING_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_TAGGING_H_
+
+// This file contains method definitions to support Armv8.5-A's memory tagging
+// extension.
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
+#include <csignal>
+#endif
+
+namespace partition_alloc {
+
+// Enum configures Arm's MTE extension to operate in different modes
+enum class TagViolationReportingMode {
+  // Default settings
+  kUndefined,
+  // MTE explicitly disabled.
+  kDisabled,
+  // Precise tag violation reports, higher overhead. Good for unittests
+  // and security-critical threads.
+  kSynchronous,
+  // Imprecise tag violation reports (async mode). Lower overhead.
+  kAsynchronous,
+};
+
+// Changes the memory tagging mode for the calling thread.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode);
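+
+// For example (sketch):
+//   ChangeMemoryTaggingModeForCurrentThread(
+//       TagViolationReportingMode::kSynchronous);
+// switches the calling thread to precise (synchronous) reporting on
+// MTE-capable hardware; without MTE support the call is a no-op.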
+
+namespace internal {
+
+constexpr int kMemTagGranuleSize = 16u;
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+constexpr uint64_t kPtrTagMask = 0xff00000000000000uLL;
+#else
+constexpr uint64_t kPtrTagMask = 0;
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+constexpr uint64_t kPtrUntagMask = ~kPtrTagMask;
+
+#if BUILDFLAG(IS_ANDROID)
+// Changes the memory tagging mode for all threads in the current process.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void ChangeMemoryTaggingModeForAllThreadsPerProcess(TagViolationReportingMode);
+#endif
+
+// Gets the memory tagging mode for the calling thread. Returns kUndefined if
+// MTE support is not available.
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+TagViolationReportingMode GetMemoryTaggingModeForCurrentThread();
+
+// These forward-declared functions do not really exist in tagging.cc; they're
+// resolved by the dynamic linker to MTE-capable versions on the right
+// hardware.
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* TagMemoryRangeIncrementInternal(void* ptr, size_t size);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* TagMemoryRangeRandomlyInternal(void* ptr, size_t size, uint64_t mask);
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void* RemaskPointerInternal(void* ptr);
+#endif
+
+// Increments the tag of the memory range ptr. Useful for provable revocations
+// (e.g. free). Returns the pointer with the new tag. Ensures that the entire
+// range is set to the same tag.
+PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(void* ptr, size_t size) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  return TagMemoryRangeIncrementInternal(ptr, size);
+#else
+  return ptr;
+#endif
+}
+
+PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(uintptr_t address, size_t size) {
+  return TagMemoryRangeIncrement(reinterpret_cast<void*>(address), size);
+}
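+
+// For example (sketch): on free, a slot's tag can be bumped so that stale
+// pointers still carrying the old tag fault on their next access:
+//   void* retagged = TagMemoryRangeIncrement(slot_start, slot_size);
+// where |slot_start| and |slot_size| are assumed to be granule-aligned.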
+
+// Randomly changes the tag of the ptr memory range. Useful for initial random
+// initialization. Returns the pointer with the new tag. Ensures that the entire
+// range is set to the same tag.
+PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t address,
+                                              size_t size,
+                                              uint64_t mask = 0u) {
+  void* ptr = reinterpret_cast<void*>(address);
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  return reinterpret_cast<void*>(
+      TagMemoryRangeRandomlyInternal(ptr, size, mask));
+#else
+  return ptr;
+#endif
+}
+
+// Gets a version of ptr that's safe to dereference.
+template <typename T>
+PA_ALWAYS_INLINE T* TagPtr(T* ptr) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  return reinterpret_cast<T*>(RemaskPointerInternal(ptr));
+#else
+  return ptr;
+#endif
+}
+
+// Gets a version of |address| that's safe to dereference, and casts to a
+// pointer.
+PA_ALWAYS_INLINE void* TagAddr(uintptr_t address) {
+  return TagPtr(reinterpret_cast<void*>(address));
+}
+
+// Strips the tag bits off |address|.
+PA_ALWAYS_INLINE uintptr_t UntagAddr(uintptr_t address) {
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+  return address & internal::kPtrUntagMask;
+#else
+  return address;
+#endif
+}
+
+}  // namespace internal
+
+// Strips the tag bits off |ptr|.
+template <typename T>
+PA_ALWAYS_INLINE uintptr_t UntagPtr(T* ptr) {
+  return internal::UntagAddr(reinterpret_cast<uintptr_t>(ptr));
+}
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PermissiveMte {
+ public:
+  static void SetEnabled(bool enabled);
+  static bool HandleCrash(int signo, siginfo_t* siginfo, ucontext_t* context);
+
+ private:
+  static bool enabled_;
+};
+#endif
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_TAGGING_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/tagging_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/tagging_unittest.cc
new file mode 100644
index 0000000..c0d0f84
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/tagging_unittest.cc
@@ -0,0 +1,216 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal {
+
+// Check whether we can call the tagging intrinsics safely on all architectures.
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeRandomlySafe) {
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp = TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize, 0u);
+  EXPECT_TRUE(bufferp);
+  int* buffer0 = static_cast<int*>(bufferp);
+  *buffer0 = 42;
+  EXPECT_EQ(42, *buffer0);
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementSafe) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize);
+  EXPECT_TRUE(bufferp);
+  int* buffer0 = static_cast<int*>(bufferp);
+  *buffer0 = 42;
+  EXPECT_EQ(42, *buffer0);
+  if (cpu.has_mte()) {
+    EXPECT_NE(bufferp, reinterpret_cast<void*>(buffer));
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+#if defined(ARCH_CPU_64_BITS)
+// Size / alignment constraints are only enforced on 64-bit architectures.
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeBadSz) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp =
+      TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize - 1, 0u);
+  if (cpu.has_mte()) {
+    EXPECT_FALSE(bufferp);
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeRandomlyNoSz) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp = TagMemoryRangeRandomly(buffer, 0, 0u);
+  if (cpu.has_mte()) {
+    EXPECT_FALSE(bufferp);
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeRandomlyBadAlign) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp =
+      TagMemoryRangeRandomly(buffer - 1, 4 * kMemTagGranuleSize, 0u);
+  if (cpu.has_mte()) {
+    EXPECT_FALSE(bufferp);
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementBadSz) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize - 1);
+  if (cpu.has_mte()) {
+    EXPECT_FALSE(bufferp);
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementNoSz) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp = TagMemoryRangeIncrement(buffer, 0);
+  if (cpu.has_mte()) {
+    EXPECT_FALSE(bufferp);
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+
+TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementBadAlign) {
+  base::CPU cpu;
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageAccessibilityConfiguration(
+                     PageAccessibilityConfiguration::kReadWriteTagged),
+                 PageTag::kChromium);
+  EXPECT_TRUE(buffer);
+  void* bufferp = TagMemoryRangeIncrement(buffer - 1, 4 * kMemTagGranuleSize);
+  if (cpu.has_mte()) {
+    EXPECT_FALSE(bufferp);
+  }
+  FreePages(buffer, PageAllocationGranularity());
+}
+#endif  // defined(ARCH_CPU_64_BITS)
+
+#if PA_CONFIG(HAS_MEMORY_TAGGING)
+#if BUILDFLAG(IS_ANDROID)
+TEST(PartitionAllocMemoryTaggingTest,
+     ChangeMemoryTaggingModeForAllThreadsPerProcess) {
+  base::CPU cpu;
+  // If the underlying platform does not support MTE, skip this test to avoid
+  // hiding failures.
+  if (!cpu.has_mte()) {
+    GTEST_SKIP();
+  }
+
+  // The mode should be set to synchronous on startup by AndroidManifest.xml
+  // for base_unittests.
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kSynchronous);
+
+  // Skip changing to kDisabled, because scudo does not support enabling MTE
+  // once it is disabled.
+  ChangeMemoryTaggingModeForAllThreadsPerProcess(
+      TagViolationReportingMode::kAsynchronous);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kAsynchronous);
+  ChangeMemoryTaggingModeForAllThreadsPerProcess(
+      TagViolationReportingMode::kSynchronous);
+  // End with mode changed back to synchronous.
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kSynchronous);
+}
+#endif  // BUILDFLAG(IS_ANDROID)
+
+TEST(PartitionAllocMemoryTaggingTest, ChangeMemoryTaggingModeForCurrentThread) {
+  base::CPU cpu;
+  // If the underlying platform does not support MTE, skip this test to avoid
+  // hiding failures.
+  if (!cpu.has_mte()) {
+    GTEST_SKIP();
+  }
+
+  TagViolationReportingMode original_mode =
+      GetMemoryTaggingModeForCurrentThread();
+
+  ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode::kDisabled);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kDisabled);
+  ChangeMemoryTaggingModeForCurrentThread(
+      TagViolationReportingMode::kSynchronous);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kSynchronous);
+  ChangeMemoryTaggingModeForCurrentThread(
+      TagViolationReportingMode::kAsynchronous);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kAsynchronous);
+  ChangeMemoryTaggingModeForCurrentThread(
+      TagViolationReportingMode::kSynchronous);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kSynchronous);
+  ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode::kDisabled);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kDisabled);
+  ChangeMemoryTaggingModeForCurrentThread(
+      TagViolationReportingMode::kAsynchronous);
+  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
+            TagViolationReportingMode::kAsynchronous);
+
+  // Restore mode to original.
+  ChangeMemoryTaggingModeForCurrentThread(original_mode);
+}
+#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
+
+}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_cache.cc b/base/allocator/partition_allocator/src/partition_alloc/thread_cache.cc
new file mode 100644
index 0000000..130a7ab
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_cache.cc
@@ -0,0 +1,825 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+
+#include <sys/types.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+namespace {
+ThreadCacheRegistry g_instance;
+}  // namespace
+
+namespace tools {
+uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize] = {
+    kNeedle1, reinterpret_cast<uintptr_t>(&g_instance),
+#if BUILDFLAG(RECORD_ALLOC_INFO)
+    reinterpret_cast<uintptr_t>(&internal::g_allocs),
+#else
+    0,
+#endif
+    kNeedle2};
+}  // namespace tools
+
+namespace internal {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionTlsKey g_thread_cache_key;
+#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+thread_local ThreadCache* g_thread_cache;
+#endif
+
+}  // namespace internal
+
+namespace {
+// Since |g_thread_cache_key| is shared, make sure that no more than one
+// PartitionRoot can use it.
+static std::atomic<PartitionRoot*> g_thread_cache_root;
+
+#if BUILDFLAG(IS_WIN)
+void OnDllProcessDetach() {
+  // Very late allocations do occur (see crbug.com/1159411#c7 for instance),
+  // including during CRT teardown. This is problematic for the thread cache
+  // which relies on the CRT for TLS access for instance. This cannot be
+  // mitigated inside the thread cache (since getting to it requires querying
+  // TLS), but the PartitionRoot associated with the thread cache can be made to
+  // not use the thread cache anymore.
+  g_thread_cache_root.load(std::memory_order_relaxed)
+      ->settings.with_thread_cache = false;
+}
+#endif
+
+static bool g_thread_cache_key_created = false;
+}  // namespace
+
+uint8_t ThreadCache::global_limits_[ThreadCache::kBucketCount];
+
+// Start with the normal size, not the maximum one.
+uint16_t ThreadCache::largest_active_bucket_index_ =
+    internal::BucketIndexLookup::GetIndex(ThreadCache::kDefaultSizeThreshold);
+
+// static
+ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
+  return g_instance;
+}
+
+void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
+  internal::ScopedGuard scoped_locker(GetLock());
+  cache->next_ = nullptr;
+  cache->prev_ = nullptr;
+
+  ThreadCache* previous_head = list_head_;
+  list_head_ = cache;
+  cache->next_ = previous_head;
+  if (previous_head) {
+    previous_head->prev_ = cache;
+  }
+}
+
+void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
+  internal::ScopedGuard scoped_locker(GetLock());
+  if (cache->prev_) {
+    cache->prev_->next_ = cache->next_;
+  }
+  if (cache->next_) {
+    cache->next_->prev_ = cache->prev_;
+  }
+  if (cache == list_head_) {
+    list_head_ = cache->next_;
+  }
+}
+
+void ThreadCacheRegistry::DumpStats(bool my_thread_only,
+                                    ThreadCacheStats* stats) {
+  ThreadCache::EnsureThreadSpecificDataInitialized();
+  memset(reinterpret_cast<void*>(stats), 0, sizeof(ThreadCacheStats));
+
+  internal::ScopedGuard scoped_locker(GetLock());
+  if (my_thread_only) {
+    auto* tcache = ThreadCache::Get();
+    if (!ThreadCache::IsValid(tcache)) {
+      return;
+    }
+    tcache->AccumulateStats(stats);
+  } else {
+    ThreadCache* tcache = list_head_;
+    while (tcache) {
+      // Racy, as other threads are still allocating. This is not an issue,
+      // since we are only interested in statistics. However, this means that
+      // count is not necessarily equal to hits + misses for the various types
+      // of events.
+      tcache->AccumulateStats(stats);
+      tcache = tcache->next_;
+    }
+  }
+}
+
+void ThreadCacheRegistry::PurgeAll() {
+  auto* current_thread_tcache = ThreadCache::Get();
+
+  // May take a while, don't hold the lock while purging.
+  //
+  // In most cases, the current thread is more important than other ones. For
+  // instance in renderers, it is the main thread. It is also the only thread
+  // that we can synchronously purge.
+  //
+  // The reason why we trigger the purge for this one first is that assuming
+  // that all threads are allocating memory, they will start purging
+  // concurrently in the loop below. This will then make them all contend with
+  // the main thread for the partition lock, since it is acquired/released once
+  // per bucket. By purging the main thread first, we avoid these interferences
+  // for this thread at least.
+  if (ThreadCache::IsValid(current_thread_tcache)) {
+    current_thread_tcache->Purge();
+  }
+
+  {
+    internal::ScopedGuard scoped_locker(GetLock());
+    ThreadCache* tcache = list_head_;
+    while (tcache) {
+      PA_DCHECK(ThreadCache::IsValid(tcache));
+      // Cannot purge directly, need to ask the other thread to purge "at some
+      // point".
+      // Note that this will not work if the other thread is sleeping forever.
+      // TODO(lizeb): Handle sleeping threads.
+      if (tcache != current_thread_tcache) {
+        tcache->SetShouldPurge();
+      }
+      tcache = tcache->next_;
+    }
+  }
+}
+
+void ThreadCacheRegistry::ForcePurgeAllThreadAfterForkUnsafe() {
+  internal::ScopedGuard scoped_locker(GetLock());
+  ThreadCache* tcache = list_head_;
+  while (tcache) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+    // Before fork(), locks are acquired in the parent process. This means that
+    // a concurrent allocation in the parent which must be filled by the central
+    // allocator (i.e. the thread cache bucket is empty) will block inside the
+    // thread cache waiting for the lock to be released.
+    //
+    // In the child process, this allocation will never complete since this
+    // thread will not be resumed. However, calling |Purge()| triggers the
+    // reentrancy guard since the parent process thread was suspended from
+    // within the thread cache.
+    // Clear the guard to prevent this from crashing.
+    tcache->is_in_thread_cache_ = false;
+#endif
+    // There is a PA_DCHECK() in code called from |Purge()| checking that thread
+    // cache memory accounting is correct. Since we are after fork() and the
+    // other threads got interrupted mid-flight, this guarantee does not hold,
+    // and we get inconsistent results.  Rather than giving up on checking this
+    // invariant in regular code, reset it here so that the PA_DCHECK()
+    // passes. See crbug.com/1216964.
+    tcache->cached_memory_ = tcache->CachedMemory();
+
+    // At this point, we should call |TryPurge|. However, due to the thread
+    // cache being possibly inconsistent at this point, this may crash. Rather
+    // than crash, we'd prefer to simply not purge, even though this may leak
+    // memory in some cases.
+    //
+    // see crbug.com/1289092 for details of the crashes.
+
+    tcache = tcache->next_;
+  }
+}
+
+void ThreadCacheRegistry::SetLargestActiveBucketIndex(
+    uint8_t largest_active_bucket_index) {
+  largest_active_bucket_index_ = largest_active_bucket_index;
+}
+
+void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
+  // Two steps:
+  // - Set the global limits, which will affect newly created threads.
+  // - Enumerate all thread caches and set the limit to the global one.
+  {
+    internal::ScopedGuard scoped_locker(GetLock());
+    ThreadCache* tcache = list_head_;
+
+    // This can be called before *any* thread cache has serviced *any*
+    // allocation, which can happen in tests, and in theory in non-test code
+    // as well. In that case, there is nothing to update.
+    if (!tcache) {
+      return;
+    }
+
+    // Setting the global limit while locked, because we need |tcache->root_|.
+    ThreadCache::SetGlobalLimits(tcache->root_, multiplier);
+
+    while (tcache) {
+      PA_DCHECK(ThreadCache::IsValid(tcache));
+      for (int index = 0; index < ThreadCache::kBucketCount; index++) {
+        // This is racy, but we don't care if the limit is enforced later, and
+        // we really want to avoid atomic instructions on the fast path.
+        tcache->buckets_[index].limit.store(ThreadCache::global_limits_[index],
+                                            std::memory_order_relaxed);
+      }
+
+      tcache = tcache->next_;
+    }
+  }
+}
+
+void ThreadCacheRegistry::SetPurgingConfiguration(
+    const internal::base::TimeDelta min_purge_interval,
+    const internal::base::TimeDelta max_purge_interval,
+    const internal::base::TimeDelta default_purge_interval,
+    size_t min_cached_memory_for_purging_bytes) {
+  PA_CHECK(min_purge_interval <= default_purge_interval);
+  PA_CHECK(default_purge_interval <= max_purge_interval);
+  min_purge_interval_ = min_purge_interval;
+  max_purge_interval_ = max_purge_interval;
+  default_purge_interval_ = default_purge_interval;
+  min_cached_memory_for_purging_bytes_ = min_cached_memory_for_purging_bytes;
+  is_purging_configured_ = true;
+}
+
+void ThreadCacheRegistry::RunPeriodicPurge() {
+  if (!periodic_purge_is_initialized_) {
+    ThreadCache::EnsureThreadSpecificDataInitialized();
+    periodic_purge_is_initialized_ = true;
+  }
+
+  PA_CHECK(is_purging_configured_);
+
+  // Summing across all threads can be slow, but is necessary. Otherwise we rely
+  // on the assumption that the current thread is a good proxy for overall
+  // allocation activity. This is not the case for all process types.
+  //
+  // Since there is no synchronization with other threads, the value is stale,
+  // which is fine.
+  size_t cached_memory_approx = 0;
+  {
+    internal::ScopedGuard scoped_locker(GetLock());
+    ThreadCache* tcache = list_head_;
+    // Can run when there is no thread cache, in which case there is nothing to
+    // do, and the task should not be rescheduled. This would typically indicate
+    // a case where the thread cache was never enabled, or got disabled.
+    if (!tcache) {
+      return;
+    }
+
+    while (tcache) {
+      cached_memory_approx += tcache->cached_memory_;
+      tcache = tcache->next_;
+    }
+  }
+
+  // If cached memory is low, this means that either memory footprint is fine,
+  // or the process is mostly idle, and not allocating much since the last
+  // purge. In this case, back off. On the other hand, if there is a lot of
+  // cached memory, make purge more frequent, but always within a set frequency
+  // range.
+  //
+  // There is a potential drawback: a process that was idle for a long time and
+  // suddenly becomes very active will take some time to go back to regularly
+  // scheduled purges with a small enough interval. This is the case, for
+  // instance, of a renderer moving to the foreground. To mitigate that, if the
+  // jump in cached memory is very large, make a greater leap to faster purging.
+  if (cached_memory_approx > 10 * min_cached_memory_for_purging_bytes_) {
+    periodic_purge_next_interval_ =
+        std::min(default_purge_interval_, periodic_purge_next_interval_ / 2);
+  } else if (cached_memory_approx > 2 * min_cached_memory_for_purging_bytes_) {
+    periodic_purge_next_interval_ =
+        std::max(min_purge_interval_, periodic_purge_next_interval_ / 2);
+  } else if (cached_memory_approx < min_cached_memory_for_purging_bytes_) {
+    periodic_purge_next_interval_ =
+        std::min(max_purge_interval_, periodic_purge_next_interval_ * 2);
+  }
+
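+  // As a worked example, using the 500 KiB threshold declared in
+  // thread_cache.h (the actual value comes from SetPurgingConfiguration()):
+  // ~6 MiB of cached memory sets the next interval to the smaller of the
+  // default interval and half the current one; ~1.5 MiB halves it, but not
+  // below the minimum; under 500 KiB doubles it, but not above the maximum.
+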
+  // Make sure that the next interval stays within bounds. Even though the
+  // logic above should eventually converge to a reasonable interval, if a
+  // sleeping background thread holds onto a large amount of cached memory, then
+  // |PurgeAll()| will not free any memory from it, and the first branch above
+  // can be taken repeatedly until the interval gets very small, as the amount
+  // of cached memory cannot change between calls (since we do not purge
+  // background threads, but only ask them to purge their own cache at the next
+  // allocation).
+  periodic_purge_next_interval_ = std::clamp(
+      periodic_purge_next_interval_, min_purge_interval_, max_purge_interval_);
+
+  PurgeAll();
+}
+
+int64_t ThreadCacheRegistry::GetPeriodicPurgeNextIntervalInMicroseconds()
+    const {
+  return periodic_purge_next_interval_.InMicroseconds();
+}
+
+void ThreadCacheRegistry::ResetForTesting() {
+  periodic_purge_next_interval_ = default_purge_interval_;
+}
+
+// static
+void ThreadCache::EnsureThreadSpecificDataInitialized() {
+  // Using the registry lock to protect from concurrent initialization without
+  // adding a special-purpose lock.
+  internal::ScopedGuard scoped_locker(
+      ThreadCacheRegistry::Instance().GetLock());
+  if (g_thread_cache_key_created) {
+    return;
+  }
+
+  bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
+  PA_CHECK(ok);
+  g_thread_cache_key_created = true;
+}
+
+// static
+void ThreadCache::DeleteForTesting(ThreadCache* tcache) {
+  ThreadCache::Delete(tcache);
+}
+
+// static
+void ThreadCache::SwapForTesting(PartitionRoot* root) {
+  auto* old_tcache = ThreadCache::Get();
+  g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
+  if (old_tcache) {
+    ThreadCache::DeleteForTesting(old_tcache);
+  }
+  if (root) {
+    Init(root);
+    Create(root);
+  } else {
+#if BUILDFLAG(IS_WIN)
+    // OnDllProcessDetach accesses g_thread_cache_root which is nullptr now.
+    internal::PartitionTlsSetOnDllProcessDetach(nullptr);
+#endif
+  }
+}
+
+// static
+void ThreadCache::RemoveTombstoneForTesting() {
+  PA_CHECK(IsTombstone(Get()));
+  internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
+}
+
+// static
+void ThreadCache::Init(PartitionRoot* root) {
+#if BUILDFLAG(IS_NACL)
+  static_assert(false, "PartitionAlloc isn't supported for NaCl");
+#endif
+  PA_CHECK(root->buckets[kBucketCount - 1].slot_size ==
+           ThreadCache::kLargeSizeThreshold);
+  PA_CHECK(root->buckets[largest_active_bucket_index_].slot_size ==
+           ThreadCache::kDefaultSizeThreshold);
+
+  EnsureThreadSpecificDataInitialized();
+
+  // Make sure that only one PartitionRoot wants a thread cache.
+  PartitionRoot* expected = nullptr;
+  if (!g_thread_cache_root.compare_exchange_strong(expected, root,
+                                                   std::memory_order_seq_cst,
+                                                   std::memory_order_seq_cst)) {
+    PA_CHECK(false)
+        << "Only one PartitionRoot is allowed to have a thread cache";
+  }
+
+#if BUILDFLAG(IS_WIN)
+  internal::PartitionTlsSetOnDllProcessDetach(OnDllProcessDetach);
+#endif
+
+  SetGlobalLimits(root, kDefaultMultiplier);
+}
+
+// static
+void ThreadCache::SetGlobalLimits(PartitionRoot* root, float multiplier) {
+  size_t initial_value =
+      static_cast<size_t>(kSmallBucketBaseCount) * multiplier;
+
+  for (int index = 0; index < kBucketCount; index++) {
+    const auto& root_bucket = root->buckets[index];
+    // Invalid bucket.
+    if (!root_bucket.active_slot_spans_head) {
+      global_limits_[index] = 0;
+      continue;
+    }
+
+    // Smaller allocations are more frequent, and more performance-sensitive.
+    // Cache more small objects, and fewer larger ones, to save memory.
+    size_t slot_size = root_bucket.slot_size;
+    size_t value;
+    if (slot_size <= 128) {
+      value = initial_value;
+    } else if (slot_size <= 256) {
+      value = initial_value / 2;
+    } else if (slot_size <= 512) {
+      value = initial_value / 4;
+    } else {
+      value = initial_value / 8;
+    }
+
+    // Bare minimum so that malloc() / free() in a loop will not hit the central
+    // allocator each time.
+    constexpr size_t kMinLimit = 1;
+    // |PutInBucket()| is called on a full bucket, which should not overflow.
+    constexpr size_t kMaxLimit = std::numeric_limits<uint8_t>::max() - 1;
+    global_limits_[index] =
+        static_cast<uint8_t>(std::clamp(value, kMinLimit, kMaxLimit));
+    PA_DCHECK(global_limits_[index] >= kMinLimit);
+    PA_DCHECK(global_limits_[index] <= kMaxLimit);
+  }
+}
+
+// static
+void ThreadCache::SetLargestCachedSize(size_t size) {
+  if (size > ThreadCache::kLargeSizeThreshold) {
+    size = ThreadCache::kLargeSizeThreshold;
+  }
+  largest_active_bucket_index_ = PartitionRoot::SizeToBucketIndex(
+      size, PartitionRoot::BucketDistribution::kNeutral);
+  PA_CHECK(largest_active_bucket_index_ < kBucketCount);
+  ThreadCacheRegistry::Instance().SetLargestActiveBucketIndex(
+      largest_active_bucket_index_);
+}
+
+// static
+ThreadCache* ThreadCache::Create(PartitionRoot* root) {
+  PA_CHECK(root);
+  // See comment in thread_cache.h; this is used to make sure
+  // kThreadCacheNeedleArray is kept in the final binary.
+  PA_CHECK(tools::kThreadCacheNeedleArray[0] == tools::kNeedle1);
+
+  // Placement new and RawAlloc() are used, as otherwise when this partition is
+  // the malloc() implementation, the memory allocated for the new thread cache
+  // would make this code reentrant.
+  //
+  // This also means that deallocation must use RawFreeStatic(), hence the
+  // operator delete() implementation below.
+  size_t raw_size = root->AdjustSizeForExtrasAdd(sizeof(ThreadCache));
+  size_t usable_size;
+  bool already_zeroed;
+
+  auto* bucket = root->buckets + PartitionRoot::SizeToBucketIndex(
+                                     raw_size, root->GetBucketDistribution());
+  uintptr_t buffer = root->RawAlloc<AllocFlags::kZeroFill>(
+      bucket, raw_size, internal::PartitionPageSize(), &usable_size,
+      &already_zeroed);
+  ThreadCache* tcache =
+      new (internal::SlotStartAddr2Ptr(buffer)) ThreadCache(root);
+
+  // This may allocate.
+  internal::PartitionTlsSet(internal::g_thread_cache_key, tcache);
+#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
+  // |thread_local| variables with destructors cause issues on some platforms.
+  // Since we need a destructor (to empty the thread cache), we cannot use it
+  // directly. However, TLS accesses with |thread_local| are typically faster,
+  // as they can turn into a fixed-offset load from a register (GS/FS on Linux
+  // x86, for instance). On Windows, saving/restoring the last error increases
+  // cost as well.
+  //
+  // To still get good performance, use |thread_local| to store a raw pointer,
+  // and rely on the platform TLS to call the destructor.
+  internal::g_thread_cache = tcache;
+#endif  // PA_CONFIG(THREAD_CACHE_FAST_TLS)
+
+  return tcache;
+}
+
+ThreadCache::ThreadCache(PartitionRoot* root)
+    : should_purge_(false),
+      root_(root),
+      thread_id_(internal::base::PlatformThread::CurrentId()),
+      next_(nullptr),
+      prev_(nullptr) {
+  ThreadCacheRegistry::Instance().RegisterThreadCache(this);
+
+  memset(&stats_, 0, sizeof(stats_));
+
+  for (int index = 0; index < kBucketCount; index++) {
+    const auto& root_bucket = root->buckets[index];
+    Bucket* tcache_bucket = &buckets_[index];
+    tcache_bucket->freelist_head = nullptr;
+    tcache_bucket->count = 0;
+    tcache_bucket->limit.store(global_limits_[index],
+                               std::memory_order_relaxed);
+
+    tcache_bucket->slot_size = root_bucket.slot_size;
+    // Invalid bucket.
+    if (!root_bucket.is_valid()) {
+      // Explicitly set this, as size computations iterate over all buckets.
+      tcache_bucket->limit.store(0, std::memory_order_relaxed);
+    }
+  }
+}
+
+ThreadCache::~ThreadCache() {
+  ThreadCacheRegistry::Instance().UnregisterThreadCache(this);
+  Purge();
+}
+
+// static
+void ThreadCache::Delete(void* tcache_ptr) {
+  auto* tcache = static_cast<ThreadCache*>(tcache_ptr);
+
+  if (!IsValid(tcache)) {
+    return;
+  }
+
+#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
+  internal::g_thread_cache = nullptr;
+#else
+  internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
+#endif
+
+  auto* root = tcache->root_;
+  tcache->~ThreadCache();
+  // ThreadCache was allocated using RawAlloc() and SlotStartAddr2Ptr(), so it
+  // is not shifted by extras, but is MTE-tagged.
+  root->RawFree(internal::SlotStartPtr2Addr(tcache_ptr));
+
+#if BUILDFLAG(IS_WIN)
+  // On Windows, allocations do occur during thread/process teardown; make sure
+  // they don't resurrect the thread cache.
+  //
+  // Don't MTE-tag, as it'd mess with the sentinel value.
+  //
+  // TODO(lizeb): Investigate whether this is needed on POSIX as well.
+  internal::PartitionTlsSet(internal::g_thread_cache_key,
+                            reinterpret_cast<void*>(kTombstone));
+#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
+  internal::g_thread_cache = reinterpret_cast<ThreadCache*>(kTombstone);
+#endif
+
+#endif  // BUILDFLAG(IS_WIN)
+}
+
+ThreadCache::Bucket::Bucket() {
+  limit.store(0, std::memory_order_relaxed);
+}
+
+void ThreadCache::FillBucket(size_t bucket_index) {
+  // Filling multiple elements from the central allocator at a time has several
+  // advantages:
+  // - Amortize lock acquisition
+  // - Increase hit rate
+  // - Can improve locality, as consecutive allocations from the central
+  //   allocator will likely return close addresses, especially early on.
+  //
+  // However, do not take too many items, to prevent memory bloat.
+  //
+  // Cache filling / purging policy:
+  // We aim at keeping the buckets neither empty nor full, while minimizing
+  // requests to the central allocator.
+  //
+  // For each bucket, there is a |limit| of how many cached objects there are in
+  // the bucket, so |count| < |limit| at all times.
+  // - Clearing: limit -> limit / 2
+  // - Filling: 0 -> limit / kBatchFillRatio
+  //
+  // These thresholds are somewhat arbitrary, with these considerations:
+  // (1) Batched filling should not completely fill the bucket
+  // (2) Batched clearing should not completely clear the bucket
+  // (3) Batched filling should not be too eager
+  //
+  // If (1) and (2) do not hold, we risk oscillations of bucket filling /
+  // clearing which would greatly increase calls to the central allocator. (3)
+  // tries to keep memory usage low. So clearing half of the bucket, and filling
+  // a quarter of it are sensible defaults.
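+  //
+  // As an illustrative example (the numbers depend on the runtime
+  // configuration): with a bucket |limit| of 64 and kBatchFillRatio of 8, a
+  // batch fill brings in 8 slots at once, while, per the clearing policy
+  // above, an over-full bucket is trimmed back to half of |limit|, i.e. 32.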
+  PA_INCREMENT_COUNTER(stats_.batch_fill_count);
+
+  Bucket& bucket = buckets_[bucket_index];
+  // Some buckets may have a limit lower than |kBatchFillRatio|, but we still
+  // want to allocate at least a single slot; otherwise we wrongly return
+  // nullptr, which ends up deactivating the bucket.
+  //
+  // In these cases, we do not really batch bucket filling, but this is expected
+  // to be used for the largest buckets, where over-allocating is not advised.
+  int count = std::max(
+      1, bucket.limit.load(std::memory_order_relaxed) / kBatchFillRatio);
+
+  size_t usable_size;
+  bool is_already_zeroed;
+
+  PA_DCHECK(!root_->buckets[bucket_index].CanStoreRawSize());
+  PA_DCHECK(!root_->buckets[bucket_index].is_direct_mapped());
+
+  size_t allocated_slots = 0;
+  // Same as calling RawAlloc() |count| times, but acquires the lock only once.
+  internal::ScopedGuard guard(internal::PartitionRootLock(root_));
+  for (int i = 0; i < count; i++) {
+    // Thread cache fill should not trigger expensive operations, to not grab
+    // the lock for a long time needlessly, but also to not inflate memory
+    // usage. Indeed, without AllocFlags::kFastPathOrReturnNull, cache
+    // fill may activate a new PartitionPage, or even a new SuperPage, which is
+    // clearly not desirable.
+    //
+    // |raw_size| is set to the slot size, as we don't know it. However, it is
+    // only used for direct-mapped allocations and single-slot ones anyway,
+    // which are not handled here.
+    uintptr_t slot_start =
+        root_->AllocFromBucket<AllocFlags::kFastPathOrReturnNull |
+                               AllocFlags::kReturnNull>(
+            &root_->buckets[bucket_index],
+            root_->buckets[bucket_index].slot_size /* raw_size */,
+            internal::PartitionPageSize(), &usable_size, &is_already_zeroed);
+
+    // Either the previous allocation would require a slow path allocation, or
+    // the central allocator is out of memory. If the bucket was filled with
+    // some objects, then the allocation will be handled normally. Otherwise,
+    // this goes to the central allocator, which will service the allocation,
+    // return nullptr or crash.
+    if (!slot_start) {
+      break;
+    }
+
+    allocated_slots++;
+    PutInBucket(bucket, slot_start);
+  }
+
+  cached_memory_ += allocated_slots * bucket.slot_size;
+}
+
+void ThreadCache::ClearBucket(Bucket& bucket, size_t limit) {
+  ClearBucketHelper<true>(bucket, limit);
+}
+
+template <bool crash_on_corruption>
+void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
+  // Avoids acquiring the lock needlessly.
+  if (!bucket.count || bucket.count <= limit) {
+    return;
+  }
+
+  // This serves two purposes: error checking and avoiding stalls when grabbing
+  // the lock:
+  // 1. Error checking: this is pretty clear. Since this path is taken
+  //    infrequently, and is going to walk the entire freelist anyway, its
+  //    incremental cost should be very small. Indeed, we free from the tail of
+  //    the list, so all calls here will end up walking the entire freelist, and
+  //    incurring the same amount of cache misses.
+  // 2. Avoiding stalls: If one of the freelist accesses in |FreeAfter()|
+  //    triggers a major page fault, and we are running on a low-priority
+  //    thread, we don't want the thread to be blocked while holding the lock,
+  //    causing a priority inversion.
+  if constexpr (crash_on_corruption) {
+    bucket.freelist_head->CheckFreeListForThreadCache(bucket.slot_size);
+  }
+
+  uint8_t count_before = bucket.count;
+  if (limit == 0) {
+    FreeAfter<crash_on_corruption>(bucket.freelist_head, bucket.slot_size);
+    bucket.freelist_head = nullptr;
+  } else {
+    // Free the *end* of the list, not the head, since the head contains the
+    // most recently touched memory.
+    auto* head = bucket.freelist_head;
+    size_t items = 1;  // Cannot free the freelist head.
+    while (items < limit) {
+      head = head->GetNextForThreadCache<crash_on_corruption>(bucket.slot_size);
+      items++;
+    }
+    FreeAfter<crash_on_corruption>(
+        head->GetNextForThreadCache<crash_on_corruption>(bucket.slot_size),
+        bucket.slot_size);
+    head->SetNext(nullptr);
+  }
+  bucket.count = limit;
+  uint8_t count_after = bucket.count;
+  size_t freed_memory = (count_before - count_after) * bucket.slot_size;
+  PA_DCHECK(cached_memory_ >= freed_memory);
+  cached_memory_ -= freed_memory;
+
+  PA_DCHECK(cached_memory_ == CachedMemory());
+}
+
+template <bool crash_on_corruption>
+void ThreadCache::FreeAfter(internal::EncodedNextFreelistEntry* head,
+                            size_t slot_size) {
+  // Acquire the lock once. Deallocations from the same bucket are likely to be
+  // hitting the same cache lines in the central allocator, and lock
+  // acquisitions can be expensive.
+  internal::ScopedGuard guard(internal::PartitionRootLock(root_));
+  while (head) {
+    uintptr_t slot_start = internal::SlotStartPtr2Addr(head);
+    head = head->GetNextForThreadCache<crash_on_corruption>(slot_size);
+    root_->RawFreeLocked(slot_start);
+  }
+}
+
+void ThreadCache::ResetForTesting() {
+  stats_.alloc_count = 0;
+  stats_.alloc_hits = 0;
+  stats_.alloc_misses = 0;
+
+  stats_.alloc_miss_empty = 0;
+  stats_.alloc_miss_too_large = 0;
+
+  stats_.cache_fill_count = 0;
+  stats_.cache_fill_hits = 0;
+  stats_.cache_fill_misses = 0;
+
+  stats_.batch_fill_count = 0;
+
+  stats_.bucket_total_memory = 0;
+  stats_.metadata_overhead = 0;
+
+  Purge();
+  PA_CHECK(cached_memory_ == 0u);
+  should_purge_.store(false, std::memory_order_relaxed);
+}
+
+size_t ThreadCache::CachedMemory() const {
+  size_t total = 0;
+  for (const Bucket& bucket : buckets_) {
+    total += bucket.count * static_cast<size_t>(bucket.slot_size);
+  }
+
+  return total;
+}
+
+void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
+  stats->alloc_count += stats_.alloc_count;
+  stats->alloc_hits += stats_.alloc_hits;
+  stats->alloc_misses += stats_.alloc_misses;
+
+  stats->alloc_miss_empty += stats_.alloc_miss_empty;
+  stats->alloc_miss_too_large += stats_.alloc_miss_too_large;
+
+  stats->cache_fill_count += stats_.cache_fill_count;
+  stats->cache_fill_hits += stats_.cache_fill_hits;
+  stats->cache_fill_misses += stats_.cache_fill_misses;
+
+  stats->batch_fill_count += stats_.batch_fill_count;
+
+#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
+  for (size_t i = 0; i < internal::kNumBuckets + 1; i++) {
+    stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
+  }
+#endif  // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
+
+  // cached_memory_ is not necessarily equal to |CachedMemory()| here, since
+  // this function can be called racily from another thread, to collect
+  // statistics. Hence no DCHECK_EQ(CachedMemory(), cached_memory_).
+  stats->bucket_total_memory += cached_memory_;
+
+  stats->metadata_overhead += sizeof(*this);
+}
+
+void ThreadCache::SetShouldPurge() {
+  should_purge_.store(true, std::memory_order_relaxed);
+}
+
+void ThreadCache::Purge() {
+  PA_REENTRANCY_GUARD(is_in_thread_cache_);
+  PurgeInternal();
+}
+
+void ThreadCache::TryPurge() {
+  PA_REENTRANCY_GUARD(is_in_thread_cache_);
+  PurgeInternalHelper<false>();
+}
+
+// static
+void ThreadCache::PurgeCurrentThread() {
+  auto* tcache = Get();
+  if (IsValid(tcache)) {
+    tcache->Purge();
+  }
+}
+
+void ThreadCache::PurgeInternal() {
+  PurgeInternalHelper<true>();
+}
+
+void ThreadCache::ResetPerThreadAllocationStatsForTesting() {
+  thread_alloc_stats_ = {};
+}
+
+template <bool crash_on_corruption>
+void ThreadCache::PurgeInternalHelper() {
+  should_purge_.store(false, std::memory_order_relaxed);
+  // TODO(lizeb): Investigate whether lock acquisition should be less
+  // frequent.
+  //
+  // Note: iterate over all buckets, even the inactive ones. Since
+  // |largest_active_bucket_index_| can be lowered at runtime, there may be
+  // memory already cached in the inactive buckets. They should still be
+  // purged.
+  for (auto& bucket : buckets_) {
+    ClearBucketHelper<crash_on_corruption>(bucket, 0);
+  }
+}
+
+}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_cache.h b/base/allocator/partition_allocator/src/partition_alloc/thread_cache.h
new file mode 100644
index 0000000..2bee33d
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_cache.h
@@ -0,0 +1,671 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_CACHE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_CACHE_H_
+
+#include <atomic>
+#include <cstdint>
+#include <limits>
+#include <memory>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/gtest_prod_util.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/time/time.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket_lookup.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_freelist_entry.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_tls.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_X86_64) && BUILDFLAG(HAS_64_BIT_POINTERS)
+#include <algorithm>
+#endif
+
+namespace partition_alloc {
+
+class ThreadCache;
+
+namespace tools {
+
+// This is used from ThreadCacheInspector, which runs in a different process. It
+// scans the process memory looking for the two needles, to locate the thread
+// cache registry instance.
+//
+// These two values were chosen randomly, and in particular neither is a valid
+// pointer on most 64 bit architectures.
+#if BUILDFLAG(HAS_64_BIT_POINTERS)
+constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63;
+constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf;
+#else
+constexpr uintptr_t kNeedle1 = 0xe69e32f3;
+constexpr uintptr_t kNeedle2 = 0x9615ee1c;
+#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+// This array contains, in order:
+// - kNeedle1
+// - &ThreadCacheRegistry::Instance()
+// - &internal::g_allocs (or 0 when BUILDFLAG(RECORD_ALLOC_INFO) is off)
+// - kNeedle2
+//
+// It is referenced in the thread cache constructor to make sure it is not
+// removed by the compiler. It is also not const to make sure it ends up in
+// .data.
+constexpr size_t kThreadCacheNeedleArraySize = 4;
+extern uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize];
+
+class HeapDumper;
+class ThreadCacheInspector;
+
+}  // namespace tools
+
+namespace internal {
+
+extern PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionTlsKey g_thread_cache_key;
+
+#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
+extern PA_COMPONENT_EXPORT(
+    PARTITION_ALLOC) thread_local ThreadCache* g_thread_cache;
+#endif
+
+}  // namespace internal
+
+struct ThreadCacheLimits {
+  // When trying to conserve memory, set the thread cache limit to this.
+  static constexpr size_t kDefaultSizeThreshold = 512;
+  // 32kiB is chosen because, from local experiments, "zone" allocation in
+  // V8 is performance-sensitive, and zones can (and do) grow up to 32kiB for
+  // each individual allocation.
+  static constexpr size_t kLargeSizeThreshold = 1 << 15;
+  static_assert(kLargeSizeThreshold <= std::numeric_limits<uint16_t>::max(),
+                "");
+};
+
+constexpr internal::base::TimeDelta kMinPurgeInterval =
+    internal::base::Seconds(1);
+constexpr internal::base::TimeDelta kMaxPurgeInterval =
+    internal::base::Minutes(1);
+constexpr internal::base::TimeDelta kDefaultPurgeInterval =
+    2 * kMinPurgeInterval;
+constexpr size_t kMinCachedMemoryForPurgingBytes = 500 * 1024;
+
+// Global registry of all ThreadCache instances.
+//
+// This class cannot allocate in the (Un)registerThreadCache() functions, as
+// they are called from the ThreadCache constructor and destructor, which run
+// from within the allocator. However, the other members can allocate.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCacheRegistry {
+ public:
+  static ThreadCacheRegistry& Instance();
+  // Do not instantiate.
+  //
+  // Several things are surprising here:
+  // - The constructor is public even though this is intended to be a singleton:
+  //   we cannot use a "static local" variable in |Instance()| as this is
+  //   reached too early during CRT initialization on Windows, meaning that
+  //   static local variables don't work (as they call into the uninitialized
+  //   runtime). To sidestep that, we use a regular global variable in the .cc,
+  //   which is fine as this object's constructor is constexpr.
+  // - Marked inline so that the chromium style plugin doesn't complain that a
+  //   "complex constructor" has an inline body. This warning is disabled when
+  //   the constructor is explicitly marked "inline". Note that this is a false
+  //   positive of the plugin, since constexpr implies inline.
+  inline constexpr ThreadCacheRegistry();
+
+  void RegisterThreadCache(ThreadCache* cache);
+  void UnregisterThreadCache(ThreadCache* cache);
+  // Prints statistics for all thread caches, or this thread's only.
+  void DumpStats(bool my_thread_only, ThreadCacheStats* stats);
+  // Purges this thread's cache, and asks the other ones to trigger Purge() at
+  // a later point (during a deallocation).
+  void PurgeAll();
+
+  // Runs `PurgeAll` and updates the next interval which
+  // `GetPeriodicPurgeNextIntervalInMicroseconds` returns.
+  //
+  // Note that it is the caller's responsibility to invoke this member function
+  // periodically with an appropriate interval. This function does not schedule
+  // any task or timer.
+  void RunPeriodicPurge();
+  // Returns the appropriate interval to invoke `RunPeriodicPurge` next time.
+  int64_t GetPeriodicPurgeNextIntervalInMicroseconds() const;
+
+  // Controls the thread cache size, by setting the multiplier to a value above
+  // or below |ThreadCache::kDefaultMultiplier|.
+  void SetThreadCacheMultiplier(float multiplier);
+  void SetLargestActiveBucketIndex(uint8_t largest_active_bucket_index);
+
+  // Controls the thread cache purging configuration.
+  void SetPurgingConfiguration(
+      const internal::base::TimeDelta min_purge_interval,
+      const internal::base::TimeDelta max_purge_interval,
+      const internal::base::TimeDelta default_purge_interval,
+      size_t min_cached_memory_for_purging_bytes);
+  internal::base::TimeDelta min_purge_interval() const {
+    return min_purge_interval_;
+  }
+  internal::base::TimeDelta max_purge_interval() const {
+    return max_purge_interval_;
+  }
+  internal::base::TimeDelta default_purge_interval() const {
+    return default_purge_interval_;
+  }
+  size_t min_cached_memory_for_purging_bytes() const {
+    return min_cached_memory_for_purging_bytes_;
+  }
+  bool is_purging_configured() const { return is_purging_configured_; }
+
+  static internal::Lock& GetLock() { return Instance().lock_; }
+  // Purges all thread caches *now*. This is completely thread-unsafe, and
+  // should only be called in a post-fork() handler.
+  void ForcePurgeAllThreadAfterForkUnsafe();
+
+  void ResetForTesting();
+
+ private:
+  friend class tools::ThreadCacheInspector;
+  friend class tools::HeapDumper;
+
+  // Not using base::Lock as the object's constructor must be constexpr.
+  internal::Lock lock_;
+  ThreadCache* list_head_ PA_GUARDED_BY(GetLock()) = nullptr;
+  bool periodic_purge_is_initialized_ = false;
+  internal::base::TimeDelta min_purge_interval_;
+  internal::base::TimeDelta max_purge_interval_;
+  internal::base::TimeDelta default_purge_interval_;
+  size_t min_cached_memory_for_purging_bytes_ = 0u;
+  internal::base::TimeDelta periodic_purge_next_interval_;
+  bool is_purging_configured_ = false;
+
+  uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
+      ThreadCacheLimits::kDefaultSizeThreshold);
+};
+
+constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
+
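+// Illustrative sketch (not actual call sites): the embedder is expected to
+// configure purging once and then drive RunPeriodicPurge() itself, e.g. from a
+// repeating task. The scheduling mechanism below is assumed, not provided by
+// this class.
+//
+//   auto& registry = ThreadCacheRegistry::Instance();
+//   registry.SetPurgingConfiguration(kMinPurgeInterval, kMaxPurgeInterval,
+//                                    kDefaultPurgeInterval,
+//                                    kMinCachedMemoryForPurgingBytes);
+//   // Later, from a timer/task:
+//   registry.RunPeriodicPurge();
+//   int64_t next_us = registry.GetPeriodicPurgeNextIntervalInMicroseconds();
+//   // Re-post the purge task after |next_us| microseconds.
+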
+#if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
+#define PA_INCREMENT_COUNTER(counter) ++counter
+#else
+#define PA_INCREMENT_COUNTER(counter) \
+  do {                                \
+  } while (0)
+#endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+namespace internal {
+
+class ReentrancyGuard {
+ public:
+  explicit ReentrancyGuard(bool& flag) : flag_(flag) {
+    PA_CHECK(!flag_);
+    flag_ = true;
+  }
+
+  ~ReentrancyGuard() { flag_ = false; }
+
+ private:
+  bool& flag_;
+};
+
+}  // namespace internal
+
+#define PA_REENTRANCY_GUARD(x)      \
+  internal::ReentrancyGuard guard { \
+    x                               \
+  }
+
+#else  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+#define PA_REENTRANCY_GUARD(x) \
+  do {                         \
+  } while (0)
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
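+
+// Usage note: PA_REENTRANCY_GUARD is placed at the top of thread cache entry
+// points (e.g. Purge() and TryPurge() in thread_cache.cc), so that accidental
+// reentrancy into the cache trips a PA_CHECK() in DCHECK-enabled builds and
+// compiles away otherwise.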
+
+// Per-thread cache. *Not* threadsafe, must only be accessed from a single
+// thread.
+//
+// In practice, this is easily enforced as long as only |instance| is
+// manipulated, as it is a thread_local member. As such, any
+// |ThreadCache::instance->*()| call will necessarily be done from a single
+// thread.
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
+ public:
+  // Initializes the thread cache for |root|. May allocate, so should be called
+  // with the thread cache disabled on the partition side, and without the
+  // partition lock held.
+  //
+  // May only be called by a single PartitionRoot.
+  static void Init(PartitionRoot* root);
+
+  static void DeleteForTesting(ThreadCache* tcache);
+
+  // Deletes existing thread cache and creates a new one for |root|.
+  static void SwapForTesting(PartitionRoot* root);
+
+  // Removes the tombstone marker that would be returned by Get() otherwise.
+  static void RemoveTombstoneForTesting();
+
+  // Can be called several times, must be called before any ThreadCache
+  // interactions.
+  static void EnsureThreadSpecificDataInitialized();
+
+  static ThreadCache* Get() {
+#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
+    return internal::g_thread_cache;
+#else
+    // This region isn't MTE-tagged.
+    return reinterpret_cast<ThreadCache*>(
+        internal::PartitionTlsGet(internal::g_thread_cache_key));
+#endif
+  }
+
+  static bool IsValid(ThreadCache* tcache) {
+    // Do not MTE-untag, as it'd mess up the sentinel value.
+    return reinterpret_cast<uintptr_t>(tcache) & kTombstoneMask;
+  }
+
+  static bool IsTombstone(ThreadCache* tcache) {
+    // Do not MTE-untag, as it'd mess up the sentinel value.
+    return reinterpret_cast<uintptr_t>(tcache) == kTombstone;
+  }
+
+  // Create a new ThreadCache associated with |root|.
+  // Must be called without the partition lock held, as this may allocate.
+  static ThreadCache* Create(PartitionRoot* root);
+
+  ~ThreadCache();
+
+  // Force placement new.
+  void* operator new(size_t) = delete;
+  void* operator new(size_t, void* buffer) { return buffer; }
+  void operator delete(void* ptr) = delete;
+  ThreadCache(const ThreadCache&) = delete;
+  ThreadCache(const ThreadCache&&) = delete;
+  ThreadCache& operator=(const ThreadCache&) = delete;
+
+  // Tries to put a slot at |slot_start| into the cache.
+  // The slot comes from the bucket at index |bucket_index| from the partition
+  // this cache is for.
+  //
+  // Returns true if the slot was put in the cache, and false otherwise. The
+  // latter can happen either because the cache is full or because the
+  // allocation was too large.
+  PA_ALWAYS_INLINE bool MaybePutInCache(uintptr_t slot_start,
+                                        size_t bucket_index,
+                                        size_t* slot_size);
+
+  // Tries to allocate a memory slot from the cache.
+  // Returns 0 on failure.
+  //
+  // Has the same behavior as RawAlloc(), that is: no cookie nor ref-count
+  // handling. Sets |slot_size| to the allocated size upon success.
+  PA_ALWAYS_INLINE uintptr_t GetFromCache(size_t bucket_index,
+                                          size_t* slot_size);
+
+  // Asks this cache to trigger |Purge()| at a later point. Can be called from
+  // any thread.
+  void SetShouldPurge();
+  // Empties the cache.
+  // The Partition lock must *not* be held when calling this.
+  // Must be called from the thread this cache is for.
+  void Purge();
+  // |TryPurge| is the same as |Purge|, except that |TryPurge| will
+  // not crash if the thread cache is inconsistent. Normally inconsistency
+  // is a sign of a bug somewhere, so |Purge| should be preferred in most cases.
+  void TryPurge();
+  // Amount of cached memory for this thread's cache, in bytes.
+  size_t CachedMemory() const;
+  void AccumulateStats(ThreadCacheStats* stats) const;
+
+  // Purge the thread cache of the current thread, if one exists.
+  static void PurgeCurrentThread();
+
+  const ThreadAllocStats& thread_alloc_stats() const {
+    return thread_alloc_stats_;
+  }
+  size_t bucket_count_for_testing(size_t index) const {
+    return buckets_[index].count;
+  }
+
+  internal::base::PlatformThreadId thread_id() const { return thread_id_; }
+
+  // Sets the maximum size of allocations that may be cached by the thread
+  // cache. This applies to all threads. However, the maximum size is bounded by
+  // |kLargeSizeThreshold|.
+  static void SetLargestCachedSize(size_t size);
+
+  // Cumulative stats about *all* allocations made on the `root_` partition on
+  // this thread; that is, not only the allocations serviced by the thread cache,
+  // but all allocations, including large and direct-mapped ones. This should in
+  // theory be split into a separate PerThread data structure, but the thread
+  // cache is the only per-thread data we have as of now.
+  //
+  // TODO(lizeb): Investigate adding a proper per-thread data structure.
+  PA_ALWAYS_INLINE void RecordAllocation(size_t size);
+  PA_ALWAYS_INLINE void RecordDeallocation(size_t size);
+  void ResetPerThreadAllocationStatsForTesting();
+
+  // Fill 1 / kBatchFillRatio * bucket.limit slots at a time.
+  static constexpr uint16_t kBatchFillRatio = 8;
+
+  // Limit for the smallest bucket will be kDefaultMultiplier *
+  // kSmallBucketBaseCount by default.
+  static constexpr float kDefaultMultiplier = 2.;
+  static constexpr uint8_t kSmallBucketBaseCount = 64;
+
+  static constexpr size_t kDefaultSizeThreshold =
+      ThreadCacheLimits::kDefaultSizeThreshold;
+  static constexpr size_t kLargeSizeThreshold =
+      ThreadCacheLimits::kLargeSizeThreshold;
+
+  const ThreadCache* prev_for_testing() const
+      PA_EXCLUSIVE_LOCKS_REQUIRED(ThreadCacheRegistry::GetLock()) {
+    return prev_;
+  }
+  const ThreadCache* next_for_testing() const
+      PA_EXCLUSIVE_LOCKS_REQUIRED(ThreadCacheRegistry::GetLock()) {
+    return next_;
+  }
+
+ private:
+  friend class tools::HeapDumper;
+  friend class tools::ThreadCacheInspector;
+
+  struct Bucket {
+    internal::EncodedNextFreelistEntry* freelist_head = nullptr;
+    // Want to keep sizeof(Bucket) small, using small types.
+    uint8_t count = 0;
+    std::atomic<uint8_t> limit{};  // Can be changed from another thread.
+    uint16_t slot_size = 0;
+
+    Bucket();
+  };
+  static_assert(sizeof(Bucket) <= 2 * sizeof(void*), "Keep Bucket small.");
+
+  explicit ThreadCache(PartitionRoot* root);
+  static void Delete(void* thread_cache_ptr);
+
+  void PurgeInternal();
+  template <bool crash_on_corruption>
+  void PurgeInternalHelper();
+
+  // Fills a bucket from the central allocator.
+  void FillBucket(size_t bucket_index);
+  // Empties the |bucket| until there are at most |limit| objects in it.
+  template <bool crash_on_corruption>
+  void ClearBucketHelper(Bucket& bucket, size_t limit);
+  void ClearBucket(Bucket& bucket, size_t limit);
+  PA_ALWAYS_INLINE void PutInBucket(Bucket& bucket, uintptr_t slot_start);
+  void ResetForTesting();
+  // Releases the entire freelist starting at |head| to the root.
+  template <bool crash_on_corruption>
+  void FreeAfter(internal::EncodedNextFreelistEntry* head, size_t slot_size);
+  static void SetGlobalLimits(PartitionRoot* root, float multiplier);
+
+  static constexpr uint16_t kBucketCount =
+      internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) +
+      1;
+  static_assert(
+      kBucketCount < internal::kNumBuckets,
+      "Cannot have more cached buckets than what the allocator supports");
+
+  // On some architectures, ThreadCache::Get() can be called and return
+  // something after the thread cache has been destroyed. In this case, we set
+  // it to this value, to signal that the thread is being terminated, and the
+  // thread cache should not be used.
+  //
+  // This happens in particular on Windows, during program termination.
+  //
+  // We choose 0x1 as the value as it is an invalid pointer value, since it is
+  // not aligned, and too low. Also, checking !(ptr & kTombstoneMask) checks for
+  // nullptr and kTombstone at the same time.
+  static constexpr uintptr_t kTombstone = 0x1;
+  static constexpr uintptr_t kTombstoneMask = ~kTombstone;
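+  // E.g. a pointer value p is usable only if (p & kTombstoneMask) is non-zero,
+  // which rejects both nullptr and kTombstone with a single test.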
+
+  static uint8_t global_limits_[kBucketCount];
+  // Index of the largest active bucket. Not all processes/platforms will use
+  // all buckets, as using larger buckets increases the memory footprint.
+  //
+  // TODO(lizeb): Investigate making this per-thread rather than static, to
+  // improve locality, and open the door to per-thread settings.
+  static uint16_t largest_active_bucket_index_;
+
+  // These are at the beginning as they're accessed for each allocation.
+  uint32_t cached_memory_ = 0;
+  std::atomic<bool> should_purge_;
+  ThreadCacheStats stats_;
+  ThreadAllocStats thread_alloc_stats_;
+
+  // The buckets_ array is quite large overall, even though each Bucket is
+  // only 2 pointers.
+  Bucket buckets_[kBucketCount];
+
+  // Cold data below.
+  PartitionRoot* const root_;
+
+  const internal::base::PlatformThreadId thread_id_;
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  bool is_in_thread_cache_ = false;
+#endif
+
+  // Intrusive list since ThreadCacheRegistry::RegisterThreadCache() cannot
+  // allocate.
+  ThreadCache* next_ PA_GUARDED_BY(ThreadCacheRegistry::GetLock());
+  ThreadCache* prev_ PA_GUARDED_BY(ThreadCacheRegistry::GetLock());
+
+  friend class ThreadCacheRegistry;
+  friend class PartitionAllocThreadCacheTest;
+  friend class tools::ThreadCacheInspector;
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, Simple);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              MultipleObjectsCachedPerBucket);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              LargeAllocationsAreNotCached);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              MultipleThreadCaches);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, RecordStats);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              ThreadCacheRegistry);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              MultipleThreadCachesAccounting);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              DynamicCountPerBucket);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              DynamicCountPerBucketClamping);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              DynamicCountPerBucketMultipleThreads);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              DynamicSizeThreshold);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                              DynamicSizeThresholdPurge);
+  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, ClearFromTail);
+};
+
+PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
+                                                   size_t bucket_index,
+                                                   size_t* slot_size) {
+  PA_REENTRANCY_GUARD(is_in_thread_cache_);
+  PA_INCREMENT_COUNTER(stats_.cache_fill_count);
+
+  if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
+    PA_INCREMENT_COUNTER(stats_.cache_fill_misses);
+    return false;
+  }
+
+  auto& bucket = buckets_[bucket_index];
+
+  PA_DCHECK(bucket.count != 0 || bucket.freelist_head == nullptr);
+
+  PutInBucket(bucket, slot_start);
+  cached_memory_ += bucket.slot_size;
+  PA_INCREMENT_COUNTER(stats_.cache_fill_hits);
+
+  // Relaxed ordering: we don't care about having an up-to-date or consistent
+  // value, just want it to not change while we are using it, hence using
+  // relaxed ordering, and loading into a local variable. Without it, we are
+  // gambling that the compiler would not issue multiple loads.
+  uint8_t limit = bucket.limit.load(std::memory_order_relaxed);
+  // Batched deallocation, amortizing lock acquisitions.
+  if (PA_UNLIKELY(bucket.count > limit)) {
+    ClearBucket(bucket, limit / 2);
+  }
+
+  if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) {
+    PurgeInternal();
+  }
+
+  *slot_size = bucket.slot_size;
+  return true;
+}
+
+PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
+                                                     size_t* slot_size) {
+#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
+  stats_.allocs_per_bucket_[bucket_index]++;
+#endif
+
+  PA_REENTRANCY_GUARD(is_in_thread_cache_);
+  PA_INCREMENT_COUNTER(stats_.alloc_count);
+  // Only handle "small" allocations.
+  if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
+    PA_INCREMENT_COUNTER(stats_.alloc_miss_too_large);
+    PA_INCREMENT_COUNTER(stats_.alloc_misses);
+    return 0;
+  }
+
+  auto& bucket = buckets_[bucket_index];
+  if (PA_LIKELY(bucket.freelist_head)) {
+    PA_INCREMENT_COUNTER(stats_.alloc_hits);
+  } else {
+    PA_DCHECK(bucket.count == 0);
+    PA_INCREMENT_COUNTER(stats_.alloc_miss_empty);
+    PA_INCREMENT_COUNTER(stats_.alloc_misses);
+
+    FillBucket(bucket_index);
+
+    // Very unlikely; this means that the central allocator is out of memory.
+    // Let it deal with it (may return 0, may crash).
+    if (PA_UNLIKELY(!bucket.freelist_head)) {
+      return 0;
+    }
+  }
+
+  PA_DCHECK(bucket.count != 0);
+  internal::EncodedNextFreelistEntry* entry = bucket.freelist_head;
+  // TODO(lizeb): Consider removing once crbug.com/1382658 is fixed.
+#if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \
+    BUILDFLAG(HAS_64_BIT_POINTERS)
+  // x86_64 architecture now supports 57 bits of address space, as of Ice Lake
+  // for Intel. However, Chrome OS systems do not ship with kernel support for
+  // it, but with 48 bits, so all canonical addresses have the upper 16 bits
+  // zeroed (17 in practice, since the upper half of address space is reserved
+  // by the kernel).
+  constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1;
+  PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask));
+#endif  // BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) &&
+        // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  // Passes the bucket size to |GetNext()|, so that in case of freelist
+  // corruption, we know the bucket size that led to the crash, helping to
+  // narrow down the search for the culprit. |bucket| was touched just now, so
+  // this does not introduce another cache miss.
+  internal::EncodedNextFreelistEntry* next =
+      entry->GetNextForThreadCache<true>(bucket.slot_size);
+  PA_DCHECK(entry != next);
+  bucket.count--;
+  PA_DCHECK(bucket.count != 0 || !next);
+  bucket.freelist_head = next;
+  *slot_size = bucket.slot_size;
+
+  PA_DCHECK(cached_memory_ >= bucket.slot_size);
+  cached_memory_ -= bucket.slot_size;
+
+  return internal::SlotStartPtr2Addr(entry);
+}
+
+PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
+                                               uintptr_t slot_start) {
+#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \
+    BUILDFLAG(HAS_64_BIT_POINTERS)
+  // We see freelist corruption crashes happening in the wild.  These are likely
+  // due to out-of-bounds accesses in the previous slot, or to a Use-After-Free
+  // somewhere in the code.
+  //
+  // The issue is that we detect the UaF far away from the place where it
+  // happens. As a consequence, we should try to make incorrect code crash as
+  // early as possible. Poisoning memory at free() time works for UaF, but it
+  // was seen in the past to incur a high performance cost.
+  //
+  // Here, only poison the current cacheline, which we are touching anyway.
+  // TODO(lizeb): Make sure this does not hurt performance.
+
+  // Everything below requires this alignment.
+  static_assert(internal::kAlignment == 16, "");
+
+  // The pointer is always 16-byte aligned, so its start address is always 0
+  // modulo 16. Its distance to the next cacheline, in bytes, is
+  //   `64 - ((slot_start & 63) / 16) * 16`
+  static_assert(
+      internal::kPartitionCachelineSize == 64,
+      "The computation below assumes that cache lines are 64 bytes long.");
+  int distance_to_next_cacheline_in_16_bytes = 4 - ((slot_start >> 4) & 3);
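+  // For example, if slot_start % 64 == 48, then ((slot_start >> 4) & 3) == 3,
+  // so the distance to the next cacheline is 4 - 3 = 1 unit, i.e. 16 bytes.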
+  int slot_size_remaining_in_16_bytes =
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+      // When BRP is on in the "previous slot" mode, this slot may have a BRP
+      // ref-count of the next, potentially allocated slot. Make sure we don't
+      // overwrite it.
+      (bucket.slot_size - sizeof(PartitionRefCount)) / 16;
+#else
+      bucket.slot_size / 16;
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+
+  slot_size_remaining_in_16_bytes = std::min(
+      slot_size_remaining_in_16_bytes, distance_to_next_cacheline_in_16_bytes);
+
+  static const uint32_t poison_16_bytes[4] = {0xbadbad00, 0xbadbad00,
+                                              0xbadbad00, 0xbadbad00};
+  // Give a hint to the compiler in the hope that it will vectorize the loop.
+#if PA_HAS_BUILTIN(__builtin_assume_aligned)
+  void* slot_start_tagged = __builtin_assume_aligned(
+      internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment);
+#else
+  void* slot_start_tagged = internal::SlotStartAddr2Ptr(slot_start);
+#endif
+  uint32_t* address_aligned = static_cast<uint32_t*>(slot_start_tagged);
+  for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) {
+    // Clang will expand the memcpy to a 16-byte write (movups on x86).
+    memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes));
+    address_aligned += 4;
+  }
+#endif  // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
+        // BUILDFLAG(HAS_64_BIT_POINTERS)
+
+  auto* entry =
+      internal::EncodedNextFreelistEntry::EmplaceAndInitForThreadCache(
+          slot_start, bucket.freelist_head);
+  bucket.freelist_head = entry;
+  bucket.count++;
+}
+
+PA_ALWAYS_INLINE void ThreadCache::RecordAllocation(size_t size) {
+  thread_alloc_stats_.alloc_count++;
+  thread_alloc_stats_.alloc_total_size += size;
+}
+
+PA_ALWAYS_INLINE void ThreadCache::RecordDeallocation(size_t size) {
+  thread_alloc_stats_.dealloc_count++;
+  thread_alloc_stats_.dealloc_total_size += size;
+}
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_CACHE_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_cache_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/thread_cache_unittest.cc
new file mode 100644
index 0000000..3923756
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_cache_unittest.cc
@@ -0,0 +1,1532 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
+
+#include <algorithm>
+#include <atomic>
+#include <vector>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/extended_api.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/thread_annotations.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_lock.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
+// cannot test the thread cache.
+//
+// Also, the thread cache is not supported on all platforms.
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
+    PA_CONFIG(THREAD_CACHE_SUPPORTED)
+
+namespace partition_alloc {
+
+using BucketDistribution = PartitionRoot::BucketDistribution;
+namespace {
+
+constexpr size_t kSmallSize = 33;  // Must be large enough to fit extras.
+constexpr size_t kDefaultCountForSmallBucket =
+    ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
+constexpr size_t kFillCountForSmallBucket =
+    kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;
+
+constexpr size_t kMediumSize = 200;
+constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
+constexpr size_t kFillCountForMediumBucket =
+    kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;
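+// With the ThreadCache defaults (multiplier 2, base count 64, batch fill ratio
+// 8), this gives kDefaultCountForSmallBucket = 128, kFillCountForSmallBucket =
+// 16, kDefaultCountForMediumBucket = 64 and kFillCountForMediumBucket = 8.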
+
+static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");
+
+class DeltaCounter {
+ public:
+  explicit DeltaCounter(uint64_t& value)
+      : current_value_(value), initial_value_(value) {}
+  void Reset() { initial_value_ = current_value_; }
+  uint64_t Delta() const { return current_value_ - initial_value_; }
+
+ private:
+  uint64_t& current_value_;
+  uint64_t initial_value_;
+};
+
+// Forbid extras, since they make it harder to find out which bucket is used.
+std::unique_ptr<PartitionAllocatorForTesting> CreateAllocator() {
+  std::unique_ptr<PartitionAllocatorForTesting> allocator =
+      std::make_unique<PartitionAllocatorForTesting>(PartitionOptions {
+        .aligned_alloc = PartitionOptions::kAllowed,
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+        .thread_cache = PartitionOptions::kEnabled,
+#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+        .star_scan_quarantine = PartitionOptions::kAllowed,
+      });
+  allocator->root()->UncapEmptySlotSpanMemoryForTesting();
+
+  return allocator;
+}
+
+}  // namespace
+
+class PartitionAllocThreadCacheTest
+    : public ::testing::TestWithParam<PartitionRoot::BucketDistribution> {
+ public:
+  PartitionAllocThreadCacheTest()
+      : allocator_(CreateAllocator()), scope_(allocator_->root()) {}
+
+  ~PartitionAllocThreadCacheTest() override {
+    ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
+
+    // Clean up the global state so the next test can recreate ThreadCache.
+    if (ThreadCache::IsTombstone(ThreadCache::Get())) {
+      ThreadCache::RemoveTombstoneForTesting();
+    }
+  }
+
+ protected:
+  void SetUp() override {
+    PartitionRoot* root = allocator_->root();
+    switch (GetParam()) {
+      case BucketDistribution::kNeutral:
+        root->ResetBucketDistributionForTesting();
+        break;
+      case BucketDistribution::kDenser:
+        root->SwitchToDenserBucketDistribution();
+        break;
+    }
+
+    ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+        ThreadCache::kDefaultMultiplier);
+    ThreadCacheRegistry::Instance().SetPurgingConfiguration(
+        kMinPurgeInterval, kMaxPurgeInterval, kDefaultPurgeInterval,
+        kMinCachedMemoryForPurgingBytes);
+    ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
+
+    // Make sure that enough slot spans have been touched, otherwise cache fill
+    // becomes unpredictable (because it doesn't take slow paths in the
+    // allocator), which is an issue for tests.
+    FillThreadCacheAndReturnIndex(kSmallSize, 1000);
+    FillThreadCacheAndReturnIndex(kMediumSize, 1000);
+
+    // There have been allocations, so a thread cache has been created.
+    auto* tcache = root->thread_cache_for_testing();
+    ASSERT_TRUE(tcache);
+
+    ThreadCacheRegistry::Instance().ResetForTesting();
+    tcache->ResetForTesting();
+  }
+
+  void TearDown() override {
+    auto* tcache = root()->thread_cache_for_testing();
+    ASSERT_TRUE(tcache);
+    tcache->Purge();
+
+    ASSERT_EQ(root()->get_total_size_of_allocated_bytes(),
+              GetBucketSizeForThreadCache());
+  }
+
+  PartitionRoot* root() { return allocator_->root(); }
+
+  // Returns the size of the smallest bucket fitting an allocation of
+  // |sizeof(ThreadCache)| bytes.
+  size_t GetBucketSizeForThreadCache() {
+    size_t tc_bucket_index = root()->SizeToBucketIndex(
+        sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
+    auto* tc_bucket = &root()->buckets[tc_bucket_index];
+    return tc_bucket->slot_size;
+  }
+
+  static size_t SizeToIndex(size_t size) {
+    return PartitionRoot::SizeToBucketIndex(size, GetParam());
+  }
+
+  size_t FillThreadCacheAndReturnIndex(size_t raw_size, size_t count = 1) {
+    uint16_t bucket_index = SizeToIndex(raw_size);
+    std::vector<void*> allocated_data;
+
+    for (size_t i = 0; i < count; ++i) {
+      allocated_data.push_back(
+          root()->Alloc(root()->AdjustSizeForExtrasSubtract(raw_size), ""));
+    }
+    for (void* ptr : allocated_data) {
+      root()->Free(ptr);
+    }
+
+    return bucket_index;
+  }
+
+  void FillThreadCacheWithMemory(size_t target_cached_memory) {
+    for (int batch : {1, 2, 4, 8, 16}) {
+      for (size_t raw_size = root()->AdjustSizeForExtrasAdd(1);
+           raw_size <= ThreadCache::kLargeSizeThreshold; raw_size++) {
+        FillThreadCacheAndReturnIndex(raw_size, batch);
+
+        if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
+          return;
+        }
+      }
+    }
+
+    ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
+  }
+
+  std::unique_ptr<PartitionAllocatorForTesting> allocator_;
+  internal::ThreadCacheProcessScopeForTesting scope_;
+};
+
+INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
+                         PartitionAllocThreadCacheTest,
+                         ::testing::Values(BucketDistribution::kNeutral,
+                                           BucketDistribution::kDenser));
+
+TEST_P(PartitionAllocThreadCacheTest, Simple) {
+  // There is a cache.
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_TRUE(tcache);
+  DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
+
+  void* ptr =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
+  ASSERT_TRUE(ptr);
+
+  uint16_t index = SizeToIndex(kSmallSize);
+  EXPECT_EQ(kFillCountForSmallBucket - 1,
+            tcache->bucket_count_for_testing(index));
+
+  root()->Free(ptr);
+  // Freeing fills the thread cache.
+  EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
+
+  void* ptr2 =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
+  // MTE-untag, because Free() changes tag.
+  EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
+  // Allocated from the thread cache.
+  EXPECT_EQ(kFillCountForSmallBucket - 1,
+            tcache->bucket_count_for_testing(index));
+
+  EXPECT_EQ(1u, batch_fill_counter.Delta());
+
+  root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
+  void* ptr =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
+  ASSERT_TRUE(ptr);
+
+  // There is a cache.
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_TRUE(tcache);
+
+  uint16_t index = SizeToIndex(kSmallSize);
+  EXPECT_EQ(kFillCountForSmallBucket - 1,
+            tcache->bucket_count_for_testing(index));
+
+  root()->Free(ptr);
+  // Freeing fills the thread cache.
+  EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
+
+  void* ptr2 =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize + 1), "");
+  // MTE-untag, because Free() changes tag.
+  EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
+  // Allocated from the thread cache.
+  EXPECT_EQ(kFillCountForSmallBucket - 1,
+            tcache->bucket_count_for_testing(index));
+  root()->Free(ptr2);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
+  auto* tcache = root()->thread_cache_for_testing();
+  DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
+  size_t bucket_index =
+      FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
+  EXPECT_EQ(2 * kFillCountForMediumBucket,
+            tcache->bucket_count_for_testing(bucket_index));
+  // 2 batches, since there were more than |kFillCountForMediumBucket|
+  // allocations.
+  EXPECT_EQ(2u, batch_fill_counter.Delta());
+}
+
+TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
+  size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, Purge) {
+  size_t allocations = 10;
+  size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
+  auto* tcache = root()->thread_cache_for_testing();
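+  // With 10 allocations and a fill count of 8 for the medium bucket, the
+  // expected cached count is (1 + 10 / 8) * 8 = 16 objects.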
+  EXPECT_EQ(
+      (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
+      tcache->bucket_count_for_testing(bucket_index));
+  tcache->Purge();
+  EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
+}
+
+TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
+  PartitionAllocatorForTesting allocator(PartitionOptions{
+      .aligned_alloc = PartitionOptions::kAllowed,
+      .star_scan_quarantine = PartitionOptions::kAllowed,
+  });
+
+  size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
+  void* ptr = allocator.root()->Alloc(
+      allocator.root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
+  ASSERT_TRUE(ptr);
+
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_EQ(kFillCountForSmallBucket,
+            tcache->bucket_count_for_testing(bucket_index));
+
+  allocator.root()->Free(ptr);
+  EXPECT_EQ(kFillCountForSmallBucket,
+            tcache->bucket_count_for_testing(bucket_index));
+}
+
+// Required to record hits and misses.
+#if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
+TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
+  auto* tcache = root()->thread_cache_for_testing();
+  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
+  DeltaCounter alloc_miss_too_large_counter{
+      tcache->stats_.alloc_miss_too_large};
+  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
+  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
+
+  FillThreadCacheAndReturnIndex(100 * 1024);
+  tcache = root()->thread_cache_for_testing();
+  EXPECT_EQ(1u, alloc_miss_counter.Delta());
+  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
+  EXPECT_EQ(1u, cache_fill_counter.Delta());
+  EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
+}
+#endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
+
+TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
+  FillThreadCacheAndReturnIndex(1024 * 1024);
+  // The line above would crash due to an out-of-bounds access if this wasn't
+  // properly handled.
+}
+
+// This tests that Realloc properly handles bookkeeping, specifically the path
+// that reallocates in place.
+TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
+  root()->ResetBookkeepingForTesting();
+
+  size_t expected_allocated_size = root()->get_total_size_of_allocated_bytes();
+
+  EXPECT_EQ(expected_allocated_size,
+            root()->get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
+
+  void* ptr = root()->Alloc(
+      root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed), "");
+
+  EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
+            root()->get_total_size_of_allocated_bytes());
+
+  void* ptr2 = root()->Realloc(
+      ptr, root()->AdjustSizeForExtrasSubtract(9 * internal::kMaxBucketed), "");
+
+  ASSERT_EQ(ptr, ptr2);
+  EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
+            root()->get_total_size_of_allocated_bytes());
+
+  ptr2 = root()->Realloc(
+      ptr, root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed),
+      "");
+
+  ASSERT_EQ(ptr, ptr2);
+  EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
+            root()->get_total_size_of_allocated_bytes());
+
+  root()->Free(ptr);
+}
+
+namespace {
+
+size_t FillThreadCacheAndReturnIndex(PartitionRoot* root,
+                                     size_t size,
+                                     BucketDistribution bucket_distribution,
+                                     size_t count = 1) {
+  uint16_t bucket_index =
+      PartitionRoot::SizeToBucketIndex(size, bucket_distribution);
+  std::vector<void*> allocated_data;
+
+  for (size_t i = 0; i < count; ++i) {
+    allocated_data.push_back(
+        root->Alloc(root->AdjustSizeForExtrasSubtract(size), ""));
+  }
+  for (void* ptr : allocated_data) {
+    root->Free(ptr);
+  }
+
+  return bucket_index;
+}
+
+// TODO(1151236): To remove the callback dependency from partition allocator's
+// DEPS, rewrite the tests without BindLambdaForTesting and RepeatingClosure.
+// However, this makes it a little annoying to add more tests using their own
+// threads. We need to support an easier way to implement tests using
+// PlatformThreadForTesting::Create().
+class ThreadDelegateForMultipleThreadCaches
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
+                                        PartitionRoot* root,
+                                        BucketDistribution bucket_distribution)
+      : parent_thread_tcache_(parent_thread_cache),
+        root_(root),
+        bucket_distribution_(bucket_distribution) {}
+
+  void ThreadMain() override {
+    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
+    FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
+    auto* tcache = root_->thread_cache_for_testing();
+    EXPECT_TRUE(tcache);
+
+    EXPECT_NE(parent_thread_tcache_, tcache);
+  }
+
+ private:
+  ThreadCache* parent_thread_tcache_ = nullptr;
+  PartitionRoot* root_ = nullptr;
+  PartitionRoot::BucketDistribution bucket_distribution_;
+};
+
+}  // namespace
+
+TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
+  FillThreadCacheAndReturnIndex(kMediumSize);
+  auto* parent_thread_tcache = root()->thread_cache_for_testing();
+  ASSERT_TRUE(parent_thread_tcache);
+
+  ThreadDelegateForMultipleThreadCaches delegate(parent_thread_tcache, root(),
+                                                 GetParam());
+
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+}
+
+namespace {
+
+class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForThreadCacheReclaimedWhenThreadExits(PartitionRoot* root,
+                                                       void*& other_thread_ptr)
+      : root_(root), other_thread_ptr_(other_thread_ptr) {}
+
+  void ThreadMain() override {
+    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
+    other_thread_ptr_ =
+        root_->Alloc(root_->AdjustSizeForExtrasSubtract(kMediumSize), "");
+    root_->Free(other_thread_ptr_);
+    // |other_thread_ptr| is now in the thread cache.
+  }
+
+ private:
+  PartitionRoot* root_ = nullptr;
+  void*& other_thread_ptr_;
+};
+
+}  // namespace
+
+TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
+  // Make sure that there is always at least one object allocated in the test
+  // bucket, so that the PartitionPage is not reclaimed.
+  //
+  // Allocate enough objects to force a cache fill at the next allocation.
+  std::vector<void*> tmp;
+  for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
+    tmp.push_back(
+        root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), ""));
+  }
+
+  void* other_thread_ptr = nullptr;
+  ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
+      root(), other_thread_ptr);
+
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+
+  void* this_thread_ptr =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
+  // |other_thread_ptr| was returned to the central allocator, and is returned
+  // here, as it comes from the freelist.
+  EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
+  root()->Free(other_thread_ptr);
+
+  for (void* ptr : tmp) {
+    root()->Free(ptr);
+  }
+}
+
+namespace {
+
+class ThreadDelegateForThreadCacheRegistry
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
+                                       PartitionRoot* root,
+                                       BucketDistribution bucket_distribution)
+      : parent_thread_tcache_(parent_thread_cache),
+        root_(root),
+        bucket_distribution_(bucket_distribution) {}
+
+  void ThreadMain() override {
+    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
+    FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
+    auto* tcache = root_->thread_cache_for_testing();
+    EXPECT_TRUE(tcache);
+
+    internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
+    EXPECT_EQ(tcache->prev_for_testing(), nullptr);
+    EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
+  }
+
+ private:
+  ThreadCache* parent_thread_tcache_ = nullptr;
+  PartitionRoot* root_ = nullptr;
+  BucketDistribution bucket_distribution_;
+};
+
+}  // namespace
+
+TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
+  auto* parent_thread_tcache = root()->thread_cache_for_testing();
+  ASSERT_TRUE(parent_thread_tcache);
+
+#if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
+      BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
+    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  // iOS and macOS 15 create worker threads internally (start_wqthread), so
+  // thread caches are created for those worker threads, because they allocate
+  // memory for initialization (_dispatch_calloc is invoked). We cannot assume
+  // that there is only one thread cache here.
+
+  // Regarding Linux, ChromeOS and Android, some other tests may create
+  // non-joinable threads. E.g. FilePathWatcherTest will create a non-joinable
+  // thread at InotifyReader::StartThread(). That thread will still be running
+  // after the tests are finished, and will break the assumption that only the
+  // main thread exists here.
+  {
+    internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
+    EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
+    EXPECT_EQ(parent_thread_tcache->next_, nullptr);
+  }
+#endif
+
+  ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root(),
+                                                GetParam());
+
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+
+#if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
+      BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
+    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
+  EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
+  EXPECT_EQ(parent_thread_tcache->next_, nullptr);
+#endif
+}
+
+#if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
+TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
+  auto* tcache = root()->thread_cache_for_testing();
+  DeltaCounter alloc_counter{tcache->stats_.alloc_count};
+  DeltaCounter alloc_hits_counter{tcache->stats_.alloc_hits};
+  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
+
+  DeltaCounter alloc_miss_empty_counter{tcache->stats_.alloc_miss_empty};
+
+  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
+  DeltaCounter cache_fill_hits_counter{tcache->stats_.cache_fill_hits};
+  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
+
+  // Cache has been purged, first allocation is a miss.
+  void* data =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
+  EXPECT_EQ(1u, alloc_counter.Delta());
+  EXPECT_EQ(1u, alloc_miss_counter.Delta());
+  EXPECT_EQ(0u, alloc_hits_counter.Delta());
+
+  // Cache fill worked.
+  root()->Free(data);
+  EXPECT_EQ(1u, cache_fill_counter.Delta());
+  EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
+  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
+
+  tcache->Purge();
+  cache_fill_counter.Reset();
+  // Buckets are never full, fill always succeeds.
+  size_t allocations = 10;
+  size_t bucket_index = FillThreadCacheAndReturnIndex(
+      kMediumSize, kDefaultCountForMediumBucket + allocations);
+  EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
+            cache_fill_counter.Delta());
+  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
+
+  // Memory footprint.
+  ThreadCacheStats stats;
+  ThreadCacheRegistry::Instance().DumpStats(true, &stats);
+  // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
+  // above the limit (-1), then refilled by batches (1 + floor(allocations /
+  // kFillCountForMediumBucket) times).
+  size_t expected_count =
+      kDefaultCountForMediumBucket / 2 - 1 +
+      (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
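+  // With the defaults, this is 64 / 2 - 1 + (1 + 10 / 8) * 8 = 31 + 16 = 47.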
+  EXPECT_EQ(root()->buckets[bucket_index].slot_size * expected_count,
+            stats.bucket_total_memory);
+  EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
+}
+
+namespace {
+
+class ThreadDelegateForMultipleThreadCachesAccounting
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForMultipleThreadCachesAccounting(
+      PartitionRoot* root,
+      const ThreadCacheStats& wqthread_stats,
+      int alloc_count,
+      BucketDistribution bucket_distribution)
+      : root_(root),
+        bucket_distribution_(bucket_distribution),
+        wqthread_stats_(wqthread_stats),
+        alloc_count_(alloc_count) {}
+
+  void ThreadMain() override {
+    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
+    size_t bucket_index =
+        FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
+
+    ThreadCacheStats stats;
+    ThreadCacheRegistry::Instance().DumpStats(false, &stats);
+    // 2x: one cache for this thread and one for the parent thread.
+    EXPECT_EQ(
+        2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
+        stats.bucket_total_memory - wqthread_stats_.bucket_total_memory);
+    EXPECT_EQ(2 * sizeof(ThreadCache),
+              stats.metadata_overhead - wqthread_stats_.metadata_overhead);
+
+    ThreadCacheStats this_thread_cache_stats{};
+    root_->thread_cache_for_testing()->AccumulateStats(
+        &this_thread_cache_stats);
+    EXPECT_EQ(alloc_count_ + this_thread_cache_stats.alloc_count,
+              stats.alloc_count - wqthread_stats_.alloc_count);
+  }
+
+ private:
+  PartitionRoot* root_ = nullptr;
+  BucketDistribution bucket_distribution_;
+  const ThreadCacheStats wqthread_stats_;
+  const int alloc_count_;
+};
+
+}  // namespace
+
+TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
+  ThreadCacheStats wqthread_stats{0};
+#if (BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS) || \
+     BUILDFLAG(IS_LINUX)) &&                                                   \
+    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  {
+    // iOS and macOS 15 create worker threads internally (start_wqthread), so
+    // thread caches are created for those worker threads, because they
+    // allocate memory for initialization (_dispatch_calloc is invoked). We
+    // need to count the worker threads created by the iOS and Mac system.
+
+    // Regarding Linux, ChromeOS and Android, some other tests may create
+    // non-joinable threads. E.g. FilePathWatcherTest will create a
+    // non-joinable thread at InotifyReader::StartThread(). That thread will
+    // still be running after the tests are finished. We need to count those
+    // threads here.
+    ThreadCacheRegistry::Instance().DumpStats(false, &wqthread_stats);
+
+    // Remove this thread's thread cache stats from wqthread_stats.
+    ThreadCacheStats this_stats;
+    ThreadCacheRegistry::Instance().DumpStats(true, &this_stats);
+
+    wqthread_stats.alloc_count -= this_stats.alloc_count;
+    wqthread_stats.metadata_overhead -= this_stats.metadata_overhead;
+    wqthread_stats.bucket_total_memory -= this_stats.bucket_total_memory;
+  }
+#endif
+  FillThreadCacheAndReturnIndex(kMediumSize);
+  uint64_t alloc_count = root()->thread_cache_for_testing()->stats_.alloc_count;
+
+  ThreadDelegateForMultipleThreadCachesAccounting delegate(
+      root(), wqthread_stats, alloc_count, GetParam());
+
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+}
+
+#endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
+
+// TODO(https://crbug.com/1287799): Flaky on IOS.
+#if BUILDFLAG(IS_IOS)
+#define MAYBE_PurgeAll DISABLED_PurgeAll
+#else
+#define MAYBE_PurgeAll PurgeAll
+#endif
+
+namespace {
+
+class ThreadDelegateForPurgeAll
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForPurgeAll(PartitionRoot* root,
+                            ThreadCache*& other_thread_tcache,
+                            std::atomic<bool>& other_thread_started,
+                            std::atomic<bool>& purge_called,
+                            int bucket_index,
+                            BucketDistribution bucket_distribution)
+      : root_(root),
+        other_thread_tcache_(other_thread_tcache),
+        other_thread_started_(other_thread_started),
+        purge_called_(purge_called),
+        bucket_index_(bucket_index),
+        bucket_distribution_(bucket_distribution) {}
+
+  void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
+    FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
+    other_thread_tcache_ = root_->thread_cache_for_testing();
+
+    other_thread_started_.store(true, std::memory_order_release);
+    while (!purge_called_.load(std::memory_order_acquire)) {
+    }
+
+    // Purge() was not triggered from the other thread.
+    EXPECT_EQ(kFillCountForSmallBucket,
+              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
+    // Allocations do not trigger Purge().
+    void* data =
+        root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
+    EXPECT_EQ(kFillCountForSmallBucket - 1,
+              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
+    // But deallocations do.
+    root_->Free(data);
+    EXPECT_EQ(0u,
+              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
+  }
+
+ private:
+  PartitionRoot* root_ = nullptr;
+  ThreadCache*& other_thread_tcache_;
+  std::atomic<bool>& other_thread_started_;
+  std::atomic<bool>& purge_called_;
+  const int bucket_index_;
+  BucketDistribution bucket_distribution_;
+};
+
+}  // namespace
+
+TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
+PA_NO_THREAD_SAFETY_ANALYSIS {
+  std::atomic<bool> other_thread_started{false};
+  std::atomic<bool> purge_called{false};
+
+  size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
+  ThreadCache* this_thread_tcache = root()->thread_cache_for_testing();
+  ThreadCache* other_thread_tcache = nullptr;
+
+  ThreadDelegateForPurgeAll delegate(root(), other_thread_tcache,
+                                     other_thread_started, purge_called,
+                                     bucket_index, GetParam());
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+
+  while (!other_thread_started.load(std::memory_order_acquire)) {
+  }
+
+  EXPECT_EQ(kFillCountForSmallBucket,
+            this_thread_tcache->bucket_count_for_testing(bucket_index));
+  EXPECT_EQ(kFillCountForSmallBucket,
+            other_thread_tcache->bucket_count_for_testing(bucket_index));
+
+  ThreadCacheRegistry::Instance().PurgeAll();
+  // This thread is synchronously purged.
+  EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
+  // Not the other one.
+  EXPECT_EQ(kFillCountForSmallBucket,
+            other_thread_tcache->bucket_count_for_testing(bucket_index));
+
+  purge_called.store(true, std::memory_order_release);
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
+  auto& registry = ThreadCacheRegistry::Instance();
+  auto NextInterval = [&registry]() {
+    return internal::base::Microseconds(
+        registry.GetPeriodicPurgeNextIntervalInMicroseconds());
+  };
+
+  EXPECT_EQ(NextInterval(), registry.default_purge_interval());
+
+  // Small amount of memory, the period gets longer.
+  auto* tcache = ThreadCache::Get();
+  ASSERT_LT(tcache->CachedMemory(),
+            registry.min_cached_memory_for_purging_bytes());
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
+
+  // Check that the purge interval is clamped at the maximum value.
+  while (NextInterval() < registry.max_purge_interval()) {
+    registry.RunPeriodicPurge();
+  }
+  registry.RunPeriodicPurge();
+
+  // Not enough memory to decrease the interval.
+  FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() + 1);
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), registry.max_purge_interval());
+
+  FillThreadCacheWithMemory(2 * registry.min_cached_memory_for_purging_bytes() +
+                            1);
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
+
+  // Enough memory, interval doesn't change.
+  FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes());
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
+
+  // No cached memory, increase the interval.
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), registry.max_purge_interval());
+
+  // Cannot test the very large size with only one thread; this is tested below
+  // in the multiple threads test.
+}
+
+namespace {
+
+void FillThreadCacheWithMemory(PartitionRoot* root,
+                               size_t target_cached_memory,
+                               BucketDistribution bucket_distribution) {
+  for (int batch : {1, 2, 4, 8, 16}) {
+    for (size_t allocation_size = 1;
+         allocation_size <= ThreadCache::kLargeSizeThreshold;
+         allocation_size++) {
+      FillThreadCacheAndReturnIndex(
+          root, root->AdjustSizeForExtrasAdd(allocation_size),
+          bucket_distribution, batch);
+
+      if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
+        return;
+      }
+    }
+  }
+
+  ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
+}
+
+class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
+      PartitionRoot* root,
+      std::atomic<int>& allocations_done,
+      std::atomic<bool>& can_finish,
+      BucketDistribution bucket_distribution)
+      : root_(root),
+        allocations_done_(allocations_done),
+        can_finish_(can_finish),
+        bucket_distribution_(bucket_distribution) {}
+
+  void ThreadMain() override {
+    FillThreadCacheWithMemory(root_,
+                              5 * ThreadCacheRegistry::Instance()
+                                      .min_cached_memory_for_purging_bytes(),
+                              bucket_distribution_);
+    allocations_done_.fetch_add(1, std::memory_order_release);
+
+    // This thread needs to be alive when the next periodic purge task runs.
+    while (!can_finish_.load(std::memory_order_acquire)) {
+    }
+  }
+
+ private:
+  PartitionRoot* root_ = nullptr;
+  std::atomic<int>& allocations_done_;
+  std::atomic<bool>& can_finish_;
+  BucketDistribution bucket_distribution_;
+};
+
+}  // namespace
+
+// Disabled due to flakiness: crbug.com/1220371
+TEST_P(PartitionAllocThreadCacheTest,
+       DISABLED_PeriodicPurgeSumsOverAllThreads) {
+  auto& registry = ThreadCacheRegistry::Instance();
+  auto NextInterval = [&registry]() {
+    return internal::base::Microseconds(
+        registry.GetPeriodicPurgeNextIntervalInMicroseconds());
+  };
+  EXPECT_EQ(NextInterval(), registry.default_purge_interval());
+
+  // Small amount of memory, the period gets longer.
+  auto* tcache = ThreadCache::Get();
+  ASSERT_LT(tcache->CachedMemory(),
+            registry.min_cached_memory_for_purging_bytes());
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
+
+  // Check that the purge interval is clamped at the maximum value.
+  while (NextInterval() < registry.max_purge_interval()) {
+    registry.RunPeriodicPurge();
+  }
+  registry.RunPeriodicPurge();
+
+  // Not enough memory on this thread to decrease the interval.
+  FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() / 2);
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), registry.max_purge_interval());
+
+  std::atomic<int> allocations_done{0};
+  std::atomic<bool> can_finish{false};
+  ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
+      root(), allocations_done, can_finish, GetParam());
+
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+  internal::base::PlatformThreadHandle thread_handle_2;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle_2);
+
+  while (allocations_done.load(std::memory_order_acquire) != 2) {
+    internal::base::PlatformThreadForTesting::YieldCurrentThread();
+  }
+
+  // Many allocations on the other thread.
+  registry.RunPeriodicPurge();
+  EXPECT_EQ(NextInterval(), registry.default_purge_interval());
+
+  can_finish.store(true, std::memory_order_release);
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+  internal::base::PlatformThreadForTesting::Join(thread_handle_2);
+}
+
+// TODO(https://crbug.com/1287799): Flaky on IOS.
+#if BUILDFLAG(IS_IOS)
+#define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
+#else
+#define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
+#endif
+TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
+  auto* tcache = root()->thread_cache_for_testing();
+  size_t bucket_index =
+      FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);
+
+  EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
+
+  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+      ThreadCache::kDefaultMultiplier / 2);
+  // No immediate batch deallocation.
+  EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
+  void* data =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
+  // Not triggered by allocations.
+  EXPECT_EQ(kDefaultCountForMediumBucket - 1,
+            tcache->buckets_[bucket_index].count);
+
+  // Free() triggers the purge within limits.
+  root()->Free(data);
+  EXPECT_LE(tcache->buckets_[bucket_index].count,
+            kDefaultCountForMediumBucket / 2);
+
+  // Won't go above anymore.
+  FillThreadCacheAndReturnIndex(kMediumSize, 1000);
+  EXPECT_LE(tcache->buckets_[bucket_index].count,
+            kDefaultCountForMediumBucket / 2);
+
+  // Limit can be raised.
+  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+      ThreadCache::kDefaultMultiplier * 2);
+  FillThreadCacheAndReturnIndex(kMediumSize, 1000);
+  EXPECT_GT(tcache->buckets_[bucket_index].count,
+            kDefaultCountForMediumBucket / 2);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
+  auto* tcache = root()->thread_cache_for_testing();
+
+  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+      ThreadCache::kDefaultMultiplier / 1000.);
+  for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
+    // Invalid bucket.
+    if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
+      EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
+      continue;
+    }
+    EXPECT_GE(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 1u);
+  }
+
+  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+      ThreadCache::kDefaultMultiplier * 1000.);
+  for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
+    // Invalid bucket.
+    if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
+      EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
+      continue;
+    }
+    EXPECT_LT(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 0xff);
+  }
+}
+
+// TODO(https://crbug.com/1287799): Flaky on IOS.
+#if BUILDFLAG(IS_IOS)
+#define MAYBE_DynamicCountPerBucketMultipleThreads \
+  DISABLED_DynamicCountPerBucketMultipleThreads
+#else
+#define MAYBE_DynamicCountPerBucketMultipleThreads \
+  DynamicCountPerBucketMultipleThreads
+#endif
+
+namespace {
+
+class ThreadDelegateForDynamicCountPerBucketMultipleThreads
+    : public internal::base::PlatformThreadForTesting::Delegate {
+ public:
+  ThreadDelegateForDynamicCountPerBucketMultipleThreads(
+      PartitionRoot* root,
+      std::atomic<bool>& other_thread_started,
+      std::atomic<bool>& threshold_changed,
+      int bucket_index,
+      BucketDistribution bucket_distribution)
+      : root_(root),
+        other_thread_started_(other_thread_started),
+        threshold_changed_(threshold_changed),
+        bucket_index_(bucket_index),
+        bucket_distribution_(bucket_distribution) {}
+
+  void ThreadMain() override {
+    FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_,
+                                  kDefaultCountForSmallBucket + 10);
+    auto* this_thread_tcache = root_->thread_cache_for_testing();
+    // More than the default since the multiplier has changed.
+    EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
+              kDefaultCountForSmallBucket + 10);
+
+    other_thread_started_.store(true, std::memory_order_release);
+    while (!threshold_changed_.load(std::memory_order_acquire)) {
+    }
+
+    void* data =
+        root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
+    // Deallocations trigger limit enforcement.
+    root_->Free(data);
+    // Since the bucket is too full, it gets halved by batched deallocation.
+    EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
+              this_thread_tcache->bucket_count_for_testing(bucket_index_));
+  }
+
+ private:
+  PartitionRoot* root_ = nullptr;
+  std::atomic<bool>& other_thread_started_;
+  std::atomic<bool>& threshold_changed_;
+  const int bucket_index_;
+  PartitionRoot::BucketDistribution bucket_distribution_;
+};
+
+}  // namespace
+
+TEST_P(PartitionAllocThreadCacheTest,
+       MAYBE_DynamicCountPerBucketMultipleThreads) {
+  std::atomic<bool> other_thread_started{false};
+  std::atomic<bool> threshold_changed{false};
+
+  auto* tcache = root()->thread_cache_for_testing();
+  size_t bucket_index =
+      FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
+  EXPECT_EQ(kDefaultCountForSmallBucket, tcache->buckets_[bucket_index].count);
+
+  // Change the ratio before starting the threads, checking that it will be
+  // applied to newly-created threads.
+  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
+      ThreadCache::kDefaultMultiplier + 1);
+
+  ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
+      root(), other_thread_started, threshold_changed, bucket_index,
+      GetParam());
+
+  internal::base::PlatformThreadHandle thread_handle;
+  internal::base::PlatformThreadForTesting::Create(0, &delegate,
+                                                   &thread_handle);
+
+  while (!other_thread_started.load(std::memory_order_acquire)) {
+  }
+
+  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
+  threshold_changed.store(true, std::memory_order_release);
+
+  internal::base::PlatformThreadForTesting::Join(thread_handle);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
+  auto* tcache = root()->thread_cache_for_testing();
+  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
+  DeltaCounter alloc_miss_too_large_counter{
+      tcache->stats_.alloc_miss_too_large};
+  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
+  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
+
+  // Default threshold at first.
+  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
+  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
+
+  EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
+  EXPECT_EQ(1u, cache_fill_counter.Delta());
+
+  // Too large to be cached.
+  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
+  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
+
+  // Increase.
+  ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
+  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
+  // No new miss.
+  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
+
+  // Lower.
+  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
+  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
+  EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());
+
+  // Value is clamped.
+  size_t too_large = 1024 * 1024;
+  ThreadCache::SetLargestCachedSize(too_large);
+  FillThreadCacheAndReturnIndex(too_large);
+  EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
+}
+
+// Disabled due to flakiness: crbug.com/1287811
+TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
+  auto* tcache = root()->thread_cache_for_testing();
+  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
+  DeltaCounter alloc_miss_too_large_counter{
+      tcache->stats_.alloc_miss_too_large};
+  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
+  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
+
+  // Cache large allocations.
+  size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
+  ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
+  size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
+  EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
+
+  // Lower.
+  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
+  FillThreadCacheAndReturnIndex(large_allocation_size);
+  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
+
+  // There is memory trapped in the cache bucket.
+  EXPECT_GT(tcache->buckets_[index].count, 0u);
+
+  // Which is reclaimed by Purge().
+  tcache->Purge();
+  EXPECT_EQ(0u, tcache->buckets_[index].count);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
+  auto count_items = [](ThreadCache* tcache, size_t index) {
+    uint8_t count = 0;
+    auto* head = tcache->buckets_[index].freelist_head;
+    while (head) {
+      head =
+          head->GetNextForThreadCache<true>(tcache->buckets_[index].slot_size);
+      count++;
+    }
+    return count;
+  };
+
+  auto* tcache = root()->thread_cache_for_testing();
+  size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
+  ASSERT_GE(count_items(tcache, index), 10);
+  void* head = tcache->buckets_[index].freelist_head;
+
+  for (size_t limit : {8, 3, 1}) {
+    tcache->ClearBucket(tcache->buckets_[index], limit);
+    EXPECT_EQ(head, static_cast<void*>(tcache->buckets_[index].freelist_head));
+    EXPECT_EQ(count_items(tcache, index), limit);
+  }
+  tcache->ClearBucket(tcache->buckets_[index], 0);
+  EXPECT_EQ(nullptr, static_cast<void*>(tcache->buckets_[index].freelist_head));
+}
+
+// TODO(https://crbug.com/1287799): Flaky on iOS.
+#if BUILDFLAG(IS_IOS)
+#define MAYBE_Bookkeeping DISABLED_Bookkeeping
+#else
+#define MAYBE_Bookkeeping Bookkeeping
+#endif
+TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
+  void* arr[kFillCountForMediumBucket] = {};
+  auto* tcache = root()->thread_cache_for_testing();
+
+  root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
+                      PurgeFlags::kDiscardUnusedSystemPages);
+  root()->ResetBookkeepingForTesting();
+
+  // The ThreadCache is allocated before we change buckets, so its size is
+  // always based on the neutral distribution.
+  size_t tc_bucket_index = root()->SizeToBucketIndex(
+      sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
+  auto* tc_bucket = &root()->buckets[tc_bucket_index];
+  size_t expected_allocated_size =
+      tc_bucket->slot_size;  // For the ThreadCache itself.
+  size_t expected_committed_size = kUseLazyCommit
+                                       ? internal::SystemPageSize()
+                                       : tc_bucket->get_bytes_per_span();
+
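+  // (With lazy commit, only the first system page of the thread cache's slot
+  // span is expected to be committed at this point; otherwise the whole span
+  // is committed.)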
+  EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
+  EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
+  EXPECT_EQ(expected_allocated_size,
+            root()->get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
+
+  void* ptr =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
+
+  auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
+  size_t medium_alloc_size = medium_bucket->slot_size;
+  expected_allocated_size += medium_alloc_size;
+  expected_committed_size += kUseLazyCommit
+                                 ? internal::SystemPageSize()
+                                 : medium_bucket->get_bytes_per_span();
+
+  EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
+  EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
+  EXPECT_EQ(expected_allocated_size,
+            root()->get_total_size_of_allocated_bytes());
+  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
+
+  expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;
+
+  // These allocations all come from the thread-cache.
+  for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
+    arr[i] =
+        root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
+    EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
+    EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
+    EXPECT_EQ(expected_allocated_size,
+              root()->get_total_size_of_allocated_bytes());
+    EXPECT_EQ(expected_allocated_size,
+              root()->get_max_size_of_allocated_bytes());
+    EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
+              tcache->CachedMemory());
+  }
+
+  EXPECT_EQ(0U, tcache->CachedMemory());
+
+  root()->Free(ptr);
+
+  for (auto*& el : arr) {
+    root()->Free(el);
+  }
+  EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
+            expected_allocated_size);
+  tcache->Purge();
+  EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
+            GetBucketSizeForThreadCache());
+}
+
+TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
+  auto* tcache = root()->thread_cache_for_testing();
+  tcache->TryPurge();
+}
+
+TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
+  auto* tcache = root()->thread_cache_for_testing();
+
+  void* ptr =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
+
+  auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
+
+  auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
+  curr = curr->GetNextForThreadCache<true>(kMediumSize);
+  curr->CorruptNextForTesting(0x12345678);
+  tcache->TryPurge();
+  curr->SetNext(nullptr);
+  root()->Free(ptr);
+}
+
+TEST(AlternateBucketDistributionTest, SizeToIndex) {
+  using internal::BucketIndexLookup;
+
+  // The first 12 buckets are the same as in the default bucket distribution.
+  for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
+    for (size_t offset = 0; offset < 4; offset++) {
+      size_t n = i * (4 + offset) / 4;
+      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
+                BucketIndexLookup::GetIndexForNeutralBuckets(n));
+    }
+  }
+
+  // The alternate bucket distribution is different in the middle values.
+  //
+  // For each order, the top two buckets are removed compared with the default
+  // distribution. Values that would be allocated in those two buckets are
+  // instead allocated in the next power of two bucket.
+  //
+  // The first two buckets (each power of two and the next bucket up) remain
+  // the same between the two bucket distributions.
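+  //
+  // A concrete sketch (assuming the usual four buckets per power of two): in
+  // the order starting at 256, allocations of 256 and 320 bytes map to the
+  // same bucket index in both distributions, while 384 and 448 bytes are
+  // rounded up to the 512-byte bucket under the alternate distribution.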
+  size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
+  for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
+       i <<= 1) {
+    // The first two buckets in the order should match up to the normal bucket
+    // distribution.
+    for (size_t offset = 0; offset < 2; offset++) {
+      size_t n = i * (4 + offset) / 4;
+      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
+                BucketIndexLookup::GetIndexForNeutralBuckets(n));
+      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
+      expected_index += 2;
+    }
+    // The last two buckets in the order are "rounded up" to the same bucket
+    // as the next power of two.
+    expected_index += 4;
+    for (size_t offset = 2; offset < 4; offset++) {
+      size_t n = i * (4 + offset) / 4;
+      // These two are rounded up in the alternate distribution, so we expect
+      // the bucket index to be larger than the bucket index for the same
+      // allocation under the default distribution.
+      EXPECT_GT(BucketIndexLookup::GetIndex(n),
+                BucketIndexLookup::GetIndexForNeutralBuckets(n));
+      // We expect both allocations in this loop to be rounded up to the next
+      // power of two bucket.
+      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
+    }
+  }
+
+  // The rest of the buckets all match up exactly with the existing
+  // bucket distribution.
+  for (size_t i = internal::kHighThresholdForAlternateDistribution;
+       i < internal::kMaxBucketed; i <<= 1) {
+    for (size_t offset = 0; offset < 4; offset++) {
+      size_t n = i * (4 + offset) / 4;
+      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
+                BucketIndexLookup::GetIndexForNeutralBuckets(n));
+    }
+  }
+}
+
+TEST_P(PartitionAllocThreadCacheTest, AllocationRecording) {
+  // There is a cache.
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_TRUE(tcache);
+  tcache->ResetPerThreadAllocationStatsForTesting();
+
+  constexpr size_t kBucketedNotCached = 1 << 12;
+  constexpr size_t kDirectMapped = 4 * (1 << 20);
+  // Not a "nice" size on purpose, to check that the raw size accounting works.
+  const size_t kSingleSlot = internal::PartitionPageSize() + 1;
+
+  size_t expected_total_size = 0;
+  void* ptr =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
+  ASSERT_TRUE(ptr);
+  expected_total_size += root()->GetUsableSize(ptr);
+  void* ptr2 = root()->Alloc(
+      root()->AdjustSizeForExtrasSubtract(kBucketedNotCached), "");
+  ASSERT_TRUE(ptr2);
+  expected_total_size += root()->GetUsableSize(ptr2);
+  void* ptr3 =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kDirectMapped), "");
+  ASSERT_TRUE(ptr3);
+  expected_total_size += root()->GetUsableSize(ptr3);
+  void* ptr4 =
+      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSingleSlot), "");
+  ASSERT_TRUE(ptr4);
+  expected_total_size += root()->GetUsableSize(ptr4);
+
+  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
+  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
+
+  root()->Free(ptr);
+  root()->Free(ptr2);
+  root()->Free(ptr3);
+  root()->Free(ptr4);
+
+  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
+  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
+  EXPECT_EQ(4u, tcache->thread_alloc_stats().dealloc_count);
+  EXPECT_EQ(expected_total_size,
+            tcache->thread_alloc_stats().dealloc_total_size);
+
+  auto stats = internal::GetAllocStatsForCurrentThread();
+  EXPECT_EQ(4u, stats.alloc_count);
+  EXPECT_EQ(expected_total_size, stats.alloc_total_size);
+  EXPECT_EQ(4u, stats.dealloc_count);
+  EXPECT_EQ(expected_total_size, stats.dealloc_total_size);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingAligned) {
+  // There is a cache.
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_TRUE(tcache);
+  tcache->ResetPerThreadAllocationStatsForTesting();
+
+  // Aligned allocations take different paths depending on whether they are (in
+  // the same order as the test cases below):
+  // - Not really aligned (since alignment is always good-enough)
+  // - Already satisfied by PA's alignment guarantees
+  // - Requiring extra padding
+  // - Already satisfied by PA's alignment guarantees
+  // - In need of a special slot span (very large alignment)
+  // - Direct-mapped with large alignment
+  size_t alloc_count = 0;
+  size_t total_size = 0;
+  size_t size_alignments[][2] = {{128, 4},
+                                 {128, 128},
+                                 {1024, 128},
+                                 {128, 1024},
+                                 {128, 2 * internal::PartitionPageSize()},
+                                 {(4 << 20) + 1, 1 << 19}};
+  for (auto [requested_size, alignment] : size_alignments) {
+    void* ptr = root()->AlignedAlloc(alignment, requested_size);
+    ASSERT_TRUE(ptr);
+    alloc_count++;
+    total_size += root()->GetUsableSize(ptr);
+    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
+    EXPECT_EQ(total_size, tcache->thread_alloc_stats().alloc_total_size);
+    root()->Free(ptr);
+    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().dealloc_count);
+    EXPECT_EQ(total_size, tcache->thread_alloc_stats().dealloc_total_size);
+  }
+
+  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
+            tcache->thread_alloc_stats().dealloc_total_size);
+
+  auto stats = internal::GetAllocStatsForCurrentThread();
+  EXPECT_EQ(alloc_count, stats.alloc_count);
+  EXPECT_EQ(total_size, stats.alloc_total_size);
+  EXPECT_EQ(alloc_count, stats.dealloc_count);
+  EXPECT_EQ(total_size, stats.dealloc_total_size);
+}
+
+TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingRealloc) {
+  // There is a cache.
+  auto* tcache = root()->thread_cache_for_testing();
+  EXPECT_TRUE(tcache);
+  tcache->ResetPerThreadAllocationStatsForTesting();
+
+  size_t alloc_count = 0;
+  size_t dealloc_count = 0;
+  size_t total_alloc_size = 0;
+  size_t total_dealloc_size = 0;
+  size_t size_new_sizes[][2] = {
+      {16, 15},
+      {16, 64},
+      {16, internal::PartitionPageSize() + 1},
+      {4 << 20, 8 << 20},
+      {8 << 20, 4 << 20},
+      {(8 << 20) - internal::SystemPageSize(), 8 << 20}};
+  for (auto [size, new_size] : size_new_sizes) {
+    void* ptr = root()->Alloc(size);
+    ASSERT_TRUE(ptr);
+    alloc_count++;
+    size_t usable_size = root()->GetUsableSize(ptr);
+    total_alloc_size += usable_size;
+
+    ptr = root()->Realloc(ptr, new_size, "");
+    ASSERT_TRUE(ptr);
+    total_dealloc_size += usable_size;
+    dealloc_count++;
+    usable_size = root()->GetUsableSize(ptr);
+    total_alloc_size += usable_size;
+    alloc_count++;
+
+    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
+    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
+    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
+    EXPECT_EQ(total_dealloc_size,
+              tcache->thread_alloc_stats().dealloc_total_size)
+        << new_size;
+
+    root()->Free(ptr);
+    dealloc_count++;
+    total_dealloc_size += usable_size;
+
+    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
+    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
+    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
+    EXPECT_EQ(total_dealloc_size,
+              tcache->thread_alloc_stats().dealloc_total_size);
+  }
+  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
+            tcache->thread_alloc_stats().dealloc_total_size);
+}
+
+// This test makes sure it's safe to switch to the alternate bucket distribution
+// at runtime. This is intended to happen once, near the start of Chrome, once
+// feature flags have been initialized.
+TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
+  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
+  PartitionRoot* root = allocator->root();
+
+  root->SwitchToDenserBucketDistribution();
+  constexpr size_t n = (1 << 12) * 3 / 2;
+  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
+            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));
+
+  void* ptr = root->Alloc(n);
+
+  root->ResetBucketDistributionForTesting();
+
+  root->Free(ptr);
+}
+
+// This test makes sure it's safe to switch to the alternate bucket distribution
+// at runtime. This is intended to happen once, near the start of Chrome, once
+// feature flags have been initialized.
+TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
+  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
+  constexpr size_t n = (1 << 12) * 3 / 2;
+  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
+            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));
+
+  PartitionRoot* root = allocator->root();
+  void* ptr = root->Alloc(n);
+
+  root->SwitchToDenserBucketDistribution();
+
+  void* ptr2 = root->Alloc(n);
+
+  root->Free(ptr2);
+  root->Free(ptr);
+}
+
+}  // namespace partition_alloc
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
+        // PA_CONFIG(THREAD_CACHE_SUPPORTED)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h
new file mode 100644
index 0000000..0d72d45
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h
@@ -0,0 +1,44 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_ALIGNMENT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_ALIGNMENT_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+
+#define PA_THREAD_ISOLATED_ALIGN_SZ partition_alloc::internal::SystemPageSize()
+#define PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK (PA_THREAD_ISOLATED_ALIGN_SZ - 1)
+#define PA_THREAD_ISOLATED_ALIGN_BASE_MASK \
+  (~PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK)
+#define PA_THREAD_ISOLATED_ALIGN alignas(PA_THREAD_ISOLATED_ALIGN_SZ)
+
+#define PA_THREAD_ISOLATED_FILL_PAGE_SZ(size)          \
+  ((PA_THREAD_ISOLATED_ALIGN_SZ -                      \
+    ((size) & PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK)) % \
+   PA_THREAD_ISOLATED_ALIGN_SZ)
+// Calculate the padding required so that the last element of a page-aligned
+// array lands on a page boundary. In other words, the padding rounds the size
+// of the first (count - 1) elements up to a multiple of the page size.
+// The offset parameter additionally skips bytes in the object, i.e.
+// object + offset will be page-aligned.
+#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(Type, count, offset) \
+  PA_THREAD_ISOLATED_FILL_PAGE_SZ(sizeof(Type) * (count - 1) + offset)
+
+#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ(Type, count) \
+  PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(Type, count, 0)
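+
+// Worked example (a sketch, assuming a 4 KiB system page): for a 16-byte Type
+// with count = 100 and offset = 0, sizeof(Type) * (count - 1) = 1584, so
+// PA_THREAD_ISOLATED_ARRAY_PAD_SZ yields (4096 - 1584) % 4096 = 2512 bytes of
+// padding, bringing the padded size up to exactly one page.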
+
+#else  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#define PA_THREAD_ISOLATED_ALIGN
+#define PA_THREAD_ISOLATED_FILL_PAGE_SZ(size) 0
+#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ(Type, size) 0
+#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(Type, size, offset) 0
+
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_ALIGNMENT_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.cc b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.cc
new file mode 100644
index 0000000..ad0b8ad
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.cc
@@ -0,0 +1,89 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h"
+
+#if BUILDFLAG(ENABLE_PKEYS)
+
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/cpu.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+
+#if !BUILDFLAG(IS_LINUX)
+#error "This pkey code is currently only supported on Linux"
+#endif
+
+namespace partition_alloc::internal {
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+bool CPUHasPkeySupport() {
+  return base::CPU::GetInstanceNoAllocation().has_pku();
+}
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+int PkeyMprotect(void* addr, size_t len, int prot, int pkey) {
+  return syscall(SYS_pkey_mprotect, addr, len, prot, pkey);
+}
+
+void TagMemoryWithPkey(int pkey, void* address, size_t size) {
+  PA_DCHECK((reinterpret_cast<uintptr_t>(address) &
+             PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK) == 0);
+  PA_PCHECK(PkeyMprotect(address,
+                         (size + PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK) &
+                             PA_THREAD_ISOLATED_ALIGN_BASE_MASK,
+                         PROT_READ | PROT_WRITE, pkey) == 0);
+}
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+int PkeyAlloc(int access_rights) {
+  return syscall(SYS_pkey_alloc, 0, access_rights);
+}
+
+PA_COMPONENT_EXPORT(PARTITION_ALLOC)
+void PkeyFree(int pkey) {
+  PA_PCHECK(syscall(SYS_pkey_free, pkey) == 0);
+}
+
+uint32_t Rdpkru() {
+  uint32_t pkru;
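+  // The .byte sequence 0x0f,0x01,0xee encodes RDPKRU: it reads PKRU into EAX
+  // and requires ECX to be zero.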
+  asm volatile(".byte 0x0f,0x01,0xee\n" : "=a"(pkru) : "c"(0), "d"(0));
+  return pkru;
+}
+
+void Wrpkru(uint32_t pkru) {
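+  // The .byte sequence 0x0f,0x01,0xef encodes WRPKRU: it writes EAX into PKRU
+  // and requires ECX and EDX to be zero.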
+  asm volatile(".byte 0x0f,0x01,0xef\n" : : "a"(pkru), "c"(0), "d"(0));
+}
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
+    : saved_pkey_value_(kDefaultPkeyValue) {
+  if (!ThreadIsolationSettings::settings.enabled) {
+    return;
+  }
+  saved_pkey_value_ = Rdpkru();
+  if (saved_pkey_value_ != kDefaultPkeyValue) {
+    Wrpkru(kAllowAllPkeyValue);
+  }
+}
+
+LiftPkeyRestrictionsScope::~LiftPkeyRestrictionsScope() {
+  if (!ThreadIsolationSettings::settings.enabled) {
+    return;
+  }
+  if (Rdpkru() != saved_pkey_value_) {
+    Wrpkru(saved_pkey_value_);
+  }
+}
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(ENABLE_PKEYS)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h
new file mode 100644
index 0000000..697779f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h
@@ -0,0 +1,62 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_PKEY_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_PKEY_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(ENABLE_PKEYS)
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/alignment.h"
+
+#include <cstddef>
+#include <cstdint>
+
+namespace partition_alloc::internal {
+
+constexpr int kDefaultPkey = 0;
+constexpr int kInvalidPkey = -1;
+
+// Check if the CPU supports pkeys.
+bool CPUHasPkeySupport();
+
+// A wrapper around the pkey_mprotect syscall.
+[[nodiscard]] int PkeyMprotect(void* addr, size_t len, int prot, int pkey);
+
+void TagMemoryWithPkey(int pkey, void* address, size_t size);
+
+int PkeyAlloc(int access_rights);
+
+void PkeyFree(int pkey);
+
+// Read the pkru register (the current pkey state).
+uint32_t Rdpkru();
+
+// Write the pkru register (the current pkey state).
+void Wrpkru(uint32_t pkru);
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
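+// RAII helper: the constructor saves the current PKRU value and lifts pkey
+// restrictions (when thread isolation is enabled); the destructor restores the
+// saved value. A minimal usage sketch:
+//
+//   {
+//     LiftPkeyRestrictionsScope lift;
+//     // ... touch thread-isolated memory from a DCHECK-only path ...
+//   }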
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LiftPkeyRestrictionsScope {
+ public:
+  static constexpr uint32_t kDefaultPkeyValue = 0x55555554;
+  static constexpr uint32_t kAllowAllPkeyValue = 0x0;
+
+  LiftPkeyRestrictionsScope();
+  ~LiftPkeyRestrictionsScope();
+
+ private:
+  uint32_t saved_pkey_value_;
+};
+
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(ENABLE_PKEYS)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_PKEY_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey_unittest.cc b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey_unittest.cc
new file mode 100644
index 0000000..d0b1b9f
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey_unittest.cc
@@ -0,0 +1,273 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+
+#if BUILDFLAG(ENABLE_PKEYS)
+
+#include <link.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define ISOLATED_FUNCTION extern "C" __attribute__((used))
+constexpr size_t kIsolatedThreadStackSize = 64 * 1024;
+constexpr int kNumPkey = 16;
+constexpr size_t kTestReturnValue = 0x8765432187654321llu;
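+// PKRU holds two bits per key (access-disable in the even bit, write-disable
+// in the odd bit): this value leaves pkey 0 fully accessible and makes all
+// other pkeys readable but not writable.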
+constexpr uint32_t kPKRUAllowAccessNoWrite = 0b10101010101010101010101010101000;
+
+namespace partition_alloc::internal {
+
+struct PA_THREAD_ISOLATED_ALIGN IsolatedGlobals {
+  int pkey = kInvalidPkey;
+  void* stack;
+  partition_alloc::internal::base::NoDestructor<
+      partition_alloc::PartitionAllocator>
+      allocator{};
+} isolated_globals;
+
+int ProtFromSegmentFlags(ElfW(Word) flags) {
+  int prot = 0;
+  if (flags & PF_R) {
+    prot |= PROT_READ;
+  }
+  if (flags & PF_W) {
+    prot |= PROT_WRITE;
+  }
+  if (flags & PF_X) {
+    prot |= PROT_EXEC;
+  }
+  return prot;
+}
+
+int ProtectROSegments(struct dl_phdr_info* info, size_t info_size, void* data) {
+  if (!strcmp(info->dlpi_name, "linux-vdso.so.1")) {
+    return 0;
+  }
+  for (int i = 0; i < info->dlpi_phnum; i++) {
+    const ElfW(Phdr)* phdr = &info->dlpi_phdr[i];
+    if (phdr->p_type != PT_LOAD && phdr->p_type != PT_GNU_RELRO) {
+      continue;
+    }
+    if (phdr->p_flags & PF_W) {
+      continue;
+    }
+    uintptr_t start = info->dlpi_addr + phdr->p_vaddr;
+    uintptr_t end = start + phdr->p_memsz;
+    uintptr_t start_page = RoundDownToSystemPage(start);
+    uintptr_t end_page = RoundUpToSystemPage(end);
+    uintptr_t size = end_page - start_page;
+    PA_PCHECK(PkeyMprotect(reinterpret_cast<void*>(start_page), size,
+                           ProtFromSegmentFlags(phdr->p_flags),
+                           isolated_globals.pkey) == 0);
+  }
+  return 0;
+}
+
+class PkeyTest : public testing::Test {
+ protected:
+  static void PkeyProtectMemory() {
+    PA_PCHECK(dl_iterate_phdr(ProtectROSegments, nullptr) == 0);
+
+    PA_PCHECK(PkeyMprotect(&isolated_globals, sizeof(isolated_globals),
+                           PROT_READ | PROT_WRITE, isolated_globals.pkey) == 0);
+
+    PA_PCHECK(PkeyMprotect(isolated_globals.stack, kIsolatedThreadStackSize,
+                           PROT_READ | PROT_WRITE, isolated_globals.pkey) == 0);
+  }
+
+  static void InitializeIsolatedThread() {
+    isolated_globals.stack =
+        mmap(nullptr, kIsolatedThreadStackSize, PROT_READ | PROT_WRITE,
+             MAP_ANONYMOUS | MAP_PRIVATE | MAP_STACK, -1, 0);
+    PA_PCHECK(isolated_globals.stack != MAP_FAILED);
+
+    PkeyProtectMemory();
+  }
+
+  void SetUp() override {
+    // Set up only once; we can't do it in SetUpTestSuite() since that runs
+    // before other PartitionAlloc initialization has happened.
+    if (isolated_globals.pkey != kInvalidPkey) {
+      return;
+    }
+
+    int pkey = PkeyAlloc(0);
+    if (pkey == -1) {
+      return;
+    }
+    isolated_globals.pkey = pkey;
+
+    isolated_globals.allocator->init(PartitionOptions{
+        .aligned_alloc = PartitionOptions::kAllowed,
+        .thread_isolation = ThreadIsolationOption(isolated_globals.pkey),
+    });
+
+    InitializeIsolatedThread();
+
+    Wrpkru(kPKRUAllowAccessNoWrite);
+  }
+
+  static void TearDownTestSuite() {
+    if (isolated_globals.pkey == kInvalidPkey) {
+      return;
+    }
+    PA_PCHECK(PkeyMprotect(&isolated_globals, sizeof(isolated_globals),
+                           PROT_READ | PROT_WRITE, kDefaultPkey) == 0);
+    isolated_globals.pkey = kDefaultPkey;
+    InitializeIsolatedThread();
+    PkeyFree(isolated_globals.pkey);
+  }
+};
+
+// This code will run with access limited to pkey 1, no default pkey access.
+// Note that we're stricter than required for debugging purposes.
+// In the final use, we'll likely allow at least read access to the default
+// pkey.
+ISOLATED_FUNCTION uint64_t IsolatedAllocFree(void* arg) {
+  char* buf = (char*)isolated_globals.allocator->root()
+                  ->Alloc<partition_alloc::AllocFlags::kNoHooks>(1024);
+  if (!buf) {
+    return 0xffffffffffffffffllu;
+  }
+  isolated_globals.allocator->root()->Free<FreeFlags::kNoHooks>(buf);
+
+  return kTestReturnValue;
+}
+
+// This test is a bit complicated. We want to ensure that the code
+// allocating/freeing from the pkey pool doesn't *unexpectedly* access memory
+// tagged with the default pkey (pkey 0). This could be a security issue since
+// in our CFI threat model that memory might be attacker-controlled.
+// To test for this, we run alloc/free without access to the default pkey. In
+// order to do this, we need to tag all global read-only memory with our pkey as
+// well as switch to a pkey-tagged stack.
+TEST_F(PkeyTest, AllocWithoutDefaultPkey) {
+  if (isolated_globals.pkey == kInvalidPkey) {
+    return;
+  }
+
+  uint64_t ret;
+  uint32_t pkru_value = 0;
+  for (int pkey = 0; pkey < kNumPkey; pkey++) {
+    if (pkey != isolated_globals.pkey) {
+      pkru_value |= (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) << (2 * pkey);
+    }
+  }
+
+  // Switch to the safe stack with inline assembly.
+  //
+  // The simple solution would be to use one asm statement as a prologue to
+  // switch to the protected stack and a second one to switch it back. However,
+  // that doesn't work since inline assembly doesn't support a clobbered stack
+  // register. So instead, we switch the stack, perform a function call to the
+  // actual code, and switch back afterwards.
+  //
+  // The inline asm docs mention that special care must be taken
+  // when calling a function in inline assembly. I.e. we will
+  // need to make sure that we follow the ABI of the platform.
+  // In this example, we use the System-V ABI.
+  //
+  // == Caller-saved registers ==
+  // We had two ideas for handling caller-saved registers. Option 1 was chosen,
+  // but I'll describe both to show why option 2 didn't work out:
+  // * Option 1) mark all caller-saved registers as clobbered. This should be
+  //             in line with how the compiler would create the function call.
+  //             Problem: future additions to caller-saved registers can break
+  //             this.
+  // * Option 2) use attribute no_caller_saved_registers. This prohibits use of
+  //             sse/mmx/x87. We can disable sse/mmx with a "target" attribute,
+  //             but I couldn't find a way to disable x87.
+  //             The docs tell you to use -mgeneral-regs-only. Maybe we
+  //             could move the isolated code to a separate file and then
+  //             use that flag for compiling that file only.
+  //             !!! This doesn't work: the inner function can call out to code
+  //             that uses caller-saved registers and won't save
+  //             them itself.
+  //
+  // == stack alignment ==
+  // The ABI requires us to have a 16 byte aligned rsp on function
+  // entry. We push one qword onto the stack so we need to subtract
+  // an additional 8 bytes from the stack pointer.
+  //
+  // == additional clobbering ==
+  // As described above, we need to clobber everything besides
+  // callee-saved registers. The ABI requires all x87 registers to be set to
+  // empty on function entry / return, so we should tell the compiler that this
+  // is the case. As I understand the docs, this is done by marking them as
+  // clobbered. Worst case, we'll notice any issues quickly and can fix them if
+  // this turns out to be wrong.
+  //
+  // == direction flag ==
+  // Theoretically, the DF flag could be set to 1 at asm entry. If this
+  // leads to problems, we might have to zero it before the fn call and
+  // restore it afterwards. I would have assumed that marking flags as
+  // clobbered would require the compiler to reset the DF before the next fn
+  // call, but that doesn't seem to be the case.
+  asm volatile(
+      // Set pkru to only allow access to pkey 1 memory.
+      ".byte 0x0f,0x01,0xef\n"  // wrpkru
+
+      // Move to the isolated stack and store the old value
+      "xchg %4, %%rsp\n"
+      "push %4\n"
+      "call IsolatedAllocFree\n"
+      // We need rax below, so move the return value to the stack
+      "push %%rax\n"
+
+      // Set pkru to only allow access to pkey 0 memory.
+      "mov $0b10101010101010101010101010101000, %%rax\n"
+      "xor %%rcx, %%rcx\n"
+      "xor %%rdx, %%rdx\n"
+      ".byte 0x0f,0x01,0xef\n"  // wrpkru
+
+      // Pop the return value
+      "pop %0\n"
+      // Restore the original stack
+      "pop %%rsp\n"
+
+      : "=r"(ret)
+      : "a"(pkru_value), "c"(0), "d"(0),
+        "r"(reinterpret_cast<uintptr_t>(isolated_globals.stack) +
+            kIsolatedThreadStackSize - 8)
+      : "memory", "cc", "r8", "r9", "r10", "r11", "xmm0", "xmm1", "xmm2",
+        "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10",
+        "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "flags", "fpsr", "st",
+        "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)");
+
+  ASSERT_EQ(ret, kTestReturnValue);
+}
+
+class MockAddressSpaceStatsDumper : public AddressSpaceStatsDumper {
+ public:
+  MockAddressSpaceStatsDumper() = default;
+  void DumpStats(const AddressSpaceStats* address_space_stats) override {}
+};
+
+TEST_F(PkeyTest, DumpPkeyPoolStats) {
+  if (isolated_globals.pkey == kInvalidPkey) {
+    return;
+  }
+
+  MockAddressSpaceStatsDumper mock_stats_dumper;
+  partition_alloc::internal::AddressPoolManager::GetInstance().DumpStats(
+      &mock_stats_dumper);
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(ENABLE_PKEYS)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.cc b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.cc
new file mode 100644
index 0000000..713d55a
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.cc
@@ -0,0 +1,83 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h"
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/reservation_offset_table.h"
+
+#if BUILDFLAG(ENABLE_PKEYS)
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h"
+#endif
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+ThreadIsolationSettings ThreadIsolationSettings::settings;
+#endif
+
+void WriteProtectThreadIsolatedMemory(ThreadIsolationOption thread_isolation,
+                                      void* address,
+                                      size_t size) {
+  PA_DCHECK((reinterpret_cast<uintptr_t>(address) &
+             PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK) == 0);
+#if BUILDFLAG(ENABLE_PKEYS)
+  partition_alloc::internal::TagMemoryWithPkey(
+      thread_isolation.enabled ? thread_isolation.pkey : kDefaultPkey, address,
+      size);
+#else
+#error unexpected thread isolation mode
+#endif
+}
+
+template <typename T>
+void WriteProtectThreadIsolatedVariable(ThreadIsolationOption thread_isolation,
+                                        T& var,
+                                        size_t offset = 0) {
+  WriteProtectThreadIsolatedMemory(thread_isolation, (char*)&var + offset,
+                                   sizeof(T) - offset);
+}
+
+int MprotectWithThreadIsolation(void* addr,
+                                size_t len,
+                                int prot,
+                                ThreadIsolationOption thread_isolation) {
+#if BUILDFLAG(ENABLE_PKEYS)
+  return PkeyMprotect(addr, len, prot, thread_isolation.pkey);
+#endif
+}
+
+void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption thread_isolation) {
+  WriteProtectThreadIsolatedVariable(thread_isolation,
+                                     PartitionAddressSpace::setup_);
+
+  AddressPoolManager::Pool* pool =
+      AddressPoolManager::GetInstance().GetPool(kThreadIsolatedPoolHandle);
+  WriteProtectThreadIsolatedVariable(
+      thread_isolation, *pool,
+      offsetof(AddressPoolManager::Pool, alloc_bitset_));
+
+  uint16_t* pkey_reservation_offset_table =
+      GetReservationOffsetTable(kThreadIsolatedPoolHandle);
+  WriteProtectThreadIsolatedMemory(
+      thread_isolation, pkey_reservation_offset_table,
+      ReservationOffsetTable::kReservationOffsetTableLength);
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  WriteProtectThreadIsolatedVariable(thread_isolation,
+                                     ThreadIsolationSettings::settings);
+#endif
+}
+
+void UnprotectThreadIsolatedGlobals() {
+  WriteProtectThreadIsolatedGlobals(ThreadIsolationOption(false));
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
diff --git a/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h
new file mode 100644
index 0000000..a734af9
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/thread_isolation/thread_isolation.h
@@ -0,0 +1,81 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_THREAD_ISOLATION_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_THREAD_ISOLATION_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/component_export.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
+
+#if BUILDFLAG(ENABLE_PKEYS)
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_isolation/pkey.h"
+#endif
+
+#if !BUILDFLAG(HAS_64_BIT_POINTERS)
+#error "thread isolation support requires 64 bit pointers"
+#endif
+
+namespace partition_alloc {
+
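+// Describes whether (and how) a partition should be placed in the
+// thread-isolated pool. A minimal sketch, mirroring the pkey unit test in this
+// change:
+//
+//   allocator->init(PartitionOptions{
+//       .thread_isolation = ThreadIsolationOption(pkey),
+//   });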
+struct ThreadIsolationOption {
+  constexpr ThreadIsolationOption() = default;
+  explicit ThreadIsolationOption(bool enabled) : enabled(enabled) {}
+
+#if BUILDFLAG(ENABLE_PKEYS)
+  explicit ThreadIsolationOption(int pkey) : pkey(pkey) {
+    enabled = pkey != internal::kInvalidPkey;
+  }
+  int pkey = -1;
+#endif  // BUILDFLAG(ENABLE_PKEYS)
+
+  bool enabled = false;
+
+  bool operator==(const ThreadIsolationOption& other) const {
+#if BUILDFLAG(ENABLE_PKEYS)
+    if (pkey != other.pkey) {
+      return false;
+    }
+#endif  // BUILDFLAG(ENABLE_PKEYS)
+    return enabled == other.enabled;
+  }
+};
+
+}  // namespace partition_alloc
+
+namespace partition_alloc::internal {
+
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+
+struct PA_THREAD_ISOLATED_ALIGN ThreadIsolationSettings {
+  bool enabled = false;
+  static ThreadIsolationSettings settings PA_CONSTINIT;
+};
+
+#if BUILDFLAG(ENABLE_PKEYS)
+
+using LiftThreadIsolationScope = LiftPkeyRestrictionsScope;
+
+#endif  // BUILDFLAG(ENABLE_PKEYS)
+#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
+
+void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption thread_isolation);
+void UnprotectThreadIsolatedGlobals();
+[[nodiscard]] int MprotectWithThreadIsolation(
+    void* addr,
+    size_t len,
+    int prot,
+    ThreadIsolationOption thread_isolation);
+
+}  // namespace partition_alloc::internal
+
+#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_THREAD_ISOLATION_THREAD_ISOLATION_H_
diff --git a/base/allocator/partition_allocator/src/partition_alloc/yield_processor.h b/base/allocator/partition_allocator/src/partition_alloc/yield_processor.h
new file mode 100644
index 0000000..c61ada7
--- /dev/null
+++ b/base/allocator/partition_allocator/src/partition_alloc/yield_processor.h
@@ -0,0 +1,51 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_YIELD_PROCESSOR_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_YIELD_PROCESSOR_H_
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "build/build_config.h"
+
+// The PA_YIELD_PROCESSOR macro wraps an architecture-specific instruction that
+// informs the processor we're in a busy wait, so it can handle the branch more
+// intelligently and e.g. reduce power to our core or give more resources to the
+// other hyper-thread on this core. See the following for context:
+// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
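+//
+// A typical spin-wait sketch (hypothetical caller code):
+//
+//   while (!flag.load(std::memory_order_acquire)) {
+//     PA_YIELD_PROCESSOR;
+//   }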
+
+#if PA_CONFIG(IS_NONCLANG_MSVC)
+
+// MSVC is in its own assemblyless world (crbug.com/1351310#c6).
+#include <windows.h>
+#define PA_YIELD_PROCESSOR (YieldProcessor())
+
+#else
+
+#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
+#define PA_YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif (defined(ARCH_CPU_ARMEL) && __ARM_ARCH >= 6) || defined(ARCH_CPU_ARM64)
+#define PA_YIELD_PROCESSOR __asm__ __volatile__("yield")
+#elif defined(ARCH_CPU_MIPSEL)
+// The MIPS32 docs state that the PAUSE instruction is a no-op on older
+// architectures (first added in MIPS32r2). To avoid assembler errors when
+// targeting pre-r2, we must encode the instruction manually.
+#define PA_YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
+#elif defined(ARCH_CPU_MIPS64EL) && __mips_isa_rev >= 2
+// Don't bother using .word here, since r2 is the lowest MIPS64 revision that
+// Chromium supports.
+#define PA_YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(ARCH_CPU_PPC64_FAMILY)
+#define PA_YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
+#elif defined(ARCH_CPU_S390_FAMILY)
+// Just do nothing.
+#define PA_YIELD_PROCESSOR ((void)0)
+#endif  // ARCH
+
+#ifndef PA_YIELD_PROCESSOR
+#define PA_YIELD_PROCESSOR ((void)0)
+#endif
+
+#endif  // PA_CONFIG(IS_NONCLANG_MSVC)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_YIELD_PROCESSOR_H_
diff --git a/base/allocator/partition_allocator/starscan/logging.h b/base/allocator/partition_allocator/starscan/logging.h
deleted file mode 100644
index 0c51f69..0000000
--- a/base/allocator/partition_allocator/starscan/logging.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_LOGGING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_LOGGING_H_
-
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-
-namespace partition_alloc::internal {
-
-// Logging requires allocations. This logger allows reentrant allocations to
-// happen within the allocator context.
-struct LoggerWithAllowedAllocations : ScopedAllowAllocations,
-                                      logging::LogMessage {
-  using logging::LogMessage::LogMessage;
-};
-
-#define PA_PCSCAN_VLOG_STREAM(verbose_level)                 \
-  ::partition_alloc::internal::LoggerWithAllowedAllocations( \
-      __FILE__, __LINE__, -(verbose_level))                  \
-      .stream()
-
-// Logging macro that is meant to be used inside *Scan. Generally, reentrancy
-// may be an issue if the macro is called from malloc()/free(). Currently, it's
-// only called at the end of *Scan and when scheduling a new *Scan task.
-// Allocating from these paths should not be an issue, since we make sure that
-// no infinite recursion can occur (e.g. we can't schedule two *Scan tasks and
-// the inner free() call must be non-reentrant).  However, these sorts of things
-// are tricky to enforce and easy to mess up with. Since verbose *Scan logging
-// is essential for debugging, we choose to provide support for it inside *Scan.
-#define PA_PCSCAN_VLOG(verbose_level)                  \
-  PA_LAZY_STREAM(PA_PCSCAN_VLOG_STREAM(verbose_level), \
-                 PA_VLOG_IS_ON(verbose_level))
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_LOGGING_H_
diff --git a/base/allocator/partition_allocator/starscan/metadata_allocator.cc b/base/allocator/partition_allocator/starscan/metadata_allocator.cc
deleted file mode 100644
index d73a3b5..0000000
--- a/base/allocator/partition_allocator/starscan/metadata_allocator.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_root.h"
-
-#include <cstring>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-
-namespace partition_alloc::internal {
-
-namespace {
-constexpr PartitionOptions kConfig{};
-}  // namespace
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-PartitionRoot& PCScanMetadataAllocator() {
-  static internal::base::NoDestructor<PartitionRoot> allocator(kConfig);
-  return *allocator;
-}
-
-// TODO(tasak): investigate whether PartitionAlloc tests really need this
-// function or not. If we found no tests need, remove it.
-void ReinitPCScanMetadataAllocatorForTesting() {
-  // First, purge memory owned by PCScanMetadataAllocator.
-  PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-                                        PurgeFlags::kDiscardUnusedSystemPages);
-  // Then, reinit the allocator.
-  PCScanMetadataAllocator().ResetForTesting(true);  // IN-TEST
-  PCScanMetadataAllocator().Init(kConfig);
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/metadata_allocator.h b/base/allocator/partition_allocator/starscan/metadata_allocator.h
deleted file mode 100644
index 2270838..0000000
--- a/base/allocator/partition_allocator/starscan/metadata_allocator.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_
-
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-
-namespace partition_alloc::internal {
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-PartitionRoot& PCScanMetadataAllocator();
-void ReinitPCScanMetadataAllocatorForTesting();
-
-// STL allocator which is needed to keep internal data structures required by
-// PCScan.
-template <typename T>
-class MetadataAllocator {
- public:
-  using value_type = T;
-
-  MetadataAllocator() = default;
-
-  template <typename U>
-  MetadataAllocator(const MetadataAllocator<U>&) {}  // NOLINT
-
-  template <typename U>
-  MetadataAllocator& operator=(const MetadataAllocator<U>&) {
-    return *this;
-  }
-
-  template <typename U>
-  bool operator==(const MetadataAllocator<U>&) {
-    return true;
-  }
-
-  template <typename U>
-  bool operator!=(const MetadataAllocator<U>& o) {
-    return !operator==(o);
-  }
-
-  value_type* allocate(size_t size) {
-    return static_cast<value_type*>(
-        PCScanMetadataAllocator().AllocWithFlagsNoHooks(
-            0, size * sizeof(value_type), PartitionPageSize()));
-  }
-
-  void deallocate(value_type* ptr, size_t size) {
-    PCScanMetadataAllocator().FreeNoHooks(ptr);
-  }
-};
-
-// Inherit from it to make a class allocated on the metadata partition.
-struct AllocatedOnPCScanMetadataPartition {
-  static void* operator new(size_t size) {
-    return PCScanMetadataAllocator().AllocWithFlagsNoHooks(0, size,
-                                                           PartitionPageSize());
-  }
-  static void operator delete(void* ptr) {
-    PCScanMetadataAllocator().FreeNoHooks(ptr);
-  }
-};
-
-template <typename T, typename... Args>
-T* MakePCScanMetadata(Args&&... args) {
-  auto* memory =
-      static_cast<T*>(PCScanMetadataAllocator().AllocWithFlagsNoHooks(
-          0, sizeof(T), PartitionPageSize()));
-  return new (memory) T(std::forward<Args>(args)...);
-}
-
-struct PCScanMetadataDeleter final {
-  inline void operator()(void* ptr) const {
-    PCScanMetadataAllocator().FreeNoHooks(ptr);
-  }
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_
diff --git a/base/allocator/partition_allocator/starscan/pcscan.cc b/base/allocator/partition_allocator/starscan/pcscan.cc
deleted file mode 100644
index 3014777..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
-
-namespace partition_alloc::internal {
-
-void PCScan::Initialize(InitConfig config) {
-  PCScanInternal::Instance().Initialize(config);
-}
-
-bool PCScan::IsInitialized() {
-  return PCScanInternal::Instance().is_initialized();
-}
-
-void PCScan::Disable() {
-  auto& instance = PCScan::Instance();
-  instance.scheduler().scheduling_backend().DisableScheduling();
-}
-
-bool PCScan::IsEnabled() {
-  auto& instance = PCScan::Instance();
-  return instance.scheduler().scheduling_backend().is_scheduling_enabled();
-}
-
-void PCScan::Reenable() {
-  auto& instance = PCScan::Instance();
-  instance.scheduler().scheduling_backend().EnableScheduling();
-}
-
-void PCScan::RegisterScannableRoot(Root* root) {
-  PCScanInternal::Instance().RegisterScannableRoot(root);
-}
-
-void PCScan::RegisterNonScannableRoot(Root* root) {
-  PCScanInternal::Instance().RegisterNonScannableRoot(root);
-}
-
-void PCScan::RegisterNewSuperPage(Root* root, uintptr_t super_page_base) {
-  PCScanInternal::Instance().RegisterNewSuperPage(root, super_page_base);
-}
-
-void PCScan::PerformScan(InvocationMode invocation_mode) {
-  PCScanInternal::Instance().PerformScan(invocation_mode);
-}
-
-void PCScan::PerformScanIfNeeded(InvocationMode invocation_mode) {
-  PCScanInternal::Instance().PerformScanIfNeeded(invocation_mode);
-}
-
-void PCScan::PerformDelayedScan(int64_t delay_in_microseconds) {
-  PCScanInternal::Instance().PerformDelayedScan(
-      base::Microseconds(delay_in_microseconds));
-}
-
-void PCScan::JoinScan() {
-  PCScanInternal::Instance().JoinScan();
-}
-
-void PCScan::SetProcessName(const char* process_name) {
-  PCScanInternal::Instance().SetProcessName(process_name);
-}
-
-void PCScan::EnableStackScanning() {
-  PCScanInternal::Instance().EnableStackScanning();
-}
-void PCScan::DisableStackScanning() {
-  PCScanInternal::Instance().DisableStackScanning();
-}
-bool PCScan::IsStackScanningEnabled() {
-  return PCScanInternal::Instance().IsStackScanningEnabled();
-}
-
-void PCScan::EnableImmediateFreeing() {
-  PCScanInternal::Instance().EnableImmediateFreeing();
-}
-
-void PCScan::NotifyThreadCreated(void* stack_top) {
-  PCScanInternal::Instance().NotifyThreadCreated(stack_top);
-}
-void PCScan::NotifyThreadDestroyed() {
-  PCScanInternal::Instance().NotifyThreadDestroyed();
-}
-
-void PCScan::SetClearType(ClearType clear_type) {
-  PCScan& instance = Instance();
-  instance.clear_type_ = clear_type;
-}
-
-void PCScan::UninitForTesting() {
-  PCScanInternal::Instance().ClearRootsForTesting();  // IN-TEST
-  ReinitPCScanMetadataAllocatorForTesting();          // IN-TEST
-}
-
-void PCScan::ReinitForTesting(InitConfig config) {
-  PCScanInternal::Instance().ReinitForTesting(config);  // IN-TEST
-}
-
-void PCScan::FinishScanForTesting() {
-  PCScanInternal::Instance().FinishScanForTesting();  // IN-TEST
-}
-
-void PCScan::RegisterStatsReporter(partition_alloc::StatsReporter* reporter) {
-  PCScanInternal::Instance().RegisterStatsReporter(reporter);
-}
-
-PCScan PCScan::instance_ PA_CONSTINIT;
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/pcscan.h b/base/allocator/partition_allocator/starscan/pcscan.h
deleted file mode 100644
index 56596e9..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan.h
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_
-
-#include <atomic>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
-#include "base/allocator/partition_allocator/tagging.h"
-
-namespace partition_alloc {
-
-class StatsReporter;
-
-namespace internal {
-
-[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED
-    PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DoubleFreeAttempt();
-
-// PCScan (Probabilistic Conservative Scanning) is the algorithm that eliminates
-// use-after-free bugs by verifying that there are no pointers in memory which
-// point to explicitly freed objects before actually releasing their memory. If
-// PCScan is enabled for a partition, freed objects are not immediately returned
-// to the allocator, but are stored in a quarantine. When the quarantine reaches
-// a certain threshold, a concurrent PCScan task gets posted. The task scans the
-// entire heap, looking for dangling pointers (those that point to the
-// quarantine entries). After scanning, the unvisited quarantine entries are
-// unreachable and therefore can be safely reclaimed.
-//
-// The driver class encapsulates the entire PCScan infrastructure.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
- public:
-  using Root = PartitionRoot;
-  using SlotSpan = SlotSpanMetadata;
-
-  enum class InvocationMode {
-    kBlocking,
-    kNonBlocking,
-    kForcedBlocking,
-    kScheduleOnlyForTesting,
-  };
-
-  enum class ClearType : uint8_t {
-    // Clear in the scanning task.
-    kLazy,
-    // Eagerly clear quarantined objects on MoveToQuarantine().
-    kEager,
-  };
-
-  // Parameters used to initialize *Scan.
-  struct InitConfig {
-    // Based on the provided mode, PCScan will try to use a certain
-    // WriteProtector, if supported by the system.
-    enum class WantedWriteProtectionMode : uint8_t {
-      kDisabled,
-      kEnabled,
-    } write_protection = WantedWriteProtectionMode::kDisabled;
-
-    // Flag that enables safepoints that stop mutator execution and help
-    // scanning.
-    enum class SafepointMode : uint8_t {
-      kDisabled,
-      kEnabled,
-    } safepoint = SafepointMode::kDisabled;
-  };
-
-  PCScan(const PCScan&) = delete;
-  PCScan& operator=(const PCScan&) = delete;
-
-  // Initializes PCScan and prepares internal data structures.
-  static void Initialize(InitConfig);
-  static bool IsInitialized();
-
-  // Disable/reenable PCScan. Temporarily disabling PCScan can be useful in
-  // CPU-demanding contexts.
-  static void Disable();
-  static void Reenable();
-  // Query if PCScan is enabled.
-  static bool IsEnabled();
-
-  // Registers a root for scanning.
-  static void RegisterScannableRoot(Root* root);
-  // Registers a root that doesn't need to be scanned but still contains
-  // quarantined objects.
-  static void RegisterNonScannableRoot(Root* root);
-
-  // Registers a newly allocated super page for |root|.
-  static void RegisterNewSuperPage(Root* root, uintptr_t super_page_base);
-
-  PA_ALWAYS_INLINE static void MoveToQuarantine(void* object,
-                                                size_t usable_size,
-                                                uintptr_t slot_start,
-                                                size_t slot_size);
-
-  // Performs scanning unconditionally.
-  static void PerformScan(InvocationMode invocation_mode);
-  // Performs scanning only if a certain quarantine threshold was reached.
-  static void PerformScanIfNeeded(InvocationMode invocation_mode);
-  // Performs scanning with specified delay.
-  static void PerformDelayedScan(int64_t delay_in_microseconds);
-
-  // Enables safepoints in mutator threads.
-  PA_ALWAYS_INLINE static void EnableSafepoints();
-  // Join scan from a safepoint in a mutator thread. As soon as PCScan is
-  // scheduled, mutators can join PCScan, helping out with clearing and
-  // scanning.
-  PA_ALWAYS_INLINE static void JoinScanIfNeeded();
-
-  // Checks if there is a PCScan task currently in progress.
-  PA_ALWAYS_INLINE static bool IsInProgress();
-
-  // Sets process name (used for histograms). |name| must be a string literal.
-  static void SetProcessName(const char* name);
-
-  static void EnableStackScanning();
-  static void DisableStackScanning();
-  static bool IsStackScanningEnabled();
-
-  static void EnableImmediateFreeing();
-
-  // Notify PCScan that a new thread was created/destroyed. Can be called for
-  // uninitialized PCScan (before Initialize()).
-  static void NotifyThreadCreated(void* stack_top);
-  static void NotifyThreadDestroyed();
-
-  // Define when clearing should happen (on free() or in scanning task).
-  static void SetClearType(ClearType);
-
-  static void UninitForTesting();
-
-  static inline PCScanScheduler& scheduler();
-
-  // Registers reporting class.
-  static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
-
- private:
-  class PCScanThread;
-  friend class PCScanTask;
-  friend class PartitionAllocPCScanTestBase;
-  friend class PCScanInternal;
-
-  enum class State : uint8_t {
-    // PCScan task is not scheduled.
-    kNotRunning,
-    // PCScan task is being started and about to be scheduled.
-    kScheduled,
-    // PCScan task is scheduled and can be scanning (or clearing).
-    kScanning,
-    // PCScan task is sweeping or finalizing.
-    kSweepingAndFinishing
-  };
-
-  PA_ALWAYS_INLINE static PCScan& Instance();
-
-  PA_ALWAYS_INLINE bool IsJoinable() const;
-  PA_ALWAYS_INLINE void SetJoinableIfSafepointEnabled(bool);
-
-  inline constexpr PCScan();
-
-  // Joins scan unconditionally.
-  static void JoinScan();
-
-  // Finish scan as scanner thread.
-  static void FinishScanForTesting();
-
-  // Reinitialize internal structures (e.g. card table).
-  static void ReinitForTesting(InitConfig);
-
-  size_t epoch() const { return scheduler_.epoch(); }
-
-  // PA_CONSTINIT for fast access (avoiding static thread-safe initialization).
-  static PCScan instance_ PA_CONSTINIT;
-
-  PCScanScheduler scheduler_{};
-  std::atomic<State> state_{State::kNotRunning};
-  std::atomic<bool> is_joinable_{false};
-  bool is_safepoint_enabled_{false};
-  ClearType clear_type_{ClearType::kLazy};
-};
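
The class comment above describes the core quarantine-then-scan cycle: frees are accumulated, and crossing a size limit triggers a scan that reclaims only unreachable entries. Below is a minimal, standalone sketch of that bookkeeping; QuarantineSketch, its limit, and SweepAll() are illustrative names, not PartitionAlloc code, and the real scanner keeps entries that survive marking rather than releasing everything.

    #include <cstddef>
    #include <cstdlib>
    #include <utility>
    #include <vector>

    struct QuarantineSketch {
      size_t limit_bytes;
      size_t accounted_bytes = 0;
      std::vector<std::pair<void*, size_t>> entries;

      // Records a "freed" object; returns true when the caller should trigger
      // a scan (the analogue of the quarantine limit being reached).
      bool Add(void* object, size_t size) {
        entries.emplace_back(object, size);
        accounted_bytes += size;
        return accounted_bytes >= limit_bytes;
      }

      // Stand-in for sweeping: releases everything. The real scanner first
      // marks entries that are still referenced and keeps them quarantined.
      void SweepAll() {
        for (auto& entry : entries) std::free(entry.first);
        entries.clear();
        accounted_bytes = 0;
      }
    };
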
-
-// To please Chromium's clang plugin.
-constexpr PCScan::PCScan() = default;
-
-PA_ALWAYS_INLINE PCScan& PCScan::Instance() {
-  // The instance is declared as a static member, not static local. The reason
-  // is that we want to use the require_constant_initialization attribute to
-  // avoid double-checked-locking which would otherwise have been introduced
-  // by the compiler for thread-safe dynamic initialization (see constinit
-  // from C++20).
-  return instance_;
-}
-
-PA_ALWAYS_INLINE bool PCScan::IsInProgress() {
-  const PCScan& instance = Instance();
-  return instance.state_.load(std::memory_order_relaxed) != State::kNotRunning;
-}
-
-PA_ALWAYS_INLINE bool PCScan::IsJoinable() const {
-  // This has acquire semantics since a mutator relies on the task being set up.
-  return is_joinable_.load(std::memory_order_acquire);
-}
-
-PA_ALWAYS_INLINE void PCScan::SetJoinableIfSafepointEnabled(bool value) {
-  if (!is_safepoint_enabled_) {
-    PA_DCHECK(!is_joinable_.load(std::memory_order_relaxed));
-    return;
-  }
-  // Release semantics is required to "publish" the change of the state so that
-  // the mutators can join scanning and expect the consistent state.
-  is_joinable_.store(value, std::memory_order_release);
-}
-
-PA_ALWAYS_INLINE void PCScan::EnableSafepoints() {
-  PCScan& instance = Instance();
-  instance.is_safepoint_enabled_ = true;
-}
-
-PA_ALWAYS_INLINE void PCScan::JoinScanIfNeeded() {
-  PCScan& instance = Instance();
-  if (PA_UNLIKELY(instance.IsJoinable())) {
-    instance.JoinScan();
-  }
-}
-
-PA_ALWAYS_INLINE void PCScan::MoveToQuarantine(void* object,
-                                               size_t usable_size,
-                                               uintptr_t slot_start,
-                                               size_t slot_size) {
-  PCScan& instance = Instance();
-  if (instance.clear_type_ == ClearType::kEager) {
-    // We need to distinguish between usable_size and slot_size in this context:
-    // - for large buckets usable_size can be noticeably smaller than slot_size;
-    // - usable_size is safe as it doesn't cover extras as opposed to slot_size.
-    // TODO(bikineev): If we start protecting quarantine memory, we can lose
-    // double-free coverage (the check below). Consider performing the
-    // double-free check before protecting if eager clearing becomes default.
-    SecureMemset(object, 0, usable_size);
-  }
-
-  auto* state_bitmap = StateBitmapFromAddr(slot_start);
-
-  // Mark the state in the state bitmap as quarantined. Make sure to do it after
-  // the clearing to avoid racing with *Scan Sweeper.
-  [[maybe_unused]] const bool succeeded =
-      state_bitmap->Quarantine(slot_start, instance.epoch());
-#if PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
-  if (PA_UNLIKELY(!succeeded)) {
-    DoubleFreeAttempt();
-  }
-#else
-  // The compiler is able to optimize cmpxchg to a lock-prefixed and.
-#endif  // PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
-
-  const bool is_limit_reached = instance.scheduler_.AccountFreed(slot_size);
-  if (PA_UNLIKELY(is_limit_reached)) {
-    // Perform a quick check if another scan is already in progress.
-    if (instance.IsInProgress()) {
-      return;
-    }
-    // Avoid blocking the current thread for regular scans.
-    instance.PerformScan(InvocationMode::kNonBlocking);
-  }
-}
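
The Quarantine() call on the state bitmap above doubles as a double-free detector: a second attempt to quarantine the same slot fails and, when eager detection is enabled, ends in DoubleFreeAttempt(). A hedged sketch of that state transition, with an ordinary map standing in for the real atomic bitmap:

    #include <cstdint>
    #include <unordered_map>

    enum class SlotState : uint8_t { kAllocated, kQuarantined, kFreed };

    struct StateMapSketch {
      std::unordered_map<uintptr_t, SlotState> states;

      // Mirrors the "Quarantine() returns false on repeat" behavior: only an
      // allocated slot may enter quarantine; a second attempt indicates a
      // double free.
      bool Quarantine(uintptr_t slot_start) {
        auto it = states.find(slot_start);
        if (it == states.end() || it->second != SlotState::kAllocated)
          return false;
        it->second = SlotState::kQuarantined;
        return true;
      }
    };
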
-
-inline PCScanScheduler& PCScan::scheduler() {
-  PCScan& instance = Instance();
-  return instance.scheduler_;
-}
-
-}  // namespace internal
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_
diff --git a/base/allocator/partition_allocator/starscan/pcscan_internal.cc b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
deleted file mode 100644
index ad27d1f..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan_internal.cc
+++ /dev/null
@@ -1,1642 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
-
-#include <algorithm>
-#include <array>
-#include <chrono>
-#include <condition_variable>
-#include <cstdint>
-#include <mutex>
-#include <numeric>
-#include <set>
-#include <thread>
-#include <type_traits>
-#include <unordered_map>
-#include <vector>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/allocation_guard.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
-#include "base/allocator/partition_allocator/starscan/raceful_worklist.h"
-#include "base/allocator/partition_allocator/starscan/scan_loop.h"
-#include "base/allocator/partition_allocator/starscan/snapshot.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-#include "base/allocator/partition_allocator/starscan/stats_collector.h"
-#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
-#endif
-
-#if PA_CONFIG(STARSCAN_NOINLINE_SCAN_FUNCTIONS)
-#define PA_SCAN_INLINE PA_NOINLINE
-#else
-#define PA_SCAN_INLINE PA_ALWAYS_INLINE
-#endif
-
-namespace partition_alloc::internal {
-
-[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void DoubleFreeAttempt() {
-  PA_NO_CODE_FOLDING();
-  PA_IMMEDIATE_CRASH();
-}
-
-namespace {
-
-#if PA_CONFIG(HAS_ALLOCATION_GUARD)
-// Currently, check reentrancy only on Linux. On Android TLS is emulated by the
-// runtime lib, which can allocate and therefore cause reentrancy.
-struct ReentrantScannerGuard final {
- public:
-  ReentrantScannerGuard() {
-    PA_CHECK(!guard_);
-    guard_ = true;
-  }
-  ~ReentrantScannerGuard() { guard_ = false; }
-
- private:
-  // Since this variable has hidden visibility (not referenced by other DSOs),
-  // assume that thread_local works on all supported architectures.
-  static thread_local size_t guard_;
-};
-thread_local size_t ReentrantScannerGuard::guard_ = 0;
-#else
-struct [[maybe_unused]] ReentrantScannerGuard final {};
-#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
-
-// Scope that disables MTE checks. Only used inside scanning to avoid the race:
-// a slot tag is changed by the mutator, while the scanner sees an old value.
-struct DisableMTEScope final {
-  DisableMTEScope() {
-    ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
-        ::partition_alloc::TagViolationReportingMode::kDisabled);
-  }
-  ~DisableMTEScope() {
-    ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
-        parent_tagging_mode);
-  }
-
- private:
-  ::partition_alloc::TagViolationReportingMode parent_tagging_mode =
-      ::partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
-};
-
-#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-// Bytemap that represents regions (cards) that contain quarantined slots.
-// A single PCScan cycle consists of the following steps:
-// 1) clearing (memset quarantine + marking cards that contain quarantine);
-// 2) scanning;
-// 3) sweeping (freeing + unmarking cards that contain freed slots).
-// Marking cards in step 1) ensures that the card table stays in a consistent
-// state while scanning. Unmarking in step 3) ensures that unmarking
-// actually happens (and we don't hit too many false positives).
-//
-// The code here relies on the fact that |address| is in the regular pool and
-// that the card table (this object) is allocated at the very beginning of that
-// pool.
-class QuarantineCardTable final {
- public:
-  // Avoid the load of the base of the regular pool.
-  PA_ALWAYS_INLINE static QuarantineCardTable& GetFrom(uintptr_t address) {
-    PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(address));
-    return *reinterpret_cast<QuarantineCardTable*>(
-        address & PartitionAddressSpace::RegularPoolBaseMask());
-  }
-
-  PA_ALWAYS_INLINE void Quarantine(uintptr_t begin, size_t size) {
-    return SetImpl(begin, size, true);
-  }
-
-  PA_ALWAYS_INLINE void Unquarantine(uintptr_t begin, size_t size) {
-    return SetImpl(begin, size, false);
-  }
-
-  // Returns whether the card to which |address| points contains quarantined
-  // slots. May return false positives but should never return false
-  // negatives, as otherwise this breaks security.
-  PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
-    const size_t byte = Byte(address);
-    PA_SCAN_DCHECK(byte < bytes_.size());
-    return bytes_[byte];
-  }
-
- private:
-  static constexpr size_t kCardSize = kPoolMaxSize / kSuperPageSize;
-  static constexpr size_t kBytes = kPoolMaxSize / kCardSize;
-
-  QuarantineCardTable() = default;
-
-  PA_ALWAYS_INLINE static size_t Byte(uintptr_t address) {
-    return (address & ~PartitionAddressSpace::RegularPoolBaseMask()) /
-           kCardSize;
-  }
-
-  PA_ALWAYS_INLINE void SetImpl(uintptr_t begin, size_t size, bool value) {
-    const size_t byte = Byte(begin);
-    const size_t need_bytes = (size + (kCardSize - 1)) / kCardSize;
-    PA_SCAN_DCHECK(bytes_.size() >= byte + need_bytes);
-    PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(begin));
-    for (size_t i = byte; i < byte + need_bytes; ++i) {
-      bytes_[i] = value;
-    }
-  }
-
-  std::array<bool, kBytes> bytes_;
-};
-static_assert(kSuperPageSize >= sizeof(QuarantineCardTable),
-              "Card table size must be less than kSuperPageSize, since this is "
-              "what is committed");
-#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
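
The card arithmetic above maps an offset within the pool to one byte of the table. A simplified, self-contained sketch with assumed pool and card sizes (the real constants are derived from kPoolMaxSize and kSuperPageSize):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPoolSize = size_t{1} << 30;  // assumed 1 GiB pool
    constexpr size_t kCardSize = size_t{1} << 21;  // assumed 2 MiB cards
    constexpr size_t kCards = kPoolSize / kCardSize;

    struct CardTableSketch {
      std::array<bool, kCards> cards{};

      // Marks every card overlapped by [pool_offset, pool_offset + size).
      void Set(uintptr_t pool_offset, size_t size, bool value) {
        if (size == 0) return;
        const size_t first = pool_offset / kCardSize;
        const size_t last = (pool_offset + size - 1) / kCardSize;
        for (size_t i = first; i <= last; ++i) cards[i] = value;
      }

      // Whole-card granularity: may report false positives, never false
      // negatives for a range that was marked.
      bool MayContainQuarantined(uintptr_t pool_offset) const {
        return cards[pool_offset / kCardSize];
      }
    };
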
-
-template <typename T>
-using MetadataVector = std::vector<T, MetadataAllocator<T>>;
-template <typename T>
-using MetadataSet = std::set<T, std::less<>, MetadataAllocator<T>>;
-template <typename K, typename V>
-using MetadataHashMap =
-    std::unordered_map<K,
-                       V,
-                       std::hash<K>,
-                       std::equal_to<>,
-                       MetadataAllocator<std::pair<const K, V>>>;
-
-struct GetSlotStartResult final {
-  PA_ALWAYS_INLINE bool is_found() const {
-    PA_SCAN_DCHECK(!slot_start || slot_size);
-    return slot_start;
-  }
-
-  uintptr_t slot_start = 0;
-  size_t slot_size = 0;
-};
-
-// Returns the start of a slot, or 0 if |maybe_inner_address| is not inside of
-// an existing slot span. The function may return a non-0 address even inside a
-// decommitted or free slot span; it's the caller's responsibility to check
-// whether the memory is actually allocated.
-//
-// |maybe_inner_address| must be within a normal-bucket super page and can also
-// point to guard pages or slot-span metadata.
-PA_SCAN_INLINE GetSlotStartResult
-GetSlotStartInSuperPage(uintptr_t maybe_inner_address) {
-  PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_inner_address));
-  // Don't use SlotSpanMetadata/PartitionPage::FromAddr() and family, because
-  // they expect an address within a super page payload area, which we don't
-  // know yet if |maybe_inner_address| is.
-  const uintptr_t super_page = maybe_inner_address & kSuperPageBaseMask;
-
-  const uintptr_t partition_page_index =
-      (maybe_inner_address & kSuperPageOffsetMask) >> PartitionPageShift();
-  auto* page =
-      PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
-  // Check if page is valid. The check also works for the guard pages and the
-  // metadata page.
-  if (!page->is_valid) {
-    return {};
-  }
-
-  page -= page->slot_span_metadata_offset;
-  PA_SCAN_DCHECK(page->is_valid);
-  PA_SCAN_DCHECK(!page->slot_span_metadata_offset);
-  auto* slot_span = &page->slot_span_metadata;
-  // Check if the slot span is actually used and valid.
-  if (!slot_span->bucket) {
-    return {};
-  }
-#if PA_SCAN_DCHECK_IS_ON()
-  DCheckIsValidSlotSpan(slot_span);
-#endif
-  const uintptr_t slot_span_start =
-      SlotSpanMetadata::ToSlotSpanStart(slot_span);
-  const ptrdiff_t ptr_offset = maybe_inner_address - slot_span_start;
-  PA_SCAN_DCHECK(0 <= ptr_offset &&
-                 ptr_offset < static_cast<ptrdiff_t>(
-                                  slot_span->bucket->get_pages_per_slot_span() *
-                                  PartitionPageSize()));
-  // Slot span size in bytes is not necessarily multiple of partition page.
-  // Don't check whether the pointer points outside of the usable area, since
-  // checking the quarantine bit will return false in this case anyway.
-  const size_t slot_size = slot_span->bucket->slot_size;
-  const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
-  const uintptr_t slot_start = slot_span_start + (slot_number * slot_size);
-  PA_SCAN_DCHECK(slot_start <= maybe_inner_address &&
-                 maybe_inner_address < slot_start + slot_size);
-  return {.slot_start = slot_start, .slot_size = slot_size};
-}
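
The tail of the function above reduces to rounding the offset within the slot span down to a slot boundary. A standalone sketch of just that step, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    // Given the slot span start, the bucket's slot size, and an address inside
    // the span, return the start of the slot containing that address.
    uintptr_t SlotStartFromInnerAddress(uintptr_t slot_span_start,
                                        size_t slot_size,
                                        uintptr_t inner_address) {
      const uintptr_t offset = inner_address - slot_span_start;
      const uintptr_t slot_number = offset / slot_size;
      return slot_span_start + slot_number * slot_size;
    }
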
-
-#if PA_SCAN_DCHECK_IS_ON()
-bool IsQuarantineEmptyOnSuperPage(uintptr_t super_page) {
-  auto* bitmap = SuperPageStateBitmap(super_page);
-  size_t visited = 0;
-  bitmap->IterateQuarantined([&visited](auto) { ++visited; });
-  return !visited;
-}
-#endif
-
-SimdSupport DetectSimdSupport() {
-#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-  return SimdSupport::kNEON;
-#else
-  const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
-  if (cpu.has_avx2()) {
-    return SimdSupport::kAVX2;
-  }
-  if (cpu.has_sse41()) {
-    return SimdSupport::kSSE41;
-  }
-  return SimdSupport::kUnvectorized;
-#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-}
-
-void CommitCardTable() {
-#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
-                      sizeof(QuarantineCardTable),
-                      PageAccessibilityConfiguration(
-                          PageAccessibilityConfiguration::kReadWrite),
-                      PageAccessibilityDisposition::kRequireUpdate);
-#endif
-}
-
-template <class Function>
-void IterateNonEmptySlotSpans(uintptr_t super_page,
-                              size_t nonempty_slot_spans,
-                              Function function) {
-  PA_SCAN_DCHECK(!(super_page % kSuperPageAlignment));
-  PA_SCAN_DCHECK(nonempty_slot_spans);
-
-  size_t slot_spans_to_visit = nonempty_slot_spans;
-#if PA_SCAN_DCHECK_IS_ON()
-  size_t visited = 0;
-#endif
-
-  IterateSlotSpans(super_page, true /*with_quarantine*/,
-                   [&function, &slot_spans_to_visit
-#if PA_SCAN_DCHECK_IS_ON()
-                    ,
-                    &visited
-#endif
-  ](SlotSpanMetadata* slot_span) {
-                     if (slot_span->is_empty() || slot_span->is_decommitted()) {
-                       // Skip empty/decommitted slot spans.
-                       return false;
-                     }
-                     function(slot_span);
-                     --slot_spans_to_visit;
-#if PA_SCAN_DCHECK_IS_ON()
-                     // In debug builds, scan all the slot spans to check that
-                     // number of visited slot spans is equal to the number of
-                     // nonempty_slot_spans.
-                     ++visited;
-                     return false;
-#else
-        return slot_spans_to_visit == 0;
-#endif
-                   });
-#if PA_SCAN_DCHECK_IS_ON()
-  // Check that exactly all non-empty slot spans have been visited.
-  PA_DCHECK(nonempty_slot_spans == visited);
-#endif
-}
-
-// SuperPageSnapshot is used to record all slot spans that contain live slots.
-// The class avoids dynamic allocations and is designed to be instantiated on
-// stack. To avoid stack overflow, internal data structures are kept packed.
-class SuperPageSnapshot final {
-  // The following constants are used to define a conservative estimate for
-  // maximum number of slot spans in a super page.
-  //
-  // For systems with runtime-defined page size, assume partition page size is
-  // at least 16kiB.
-  static constexpr size_t kMinPartitionPageSize =
-      __builtin_constant_p(PartitionPageSize()) ? PartitionPageSize() : 1 << 14;
-  static constexpr size_t kStateBitmapMinReservedSize =
-      __builtin_constant_p(ReservedStateBitmapSize())
-          ? ReservedStateBitmapSize()
-          : partition_alloc::internal::base::bits::AlignUp(
-                sizeof(AllocationStateMap),
-                kMinPartitionPageSize);
-  // Take into account guard partition page at the end of super-page.
-  static constexpr size_t kGuardPagesSize = 2 * kMinPartitionPageSize;
-
-  static constexpr size_t kPayloadMaxSize =
-      kSuperPageSize - kStateBitmapMinReservedSize - kGuardPagesSize;
-  static_assert(kPayloadMaxSize % kMinPartitionPageSize == 0,
-                "kPayloadMaxSize must be multiple of kMinPartitionPageSize");
-
-  static constexpr size_t kMaxSlotSpansInSuperPage =
-      kPayloadMaxSize / kMinPartitionPageSize;
-
- public:
-  struct ScanArea {
-    // Use packed integer types to save stack space. In theory, kAlignment could
-    // be used instead of words, but it doesn't seem to bring savings.
-    uint32_t offset_within_page_in_words;
-    uint32_t size_in_words;
-    uint32_t slot_size_in_words;
-  };
-
-  class ScanAreas : private std::array<ScanArea, kMaxSlotSpansInSuperPage> {
-    using Base = std::array<ScanArea, kMaxSlotSpansInSuperPage>;
-
-   public:
-    using iterator = Base::iterator;
-    using const_iterator = Base::const_iterator;
-    using Base::operator[];
-
-    iterator begin() { return Base::begin(); }
-    const_iterator begin() const { return Base::begin(); }
-
-    iterator end() { return std::next(begin(), size_); }
-    const_iterator end() const { return std::next(begin(), size_); }
-
-    void set_size(size_t new_size) { size_ = new_size; }
-
-   private:
-    size_t size_;
-  };
-
-  static_assert(std::is_trivially_default_constructible<ScanAreas>::value,
-                "ScanAreas must be trivially default constructible to ensure "
-                "that no memsets are generated by the compiler as a "
-                "result of value-initialization (or zero-initialization)");
-
-  void* operator new(size_t) = delete;
-  void operator delete(void*) = delete;
-
-  // Creates snapshot for a single super page. In theory, we could simply
-  // iterate over slot spans without taking a snapshot. However, we do this to
-  // minimize the mutex locking time. The mutex must be acquired to make sure
-  // that no mutator is concurrently changing any of the slot spans.
-  explicit SuperPageSnapshot(uintptr_t super_page_base);
-
-  const ScanAreas& scan_areas() const { return scan_areas_; }
-
- private:
-  ScanAreas scan_areas_;
-};
-
-static_assert(
-    sizeof(SuperPageSnapshot) <= 2048,
-    "SuperPageSnapshot must stay relatively small to be allocated on stack");
-
-SuperPageSnapshot::SuperPageSnapshot(uintptr_t super_page) {
-  using SlotSpan = SlotSpanMetadata;
-
-  auto* extent_entry = PartitionSuperPageToExtent(super_page);
-
-  ::partition_alloc::internal::ScopedGuard lock(
-      ::partition_alloc::internal::PartitionRootLock(extent_entry->root));
-
-  const size_t nonempty_slot_spans =
-      extent_entry->number_of_nonempty_slot_spans;
-  if (!nonempty_slot_spans) {
-#if PA_SCAN_DCHECK_IS_ON()
-    // Check that quarantine bitmap is empty for super-pages that contain
-    // only empty/decommitted slot-spans.
-    PA_CHECK(IsQuarantineEmptyOnSuperPage(super_page));
-#endif
-    scan_areas_.set_size(0);
-    return;
-  }
-
-  size_t current = 0;
-
-  IterateNonEmptySlotSpans(
-      super_page, nonempty_slot_spans, [this, &current](SlotSpan* slot_span) {
-        const uintptr_t payload_begin = SlotSpan::ToSlotSpanStart(slot_span);
-        // For single-slot slot-spans, scan only utilized slot part.
-        const size_t provisioned_size =
-            PA_UNLIKELY(slot_span->CanStoreRawSize())
-                ? slot_span->GetRawSize()
-                : slot_span->GetProvisionedSize();
-        // Free & decommitted slot spans are skipped.
-        PA_SCAN_DCHECK(provisioned_size > 0);
-        const uintptr_t payload_end = payload_begin + provisioned_size;
-        auto& area = scan_areas_[current];
-
-        const size_t offset_in_words =
-            (payload_begin & kSuperPageOffsetMask) / sizeof(uintptr_t);
-        const size_t size_in_words =
-            (payload_end - payload_begin) / sizeof(uintptr_t);
-        const size_t slot_size_in_words =
-            slot_span->bucket->slot_size / sizeof(uintptr_t);
-
-#if PA_SCAN_DCHECK_IS_ON()
-        PA_DCHECK(offset_in_words <=
-                  std::numeric_limits<
-                      decltype(area.offset_within_page_in_words)>::max());
-        PA_DCHECK(size_in_words <=
-                  std::numeric_limits<decltype(area.size_in_words)>::max());
-        PA_DCHECK(
-            slot_size_in_words <=
-            std::numeric_limits<decltype(area.slot_size_in_words)>::max());
-#endif
-
-        area.offset_within_page_in_words = offset_in_words;
-        area.size_in_words = size_in_words;
-        area.slot_size_in_words = slot_size_in_words;
-
-        ++current;
-      });
-
-  PA_SCAN_DCHECK(kMaxSlotSpansInSuperPage >= current);
-  scan_areas_.set_size(current);
-}
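
ScanArea stores offsets and sizes as 32-bit word counts to keep SuperPageSnapshot small enough for the stack. A sketch of that packing and the corresponding unpacking used during scanning (simplified; no overflow handling beyond the DCHECKs above):

    #include <cstdint>

    struct PackedAreaSketch {
      uint32_t offset_in_words;
      uint32_t size_in_words;
    };

    PackedAreaSketch Pack(uintptr_t super_page, uintptr_t begin, uintptr_t end) {
      return {static_cast<uint32_t>((begin - super_page) / sizeof(uintptr_t)),
              static_cast<uint32_t>((end - begin) / sizeof(uintptr_t))};
    }

    void Unpack(uintptr_t super_page, const PackedAreaSketch& area,
                uintptr_t& begin, uintptr_t& end) {
      begin = super_page + area.offset_in_words * sizeof(uintptr_t);
      end = begin + area.size_in_words * sizeof(uintptr_t);
    }
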
-
-}  // namespace
-
-class PCScanScanLoop;
-
-// This class is responsible for performing the entire PCScan task.
-// TODO(bikineev): Move PCScan algorithm out of PCScanTask.
-class PCScanTask final : public base::RefCountedThreadSafe<PCScanTask>,
-                         public AllocatedOnPCScanMetadataPartition {
- public:
-  // Creates and initializes a PCScan state.
-  PCScanTask(PCScan& pcscan, size_t quarantine_last_size);
-
-  PCScanTask(PCScanTask&&) noexcept = delete;
-  PCScanTask& operator=(PCScanTask&&) noexcept = delete;
-
-  // Execute PCScan from mutator inside safepoint.
-  void RunFromMutator();
-
-  // Execute PCScan from the scanner thread. Must be called only once from the
-  // scanner thread.
-  void RunFromScanner();
-
-  PCScanScheduler& scheduler() const { return pcscan_.scheduler(); }
-
- private:
-  class StackVisitor;
-  friend class PCScanScanLoop;
-
-  using Root = PCScan::Root;
-  using SlotSpan = SlotSpanMetadata;
-
-  // This is used:
-  // - to synchronize all scanning threads (mutators and the scanner);
-  // - for the scanner, to transition through the state machine
-  //   (kScheduled -> kScanning (ctor) -> kSweepingAndFinishing (dtor)).
-  template <Context context>
-  class SyncScope final {
-   public:
-    explicit SyncScope(PCScanTask& task) : task_(task) {
-      task_.number_of_scanning_threads_.fetch_add(1, std::memory_order_relaxed);
-      if (context == Context::kScanner) {
-        task_.pcscan_.state_.store(PCScan::State::kScanning,
-                                   std::memory_order_relaxed);
-        task_.pcscan_.SetJoinableIfSafepointEnabled(true);
-      }
-    }
-    ~SyncScope() {
-      // First, notify the scanning thread that this thread is done.
-      NotifyThreads();
-      if (context == Context::kScanner) {
-        // The scanner thread must wait here until all safepoints leave.
-        // Otherwise, sweeping may free a page that can later be accessed by a
-        // descheduled mutator.
-        WaitForOtherThreads();
-        task_.pcscan_.state_.store(PCScan::State::kSweepingAndFinishing,
-                                   std::memory_order_relaxed);
-      }
-    }
-
-   private:
-    void NotifyThreads() {
-      {
-        // The lock is required as otherwise there is a race between
-        // fetch_sub/notify in the mutator and checking
-        // number_of_scanning_threads_/waiting in the scanner.
-        std::lock_guard<std::mutex> lock(task_.mutex_);
-        task_.number_of_scanning_threads_.fetch_sub(1,
-                                                    std::memory_order_relaxed);
-        {
-          // Notify that scan is done and there is no need to enter
-          // the safepoint. This also helps a mutator to avoid repeating
-          // entering. Since the scanner thread waits for all threads to finish,
-          // there is no ABA problem here.
-          task_.pcscan_.SetJoinableIfSafepointEnabled(false);
-        }
-      }
-      task_.condvar_.notify_all();
-    }
-
-    void WaitForOtherThreads() {
-      std::unique_lock<std::mutex> lock(task_.mutex_);
-      task_.condvar_.wait(lock, [this] {
-        return !task_.number_of_scanning_threads_.load(
-            std::memory_order_relaxed);
-      });
-    }
-
-    PCScanTask& task_;
-  };
-
-  friend class base::RefCountedThreadSafe<PCScanTask>;
-  ~PCScanTask() = default;
-
-  PA_SCAN_INLINE AllocationStateMap* TryFindScannerBitmapForPointer(
-      uintptr_t maybe_ptr) const;
-
-  // Lookup and marking functions. Return size of the slot if marked, or zero
-  // otherwise.
-  PA_SCAN_INLINE size_t TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const;
-
-  // Scans stack, only called from safepoints.
-  void ScanStack();
-
-  // Scan individual areas.
-  void ScanNormalArea(PCScanInternal& pcscan,
-                      PCScanScanLoop& scan_loop,
-                      uintptr_t begin,
-                      uintptr_t end);
-  void ScanLargeArea(PCScanInternal& pcscan,
-                     PCScanScanLoop& scan_loop,
-                     uintptr_t begin,
-                     uintptr_t end,
-                     size_t slot_size);
-
-  // Scans all registered partitions and marks reachable quarantined slots.
-  void ScanPartitions();
-
-  // Clear quarantined slots and prepare the card table for fast lookup.
-  void ClearQuarantinedSlotsAndPrepareCardTable();
-
-  // Unprotect all slot spans from all partitions.
-  void UnprotectPartitions();
-
-  // Sweeps (frees) unreachable quarantined entries.
-  void SweepQuarantine();
-
-  // Finishes the scanner (updates limits, UMA, etc).
-  void FinishScanner();
-
-  // Cache the pcscan epoch to avoid the compiler loading the atomic
-  // QuarantineData::epoch_ on each access.
-  const size_t pcscan_epoch_;
-  std::unique_ptr<StarScanSnapshot> snapshot_;
-  StatsCollector stats_;
-  // Mutex and condvar that are used to synchronize scanning threads.
-  std::mutex mutex_;
-  std::condition_variable condvar_;
-  std::atomic<size_t> number_of_scanning_threads_{0u};
-  // We can unprotect only once to reduce context-switches.
-  std::once_flag unprotect_once_flag_;
-  bool immediatelly_free_slots_{false};
-  PCScan& pcscan_;
-};
-
-PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
-    uintptr_t maybe_ptr) const {
-  PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
-  // First, check if |maybe_ptr| points to a valid super page or a quarantined
-  // card.
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  // Check if |maybe_ptr| points to a quarantined card.
-  if (PA_LIKELY(
-          !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
-    return nullptr;
-  }
-#else   // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  // Without the card table, use the reservation offset table to check if
-  // |maybe_ptr| points to a valid super-page. It's not as precise (meaning
-  // that we may hit the slow path more frequently), but it reduces the memory
-  // overhead. Since we are certain here that |maybe_ptr| refers to the
-  // regular pool, it's okay to use the non-checking version of
-  // ReservationOffsetPointer().
-  const uintptr_t offset =
-      maybe_ptr & ~PartitionAddressSpace::RegularPoolBaseMask();
-  if (PA_LIKELY(*ReservationOffsetPointer(kRegularPoolHandle, offset) !=
-                kOffsetTagNormalBuckets)) {
-    return nullptr;
-  }
-#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
-  if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
-    return nullptr;
-  }
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  // We are certain here that |maybe_ptr| points to an allocated super-page.
-  return StateBitmapFromAddr(maybe_ptr);
-}
-
-// Looks up and marks a potential dangling pointer. Returns the size of the slot
-// (which is then accounted as quarantined), or zero if no slot is found.
-// For normal bucket super pages, PCScan uses two quarantine bitmaps, the
-// mutator and the scanner one. The former is used by mutators when slots are
-// freed, while the latter is used concurrently by the PCScan thread. The
-// bitmaps are swapped as soon as PCScan is triggered. Once a dangling pointer
-// (which points to a slot in the scanner bitmap) is found,
-// TryMarkSlotInNormalBuckets() marks it again in the bitmap and clears
-// from the scanner bitmap. This way, when scanning is done, all uncleared
-// entries in the scanner bitmap correspond to unreachable slots.
-PA_SCAN_INLINE size_t
-PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
-  // Check if |maybe_ptr| points somewhere to the heap.
-  // The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
-  auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
-  if (!state_map) {
-    return 0;
-  }
-
-  // Beyond this point, we know that |maybe_ptr| is a pointer within a
-  // normal-bucket super page.
-  PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_ptr));
-
-#if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  // Pointer from a normal bucket is always in the first superpage.
-  auto* root = Root::FromAddrInFirstSuperpage(maybe_ptr);
-  // Without the card table, we must make sure that |maybe_ptr| doesn't point to
-  // metadata partition.
-  // TODO(bikineev): To speed things up, consider removing the check and
-  // committing quarantine bitmaps for metadata partition.
-  // TODO(bikineev): Marking an entry in the reservation-table is not a
-  // publishing operation, meaning that the |root| pointer may not be assigned
-  // yet. This can happen as arbitrary pointers may point into a super-page
-  // during its set up. Make sure to check |root| is not null before
-  // dereferencing it.
-  if (PA_UNLIKELY(!root || !root->IsQuarantineEnabled())) {
-    return 0;
-  }
-#endif  // !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-
-  // Check if pointer was in the quarantine bitmap.
-  const GetSlotStartResult slot_start_result =
-      GetSlotStartInSuperPage(maybe_ptr);
-  if (!slot_start_result.is_found()) {
-    return 0;
-  }
-
-  const uintptr_t slot_start = slot_start_result.slot_start;
-  if (PA_LIKELY(!state_map->IsQuarantined(slot_start))) {
-    return 0;
-  }
-
-  PA_SCAN_DCHECK((maybe_ptr & kSuperPageBaseMask) ==
-                 (slot_start & kSuperPageBaseMask));
-
-  if (PA_UNLIKELY(immediatelly_free_slots_)) {
-    return 0;
-  }
-
-  // Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
-  // the mutator bitmap and clear from the scanner bitmap. Note that since
-  // PCScan has exclusive access to the scanner bitmap, we can avoid atomic rmw
-  // operation for it.
-  if (PA_LIKELY(
-          state_map->MarkQuarantinedAsReachable(slot_start, pcscan_epoch_))) {
-    return slot_start_result.slot_size;
-  }
-
-  return 0;
-}
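
The two-bitmap scheme described above can be pictured with ordinary containers: the scanner starts out owning the set of quarantined slots, and marking moves a slot back to the reachable set. This only illustrates the invariant, not the lock-free bitmap actually used:

    #include <cstdint>
    #include <unordered_set>

    struct QuarantineBitmapsSketch {
      std::unordered_set<uintptr_t> scanner;    // quarantined at scan start
      std::unordered_set<uintptr_t> reachable;  // marked during scanning

      // Returns true iff the slot was quarantined and has now been marked;
      // the caller then accounts its size as surviving quarantine.
      bool MarkQuarantinedAsReachable(uintptr_t slot_start) {
        if (scanner.erase(slot_start) == 0) return false;
        reachable.insert(slot_start);
        return true;
      }
    };
    // After scanning, everything still in |scanner| is unreachable and is
    // reclaimed by the sweep phase.
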
-
-void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
-  const PCScan::ClearType clear_type = pcscan_.clear_type_;
-
-#if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  if (clear_type == PCScan::ClearType::kEager) {
-    return;
-  }
-#endif
-
-  StarScanSnapshot::ClearingView view(*snapshot_);
-  view.VisitConcurrently([clear_type](uintptr_t super_page) {
-    auto* bitmap = StateBitmapFromAddr(super_page);
-    auto* root = Root::FromFirstSuperPage(super_page);
-    bitmap->IterateQuarantined([root, clear_type](uintptr_t slot_start) {
-      auto* slot_span = SlotSpan::FromSlotStart(slot_start);
-      // Use zero as a zapping value to speed up the fast bailout check in
-      // ScanPartitions.
-      const size_t size = root->GetSlotUsableSize(slot_span);
-      if (clear_type == PCScan::ClearType::kLazy) {
-        void* object = root->SlotStartToObject(slot_start);
-        memset(object, 0, size);
-      }
-#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-      // Set card(s) for this quarantined slot.
-      QuarantineCardTable::GetFrom(slot_start).Quarantine(slot_start, size);
-#endif
-    });
-  });
-}
-
-void PCScanTask::UnprotectPartitions() {
-  auto& pcscan = PCScanInternal::Instance();
-  if (!pcscan.WriteProtectionEnabled()) {
-    return;
-  }
-
-  StarScanSnapshot::UnprotectingView unprotect_view(*snapshot_);
-  unprotect_view.VisitConcurrently([&pcscan](uintptr_t super_page) {
-    SuperPageSnapshot super_page_snapshot(super_page);
-
-    for (const auto& scan_area : super_page_snapshot.scan_areas()) {
-      const uintptr_t begin =
-          super_page |
-          (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
-      const uintptr_t end =
-          begin + (scan_area.size_in_words * sizeof(uintptr_t));
-
-      pcscan.UnprotectPages(begin, end - begin);
-    }
-  });
-}
-
-class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
-  friend class ScanLoop<PCScanScanLoop>;
-
- public:
-  explicit PCScanScanLoop(const PCScanTask& task)
-      : ScanLoop(PCScanInternal::Instance().simd_support()), task_(task) {}
-
-  size_t quarantine_size() const { return quarantine_size_; }
-
- private:
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
-    return PartitionAddressSpace::RegularPoolBase();
-  }
-  PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
-    return PartitionAddressSpace::RegularPoolBaseMask();
-  }
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
-    // |maybe_ptr| may have an MTE tag, so remove it first.
-    quarantine_size_ +=
-        task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
-  }
-
-  const PCScanTask& task_;
-  DisableMTEScope disable_mte_;
-  size_t quarantine_size_ = 0;
-};
-
-class PCScanTask::StackVisitor final : public internal::StackVisitor {
- public:
-  explicit StackVisitor(const PCScanTask& task) : task_(task) {}
-
-  void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) override {
-    static constexpr size_t kMinimalAlignment = 32;
-    uintptr_t begin =
-        reinterpret_cast<uintptr_t>(stack_ptr) & ~(kMinimalAlignment - 1);
-    uintptr_t end =
-        (reinterpret_cast<uintptr_t>(stack_top) + kMinimalAlignment - 1) &
-        ~(kMinimalAlignment - 1);
-    PA_CHECK(begin < end);
-    PCScanScanLoop loop(task_);
-    loop.Run(begin, end);
-    quarantine_size_ += loop.quarantine_size();
-  }
-
-  // Returns size of quarantined slots that are reachable from the current
-  // stack.
-  size_t quarantine_size() const { return quarantine_size_; }
-
- private:
-  const PCScanTask& task_;
-  size_t quarantine_size_ = 0;
-};
-
-PCScanTask::PCScanTask(PCScan& pcscan, size_t quarantine_last_size)
-    : pcscan_epoch_(pcscan.epoch() - 1),
-      snapshot_(StarScanSnapshot::Create(PCScanInternal::Instance())),
-      stats_(PCScanInternal::Instance().process_name(), quarantine_last_size),
-      immediatelly_free_slots_(
-          PCScanInternal::Instance().IsImmediateFreeingEnabled()),
-      pcscan_(pcscan) {}
-
-void PCScanTask::ScanStack() {
-  const auto& pcscan = PCScanInternal::Instance();
-  if (!pcscan.IsStackScanningEnabled()) {
-    return;
-  }
-  // Check if the stack top was registered. It may not have been if the
-  // current allocation happens from pthread trampolines.
-  void* stack_top = pcscan.GetCurrentThreadStackTop();
-  if (PA_UNLIKELY(!stack_top)) {
-    return;
-  }
-
-  Stack stack_scanner(stack_top);
-  StackVisitor visitor(*this);
-  stack_scanner.IteratePointers(&visitor);
-  stats_.IncreaseSurvivedQuarantineSize(visitor.quarantine_size());
-}
-
-void PCScanTask::ScanNormalArea(PCScanInternal& pcscan,
-                                PCScanScanLoop& scan_loop,
-                                uintptr_t begin,
-                                uintptr_t end) {
-  // Protect slot span before scanning it.
-  pcscan.ProtectPages(begin, end - begin);
-  scan_loop.Run(begin, end);
-}
-
-void PCScanTask::ScanLargeArea(PCScanInternal& pcscan,
-                               PCScanScanLoop& scan_loop,
-                               uintptr_t begin,
-                               uintptr_t end,
-                               size_t slot_size) {
-  // For scanning large areas, it's worthwhile checking whether the range that
-  // is scanned contains allocated slots. It also helps to skip discarded
-  // freed slots.
-  // Protect slot span before scanning it.
-  pcscan.ProtectPages(begin, end - begin);
-
-  auto* bitmap = StateBitmapFromAddr(begin);
-
-  for (uintptr_t current_slot = begin; current_slot < end;
-       current_slot += slot_size) {
-    // It is okay to skip slots as the object they hold has been zapped at this
-    // point, which means that the pointers no longer retain other slots.
-    if (!bitmap->IsAllocated(current_slot)) {
-      continue;
-    }
-    uintptr_t current_slot_end = current_slot + slot_size;
-    // |slot_size| may be larger than |raw_size| for single-slot slot spans.
-    scan_loop.Run(current_slot, std::min(current_slot_end, end));
-  }
-}
-
-void PCScanTask::ScanPartitions() {
-  // Threshold above which it is worthwhile to check whether a slot is
-  // allocated and needs to be scanned. PartitionPurgeSlotSpan() purges only
-  // slots >= page-size; this helps us avoid faulting in discarded pages. We
-  // lower the threshold further to 1024 to take advantage of skipping
-  // unallocated slots, but don't go any lower, as that comes at the cost of
-  // expensive bitmap checking.
-  static constexpr size_t kLargeScanAreaThresholdInWords =
-      1024 / sizeof(uintptr_t);
-
-  PCScanScanLoop scan_loop(*this);
-  auto& pcscan = PCScanInternal::Instance();
-
-  StarScanSnapshot::ScanningView snapshot_view(*snapshot_);
-  snapshot_view.VisitConcurrently([this, &pcscan,
-                                   &scan_loop](uintptr_t super_page) {
-    SuperPageSnapshot super_page_snapshot(super_page);
-
-    for (const auto& scan_area : super_page_snapshot.scan_areas()) {
-      const uintptr_t begin =
-          super_page |
-          (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
-      PA_SCAN_DCHECK(begin ==
-                     super_page + (scan_area.offset_within_page_in_words *
-                                   sizeof(uintptr_t)));
-      const uintptr_t end = begin + scan_area.size_in_words * sizeof(uintptr_t);
-
-      if (PA_UNLIKELY(scan_area.slot_size_in_words >=
-                      kLargeScanAreaThresholdInWords)) {
-        ScanLargeArea(pcscan, scan_loop, begin, end,
-                      scan_area.slot_size_in_words * sizeof(uintptr_t));
-      } else {
-        ScanNormalArea(pcscan, scan_loop, begin, end);
-      }
-    }
-  });
-
-  stats_.IncreaseSurvivedQuarantineSize(scan_loop.quarantine_size());
-}
-
-namespace {
-
-struct SweepStat {
-  // Bytes that were really swept (by calling free()).
-  size_t swept_bytes = 0;
-  // Bytes of marked quarantine memory that were discarded (by calling
-  // madvise(MADV_DONTNEED)).
-  size_t discarded_bytes = 0;
-};
-
-void UnmarkInCardTable(uintptr_t slot_start, SlotSpanMetadata* slot_span) {
-#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-  // Reset card(s) for this quarantined slot. Please note that the cards may
-  // still contain quarantined slots (which were promoted in this scan cycle),
-  // but ClearQuarantinedSlotsAndPrepareCardTable() will set them again in the
-  // next PCScan cycle.
-  QuarantineCardTable::GetFrom(slot_start)
-      .Unquarantine(slot_start, slot_span->GetUtilizedSlotSize());
-#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
-}
-
-[[maybe_unused]] size_t FreeAndUnmarkInCardTable(PartitionRoot* root,
-                                                 SlotSpanMetadata* slot_span,
-                                                 uintptr_t slot_start) {
-  void* object = root->SlotStartToObject(slot_start);
-  root->FreeNoHooksImmediate(object, slot_span, slot_start);
-  UnmarkInCardTable(slot_start, slot_span);
-  return slot_span->bucket->slot_size;
-}
-
-[[maybe_unused]] void SweepSuperPage(PartitionRoot* root,
-                                     uintptr_t super_page,
-                                     size_t epoch,
-                                     SweepStat& stat) {
-  auto* bitmap = StateBitmapFromAddr(super_page);
-  PartitionRoot::FromFirstSuperPage(super_page);
-  bitmap->IterateUnmarkedQuarantined(epoch, [root,
-                                             &stat](uintptr_t slot_start) {
-    auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
-    stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
-  });
-}
-
-[[maybe_unused]] void SweepSuperPageAndDiscardMarkedQuarantine(
-    PartitionRoot* root,
-    uintptr_t super_page,
-    size_t epoch,
-    SweepStat& stat) {
-  auto* bitmap = StateBitmapFromAddr(super_page);
-  bitmap->IterateQuarantined(epoch, [root, &stat](uintptr_t slot_start,
-                                                  bool is_marked) {
-    auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
-    if (PA_LIKELY(!is_marked)) {
-      stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
-      return;
-    }
-    // Otherwise, try to discard pages for marked quarantine. Since no data is
-    // stored in quarantined slots (e.g. the |next| pointer), this can be
-    // freely done.
-    const size_t slot_size = slot_span->bucket->slot_size;
-    if (slot_size >= SystemPageSize()) {
-      const uintptr_t discard_end =
-          base::bits::AlignDown(slot_start + slot_size, SystemPageSize());
-      const uintptr_t discard_begin =
-          base::bits::AlignUp(slot_start, SystemPageSize());
-      const intptr_t discard_size = discard_end - discard_begin;
-      if (discard_size > 0) {
-        DiscardSystemPages(discard_begin, discard_size);
-        stat.discarded_bytes += discard_size;
-      }
-    }
-  });
-}
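
The discard path above releases only the whole system pages fully covered by a marked slot. A sketch of that range computation, with |page_size| standing in for SystemPageSize() and assumed to be a power of two:

    #include <cstddef>
    #include <cstdint>

    uintptr_t AlignUpTo(uintptr_t value, uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }
    uintptr_t AlignDownTo(uintptr_t value, uintptr_t alignment) {
      return value & ~(alignment - 1);
    }

    // Returns the number of bytes that could be handed to a page-discarding
    // call: the whole pages fully covered by the slot, or 0 if none.
    size_t DiscardableBytes(uintptr_t slot_start, size_t slot_size,
                            uintptr_t page_size) {
      const uintptr_t begin = AlignUpTo(slot_start, page_size);
      const uintptr_t end = AlignDownTo(slot_start + slot_size, page_size);
      return end > begin ? static_cast<size_t>(end - begin) : 0;
    }
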
-
-[[maybe_unused]] void SweepSuperPageWithBatchedFree(PartitionRoot* root,
-                                                    uintptr_t super_page,
-                                                    size_t epoch,
-                                                    SweepStat& stat) {
-  using SlotSpan = SlotSpanMetadata;
-
-  auto* bitmap = StateBitmapFromAddr(super_page);
-  SlotSpan* previous_slot_span = nullptr;
-  internal::PartitionFreelistEntry* freelist_tail = nullptr;
-  internal::PartitionFreelistEntry* freelist_head = nullptr;
-  size_t freelist_entries = 0;
-
-  const auto bitmap_iterator = [&](uintptr_t slot_start) {
-    SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start);
-    auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
-
-    if (current_slot_span != previous_slot_span) {
-      // We started scanning a new slot span. Flush the accumulated freelist to
-      // the slot-span's freelist. This is a single lock acquired per slot span.
-      if (previous_slot_span && freelist_entries) {
-        root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
-                           previous_slot_span);
-      }
-      freelist_head = entry;
-      freelist_tail = nullptr;
-      freelist_entries = 0;
-      previous_slot_span = current_slot_span;
-    }
-
-    if (freelist_tail) {
-      freelist_tail->SetNext(entry);
-    }
-    freelist_tail = entry;
-    ++freelist_entries;
-
-    UnmarkInCardTable(slot_start, current_slot_span);
-
-    stat.swept_bytes += current_slot_span->bucket->slot_size;
-  };
-
-  bitmap->IterateUnmarkedQuarantinedAndFree(epoch, bitmap_iterator);
-
-  if (previous_slot_span && freelist_entries) {
-    root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
-                       previous_slot_span);
-  }
-}
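
The batched-free sweep above chains entries destined for the same slot span and hands them back in a single RawFreeBatch() call, so the span's lock is taken once per run rather than once per slot. A rough, container-based sketch of that batching idea; the types and the grouping are illustrative, and the real code flushes whenever the current slot span changes during iteration:

    #include <map>
    #include <vector>

    struct FreeBatcherSketch {
      // An int key stands in for the slot span; the vector collects its
      // pending entries.
      std::map<int, std::vector<void*>> pending;

      void Add(int span_id, void* entry) { pending[span_id].push_back(entry); }

      template <typename FreeBatchFn>
      void Flush(FreeBatchFn free_batch) {
        // One call (and therefore one lock acquisition) per slot span.
        for (auto& [span_id, entries] : pending) free_batch(span_id, entries);
        pending.clear();
      }
    };
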
-
-}  // namespace
-
-void PCScanTask::SweepQuarantine() {
-  // Check that scan is unjoinable by this time.
-  PA_DCHECK(!pcscan_.IsJoinable());
-  // Discard marked quarantine memory on every Nth scan.
-  // TODO(bikineev): Find a better signal (e.g. memory pressure, high
-  // survival rate, etc).
-  static constexpr size_t kDiscardMarkedQuarantineFrequency = 16;
-  const bool should_discard =
-      (pcscan_epoch_ % kDiscardMarkedQuarantineFrequency == 0) &&
-      (pcscan_.clear_type_ == PCScan::ClearType::kEager);
-
-  SweepStat stat;
-  StarScanSnapshot::SweepingView sweeping_view(*snapshot_);
-  sweeping_view.VisitNonConcurrently(
-      [this, &stat, should_discard](uintptr_t super_page) {
-        auto* root = PartitionRoot::FromFirstSuperPage(super_page);
-
-#if PA_CONFIG(STARSCAN_BATCHED_FREE)
-        SweepSuperPageWithBatchedFree(root, super_page, pcscan_epoch_, stat);
-        (void)should_discard;
-#else
-        if (PA_UNLIKELY(should_discard && !root->settings.use_cookie)) {
-          SweepSuperPageAndDiscardMarkedQuarantine(root, super_page,
-                                                   pcscan_epoch_, stat);
-        } else {
-          SweepSuperPage(root, super_page, pcscan_epoch_, stat);
-        }
-#endif  // PA_CONFIG(STARSCAN_BATCHED_FREE)
-      });
-
-  stats_.IncreaseSweptSize(stat.swept_bytes);
-  stats_.IncreaseDiscardedQuarantineSize(stat.discarded_bytes);
-
-#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
-  // Sweeping potentially frees into the current thread's thread cache. Purge
-  // releases the cache back to the global allocator.
-  auto* current_thread_tcache = ThreadCache::Get();
-  if (ThreadCache::IsValid(current_thread_tcache)) {
-    current_thread_tcache->Purge();
-  }
-#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
-}
-
-void PCScanTask::FinishScanner() {
-  stats_.ReportTracesAndHists(PCScanInternal::Instance().GetReporter());
-
-  pcscan_.scheduler_.scheduling_backend().UpdateScheduleAfterScan(
-      stats_.survived_quarantine_size(), stats_.GetOverallTime(),
-      PCScanInternal::Instance().CalculateTotalHeapSize());
-
-  PCScanInternal::Instance().ResetCurrentPCScanTask();
-  // Change the state and check that concurrent task can't be scheduled twice.
-  PA_CHECK(pcscan_.state_.exchange(PCScan::State::kNotRunning,
-                                   std::memory_order_acq_rel) ==
-           PCScan::State::kSweepingAndFinishing);
-}
-
-void PCScanTask::RunFromMutator() {
-  ReentrantScannerGuard reentrancy_guard;
-  StatsCollector::MutatorScope overall_scope(
-      stats_, StatsCollector::MutatorId::kOverall);
-  {
-    SyncScope<Context::kMutator> sync_scope(*this);
-    // Mutator might start entering the safepoint while scanning was already
-    // finished.
-    if (!pcscan_.IsJoinable()) {
-      return;
-    }
-    {
-      // Clear all quarantined slots and prepare card table.
-      StatsCollector::MutatorScope clear_scope(
-          stats_, StatsCollector::MutatorId::kClear);
-      ClearQuarantinedSlotsAndPrepareCardTable();
-    }
-    {
-      // Scan the thread's stack to find dangling references.
-      StatsCollector::MutatorScope scan_scope(
-          stats_, StatsCollector::MutatorId::kScanStack);
-      ScanStack();
-    }
-    {
-      // Unprotect all scanned pages, if needed.
-      UnprotectPartitions();
-    }
-    {
-      // Scan heap for dangling references.
-      StatsCollector::MutatorScope scan_scope(stats_,
-                                              StatsCollector::MutatorId::kScan);
-      ScanPartitions();
-    }
-  }
-}
-
-void PCScanTask::RunFromScanner() {
-  ReentrantScannerGuard reentrancy_guard;
-  {
-    StatsCollector::ScannerScope overall_scope(
-        stats_, StatsCollector::ScannerId::kOverall);
-    {
-      SyncScope<Context::kScanner> sync_scope(*this);
-      {
-        // Clear all quarantined slots and prepare the card table.
-        StatsCollector::ScannerScope clear_scope(
-            stats_, StatsCollector::ScannerId::kClear);
-        ClearQuarantinedSlotsAndPrepareCardTable();
-      }
-      {
-        // Scan heap for dangling references.
-        StatsCollector::ScannerScope scan_scope(
-            stats_, StatsCollector::ScannerId::kScan);
-        ScanPartitions();
-      }
-      {
-        // Unprotect all scanned pages, if needed.
-        UnprotectPartitions();
-      }
-    }
-    {
-      // Sweep unreachable quarantined slots.
-      StatsCollector::ScannerScope sweep_scope(
-          stats_, StatsCollector::ScannerId::kSweep);
-      SweepQuarantine();
-    }
-  }
-  FinishScanner();
-}
-
-class PCScan::PCScanThread final {
- public:
-  using TaskHandle = PCScanInternal::TaskHandle;
-
-  static PCScanThread& Instance() {
-    // Lazily instantiate the scanning thread.
-    static internal::base::NoDestructor<PCScanThread> instance;
-    return *instance;
-  }
-
-  void PostTask(TaskHandle task) {
-    {
-      std::lock_guard<std::mutex> lock(mutex_);
-      PA_DCHECK(!posted_task_.get());
-      posted_task_ = std::move(task);
-      wanted_delay_ = base::TimeDelta();
-    }
-    condvar_.notify_one();
-  }
-
-  void PostDelayedTask(base::TimeDelta delay) {
-    {
-      std::lock_guard<std::mutex> lock(mutex_);
-      if (posted_task_.get()) {
-        return;
-      }
-      wanted_delay_ = delay;
-    }
-    condvar_.notify_one();
-  }
-
- private:
-  friend class internal::base::NoDestructor<PCScanThread>;
-
-  PCScanThread() {
-    ScopedAllowAllocations allow_allocations_within_std_thread;
-    std::thread{[](PCScanThread* instance) {
-                  static constexpr const char* kThreadName = "PCScan";
-                  // Ideally we should avoid mixing base:: and std:: API for
-                  // threading, but this is useful for visualizing the pcscan
-                  // thread in chrome://tracing.
-                  internal::base::PlatformThread::SetName(kThreadName);
-                  instance->TaskLoop();
-                },
-                this}
-        .detach();
-  }
-
-  // Waits and returns whether the delay should be recomputed.
-  bool Wait(std::unique_lock<std::mutex>& lock) {
-    PA_DCHECK(lock.owns_lock());
-    if (wanted_delay_.is_zero()) {
-      condvar_.wait(lock, [this] {
-        // Re-evaluate if either delay changed, or a task was
-        // enqueued.
-        return !wanted_delay_.is_zero() || posted_task_.get();
-      });
-      // The delay has already been set up and should not be queried again.
-      return false;
-    }
-    condvar_.wait_for(
-        lock, std::chrono::microseconds(wanted_delay_.InMicroseconds()));
-    // If no task has been posted, the delay should be recomputed at this point.
-    return !posted_task_.get();
-  }
-
-  void TaskLoop() {
-    while (true) {
-      TaskHandle current_task;
-      {
-        std::unique_lock<std::mutex> lock(mutex_);
-        // Scheduling.
-        while (!posted_task_.get()) {
-          if (Wait(lock)) {
-            wanted_delay_ =
-                scheduler().scheduling_backend().UpdateDelayedSchedule();
-            if (wanted_delay_.is_zero()) {
-              break;
-            }
-          }
-        }
-        // Differentiate between a posted task and a delayed task schedule.
-        if (posted_task_.get()) {
-          std::swap(current_task, posted_task_);
-          wanted_delay_ = base::TimeDelta();
-        } else {
-          PA_DCHECK(wanted_delay_.is_zero());
-        }
-      }
-      // Differentiate between a posted task and a delayed task schedule.
-      if (current_task.get()) {
-        current_task->RunFromScanner();
-      } else {
-        PCScan::Instance().PerformScan(PCScan::InvocationMode::kNonBlocking);
-      }
-    }
-  }
-
-  PCScanScheduler& scheduler() const { return PCScan::Instance().scheduler(); }
-
-  std::mutex mutex_;
-  std::condition_variable condvar_;
-  TaskHandle posted_task_;
-  base::TimeDelta wanted_delay_;
-};
-
-PCScanInternal::PCScanInternal() : simd_support_(DetectSimdSupport()) {}
-
-PCScanInternal::~PCScanInternal() = default;
-
-void PCScanInternal::Initialize(PCScan::InitConfig config) {
-  PA_DCHECK(!is_initialized_);
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  // Make sure that pools are initialized.
-  PartitionAddressSpace::Init();
-#endif
-  CommitCardTable();
-#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-  if (config.write_protection ==
-      PCScan::InitConfig::WantedWriteProtectionMode::kEnabled) {
-    write_protector_ = std::make_unique<UserFaultFDWriteProtector>();
-  } else {
-    write_protector_ = std::make_unique<NoWriteProtector>();
-  }
-#else
-  write_protector_ = std::make_unique<NoWriteProtector>();
-#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-  PCScan::SetClearType(write_protector_->SupportedClearType());
-
-  if (config.safepoint == PCScan::InitConfig::SafepointMode::kEnabled) {
-    PCScan::Instance().EnableSafepoints();
-  }
-  scannable_roots_ = RootsMap();
-  nonscannable_roots_ = RootsMap();
-
-  static partition_alloc::StatsReporter s_no_op_reporter;
-  PCScan::Instance().RegisterStatsReporter(&s_no_op_reporter);
-
-  // Don't initialize PCScanThread::Instance() as otherwise sandbox complains
-  // about multiple threads running on sandbox initialization.
-  is_initialized_ = true;
-}
-
-void PCScanInternal::PerformScan(PCScan::InvocationMode invocation_mode) {
-#if PA_SCAN_DCHECK_IS_ON()
-  PA_DCHECK(is_initialized());
-  PA_DCHECK(scannable_roots().size() > 0);
-  PA_DCHECK(std::all_of(
-      scannable_roots().begin(), scannable_roots().end(),
-      [](const auto& pair) { return pair.first->IsScanEnabled(); }));
-  PA_DCHECK(std::all_of(
-      nonscannable_roots().begin(), nonscannable_roots().end(),
-      [](const auto& pair) { return pair.first->IsQuarantineEnabled(); }));
-#endif
-
-  PCScan& frontend = PCScan::Instance();
-  {
-    // If scanning is already in progress, bail out.
-    PCScan::State expected = PCScan::State::kNotRunning;
-    if (!frontend.state_.compare_exchange_strong(
-            expected, PCScan::State::kScheduled, std::memory_order_acq_rel,
-            std::memory_order_relaxed)) {
-      return;
-    }
-  }
-
-  const size_t last_quarantine_size =
-      frontend.scheduler_.scheduling_backend().ScanStarted();
-
-  // Create PCScan task and set it as current.
-  auto task = base::MakeRefCounted<PCScanTask>(frontend, last_quarantine_size);
-  PCScanInternal::Instance().SetCurrentPCScanTask(task);
-
-  if (PA_UNLIKELY(invocation_mode ==
-                  PCScan::InvocationMode::kScheduleOnlyForTesting)) {
-    // Immediately change the state to enable safepoint testing.
-    frontend.state_.store(PCScan::State::kScanning, std::memory_order_release);
-    frontend.SetJoinableIfSafepointEnabled(true);
-    return;
-  }
-
-  // Post PCScan task.
-  if (PA_LIKELY(invocation_mode == PCScan::InvocationMode::kNonBlocking)) {
-    PCScan::PCScanThread::Instance().PostTask(std::move(task));
-  } else {
-    PA_SCAN_DCHECK(PCScan::InvocationMode::kBlocking == invocation_mode ||
-                   PCScan::InvocationMode::kForcedBlocking == invocation_mode);
-    std::move(*task).RunFromScanner();
-  }
-}
-
-void PCScanInternal::PerformScanIfNeeded(
-    PCScan::InvocationMode invocation_mode) {
-  if (!scannable_roots().size()) {
-    return;
-  }
-  PCScan& frontend = PCScan::Instance();
-  if (invocation_mode == PCScan::InvocationMode::kForcedBlocking ||
-      frontend.scheduler_.scheduling_backend()
-          .GetQuarantineData()
-          .MinimumScanningThresholdReached()) {
-    PerformScan(invocation_mode);
-  }
-}
-
-void PCScanInternal::PerformDelayedScan(base::TimeDelta delay) {
-  PCScan::PCScanThread::Instance().PostDelayedTask(delay);
-}
-
-void PCScanInternal::JoinScan() {
-  // Current task can be destroyed by the scanner. Check that it's valid.
-  if (auto current_task = CurrentPCScanTask()) {
-    current_task->RunFromMutator();
-  }
-}
-
-PCScanInternal::TaskHandle PCScanInternal::CurrentPCScanTask() const {
-  std::lock_guard<std::mutex> lock(current_task_mutex_);
-  return current_task_;
-}
-
-void PCScanInternal::SetCurrentPCScanTask(TaskHandle task) {
-  std::lock_guard<std::mutex> lock(current_task_mutex_);
-  current_task_ = std::move(task);
-}
-
-void PCScanInternal::ResetCurrentPCScanTask() {
-  std::lock_guard<std::mutex> lock(current_task_mutex_);
-  current_task_.reset();
-}
-
-namespace {
-PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
-    PCScan::Root& root) {
-  const size_t state_bitmap_size_to_commit = CommittedStateBitmapSize();
-  PCScanInternal::SuperPages super_pages;
-  for (auto* super_page_extent = root.first_extent; super_page_extent;
-       super_page_extent = super_page_extent->next) {
-    for (uintptr_t super_page = SuperPagesBeginFromExtent(super_page_extent),
-                   super_page_end = SuperPagesEndFromExtent(super_page_extent);
-         super_page != super_page_end; super_page += kSuperPageSize) {
-      // Make sure the metadata is committed.
-      // TODO(bikineev): Remove once this is known to work.
-      const volatile char* metadata =
-          reinterpret_cast<char*>(PartitionSuperPageToMetadataArea(super_page));
-      *metadata;
-      RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
-                          state_bitmap_size_to_commit,
-                          PageAccessibilityConfiguration(
-                              PageAccessibilityConfiguration::kReadWrite),
-                          PageAccessibilityDisposition::kRequireUpdate);
-      super_pages.push_back(super_page);
-    }
-  }
-  return super_pages;
-}
-}  // namespace
-
-void PCScanInternal::RegisterScannableRoot(Root* root) {
-  PA_DCHECK(is_initialized());
-  PA_DCHECK(root);
-  // Avoid nesting locks and store super_pages in a temporary vector.
-  SuperPages super_pages;
-  {
-    ::partition_alloc::internal::ScopedGuard guard(
-        ::partition_alloc::internal::PartitionRootLock(root));
-    PA_CHECK(root->IsQuarantineAllowed());
-    if (root->IsScanEnabled()) {
-      return;
-    }
-    PA_CHECK(!root->IsQuarantineEnabled());
-    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
-    root->settings.scan_mode = Root::ScanMode::kEnabled;
-    root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
-  }
-  std::lock_guard<std::mutex> lock(roots_mutex_);
-  PA_DCHECK(!scannable_roots_.count(root));
-  auto& root_super_pages = scannable_roots_[root];
-  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
-                          super_pages.end());
-}
-
-void PCScanInternal::RegisterNonScannableRoot(Root* root) {
-  PA_DCHECK(is_initialized());
-  PA_DCHECK(root);
-  // Avoid nesting locks and store super_pages in a temporary vector.
-  SuperPages super_pages;
-  {
-    ::partition_alloc::internal::ScopedGuard guard(
-        ::partition_alloc::internal::PartitionRootLock(root));
-    PA_CHECK(root->IsQuarantineAllowed());
-    PA_CHECK(!root->IsScanEnabled());
-    if (root->IsQuarantineEnabled()) {
-      return;
-    }
-    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
-    root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
-  }
-  std::lock_guard<std::mutex> lock(roots_mutex_);
-  PA_DCHECK(!nonscannable_roots_.count(root));
-  auto& root_super_pages = nonscannable_roots_[root];
-  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
-                          super_pages.end());
-}
-
-void PCScanInternal::RegisterNewSuperPage(Root* root,
-                                          uintptr_t super_page_base) {
-  PA_DCHECK(is_initialized());
-  PA_DCHECK(root);
-  PA_CHECK(root->IsQuarantineAllowed());
-  PA_DCHECK(!(super_page_base % kSuperPageAlignment));
-  // Make sure the metadata is committed.
-  // TODO(bikineev): Remove once this is known to work.
-  const volatile char* metadata = reinterpret_cast<char*>(
-      PartitionSuperPageToMetadataArea(super_page_base));
-  *metadata;
-
-  std::lock_guard<std::mutex> lock(roots_mutex_);
-
-  // Dispatch based on whether root is scannable or not.
-  if (root->IsScanEnabled()) {
-    PA_DCHECK(scannable_roots_.count(root));
-    auto& super_pages = scannable_roots_[root];
-    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
-                        super_page_base) == super_pages.end());
-    super_pages.push_back(super_page_base);
-  } else {
-    PA_DCHECK(root->IsQuarantineEnabled());
-    PA_DCHECK(nonscannable_roots_.count(root));
-    auto& super_pages = nonscannable_roots_[root];
-    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
-                        super_page_base) == super_pages.end());
-    super_pages.push_back(super_page_base);
-  }
-}
-
-void PCScanInternal::SetProcessName(const char* process_name) {
-  PA_DCHECK(is_initialized());
-  PA_DCHECK(process_name);
-  PA_DCHECK(!process_name_);
-  process_name_ = process_name;
-}
-
-size_t PCScanInternal::CalculateTotalHeapSize() const {
-  PA_DCHECK(is_initialized());
-  std::lock_guard<std::mutex> lock(roots_mutex_);
-  const auto acc = [](size_t size, const auto& pair) {
-    return size + pair.first->get_total_size_of_committed_pages();
-  };
-  return std::accumulate(scannable_roots_.begin(), scannable_roots_.end(), 0u,
-                         acc) +
-         std::accumulate(nonscannable_roots_.begin(), nonscannable_roots_.end(),
-                         0u, acc);
-}
-
-void PCScanInternal::EnableStackScanning() {
-  PA_DCHECK(!stack_scanning_enabled_);
-  stack_scanning_enabled_ = true;
-}
-void PCScanInternal::DisableStackScanning() {
-  PA_DCHECK(stack_scanning_enabled_);
-  stack_scanning_enabled_ = false;
-}
-bool PCScanInternal::IsStackScanningEnabled() const {
-  return stack_scanning_enabled_;
-}
-
-void PCScanInternal::NotifyThreadCreated(void* stack_top) {
-  const auto tid = base::PlatformThread::CurrentId();
-  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
-  const auto res = stack_tops_.insert({tid, stack_top});
-  PA_DCHECK(res.second);
-}
-
-void PCScanInternal::NotifyThreadDestroyed() {
-  const auto tid = base::PlatformThread::CurrentId();
-  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
-  PA_DCHECK(1 == stack_tops_.count(tid));
-  stack_tops_.erase(tid);
-}
-
-void* PCScanInternal::GetCurrentThreadStackTop() const {
-  const auto tid = base::PlatformThread::CurrentId();
-  std::lock_guard<std::mutex> lock(stack_tops_mutex_);
-  auto it = stack_tops_.find(tid);
-  return it != stack_tops_.end() ? it->second : nullptr;
-}
-
-bool PCScanInternal::WriteProtectionEnabled() const {
-  return write_protector_->IsEnabled();
-}
-
-void PCScanInternal::ProtectPages(uintptr_t begin, size_t size) {
-  // Slot-span sizes are multiples of the system page size. However, the ranges
-  // are recorded are not, since in the snapshot we only record the used
-  // payload. Therefore we align up the incoming range by 4k. The unused part of
-  // slot-spans doesn't need to be protected (the allocator will enter the
-  // safepoint before trying to allocate from it).
-  PA_SCAN_DCHECK(write_protector_.get());
-  write_protector_->ProtectPages(
-      begin,
-      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
-}
-
-void PCScanInternal::UnprotectPages(uintptr_t begin, size_t size) {
-  PA_SCAN_DCHECK(write_protector_.get());
-  write_protector_->UnprotectPages(
-      begin,
-      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
-}
-
-void PCScanInternal::ClearRootsForTesting() {
-  std::lock_guard<std::mutex> lock(roots_mutex_);
-  // Set all roots as non-scannable and non-quarantinable.
-  for (auto& pair : scannable_roots_) {
-    Root* root = pair.first;
-    root->settings.scan_mode = Root::ScanMode::kDisabled;
-    root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
-  }
-  for (auto& pair : nonscannable_roots_) {
-    Root* root = pair.first;
-    root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
-  }
-  // Make sure to destroy maps so that on the following ReinitForTesting() call
-  // the maps don't attempt to destroy the backing.
-  scannable_roots_.clear();
-  scannable_roots_.~RootsMap();
-  nonscannable_roots_.clear();
-  nonscannable_roots_.~RootsMap();
-  // Destroy write protector object, so that there is no double free on the next
-  // call to ReinitForTesting();
-  write_protector_.reset();
-}
-
-void PCScanInternal::ReinitForTesting(PCScan::InitConfig config) {
-  is_initialized_ = false;
-  auto* new_this = new (this) PCScanInternal;
-  new_this->Initialize(config);
-}
-
-void PCScanInternal::FinishScanForTesting() {
-  auto current_task = CurrentPCScanTask();
-  PA_CHECK(current_task.get());
-  current_task->RunFromScanner();
-}
-
-void PCScanInternal::RegisterStatsReporter(
-    partition_alloc::StatsReporter* reporter) {
-  PA_DCHECK(reporter);
-  stats_reporter_ = reporter;
-}
-
-partition_alloc::StatsReporter& PCScanInternal::GetReporter() {
-  PA_DCHECK(stats_reporter_);
-  return *stats_reporter_;
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/pcscan_internal.h b/base/allocator/partition_allocator/starscan/pcscan_internal.h
deleted file mode 100644
index 74bca4c..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan_internal.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_
-
-#include <array>
-#include <functional>
-#include <memory>
-#include <mutex>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
-#include "base/allocator/partition_allocator/starscan/write_protector.h"
-
-namespace partition_alloc::internal {
-
-class PCScanTask;
-
-// Internal PCScan singleton. The separation between frontend and backend is
-// needed to keep access to the hot data (quarantine) in the frontend fast,
-// whereas the backend can hold cold data.
-class PCScanInternal final {
- public:
-  using Root = PCScan::Root;
-  using TaskHandle = scoped_refptr<PCScanTask>;
-
-  using SuperPages = std::vector<uintptr_t, MetadataAllocator<uintptr_t>>;
-  using RootsMap =
-      std::unordered_map<Root*,
-                         SuperPages,
-                         std::hash<Root*>,
-                         std::equal_to<>,
-                         MetadataAllocator<std::pair<Root* const, SuperPages>>>;
-
-  static PCScanInternal& Instance() {
-    // Since the data that PCScanInternal holds is cold, it's fine to have the
-    // runtime check for thread-safe local static initialization.
-    static internal::base::NoDestructor<PCScanInternal> instance;
-    return *instance;
-  }
-
-  PCScanInternal(const PCScanInternal&) = delete;
-  PCScanInternal& operator=(const PCScanInternal&) = delete;
-
-  ~PCScanInternal();
-
-  void Initialize(PCScan::InitConfig);
-  bool is_initialized() const { return is_initialized_; }
-
-  void PerformScan(PCScan::InvocationMode);
-  void PerformScanIfNeeded(PCScan::InvocationMode);
-  void PerformDelayedScan(base::TimeDelta delay);
-  void JoinScan();
-
-  TaskHandle CurrentPCScanTask() const;
-  void SetCurrentPCScanTask(TaskHandle task);
-  void ResetCurrentPCScanTask();
-
-  void RegisterScannableRoot(Root*);
-  void RegisterNonScannableRoot(Root*);
-
-  RootsMap& scannable_roots() { return scannable_roots_; }
-  const RootsMap& scannable_roots() const { return scannable_roots_; }
-
-  RootsMap& nonscannable_roots() { return nonscannable_roots_; }
-  const RootsMap& nonscannable_roots() const { return nonscannable_roots_; }
-
-  void RegisterNewSuperPage(Root* root, uintptr_t super_page_base);
-
-  void SetProcessName(const char* name);
-  const char* process_name() const { return process_name_; }
-
-  // Get size of all committed pages from scannable and nonscannable roots.
-  size_t CalculateTotalHeapSize() const;
-
-  SimdSupport simd_support() const { return simd_support_; }
-
-  void EnableStackScanning();
-  void DisableStackScanning();
-  bool IsStackScanningEnabled() const;
-
-  void EnableImmediateFreeing() { immediate_freeing_enabled_ = true; }
-  bool IsImmediateFreeingEnabled() const { return immediate_freeing_enabled_; }
-
-  void NotifyThreadCreated(void* stack_top);
-  void NotifyThreadDestroyed();
-
-  void* GetCurrentThreadStackTop() const;
-
-  bool WriteProtectionEnabled() const;
-  void ProtectPages(uintptr_t begin, size_t size);
-  void UnprotectPages(uintptr_t begin, size_t size);
-
-  void ClearRootsForTesting();                // IN-TEST
-  void ReinitForTesting(PCScan::InitConfig);  // IN-TEST
-  void FinishScanForTesting();                // IN-TEST
-
-  void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
-  partition_alloc::StatsReporter& GetReporter();
-
- private:
-  friend internal::base::NoDestructor<PCScanInternal>;
-  friend class StarScanSnapshot;
-
-  using StackTops = std::unordered_map<
-      internal::base::PlatformThreadId,
-      void*,
-      std::hash<internal::base::PlatformThreadId>,
-      std::equal_to<>,
-      MetadataAllocator<
-          std::pair<const internal::base::PlatformThreadId, void*>>>;
-
-  PCScanInternal();
-
-  TaskHandle current_task_;
-  mutable std::mutex current_task_mutex_;
-
-  RootsMap scannable_roots_;
-  RootsMap nonscannable_roots_;
-  mutable std::mutex roots_mutex_;
-
-  bool stack_scanning_enabled_{false};
-  // TLS emulation of stack tops. Since this is guaranteed to go through
-  // non-quarantinable partition, using it from safepoints is safe.
-  StackTops stack_tops_;
-  mutable std::mutex stack_tops_mutex_;
-
-  bool immediate_freeing_enabled_{false};
-
-  const char* process_name_ = nullptr;
-  const SimdSupport simd_support_;
-
-  std::unique_ptr<WriteProtector> write_protector_;
-  partition_alloc::StatsReporter* stats_reporter_ = nullptr;
-
-  bool is_initialized_ = false;
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_
diff --git a/base/allocator/partition_allocator/starscan/pcscan_scheduling.cc b/base/allocator/partition_allocator/starscan/pcscan_scheduling.cc
deleted file mode 100644
index 7fe50ad..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan_scheduling.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
-
-#include <algorithm>
-#include <atomic>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/starscan/logging.h"
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-
-namespace partition_alloc::internal {
-
-// static
-constexpr size_t QuarantineData::kQuarantineSizeMinLimit;
-
-void PCScanScheduler::SetNewSchedulingBackend(
-    PCScanSchedulingBackend& backend) {
-  backend_ = &backend;
-}
-
-void PCScanSchedulingBackend::DisableScheduling() {
-  scheduling_enabled_.store(false, std::memory_order_relaxed);
-}
-
-void PCScanSchedulingBackend::EnableScheduling() {
-  scheduling_enabled_.store(true, std::memory_order_relaxed);
-  // Check if *Scan needs to be run immediately.
-  if (NeedsToImmediatelyScan()) {
-    PCScan::PerformScan(PCScan::InvocationMode::kNonBlocking);
-  }
-}
-
-size_t PCScanSchedulingBackend::ScanStarted() {
-  auto& data = GetQuarantineData();
-  data.epoch.fetch_add(1, std::memory_order_relaxed);
-  return data.current_size.exchange(0, std::memory_order_relaxed);
-}
-
-base::TimeDelta PCScanSchedulingBackend::UpdateDelayedSchedule() {
-  return base::TimeDelta();
-}
-
-// static
-constexpr double LimitBackend::kQuarantineSizeFraction;
-
-bool LimitBackend::LimitReached() {
-  return is_scheduling_enabled();
-}
-
-void LimitBackend::UpdateScheduleAfterScan(size_t survived_bytes,
-                                           base::TimeDelta,
-                                           size_t heap_size) {
-  scheduler_.AccountFreed(survived_bytes);
-  // |heap_size| includes the current quarantine size, we intentionally leave
-  // some slack till hitting the limit.
-  auto& data = GetQuarantineData();
-  data.size_limit.store(
-      std::max(QuarantineData::kQuarantineSizeMinLimit,
-               static_cast<size_t>(kQuarantineSizeFraction * heap_size)),
-      std::memory_order_relaxed);
-}
-
-bool LimitBackend::NeedsToImmediatelyScan() {
-  return false;
-}
-
-// static
-constexpr double MUAwareTaskBasedBackend::kSoftLimitQuarantineSizePercent;
-// static
-constexpr double MUAwareTaskBasedBackend::kHardLimitQuarantineSizePercent;
-// static
-constexpr double MUAwareTaskBasedBackend::kTargetMutatorUtilizationPercent;
-
-MUAwareTaskBasedBackend::MUAwareTaskBasedBackend(
-    PCScanScheduler& scheduler,
-    ScheduleDelayedScanFunc schedule_delayed_scan)
-    : PCScanSchedulingBackend(scheduler),
-      schedule_delayed_scan_(schedule_delayed_scan) {
-  PA_DCHECK(schedule_delayed_scan_);
-}
-
-MUAwareTaskBasedBackend::~MUAwareTaskBasedBackend() = default;
-
-bool MUAwareTaskBasedBackend::LimitReached() {
-  bool should_reschedule = false;
-  base::TimeDelta reschedule_delay;
-  {
-    ScopedGuard guard(scheduler_lock_);
-    // At this point we reached a limit where the schedule generally wants to
-    // trigger a scan.
-    if (hard_limit_) {
-      // The hard limit is not reset, indicating that the scheduler only hit the
-      // soft limit. See inlined comments for the algorithm.
-      auto& data = GetQuarantineData();
-      PA_DCHECK(hard_limit_ >= QuarantineData::kQuarantineSizeMinLimit);
-      // 1. Update the limit to the hard limit which will always immediately
-      // trigger a scan.
-      data.size_limit.store(hard_limit_, std::memory_order_relaxed);
-      hard_limit_ = 0;
-
-      // 2. Unlikely case: If also above hard limit, start scan right away. This
-      // ignores explicit PCScan disabling.
-      if (PA_UNLIKELY(data.current_size.load(std::memory_order_relaxed) >
-                      data.size_limit.load(std::memory_order_relaxed))) {
-        return true;
-      }
-
-      // 3. Check if PCScan was explicitly disabled.
-      if (PA_UNLIKELY(!is_scheduling_enabled())) {
-        return false;
-      }
-
-      // 4. Otherwise, the soft limit would trigger a scan immediately if the
-      // mutator utilization requirement is satisfied.
-      reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
-      if (reschedule_delay <= base::TimeDelta()) {
-        // May invoke scan immediately.
-        return true;
-      }
-
-      PA_PCSCAN_VLOG(3) << "Rescheduling scan with delay: "
-                        << reschedule_delay.InMillisecondsF() << " ms";
-      // 5. If the MU requirement is not satisfied, schedule a delayed scan to
-      // the time instance when MU is satisfied.
-      should_reschedule = true;
-    }
-  }
-  // Don't reschedule under the lock as the callback can call free() and
-  // recursively enter the lock.
-  if (should_reschedule) {
-    schedule_delayed_scan_(reschedule_delay.InMicroseconds());
-    return false;
-  }
-  return true;
-}
-
-size_t MUAwareTaskBasedBackend::ScanStarted() {
-  ScopedGuard guard(scheduler_lock_);
-
-  return PCScanSchedulingBackend::ScanStarted();
-}
-
-void MUAwareTaskBasedBackend::UpdateScheduleAfterScan(
-    size_t survived_bytes,
-    base::TimeDelta time_spent_in_scan,
-    size_t heap_size) {
-  scheduler_.AccountFreed(survived_bytes);
-
-  ScopedGuard guard(scheduler_lock_);
-
-  // |heap_size| includes the current quarantine size, we intentionally leave
-  // some slack till hitting the limit.
-  auto& data = GetQuarantineData();
-  data.size_limit.store(
-      std::max(
-          QuarantineData::kQuarantineSizeMinLimit,
-          static_cast<size_t>(kSoftLimitQuarantineSizePercent * heap_size)),
-      std::memory_order_relaxed);
-  hard_limit_ = std::max(
-      QuarantineData::kQuarantineSizeMinLimit,
-      static_cast<size_t>(kHardLimitQuarantineSizePercent * heap_size));
-
-  // This computes the time window that the scheduler will reserve for the
-  // mutator. Scanning, unless reaching the hard limit, will generally be
-  // delayed until this time has passed.
-  const auto time_required_on_mutator =
-      time_spent_in_scan * kTargetMutatorUtilizationPercent /
-      (1.0 - kTargetMutatorUtilizationPercent);
-  earliest_next_scan_time_ = base::TimeTicks::Now() + time_required_on_mutator;
-}
-
-bool MUAwareTaskBasedBackend::NeedsToImmediatelyScan() {
-  bool should_reschedule = false;
-  base::TimeDelta reschedule_delay;
-  {
-    ScopedGuard guard(scheduler_lock_);
-    // If |hard_limit_| was set to zero, the soft limit was reached. Bail out if
-    // it's not.
-    if (hard_limit_) {
-      return false;
-    }
-
-    // Check if mutator utilization requirement is satisfied.
-    reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
-    if (reschedule_delay <= base::TimeDelta()) {
-      // May invoke scan immediately.
-      return true;
-    }
-
-    PA_PCSCAN_VLOG(3) << "Rescheduling scan with delay: "
-                      << reschedule_delay.InMillisecondsF() << " ms";
-    // Schedule a delayed scan to the time instance when MU is satisfied.
-    should_reschedule = true;
-  }
-  // Don't reschedule under the lock as the callback can call free() and
-  // recursively enter the lock.
-  if (should_reschedule) {
-    schedule_delayed_scan_(reschedule_delay.InMicroseconds());
-  }
-  return false;
-}
-
-base::TimeDelta MUAwareTaskBasedBackend::UpdateDelayedSchedule() {
-  ScopedGuard guard(scheduler_lock_);
-  // TODO(1197479): Adjust schedule to current heap sizing.
-  const auto delay = earliest_next_scan_time_ - base::TimeTicks::Now();
-  PA_PCSCAN_VLOG(3) << "Schedule is off by " << delay.InMillisecondsF() << "ms";
-  return delay >= base::TimeDelta() ? delay : base::TimeDelta();
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/pcscan_scheduling.h b/base/allocator/partition_allocator/starscan/pcscan_scheduling.h
deleted file mode 100644
index f5466a4..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan_scheduling.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_
-
-#include <atomic>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-
-namespace partition_alloc::internal {
-
-class PCScanScheduler;
-
-struct QuarantineData final {
-  static constexpr size_t kQuarantineSizeMinLimit = 1 * 1024 * 1024;
-
-  inline constexpr QuarantineData();
-
-  bool MinimumScanningThresholdReached() const {
-    return current_size.load(std::memory_order_relaxed) >
-           kQuarantineSizeMinLimit;
-  }
-
-  std::atomic<size_t> current_size{0u};
-  std::atomic<size_t> size_limit{kQuarantineSizeMinLimit};
-  std::atomic<size_t> epoch{0u};
-};
-
-// No virtual destructor to allow constant initialization of PCScan as
-// static global which directly embeds LimitBackend as default backend.
-#if defined(__clang__)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanSchedulingBackend {
-#if defined(__clang__)
-#pragma clang diagnostic pop
-#endif
-
- public:
-  inline constexpr explicit PCScanSchedulingBackend(PCScanScheduler&);
-
-  PCScanSchedulingBackend(const PCScanSchedulingBackend&) = delete;
-  PCScanSchedulingBackend& operator=(const PCScanSchedulingBackend&) = delete;
-
-  void DisableScheduling();
-  void EnableScheduling();
-
-  bool is_scheduling_enabled() const {
-    return scheduling_enabled_.load(std::memory_order_relaxed);
-  }
-
-  inline QuarantineData& GetQuarantineData();
-
-  // Invoked when the limit in PCScanScheduler is reached. Returning true
-  // signals the caller to invoke a scan.
-  virtual bool LimitReached() = 0;
-
-  // Invoked on starting a scan. Returns current quarantine size.
-  virtual size_t ScanStarted();
-
-  // Invoked at the end of a scan to compute a new limit.
-  virtual void UpdateScheduleAfterScan(size_t survived_bytes,
-                                       base::TimeDelta time_spent_in_scan,
-                                       size_t heap_size) = 0;
-
-  // Invoked by PCScan to ask for a new timeout for a scheduled PCScan task.
-  // Only invoked if scheduler requests a delayed scan at some point.
-  virtual base::TimeDelta UpdateDelayedSchedule();
-
- protected:
-  inline bool SchedulingDisabled() const;
-
-  virtual bool NeedsToImmediatelyScan() = 0;
-
-  PCScanScheduler& scheduler_;
-  std::atomic<bool> scheduling_enabled_{true};
-};
-
-// Scheduling backend that just considers a single hard limit.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LimitBackend final
-    : public PCScanSchedulingBackend {
- public:
-  static constexpr double kQuarantineSizeFraction = 0.1;
-
-  inline constexpr explicit LimitBackend(PCScanScheduler&);
-
-  bool LimitReached() final;
-  void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
-
- private:
-  bool NeedsToImmediatelyScan() final;
-};
-
-// Task based backend that is aware of a target mutator utilization that
-// specifies how much percent of the execution should be reserved for the
-// mutator. I.e., the MU-aware scheduler ensures that scans are limited and
-// there is enough time left for the mutator to execute the actual application
-// workload.
-//
-// See constants below for trigger mechanisms.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MUAwareTaskBasedBackend final
-    : public PCScanSchedulingBackend {
- public:
-  using ScheduleDelayedScanFunc = void (*)(int64_t delay_in_microseconds);
-
-  MUAwareTaskBasedBackend(PCScanScheduler&, ScheduleDelayedScanFunc);
-  ~MUAwareTaskBasedBackend();
-
-  bool LimitReached() final;
-  size_t ScanStarted() final;
-  void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
-  base::TimeDelta UpdateDelayedSchedule() final;
-
- private:
-  // Limit triggering the scheduler. If `kTargetMutatorUtilizationPercent` is
-  // satisfied at this point then a scan is triggered immediately.
-  static constexpr double kSoftLimitQuarantineSizePercent = 0.1;
-  // Hard limit at which a scan is triggered in any case. Avoids blowing up the
-  // heap completely.
-  static constexpr double kHardLimitQuarantineSizePercent = 0.5;
-  // Target mutator utilization that is respected when invoking a scan.
-  // Specifies how much percent of walltime should be spent in the mutator.
-  // Inversely, specifies how much walltime (indirectly CPU) is spent on
-  // memory management in scan.
-  static constexpr double kTargetMutatorUtilizationPercent = 0.90;
-
-  bool NeedsToImmediatelyScan() final;
-
-  // Callback to schedule a delayed scan.
-  const ScheduleDelayedScanFunc schedule_delayed_scan_;
-
-  Lock scheduler_lock_;
-  size_t hard_limit_ PA_GUARDED_BY(scheduler_lock_){0};
-  base::TimeTicks earliest_next_scan_time_ PA_GUARDED_BY(scheduler_lock_);
-
-  friend class PartitionAllocPCScanMUAwareTaskBasedBackendTest;
-};
-
-// The scheduler that is embedded in the PCScan frontend which requires a fast
-// path for freeing objects. The scheduler holds data needed to invoke a
-// `PCScanSchedulingBackend` upon hitting a limit. The backend implements
-// the actual scheduling strategy and is in charge of maintaining limits.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanScheduler final {
- public:
-  inline constexpr PCScanScheduler();
-
-  PCScanScheduler(const PCScanScheduler&) = delete;
-  PCScanScheduler& operator=(const PCScanScheduler&) = delete;
-
-  // Account freed `bytes`. Returns true if scan should be triggered
-  // immediately, and false otherwise.
-  PA_ALWAYS_INLINE bool AccountFreed(size_t bytes);
-
-  size_t epoch() const {
-    return quarantine_data_.epoch.load(std::memory_order_relaxed);
-  }
-
-  // Sets a new scheduling backend that should be used by the scanner.
-  void SetNewSchedulingBackend(PCScanSchedulingBackend&);
-
-  PCScanSchedulingBackend& scheduling_backend() { return *backend_; }
-  const PCScanSchedulingBackend& scheduling_backend() const {
-    return *backend_;
-  }
-
- private:
-  QuarantineData quarantine_data_{};
-  // The default backend used is a simple LimitBackend that just triggers scan
-  // on reaching a hard limit.
-  LimitBackend default_scheduling_backend_{*this};
-  PCScanSchedulingBackend* backend_ = &default_scheduling_backend_;
-
-  friend PCScanSchedulingBackend;
-};
-
-// To please Chromium's clang plugin.
-constexpr PCScanScheduler::PCScanScheduler() = default;
-constexpr QuarantineData::QuarantineData() = default;
-
-constexpr PCScanSchedulingBackend::PCScanSchedulingBackend(
-    PCScanScheduler& scheduler)
-    : scheduler_(scheduler) {}
-
-QuarantineData& PCScanSchedulingBackend::GetQuarantineData() {
-  return scheduler_.quarantine_data_;
-}
-
-constexpr LimitBackend::LimitBackend(PCScanScheduler& scheduler)
-    : PCScanSchedulingBackend(scheduler) {}
-
-PA_ALWAYS_INLINE bool PCScanScheduler::AccountFreed(size_t size) {
-  const size_t size_before =
-      quarantine_data_.current_size.fetch_add(size, std::memory_order_relaxed);
-  return (size_before + size >
-          quarantine_data_.size_limit.load(std::memory_order_relaxed)) &&
-         backend_->LimitReached();
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_
diff --git a/base/allocator/partition_allocator/starscan/pcscan_scheduling_unittest.cc b/base/allocator/partition_allocator/starscan/pcscan_scheduling_unittest.cc
deleted file mode 100644
index 6f69e15..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan_scheduling_unittest.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal {
-
-namespace {
-constexpr size_t kMB = 1024 * 1024;
-}  // namespace
-
-TEST(PartitionAllocPCScanSchedulerLimitBackendTest,
-     NoScanBelowMinimumScanningThreshold) {
-  PCScanScheduler scheduler;
-  LimitBackend limit_backend(scheduler);
-  scheduler.SetNewSchedulingBackend(limit_backend);
-  constexpr size_t kMinimumScanningThreshold =
-      QuarantineData::kQuarantineSizeMinLimit;
-  EXPECT_FALSE(scheduler.AccountFreed(kMinimumScanningThreshold / 2));
-  EXPECT_FALSE(scheduler.AccountFreed(kMinimumScanningThreshold -
-                                      kMinimumScanningThreshold / 2));
-  EXPECT_TRUE(scheduler.AccountFreed(1));
-}
-
-TEST(PartitionAllocPCScanSchedulerLimitBackendTest,
-     ScanAtQuarantineSizeFraction) {
-  PCScanScheduler scheduler;
-  LimitBackend limit_backend(scheduler);
-  scheduler.SetNewSchedulingBackend(limit_backend);
-  constexpr size_t kHeapSize = 100 * kMB;
-  constexpr size_t kNoSurvivedBytes = 0;
-  limit_backend.UpdateScheduleAfterScan(kNoSurvivedBytes, base::TimeDelta(),
-                                        kHeapSize);
-  constexpr size_t kExpectedTriggerSize = static_cast<size_t>(
-      static_cast<double>(kHeapSize) * LimitBackend::kQuarantineSizeFraction);
-  EXPECT_FALSE(scheduler.AccountFreed(kExpectedTriggerSize / 2));
-  EXPECT_FALSE(
-      scheduler.AccountFreed(kExpectedTriggerSize - kExpectedTriggerSize / 2));
-  EXPECT_TRUE(scheduler.AccountFreed(1));
-}
-
-class PartitionAllocPCScanMUAwareTaskBasedBackendTest : public ::testing::Test {
- public:
-  static constexpr size_t kHeapSize = 100 * kMB;
-
-  static constexpr size_t HardLimitSize(size_t heap_size) {
-    return static_cast<size_t>(
-               static_cast<double>(heap_size) *
-               MUAwareTaskBasedBackend::kHardLimitQuarantineSizePercent) +
-           1;
-  }
-
-  static constexpr size_t SoftLimitSize(size_t heap_size) {
-    return static_cast<size_t>(
-               static_cast<double>(heap_size) *
-               MUAwareTaskBasedBackend::kSoftLimitQuarantineSizePercent) +
-           1;
-  }
-
-  PartitionAllocPCScanMUAwareTaskBasedBackendTest()
-      : backend_(scheduler_, &IncrementDelayedScanScheduledCount) {
-    scheduler_.SetNewSchedulingBackend(backend_);
-    constexpr size_t kNoSurvivedBytes = 0;
-    constexpr base::TimeDelta kZeroTimeForScan;
-    backend_.UpdateScheduleAfterScan(kNoSurvivedBytes, kZeroTimeForScan,
-                                     kHeapSize);
-  }
-
-  void SetUp() override { delayed_scan_scheduled_count_ = 0; }
-
-  PCScanScheduler& scheduler() { return scheduler_; }
-  MUAwareTaskBasedBackend& backend() { return backend_; }
-  size_t delayed_scan_scheduled_count() const {
-    return delayed_scan_scheduled_count_;
-  }
-
- private:
-  static void IncrementDelayedScanScheduledCount(
-      int64_t delay_in_microseconds) {
-    ++delayed_scan_scheduled_count_;
-  }
-
-  static size_t delayed_scan_scheduled_count_;
-  PCScanScheduler scheduler_;
-  MUAwareTaskBasedBackend backend_;
-};
-
-size_t PartitionAllocPCScanMUAwareTaskBasedBackendTest::
-    delayed_scan_scheduled_count_ = 0;
-
-namespace {
-
-class ScopedTimeTicksOverride final {
- public:
-  ScopedTimeTicksOverride()
-      : ScopedTimeTicksOverride(InitializeTimeAndReturnTimeTicksNow()) {}
-
-  void AddTicksToNow(base::TimeDelta ticks) { now_ticks_ += ticks; }
-
- private:
-  static base::TimeTicks Now() { return now_ticks_; }
-
-  static base::TimeTicksNowFunction InitializeTimeAndReturnTimeTicksNow() {
-    now_ticks_ = base::TimeTicks::Now();
-    return &Now;
-  }
-
-  explicit ScopedTimeTicksOverride(
-      base::TimeTicksNowFunction time_ticks_function)
-      : overrides_(nullptr, time_ticks_function, nullptr) {}
-
-  static base::TimeTicks now_ticks_;
-
-  base::subtle::ScopedTimeClockOverrides overrides_;
-};
-
-// static
-base::TimeTicks ScopedTimeTicksOverride::now_ticks_;
-
-}  // namespace
-
-TEST_F(PartitionAllocPCScanMUAwareTaskBasedBackendTest,
-       SoftLimitSchedulesScanIfMUNotSatisfied) {
-  // Stop the time.
-  ScopedTimeTicksOverride now_ticks_override;
-  // Simulate PCScan that processed kHeapSize in 1s. Since time is stopped that
-  // schedule is not reachable.
-  backend().UpdateScheduleAfterScan(0, base::Seconds(1), kHeapSize);
-
-  EXPECT_EQ(0u, delayed_scan_scheduled_count());
-  EXPECT_FALSE(scheduler().AccountFreed(SoftLimitSize(kHeapSize)));
-  EXPECT_EQ(1u, delayed_scan_scheduled_count());
-}
-
-TEST_F(PartitionAllocPCScanMUAwareTaskBasedBackendTest,
-       SoftLimitInvokesScanIfMUSatisfied) {
-  // Stop the time.
-  ScopedTimeTicksOverride now_ticks_override;
-  // Simulate PCScan that processed kHeapSize in 0s. The next scan should thus
-  // happen immediately.
-  backend().UpdateScheduleAfterScan(0, base::Seconds(0), kHeapSize);
-
-  EXPECT_EQ(0u, delayed_scan_scheduled_count());
-  EXPECT_TRUE(scheduler().AccountFreed(SoftLimitSize(kHeapSize)));
-  EXPECT_EQ(0u, delayed_scan_scheduled_count());
-}
-
-TEST_F(PartitionAllocPCScanMUAwareTaskBasedBackendTest,
-       HardLimitSchedulesScanImmediately) {
-  // Stop the time.
-  ScopedTimeTicksOverride now_ticks_override;
-  // Simulate PCScan that processed kHeapSize in 1s. Since time is stopped that
-  // schedule is not reachable.
-  backend().UpdateScheduleAfterScan(0, base::Seconds(1), kHeapSize);
-
-  EXPECT_EQ(0u, delayed_scan_scheduled_count());
-  // Triggering the hard limit should immediately require a scan and not
-  // schedule anything.
-  EXPECT_TRUE(scheduler().AccountFreed(HardLimitSize(kHeapSize)));
-  EXPECT_EQ(0u, delayed_scan_scheduled_count());
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/pcscan_unittest.cc b/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
deleted file mode 100644
index 7d579fd..0000000
--- a/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
+++ /dev/null
@@ -1,840 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstdint>
-
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(USE_STARSCAN)
-
-namespace partition_alloc::internal {
-
-namespace {
-
-struct DisableStackScanningScope final {
-  DisableStackScanningScope() {
-    if (PCScan::IsStackScanningEnabled()) {
-      PCScan::DisableStackScanning();
-      changed_ = true;
-    }
-  }
-  ~DisableStackScanningScope() {
-    if (changed_) {
-      PCScan::EnableStackScanning();
-    }
-  }
-
- private:
-  bool changed_ = false;
-};
-
-}  // namespace
-
-class PartitionAllocPCScanTestBase : public testing::Test {
- public:
-  PartitionAllocPCScanTestBase()
-      : allocator_(PartitionOptions{
-            .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-            .star_scan_quarantine =
-                PartitionOptions::StarScanQuarantine::kAllowed,
-            .memory_tagging = {
-                .enabled = base::CPU::GetInstanceNoAllocation().has_mte()
-                               ? partition_alloc::PartitionOptions::
-                                     MemoryTagging::kEnabled
-                               : partition_alloc::PartitionOptions::
-                                     MemoryTagging::kDisabled}}) {
-    PartitionAllocGlobalInit([](size_t) { PA_LOG(FATAL) << "Out of memory"; });
-    // Previous test runs within the same process decommit pools, therefore
-    // we need to make sure that the card table is recommitted for each run.
-    PCScan::ReinitForTesting(
-        {PCScan::InitConfig::WantedWriteProtectionMode::kDisabled,
-         PCScan::InitConfig::SafepointMode::kEnabled});
-    allocator_.root()->UncapEmptySlotSpanMemoryForTesting();
-    allocator_.root()->SwitchToDenserBucketDistribution();
-
-    PCScan::RegisterScannableRoot(allocator_.root());
-  }
-
-  ~PartitionAllocPCScanTestBase() override {
-    allocator_.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-                                   PurgeFlags::kDiscardUnusedSystemPages);
-    PartitionAllocGlobalUninitForTesting();
-  }
-
-  void RunPCScan() {
-    PCScan::Instance().PerformScan(PCScan::InvocationMode::kBlocking);
-  }
-
-  void SchedulePCScan() {
-    PCScan::Instance().PerformScan(
-        PCScan::InvocationMode::kScheduleOnlyForTesting);
-  }
-
-  void JoinPCScanAsMutator() {
-    auto& instance = PCScan::Instance();
-    PA_CHECK(instance.IsJoinable());
-    instance.JoinScan();
-  }
-
-  void FinishPCScanAsScanner() { PCScan::FinishScanForTesting(); }
-
-  bool IsInQuarantine(void* object) const {
-    uintptr_t slot_start = root().ObjectToSlotStart(object);
-    return StateBitmapFromAddr(slot_start)->IsQuarantined(slot_start);
-  }
-
-  PartitionRoot& root() { return *allocator_.root(); }
-  const PartitionRoot& root() const { return *allocator_.root(); }
-
- private:
-  // Leverage the already-templated version outside `internal::`.
-  partition_alloc::PartitionAllocatorAllowLeaksForTesting allocator_;
-};
-
-namespace {
-
-// The test that expects free() being quarantined only when tag overflow occurs.
-using PartitionAllocPCScanWithMTETest = PartitionAllocPCScanTestBase;
-
-// The test that expects every free() being quarantined.
-class PartitionAllocPCScanTest : public PartitionAllocPCScanTestBase {
- public:
-  PartitionAllocPCScanTest() { root().SetQuarantineAlwaysForTesting(true); }
-  ~PartitionAllocPCScanTest() override {
-    root().SetQuarantineAlwaysForTesting(false);
-  }
-};
-
-using SlotSpan = PartitionRoot::SlotSpan;
-
-struct FullSlotSpanAllocation {
-  SlotSpan* slot_span;
-  void* first;
-  void* last;
-};
-
-// Assumes heap is purged.
-FullSlotSpanAllocation GetFullSlotSpan(PartitionRoot& root,
-                                       size_t object_size) {
-  PA_CHECK(0u == root.get_total_size_of_committed_pages());
-
-  const size_t raw_size = root.AdjustSizeForExtrasAdd(object_size);
-  const size_t bucket_index =
-      root.SizeToBucketIndex(raw_size, root.GetBucketDistribution());
-  PartitionRoot::Bucket& bucket = root.buckets[bucket_index];
-  const size_t num_slots = (bucket.get_bytes_per_span()) / bucket.slot_size;
-
-  uintptr_t first = 0;
-  uintptr_t last = 0;
-  for (size_t i = 0; i < num_slots; ++i) {
-    void* ptr = root.AllocWithFlagsNoHooks(0, object_size, PartitionPageSize());
-    EXPECT_TRUE(ptr);
-    if (i == 0) {
-      first = root.ObjectToSlotStart(ptr);
-    } else if (i == num_slots - 1) {
-      last = root.ObjectToSlotStart(ptr);
-    }
-  }
-
-  EXPECT_EQ(SlotSpan::FromSlotStart(first), SlotSpan::FromSlotStart(last));
-  if (bucket.num_system_pages_per_slot_span ==
-      NumSystemPagesPerPartitionPage()) {
-    // Pointers are expected to be in the same partition page, but have a
-    // different MTE-tag.
-    EXPECT_EQ(UntagAddr(first & PartitionPageBaseMask()),
-              UntagAddr(last & PartitionPageBaseMask()));
-  }
-  EXPECT_EQ(num_slots, bucket.active_slot_spans_head->num_allocated_slots);
-  EXPECT_EQ(nullptr, bucket.active_slot_spans_head->get_freelist_head());
-  EXPECT_TRUE(bucket.is_valid());
-  EXPECT_TRUE(bucket.active_slot_spans_head !=
-              SlotSpan::get_sentinel_slot_span());
-
-  return {bucket.active_slot_spans_head, root.SlotStartToObject(first),
-          root.SlotStartToObject(last)};
-}
-
-bool IsInFreeList(uintptr_t slot_start) {
-  // slot_start isn't MTE-tagged, whereas pointers in the freelist are.
-  void* slot_start_tagged = SlotStartAddr2Ptr(slot_start);
-  auto* slot_span = SlotSpan::FromSlotStart(slot_start);
-  for (auto* entry = slot_span->get_freelist_head(); entry;
-       entry = entry->GetNext(slot_span->bucket->slot_size)) {
-    if (entry == slot_start_tagged) {
-      return true;
-    }
-  }
-  return false;
-}
-
-struct ListBase {
-  // Volatile to prevent the compiler from doing dead store elimination.
-  ListBase* volatile next = nullptr;
-};
-
-template <size_t Size, size_t Alignment = 0>
-struct List final : ListBase {
-  char buffer[Size];
-
-  static List* Create(PartitionRoot& root, ListBase* next = nullptr) {
-    List* list;
-    if (Alignment) {
-      list = static_cast<List*>(
-          root.AlignedAllocWithFlags(0, Alignment, sizeof(List)));
-    } else {
-      list = static_cast<List*>(root.Alloc(sizeof(List), nullptr));
-    }
-    list->next = next;
-    return list;
-  }
-
-  static void Destroy(PartitionRoot& root, List* list) { root.Free(list); }
-};
-
-TEST_F(PartitionAllocPCScanTest, ArbitraryObjectInQuarantine) {
-  using ListType = List<8>;
-
-  auto* obj1 = ListType::Create(root());
-  auto* obj2 = ListType::Create(root());
-  EXPECT_FALSE(IsInQuarantine(obj1));
-  EXPECT_FALSE(IsInQuarantine(obj2));
-
-  ListType::Destroy(root(), obj2);
-  EXPECT_FALSE(IsInQuarantine(obj1));
-  EXPECT_TRUE(IsInQuarantine(obj2));
-}
-
-TEST_F(PartitionAllocPCScanTest, FirstObjectInQuarantine) {
-  static constexpr size_t kAllocationSize = 16;
-
-  FullSlotSpanAllocation full_slot_span =
-      GetFullSlotSpan(root(), kAllocationSize);
-  EXPECT_FALSE(IsInQuarantine(full_slot_span.first));
-
-  root().FreeNoHooks(full_slot_span.first);
-  EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
-}
-
-TEST_F(PartitionAllocPCScanTest, LastObjectInQuarantine) {
-  static constexpr size_t kAllocationSize = 16;
-
-  FullSlotSpanAllocation full_slot_span =
-      GetFullSlotSpan(root(), kAllocationSize);
-  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
-
-  root().FreeNoHooks(full_slot_span.last);
-  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
-}
-
-template <typename SourceList, typename ValueList>
-void TestDanglingReference(PartitionAllocPCScanTest& test,
-                           SourceList* source,
-                           ValueList* value,
-                           PartitionRoot& value_root) {
-  {
-    // Free |value| and leave the dangling reference in |source|.
-    ValueList::Destroy(value_root, value);
-    // Check that |value| is in the quarantine now.
-    EXPECT_TRUE(test.IsInQuarantine(value));
-    // Run PCScan.
-    test.RunPCScan();
-    // Check that the object is still quarantined since it's referenced by
-    // |source|.
-    EXPECT_TRUE(test.IsInQuarantine(value));
-  }
-  {
-    // Get rid of the dangling reference.
-    source->next = nullptr;
-    // Run PCScan again.
-    test.RunPCScan();
-    // Check that the object is no longer in the quarantine.
-    EXPECT_FALSE(test.IsInQuarantine(value));
-    // Check that the object is in the freelist now.
-    EXPECT_TRUE(IsInFreeList(value_root.ObjectToSlotStart(value)));
-  }
-}
-
-void TestDanglingReferenceNotVisited(PartitionAllocPCScanTest& test,
-                                     void* value,
-                                     PartitionRoot& value_root) {
-  value_root.Free(value);
-  // Check that |value| is in the quarantine now.
-  EXPECT_TRUE(test.IsInQuarantine(value));
-  // Run PCScan.
-  test.RunPCScan();
-  // Check that the object is no longer in the quarantine since the pointer to
-  // it was not scanned from the non-scannable partition.
-  EXPECT_FALSE(test.IsInQuarantine(value));
-  // Check that the object is in the freelist now.
-  EXPECT_TRUE(IsInFreeList(value_root.ObjectToSlotStart(value)));
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceSameBucket) {
-  using SourceList = List<8>;
-  using ValueList = SourceList;
-
-  // Create two objects, where |source| references |value|.
-  auto* value = ValueList::Create(root(), nullptr);
-  auto* source = SourceList::Create(root(), value);
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceDifferentBuckets) {
-  using SourceList = List<8>;
-  using ValueList = List<128>;
-
-  // Create two objects, where |source| references |value|.
-  auto* value = ValueList::Create(root(), nullptr);
-  auto* source = SourceList::Create(root(), value);
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceDifferentBucketsAligned) {
-  // Choose a high alignment that almost certainly will cause a gap between slot
-  // spans. But make it less than kMaxSupportedAlignment, or else two
-  // allocations will end up on different super pages.
-  constexpr size_t alignment = kMaxSupportedAlignment / 2;
-  using SourceList = List<8, alignment>;
-  using ValueList = List<128, alignment>;
-
-  // Create two objects, where |source| references |value|.
-  auto* value = ValueList::Create(root(), nullptr);
-  auto* source = SourceList::Create(root(), value);
-
-  // Double check the setup -- make sure that exactly two slot spans were
-  // allocated, within the same super page, with a gap in between.
-  {
-    ::partition_alloc::internal::ScopedGuard guard{root().lock_};
-
-    uintptr_t value_slot_start = root().ObjectToSlotStart(value);
-    uintptr_t source_slot_start = root().ObjectToSlotStart(source);
-    auto super_page = value_slot_start & kSuperPageBaseMask;
-    ASSERT_EQ(super_page, source_slot_start & kSuperPageBaseMask);
-    size_t i = 0;
-    uintptr_t first_slot_span_end = 0;
-    uintptr_t second_slot_span_start = 0;
-    IterateSlotSpans(
-        super_page, true, [&](SlotSpan* slot_span) -> bool {
-          if (i == 0) {
-            first_slot_span_end = SlotSpan::ToSlotSpanStart(slot_span) +
-                                  slot_span->bucket->get_pages_per_slot_span() *
-                                      PartitionPageSize();
-          } else {
-            second_slot_span_start = SlotSpan::ToSlotSpanStart(slot_span);
-          }
-          ++i;
-          return false;
-        });
-    ASSERT_EQ(i, 2u);
-    ASSERT_GT(second_slot_span_start, first_slot_span_end);
-  }
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest,
-       DanglingReferenceSameSlotSpanButDifferentPages) {
-  using SourceList = List<8>;
-  using ValueList = SourceList;
-
-  static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
-      static_cast<size_t>(PartitionPageSize() * 0.75);
-
-  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
-      root(), root().AdjustSizeForExtrasSubtract(
-                  kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
-
-  // Assert that the first and the last objects are in the same slot span but on
-  // different partition pages.
-  // Converting to slot start also takes care of the MTE-tag difference.
-  ASSERT_EQ(SlotSpan::FromObject(full_slot_span.first),
-            SlotSpan::FromObject(full_slot_span.last));
-  uintptr_t first_slot_start = root().ObjectToSlotStart(full_slot_span.first);
-  uintptr_t last_slot_start = root().ObjectToSlotStart(full_slot_span.last);
-  ASSERT_NE(first_slot_start & PartitionPageBaseMask(),
-            last_slot_start & PartitionPageBaseMask());
-
-  // Create two objects, on different partition pages.
-  auto* value = new (full_slot_span.first) ValueList;
-  auto* source = new (full_slot_span.last) SourceList;
-  source->next = value;
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromFullPage) {
-  using SourceList = List<64>;
-  using ValueList = SourceList;
-
-  FullSlotSpanAllocation full_slot_span =
-      GetFullSlotSpan(root(), sizeof(SourceList));
-  void* source_buffer = full_slot_span.first;
-  // This allocation must go through the slow path and call SetNewActivePage(),
-  // which will flush the full page from the active page list.
-  void* value_buffer =
-      root().AllocWithFlagsNoHooks(0, sizeof(ValueList), PartitionPageSize());
-
-  // Assert that the first and the last objects are in different slot spans but
-  // in the same bucket.
-  SlotSpan* source_slot_span =
-      PartitionRoot::SlotSpan::FromObject(source_buffer);
-  SlotSpan* value_slot_span = PartitionRoot::SlotSpan::FromObject(value_buffer);
-  ASSERT_NE(source_slot_span, value_slot_span);
-  ASSERT_EQ(source_slot_span->bucket, value_slot_span->bucket);
-
-  // Create two objects, where |source| is in a full detached page.
-  auto* value = new (value_buffer) ValueList;
-  auto* source = new (source_buffer) SourceList;
-  source->next = value;
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-template <size_t Size>
-struct ListWithInnerReference {
-  char buffer1[Size];
-  // Volatile to prevent the compiler from doing dead store elimination.
-  char* volatile next = nullptr;
-  char buffer2[Size];
-
-  static ListWithInnerReference* Create(PartitionRoot& root) {
-    auto* list = static_cast<ListWithInnerReference*>(
-        root.Alloc(sizeof(ListWithInnerReference), nullptr));
-    return list;
-  }
-
-  static void Destroy(PartitionRoot& root, ListWithInnerReference* list) {
-    root.Free(list);
-  }
-};
-
-// Disabled due to consistent failure http://crbug.com/1242407
-#if BUILDFLAG(IS_ANDROID)
-#define MAYBE_DanglingInnerReference DISABLED_DanglingInnerReference
-#else
-#define MAYBE_DanglingInnerReference DanglingInnerReference
-#endif
-TEST_F(PartitionAllocPCScanTest, MAYBE_DanglingInnerReference) {
-  using SourceList = ListWithInnerReference<64>;
-  using ValueList = SourceList;
-
-  auto* source = SourceList::Create(root());
-  auto* value = ValueList::Create(root());
-  source->next = value->buffer2;
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromSingleSlotSlotSpan) {
-  using SourceList = List<kMaxBucketed - 4096>;
-  using ValueList = SourceList;
-
-  auto* source = SourceList::Create(root());
-  auto* slot_span = SlotSpanMetadata::FromObject(source);
-  ASSERT_TRUE(slot_span->CanStoreRawSize());
-
-  auto* value = ValueList::Create(root());
-  source->next = value;
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingInterPartitionReference) {
-  using SourceList = List<64>;
-  using ValueList = SourceList;
-
-  PartitionRoot source_root(PartitionOptions{
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-  source_root.UncapEmptySlotSpanMemoryForTesting();
-  PartitionRoot value_root(PartitionOptions{
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-  value_root.UncapEmptySlotSpanMemoryForTesting();
-
-  PCScan::RegisterScannableRoot(&source_root);
-  source_root.SetQuarantineAlwaysForTesting(true);
-  PCScan::RegisterScannableRoot(&value_root);
-  value_root.SetQuarantineAlwaysForTesting(true);
-
-  auto* source = SourceList::Create(source_root);
-  auto* value = ValueList::Create(value_root);
-  source->next = value;
-
-  TestDanglingReference(*this, source, value, value_root);
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceToNonScannablePartition) {
-  using SourceList = List<64>;
-  using ValueList = SourceList;
-
-  PartitionRoot source_root(PartitionOptions{
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-  source_root.UncapEmptySlotSpanMemoryForTesting();
-  PartitionRoot value_root(PartitionOptions{
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-  value_root.UncapEmptySlotSpanMemoryForTesting();
-
-  PCScan::RegisterScannableRoot(&source_root);
-  source_root.SetQuarantineAlwaysForTesting(true);
-  PCScan::RegisterNonScannableRoot(&value_root);
-  value_root.SetQuarantineAlwaysForTesting(true);
-
-  auto* source = SourceList::Create(source_root);
-  auto* value = ValueList::Create(value_root);
-  source->next = value;
-
-  TestDanglingReference(*this, source, value, value_root);
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromNonScannablePartition) {
-  using SourceList = List<64>;
-  using ValueList = SourceList;
-
-  PartitionRoot source_root(PartitionOptions{
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-  source_root.UncapEmptySlotSpanMemoryForTesting();
-  PartitionRoot value_root(PartitionOptions{
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-  value_root.UncapEmptySlotSpanMemoryForTesting();
-
-  PCScan::RegisterNonScannableRoot(&source_root);
-  value_root.SetQuarantineAlwaysForTesting(true);
-  PCScan::RegisterScannableRoot(&value_root);
-  source_root.SetQuarantineAlwaysForTesting(true);
-
-  auto* source = SourceList::Create(source_root);
-  auto* value = ValueList::Create(value_root);
-  source->next = value;
-
-  TestDanglingReferenceNotVisited(*this, value, value_root);
-}
-
-// Death tests misbehave on Android, http://crbug.com/643760.
-#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
-#if PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
-TEST_F(PartitionAllocPCScanTest, DoubleFree) {
-  auto* list = List<1>::Create(root());
-  List<1>::Destroy(root(), list);
-  EXPECT_DEATH(List<1>::Destroy(root(), list), "");
-}
-#endif  // PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
-#endif  // defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
-
-template <typename SourceList, typename ValueList>
-void TestDanglingReferenceWithSafepoint(PartitionAllocPCScanTest& test,
-                                        SourceList* source,
-                                        ValueList* value,
-                                        PartitionRoot& value_root) {
-  {
-    // Free |value| and leave the dangling reference in |source|.
-    ValueList::Destroy(value_root, value);
-    // Check that |value| is in the quarantine now.
-    EXPECT_TRUE(test.IsInQuarantine(value));
-    // Schedule PCScan but don't scan.
-    test.SchedulePCScan();
-    // Enter safepoint and scan from mutator.
-    test.JoinPCScanAsMutator();
-    // Check that the object is still quarantined since it's referenced by
-    // |source|.
-    EXPECT_TRUE(test.IsInQuarantine(value));
-    // Check that |value| is not in the freelist.
-    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
-    // Run sweeper.
-    test.FinishPCScanAsScanner();
-    // Check that |value| still exists.
-    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
-  }
-  {
-    // Get rid of the dangling reference.
-    source->next = nullptr;
-    // Schedule PCScan but don't scan.
-    test.SchedulePCScan();
-    // Enter safepoint and scan from mutator.
-    test.JoinPCScanAsMutator();
-    // Check that |value| is not in the freelist yet, since sweeper didn't run.
-    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
-    test.FinishPCScanAsScanner();
-    // Check that the object is no longer in the quarantine.
-    EXPECT_FALSE(test.IsInQuarantine(value));
-    // Check that |value| is in the freelist now.
-    EXPECT_TRUE(IsInFreeList(test.root().ObjectToSlotStart(value)));
-  }
-}
-
-TEST_F(PartitionAllocPCScanTest, Safepoint) {
-  using SourceList = List<64>;
-  using ValueList = SourceList;
-
-  DisableStackScanningScope no_stack_scanning;
-
-  auto* source = SourceList::Create(root());
-  auto* value = ValueList::Create(root());
-  source->next = value;
-
-  TestDanglingReferenceWithSafepoint(*this, source, value, root());
-}
-
-class PartitionAllocPCScanStackScanningTest : public PartitionAllocPCScanTest {
- protected:
-  // Creates and sets a dangling reference in `dangling_reference_`.
-  PA_NOINLINE void CreateDanglingReference() {
-    using ValueList = List<8>;
-    auto* value = ValueList::Create(root(), nullptr);
-    ValueList::Destroy(root(), value);
-    dangling_reference_ = value;
-  }
-
-  PA_NOINLINE void SetupAndRunTest() {
-    // Register the top of the stack to be the current pointer.
-    PCScan::NotifyThreadCreated(GetStackPointer());
-    RunTest();
-  }
-
-  PA_NOINLINE void RunTest() {
-    // This writes the pointer to the stack.
-    [[maybe_unused]] auto* volatile stack_ref = dangling_reference_;
-    // Call the non-inline function that would scan the stack. Don't execute
-    // the rest of the actions inside the function, since otherwise it would
-    // be tail-call optimized and the parent frame's stack with the dangling
-    // pointer would be missed.
-    ScanStack();
-    // Check that the object is still quarantined since it's referenced by
-    // |dangling_reference_|.
-    EXPECT_TRUE(IsInQuarantine(dangling_reference_));
-    // Check that value is not in the freelist.
-    EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(dangling_reference_)));
-    // Run sweeper.
-    FinishPCScanAsScanner();
-    // Check that |dangling_reference_| still exists.
-    EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(dangling_reference_)));
-  }
-
-  PA_NOINLINE void ScanStack() {
-    // Schedule PCScan but don't scan.
-    SchedulePCScan();
-    // Enter safepoint and scan from mutator. This will scan the stack.
-    JoinPCScanAsMutator();
-  }
-
-  static void* dangling_reference_;
-};
-
-// static
-void* PartitionAllocPCScanStackScanningTest::dangling_reference_ = nullptr;
-
-// The test currently fails on some platforms because the dangling stack
-// reference is not found.
-TEST_F(PartitionAllocPCScanStackScanningTest, DISABLED_StackScanning) {
-  PCScan::EnableStackScanning();
-
-  // Set to nullptr if the test is retried.
-  dangling_reference_ = nullptr;
-
-  CreateDanglingReference();
-
-  SetupAndRunTest();
-}
-
-TEST_F(PartitionAllocPCScanTest, DontScanUnusedRawSize) {
-  using ValueList = List<8>;
-
-  // Make sure to commit more memory than requested to have slack for storing
-  // a dangling reference outside of the raw size.
-  const size_t big_size = kMaxBucketed - SystemPageSize() + 1;
-  void* ptr = root().Alloc(big_size, nullptr);
-
-  uintptr_t slot_start = root().ObjectToSlotStart(ptr);
-  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
-  ASSERT_TRUE(big_size + sizeof(void*) <=
-              root().AllocationCapacityFromSlotStart(slot_start));
-  ASSERT_TRUE(slot_span->CanStoreRawSize());
-
-  auto* value = ValueList::Create(root());
-
-  // This not only points past the object, but past all extras around it.
-  // However, there should be enough space between this and the end of the
-  // slot to store some data.
-  uintptr_t source_end = slot_start + slot_span->GetRawSize();
-  // Write the pointer.
-  // Since we stripped the MTE-tag to get |slot_start|, we need to retag it.
-  *static_cast<ValueList**>(TagAddr(source_end)) = value;
-
-  TestDanglingReferenceNotVisited(*this, value, root());
-}
-
-TEST_F(PartitionAllocPCScanTest, PointersToGuardPages) {
-  struct Pointers {
-    void* super_page;
-    void* metadata_page;
-    void* guard_page1;
-    void* scan_bitmap;
-    void* guard_page2;
-  };
-
-  auto* const pointers = static_cast<Pointers*>(
-      root().AllocWithFlagsNoHooks(0, sizeof(Pointers), PartitionPageSize()));
-
-  // Converting to slot start strips MTE tag.
-  const uintptr_t super_page =
-      root().ObjectToSlotStart(pointers) & kSuperPageBaseMask;
-
-  // Initialize scannable pointers with addresses of guard pages and metadata.
-  // None of these point to an MTE-tagged area, so no need for retagging.
-  pointers->super_page = reinterpret_cast<void*>(super_page);
-  pointers->metadata_page = PartitionSuperPageToMetadataArea(super_page);
-  pointers->guard_page1 =
-      static_cast<char*>(pointers->metadata_page) + SystemPageSize();
-  pointers->scan_bitmap = SuperPageStateBitmap(super_page);
-  pointers->guard_page2 = reinterpret_cast<void*>(super_page + kSuperPageSize -
-                                                  PartitionPageSize());
-
-  // Simply run PCScan and expect no crashes.
-  RunPCScan();
-}
-
-TEST_F(PartitionAllocPCScanTest, TwoDanglingPointersToSameObject) {
-  using SourceList = List<8>;
-  using ValueList = List<128>;
-
-  auto* value = ValueList::Create(root(), nullptr);
-  // Create two source objects referring to |value|.
-  SourceList::Create(root(), value);
-  SourceList::Create(root(), value);
-
-  // Destroy |value| and run PCScan.
-  ValueList::Destroy(root(), value);
-  RunPCScan();
-  EXPECT_TRUE(IsInQuarantine(value));
-
-  // Check that the accounted size after the cycle is only sizeof(ValueList).
-  auto* slot_span_metadata = SlotSpan::FromObject(value);
-  const auto& quarantine =
-      PCScan::scheduler().scheduling_backend().GetQuarantineData();
-  EXPECT_EQ(slot_span_metadata->bucket->slot_size, quarantine.current_size);
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingPointerToInaccessibleArea) {
-  static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
-      static_cast<size_t>(PartitionPageSize() * 1.25);
-
-  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
-      root(), root().AdjustSizeForExtrasSubtract(
-                  kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
-
-  // Assert that the number of allocatable bytes for this bucket is smaller
-  // than or equal to the total size of the allocated partition pages.
-  auto* bucket = full_slot_span.slot_span->bucket;
-  ASSERT_LE(bucket->get_bytes_per_span(),
-            bucket->get_pages_per_slot_span() * PartitionPageSize());
-
-  // Let the first object point past the end of the last one + some random
-  // offset.
-  // It should fall within the same slot, so no need for MTE-retagging.
-  static constexpr size_t kOffsetPastEnd = 7;
-  *reinterpret_cast<uint8_t**>(full_slot_span.first) =
-      reinterpret_cast<uint8_t*>(full_slot_span.last) +
-      kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages + kOffsetPastEnd;
-
-  // Destroy the last object and put it in quarantine.
-  root().Free(full_slot_span.last);
-  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
-
-  // Run PCScan. After it, the quarantined object should not be promoted.
-  RunPCScan();
-  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
-}
-
-TEST_F(PartitionAllocPCScanTest, DanglingPointerOutsideUsablePart) {
-  using ValueList = List<kMaxBucketed - 4096>;
-  using SourceList = List<64>;
-
-  auto* value = ValueList::Create(root());
-  auto* slot_span = SlotSpanMetadata::FromObject(value);
-  ASSERT_TRUE(slot_span->CanStoreRawSize());
-
-  auto* source = SourceList::Create(root());
-
-  // Let the |source| object point to the unused area of |value| and expect
-  // |value| to nevertheless be marked during scanning.
-  // It should fall within the same slot, so no need for MTE-retagging.
-  static constexpr size_t kOffsetPastEnd = 7;
-  source->next = reinterpret_cast<ListBase*>(
-      reinterpret_cast<uint8_t*>(value + 1) + kOffsetPastEnd);
-
-  TestDanglingReference(*this, source, value, root());
-}
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-TEST_F(PartitionAllocPCScanWithMTETest, QuarantineOnlyOnTagOverflow) {
-  using ListType = List<64>;
-
-  if (!base::CPU::GetInstanceNoAllocation().has_mte()) {
-    return;
-  }
-
-  {
-    auto* obj1 = ListType::Create(root());
-    ListType::Destroy(root(), obj1);
-    auto* obj2 = ListType::Create(root());
-    // The test relies on an unrandomized freelist! If the slot was not moved
-    // to quarantine, assert that obj2 is the same as obj1 and that the tags
-    // are different.
-    // MTE-retag |obj1|, as the tag changed when freeing it.
-    if (!HasOverflowTag(TagPtr(obj1))) {
-      // Assert that the pointer is the same.
-      ASSERT_EQ(UntagPtr(obj1), UntagPtr(obj2));
-      // Assert that the tag is different.
-      ASSERT_NE(obj1, obj2);
-    }
-  }
-
-  for (size_t i = 0; i < 16; ++i) {
-    auto* obj = ListType::Create(root());
-    ListType::Destroy(root(), obj);
-    // MTE-retag |obj|, as the tag changed when freeing it.
-    obj = TagPtr(obj);
-    // Check if the tag overflows. If so, the object must be in quarantine.
-    if (HasOverflowTag(obj)) {
-      EXPECT_TRUE(IsInQuarantine(obj));
-      EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(obj)));
-      return;
-    } else {
-      EXPECT_FALSE(IsInQuarantine(obj));
-      EXPECT_TRUE(IsInFreeList(root().ObjectToSlotStart(obj)));
-    }
-  }
-
-  EXPECT_FALSE(true && "Should never be reached");
-}
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-}  // namespace
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(USE_STARSCAN)
-#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/starscan/raceful_worklist.h b/base/allocator/partition_allocator/starscan/raceful_worklist.h
deleted file mode 100644
index 7eff9cc..0000000
--- a/base/allocator/partition_allocator/starscan/raceful_worklist.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_
-
-#include <algorithm>
-#include <atomic>
-#include <vector>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-
-namespace partition_alloc::internal {
-
-template <typename T>
-class RacefulWorklist {
-  struct Node {
-    explicit Node(const T& value) : value(value) {}
-    Node(const Node& other)
-        : value(other.value),
-          is_being_visited(
-              other.is_being_visited.load(std::memory_order_relaxed)),
-          is_visited(other.is_visited.load(std::memory_order_relaxed)) {}
-
-    T value;
-    std::atomic<bool> is_being_visited{false};
-    std::atomic<bool> is_visited{false};
-  };
-  using Underlying = std::vector<Node, MetadataAllocator<Node>>;
-
- public:
-  class RandomizedView {
-   public:
-    explicit RandomizedView(RacefulWorklist& worklist)
-        : worklist_(worklist), offset_(0) {
-      if (worklist.data_.size() > 0) {
-        offset_ = static_cast<size_t>(
-            internal::base::RandGenerator(worklist.data_.size()));
-      }
-    }
-
-    RandomizedView(const RandomizedView&) = delete;
-    const RandomizedView& operator=(const RandomizedView&) = delete;
-
-    template <typename Function>
-    void Visit(Function f);
-
-   private:
-    RacefulWorklist& worklist_;
-    size_t offset_;
-  };
-
-  RacefulWorklist() = default;
-
-  RacefulWorklist(const RacefulWorklist&) = delete;
-  RacefulWorklist& operator=(const RacefulWorklist&) = delete;
-
-  void Push(const T& t) { data_.push_back(Node(t)); }
-
-  template <typename It>
-  void Push(It begin, It end) {
-    std::transform(begin, end, std::back_inserter(data_),
-                   [](const T& t) { return Node(t); });
-  }
-
-  template <typename Function>
-  void VisitNonConcurrently(Function) const;
-
- private:
-  Underlying data_;
-  std::atomic<bool> fully_visited_{false};
-};
-
-template <typename T>
-template <typename Function>
-void RacefulWorklist<T>::VisitNonConcurrently(Function f) const {
-  for (const auto& t : data_) {
-    f(t.value);
-  }
-}
-
-template <typename T>
-template <typename Function>
-void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
-  auto& data = worklist_.data_;
-  std::vector<typename Underlying::iterator,
-              MetadataAllocator<typename Underlying::iterator>>
-      to_revisit;
-
-  // To avoid iterating the worklist, quickly check whether it was already
-  // fully visited.
-  if (worklist_.fully_visited_.load(std::memory_order_acquire)) {
-    return;
-  }
-
-  const auto offset_it = std::next(data.begin(), offset_);
-
-  // First, visit items starting from the offset.
-  for (auto it = offset_it; it != data.end(); ++it) {
-    if (it->is_visited.load(std::memory_order_relaxed)) {
-      continue;
-    }
-    if (it->is_being_visited.load(std::memory_order_relaxed)) {
-      to_revisit.push_back(it);
-      continue;
-    }
-    it->is_being_visited.store(true, std::memory_order_relaxed);
-    f(it->value);
-    it->is_visited.store(true, std::memory_order_relaxed);
-  }
-
-  // Then, visit items before the offset.
-  for (auto it = data.begin(); it != offset_it; ++it) {
-    if (it->is_visited.load(std::memory_order_relaxed)) {
-      continue;
-    }
-    if (it->is_being_visited.load(std::memory_order_relaxed)) {
-      to_revisit.push_back(it);
-      continue;
-    }
-    it->is_being_visited.store(true, std::memory_order_relaxed);
-    f(it->value);
-    it->is_visited.store(true, std::memory_order_relaxed);
-  }
-
-  // Finally, racefully visit items that were being visited by another thread.
-  for (auto it : to_revisit) {
-    if (PA_LIKELY(it->is_visited.load(std::memory_order_relaxed))) {
-      continue;
-    }
-    // Don't bail out here if the item is being visited by another thread.
-    // This is helpful to guarantee forward progress if the other thread
-    // is making slow progress.
-    it->is_being_visited.store(true, std::memory_order_relaxed);
-    f(it->value);
-    it->is_visited.store(true, std::memory_order_relaxed);
-  }
-
-  worklist_.fully_visited_.store(true, std::memory_order_release);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_
diff --git a/base/allocator/partition_allocator/starscan/scan_loop.h b/base/allocator/partition_allocator/starscan/scan_loop.h
deleted file mode 100644
index 7619873..0000000
--- a/base/allocator/partition_allocator/starscan/scan_loop.h
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_X86_64)
-// Include order is important, so we disable formatting.
-// clang-format off
-// Including these headers directly should generally be avoided. For the
-// scanning loop, we check at runtime which SIMD extension we can use. Since
-// Chrome is compiled with -msse3 (the minimal requirement), we include the
-// headers directly to make the intrinsics available. Another option could be to
-// use inline assembly, but that would hinder compiler optimization for
-// vectorized instructions.
-#include <immintrin.h>
-#include <smmintrin.h>
-#include <avxintrin.h>
-#include <avx2intrin.h>
-// clang-format on
-#endif
-
-#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-#include <arm_neon.h>
-#endif
-
-namespace partition_alloc::internal {
-
-// Iterates over a range of memory using the best available SIMD extension.
-// Assumes that 64bit platforms have pool support and that the begin pointers
-// of incoming ranges are properly aligned. The class is designed around the
-// CRTP version of the "template method" (in GoF terms). CRTP is needed for
-// fast static dispatch.
-template <typename Derived>
-class ScanLoop {
- public:
-  explicit ScanLoop(SimdSupport simd_type) : simd_type_(simd_type) {}
-
-  ScanLoop(const ScanLoop&) = delete;
-  ScanLoop& operator=(const ScanLoop&) = delete;
-
-  // Scans the input range. Assumes the range is properly aligned. Note that
-  // the function doesn't MTE-tag the input range, as it assumes that MTE is
-  // disabled when the function is called. See DisableMTEScope for details.
-  void Run(uintptr_t begin, uintptr_t end);
-
- private:
-  const Derived& derived() const { return static_cast<const Derived&>(*this); }
-  Derived& derived() { return static_cast<Derived&>(*this); }
-
-#if defined(ARCH_CPU_X86_64)
-  __attribute__((target("avx2"))) void RunAVX2(uintptr_t, uintptr_t);
-  __attribute__((target("sse4.1"))) void RunSSE4(uintptr_t, uintptr_t);
-#endif
-#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-  void RunNEON(uintptr_t, uintptr_t);
-#endif
-
-  void RunUnvectorized(uintptr_t, uintptr_t);
-
-  SimdSupport simd_type_;
-};
-
-template <typename Derived>
-void ScanLoop<Derived>::Run(uintptr_t begin, uintptr_t end) {
-// We allow vectorization only for 64bit platforms, since it requires support
-// of the 64bit regular pool, and only for x86 because a special instruction
-// set is required.
-#if defined(ARCH_CPU_X86_64)
-  if (simd_type_ == SimdSupport::kAVX2) {
-    return RunAVX2(begin, end);
-  }
-  if (simd_type_ == SimdSupport::kSSE41) {
-    return RunSSE4(begin, end);
-  }
-#elif PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-  if (simd_type_ == SimdSupport::kNEON) {
-    return RunNEON(begin, end);
-  }
-#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-  return RunUnvectorized(begin, end);
-}
-
-template <typename Derived>
-void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
-  PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
-  PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-  // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |mask| to untag, for efficiency.
-  const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
-  const uintptr_t base = Derived::RegularPoolBase();
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-  for (; begin < end; begin += sizeof(uintptr_t)) {
-    // Read the region word-by-word. Everything that we read is a potential
-    // pointer to or inside an object on heap. Such an object should be
-    // quarantined, if attempted to free.
-    //
-    // Keep it MTE-untagged. See DisableMTEScope for details.
-    const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin);
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-    if (PA_LIKELY((maybe_ptr & mask) != base)) {
-      continue;
-    }
-#else
-    if (!maybe_ptr) {
-      continue;
-    }
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-    derived().CheckPointer(maybe_ptr);
-  }
-}
-
-#if defined(ARCH_CPU_X86_64)
-template <typename Derived>
-__attribute__((target("avx2"))) void ScanLoop<Derived>::RunAVX2(uintptr_t begin,
-                                                                uintptr_t end) {
-  static constexpr size_t kAlignmentRequirement = 32;
-  static constexpr size_t kWordsInVector = 4;
-  static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
-  PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
-  // Stick to integer instructions. This brings slightly better throughput. For
-  // example, according to the Intel docs, on Broadwell and Haswell the CPI of
-  // vmovdqa (_mm256_load_si256) is half (0.25) that of vmovapd
-  // (_mm256_load_pd).
-  const __m256i vbase = _mm256_set1_epi64x(derived().RegularPoolBase());
-  // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
-  const __m256i regular_pool_mask =
-      _mm256_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
-
-  static_assert(sizeof(__m256i) == kBytesInVector);
-  for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
-    // Keep it MTE-untagged. See DisableMTEScope for details.
-    const __m256i maybe_ptrs =
-        _mm256_load_si256(reinterpret_cast<__m256i*>(begin));
-    const __m256i vand = _mm256_and_si256(maybe_ptrs, regular_pool_mask);
-    const __m256i vcmp = _mm256_cmpeq_epi64(vand, vbase);
-    const int mask = _mm256_movemask_pd(_mm256_castsi256_pd(vcmp));
-    if (PA_LIKELY(!mask)) {
-      continue;
-    }
-    // It's important to extract pointers from the already loaded vector.
-    // Otherwise, new loads can break the in-pool assumption checked above.
-    if (mask & 0b0001) {
-      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 0));
-    }
-    if (mask & 0b0010) {
-      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 1));
-    }
-    if (mask & 0b0100) {
-      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 2));
-    }
-    if (mask & 0b1000) {
-      derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 3));
-    }
-  }
-  // Run unvectorized on the remainder of the region.
-  RunUnvectorized(begin, end);
-}
-
-template <typename Derived>
-__attribute__((target("sse4.1"))) void ScanLoop<Derived>::RunSSE4(
-    uintptr_t begin,
-    uintptr_t end) {
-  static constexpr size_t kAlignmentRequirement = 16;
-  static constexpr size_t kWordsInVector = 2;
-  static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
-  PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
-  const __m128i vbase = _mm_set1_epi64x(derived().RegularPoolBase());
-  // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
-  const __m128i regular_pool_mask =
-      _mm_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
-
-  static_assert(sizeof(__m128i) == kBytesInVector);
-  for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
-    // Keep it MTE-untagged. See DisableMTEScope for details.
-    const __m128i maybe_ptrs =
-        _mm_loadu_si128(reinterpret_cast<__m128i*>(begin));
-    const __m128i vand = _mm_and_si128(maybe_ptrs, regular_pool_mask);
-    const __m128i vcmp = _mm_cmpeq_epi64(vand, vbase);
-    const int mask = _mm_movemask_pd(_mm_castsi128_pd(vcmp));
-    if (PA_LIKELY(!mask)) {
-      continue;
-    }
-    // It's important to extract pointers from the already loaded vector.
-    // Otherwise, new loads can break the in-pool assumption checked above.
-    if (mask & 0b01) {
-      derived().CheckPointer(_mm_cvtsi128_si64(maybe_ptrs));
-    }
-    if (mask & 0b10) {
-      // The mask is used to move the 4th and 3rd dwords into the second and
-      // first position.
-      static constexpr int kSecondWordMask = (3 << 2) | (2 << 0);
-      const __m128i shuffled = _mm_shuffle_epi32(maybe_ptrs, kSecondWordMask);
-      derived().CheckPointer(_mm_cvtsi128_si64(shuffled));
-    }
-  }
-  // Run unvectorized on the remainder of the region.
-  RunUnvectorized(begin, end);
-}
-#endif  // defined(ARCH_CPU_X86_64)
-
-#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-template <typename Derived>
-void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
-  static constexpr size_t kAlignmentRequirement = 16;
-  static constexpr size_t kWordsInVector = 2;
-  static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
-  PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
-  const uint64x2_t vbase = vdupq_n_u64(derived().RegularPoolBase());
-  // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
-  const uint64x2_t regular_pool_mask =
-      vdupq_n_u64(derived().RegularPoolMask() & kPtrUntagMask);
-
-  for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
-    // Keep it MTE-untagged. See DisableMTEScope for details.
-    const uint64x2_t maybe_ptrs = vld1q_u64(reinterpret_cast<uint64_t*>(begin));
-    const uint64x2_t vand = vandq_u64(maybe_ptrs, regular_pool_mask);
-    const uint64x2_t vcmp = vceqq_u64(vand, vbase);
-    const uint32_t max = vmaxvq_u32(vreinterpretq_u32_u64(vcmp));
-    if (PA_LIKELY(!max)) {
-      continue;
-    }
-    // It's important to extract pointers from the already loaded vector.
-    // Otherwise, new loads can break the in-pool assumption checked above.
-    if (vgetq_lane_u64(vcmp, 0)) {
-      derived().CheckPointer(vgetq_lane_u64(maybe_ptrs, 0));
-    }
-    if (vgetq_lane_u64(vcmp, 1)) {
-      derived().CheckPointer(vgetq_lane_u64(maybe_ptrs, 1));
-    }
-  }
-  // Run unvectorized on the remainder of the region.
-  RunUnvectorized(begin, end);
-}
-#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_
diff --git a/base/allocator/partition_allocator/starscan/scan_loop_unittest.cc b/base/allocator/partition_allocator/starscan/scan_loop_unittest.cc
deleted file mode 100644
index ba3b02a..0000000
--- a/base/allocator/partition_allocator/starscan/scan_loop_unittest.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/scan_loop.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-
-namespace partition_alloc::internal {
-
-namespace {
-
-class TestScanLoop final : public ScanLoop<TestScanLoop> {
-  friend class ScanLoop<TestScanLoop>;
-
- public:
-  explicit TestScanLoop(SimdSupport ss) : ScanLoop(ss) {}
-
-  size_t visited() const { return visited_; }
-
-  void Reset() { visited_ = 0; }
-
- private:
-  static constexpr uintptr_t kRegularPoolMask = 0xffffff0000000000;
-  static constexpr uintptr_t kBasePtr = 0x0000560000000000;
-
-  static uintptr_t RegularPoolBase() { return kBasePtr; }
-  static uintptr_t RegularPoolMask() { return kRegularPoolMask; }
-
-  void CheckPointer(uintptr_t maybe_ptr) { ++visited_; }
-
-  size_t visited_ = 0;
-};
-
-static constexpr uintptr_t kValidPtr = 0x000056789abcdef0;
-static constexpr uintptr_t kInvalidPtr = 0x0000aaaaaaaaaaaa;
-static constexpr uintptr_t kZeroPtr = 0x0;
-
-// Tests all possible combinations of incoming args.
-template <size_t Alignment, typename... Args>
-void TestOnRangeWithAlignment(TestScanLoop& sl,
-                              size_t expected_visited,
-                              Args... args) {
-  alignas(Alignment) uintptr_t range[] = {args...};
-  std::sort(std::begin(range), std::end(range));
-  do {
-    sl.Run(reinterpret_cast<uintptr_t>(std::begin(range)),
-           reinterpret_cast<uintptr_t>(std::end(range)));
-    EXPECT_EQ(expected_visited, sl.visited());
-    sl.Reset();
-  } while (std::next_permutation(std::begin(range), std::end(range)));
-}
-
-}  // namespace
-
-TEST(PartitionAllocScanLoopTest, UnvectorizedWithRegularPool) {
-  {
-    TestScanLoop sl(SimdSupport::kUnvectorized);
-    TestOnRangeWithAlignment<8>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kUnvectorized);
-    TestOnRangeWithAlignment<8>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kUnvectorized);
-    TestOnRangeWithAlignment<8>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr);
-  }
-  {
-    // Make sure zeros are skipped.
-    TestScanLoop sl(SimdSupport::kUnvectorized);
-    TestOnRangeWithAlignment<8>(sl, 1u, kValidPtr, kInvalidPtr, kZeroPtr);
-  }
-}
-
-#if defined(ARCH_CPU_X86_64)
-TEST(PartitionAllocScanLoopTest, VectorizedSSE4) {
-  base::CPU cpu;
-  if (!cpu.has_sse41()) {
-    return;
-  }
-  {
-    TestScanLoop sl(SimdSupport::kSSE41);
-    TestOnRangeWithAlignment<16>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kSSE41);
-    TestOnRangeWithAlignment<16>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kSSE41);
-    TestOnRangeWithAlignment<16>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kSSE41);
-    TestOnRangeWithAlignment<16>(sl, 3u, kValidPtr, kValidPtr, kValidPtr);
-  }
-}
-
-TEST(PartitionAllocScanLoopTest, VectorizedAVX2) {
-  base::CPU cpu;
-  if (!cpu.has_avx2()) {
-    return;
-  }
-  {
-    TestScanLoop sl(SimdSupport::kAVX2);
-    TestOnRangeWithAlignment<32>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr,
-                                 kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kAVX2);
-    TestOnRangeWithAlignment<32>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr,
-                                 kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kAVX2);
-    TestOnRangeWithAlignment<32>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr,
-                                 kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kAVX2);
-    TestOnRangeWithAlignment<32>(sl, 3u, kValidPtr, kValidPtr, kValidPtr,
-                                 kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kAVX2);
-    TestOnRangeWithAlignment<32>(sl, 4u, kValidPtr, kValidPtr, kValidPtr,
-                                 kValidPtr, kInvalidPtr);
-  }
-  {
-    // Check that the residual pointer is also visited.
-    TestScanLoop sl(SimdSupport::kAVX2);
-    TestOnRangeWithAlignment<32>(sl, 5u, kValidPtr, kValidPtr, kValidPtr,
-                                 kValidPtr, kValidPtr);
-  }
-}
-#endif  // defined(ARCH_CPU_X86_64)
-
-#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-TEST(PartitionAllocScanLoopTest, VectorizedNEON) {
-  {
-    TestScanLoop sl(SimdSupport::kNEON);
-    TestOnRangeWithAlignment<16>(sl, 0u, kInvalidPtr, kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kNEON);
-    TestOnRangeWithAlignment<16>(sl, 1u, kValidPtr, kInvalidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kNEON);
-    TestOnRangeWithAlignment<16>(sl, 2u, kValidPtr, kValidPtr, kInvalidPtr);
-  }
-  {
-    TestScanLoop sl(SimdSupport::kNEON);
-    TestOnRangeWithAlignment<16>(sl, 3u, kValidPtr, kValidPtr, kValidPtr);
-  }
-  {
-    // Don't visit zeroes.
-    TestScanLoop sl(SimdSupport::kNEON);
-    TestOnRangeWithAlignment<16>(sl, 1u, kInvalidPtr, kValidPtr, kZeroPtr);
-  }
-}
-#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
diff --git a/base/allocator/partition_allocator/starscan/snapshot.cc b/base/allocator/partition_allocator/starscan/snapshot.cc
deleted file mode 100644
index 98183f5..0000000
--- a/base/allocator/partition_allocator/starscan/snapshot.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/snapshot.h"
-
-#include <memory>
-#include <mutex>
-
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
-
-namespace partition_alloc::internal {
-
-std::unique_ptr<StarScanSnapshot> StarScanSnapshot::Create(
-    const PCScanInternal& pcscan) {
-  // Create unique_ptr object to avoid presubmit error.
-  std::unique_ptr<StarScanSnapshot> snapshot(new StarScanSnapshot(pcscan));
-  return snapshot;
-}
-
-StarScanSnapshot::StarScanSnapshot(const PCScanInternal& pcscan) {
-  PA_DCHECK(pcscan.is_initialized());
-  std::lock_guard<std::mutex> lock(pcscan.roots_mutex_);
-
-  for (const auto& root : pcscan.scannable_roots()) {
-    const auto& super_pages = root.second;
-    clear_worklist_.Push(super_pages.begin(), super_pages.end());
-    scan_worklist_.Push(super_pages.begin(), super_pages.end());
-    sweep_worklist_.Push(super_pages.begin(), super_pages.end());
-    if (pcscan.WriteProtectionEnabled()) {
-      unprotect_worklist_.Push(super_pages.begin(), super_pages.end());
-    }
-  }
-
-  for (const auto& root : pcscan.nonscannable_roots()) {
-    const auto& super_pages = root.second;
-    clear_worklist_.Push(super_pages.begin(), super_pages.end());
-    sweep_worklist_.Push(super_pages.begin(), super_pages.end());
-    if (pcscan.WriteProtectionEnabled()) {
-      unprotect_worklist_.Push(super_pages.begin(), super_pages.end());
-    }
-  }
-}
-
-StarScanSnapshot::~StarScanSnapshot() = default;
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/snapshot.h b/base/allocator/partition_allocator/starscan/snapshot.h
deleted file mode 100644
index c92ab80..0000000
--- a/base/allocator/partition_allocator/starscan/snapshot.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_
-
-#include <memory>
-#include <utility>
-
-#include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
-#include "base/allocator/partition_allocator/starscan/raceful_worklist.h"
-
-namespace partition_alloc::internal {
-
-class StarScanSnapshot final : public AllocatedOnPCScanMetadataPartition {
- public:
-  using SuperPageBase = uintptr_t;
-  using SuperPagesWorklist = RacefulWorklist<SuperPageBase>;
-
-  class ViewBase {
-   public:
-    template <typename Function>
-    void VisitConcurrently(Function);
-
-    template <typename Function>
-    void VisitNonConcurrently(Function);
-
-   protected:
-    explicit ViewBase(SuperPagesWorklist& worklist) : worklist_(worklist) {}
-
-   private:
-    SuperPagesWorklist& worklist_;
-  };
-
-  class ClearingView : public ViewBase {
-   public:
-    inline explicit ClearingView(StarScanSnapshot& snapshot);
-  };
-  class ScanningView : public ViewBase {
-   public:
-    inline explicit ScanningView(StarScanSnapshot& snapshot);
-  };
-  class SweepingView : public ViewBase {
-   public:
-    inline explicit SweepingView(StarScanSnapshot& snapshot);
-  };
-  class UnprotectingView : public ViewBase {
-   public:
-    inline explicit UnprotectingView(StarScanSnapshot& snapshot);
-  };
-
-  static std::unique_ptr<StarScanSnapshot> Create(const PCScanInternal&);
-
-  StarScanSnapshot(const StarScanSnapshot&) = delete;
-  StarScanSnapshot& operator=(const StarScanSnapshot&) = delete;
-
-  ~StarScanSnapshot();
-
- private:
-  explicit StarScanSnapshot(const PCScanInternal&);
-
-  SuperPagesWorklist clear_worklist_;
-  SuperPagesWorklist scan_worklist_;
-  SuperPagesWorklist unprotect_worklist_;
-  SuperPagesWorklist sweep_worklist_;
-};
-
-template <typename Function>
-void StarScanSnapshot::ViewBase::VisitConcurrently(Function f) {
-  SuperPagesWorklist::RandomizedView view(worklist_);
-  view.Visit(std::move(f));
-}
-
-template <typename Function>
-void StarScanSnapshot::ViewBase::VisitNonConcurrently(Function f) {
-  worklist_.VisitNonConcurrently(std::move(f));
-}
-
-StarScanSnapshot::ClearingView::ClearingView(StarScanSnapshot& snapshot)
-    : StarScanSnapshot::ViewBase(snapshot.clear_worklist_) {}
-
-StarScanSnapshot::ScanningView::ScanningView(StarScanSnapshot& snapshot)
-    : StarScanSnapshot::ViewBase(snapshot.scan_worklist_) {}
-
-StarScanSnapshot::SweepingView::SweepingView(StarScanSnapshot& snapshot)
-    : StarScanSnapshot::ViewBase(snapshot.sweep_worklist_) {}
-
-StarScanSnapshot::UnprotectingView::UnprotectingView(StarScanSnapshot& snapshot)
-    : StarScanSnapshot::ViewBase(snapshot.unprotect_worklist_) {}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_
diff --git a/base/allocator/partition_allocator/starscan/stack/stack.cc b/base/allocator/partition_allocator/starscan/stack/stack.cc
deleted file mode 100644
index e5dccc0..0000000
--- a/base/allocator/partition_allocator/starscan/stack/stack.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-
-#include <cstdint>
-#include <limits>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_WIN)
-#include <windows.h>
-#else
-#include <pthread.h>
-#endif
-
-#if defined(LIBC_GLIBC)
-extern "C" void* __libc_stack_end;
-#endif
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(IS_WIN)
-
-void* GetStackTop() {
-#if defined(ARCH_CPU_X86_64)
-  return reinterpret_cast<void*>(
-      reinterpret_cast<NT_TIB64*>(NtCurrentTeb())->StackBase);
-#elif defined(ARCH_CPU_32_BITS)
-  return reinterpret_cast<void*>(
-      reinterpret_cast<NT_TIB*>(NtCurrentTeb())->StackBase);
-#elif defined(ARCH_CPU_ARM64)
-  // Windows 8 and later, see
-  // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getcurrentthreadstacklimits
-  ULONG_PTR lowLimit, highLimit;
-  ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
-  return reinterpret_cast<void*>(highLimit);
-#else
-#error "Unsupported GetStackStart"
-#endif
-}
-
-#elif BUILDFLAG(IS_APPLE)
-
-void* GetStackTop() {
-  return pthread_get_stackaddr_np(pthread_self());
-}
-
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-
-void* GetStackTop() {
-  pthread_attr_t attr;
-  int error = pthread_getattr_np(pthread_self(), &attr);
-  if (!error) {
-    void* base;
-    size_t size;
-    error = pthread_attr_getstack(&attr, &base, &size);
-    PA_CHECK(!error);
-    pthread_attr_destroy(&attr);
-    return reinterpret_cast<uint8_t*>(base) + size;
-  }
-
-#if defined(LIBC_GLIBC)
-  // pthread_getattr_np can fail for the main thread. In this case,
-  // just like NaCl, we rely on __libc_stack_end to give us
-  // the start of the stack.
-  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
-  return __libc_stack_end;
-#else
-  return nullptr;
-#endif  // defined(LIBC_GLIBC)
-}
-
-#else  // BUILDFLAG(IS_WIN)
-#error "Unsupported GetStackTop"
-#endif  // BUILDFLAG(IS_WIN)
-
-using IterateStackCallback = void (*)(const Stack*, StackVisitor*, uintptr_t*);
-extern "C" void PAPushAllRegistersAndIterateStack(const Stack*,
-                                                  StackVisitor*,
-                                                  IterateStackCallback);
-
-Stack::Stack(void* stack_top) : stack_top_(stack_top) {
-  PA_DCHECK(stack_top);
-}
-
-PA_NOINLINE uintptr_t* GetStackPointer() {
-  return reinterpret_cast<uintptr_t*>(__builtin_frame_address(0));
-}
-
-namespace {
-
-[[maybe_unused]] void IterateSafeStackIfNecessary(StackVisitor* visitor) {
-#if defined(__has_feature)
-#if __has_feature(safe_stack)
-  // Source:
-  // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/safestack/safestack.cpp
-  constexpr size_t kSafeStackAlignmentBytes = 16;
-  void* stack_ptr = __builtin___get_unsafe_stack_ptr();
-  void* stack_top = __builtin___get_unsafe_stack_top();
-  PA_CHECK(stack_top > stack_ptr);
-  PA_CHECK(0u == (reinterpret_cast<uintptr_t>(stack_ptr) &
-                  (kSafeStackAlignmentBytes - 1)));
-  PA_CHECK(0u == (reinterpret_cast<uintptr_t>(stack_top) &
-                  (kSafeStackAlignmentBytes - 1)));
-  visitor->VisitStack(reinterpret_cast<uintptr_t*>(stack_ptr),
-                      reinterpret_cast<uintptr_t*>(stack_top));
-#endif  // __has_feature(safe_stack)
-#endif  // defined(__has_feature)
-}
-
-// Called by the trampoline that pushes registers on the stack. This method
-// should never be inlined to ensure that a possible redzone cannot contain
-// any data that needs to be scanned.
-// No ASAN support, as the method accesses redzones while walking the stack.
-[[maybe_unused]] PA_NOINLINE PA_NO_SANITIZE("address") void IteratePointersImpl(
-    const Stack* stack,
-    StackVisitor* visitor,
-    uintptr_t* stack_ptr) {
-  PA_DCHECK(stack);
-  PA_DCHECK(visitor);
-  PA_CHECK(nullptr != stack->stack_top());
-  // All supported platforms should have their stack aligned to at least
-  // sizeof(void*).
-  constexpr size_t kMinStackAlignment = sizeof(void*);
-  PA_CHECK(0u ==
-           (reinterpret_cast<uintptr_t>(stack_ptr) & (kMinStackAlignment - 1)));
-  visitor->VisitStack(stack_ptr,
-                      reinterpret_cast<uintptr_t*>(stack->stack_top()));
-}
-
-}  // namespace
-
-void Stack::IteratePointers(StackVisitor* visitor) const {
-#if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
-  PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
-  // No need to deal with callee-saved registers as they will be kept alive by
-  // the regular conservative stack iteration.
-  IterateSafeStackIfNecessary(visitor);
-#endif  // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/stack/stack.h b/base/allocator/partition_allocator/starscan/stack/stack.h
deleted file mode 100644
index 8d9907a..0000000
--- a/base/allocator/partition_allocator/starscan/stack/stack.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal {
-
-// Returns the current stack pointer.
-// TODO(bikineev,1202644): Remove this once base/stack_util.h lands.
-PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t* GetStackPointer();
-// Returns the top of the stack using system API.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* GetStackTop();
-
-// Interface for stack visitation.
-class StackVisitor {
- public:
-  virtual void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) = 0;
-};
-
-// Abstraction over the stack. Supports handling of:
-// - native stack;
-// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) Stack final {
- public:
-  // Sets start of the stack.
-  explicit Stack(void* stack_top);
-
-  // Word-aligned iteration of the stack. Flushes callee-saved registers and
-  // passes the range of the stack on to |visitor|.
-  void IteratePointers(StackVisitor* visitor) const;
-
-  // Returns the top of the stack.
-  void* stack_top() const { return stack_top_; }
-
- private:
-  void* stack_top_;
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_
diff --git a/base/allocator/partition_allocator/starscan/stack/stack_unittest.cc b/base/allocator/partition_allocator/starscan/stack/stack_unittest.cc
deleted file mode 100644
index d3335a4..0000000
--- a/base/allocator/partition_allocator/starscan/stack/stack_unittest.cc
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
-
-#include <memory>
-#include <ostream>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-#if BUILDFLAG(IS_LINUX) && (defined(ARCH_CPU_X86) || defined(ARCH_CPU_X86_64))
-#include <xmmintrin.h>
-#endif
-
-namespace partition_alloc::internal {
-
-namespace {
-
-class PartitionAllocStackTest : public ::testing::Test {
- protected:
-  PartitionAllocStackTest() : stack_(std::make_unique<Stack>(GetStackTop())) {}
-
-  Stack* GetStack() const { return stack_.get(); }
-
- private:
-  std::unique_ptr<Stack> stack_;
-};
-
-class StackScanner final : public StackVisitor {
- public:
-  struct Container {
-    std::unique_ptr<int> value;
-  };
-
-  StackScanner() : container_(std::make_unique<Container>()) {
-    container_->value = std::make_unique<int>();
-  }
-
-  void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) final {
-    for (; stack_ptr != stack_top; ++stack_ptr) {
-      if (*stack_ptr == reinterpret_cast<uintptr_t>(container_->value.get()))
-        found_ = true;
-    }
-  }
-
-  void Reset() { found_ = false; }
-  bool found() const { return found_; }
-  int* needle() const { return container_->value.get(); }
-
- private:
-  std::unique_ptr<Container> container_;
-  bool found_ = false;
-};
-
-}  // namespace
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsOnStackValue) {
-  auto scanner = std::make_unique<StackScanner>();
-
-  // No check that the needle is initially not found as on some platforms it
-  // may be part of temporaries after setting it up through StackScanner.
-  {
-    [[maybe_unused]] int* volatile tmp = scanner->needle();
-    GetStack()->IteratePointers(scanner.get());
-    EXPECT_TRUE(scanner->found());
-  }
-}
-
-TEST_F(PartitionAllocStackTest,
-       IteratePointersFindsOnStackValuePotentiallyUnaligned) {
-  auto scanner = std::make_unique<StackScanner>();
-
-  // No check that the needle is initially not found as on some platforms it
-  // may be part of temporaries after setting it up through StackScanner.
-  {
-    [[maybe_unused]] char a = 'c';
-    [[maybe_unused]] int* volatile tmp = scanner->needle();
-    GetStack()->IteratePointers(scanner.get());
-    EXPECT_TRUE(scanner->found());
-  }
-}
-
-namespace {
-
-// Prevent inlining, as that would allow the compiler to prove that the
-// parameter does not actually need to be materialized.
-//
-// Parameter positions are explicit to test various calling conventions.
-PA_NOINLINE void* RecursivelyPassOnParameterImpl(void* p1,
-                                                 void* p2,
-                                                 void* p3,
-                                                 void* p4,
-                                                 void* p5,
-                                                 void* p6,
-                                                 void* p7,
-                                                 void* p8,
-                                                 Stack* stack,
-                                                 StackVisitor* visitor) {
-  if (p1) {
-    return RecursivelyPassOnParameterImpl(nullptr, p1, nullptr, nullptr,
-                                          nullptr, nullptr, nullptr, nullptr,
-                                          stack, visitor);
-  } else if (p2) {
-    return RecursivelyPassOnParameterImpl(nullptr, nullptr, p2, nullptr,
-                                          nullptr, nullptr, nullptr, nullptr,
-                                          stack, visitor);
-  } else if (p3) {
-    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, p3,
-                                          nullptr, nullptr, nullptr, nullptr,
-                                          stack, visitor);
-  } else if (p4) {
-    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                          p4, nullptr, nullptr, nullptr, stack,
-                                          visitor);
-  } else if (p5) {
-    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                          nullptr, p5, nullptr, nullptr, stack,
-                                          visitor);
-  } else if (p6) {
-    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                          nullptr, nullptr, p6, nullptr, stack,
-                                          visitor);
-  } else if (p7) {
-    return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                          nullptr, nullptr, nullptr, p7, stack,
-                                          visitor);
-  } else if (p8) {
-    stack->IteratePointers(visitor);
-    return p8;
-  }
-  return nullptr;
-}
-
-PA_NOINLINE void* RecursivelyPassOnParameter(size_t num,
-                                             void* parameter,
-                                             Stack* stack,
-                                             StackVisitor* visitor) {
-  switch (num) {
-    case 0:
-      stack->IteratePointers(visitor);
-      return parameter;
-    case 1:
-      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                            nullptr, nullptr, nullptr,
-                                            parameter, stack, visitor);
-    case 2:
-      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                            nullptr, nullptr, parameter,
-                                            nullptr, stack, visitor);
-    case 3:
-      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                            nullptr, parameter, nullptr,
-                                            nullptr, stack, visitor);
-    case 4:
-      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
-                                            parameter, nullptr, nullptr,
-                                            nullptr, stack, visitor);
-    case 5:
-      return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr,
-                                            parameter, nullptr, nullptr,
-                                            nullptr, nullptr, stack, visitor);
-    case 6:
-      return RecursivelyPassOnParameterImpl(nullptr, nullptr, parameter,
-                                            nullptr, nullptr, nullptr, nullptr,
-                                            nullptr, stack, visitor);
-    case 7:
-      return RecursivelyPassOnParameterImpl(nullptr, parameter, nullptr,
-                                            nullptr, nullptr, nullptr, nullptr,
-                                            nullptr, stack, visitor);
-    case 8:
-      return RecursivelyPassOnParameterImpl(parameter, nullptr, nullptr,
-                                            nullptr, nullptr, nullptr, nullptr,
-                                            nullptr, stack, visitor);
-    default:
-      __builtin_unreachable();
-  }
-  __builtin_unreachable();
-}
-
-}  // namespace
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting0) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(0, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting1) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(1, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting2) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(2, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting3) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(3, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting4) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(4, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting5) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(5, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting6) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(6, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting7) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(7, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsParameterNesting8) {
-  auto scanner = std::make_unique<StackScanner>();
-  void* needle = RecursivelyPassOnParameter(8, scanner->needle(), GetStack(),
-                                            scanner.get());
-  EXPECT_EQ(scanner->needle(), needle);
-  EXPECT_TRUE(scanner->found());
-}
-
-// The following test uses inline assembly and has been verified to work on
-// clang. It checks that the stack-scanning trampoline pushes callee-saved
-// registers.
-//
-// The test uses a macro loop as asm() can only be passed string literals.
-#if defined(__clang__) && defined(ARCH_CPU_X86_64) && !BUILDFLAG(IS_WIN)
-
-// Excluded from test: rbp
-#define FOR_ALL_CALLEE_SAVED_REGS(V) \
-  V(rbx)                             \
-  V(r12)                             \
-  V(r13)                             \
-  V(r14)                             \
-  V(r15)
-
-namespace {
-
-extern "C" void IteratePointersNoMangling(Stack* stack, StackVisitor* visitor) {
-  stack->IteratePointers(visitor);
-}
-
-#define DEFINE_MOVE_INTO(reg)                                         \
-  PA_NOINLINE void MoveInto##reg(Stack* local_stack,                  \
-                                 StackScanner* local_scanner) {       \
-    asm volatile("   mov %0, %%" #reg                                 \
-                 "\n mov %1, %%rdi"                                   \
-                 "\n mov %2, %%rsi"                                   \
-                 "\n call %P3"                                        \
-                 "\n mov $0, %%" #reg                                 \
-                 :                                                    \
-                 : "r"(local_scanner->needle()), "r"(local_stack),    \
-                   "r"(local_scanner), "i"(IteratePointersNoMangling) \
-                 : "memory", #reg, "rdi", "rsi", "cc");               \
-  }
-
-FOR_ALL_CALLEE_SAVED_REGS(DEFINE_MOVE_INTO)
-
-}  // namespace
-
-TEST_F(PartitionAllocStackTest, IteratePointersFindsCalleeSavedRegisters) {
-  auto scanner = std::make_unique<StackScanner>();
-
-  // No check that the needle is initially not found as on some platforms it
-  // may be part of temporaries after setting it up through StackScanner.
-
-// First, clear all callee-saved registers.
-#define CLEAR_REGISTER(reg) asm("mov $0, %%" #reg : : : #reg);
-
-  FOR_ALL_CALLEE_SAVED_REGS(CLEAR_REGISTER)
-#undef CLEAR_REGISTER
-
-  // Keep local raw pointers to keep instruction sequences small below.
-  auto* local_stack = GetStack();
-  auto* local_scanner = scanner.get();
-
-// Moves |local_scanner->needle()| into a callee-saved register, leaving the
-// callee-saved register as the only register referencing the needle.
-// (Ignoring implementation-dependent dirty registers/stack.)
-#define KEEP_ALIVE_FROM_CALLEE_SAVED(reg)                                 \
-  local_scanner->Reset();                                                 \
-  MoveInto##reg(local_stack, local_scanner);                              \
-  EXPECT_TRUE(local_scanner->found())                                     \
-      << "pointer in callee-saved register not found. register: " << #reg \
-      << std::endl;
-
-  FOR_ALL_CALLEE_SAVED_REGS(KEEP_ALIVE_FROM_CALLEE_SAVED)
-#undef KEEP_ALIVE_FROM_CALLEE_SAVED
-#undef FOR_ALL_CALLEE_SAVED_REGS
-}
-
-#endif  // defined(__clang__) && defined(ARCH_CPU_X86_64) && !BUILDFLAG(IS_WIN)
-
-#if BUILDFLAG(IS_LINUX) && (defined(ARCH_CPU_X86) || defined(ARCH_CPU_X86_64))
-class CheckStackAlignmentVisitor final : public StackVisitor {
- public:
-  void VisitStack(uintptr_t*, uintptr_t*) final {
-    // Check that the stack doesn't get misaligned by asm trampolines.
-    float f[4] = {0.};
-    [[maybe_unused]] volatile auto xmm = ::_mm_load_ps(f);
-  }
-};
-
-TEST_F(PartitionAllocStackTest, StackAlignment) {
-  auto checker = std::make_unique<CheckStackAlignmentVisitor>();
-  GetStack()->IteratePointers(checker.get());
-}
-#endif  // BUILDFLAG(IS_LINUX) && (defined(ARCH_CPU_X86) ||
-        // defined(ARCH_CPU_X86_64))
-
-}  // namespace partition_alloc::internal
-
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
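The deleted stack tests above all exercise the same mechanism: conservative stack scanning, where IteratePointers visits every word between the current stack pointer and the stack base (plus callee-saved registers spilled by the scanning trampoline) and reports any word that compares equal to a live pointer. Below is a minimal standalone sketch of that idea; the names and the scanning helper are stand-ins for illustration, not the PartitionAlloc API removed here.

// stack_scan_sketch.cc -- standalone sketch of conservative stack scanning
// (stand-in names, not the removed PartitionAlloc API). Compiles with clang
// or gcc; reading raw stack words like this is outside the C++ object model,
// which is exactly what a conservative scanner does.
#include <cstdint>
#include <cstdio>
#include <memory>

// noinline so this function gets its own frame below the caller's slot.
__attribute__((noinline)) bool ScanStackForPointer(uintptr_t* caller_slot,
                                                   const int* needle) {
  uintptr_t marker = 0;  // The address of a local approximates the stack top.
  // Assumes a downward-growing stack, as on the platforms the removed tests
  // target, so this frame sits at lower addresses than |caller_slot|.
  for (uintptr_t* slot = &marker; slot <= caller_slot; ++slot) {
    if (*slot == reinterpret_cast<uintptr_t>(needle)) {
      return true;  // Some stack word holds the needle's address.
    }
  }
  return false;
}

int main() {
  auto value = std::make_unique<int>(42);
  // Taking this local's address forces the raw pointer into a stack slot.
  uintptr_t on_stack_copy = reinterpret_cast<uintptr_t>(value.get());
  const bool found = ScanStackForPointer(&on_stack_copy, value.get());
  std::printf("needle %s\n", found ? "found" : "not found");
  return found ? 0 : 1;
}

The removed inline-assembly test goes one step further and checks that values held only in callee-saved registers are spilled by the trampoline and are therefore also visible to such a scan.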
diff --git a/base/allocator/partition_allocator/starscan/starscan_fwd.h b/base/allocator/partition_allocator/starscan/starscan_fwd.h
deleted file mode 100644
index 9773130..0000000
--- a/base/allocator/partition_allocator/starscan/starscan_fwd.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_
-
-#include <cstdint>
-
-namespace partition_alloc::internal {
-
-// Defines what thread executes a StarScan task.
-enum class Context {
-  // For tasks executed from mutator threads (safepoints).
-  kMutator,
-  // For concurrent scanner tasks.
-  kScanner
-};
-
-// Defines ISA extension for scanning.
-enum class SimdSupport : uint8_t {
-  kUnvectorized,
-  kSSE41,
-  kAVX2,
-  kNEON,
-};
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_
diff --git a/base/allocator/partition_allocator/starscan/state_bitmap.h b/base/allocator/partition_allocator/starscan/state_bitmap.h
deleted file mode 100644
index b9cec60..0000000
--- a/base/allocator/partition_allocator/starscan/state_bitmap.h
+++ /dev/null
@@ -1,491 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATE_BITMAP_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATE_BITMAP_H_
-
-#include <climits>
-#include <cstddef>
-#include <cstdint>
-
-#include <algorithm>
-#include <array>
-#include <atomic>
-#include <tuple>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-
-namespace partition_alloc::internal {
-
-// Bitmap which tracks allocation states. An allocation can be in one of 3
-// states:
-// - freed (00),
-// - allocated (11),
-// - quarantined (01 or 10, depending on the *Scan epoch).
-//
-// The state machine of allocation states:
-//         +-------------+                +-------------+
-//         |             |    malloc()    |             |
-//         |    Freed    +--------------->|  Allocated  |
-//         |    (00)     |    (or 11)     |    (11)     |
-//         |             |                |             |
-//         +-------------+                +------+------+
-//                ^                              |
-//                |                              |
-//    real_free() | (and 00)              free() | (and 01(10))
-//                |                              |
-//                |       +-------------+        |
-//                |       |             |        |
-//                +-------+ Quarantined |<-------+
-//                        |   (01,10)   |
-//                        |             |
-//                        +-------------+
-//                         ^           |
-//                         |  mark()   |
-//                         +-----------+
-//                           (xor 11)
-//
-// The bitmap can be safely accessed from multiple threads, but this doesn't
-// imply visibility of the data (i.e. no ordering guarantees, since relaxed
-// atomics are used underneath). The bitmap itself must be created inside a
-// page whose size and alignment are specified by the template arguments
-// |PageSize| and |PageAlignment|. |AllocationAlignment| specifies the minimal
-// alignment of objects that are allocated inside a page (it serves as the
-// granularity of the bitmap).
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-class StateBitmap final {
-  enum class State : uint8_t {
-    kFreed = 0b00,
-    kQuarantined1 = 0b01,
-    kQuarantined2 = 0b10,
-    kAlloced = 0b11,
-    kNumOfStates = 4,
-  };
-
-  using CellType = uintptr_t;
-  static constexpr size_t kBitsPerCell = sizeof(CellType) * CHAR_BIT;
-  static constexpr size_t kBitsNeededForAllocation =
-      base::bits::Log2Floor(static_cast<size_t>(State::kNumOfStates));
-  static constexpr CellType kStateMask = (1 << kBitsNeededForAllocation) - 1;
-
-  static constexpr size_t kBitmapSize =
-      (PageSize + ((kBitsPerCell * AllocationAlignment) - 1)) /
-      (kBitsPerCell * AllocationAlignment) * kBitsNeededForAllocation;
-  static constexpr size_t kPageOffsetMask = PageAlignment - 1;
-  static constexpr size_t kPageBaseMask = ~kPageOffsetMask;
-
- public:
-  using Epoch = size_t;
-
-  static constexpr size_t kPageSize = PageSize;
-  static constexpr size_t kPageAlignment = PageAlignment;
-  static constexpr size_t kAllocationAlignment = AllocationAlignment;
-  static constexpr size_t kMaxEntries =
-      (kBitmapSize / kBitsNeededForAllocation) * kBitsPerCell;
-
-  inline StateBitmap();
-
-  // Sets the bits corresponding to |address| as allocated.
-  PA_ALWAYS_INLINE void Allocate(uintptr_t address);
-
-  // Sets the bits corresponding to |address| as quarantined. Must be called
-  // only once, in which case returns |true|. Otherwise, if the object was
-  // already quarantined or freed before, returns |false|.
-  PA_ALWAYS_INLINE bool Quarantine(uintptr_t address, Epoch epoch);
-
-  // Marks ("promotes") a quarantined object. Returns |true| on success, or
-  // |false| if the object was already marked before.
-  PA_ALWAYS_INLINE bool MarkQuarantinedAsReachable(uintptr_t address,
-                                                   Epoch epoch);
-
-  // Sets the bits corresponding to |address| as freed.
-  PA_ALWAYS_INLINE void Free(uintptr_t address);
-
-  // Getters that check object state.
-  PA_ALWAYS_INLINE bool IsAllocated(uintptr_t address) const;
-  PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const;
-  PA_ALWAYS_INLINE bool IsFreed(uintptr_t address) const;
-
-  // Iterate objects depending on their state.
-  //
-  // The callback is of type
-  //   void(uintptr_t object_start)
-  template <typename Callback>
-  inline void IterateAllocated(Callback) const;
-  // The callback is of type
-  //   void(uintptr_t object_start)
-  template <typename Callback, decltype(std::declval<Callback>()(0), 0) = 0>
-  inline void IterateQuarantined(Callback) const;
-  // The callback is of type
-  //   void(uintptr_t object_start, bool is_marked)
-  template <typename Callback,
-            decltype(std::declval<Callback>()(0, true), 0) = 0>
-  inline void IterateQuarantined(size_t epoch, Callback) const;
-  // The callback is of type
-  //   void(uintptr_t object_start)
-  template <typename Callback>
-  inline void IterateUnmarkedQuarantined(size_t epoch, Callback) const;
-  // The callback is of type
-  //   void(uintptr_t object_start)
-  // The function is similar to the one above, but it also frees (clears) the
-  // iterated bits.
-  template <typename Callback>
-  inline void IterateUnmarkedQuarantinedAndFree(size_t epoch, Callback);
-
-  inline void Clear();
-
- private:
-  std::atomic<CellType>& AsAtomicCell(size_t cell_index) {
-    return reinterpret_cast<std::atomic<CellType>&>(bitmap_[cell_index]);
-  }
-  const std::atomic<CellType>& AsAtomicCell(size_t cell_index) const {
-    return reinterpret_cast<const std::atomic<CellType>&>(bitmap_[cell_index]);
-  }
-
-  PA_ALWAYS_INLINE unsigned GetBits(uintptr_t address) const;
-
-  struct FilterQuarantine {
-    PA_ALWAYS_INLINE bool operator()(CellType cell) const;
-    const size_t epoch;
-  };
-
-  struct FilterUnmarkedQuarantine {
-    PA_ALWAYS_INLINE bool operator()(CellType cell) const;
-    const size_t epoch;
-  };
-
-  struct FilterAllocated {
-    PA_ALWAYS_INLINE bool operator()(CellType cell) const;
-    const size_t epoch;
-  };
-
-  // Simply calls the callback.
-  struct SimpleCallbackForwarder {
-    PA_ALWAYS_INLINE explicit SimpleCallbackForwarder(size_t epoch) {}
-
-    template <typename Callback>
-    PA_ALWAYS_INLINE void operator()(Callback,
-                                     uintptr_t pointer,
-                                     CellType bits) const;
-  };
-
-  // Calls the callback and passes a bool argument indicating whether a
-  // quarantined object is marked or not.
-  struct QuarantineCallbackForwarder {
-    PA_ALWAYS_INLINE explicit QuarantineCallbackForwarder(size_t epoch)
-        : is_unmarked{epoch} {}
-
-    template <typename Callback>
-    PA_ALWAYS_INLINE void operator()(Callback,
-                                     uintptr_t pointer,
-                                     CellType bits) const;
-    FilterUnmarkedQuarantine is_unmarked;
-  };
-
-  template <typename Filter,
-            typename CallbackForwarder,
-            typename Callback,
-            bool Clear>
-  inline void IterateImpl(size_t epoch, Callback);
-
-  PA_ALWAYS_INLINE CellType LoadCell(size_t cell_index) const;
-  PA_ALWAYS_INLINE static constexpr std::pair<size_t, size_t>
-      AllocationIndexAndBit(uintptr_t);
-
-  std::array<CellType, kBitmapSize> bitmap_;
-};
-
-// The constructor can be omitted, but Chromium's clang plugin wrongly warns
-// that the type is not trivially constructible.
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-inline StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    StateBitmap() = default;
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Allocate(
-    uintptr_t address) {
-  PA_SCAN_DCHECK(IsFreed(address));
-  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
-  const CellType mask = static_cast<CellType>(State::kAlloced) << object_bit;
-  auto& cell = AsAtomicCell(cell_index);
-  cell.fetch_or(mask, std::memory_order_relaxed);
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE bool
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Quarantine(
-    uintptr_t address,
-    Epoch epoch) {
-  // *Scan is enabled at runtime, which means that we can quarantine an
-  // allocation that was previously not recorded in the bitmap. Hence, we can't
-  // reliably check the transition from kAlloced to kQuarantined.
-  static_assert((~static_cast<CellType>(State::kQuarantined1) & kStateMask) ==
-                    (static_cast<CellType>(State::kQuarantined2) & kStateMask),
-                "kQuarantined1 must be the inverse of kQuarantined2");
-  const State quarantine_state =
-      epoch & 0b1 ? State::kQuarantined1 : State::kQuarantined2;
-  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
-  const CellType mask =
-      ~(static_cast<CellType>(quarantine_state) << object_bit);
-  auto& cell = AsAtomicCell(cell_index);
-  const CellType cell_before = cell.fetch_and(mask, std::memory_order_relaxed);
-  // Check if the previous state was also quarantined.
-  return __builtin_popcount(static_cast<unsigned>((cell_before >> object_bit) &
-                                                  kStateMask)) != 1;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE bool
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    MarkQuarantinedAsReachable(uintptr_t address, Epoch epoch) {
-  static_assert((~static_cast<CellType>(State::kQuarantined1) & kStateMask) ==
-                    (static_cast<CellType>(State::kQuarantined2) & kStateMask),
-                "kQuarantined1 must be the inverse of kQuarantined2");
-  const State quarantine_state_old =
-      epoch & 0b1 ? State::kQuarantined2 : State::kQuarantined1;
-  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
-  const CellType clear_mask =
-      ~(static_cast<CellType>(State::kAlloced) << object_bit);
-  const CellType set_mask_old = static_cast<CellType>(quarantine_state_old)
-                                << object_bit;
-  const CellType xor_mask = static_cast<CellType>(0b11) << object_bit;
-  auto& cell = AsAtomicCell(cell_index);
-  CellType expected =
-      (cell.load(std::memory_order_relaxed) & clear_mask) | set_mask_old;
-  CellType desired = expected ^ xor_mask;
-  while (PA_UNLIKELY(!cell.compare_exchange_weak(expected, desired,
-                                                 std::memory_order_relaxed,
-                                                 std::memory_order_relaxed))) {
-    // First check if the object was already marked before or in parallel.
-    if ((expected & set_mask_old) == 0) {
-      // Check that the bits can't be in any state other than
-      // marked-quarantined.
-      PA_SCAN_DCHECK(
-          ((expected >> object_bit) & kStateMask) ==
-          (~static_cast<CellType>(quarantine_state_old) & kStateMask));
-      return false;
-    }
-    // Otherwise, some other bits in the cell were concurrently changed. Update
-    // desired and retry.
-    desired = expected ^ xor_mask;
-  }
-  return true;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Free(
-    uintptr_t address) {
-  // *Scan is enabled at runtime, which means that we can free an allocation
-  // that was previously not recorded as quarantined in the bitmap. Hence, we
-  // can't reliably check the transition from kQuarantined to kFreed.
-  static_assert((~static_cast<CellType>(State::kAlloced) & kStateMask) ==
-                    (static_cast<CellType>(State::kFreed) & kStateMask),
-                "kFreed must be the inverse of kAlloced");
-  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
-  const CellType mask = ~(static_cast<CellType>(State::kAlloced) << object_bit);
-  auto& cell = AsAtomicCell(cell_index);
-  cell.fetch_and(mask, std::memory_order_relaxed);
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE bool
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IsAllocated(
-    uintptr_t address) const {
-  return GetBits(address) == static_cast<unsigned>(State::kAlloced);
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE bool
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IsQuarantined(
-    uintptr_t address) const {
-  // On x86 the CPI of popcnt is the same as that of tzcnt, so we use it
-  // instead of tzcnt + inversion.
-  return __builtin_popcount(GetBits(address)) == 1;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE bool
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IsFreed(
-    uintptr_t address) const {
-  return GetBits(address) == static_cast<unsigned>(State::kFreed);
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE
-    typename StateBitmap<PageSize, PageAlignment, AllocationAlignment>::CellType
-    StateBitmap<PageSize, PageAlignment, AllocationAlignment>::LoadCell(
-        size_t cell_index) const {
-  return AsAtomicCell(cell_index).load(std::memory_order_relaxed);
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-PA_ALWAYS_INLINE constexpr std::pair<size_t, size_t>
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    AllocationIndexAndBit(uintptr_t address) {
-  const uintptr_t offset_in_page = address & kPageOffsetMask;
-  const size_t allocation_number =
-      (offset_in_page / kAllocationAlignment) * kBitsNeededForAllocation;
-  const size_t cell_index = allocation_number / kBitsPerCell;
-  PA_SCAN_DCHECK(kBitmapSize > cell_index);
-  const size_t bit = allocation_number % kBitsPerCell;
-  return {cell_index, bit};
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-unsigned StateBitmap<PageSize, PageAlignment, AllocationAlignment>::GetBits(
-    uintptr_t address) const {
-  auto [cell_index, object_bit] = AllocationIndexAndBit(address);
-  return (LoadCell(cell_index) >> object_bit) & kStateMask;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-bool StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    FilterQuarantine::operator()(CellType bits) const {
-  return __builtin_popcount(static_cast<unsigned>(bits)) == 1;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-bool StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    FilterUnmarkedQuarantine::operator()(CellType bits) const {
-  // Truth table:
-  // epoch & 1 | bits | result
-  //     0     |  01  |   1
-  //     1     |  10  |   1
-  //     *     |  **  |   0
-  return bits - (epoch & 0b01) == 0b01;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-bool StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    FilterAllocated::operator()(CellType bits) const {
-  return bits == 0b11;
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback>
-PA_ALWAYS_INLINE void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    SimpleCallbackForwarder::operator()(Callback callback,
-                                        uintptr_t pointer,
-                                        CellType bits) const {
-  callback(pointer);
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback>
-PA_ALWAYS_INLINE void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    QuarantineCallbackForwarder::operator()(Callback callback,
-                                            uintptr_t pointer,
-                                            CellType bits) const {
-  callback(pointer, !is_unmarked(bits));
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Filter,
-          typename CallbackForwarder,
-          typename Callback,
-          bool Clear>
-inline void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateImpl(
-    size_t epoch,
-    Callback callback) {
-  // The bitmap (|this|) is allocated inside a page aligned to |kPageAlignment|.
-  Filter filter{epoch};
-  CallbackForwarder callback_forwarder{epoch};
-  const uintptr_t base = reinterpret_cast<uintptr_t>(this) & kPageBaseMask;
-  for (size_t cell_index = 0; cell_index < kBitmapSize; ++cell_index) {
-    CellType value = LoadCell(cell_index);
-    while (value) {
-      const size_t trailing_zeroes =
-          static_cast<size_t>(base::bits::CountTrailingZeroBits(value) & ~0b1);
-      const size_t clear_value_mask =
-          ~(static_cast<CellType>(kStateMask) << trailing_zeroes);
-      const CellType bits = (value >> trailing_zeroes) & kStateMask;
-      if (!filter(bits)) {
-        // Clear current object bit in temporary value to advance iteration.
-        value &= clear_value_mask;
-        continue;
-      }
-      const size_t object_number =
-          (cell_index * kBitsPerCell) + trailing_zeroes;
-      const uintptr_t object_address =
-          base +
-          (object_number * kAllocationAlignment / kBitsNeededForAllocation);
-
-      callback_forwarder(callback, object_address, bits);
-
-      if (Clear) {
-        // Clear the current bits.
-        AsAtomicCell(cell_index)
-            .fetch_and(clear_value_mask, std::memory_order_relaxed);
-      }
-
-      // Clear current object bit in temporary value to advance iteration.
-      value &= clear_value_mask;
-    }
-  }
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback>
-inline void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateAllocated(
-    Callback callback) const {
-  const_cast<StateBitmap*>(this)
-      ->IterateImpl<FilterAllocated, SimpleCallbackForwarder, Callback, false>(
-          0, std::move(callback));
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback, decltype(std::declval<Callback>()(0), 0)>
-inline void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateQuarantined(
-    Callback callback) const {
-  const_cast<StateBitmap*>(this)
-      ->IterateImpl<FilterQuarantine, SimpleCallbackForwarder, Callback, false>(
-          0, std::move(callback));
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback, decltype(std::declval<Callback>()(0, true), 0)>
-inline void
-StateBitmap<PageSize, PageAlignment, AllocationAlignment>::IterateQuarantined(
-    size_t epoch,
-    Callback callback) const {
-  const_cast<StateBitmap*>(this)
-      ->IterateImpl<FilterQuarantine, QuarantineCallbackForwarder, Callback,
-                    false>(epoch, std::move(callback));
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback>
-inline void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    IterateUnmarkedQuarantined(size_t epoch, Callback callback) const {
-  const_cast<StateBitmap*>(this)
-      ->IterateImpl<FilterUnmarkedQuarantine, SimpleCallbackForwarder, Callback,
-                    false>(epoch, std::move(callback));
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-template <typename Callback>
-inline void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::
-    IterateUnmarkedQuarantinedAndFree(size_t epoch, Callback callback) {
-  IterateImpl<FilterUnmarkedQuarantine, SimpleCallbackForwarder, Callback,
-              true>(epoch, std::move(callback));
-}
-
-template <size_t PageSize, size_t PageAlignment, size_t AllocationAlignment>
-void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Clear() {
-  std::fill(bitmap_.begin(), bitmap_.end(), '\0');
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATE_BITMAP_H_
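For context on the file removed above: StateBitmap packs two state bits per allocation slot (00 freed, 11 allocated, 01/10 quarantined depending on the epoch) into uintptr_t cells, and AllocationIndexAndBit reduces an address to a (cell, bit) pair from its page offset and the allocation alignment. The sketch below shows just that packing arithmetic with hypothetical names and fixed constants in place of the template parameters; it deliberately omits the atomics and the epoch-based transitions.

// state_bitmap_sketch.cc -- standalone, non-atomic sketch of the 2-bit
// packing used by the removed StateBitmap (hypothetical names; fixed
// constants instead of the PageSize/PageAlignment/AllocationAlignment
// template parameters).
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <utility>

constexpr size_t kAllocationAlignment = 16;  // Granularity of one slot.
constexpr size_t kBitsPerState = 2;   // 00 freed, 11 allocated, 01/10 quarantined.
constexpr size_t kBitsPerCell = sizeof(uintptr_t) * 8;
constexpr size_t kSlotsTracked = 1024;
constexpr size_t kCells =
    (kSlotsTracked * kBitsPerState + kBitsPerCell - 1) / kBitsPerCell;

std::array<uintptr_t, kCells> bitmap{};

// Maps an offset within the tracked page to a (cell index, bit index) pair.
std::pair<size_t, size_t> IndexAndBit(size_t offset_in_page) {
  const size_t bit_number =
      (offset_in_page / kAllocationAlignment) * kBitsPerState;
  return {bit_number / kBitsPerCell, bit_number % kBitsPerCell};
}

void SetState(size_t offset, uintptr_t state) {
  auto [cell, bit] = IndexAndBit(offset);
  bitmap[cell] &= ~(uintptr_t{0b11} << bit);  // Clear the old 2-bit state.
  bitmap[cell] |= state << bit;               // Write the new one.
}

uintptr_t GetState(size_t offset) {
  auto [cell, bit] = IndexAndBit(offset);
  return (bitmap[cell] >> bit) & 0b11;
}

int main() {
  SetState(/*offset=*/48, /*state=*/0b11);  // Allocate.
  SetState(/*offset=*/48, /*state=*/0b01);  // Quarantine in epoch 0.
  std::printf("state bits: %llu\n",
              static_cast<unsigned long long>(GetState(48)));
  return 0;
}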
diff --git a/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc b/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc
deleted file mode 100644
index d03c874..0000000
--- a/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc
+++ /dev/null
@@ -1,346 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal {
-
-namespace {
-
-using TestBitmap = StateBitmap<kSuperPageSize, kSuperPageSize, kAlignment>;
-
-class PageWithBitmap final {
- public:
-  PageWithBitmap()
-      : base_(AllocPages(kSuperPageSize,
-                         kSuperPageAlignment,
-                         PageAccessibilityConfiguration(
-                             PageAccessibilityConfiguration::kReadWrite),
-                         PageTag::kPartitionAlloc)),
-        bitmap_(new(reinterpret_cast<void*>(base_)) TestBitmap) {}
-
-  PageWithBitmap(const PageWithBitmap&) = delete;
-  PageWithBitmap& operator=(const PageWithBitmap&) = delete;
-
-  ~PageWithBitmap() { FreePages(base_, kSuperPageSize); }
-
-  TestBitmap& bitmap() const { return *bitmap_; }
-
-  void* base() const { return reinterpret_cast<void*>(base_); }
-  size_t size() const { return kSuperPageSize; }
-
-  uintptr_t base_;
-  TestBitmap* bitmap_;
-};
-
-class PartitionAllocStateBitmapTest : public ::testing::Test {
- protected:
-  TestBitmap& bitmap() const { return page.bitmap(); }
-
-  void AllocateObject(size_t object_position) {
-    page.bitmap().Allocate(ObjectAddress(object_position));
-  }
-
-  void FreeObject(size_t object_position) {
-    page.bitmap().Free(ObjectAddress(object_position));
-  }
-
-  bool QuarantineObject(size_t object_position, size_t epoch) {
-    return page.bitmap().Quarantine(ObjectAddress(object_position), epoch);
-  }
-
-  bool MarkQuarantinedObject(size_t object_position, size_t epoch) {
-    return page.bitmap().MarkQuarantinedAsReachable(
-        ObjectAddress(object_position), epoch);
-  }
-
-  bool IsAllocated(size_t object_position) const {
-    return page.bitmap().IsAllocated(ObjectAddress(object_position));
-  }
-
-  bool IsQuarantined(size_t object_position) const {
-    return page.bitmap().IsQuarantined(ObjectAddress(object_position));
-  }
-
-  bool IsFreed(size_t object_position) const {
-    return page.bitmap().IsFreed(ObjectAddress(object_position));
-  }
-
-  void AssertAllocated(size_t object_position) const {
-    EXPECT_TRUE(IsAllocated(object_position));
-    EXPECT_FALSE(IsQuarantined(object_position));
-    EXPECT_FALSE(IsFreed(object_position));
-  }
-
-  void AssertFreed(size_t object_position) const {
-    EXPECT_FALSE(IsAllocated(object_position));
-    EXPECT_FALSE(IsQuarantined(object_position));
-    EXPECT_TRUE(IsFreed(object_position));
-  }
-
-  void AssertQuarantined(size_t object_position) const {
-    EXPECT_FALSE(IsAllocated(object_position));
-    EXPECT_TRUE(IsQuarantined(object_position));
-    EXPECT_FALSE(IsFreed(object_position));
-  }
-
-  size_t CountAllocated() const {
-    size_t count = 0;
-    bitmap().IterateAllocated([&count](uintptr_t) { count++; });
-    return count;
-  }
-
-  size_t CountQuarantined() const {
-    size_t count = 0;
-    bitmap().IterateQuarantined([&count](uintptr_t) { count++; });
-    return count;
-  }
-
-  bool IsQuarantineEmpty() const { return !CountQuarantined(); }
-
-  uintptr_t ObjectAddress(size_t pos) const {
-    return reinterpret_cast<uintptr_t>(page.base()) + sizeof(TestBitmap) +
-           pos * kAlignment;
-  }
-
-  static constexpr uintptr_t LastIndex() {
-    return TestBitmap::kMaxEntries - (sizeof(TestBitmap) / kAlignment) - 1;
-  }
-
-  static constexpr uintptr_t MiddleIndex() { return LastIndex() / 2; }
-
- private:
-  PageWithBitmap page;
-};
-
-constexpr size_t kTestEpoch = 0;
-
-}  // namespace
-
-TEST_F(PartitionAllocStateBitmapTest, MoreThanZeroEntriesPossible) {
-  const size_t max_entries = TestBitmap::kMaxEntries;
-  EXPECT_LT(0u, max_entries);
-}
-
-TEST_F(PartitionAllocStateBitmapTest, InitialQuarantineEmpty) {
-  EXPECT_TRUE(IsQuarantineEmpty());
-}
-
-TEST_F(PartitionAllocStateBitmapTest, QuarantineImpliesNonEmpty) {
-  AllocateObject(0);
-  EXPECT_TRUE(IsQuarantineEmpty());
-  QuarantineObject(0, kTestEpoch);
-  EXPECT_FALSE(IsQuarantineEmpty());
-}
-
-TEST_F(PartitionAllocStateBitmapTest, RepetitiveQuarantine) {
-  AllocateObject(MiddleIndex());
-  EXPECT_TRUE(QuarantineObject(MiddleIndex(), kTestEpoch));
-  EXPECT_FALSE(QuarantineObject(MiddleIndex(), kTestEpoch));
-}
-
-TEST_F(PartitionAllocStateBitmapTest, CountAllocated) {
-  AllocateObject(0);
-  EXPECT_EQ(1u, CountAllocated());
-  QuarantineObject(0, kTestEpoch);
-  EXPECT_EQ(0u, CountAllocated());
-}
-
-TEST_F(PartitionAllocStateBitmapTest, StateTransitions) {
-  for (auto i : {uintptr_t{0}, uintptr_t{1}, LastIndex() - 1, LastIndex()}) {
-    AssertFreed(i);
-
-    AllocateObject(i);
-    AssertAllocated(i);
-
-    QuarantineObject(i, kTestEpoch);
-    AssertQuarantined(i);
-
-    MarkQuarantinedObject(i, kTestEpoch);
-    AssertQuarantined(i);
-
-    FreeObject(i);
-    AssertFreed(i);
-  }
-}
-
-TEST_F(PartitionAllocStateBitmapTest, MultipleMarks) {
-  AllocateObject(0);
-  QuarantineObject(0, kTestEpoch);
-
-  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch));
-
-  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch + 1));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 1));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 1));
-
-  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch + 2));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 2));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 2));
-}
-
-TEST_F(PartitionAllocStateBitmapTest, MultipleMarksAdjacent) {
-  AllocateObject(0);
-  QuarantineObject(0, kTestEpoch);
-
-  AllocateObject(1);
-  QuarantineObject(1, kTestEpoch);
-
-  AllocateObject(2);
-  QuarantineObject(2, kTestEpoch);
-
-  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch));
-  EXPECT_TRUE(MarkQuarantinedObject(1, kTestEpoch));
-  EXPECT_TRUE(MarkQuarantinedObject(2, kTestEpoch));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch));
-  EXPECT_FALSE(MarkQuarantinedObject(1, kTestEpoch));
-  EXPECT_FALSE(MarkQuarantinedObject(2, kTestEpoch));
-
-  EXPECT_TRUE(MarkQuarantinedObject(0, kTestEpoch + 1));
-  EXPECT_TRUE(MarkQuarantinedObject(1, kTestEpoch + 1));
-  EXPECT_TRUE(MarkQuarantinedObject(2, kTestEpoch + 1));
-  EXPECT_FALSE(MarkQuarantinedObject(0, kTestEpoch + 1));
-  EXPECT_FALSE(MarkQuarantinedObject(1, kTestEpoch + 1));
-  EXPECT_FALSE(MarkQuarantinedObject(2, kTestEpoch + 1));
-}
-
-TEST_F(PartitionAllocStateBitmapTest, QuarantineFreeMultipleObjects) {
-  static constexpr size_t kCount = 256;
-  for (size_t i = 0; i < kCount; ++i) {
-    AllocateObject(i);
-  }
-  EXPECT_EQ(kCount, CountAllocated());
-  EXPECT_EQ(0u, CountQuarantined());
-
-  for (size_t i = 0; i < kCount; ++i) {
-    QuarantineObject(i, kTestEpoch);
-  }
-  EXPECT_EQ(0u, CountAllocated());
-  EXPECT_EQ(kCount, CountQuarantined());
-
-  for (size_t i = 0; i < kCount; ++i) {
-    FreeObject(i);
-    EXPECT_EQ(kCount - i - 1, CountQuarantined());
-  }
-  EXPECT_TRUE(IsQuarantineEmpty());
-}
-
-TEST_F(PartitionAllocStateBitmapTest, AdjacentQuarantinedObjectsAtBegin) {
-  AllocateObject(0);
-  QuarantineObject(0, kTestEpoch);
-  AllocateObject(1);
-  QuarantineObject(1, kTestEpoch);
-
-  EXPECT_FALSE(IsQuarantined(2));
-  {
-    size_t count = 0;
-    this->bitmap().IterateQuarantined([&count, this](uintptr_t current) {
-      if (count == 0) {
-        EXPECT_EQ(ObjectAddress(0), current);
-      } else if (count == 1) {
-        EXPECT_EQ(ObjectAddress(1), current);
-      }
-      count++;
-    });
-
-    EXPECT_EQ(2u, count);
-  }
-  // Now mark only the first object.
-  {
-    MarkQuarantinedObject(0, kTestEpoch);
-
-    size_t count = 0;
-    this->bitmap().IterateUnmarkedQuarantined(
-        kTestEpoch, [&count, this](uintptr_t current) {
-          if (count == 0) {
-            EXPECT_EQ(ObjectAddress(1), current);
-          }
-          count++;
-        });
-
-    EXPECT_EQ(1u, count);
-  }
-}
-
-TEST_F(PartitionAllocStateBitmapTest, AdjacentQuarantinedObjectsAtMiddle) {
-  AllocateObject(MiddleIndex());
-  QuarantineObject(MiddleIndex(), kTestEpoch);
-  AllocateObject(MiddleIndex() + 1);
-  QuarantineObject(MiddleIndex() + 1, kTestEpoch);
-  {
-    size_t count = 0;
-    this->bitmap().IterateQuarantined([&count, this](uintptr_t current) {
-      if (count == 0) {
-        EXPECT_EQ(ObjectAddress(MiddleIndex()), current);
-      } else if (count == 1) {
-        EXPECT_EQ(ObjectAddress(MiddleIndex() + 1), current);
-      }
-      count++;
-    });
-
-    EXPECT_EQ(2u, count);
-  }
-  // Now mark only the first object.
-  {
-    MarkQuarantinedObject(MiddleIndex(), kTestEpoch);
-
-    size_t count = 0;
-    this->bitmap().IterateUnmarkedQuarantined(
-        kTestEpoch, [&count, this](uintptr_t current) {
-          if (count == 0) {
-            EXPECT_EQ(ObjectAddress(MiddleIndex() + 1), current);
-          }
-          count++;
-        });
-
-    EXPECT_EQ(1u, count);
-  }
-}
-
-TEST_F(PartitionAllocStateBitmapTest, AdjacentQuarantinedObjectsAtEnd) {
-  AllocateObject(LastIndex());
-  QuarantineObject(LastIndex(), kTestEpoch);
-  AllocateObject(LastIndex() - 1);
-  QuarantineObject(LastIndex() - 1, kTestEpoch);
-
-  EXPECT_FALSE(IsQuarantined(LastIndex() - 2));
-  {
-    size_t count = 0;
-    this->bitmap().IterateQuarantined([&count, this](uintptr_t current) {
-      if (count == 0) {
-        EXPECT_EQ(ObjectAddress(LastIndex() - 1), current);
-      } else if (count == 1) {
-        EXPECT_EQ(ObjectAddress(LastIndex()), current);
-      }
-      count++;
-    });
-
-    EXPECT_EQ(2u, count);
-  }
-  // Now mark only the first object.
-  {
-    MarkQuarantinedObject(LastIndex(), kTestEpoch);
-
-    size_t count = 0;
-    this->bitmap().IterateUnmarkedQuarantined(
-        kTestEpoch, [&count, this](uintptr_t current) {
-          if (count == 0) {
-            EXPECT_EQ(ObjectAddress(LastIndex() - 1), current);
-          }
-          count++;
-        });
-
-    EXPECT_EQ(1u, count);
-  }
-}
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/stats_collector.cc b/base/allocator/partition_allocator/starscan/stats_collector.cc
deleted file mode 100644
index b06e2c0..0000000
--- a/base/allocator/partition_allocator/starscan/stats_collector.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/stats_collector.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/starscan/logging.h"
-#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
-
-namespace partition_alloc::internal {
-
-StatsCollector::StatsCollector(const char* process_name,
-                               size_t quarantine_last_size)
-    : process_name_(process_name),
-      quarantine_last_size_(quarantine_last_size) {}
-
-StatsCollector::~StatsCollector() = default;
-
-base::TimeDelta StatsCollector::GetOverallTime() const {
-  return GetTimeImpl<Context::kMutator>(mutator_trace_events_,
-                                        MutatorId::kOverall) +
-         GetTimeImpl<Context::kScanner>(scanner_trace_events_,
-                                        ScannerId::kOverall);
-}
-
-void StatsCollector::ReportTracesAndHists(
-    partition_alloc::StatsReporter& reporter) const {
-  ReportTracesAndHistsImpl<Context::kMutator>(reporter, mutator_trace_events_);
-  ReportTracesAndHistsImpl<Context::kScanner>(reporter, scanner_trace_events_);
-  ReportSurvivalRate(reporter);
-}
-
-template <Context context>
-base::TimeDelta StatsCollector::GetTimeImpl(
-    const DeferredTraceEventMap<context>& event_map,
-    IdType<context> id) const {
-  base::TimeDelta overall;
-  for (const auto& tid_and_events : event_map.get_underlying_map_unsafe()) {
-    const auto& events = tid_and_events.second;
-    const auto& event = events[static_cast<size_t>(id)];
-    overall += (event.end_time - event.start_time);
-  }
-  return overall;
-}
-
-template <Context context>
-void StatsCollector::ReportTracesAndHistsImpl(
-    partition_alloc::StatsReporter& reporter,
-    const DeferredTraceEventMap<context>& event_map) const {
-  std::array<base::TimeDelta, static_cast<size_t>(IdType<context>::kNumIds)>
-      accumulated_events{};
-  // First, report traces and accumulate each trace scope to report UMA hists.
-  for (const auto& tid_and_events : event_map.get_underlying_map_unsafe()) {
-    const internal::base::PlatformThreadId tid = tid_and_events.first;
-    const auto& events = tid_and_events.second;
-    PA_DCHECK(accumulated_events.size() == events.size());
-    for (size_t id = 0; id < events.size(); ++id) {
-      const auto& event = events[id];
-      if (event.start_time.is_null()) {
-        // If start_time is null, the event was never triggered, e.g. a
-        // safepoint that bailed out because it started at the end of scanning.
-        PA_DCHECK(event.end_time.is_null());
-        continue;
-      }
-      reporter.ReportTraceEvent(static_cast<IdType<context>>(id), tid,
-                                event.start_time.ToInternalValue(),
-                                event.end_time.ToInternalValue());
-      accumulated_events[id] += (event.end_time - event.start_time);
-    }
-  }
-  // Report UMA if process_name is set.
-  if (!process_name_) {
-    return;
-  }
-  for (size_t id = 0; id < accumulated_events.size(); ++id) {
-    if (accumulated_events[id].is_zero()) {
-      continue;
-    }
-    reporter.ReportStats(ToUMAString(static_cast<IdType<context>>(id)).c_str(),
-                         accumulated_events[id].InMicroseconds());
-  }
-}
-
-void StatsCollector::ReportSurvivalRate(
-    partition_alloc::StatsReporter& reporter) const {
-  const double survived_rate =
-      static_cast<double>(survived_quarantine_size()) / quarantine_last_size_;
-  reporter.ReportSurvivedQuarantineSize(survived_quarantine_size());
-  reporter.ReportSurvivedQuarantinePercent(survived_rate);
-  PA_PCSCAN_VLOG(2) << "quarantine size: " << quarantine_last_size_ << " -> "
-                    << survived_quarantine_size()
-                    << ", swept bytes: " << swept_size()
-                    << ", survival rate: " << survived_rate;
-  if (discarded_quarantine_size_) {
-    PA_PCSCAN_VLOG(2) << "discarded quarantine size: "
-                      << discarded_quarantine_size_;
-  }
-}
-
-template base::TimeDelta StatsCollector::GetTimeImpl(
-    const DeferredTraceEventMap<Context::kMutator>&,
-    IdType<Context::kMutator>) const;
-template base::TimeDelta StatsCollector::GetTimeImpl(
-    const DeferredTraceEventMap<Context::kScanner>&,
-    IdType<Context::kScanner>) const;
-
-template void StatsCollector::ReportTracesAndHistsImpl(
-    partition_alloc::StatsReporter& reporter,
-    const DeferredTraceEventMap<Context::kMutator>&) const;
-template void StatsCollector::ReportTracesAndHistsImpl(
-    partition_alloc::StatsReporter& reporter,
-    const DeferredTraceEventMap<Context::kScanner>&) const;
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/starscan/stats_collector.h b/base/allocator/partition_allocator/starscan/stats_collector.h
deleted file mode 100644
index cf6f9238..0000000
--- a/base/allocator/partition_allocator/starscan/stats_collector.h
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_
-
-#include <array>
-#include <atomic>
-#include <functional>
-#include <mutex>
-#include <string>
-#include <type_traits>
-#include <unordered_map>
-#include <utility>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-#include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
-
-namespace partition_alloc {
-
-class StatsReporter;
-
-namespace internal {
-
-#define FOR_ALL_PCSCAN_SCANNER_SCOPES(V) \
-  V(Clear)                               \
-  V(Scan)                                \
-  V(Sweep)                               \
-  V(Overall)
-
-#define FOR_ALL_PCSCAN_MUTATOR_SCOPES(V) \
-  V(Clear)                               \
-  V(ScanStack)                           \
-  V(Scan)                                \
-  V(Overall)
-
-class StatsCollector final {
- public:
-  enum class ScannerId {
-#define DECLARE_ENUM(name) k##name,
-    FOR_ALL_PCSCAN_SCANNER_SCOPES(DECLARE_ENUM)
-#undef DECLARE_ENUM
-        kNumIds,
-  };
-
-  enum class MutatorId {
-#define DECLARE_ENUM(name) k##name,
-    FOR_ALL_PCSCAN_MUTATOR_SCOPES(DECLARE_ENUM)
-#undef DECLARE_ENUM
-        kNumIds,
-  };
-
-  template <Context context>
-  using IdType =
-      std::conditional_t<context == Context::kMutator, MutatorId, ScannerId>;
-
-  // We don't trace events immediately but instead defer them until scanning is
-  // done. This is needed to avoid unpredictable work being done by tracing
-  // (e.g. a recursive mutex lock).
-  struct DeferredTraceEvent {
-    base::TimeTicks start_time;
-    base::TimeTicks end_time;
-  };
-
-  // Thread-safe hash-map that maps thread id to scanner events. Doesn't
-  // accumulate events, i.e. every event can only be registered once.
-  template <Context context>
-  class DeferredTraceEventMap final {
-   public:
-    using IdType = StatsCollector::IdType<context>;
-    using PerThreadEvents =
-        std::array<DeferredTraceEvent, static_cast<size_t>(IdType::kNumIds)>;
-    using UnderlyingMap = std::unordered_map<
-        internal::base::PlatformThreadId,
-        PerThreadEvents,
-        std::hash<internal::base::PlatformThreadId>,
-        std::equal_to<>,
-        MetadataAllocator<std::pair<const internal::base::PlatformThreadId,
-                                    PerThreadEvents>>>;
-
-    inline void RegisterBeginEventFromCurrentThread(IdType id);
-    inline void RegisterEndEventFromCurrentThread(IdType id);
-
-    const UnderlyingMap& get_underlying_map_unsafe() const { return events_; }
-
-   private:
-    std::mutex mutex_;
-    UnderlyingMap events_;
-  };
-
-  template <Context context>
-  class Scope final {
-   public:
-    Scope(StatsCollector& stats, IdType<context> type)
-        : stats_(stats), type_(type) {
-      stats_.RegisterBeginEventFromCurrentThread(type);
-    }
-
-    Scope(const Scope&) = delete;
-    Scope& operator=(const Scope&) = delete;
-
-    ~Scope() { stats_.RegisterEndEventFromCurrentThread(type_); }
-
-   private:
-    StatsCollector& stats_;
-    IdType<context> type_;
-  };
-
-  using ScannerScope = Scope<Context::kScanner>;
-  using MutatorScope = Scope<Context::kMutator>;
-
-  StatsCollector(const char* process_name, size_t quarantine_last_size);
-
-  StatsCollector(const StatsCollector&) = delete;
-  StatsCollector& operator=(const StatsCollector&) = delete;
-
-  ~StatsCollector();
-
-  void IncreaseSurvivedQuarantineSize(size_t size) {
-    survived_quarantine_size_.fetch_add(size, std::memory_order_relaxed);
-  }
-  size_t survived_quarantine_size() const {
-    return survived_quarantine_size_.load(std::memory_order_relaxed);
-  }
-
-  void IncreaseSweptSize(size_t size) { swept_size_ += size; }
-  size_t swept_size() const { return swept_size_; }
-
-  void IncreaseDiscardedQuarantineSize(size_t size) {
-    discarded_quarantine_size_ += size;
-  }
-
-  base::TimeDelta GetOverallTime() const;
-  void ReportTracesAndHists(partition_alloc::StatsReporter& reporter) const;
-
- private:
-  using MetadataString =
-      std::basic_string<char, std::char_traits<char>, MetadataAllocator<char>>;
-
-  MetadataString ToUMAString(ScannerId id) const;
-  MetadataString ToUMAString(MutatorId id) const;
-
-  void RegisterBeginEventFromCurrentThread(MutatorId id) {
-    mutator_trace_events_.RegisterBeginEventFromCurrentThread(id);
-  }
-  void RegisterEndEventFromCurrentThread(MutatorId id) {
-    mutator_trace_events_.RegisterEndEventFromCurrentThread(id);
-  }
-  void RegisterBeginEventFromCurrentThread(ScannerId id) {
-    scanner_trace_events_.RegisterBeginEventFromCurrentThread(id);
-  }
-  void RegisterEndEventFromCurrentThread(ScannerId id) {
-    scanner_trace_events_.RegisterEndEventFromCurrentThread(id);
-  }
-
-  template <Context context>
-  base::TimeDelta GetTimeImpl(const DeferredTraceEventMap<context>& event_map,
-                              IdType<context> id) const;
-
-  template <Context context>
-  void ReportTracesAndHistsImpl(
-      partition_alloc::StatsReporter& reporter,
-      const DeferredTraceEventMap<context>& event_map) const;
-
-  void ReportSurvivalRate(partition_alloc::StatsReporter& reporter) const;
-
-  DeferredTraceEventMap<Context::kMutator> mutator_trace_events_;
-  DeferredTraceEventMap<Context::kScanner> scanner_trace_events_;
-
-  std::atomic<size_t> survived_quarantine_size_{0u};
-  size_t swept_size_ = 0u;
-  size_t discarded_quarantine_size_ = 0u;
-  const char* process_name_ = nullptr;
-  const size_t quarantine_last_size_ = 0u;
-};
-
-template <Context context>
-inline void StatsCollector::DeferredTraceEventMap<
-    context>::RegisterBeginEventFromCurrentThread(IdType id) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  const auto tid = base::PlatformThread::CurrentId();
-  const auto now = base::TimeTicks::Now();
-  auto& event_array = events_[tid];
-  auto& event = event_array[static_cast<size_t>(id)];
-  PA_DCHECK(event.start_time.is_null());
-  PA_DCHECK(event.end_time.is_null());
-  event.start_time = now;
-}
-
-template <Context context>
-inline void StatsCollector::DeferredTraceEventMap<
-    context>::RegisterEndEventFromCurrentThread(IdType id) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  const auto tid = base::PlatformThread::CurrentId();
-  const auto now = base::TimeTicks::Now();
-  auto& event_array = events_[tid];
-  auto& event = event_array[static_cast<size_t>(id)];
-  PA_DCHECK(!event.start_time.is_null());
-  PA_DCHECK(event.end_time.is_null());
-  event.end_time = now;
-}
-
-inline StatsCollector::MetadataString StatsCollector::ToUMAString(
-    ScannerId id) const {
-  PA_DCHECK(process_name_);
-  const MetadataString process_name = process_name_;
-  switch (id) {
-    case ScannerId::kClear:
-      return "PA.PCScan." + process_name + ".Scanner.Clear";
-    case ScannerId::kScan:
-      return "PA.PCScan." + process_name + ".Scanner.Scan";
-    case ScannerId::kSweep:
-      return "PA.PCScan." + process_name + ".Scanner.Sweep";
-    case ScannerId::kOverall:
-      return "PA.PCScan." + process_name + ".Scanner";
-    case ScannerId::kNumIds:
-      __builtin_unreachable();
-  }
-}
-
-inline StatsCollector::MetadataString StatsCollector::ToUMAString(
-    MutatorId id) const {
-  PA_DCHECK(process_name_);
-  const MetadataString process_name = process_name_;
-  switch (id) {
-    case MutatorId::kClear:
-      return "PA.PCScan." + process_name + ".Mutator.Clear";
-    case MutatorId::kScanStack:
-      return "PA.PCScan." + process_name + ".Mutator.ScanStack";
-    case MutatorId::kScan:
-      return "PA.PCScan." + process_name + ".Mutator.Scan";
-    case MutatorId::kOverall:
-      return "PA.PCScan." + process_name + ".Mutator";
-    case MutatorId::kNumIds:
-      __builtin_unreachable();
-  }
-}
-
-#undef FOR_ALL_PCSCAN_MUTATOR_SCOPES
-#undef FOR_ALL_PCSCAN_SCANNER_SCOPES
-
-}  // namespace internal
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_
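For context on the header removed above: StatsCollector::Scope is a small RAII helper that records a begin timestamp on construction and an end timestamp on destruction, deferring the actual trace/UMA reporting until after scanning. The fragment below is a self-contained sketch of the same pattern with hypothetical names (std::chrono in place of base::TimeTicks, a plain struct in place of DeferredTraceEventMap); it is illustrative only and not part of this diff.

// scope_timer_sketch.cc -- standalone sketch of the RAII Scope pattern used
// by the removed StatsCollector (hypothetical names).
#include <chrono>
#include <cstdio>

struct DeferredEvent {
  std::chrono::steady_clock::time_point start;
  std::chrono::steady_clock::time_point end;
};

class ScopedPhase {
 public:
  explicit ScopedPhase(DeferredEvent& event) : event_(event) {
    event_.start = std::chrono::steady_clock::now();  // "begin" event
  }
  ScopedPhase(const ScopedPhase&) = delete;
  ScopedPhase& operator=(const ScopedPhase&) = delete;
  ~ScopedPhase() { event_.end = std::chrono::steady_clock::now(); }  // "end"

 private:
  DeferredEvent& event_;
};

int main() {
  DeferredEvent scan_event{};
  {
    ScopedPhase phase(scan_event);
    // ... scanning work would happen here ...
  }
  // Reporting happens later, mirroring how the removed StatsCollector defers
  // tracing until scanning is done.
  const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
      scan_event.end - scan_event.start);
  std::printf("Scanner.Scan: %lld us\n", static_cast<long long>(us.count()));
  return 0;
}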
diff --git a/base/allocator/partition_allocator/starscan/stats_reporter.h b/base/allocator/partition_allocator/starscan/stats_reporter.h
deleted file mode 100644
index 33179d2..0000000
--- a/base/allocator/partition_allocator/starscan/stats_reporter.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/starscan/stats_collector.h"
-
-namespace partition_alloc {
-
-// StatsReporter is a wrapper to invoke TRACE_EVENT_BEGIN/END, TRACE_COUNTER1,
-// and UmaHistogramTimes. It is used just to remove the trace_log and uma
-// dependencies from the partition allocator.
-class StatsReporter {
- public:
-  virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id,
-                                internal::base::PlatformThreadId tid,
-                                int64_t start_time_ticks_internal_value,
-                                int64_t end_time_ticks_internal_value) {}
-  virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id,
-                                internal::base::PlatformThreadId tid,
-                                int64_t start_time_ticks_internal_value,
-                                int64_t end_time_ticks_internal_value) {}
-
-  virtual void ReportSurvivedQuarantineSize(size_t survived_size) {}
-
-  virtual void ReportSurvivedQuarantinePercent(double survived_rate) {}
-
-  virtual void ReportStats(const char* stats_name, int64_t sample_in_usec) {}
-};
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
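The StatsReporter interface above uses default no-op virtuals so the allocator can invoke the reporter unconditionally while embedders override only the hooks they need. A minimal, self-contained sketch of that pattern; class and method names here are illustrative, not the Chromium ones:

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for a reporter interface: default no-op virtuals so
// the allocator can call the reporter unconditionally.
class ExampleStatsReporter {
 public:
  virtual ~ExampleStatsReporter() = default;
  virtual void ReportStats(const char* stats_name, int64_t sample_in_usec) {}
};

// Hypothetical embedder override that forwards samples to stdout instead of
// UMA histograms.
class LoggingStatsReporter : public ExampleStatsReporter {
 public:
  void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
    std::printf("%s: %lld us\n", stats_name,
                static_cast<long long>(sample_in_usec));
  }
};

int main() {
  LoggingStatsReporter reporter;
  ExampleStatsReporter* r = &reporter;  // The allocator would hold this pointer.
  r->ReportStats("PA.PCScan.Browser.Scanner", 1250);
  return 0;
}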
diff --git a/base/allocator/partition_allocator/starscan/write_protector.cc b/base/allocator/partition_allocator/starscan/write_protector.cc
deleted file mode 100644
index dfd30a1..0000000
--- a/base/allocator/partition_allocator/starscan/write_protector.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/starscan/write_protector.h"
-
-#include <mutex>
-#include <thread>
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "build/build_config.h"
-
-#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-#include <fcntl.h>
-#include <linux/userfaultfd.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-
-namespace partition_alloc::internal {
-
-PCScan::ClearType NoWriteProtector::SupportedClearType() const {
-  return PCScan::ClearType::kLazy;
-}
-
-#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-
-namespace {
-void UserFaultFDThread(int uffd) {
-  PA_DCHECK(-1 != uffd);
-
-  static constexpr char kThreadName[] = "PCScanPFHandler";
-  internal::base::PlatformThread::SetName(kThreadName);
-
-  while (true) {
-    // Poll on the uffd descriptor for page fault events.
-    pollfd pollfd{.fd = uffd, .events = POLLIN};
-    const int nready = PA_HANDLE_EINTR(poll(&pollfd, 1, -1));
-    PA_CHECK(-1 != nready);
-
-    // Get page fault info.
-    uffd_msg msg;
-    const int nread = PA_HANDLE_EINTR(read(uffd, &msg, sizeof(msg)));
-    PA_CHECK(0 != nread);
-
-    // We only expect page faults.
-    PA_DCHECK(UFFD_EVENT_PAGEFAULT == msg.event);
-    // We have subscribed only to wp-fault events.
-    PA_DCHECK(UFFD_PAGEFAULT_FLAG_WP & msg.arg.pagefault.flags);
-
-    // Enter the safepoint. Concurrent faulted writes will wait until safepoint
-    // finishes.
-    PCScan::JoinScanIfNeeded();
-  }
-}
-}  // namespace
-
-UserFaultFDWriteProtector::UserFaultFDWriteProtector()
-    : uffd_(syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK)) {
-  if (uffd_ == -1) {
-    PA_LOG(WARNING) << "userfaultfd is not supported by the current kernel";
-    return;
-  }
-
-  PA_PCHECK(-1 != uffd_);
-
-  uffdio_api uffdio_api;
-  uffdio_api.api = UFFD_API;
-  uffdio_api.features = 0;
-  PA_CHECK(-1 != ioctl(uffd_, UFFDIO_API, &uffdio_api));
-  PA_CHECK(UFFD_API == uffdio_api.api);
-
-  // Register the regular pool to listen for uffd events.
-  struct uffdio_register uffdio_register;
-  uffdio_register.range.start = PartitionAddressSpace::RegularPoolBase();
-  uffdio_register.range.len = kPoolMaxSize;
-  uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
-  PA_CHECK(-1 != ioctl(uffd_, UFFDIO_REGISTER, &uffdio_register));
-
-  // Start uffd thread.
-  std::thread(UserFaultFDThread, uffd_).detach();
-}
-
-namespace {
-enum class UserFaultFDWPMode {
-  kProtect,
-  kUnprotect,
-};
-
-void UserFaultFDWPSet(int uffd,
-                      uintptr_t begin,
-                      size_t length,
-                      UserFaultFDWPMode mode) {
-  PA_DCHECK(0 == (begin % SystemPageSize()));
-  PA_DCHECK(0 == (length % SystemPageSize()));
-
-  uffdio_writeprotect wp;
-  wp.range.start = begin;
-  wp.range.len = length;
-  wp.mode =
-      (mode == UserFaultFDWPMode::kProtect) ? UFFDIO_WRITEPROTECT_MODE_WP : 0;
-  PA_PCHECK(-1 != ioctl(uffd, UFFDIO_WRITEPROTECT, &wp));
-}
-}  // namespace
-
-void UserFaultFDWriteProtector::ProtectPages(uintptr_t begin, size_t length) {
-  if (IsSupported()) {
-    UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kProtect);
-  }
-}
-
-void UserFaultFDWriteProtector::UnprotectPages(uintptr_t begin, size_t length) {
-  if (IsSupported()) {
-    UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kUnprotect);
-  }
-}
-
-PCScan::ClearType UserFaultFDWriteProtector::SupportedClearType() const {
-  return IsSupported() ? PCScan::ClearType::kEager : PCScan::ClearType::kLazy;
-}
-
-bool UserFaultFDWriteProtector::IsSupported() const {
-  return uffd_ != -1;
-}
-
-#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-
-}  // namespace partition_alloc::internal
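The userfaultfd flow above (the userfaultfd syscall, the UFFDIO_API handshake, UFFDIO_REGISTER with write-protect mode, a poll/read handler thread, and UFFDIO_WRITEPROTECT to toggle protection) can be exercised in isolation. The following is a minimal Linux-only sketch, assuming a kernel with userfaultfd write-protect support (5.7 or later) and permission to create a userfaultfd; error handling is reduced to aborts and this is not the Chromium implementation:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <thread>

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

namespace {
void CheckOk(bool ok, const char* what) {
  if (!ok) {
    std::perror(what);
    std::abort();
  }
}
}  // namespace

int main() {
  const long page = sysconf(_SC_PAGESIZE);
  char* mem = static_cast<char*>(mmap(nullptr, page, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  CheckOk(mem != MAP_FAILED, "mmap");
  mem[0] = 1;  // Fault the page in before registering it.

  const int uffd =
      static_cast<int>(syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK));
  CheckOk(uffd != -1, "userfaultfd");

  uffdio_api api = {};
  api.api = UFFD_API;
  CheckOk(ioctl(uffd, UFFDIO_API, &api) != -1, "UFFDIO_API");

  uffdio_register reg = {};
  reg.range.start = reinterpret_cast<uintptr_t>(mem);
  reg.range.len = static_cast<uint64_t>(page);
  reg.mode = UFFDIO_REGISTER_MODE_WP;
  CheckOk(ioctl(uffd, UFFDIO_REGISTER, &reg) != -1, "UFFDIO_REGISTER");

  // Write-protect the page: subsequent writes fault and block until the
  // protection is removed.
  uffdio_writeprotect wp = {};
  wp.range = reg.range;
  wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;
  CheckOk(ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) != -1, "protect");

  std::thread handler([&] {
    pollfd pfd{.fd = uffd, .events = POLLIN};
    CheckOk(poll(&pfd, 1, -1) == 1, "poll");
    uffd_msg msg;
    CheckOk(read(uffd, &msg, sizeof(msg)) ==
                static_cast<ssize_t>(sizeof(msg)),
            "read");
    CheckOk(msg.event == UFFD_EVENT_PAGEFAULT, "unexpected event");
    // Clearing the WP bit (mode = 0) also wakes the blocked writer.
    uffdio_writeprotect unprotect = {};
    unprotect.range = reg.range;
    CheckOk(ioctl(uffd, UFFDIO_WRITEPROTECT, &unprotect) != -1, "unprotect");
  });

  mem[0] = 2;  // Blocks until the handler thread removes the protection.
  handler.join();
  std::printf("write-protect fault resolved, mem[0] = %d\n", mem[0]);
  return 0;
}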
diff --git a/base/allocator/partition_allocator/starscan/write_protector.h b/base/allocator/partition_allocator/starscan/write_protector.h
deleted file mode 100644
index dab16b4..0000000
--- a/base/allocator/partition_allocator/starscan/write_protector.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_
-
-#include <cstddef>
-#include <cstdint>
-#include <mutex>
-
-#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/raceful_worklist.h"
-#include "build/build_config.h"
-
-namespace partition_alloc::internal {
-
-// Interface for page protection/unprotection. This is used in DCScan to catch
-// concurrent mutator writes. Protection is done when the scanner starts
-// scanning a range. Unprotection happens at the end of the scanning phase.
-class WriteProtector : public AllocatedOnPCScanMetadataPartition {
- public:
-  virtual ~WriteProtector() = default;
-
-  virtual void ProtectPages(uintptr_t begin, size_t length) = 0;
-  virtual void UnprotectPages(uintptr_t begin, size_t length) = 0;
-
-  virtual bool IsEnabled() const = 0;
-
-  virtual PCScan::ClearType SupportedClearType() const = 0;
-};
-
-class NoWriteProtector final : public WriteProtector {
- public:
-  void ProtectPages(uintptr_t, size_t) final {}
-  void UnprotectPages(uintptr_t, size_t) final {}
-  PCScan::ClearType SupportedClearType() const final;
-  inline bool IsEnabled() const override;
-};
-
-bool NoWriteProtector::IsEnabled() const {
-  return false;
-}
-
-#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-class UserFaultFDWriteProtector final : public WriteProtector {
- public:
-  UserFaultFDWriteProtector();
-
-  UserFaultFDWriteProtector(const UserFaultFDWriteProtector&) = delete;
-  UserFaultFDWriteProtector& operator=(const UserFaultFDWriteProtector&) =
-      delete;
-
-  void ProtectPages(uintptr_t, size_t) final;
-  void UnprotectPages(uintptr_t, size_t) final;
-
-  PCScan::ClearType SupportedClearType() const final;
-
-  inline bool IsEnabled() const override;
-
- private:
-  bool IsSupported() const;
-
-  const int uffd_ = 0;
-};
-
-bool UserFaultFDWriteProtector::IsEnabled() const {
-  return IsSupported();
-}
-#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_
diff --git a/base/allocator/partition_allocator/tagging.cc b/base/allocator/partition_allocator/tagging.cc
deleted file mode 100644
index a1584dc..0000000
--- a/base/allocator/partition_allocator/tagging.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/tagging.h"
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-#include <arm_acle.h>
-#include <asm/hwcap.h>
-#include <sys/auxv.h>
-#include <sys/ifunc.h>
-#include <sys/prctl.h>
-#define PR_SET_TAGGED_ADDR_CTRL 55
-#define PR_GET_TAGGED_ADDR_CTRL 56
-#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
-
-#if BUILDFLAG(IS_LINUX)
-#include <linux/version.h>
-
-// Linux headers already provide these since v5.10.
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
-#define HAS_PR_MTE_MACROS
-#endif
-#endif
-
-#ifndef HAS_PR_MTE_MACROS
-#define PR_MTE_TCF_SHIFT 1
-#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
-#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
-#define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
-#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
-#define PR_MTE_TAG_SHIFT 3
-#define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
-#define HWCAP2_MTE (1 << 18)
-#endif
-#endif
-
-#if BUILDFLAG(IS_ANDROID)
-#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
-#endif  // BUILDFLAG(IS_ANDROID)
-
-namespace partition_alloc {
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-namespace {
-void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
-  if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) {
-    int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
-    PA_CHECK(status == 0);
-  }
-}
-}  // namespace
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  if (m == TagViolationReportingMode::kSynchronous) {
-    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
-                                    (0xfffe << PR_MTE_TAG_SHIFT));
-  } else if (m == TagViolationReportingMode::kAsynchronous) {
-    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC |
-                                    (0xfffe << PR_MTE_TAG_SHIFT));
-  } else {
-    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_NONE);
-  }
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-}
-
-namespace internal {
-
-#if BUILDFLAG(IS_ANDROID)
-void ChangeMemoryTaggingModeForAllThreadsPerProcess(
-    TagViolationReportingMode m) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  // In order to support Android NDK API level below 26, we need to call
-  // mallopt via dynamic linker.
-  // int mallopt(int param, int value);
-  using MalloptSignature = int (*)(int, int);
-
-  static MalloptSignature mallopt_fnptr = []() {
-    base::FilePath module_path;
-    base::NativeLibraryLoadError load_error;
-    base::FilePath library_path = module_path.Append("libc.so");
-    base::NativeLibrary library =
-        base::LoadNativeLibrary(library_path, &load_error);
-    PA_CHECK(library);
-    void* func_ptr =
-        base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
-    PA_CHECK(func_ptr);
-    return reinterpret_cast<MalloptSignature>(func_ptr);
-  }();
-
-  int status = 0;
-  if (m == TagViolationReportingMode::kSynchronous) {
-    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
-                           M_HEAP_TAGGING_LEVEL_SYNC);
-  } else if (m == TagViolationReportingMode::kAsynchronous) {
-    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
-                           M_HEAP_TAGGING_LEVEL_ASYNC);
-  } else {
-    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
-                           M_HEAP_TAGGING_LEVEL_NONE);
-  }
-  PA_CHECK(status);
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-}
-#endif  // BUILDFLAG(IS_ANDROID)
-
-namespace {
-[[maybe_unused]] static bool CheckTagRegionParameters(void* ptr, size_t sz) {
-  // Check that ptr and size are correct for MTE
-  uintptr_t ptr_as_uint = reinterpret_cast<uintptr_t>(ptr);
-  bool ret = (ptr_as_uint % kMemTagGranuleSize == 0) &&
-             (sz % kMemTagGranuleSize == 0) && sz;
-  return ret;
-}
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
-  // Randomly tag a region (MTE-enabled systems only). The first 16-byte
-  // granule is randomly tagged, and all other granules in the region are then
-  // assigned that initial tag via __arm_mte_set_tag.
-  if (!CheckTagRegionParameters(ptr, sz)) {
-    return nullptr;
-  }
-  // __arm_mte_create_random_tag generates a randomly tagged pointer via the
-  // hardware's random number generator, but does not apply it to the memory.
-  char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
-  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
-    // Next, tag the first and all subsequent granules with the random tag.
-    __arm_mte_set_tag(nptr +
-                      i);  // Tag is taken from the top bits of the argument.
-  }
-  return nptr;
-}
-
-void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
-  // Increment a region's tag (MTE-enabled systems only), using the tag of the
-  // first granule.
-  if (!CheckTagRegionParameters(ptr, sz)) {
-    return nullptr;
-  }
-  // Increment ptr's tag.
-  char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
-  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
-    // Apply the tag to the first granule, and all subsequent granules.
-    __arm_mte_set_tag(nptr + i);
-  }
-  return nptr;
-}
-
-void* RemaskVoidPtrForMTE(void* ptr) {
-  if (PA_LIKELY(ptr)) {
-    // Can't look up the tag for a null ptr (segfaults).
-    return __arm_mte_get_tag(ptr);
-  }
-  return nullptr;
-}
-
-void* TagRegionIncrementNoOp(void* ptr, size_t sz) {
-  // Region parameters are checked even on non-MTE systems to check the
-  // intrinsics are used correctly.
-  return ptr;
-}
-
-void* TagRegionRandomlyNoOp(void* ptr, size_t sz, uint64_t mask) {
-  // Verifies a 16-byte aligned tagging granule, size tagging granule (all
-  // architectures).
-  return ptr;
-}
-
-void* RemaskVoidPtrNoOp(void* ptr) {
-  return ptr;
-}
-#endif
-
-}  // namespace
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-using RemaskPtrInternalFn = void*(void* ptr);
-using TagMemoryRangeIncrementInternalFn = void*(void* ptr, size_t size);
-
-using TagMemoryRangeRandomlyInternalFn = void*(void* ptr,
-                                               size_t size,
-                                               uint64_t mask);
-
-extern "C" TagMemoryRangeIncrementInternalFn(
-    *ResolveTagMemoryRangeIncrement(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
-  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
-    return TagRegionIncrementForMTE;
-  }
-  return TagRegionIncrementNoOp;
-}
-
-extern "C" TagMemoryRangeRandomlyInternalFn(
-    *ResolveTagMemoryRandomly(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
-  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
-    return TagRegionRandomlyForMTE;
-  }
-  return TagRegionRandomlyNoOp;
-}
-
-extern "C" RemaskPtrInternalFn(
-    *ResolveRemaskPointer(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
-  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
-    return RemaskVoidPtrForMTE;
-  }
-  return RemaskVoidPtrNoOp;
-}
-
-void* TagMemoryRangeIncrementInternal(void* ptr, size_t size)
-    __attribute__((ifunc("ResolveTagMemoryRangeIncrement")));
-void* TagMemoryRangeRandomlyInternal(void* ptr, size_t size, uint64_t mask)
-    __attribute__((ifunc("ResolveTagMemoryRandomly")));
-void* RemaskPointerInternal(void* ptr)
-    __attribute__((ifunc("ResolveRemaskPointer")));
-#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-TagViolationReportingMode GetMemoryTaggingModeForCurrentThread() {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  base::CPU cpu;
-  if (!cpu.has_mte()) {
-    return TagViolationReportingMode::kUndefined;
-  }
-  int status = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
-  PA_CHECK(status >= 0);
-  // Check for Asynchronous first because ASYNC on Android sets both
-  // PR_MTE_TCF_ASYNC and PR_MTE_TCF_SYNC bits.
-  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_ASYNC)) {
-    return TagViolationReportingMode::kAsynchronous;
-  }
-  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_SYNC)) {
-    return TagViolationReportingMode::kSynchronous;
-  }
-  return TagViolationReportingMode::kDisabled;
-#else
-  return TagViolationReportingMode::kUndefined;
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-}
-
-}  // namespace internal
-
-}  // namespace partition_alloc
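For reference, the prctl interface used above can also be queried from a standalone program. Below is a small AArch64/Linux sketch that detects MTE via HWCAP2 and reads back the current tag-check-fault mode; the fallback constants mirror the ones defined above for older headers, and the program is illustrative only:

#include <cstdio>

#include <sys/auxv.h>
#include <sys/prctl.h>

#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT 1
#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)
#endif

int main() {
  if (!(getauxval(AT_HWCAP2) & HWCAP2_MTE)) {
    std::puts("MTE is not available on this CPU/kernel");
    return 0;
  }
  const int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  if (ctrl < 0) {
    std::perror("prctl");
    return 1;
  }
  // Async is checked first, mirroring the logic above: Android sets both the
  // SYNC and ASYNC bits when asynchronous mode is requested.
  if (!(ctrl & PR_TAGGED_ADDR_ENABLE)) {
    std::puts("tagged addressing: disabled");
  } else if (ctrl & PR_MTE_TCF_ASYNC) {
    std::puts("tag-check faults: asynchronous");
  } else if (ctrl & PR_MTE_TCF_SYNC) {
    std::puts("tag-check faults: synchronous");
  } else {
    std::puts("tag-check faults: none");
  }
  return 0;
}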
diff --git a/base/allocator/partition_allocator/tagging.h b/base/allocator/partition_allocator/tagging.h
deleted file mode 100644
index 1efbda2..0000000
--- a/base/allocator/partition_allocator/tagging.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_TAGGING_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_TAGGING_H_
-
-// This file contains method definitions to support Armv8.5-A's memory tagging
-// extension.
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-// Enum configures Arm's MTE extension to operate in different modes
-enum class TagViolationReportingMode {
-  // Default settings
-  kUndefined,
-  // MTE explicitly disabled.
-  kDisabled,
-  // Precise tag violation reports, higher overhead. Good for unittests
-  // and security critical threads.
-  kSynchronous,
-  // Imprecise tag violation reports (async mode). Lower overhead.
-  kAsynchronous,
-};
-
-// Changes the memory tagging mode for the calling thread.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode);
-
-namespace internal {
-
-constexpr int kMemTagGranuleSize = 16u;
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-constexpr uint64_t kPtrTagMask = 0xff00000000000000uLL;
-#else
-constexpr uint64_t kPtrTagMask = 0;
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-constexpr uint64_t kPtrUntagMask = ~kPtrTagMask;
-
-#if BUILDFLAG(IS_ANDROID)
-// Changes the memory tagging mode for all threads in the current process.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void ChangeMemoryTaggingModeForAllThreadsPerProcess(TagViolationReportingMode);
-#endif
-
-// Gets the memory tagging mode for the calling thread. Returns kUndefined if
-// MTE support is not available.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-TagViolationReportingMode GetMemoryTaggingModeForCurrentThread();
-
-// These forward-defined functions do not really exist in tagging.cc, they're
-// resolved by the dynamic linker to MTE-capable versions on the right hardware.
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* TagMemoryRangeIncrementInternal(void* ptr, size_t size);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* TagMemoryRangeRandomlyInternal(void* ptr, size_t size, uint64_t mask);
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void* RemaskPointerInternal(void* ptr);
-#endif
-
-// Increments the tag of the memory range ptr. Useful for provable revocations
-// (e.g. free). Returns the pointer with the new tag. Ensures that the entire
-// range is set to the same tag.
-PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(void* ptr, size_t size) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  return TagMemoryRangeIncrementInternal(ptr, size);
-#else
-  return ptr;
-#endif
-}
-
-PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(uintptr_t address, size_t size) {
-  return TagMemoryRangeIncrement(reinterpret_cast<void*>(address), size);
-}
-
-// Randomly changes the tag of the ptr memory range. Useful for initial random
-// initialization. Returns the pointer with the new tag. Ensures that the entire
-// range is set to the same tag.
-PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t address,
-                                              size_t size,
-                                              uint64_t mask = 0u) {
-  void* ptr = reinterpret_cast<void*>(address);
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  return reinterpret_cast<void*>(
-      TagMemoryRangeRandomlyInternal(ptr, size, mask));
-#else
-  return ptr;
-#endif
-}
-
-// Gets a version of ptr that's safe to dereference.
-template <typename T>
-PA_ALWAYS_INLINE T* TagPtr(T* ptr) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  return reinterpret_cast<T*>(RemaskPointerInternal(ptr));
-#else
-  return ptr;
-#endif
-}
-
-// Gets a version of |address| that's safe to dereference, and casts to a
-// pointer.
-PA_ALWAYS_INLINE void* TagAddr(uintptr_t address) {
-  return TagPtr(reinterpret_cast<void*>(address));
-}
-
-// Strips the tag bits off |address|.
-PA_ALWAYS_INLINE uintptr_t UntagAddr(uintptr_t address) {
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-  return address & internal::kPtrUntagMask;
-#else
-  return address;
-#endif
-}
-
-}  // namespace internal
-
-// Strips the tag bits off |ptr|.
-template <typename T>
-PA_ALWAYS_INLINE uintptr_t UntagPtr(T* ptr) {
-  return internal::UntagAddr(reinterpret_cast<uintptr_t>(ptr));
-}
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_TAGGING_H_
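The UntagAddr()/UntagPtr() helpers above reduce to masking off the top byte, where the logical MTE tag lives. A tiny sketch of that arithmetic with a hypothetical address value:

#include <cstdint>
#include <cstdio>

// Top-byte tag mask, matching the kPtrTagMask value in the header above.
constexpr uint64_t kTagMask = 0xff00000000000000uLL;
constexpr uint64_t kUntagMask = ~kTagMask;

int main() {
  // Hypothetical MTE-tagged address: tag 0x0a in the top byte.
  const uint64_t tagged = 0x0a007fffdeadbee0uLL;
  const uint64_t untagged = tagged & kUntagMask;
  std::printf("tagged   = 0x%016llx\n",
              static_cast<unsigned long long>(tagged));
  std::printf("untagged = 0x%016llx\n",
              static_cast<unsigned long long>(untagged));
  return 0;
}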
diff --git a/base/allocator/partition_allocator/tagging_unittest.cc b/base/allocator/partition_allocator/tagging_unittest.cc
deleted file mode 100644
index 5c25a46..0000000
--- a/base/allocator/partition_allocator/tagging_unittest.cc
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/tagging.h"
-
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace partition_alloc::internal {
-
-// Check whether we can call the tagging intrinsics safely on all architectures.
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeRandomlySafe) {
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize, 0u);
-  EXPECT_TRUE(bufferp);
-  int* buffer0 = static_cast<int*>(bufferp);
-  *buffer0 = 42;
-  EXPECT_EQ(42, *buffer0);
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementSafe) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize);
-  EXPECT_TRUE(bufferp);
-  int* buffer0 = static_cast<int*>(bufferp);
-  *buffer0 = 42;
-  EXPECT_EQ(42, *buffer0);
-  if (cpu.has_mte()) {
-    EXPECT_NE(bufferp, reinterpret_cast<void*>(buffer));
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-#if defined(ARCH_CPU_64_BITS)
-// Size / alignment constraints are only enforced on 64-bit architectures.
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeBadSz) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp =
-      TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize - 1, 0u);
-  if (cpu.has_mte()) {
-    EXPECT_FALSE(bufferp);
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeRandomlyNoSz) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeRandomly(buffer, 0, 0u);
-  if (cpu.has_mte()) {
-    EXPECT_FALSE(bufferp);
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeRandomlyBadAlign) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp =
-      TagMemoryRangeRandomly(buffer - 1, 4 * kMemTagGranuleSize, 0u);
-  if (cpu.has_mte()) {
-    EXPECT_FALSE(bufferp);
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementBadSz) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize - 1);
-  if (cpu.has_mte()) {
-    EXPECT_FALSE(bufferp);
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementNoSz) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer, 0);
-  if (cpu.has_mte()) {
-    EXPECT_FALSE(bufferp);
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-
-TEST(PartitionAllocMemoryTaggingTest, TagMemoryRangeIncrementBadAlign) {
-  base::CPU cpu;
-  uintptr_t buffer =
-      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageAccessibilityConfiguration(
-                     PageAccessibilityConfiguration::kReadWriteTagged),
-                 PageTag::kChromium);
-  EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer - 1, 4 * kMemTagGranuleSize);
-  if (cpu.has_mte()) {
-    EXPECT_FALSE(bufferp);
-  }
-  FreePages(buffer, PageAllocationGranularity());
-}
-#endif  // defined(ARCH_CPU_64_BITS)
-
-#if PA_CONFIG(HAS_MEMORY_TAGGING)
-#if BUILDFLAG(IS_ANDROID)
-TEST(PartitionAllocMemoryTaggingTest,
-     ChangeMemoryTaggingModeForAllThreadsPerProcess) {
-  base::CPU cpu;
-  // If the underlying platform does not support MTE, skip this test to avoid
-  // hiding failures.
-  if (!cpu.has_mte()) {
-    GTEST_SKIP();
-  }
-
-  // The mode should be set to synchronous on startup by AndroidManifest.xml
-  // for base_unittests.
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kSynchronous);
-
-  // Skip changing to kDisabled, because scudo does not support enabling MTE
-  // once it is disabled.
-  ChangeMemoryTaggingModeForAllThreadsPerProcess(
-      TagViolationReportingMode::kAsynchronous);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kAsynchronous);
-  ChangeMemoryTaggingModeForAllThreadsPerProcess(
-      TagViolationReportingMode::kSynchronous);
-  // End with mode changed back to synchronous.
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kSynchronous);
-}
-#endif  // BUILDFLAG(IS_ANDROID)
-
-TEST(PartitionAllocMemoryTaggingTest, ChangeMemoryTaggingModeForCurrentThread) {
-  base::CPU cpu;
-  // If the underlying platform does not support MTE, skip this test to avoid
-  // hiding failures.
-  if (!cpu.has_mte()) {
-    GTEST_SKIP();
-  }
-
-  TagViolationReportingMode original_mode =
-      GetMemoryTaggingModeForCurrentThread();
-
-  ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode::kDisabled);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kDisabled);
-  ChangeMemoryTaggingModeForCurrentThread(
-      TagViolationReportingMode::kSynchronous);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kSynchronous);
-  ChangeMemoryTaggingModeForCurrentThread(
-      TagViolationReportingMode::kAsynchronous);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kAsynchronous);
-  ChangeMemoryTaggingModeForCurrentThread(
-      TagViolationReportingMode::kSynchronous);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kSynchronous);
-  ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode::kDisabled);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kDisabled);
-  ChangeMemoryTaggingModeForCurrentThread(
-      TagViolationReportingMode::kAsynchronous);
-  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
-            TagViolationReportingMode::kAsynchronous);
-
-  // Restore mode to original.
-  ChangeMemoryTaggingModeForCurrentThread(original_mode);
-}
-#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
-
-}  // namespace partition_alloc::internal
diff --git a/base/allocator/partition_allocator/thread_cache.cc b/base/allocator/partition_allocator/thread_cache.cc
deleted file mode 100644
index b057aeb..0000000
--- a/base/allocator/partition_allocator/thread_cache.cc
+++ /dev/null
@@ -1,811 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/thread_cache.h"
-
-#include <sys/types.h>
-
-#include <algorithm>
-#include <atomic>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "build/build_config.h"
-
-namespace partition_alloc {
-
-namespace {
-ThreadCacheRegistry g_instance;
-}  // namespace
-
-namespace tools {
-uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize] = {
-    kNeedle1, reinterpret_cast<uintptr_t>(&g_instance),
-#if BUILDFLAG(RECORD_ALLOC_INFO)
-    reinterpret_cast<uintptr_t>(&internal::g_allocs),
-#else
-    0,
-#endif
-    kNeedle2};
-}  // namespace tools
-
-namespace internal {
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionTlsKey g_thread_cache_key;
-#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-thread_local ThreadCache* g_thread_cache;
-#endif
-
-}  // namespace internal
-
-namespace {
-// Since |g_thread_cache_key| is shared, make sure that no more than one
-// PartitionRoot can use it.
-static std::atomic<PartitionRoot*> g_thread_cache_root;
-
-#if BUILDFLAG(IS_WIN)
-void OnDllProcessDetach() {
-  // Very late allocations do occur (see crbug.com/1159411#c7 for instance),
-  // including during CRT teardown. This is problematic for the thread cache,
-  // which, for instance, relies on the CRT for TLS access. This cannot be
-  // mitigated inside the thread cache (since getting to it requires querying
-  // TLS), but the PartitionRoot associated with the thread cache can be made
-  // to not use the thread cache anymore.
-  g_thread_cache_root.load(std::memory_order_relaxed)
-      ->settings.with_thread_cache = false;
-}
-#endif
-
-static bool g_thread_cache_key_created = false;
-}  // namespace
-
-constexpr internal::base::TimeDelta ThreadCacheRegistry::kMinPurgeInterval;
-constexpr internal::base::TimeDelta ThreadCacheRegistry::kMaxPurgeInterval;
-constexpr internal::base::TimeDelta ThreadCacheRegistry::kDefaultPurgeInterval;
-constexpr size_t ThreadCacheRegistry::kMinCachedMemoryForPurging;
-uint8_t ThreadCache::global_limits_[ThreadCache::kBucketCount];
-
-// Start with the normal size, not the maximum one.
-uint16_t ThreadCache::largest_active_bucket_index_ =
-    internal::BucketIndexLookup::GetIndex(ThreadCache::kDefaultSizeThreshold);
-
-// static
-ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
-  return g_instance;
-}
-
-void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
-  internal::ScopedGuard scoped_locker(GetLock());
-  cache->next_ = nullptr;
-  cache->prev_ = nullptr;
-
-  ThreadCache* previous_head = list_head_;
-  list_head_ = cache;
-  cache->next_ = previous_head;
-  if (previous_head) {
-    previous_head->prev_ = cache;
-  }
-}
-
-void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
-  internal::ScopedGuard scoped_locker(GetLock());
-  if (cache->prev_) {
-    cache->prev_->next_ = cache->next_;
-  }
-  if (cache->next_) {
-    cache->next_->prev_ = cache->prev_;
-  }
-  if (cache == list_head_) {
-    list_head_ = cache->next_;
-  }
-}
-
-void ThreadCacheRegistry::DumpStats(bool my_thread_only,
-                                    ThreadCacheStats* stats) {
-  ThreadCache::EnsureThreadSpecificDataInitialized();
-  memset(reinterpret_cast<void*>(stats), 0, sizeof(ThreadCacheStats));
-
-  internal::ScopedGuard scoped_locker(GetLock());
-  if (my_thread_only) {
-    auto* tcache = ThreadCache::Get();
-    if (!ThreadCache::IsValid(tcache)) {
-      return;
-    }
-    tcache->AccumulateStats(stats);
-  } else {
-    ThreadCache* tcache = list_head_;
-    while (tcache) {
-      // Racy, as other threads are still allocating. This is not an issue,
-      // since we are only interested in statistics. However, this means that
-      // count is not necessarily equal to hits + misses for the various types
-      // of events.
-      tcache->AccumulateStats(stats);
-      tcache = tcache->next_;
-    }
-  }
-}
-
-void ThreadCacheRegistry::PurgeAll() {
-  auto* current_thread_tcache = ThreadCache::Get();
-
-  // This may take a while; don't hold the lock while purging.
-  //
-  // In most cases, the current thread is more important than other ones. For
-  // instance in renderers, it is the main thread. It is also the only thread
-  // that we can synchronously purge.
-  //
-  // The reason why we trigger the purge for this one first is that assuming
-  // that all threads are allocating memory, they will start purging
-  // concurrently in the loop below. This will then make them all contend with
-  // the main thread for the partition lock, since it is acquired/released once
-  // per bucket. By purging the main thread first, we avoid these interferences
-  // for this thread at least.
-  if (ThreadCache::IsValid(current_thread_tcache)) {
-    current_thread_tcache->Purge();
-  }
-
-  {
-    internal::ScopedGuard scoped_locker(GetLock());
-    ThreadCache* tcache = list_head_;
-    while (tcache) {
-      PA_DCHECK(ThreadCache::IsValid(tcache));
-      // Cannot purge directly, need to ask the other thread to purge "at some
-      // point".
-      // Note that this will not work if the other thread is sleeping forever.
-      // TODO(lizeb): Handle sleeping threads.
-      if (tcache != current_thread_tcache) {
-        tcache->SetShouldPurge();
-      }
-      tcache = tcache->next_;
-    }
-  }
-}
-
-void ThreadCacheRegistry::ForcePurgeAllThreadAfterForkUnsafe() {
-  internal::ScopedGuard scoped_locker(GetLock());
-  ThreadCache* tcache = list_head_;
-  while (tcache) {
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-    // Before fork(), locks are acquired in the parent process. This means that
-    // a concurrent allocation in the parent which must be filled by the central
-    // allocator (i.e. the thread cache bucket is empty) will block inside the
-    // thread cache waiting for the lock to be released.
-    //
-    // In the child process, this allocation will never complete since this
-    // thread will not be resumed. However, calling |Purge()| triggers the
-    // reentrancy guard since the parent process thread was suspended from
-    // within the thread cache.
-    // Clear the guard to prevent this from crashing.
-    tcache->is_in_thread_cache_ = false;
-#endif
-    // There is a PA_DCHECK() in code called from |Purge()| checking that thread
-    // cache memory accounting is correct. Since we are after fork() and the
-    // other threads got interrupted mid-flight, this guarantee does not hold,
-    // and we get inconsistent results.  Rather than giving up on checking this
-    // invariant in regular code, reset it here so that the PA_DCHECK()
-    // passes. See crbug.com/1216964.
-    tcache->cached_memory_ = tcache->CachedMemory();
-
-    // At this point, we should call |TryPurge|. However, due to the thread
-    // cache being possibly inconsistent at this point, this may crash. Rather
-    // than crash, we'd prefer to simply not purge, even though this may leak
-    // memory in some cases.
-    //
-    // see crbug.com/1289092 for details of the crashes.
-
-    tcache = tcache->next_;
-  }
-}
-
-void ThreadCacheRegistry::SetLargestActiveBucketIndex(
-    uint8_t largest_active_bucket_index) {
-  largest_active_bucket_index_ = largest_active_bucket_index;
-}
-
-void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
-  // Two steps:
-  // - Set the global limits, which will affect newly created threads.
-  // - Enumerate all thread caches and set the limit to the global one.
-  {
-    internal::ScopedGuard scoped_locker(GetLock());
-    ThreadCache* tcache = list_head_;
-
-    // This may be called before *any* thread cache has serviced *any*
-    // allocation, which can happen in tests, and in theory in non-test code
-    // as well.
-    if (!tcache) {
-      return;
-    }
-
-    // Setting the global limit while locked, because we need |tcache->root_|.
-    ThreadCache::SetGlobalLimits(tcache->root_, multiplier);
-
-    while (tcache) {
-      PA_DCHECK(ThreadCache::IsValid(tcache));
-      for (int index = 0; index < ThreadCache::kBucketCount; index++) {
-        // This is racy, but we don't care if the limit is enforced later, and
-        // we really want to avoid atomic instructions on the fast path.
-        tcache->buckets_[index].limit.store(ThreadCache::global_limits_[index],
-                                            std::memory_order_relaxed);
-      }
-
-      tcache = tcache->next_;
-    }
-  }
-}
-
-void ThreadCacheRegistry::RunPeriodicPurge() {
-  if (!periodic_purge_is_initialized_) {
-    ThreadCache::EnsureThreadSpecificDataInitialized();
-    periodic_purge_is_initialized_ = true;
-  }
-
-  // Summing across all threads can be slow, but is necessary. Otherwise we rely
-  // on the assumption that the current thread is a good proxy for overall
-  // allocation activity. This is not the case for all process types.
-  //
-  // Since there is no synchronization with other threads, the value is stale,
-  // which is fine.
-  size_t cached_memory_approx = 0;
-  {
-    internal::ScopedGuard scoped_locker(GetLock());
-    ThreadCache* tcache = list_head_;
-    // Can run when there is no thread cache, in which case there is nothing to
-    // do, and the task should not be rescheduled. This would typically indicate
-    // a case where the thread cache was never enabled, or got disabled.
-    if (!tcache) {
-      return;
-    }
-
-    while (tcache) {
-      cached_memory_approx += tcache->cached_memory_;
-      tcache = tcache->next_;
-    }
-  }
-
-  // If cached memory is low, this means that either memory footprint is fine,
-  // or the process is mostly idle, and not allocating much since the last
-  // purge. In this case, back off. On the other hand, if there is a lot of
-  // cached memory, make purge more frequent, but always within a set frequency
-  // range.
-  //
-  // There is a potential drawback: a process that was idle for a long time and
-  // suddenly becomes very active will take some time to go back to regularly
-  // scheduled purge with a small enough interval. This is the case, for
-  // instance, for a renderer moving to the foreground. To mitigate that, if
-  // the jump in cached memory is very large, make a greater leap to faster
-  // purging.
-  if (cached_memory_approx > 10 * kMinCachedMemoryForPurging) {
-    periodic_purge_next_interval_ =
-        std::min(kDefaultPurgeInterval, periodic_purge_next_interval_ / 2);
-  } else if (cached_memory_approx > 2 * kMinCachedMemoryForPurging) {
-    periodic_purge_next_interval_ =
-        std::max(kMinPurgeInterval, periodic_purge_next_interval_ / 2);
-  } else if (cached_memory_approx < kMinCachedMemoryForPurging) {
-    periodic_purge_next_interval_ =
-        std::min(kMaxPurgeInterval, periodic_purge_next_interval_ * 2);
-  }
-
-  // Make sure that the next interval is in the right bounds. Even though the
-  // logic above should eventually converge to a reasonable interval, if a
-  // sleeping background thread holds onto a large amount of cached memory, then
-  // |PurgeAll()| will not free any memory from it, and the first branch above
-  // can be taken repeatedly until the interval gets very small, as the amount
-  // of cached memory cannot change between calls (since we do not purge
-  // background threads, but only ask them to purge their own cache at the next
-  // allocation).
-  periodic_purge_next_interval_ = std::clamp(
-      periodic_purge_next_interval_, kMinPurgeInterval, kMaxPurgeInterval);
-
-  PurgeAll();
-}
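The adaptive interval logic above can be summarized as: halve the interval when cached memory is high (with a larger leap back toward the default for very large amounts), double it when the process looks idle, and clamp the result to fixed bounds. Below is a self-contained sketch of that policy using std::chrono and illustrative stand-in constants, not the production thresholds:

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdio>

using std::chrono::seconds;

// Illustrative stand-ins for the bounds and threshold used above.
constexpr seconds kMinPurgeInterval{1};
constexpr seconds kDefaultPurgeInterval{5};
constexpr seconds kMaxPurgeInterval{60};
constexpr size_t kMinCachedMemoryForPurging = 500 * 1024;

seconds NextPurgeInterval(seconds current, size_t cached_memory) {
  if (cached_memory > 10 * kMinCachedMemoryForPurging) {
    // Very large amount of cached memory: leap back toward the default.
    current = std::min(kDefaultPurgeInterval, current / 2);
  } else if (cached_memory > 2 * kMinCachedMemoryForPurging) {
    // A lot of cached memory: purge more frequently.
    current = std::max(kMinPurgeInterval, current / 2);
  } else if (cached_memory < kMinCachedMemoryForPurging) {
    // Little cached memory, likely idle: back off.
    current = std::min(kMaxPurgeInterval, current * 2);
  }
  return std::clamp(current, kMinPurgeInterval, kMaxPurgeInterval);
}

int main() {
  seconds interval = kDefaultPurgeInterval;
  const size_t samples[] = {100 * 1024, 100 * 1024, 8 * 1024 * 1024,
                            600 * 1024};
  for (size_t cached : samples) {
    interval = NextPurgeInterval(interval, cached);
    std::printf("cached=%zu bytes -> next purge in %lld s\n", cached,
                static_cast<long long>(interval.count()));
  }
  return 0;
}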
-
-int64_t ThreadCacheRegistry::GetPeriodicPurgeNextIntervalInMicroseconds()
-    const {
-  return periodic_purge_next_interval_.InMicroseconds();
-}
-
-void ThreadCacheRegistry::ResetForTesting() {
-  periodic_purge_next_interval_ = kDefaultPurgeInterval;
-}
-
-// static
-void ThreadCache::EnsureThreadSpecificDataInitialized() {
-  // Using the registry lock to protect from concurrent initialization without
-  // adding a special-purpose lock.
-  internal::ScopedGuard scoped_locker(
-      ThreadCacheRegistry::Instance().GetLock());
-  if (g_thread_cache_key_created) {
-    return;
-  }
-
-  bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
-  PA_CHECK(ok);
-  g_thread_cache_key_created = true;
-}
-
-// static
-void ThreadCache::DeleteForTesting(ThreadCache* tcache) {
-  ThreadCache::Delete(tcache);
-}
-
-// static
-void ThreadCache::SwapForTesting(PartitionRoot* root) {
-  auto* old_tcache = ThreadCache::Get();
-  g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
-  if (old_tcache) {
-    ThreadCache::DeleteForTesting(old_tcache);
-  }
-  if (root) {
-    Init(root);
-    Create(root);
-  } else {
-#if BUILDFLAG(IS_WIN)
-    // OnDllProcessDetach accesses g_thread_cache_root which is nullptr now.
-    internal::PartitionTlsSetOnDllProcessDetach(nullptr);
-#endif
-  }
-}
-
-// static
-void ThreadCache::RemoveTombstoneForTesting() {
-  PA_CHECK(IsTombstone(Get()));
-  internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
-}
-
-// static
-void ThreadCache::Init(PartitionRoot* root) {
-#if BUILDFLAG(IS_NACL)
-  static_assert(false, "PartitionAlloc isn't supported for NaCl");
-#endif
-  PA_CHECK(root->buckets[kBucketCount - 1].slot_size ==
-           ThreadCache::kLargeSizeThreshold);
-  PA_CHECK(root->buckets[largest_active_bucket_index_].slot_size ==
-           ThreadCache::kDefaultSizeThreshold);
-
-  EnsureThreadSpecificDataInitialized();
-
-  // Make sure that only one PartitionRoot wants a thread cache.
-  PartitionRoot* expected = nullptr;
-  if (!g_thread_cache_root.compare_exchange_strong(expected, root,
-                                                   std::memory_order_seq_cst,
-                                                   std::memory_order_seq_cst)) {
-    PA_CHECK(false)
-        << "Only one PartitionRoot is allowed to have a thread cache";
-  }
-
-#if BUILDFLAG(IS_WIN)
-  internal::PartitionTlsSetOnDllProcessDetach(OnDllProcessDetach);
-#endif
-
-  SetGlobalLimits(root, kDefaultMultiplier);
-}
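The compare_exchange_strong above implements a simple claim-once pattern: the first PartitionRoot installs itself, and any later root fails the exchange. A minimal sketch of the same pattern with illustrative types:

#include <atomic>
#include <cassert>

struct ExampleRoot {};

std::atomic<ExampleRoot*> g_claimed_root{nullptr};

// Returns true only for the first root that claims the (single) thread cache.
bool ClaimThreadCache(ExampleRoot* root) {
  ExampleRoot* expected = nullptr;
  return g_claimed_root.compare_exchange_strong(
      expected, root, std::memory_order_seq_cst, std::memory_order_seq_cst);
}

int main() {
  ExampleRoot a, b;
  const bool first = ClaimThreadCache(&a);
  const bool second = ClaimThreadCache(&b);
  assert(first && !second);  // Only one root may own the thread cache.
  (void)first;
  (void)second;
  return 0;
}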
-
-// static
-void ThreadCache::SetGlobalLimits(PartitionRoot* root, float multiplier) {
-  size_t initial_value =
-      static_cast<size_t>(kSmallBucketBaseCount) * multiplier;
-
-  for (int index = 0; index < kBucketCount; index++) {
-    const auto& root_bucket = root->buckets[index];
-    // Invalid bucket.
-    if (!root_bucket.active_slot_spans_head) {
-      global_limits_[index] = 0;
-      continue;
-    }
-
-    // Smaller allocations are more frequent, and more performance-sensitive.
-    // Cache more small objects, and fewer larger ones, to save memory.
-    size_t slot_size = root_bucket.slot_size;
-    size_t value;
-    if (slot_size <= 128) {
-      value = initial_value;
-    } else if (slot_size <= 256) {
-      value = initial_value / 2;
-    } else if (slot_size <= 512) {
-      value = initial_value / 4;
-    } else {
-      value = initial_value / 8;
-    }
-
-    // Bare minimum so that malloc() / free() in a loop will not hit the central
-    // allocator each time.
-    constexpr size_t kMinLimit = 1;
-    // |PutInBucket()| is called on a full bucket, which should not overflow.
-    constexpr size_t kMaxLimit = std::numeric_limits<uint8_t>::max() - 1;
-    global_limits_[index] =
-        static_cast<uint8_t>(std::clamp(value, kMinLimit, kMaxLimit));
-    PA_DCHECK(global_limits_[index] >= kMinLimit);
-    PA_DCHECK(global_limits_[index] <= kMaxLimit);
-  }
-}
-
-// static
-void ThreadCache::SetLargestCachedSize(size_t size) {
-  if (size > ThreadCache::kLargeSizeThreshold) {
-    size = ThreadCache::kLargeSizeThreshold;
-  }
-  largest_active_bucket_index_ = PartitionRoot::SizeToBucketIndex(
-      size, PartitionRoot::BucketDistribution::kNeutral);
-  PA_CHECK(largest_active_bucket_index_ < kBucketCount);
-  ThreadCacheRegistry::Instance().SetLargestActiveBucketIndex(
-      largest_active_bucket_index_);
-}
-
-// static
-ThreadCache* ThreadCache::Create(PartitionRoot* root) {
-  PA_CHECK(root);
-  // See the comment in thread_cache.h; this is used to make sure
-  // kThreadCacheNeedleArray is kept in the final binary.
-  PA_CHECK(tools::kThreadCacheNeedleArray[0] == tools::kNeedle1);
-
-  // Placement new and RawAlloc() are used, as otherwise when this partition is
-  // the malloc() implementation, the memory allocated for the new thread cache
-  // would make this code reentrant.
-  //
-  // This also means that deallocation must use RawFreeStatic(), hence the
-  // operator delete() implementation below.
-  size_t raw_size = root->AdjustSizeForExtrasAdd(sizeof(ThreadCache));
-  size_t usable_size;
-  bool already_zeroed;
-
-  auto* bucket = root->buckets + PartitionRoot::SizeToBucketIndex(
-                                     raw_size, root->GetBucketDistribution());
-  uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
-                                    internal::PartitionPageSize(), &usable_size,
-                                    &already_zeroed);
-  ThreadCache* tcache =
-      new (internal::SlotStartAddr2Ptr(buffer)) ThreadCache(root);
-
-  // This may allocate.
-  internal::PartitionTlsSet(internal::g_thread_cache_key, tcache);
-#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
-  // |thread_local| variables with destructors cause issues on some platforms.
-  // Since we need a destructor (to empty the thread cache), we cannot use it
-  // directly. However, TLS accesses with |thread_local| are typically faster,
-  // as it can turn into a fixed offset load from a register (GS/FS on Linux
-  // x86, for instance). On Windows, saving/restoring the last error increases
-  // cost as well.
-  //
-  // To still get good performance, use |thread_local| to store a raw pointer,
-  // and rely on the platform TLS to call the destructor.
-  internal::g_thread_cache = tcache;
-#endif  // PA_CONFIG(THREAD_CACHE_FAST_TLS)
-
-  return tcache;
-}
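The TLS arrangement described in the comment above, a trivially-destructible thread_local pointer for fast access plus a platform TLS key whose destructor tears the cache down at thread exit, looks roughly like the following sketch; all names are illustrative and this is not the PartitionAlloc code:

#include <cstdio>
#include <pthread.h>

struct ExampleCache {
  int hits = 0;
};

namespace {

pthread_key_t g_cache_key;
// Fast path: a plain thread_local pointer, trivially destructible.
thread_local ExampleCache* g_cache = nullptr;

// Slow path: the pthread key destructor runs at thread exit and frees the
// cache, so the thread_local itself never needs a destructor.
void DestroyCache(void* ptr) {
  delete static_cast<ExampleCache*>(ptr);
}

ExampleCache* GetCache() {
  if (!g_cache) {
    static pthread_once_t once = PTHREAD_ONCE_INIT;
    pthread_once(&once, [] { pthread_key_create(&g_cache_key, DestroyCache); });
    g_cache = new ExampleCache();
    pthread_setspecific(g_cache_key, g_cache);
  }
  return g_cache;
}

}  // namespace

void* Worker(void*) {
  GetCache()->hits++;
  std::printf("hits on this thread: %d\n", GetCache()->hits);
  return nullptr;
}

int main() {
  pthread_t thread;
  pthread_create(&thread, nullptr, Worker, nullptr);
  pthread_join(thread, nullptr);
  return 0;
}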
-
-ThreadCache::ThreadCache(PartitionRoot* root)
-    : should_purge_(false),
-      root_(root),
-      thread_id_(internal::base::PlatformThread::CurrentId()),
-      next_(nullptr),
-      prev_(nullptr) {
-  ThreadCacheRegistry::Instance().RegisterThreadCache(this);
-
-  memset(&stats_, 0, sizeof(stats_));
-
-  for (int index = 0; index < kBucketCount; index++) {
-    const auto& root_bucket = root->buckets[index];
-    Bucket* tcache_bucket = &buckets_[index];
-    tcache_bucket->freelist_head = nullptr;
-    tcache_bucket->count = 0;
-    tcache_bucket->limit.store(global_limits_[index],
-                               std::memory_order_relaxed);
-
-    tcache_bucket->slot_size = root_bucket.slot_size;
-    // Invalid bucket.
-    if (!root_bucket.is_valid()) {
-      // Explicitly set this, as size computations iterate over all buckets.
-      tcache_bucket->limit.store(0, std::memory_order_relaxed);
-    }
-  }
-}
-
-ThreadCache::~ThreadCache() {
-  ThreadCacheRegistry::Instance().UnregisterThreadCache(this);
-  Purge();
-}
-
-// static
-void ThreadCache::Delete(void* tcache_ptr) {
-  auto* tcache = static_cast<ThreadCache*>(tcache_ptr);
-
-  if (!IsValid(tcache)) {
-    return;
-  }
-
-#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
-  internal::g_thread_cache = nullptr;
-#else
-  internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
-#endif
-
-  auto* root = tcache->root_;
-  tcache->~ThreadCache();
-  // ThreadCache was allocated using RawAlloc() and SlotStartAddr2Ptr(), so it
-  // is shifted by extras, but is MTE-tagged.
-  root->RawFree(internal::SlotStartPtr2Addr(tcache_ptr));
-
-#if BUILDFLAG(IS_WIN)
-  // On Windows, allocations do occur during thread/process teardown, make sure
-  // they don't resurrect the thread cache.
-  //
-  // Don't MTE-tag, as it'd mess with the sentinel value.
-  //
-  // TODO(lizeb): Investigate whether this is needed on POSIX as well.
-  internal::PartitionTlsSet(internal::g_thread_cache_key,
-                            reinterpret_cast<void*>(kTombstone));
-#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
-  internal::g_thread_cache = reinterpret_cast<ThreadCache*>(kTombstone);
-#endif
-
-#endif  // BUILDFLAG(IS_WIN)
-}
-
-ThreadCache::Bucket::Bucket() {
-  limit.store(0, std::memory_order_relaxed);
-}
-
-void ThreadCache::FillBucket(size_t bucket_index) {
-  // Filling multiple elements from the central allocator at a time has several
-  // advantages:
-  // - Amortize lock acquisition
-  // - Increase hit rate
-  // - Can improve locality, as consecutive allocations from the central
-  //   allocator will likely return close addresses, especially early on.
-  //
-  // However, do not take too many items, to prevent memory bloat.
-  //
-  // Cache filling / purging policy:
-  // We aim at keeping the buckets neither empty nor full, while minimizing
-  // requests to the central allocator.
-  //
-  // For each bucket, there is a |limit| of how many cached objects there are in
-  // the bucket, so |count| < |limit| at all times.
-  // - Clearing: limit -> limit / 2
-  // - Filling: 0 -> limit / kBatchFillRatio
-  //
-  // These thresholds are somewhat arbitrary, with these considerations:
-  // (1) Batched filling should not completely fill the bucket
-  // (2) Batched clearing should not completely clear the bucket
-  // (3) Batched filling should not be too eager
-  //
-  // If (1) and (2) do not hold, we risk oscillations of bucket filling /
-  // clearing which would greatly increase calls to the central allocator. (3)
-  // tries to keep memory usage low. So clearing half of the bucket, and filling
-  // a quarter of it are sensible defaults.
-  PA_INCREMENT_COUNTER(stats_.batch_fill_count);
-
-  Bucket& bucket = buckets_[bucket_index];
-  // Some buckets may have a limit lower than |kBatchFillRatio|, but we still
-  // want to at least allocate a single slot; otherwise we wrongly return
-  // nullptr, which ends up deactivating the bucket.
-  //
-  // In these cases, we do not really batch bucket filling, but this is expected
-  // to be used for the largest buckets, where over-allocating is not advised.
-  int count = std::max(
-      1, bucket.limit.load(std::memory_order_relaxed) / kBatchFillRatio);
-
-  size_t usable_size;
-  bool is_already_zeroed;
-
-  PA_DCHECK(!root_->buckets[bucket_index].CanStoreRawSize());
-  PA_DCHECK(!root_->buckets[bucket_index].is_direct_mapped());
-
-  size_t allocated_slots = 0;
-  // Same as calling RawAlloc() |count| times, but acquires the lock only once.
-  internal::ScopedGuard guard(internal::PartitionRootLock(root_));
-  for (int i = 0; i < count; i++) {
-    // Thread cache fill should not trigger expensive operations, to not grab
-    // the lock for a long time needlessly, but also to not inflate memory
-    // usage. Indeed, without AllocFlags::kFastPathOrReturnNull, cache
-    // fill may activate a new PartitionPage, or even a new SuperPage, which is
-    // clearly not desirable.
-    //
-    // |raw_size| is set to the slot size, as we don't know it. However, it is
-    // only used for direct-mapped allocations and single-slot ones anyway,
-    // which are not handled here.
-    uintptr_t slot_start = root_->AllocFromBucket(
-        &root_->buckets[bucket_index],
-        AllocFlags::kFastPathOrReturnNull | AllocFlags::kReturnNull,
-        root_->buckets[bucket_index].slot_size /* raw_size */,
-        internal::PartitionPageSize(), &usable_size, &is_already_zeroed);
-
-    // Either the previous allocation would require a slow path allocation, or
-    // the central allocator is out of memory. If the bucket was filled with
-    // some objects, then the allocation will be handled normally. Otherwise,
-    // this goes to the central allocator, which will service the allocation,
-    // return nullptr or crash.
-    if (!slot_start) {
-      break;
-    }
-
-    allocated_slots++;
-    PutInBucket(bucket, slot_start);
-  }
-
-  cached_memory_ += allocated_slots * bucket.slot_size;
-}
-
-void ThreadCache::ClearBucket(Bucket& bucket, size_t limit) {
-  ClearBucketHelper<true>(bucket, limit);
-}
-
-template <bool crash_on_corruption>
-void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
-  // Avoids acquiring the lock needlessly.
-  if (!bucket.count || bucket.count <= limit) {
-    return;
-  }
-
-  // This serves two purposes: error checking and avoiding stalls when grabbing
-  // the lock:
-  // 1. Error checking: this is pretty clear. Since this path is taken
-  //    infrequently, and is going to walk the entire freelist anyway, its
-  //    incremental cost should be very small. Indeed, we free from the tail of
-  //    the list, so all calls here will end up walking the entire freelist, and
-  //    incurring the same amount of cache misses.
-  // 2. Avoiding stalls: If one of the freelist accesses in |FreeAfter()|
-  //    triggers a major page fault, and we are running on a low-priority
-  //    thread, we don't want the thread to be blocked while holding the lock,
-  //    causing a priority inversion.
-  if constexpr (crash_on_corruption) {
-    bucket.freelist_head->CheckFreeListForThreadCache(bucket.slot_size);
-  }
-
-  uint8_t count_before = bucket.count;
-  if (limit == 0) {
-    FreeAfter<crash_on_corruption>(bucket.freelist_head, bucket.slot_size);
-    bucket.freelist_head = nullptr;
-  } else {
-    // Free the *end* of the list, not the head, since the head contains the
-    // most recently touched memory.
-    auto* head = bucket.freelist_head;
-    size_t items = 1;  // Cannot free the freelist head.
-    while (items < limit) {
-      head = head->GetNextForThreadCache<crash_on_corruption>(bucket.slot_size);
-      items++;
-    }
-    FreeAfter<crash_on_corruption>(
-        head->GetNextForThreadCache<crash_on_corruption>(bucket.slot_size),
-        bucket.slot_size);
-    head->SetNext(nullptr);
-  }
-  bucket.count = limit;
-  uint8_t count_after = bucket.count;
-  size_t freed_memory = (count_before - count_after) * bucket.slot_size;
-  PA_DCHECK(cached_memory_ >= freed_memory);
-  cached_memory_ -= freed_memory;
-
-  PA_DCHECK(cached_memory_ == CachedMemory());
-}
-
-template <bool crash_on_corruption>
-void ThreadCache::FreeAfter(internal::PartitionFreelistEntry* head,
-                            size_t slot_size) {
-  // Acquire the lock once. Deallocations from the same bucket are likely to
-  // hit the same cache lines in the central allocator, and lock
-  // acquisitions can be expensive.
-  internal::ScopedGuard guard(internal::PartitionRootLock(root_));
-  while (head) {
-    uintptr_t slot_start = internal::SlotStartPtr2Addr(head);
-    head = head->GetNextForThreadCache<crash_on_corruption>(slot_size);
-    root_->RawFreeLocked(slot_start);
-  }
-}
-
-void ThreadCache::ResetForTesting() {
-  stats_.alloc_count = 0;
-  stats_.alloc_hits = 0;
-  stats_.alloc_misses = 0;
-
-  stats_.alloc_miss_empty = 0;
-  stats_.alloc_miss_too_large = 0;
-
-  stats_.cache_fill_count = 0;
-  stats_.cache_fill_hits = 0;
-  stats_.cache_fill_misses = 0;
-
-  stats_.batch_fill_count = 0;
-
-  stats_.bucket_total_memory = 0;
-  stats_.metadata_overhead = 0;
-
-  Purge();
-  PA_CHECK(cached_memory_ == 0u);
-  should_purge_.store(false, std::memory_order_relaxed);
-}
-
-size_t ThreadCache::CachedMemory() const {
-  size_t total = 0;
-  for (const Bucket& bucket : buckets_) {
-    total += bucket.count * static_cast<size_t>(bucket.slot_size);
-  }
-
-  return total;
-}
-
-void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
-  stats->alloc_count += stats_.alloc_count;
-  stats->alloc_hits += stats_.alloc_hits;
-  stats->alloc_misses += stats_.alloc_misses;
-
-  stats->alloc_miss_empty += stats_.alloc_miss_empty;
-  stats->alloc_miss_too_large += stats_.alloc_miss_too_large;
-
-  stats->cache_fill_count += stats_.cache_fill_count;
-  stats->cache_fill_hits += stats_.cache_fill_hits;
-  stats->cache_fill_misses += stats_.cache_fill_misses;
-
-  stats->batch_fill_count += stats_.batch_fill_count;
-
-#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-  for (size_t i = 0; i < internal::kNumBuckets + 1; i++) {
-    stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
-  }
-#endif  // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-
-  // cached_memory_ is not necessarily equal to |CachedMemory()| here, since
-  // this function can be called racily from another thread, to collect
-  // statistics. Hence no DCHECK_EQ(CachedMemory(), cached_memory_).
-  stats->bucket_total_memory += cached_memory_;
-
-  stats->metadata_overhead += sizeof(*this);
-}
-
-void ThreadCache::SetShouldPurge() {
-  should_purge_.store(true, std::memory_order_relaxed);
-}
-
-void ThreadCache::Purge() {
-  PA_REENTRANCY_GUARD(is_in_thread_cache_);
-  PurgeInternal();
-}
-
-void ThreadCache::TryPurge() {
-  PA_REENTRANCY_GUARD(is_in_thread_cache_);
-  PurgeInternalHelper<false>();
-}
-
-// static
-void ThreadCache::PurgeCurrentThread() {
-  auto* tcache = Get();
-  if (IsValid(tcache)) {
-    tcache->Purge();
-  }
-}
-
-void ThreadCache::PurgeInternal() {
-  PurgeInternalHelper<true>();
-}
-
-void ThreadCache::ResetPerThreadAllocationStatsForTesting() {
-  thread_alloc_stats_ = {};
-}
-
-template <bool crash_on_corruption>
-void ThreadCache::PurgeInternalHelper() {
-  should_purge_.store(false, std::memory_order_relaxed);
-  // TODO(lizeb): Investigate whether lock acquisition should be less
-  // frequent.
-  //
-  // Note: iterate over all buckets, even the inactive ones. Since
-  // |largest_active_bucket_index_| can be lowered at runtime, there may be
-  // memory already cached in the inactive buckets. They should still be
-  // purged.
-  for (auto& bucket : buckets_) {
-    ClearBucketHelper<crash_on_corruption>(bucket, 0);
-  }
-}
-
-}  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/thread_cache.h b/base/allocator/partition_allocator/thread_cache.h
deleted file mode 100644
index 9ff1ba1..0000000
--- a/base/allocator/partition_allocator/thread_cache.h
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
-
-#include <atomic>
-#include <cstdint>
-#include <limits>
-#include <memory>
-
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_bucket_lookup.h"
-#include "base/allocator/partition_allocator/partition_freelist_entry.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/partition_stats.h"
-#include "base/allocator/partition_allocator/partition_tls.h"
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_X86_64) && BUILDFLAG(HAS_64_BIT_POINTERS)
-#include <algorithm>
-#endif
-
-namespace partition_alloc {
-
-class ThreadCache;
-
-namespace tools {
-
-// This is used from ThreadCacheInspector, which runs in a different process. It
-// scans the process memory looking for the two needles, to locate the thread
-// cache registry instance.
-//
-// These two values were chosen randomly, and in particular neither is a valid
-// pointer on most 64 bit architectures.
-#if BUILDFLAG(HAS_64_BIT_POINTERS)
-constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63;
-constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf;
-#else
-constexpr uintptr_t kNeedle1 = 0xe69e32f3;
-constexpr uintptr_t kNeedle2 = 0x9615ee1c;
-#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-// This array contains, in order:
-// - kNeedle1
-// - &ThreadCacheRegistry::Instance()
-// - kNeedle2
-//
-// It is referenced in the thread cache constructor to make sure it is not
-// removed by the compiler. It is also not const to make sure it ends up in
-// .data.
-constexpr size_t kThreadCacheNeedleArraySize = 4;
-extern uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize];
-
-class HeapDumper;
-class ThreadCacheInspector;
-
-}  // namespace tools
-
-namespace internal {
-
-extern PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionTlsKey g_thread_cache_key;
-
-#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
-extern PA_COMPONENT_EXPORT(
-    PARTITION_ALLOC) thread_local ThreadCache* g_thread_cache;
-#endif
-
-}  // namespace internal
-
-struct ThreadCacheLimits {
-  // When trying to conserve memory, set the thread cache limit to this.
-  static constexpr size_t kDefaultSizeThreshold = 512;
-  // 32kiB is chosen here because, from local experiments, "zone" allocation in
-  // V8 is performance-sensitive, and zones can (and do) grow up to 32kiB for
-  // each individual allocation.
-  static constexpr size_t kLargeSizeThreshold = 1 << 15;
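-  // 1 << 15 is 32768 bytes (32kiB), which fits in a uint16_t (max 65535), as
-  // checked by the static_assert below.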
-  static_assert(kLargeSizeThreshold <= std::numeric_limits<uint16_t>::max(),
-                "");
-};
-
-// Global registry of all ThreadCache instances.
-//
-// This class cannot allocate in the (Un)registerThreadCache() functions, as
-// they are called from ThreadCache constructor, which is from within the
-// allocator. However the other members can allocate.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCacheRegistry {
- public:
-  static ThreadCacheRegistry& Instance();
-  // Do not instantiate.
-  //
-  // Several things are surprising here:
-  // - The constructor is public even though this is intended to be a singleton:
-  //   we cannot use a "static local" variable in |Instance()| as this is
-  //   reached too early during CRT initialization on Windows, meaning that
-  //   static local variables don't work (as they call into the uninitialized
-  //   runtime). To sidestep that, we use a regular global variable in the .cc,
-  //   which is fine as this object's constructor is constexpr.
-  // - Marked inline so that the chromium style plugin doesn't complain that a
-  //   "complex constructor" has an inline body. This warning is disabled when
-  //   the constructor is explicitly marked "inline". Note that this is a false
-  //   positive of the plugin, since constexpr implies inline.
-  inline constexpr ThreadCacheRegistry();
-
-  void RegisterThreadCache(ThreadCache* cache);
-  void UnregisterThreadCache(ThreadCache* cache);
-  // Prints statistics for all thread caches, or this thread's only.
-  void DumpStats(bool my_thread_only, ThreadCacheStats* stats);
-  // Purges this thread's cache, and asks the other ones to trigger Purge() at
-  // a later point (during a deallocation).
-  void PurgeAll();
-
-  // Runs `PurgeAll` and updates the next interval which
-  // `GetPeriodicPurgeNextIntervalInMicroseconds` returns.
-  //
-  // Note that it's the caller's responsibility to invoke this member function
-  // periodically with an appropriate interval. This function does not schedule
-  // any task nor timer.
-  void RunPeriodicPurge();
-  // Returns the appropriate interval to invoke `RunPeriodicPurge` next time.
-  int64_t GetPeriodicPurgeNextIntervalInMicroseconds() const;
-
-  // Controls the thread cache size, by setting the multiplier to a value above
-  // or below |ThreadCache::kDefaultMultiplier|.
-  void SetThreadCacheMultiplier(float multiplier);
-  void SetLargestActiveBucketIndex(uint8_t largest_active_bucket_index);
-
-  static internal::Lock& GetLock() { return Instance().lock_; }
-  // Purges all thread caches *now*. This is completely thread-unsafe, and
-  // should only be called in a post-fork() handler.
-  void ForcePurgeAllThreadAfterForkUnsafe();
-
-  void ResetForTesting();
-
-  static constexpr internal::base::TimeDelta kMinPurgeInterval =
-      internal::base::Seconds(1);
-  static constexpr internal::base::TimeDelta kMaxPurgeInterval =
-      internal::base::Minutes(1);
-  static constexpr internal::base::TimeDelta kDefaultPurgeInterval =
-      2 * kMinPurgeInterval;
-  static constexpr size_t kMinCachedMemoryForPurging = 500 * 1024;
-
- private:
-  friend class tools::ThreadCacheInspector;
-  friend class tools::HeapDumper;
-
-  // Not using base::Lock as the object's constructor must be constexpr.
-  internal::Lock lock_;
-  ThreadCache* list_head_ PA_GUARDED_BY(GetLock()) = nullptr;
-  bool periodic_purge_is_initialized_ = false;
-  internal::base::TimeDelta periodic_purge_next_interval_ =
-      kDefaultPurgeInterval;
-
-  uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
-      ThreadCacheLimits::kDefaultSizeThreshold);
-};
-
-constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
-
-#if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
-#define PA_INCREMENT_COUNTER(counter) ++counter
-#else
-#define PA_INCREMENT_COUNTER(counter) \
-  do {                                \
-  } while (0)
-#endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-namespace internal {
-
-class ReentrancyGuard {
- public:
-  explicit ReentrancyGuard(bool& flag) : flag_(flag) {
-    PA_CHECK(!flag_);
-    flag_ = true;
-  }
-
-  ~ReentrancyGuard() { flag_ = false; }
-
- private:
-  bool& flag_;
-};
-
-}  // namespace internal
-
-#define PA_REENTRANCY_GUARD(x)      \
-  internal::ReentrancyGuard guard { \
-    x                               \
-  }
-
-#else  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-#define PA_REENTRANCY_GUARD(x) \
-  do {                         \
-  } while (0)
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-// Per-thread cache. *Not* threadsafe, must only be accessed from a single
-// thread.
-//
-// In practice, this is easily enforced as long as only |instance| is
-// manipulated, as it is a thread_local member. As such, any
-// |ThreadCache::instance->*()| call will necessarily be done from a single
-// thread.
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
- public:
-  // Initializes the thread cache for |root|. May allocate, so should be called
-  // with the thread cache disabled on the partition side, and without the
-  // partition lock held.
-  //
-  // May only be called by a single PartitionRoot.
-  static void Init(PartitionRoot* root);
-
-  static void DeleteForTesting(ThreadCache* tcache);
-
-  // Deletes existing thread cache and creates a new one for |root|.
-  static void SwapForTesting(PartitionRoot* root);
-
-  // Removes the tombstone marker that would be returned by Get() otherwise.
-  static void RemoveTombstoneForTesting();
-
-  // Can be called several times, must be called before any ThreadCache
-  // interactions.
-  static void EnsureThreadSpecificDataInitialized();
-
-  static ThreadCache* Get() {
-#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
-    return internal::g_thread_cache;
-#else
-    // This region isn't MTE-tagged.
-    return reinterpret_cast<ThreadCache*>(
-        internal::PartitionTlsGet(internal::g_thread_cache_key));
-#endif
-  }
-
-  static bool IsValid(ThreadCache* tcache) {
-    // Do not MTE-untag, as it'd mess up the sentinel value.
-    return reinterpret_cast<uintptr_t>(tcache) & kTombstoneMask;
-  }
-
-  static bool IsTombstone(ThreadCache* tcache) {
-    // Do not MTE-untag, as it'd mess up the sentinel value.
-    return reinterpret_cast<uintptr_t>(tcache) == kTombstone;
-  }
-
-  // Create a new ThreadCache associated with |root|.
-  // Must be called without the partition locked, as this may allocate.
-  static ThreadCache* Create(PartitionRoot* root);
-
-  ~ThreadCache();
-
-  // Force placement new.
-  void* operator new(size_t) = delete;
-  void* operator new(size_t, void* buffer) { return buffer; }
-  void operator delete(void* ptr) = delete;
-  ThreadCache(const ThreadCache&) = delete;
-  ThreadCache(const ThreadCache&&) = delete;
-  ThreadCache& operator=(const ThreadCache&) = delete;
-
-  // Tries to put a slot at |slot_start| into the cache.
-  // The slot comes from the bucket at index |bucket_index| from the partition
-  // this cache is for.
-  //
-  // Returns true if the slot was put in the cache, and false otherwise. This
-  // can happen either because the cache is full or the allocation was too
-  // large.
-  PA_ALWAYS_INLINE bool MaybePutInCache(uintptr_t slot_start,
-                                        size_t bucket_index,
-                                        size_t* slot_size);
-
-  // Tries to allocate a memory slot from the cache.
-  // Returns 0 on failure.
-  //
-  // Has the same behavior as RawAlloc(), that is: no cookie or ref-count
-  // handling. Sets |slot_size| to the allocated size upon success.
-  PA_ALWAYS_INLINE uintptr_t GetFromCache(size_t bucket_index,
-                                          size_t* slot_size);
-
-  // Asks this cache to trigger |Purge()| at a later point. Can be called from
-  // any thread.
-  void SetShouldPurge();
-  // Empties the cache.
-  // The Partition lock must *not* be held when calling this.
-  // Must be called from the thread this cache is for.
-  void Purge();
-  // |TryPurge| is the same as |Purge|, except that |TryPurge| will
-  // not crash if the thread cache is inconsistent. Normally inconsistency
-  // is a sign of a bug somewhere, so |Purge| should be preferred in most cases.
-  void TryPurge();
-  // Amount of cached memory for this thread's cache, in bytes.
-  size_t CachedMemory() const;
-  void AccumulateStats(ThreadCacheStats* stats) const;
-
-  // Purge the thread cache of the current thread, if one exists.
-  static void PurgeCurrentThread();
-
-  const ThreadAllocStats& thread_alloc_stats() const {
-    return thread_alloc_stats_;
-  }
-  size_t bucket_count_for_testing(size_t index) const {
-    return buckets_[index].count;
-  }
-
-  internal::base::PlatformThreadId thread_id() const { return thread_id_; }
-
-  // Sets the maximum size of allocations that may be cached by the thread
-  // cache. This applies to all threads. However, the maximum size is bounded by
-  // |kLargeSizeThreshold|.
-  static void SetLargestCachedSize(size_t size);
-
-  // Cumulative stats about *all* allocations made on the `root_` partition on
-  // this thread, that is, not only the allocations serviced by the thread
-  // cache, but all allocations, including large and direct-mapped ones. This
-  // should in theory be split into a separate PerThread data structure, but
-  // the thread cache is the only per-thread data we have as of now.
-  //
-  // TODO(lizeb): Investigate adding a proper per-thread data structure.
-  PA_ALWAYS_INLINE void RecordAllocation(size_t size);
-  PA_ALWAYS_INLINE void RecordDeallocation(size_t size);
-  void ResetPerThreadAllocationStatsForTesting();
-
-  // Fill 1 / kBatchFillRatio * bucket.limit slots at a time.
-  static constexpr uint16_t kBatchFillRatio = 8;
-
-  // Limit for the smallest bucket will be kDefaultMultiplier *
-  // kSmallBucketBaseCount by default.
-  static constexpr float kDefaultMultiplier = 2.;
-  static constexpr uint8_t kSmallBucketBaseCount = 64;
-
-  static constexpr size_t kDefaultSizeThreshold =
-      ThreadCacheLimits::kDefaultSizeThreshold;
-  static constexpr size_t kLargeSizeThreshold =
-      ThreadCacheLimits::kLargeSizeThreshold;
-
-  const ThreadCache* prev_for_testing() const
-      PA_EXCLUSIVE_LOCKS_REQUIRED(ThreadCacheRegistry::GetLock()) {
-    return prev_;
-  }
-  const ThreadCache* next_for_testing() const
-      PA_EXCLUSIVE_LOCKS_REQUIRED(ThreadCacheRegistry::GetLock()) {
-    return next_;
-  }
-
- private:
-  friend class tools::HeapDumper;
-  friend class tools::ThreadCacheInspector;
-
-  struct Bucket {
-    internal::PartitionFreelistEntry* freelist_head = nullptr;
-    // Want to keep sizeof(Bucket) small, using small types.
-    uint8_t count = 0;
-    std::atomic<uint8_t> limit{};  // Can be changed from another thread.
-    uint16_t slot_size = 0;
-
-    Bucket();
-  };
-  static_assert(sizeof(Bucket) <= 2 * sizeof(void*), "Keep Bucket small.");
-
-  explicit ThreadCache(PartitionRoot* root);
-  static void Delete(void* thread_cache_ptr);
-
-  void PurgeInternal();
-  template <bool crash_on_corruption>
-  void PurgeInternalHelper();
-
-  // Fills a bucket from the central allocator.
-  void FillBucket(size_t bucket_index);
-  // Empties the |bucket| until there are at most |limit| objects in it.
-  template <bool crash_on_corruption>
-  void ClearBucketHelper(Bucket& bucket, size_t limit);
-  void ClearBucket(Bucket& bucket, size_t limit);
-  PA_ALWAYS_INLINE void PutInBucket(Bucket& bucket, uintptr_t slot_start);
-  void ResetForTesting();
-  // Releases the entire freelist starting at |head| to the root.
-  template <bool crash_on_corruption>
-  void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size);
-  static void SetGlobalLimits(PartitionRoot* root, float multiplier);
-
-  static constexpr uint16_t kBucketCount =
-      internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) +
-      1;
-  static_assert(
-      kBucketCount < internal::kNumBuckets,
-      "Cannot have more cached buckets than what the allocator supports");
-
-  // On some architectures, ThreadCache::Get() can be called and return
-  // something after the thread cache has been destroyed. In this case, we set
-  // it to this value, to signal that the thread is being terminated, and the
-  // thread cache should not be used.
-  //
-  // This happens in particular on Windows, during program termination.
-  //
-  // We choose 0x1 as the value as it is an invalid pointer value, since it is
-  // not aligned, and too low. Also, checking !(ptr & kTombstoneMask) checks for
-  // nullptr and kTombstone at the same time.
-  static constexpr uintptr_t kTombstone = 0x1;
-  static constexpr uintptr_t kTombstoneMask = ~kTombstone;
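-  // E.g. nullptr (0x0) and kTombstone (0x1) both satisfy
-  // !(ptr & kTombstoneMask), while any real, aligned ThreadCache pointer has
-  // bits set above bit 0 and passes IsValid().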
-
-  static uint8_t global_limits_[kBucketCount];
-  // Index of the largest active bucket. Not all processes/platforms will use
-  // all buckets, as using larger buckets increases the memory footprint.
-  //
-  // TODO(lizeb): Investigate making this per-thread rather than static, to
-  // improve locality, and open the door to per-thread settings.
-  static uint16_t largest_active_bucket_index_;
-
-  // These are at the beginning as they're accessed for each allocation.
-  uint32_t cached_memory_ = 0;
-  std::atomic<bool> should_purge_;
-  ThreadCacheStats stats_;
-  ThreadAllocStats thread_alloc_stats_;
-
-  // The array of buckets below is quite big, even though each Bucket is only
-  // 2 pointers wide.
-  Bucket buckets_[kBucketCount];
-
-  // Cold data below.
-  PartitionRoot* const root_;
-
-  const internal::base::PlatformThreadId thread_id_;
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  bool is_in_thread_cache_ = false;
-#endif
-
-  // Intrusive list since ThreadCacheRegistry::RegisterThreadCache() cannot
-  // allocate.
-  ThreadCache* next_ PA_GUARDED_BY(ThreadCacheRegistry::GetLock());
-  ThreadCache* prev_ PA_GUARDED_BY(ThreadCacheRegistry::GetLock());
-
-  friend class ThreadCacheRegistry;
-  friend class PartitionAllocThreadCacheTest;
-  friend class tools::ThreadCacheInspector;
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, Simple);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              MultipleObjectsCachedPerBucket);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              LargeAllocationsAreNotCached);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              MultipleThreadCaches);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, RecordStats);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              ThreadCacheRegistry);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              MultipleThreadCachesAccounting);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicCountPerBucket);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicCountPerBucketClamping);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicCountPerBucketMultipleThreads);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicSizeThreshold);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicSizeThresholdPurge);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, ClearFromTail);
-};
-
-PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
-                                                   size_t bucket_index,
-                                                   size_t* slot_size) {
-  PA_REENTRANCY_GUARD(is_in_thread_cache_);
-  PA_INCREMENT_COUNTER(stats_.cache_fill_count);
-
-  if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
-    PA_INCREMENT_COUNTER(stats_.cache_fill_misses);
-    return false;
-  }
-
-  auto& bucket = buckets_[bucket_index];
-
-  PA_DCHECK(bucket.count != 0 || bucket.freelist_head == nullptr);
-
-  PutInBucket(bucket, slot_start);
-  cached_memory_ += bucket.slot_size;
-  PA_INCREMENT_COUNTER(stats_.cache_fill_hits);
-
-  // Relaxed ordering: we don't care about having an up-to-date or consistent
-  // value, just want it to not change while we are using it, hence using
-  // relaxed ordering, and loading into a local variable. Without it, we are
-  // gambling that the compiler would not issue multiple loads.
-  uint8_t limit = bucket.limit.load(std::memory_order_relaxed);
-  // Batched deallocation, amortizing lock acquisitions.
-  if (PA_UNLIKELY(bucket.count > limit)) {
-    ClearBucket(bucket, limit / 2);
-  }
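-  // E.g. with the smallest bucket's default limit of kDefaultMultiplier *
-  // kSmallBucketBaseCount = 128, the 129th cached slot triggers
-  // ClearBucket(bucket, 64), which returns 65 slots to the root under a single
-  // lock acquisition.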
-
-  if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) {
-    PurgeInternal();
-  }
-
-  *slot_size = bucket.slot_size;
-  return true;
-}
-
-PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
-                                                     size_t* slot_size) {
-#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-  stats_.allocs_per_bucket_[bucket_index]++;
-#endif
-
-  PA_REENTRANCY_GUARD(is_in_thread_cache_);
-  PA_INCREMENT_COUNTER(stats_.alloc_count);
-  // Only handle "small" allocations.
-  if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
-    PA_INCREMENT_COUNTER(stats_.alloc_miss_too_large);
-    PA_INCREMENT_COUNTER(stats_.alloc_misses);
-    return 0;
-  }
-
-  auto& bucket = buckets_[bucket_index];
-  if (PA_LIKELY(bucket.freelist_head)) {
-    PA_INCREMENT_COUNTER(stats_.alloc_hits);
-  } else {
-    PA_DCHECK(bucket.count == 0);
-    PA_INCREMENT_COUNTER(stats_.alloc_miss_empty);
-    PA_INCREMENT_COUNTER(stats_.alloc_misses);
-
-    FillBucket(bucket_index);
-
-    // Very unlikely; this means that the central allocator is out of memory. Let it
-    // deal with it (may return 0, may crash).
-    if (PA_UNLIKELY(!bucket.freelist_head)) {
-      return 0;
-    }
-  }
-
-  PA_DCHECK(bucket.count != 0);
-  internal::PartitionFreelistEntry* entry = bucket.freelist_head;
-  // TODO(lizeb): Consider removing once crbug.com/1382658 is fixed.
-#if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \
-    BUILDFLAG(HAS_64_BIT_POINTERS)
-  // x86_64 architecture now supports 57 bits of address space, as of Ice Lake
-  // for Intel. However Chrome OS systems do not ship with kernel support for
-  // it, but with 48 bits, so all canonical addresses have the upper 16 bits
-  // zeroed (17 in practice, since the upper half of address space is reserved
-  // by the kernel).
-  constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1;
-  PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask));
-#endif  // BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) &&
-        // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  // Passes the bucket size to |GetNext()|, so that in case of freelist
-  // corruption, we know the bucket size that led to the crash, helping to
-  // narrow down the search for the culprit. |bucket| was touched just now, so this
-  // does not introduce another cache miss.
-  internal::PartitionFreelistEntry* next =
-      entry->GetNextForThreadCache<true>(bucket.slot_size);
-  PA_DCHECK(entry != next);
-  bucket.count--;
-  PA_DCHECK(bucket.count != 0 || !next);
-  bucket.freelist_head = next;
-  *slot_size = bucket.slot_size;
-
-  PA_DCHECK(cached_memory_ >= bucket.slot_size);
-  cached_memory_ -= bucket.slot_size;
-
-  return internal::SlotStartPtr2Addr(entry);
-}
-
-PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
-                                               uintptr_t slot_start) {
-#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \
-    BUILDFLAG(HAS_64_BIT_POINTERS)
-  // We see freelist corruption crashes happening in the wild.  These are likely
-  // due to out-of-bounds accesses in the previous slot, or to a Use-After-Free
-  // somewhere in the code.
-  //
-  // The issue is that we detect the UaF far away from the place where it
-  // happens. As a consequence, we should try to make incorrect code crash as
-  // early as possible. Poisoning memory at free() time works for UaF, but it
-  // was seen in the past to incur a high performance cost.
-  //
-  // Here, only poison the current cacheline, which we are touching anyway.
-  // TODO(lizeb): Make sure this does not hurt performance.
-
-  // Everything below requires this alignment.
-  static_assert(internal::kAlignment == 16, "");
-
-  // The pointer is always 16-byte aligned, so its start address is always 0
-  // modulo 16. Its distance to the next cacheline is
-  //   `64 - ((slot_start & 63) / 16) * 16`
-  static_assert(
-      internal::kPartitionCachelineSize == 64,
-      "The computation below assumes that cache lines are 64 bytes long.");
-  int distance_to_next_cacheline_in_16_bytes = 4 - ((slot_start >> 4) & 3);
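-  // E.g. a slot starting 48 bytes into a cacheline ((slot_start >> 4) & 3 == 3)
-  // has a distance of 1, i.e. 16 bytes left before the next cacheline, while a
-  // cacheline-aligned slot has a distance of 4, i.e. the full 64 bytes.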
-  int slot_size_remaining_in_16_bytes =
-#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-      // When BRP is on in the "previous slot" mode, this slot may have a BRP
-      // ref-count of the next, potentially allocated slot. Make sure we don't
-      // overwrite it.
-      (bucket.slot_size - sizeof(PartitionRefCount)) / 16;
-#else
-      bucket.slot_size / 16;
-#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
-
-  slot_size_remaining_in_16_bytes = std::min(
-      slot_size_remaining_in_16_bytes, distance_to_next_cacheline_in_16_bytes);
-
-  static const uint32_t poison_16_bytes[4] = {0xbadbad00, 0xbadbad00,
-                                              0xbadbad00, 0xbadbad00};
-  // Give a hint to the compiler in the hope that it'll vectorize the loop.
-#if PA_HAS_BUILTIN(__builtin_assume_aligned)
-  void* slot_start_tagged = __builtin_assume_aligned(
-      internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment);
-#else
-  void* slot_start_tagged = internal::SlotStartAddr2Ptr(slot_start);
-#endif
-  uint32_t* address_aligned = static_cast<uint32_t*>(slot_start_tagged);
-  for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) {
-    // Clang will expand the memcpy to a 16-byte write (movups on x86).
-    memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes));
-    address_aligned += 4;
-  }
-#endif  // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
-        // BUILDFLAG(HAS_64_BIT_POINTERS)
-
-  auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache(
-      slot_start, bucket.freelist_head);
-  bucket.freelist_head = entry;
-  bucket.count++;
-}
-
-PA_ALWAYS_INLINE void ThreadCache::RecordAllocation(size_t size) {
-  thread_alloc_stats_.alloc_count++;
-  thread_alloc_stats_.alloc_total_size += size;
-}
-
-PA_ALWAYS_INLINE void ThreadCache::RecordDeallocation(size_t size) {
-  thread_alloc_stats_.dealloc_count++;
-  thread_alloc_stats_.dealloc_total_size += size;
-}
-
-}  // namespace partition_alloc
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
diff --git a/base/allocator/partition_allocator/thread_cache_unittest.cc b/base/allocator/partition_allocator/thread_cache_unittest.cc
deleted file mode 100644
index 78d73eb..0000000
--- a/base/allocator/partition_allocator/thread_cache_unittest.cc
+++ /dev/null
@@ -1,1530 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/thread_cache.h"
-
-#include <algorithm>
-#include <atomic>
-#include <vector>
-
-#include "base/allocator/partition_allocator/extended_api.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
-#include "base/allocator/partition_allocator/partition_lock.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/tagging.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
-// cannot test the thread cache.
-//
-// Also, the thread cache is not supported on all platforms.
-#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
-    PA_CONFIG(THREAD_CACHE_SUPPORTED)
-
-namespace partition_alloc {
-
-using BucketDistribution = PartitionRoot::BucketDistribution;
-namespace {
-
-constexpr size_t kSmallSize = 33;  // Must be large enough to fit extras.
-constexpr size_t kDefaultCountForSmallBucket =
-    ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
-constexpr size_t kFillCountForSmallBucket =
-    kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;
-
-constexpr size_t kMediumSize = 200;
-constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
-constexpr size_t kFillCountForMediumBucket =
-    kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;
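-// With the defaults from thread_cache.h (kDefaultMultiplier = 2,
-// kSmallBucketBaseCount = 64, kBatchFillRatio = 8), these work out to:
-// kDefaultCountForSmallBucket = 128, kFillCountForSmallBucket = 16,
-// kDefaultCountForMediumBucket = 64 and kFillCountForMediumBucket = 8.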
-
-static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");
-
-class DeltaCounter {
- public:
-  explicit DeltaCounter(uint64_t& value)
-      : current_value_(value), initial_value_(value) {}
-  void Reset() { initial_value_ = current_value_; }
-  uint64_t Delta() const { return current_value_ - initial_value_; }
-
- private:
-  uint64_t& current_value_;
-  uint64_t initial_value_;
-};
-
-// Forbid extras, since they make it harder to find out which bucket is used.
-std::unique_ptr<PartitionAllocatorForTesting> CreateAllocator() {
-  std::unique_ptr<PartitionAllocatorForTesting> allocator =
-      std::make_unique<PartitionAllocatorForTesting>(PartitionOptions {
-        .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-        .thread_cache = PartitionOptions::ThreadCache::kEnabled,
-#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-        .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-      });
-  allocator->root()->UncapEmptySlotSpanMemoryForTesting();
-
-  return allocator;
-}
-
-}  // namespace
-
-class PartitionAllocThreadCacheTest
-    : public ::testing::TestWithParam<PartitionRoot::BucketDistribution> {
- public:
-  PartitionAllocThreadCacheTest()
-      : allocator_(CreateAllocator()), scope_(allocator_->root()) {}
-
-  ~PartitionAllocThreadCacheTest() override {
-    ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
-
-    // Cleanup the global state so next test can recreate ThreadCache.
-    if (ThreadCache::IsTombstone(ThreadCache::Get())) {
-      ThreadCache::RemoveTombstoneForTesting();
-    }
-  }
-
- protected:
-  void SetUp() override {
-    PartitionRoot* root = allocator_->root();
-    switch (GetParam()) {
-      case BucketDistribution::kNeutral:
-        root->ResetBucketDistributionForTesting();
-        break;
-      case BucketDistribution::kDenser:
-        root->SwitchToDenserBucketDistribution();
-        break;
-    }
-
-    ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-        ThreadCache::kDefaultMultiplier);
-    ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
-
-    // Make sure that enough slot spans have been touched, otherwise cache fill
-    // becomes unpredictable (because it doesn't take slow paths in the
-    // allocator), which is an issue for tests.
-    FillThreadCacheAndReturnIndex(kSmallSize, 1000);
-    FillThreadCacheAndReturnIndex(kMediumSize, 1000);
-
-    // There are allocations, a thread cache is created.
-    auto* tcache = root->thread_cache_for_testing();
-    ASSERT_TRUE(tcache);
-
-    ThreadCacheRegistry::Instance().ResetForTesting();
-    tcache->ResetForTesting();
-  }
-
-  void TearDown() override {
-    auto* tcache = root()->thread_cache_for_testing();
-    ASSERT_TRUE(tcache);
-    tcache->Purge();
-
-    ASSERT_EQ(root()->get_total_size_of_allocated_bytes(),
-              GetBucketSizeForThreadCache());
-  }
-
-  PartitionRoot* root() { return allocator_->root(); }
-
-  // Returns the size of the smallest bucket fitting an allocation of
-  // |sizeof(ThreadCache)| bytes.
-  size_t GetBucketSizeForThreadCache() {
-    size_t tc_bucket_index = root()->SizeToBucketIndex(
-        sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
-    auto* tc_bucket = &root()->buckets[tc_bucket_index];
-    return tc_bucket->slot_size;
-  }
-
-  static size_t SizeToIndex(size_t size) {
-    return PartitionRoot::SizeToBucketIndex(size, GetParam());
-  }
-
-  size_t FillThreadCacheAndReturnIndex(size_t raw_size, size_t count = 1) {
-    uint16_t bucket_index = SizeToIndex(raw_size);
-    std::vector<void*> allocated_data;
-
-    for (size_t i = 0; i < count; ++i) {
-      allocated_data.push_back(
-          root()->Alloc(root()->AdjustSizeForExtrasSubtract(raw_size), ""));
-    }
-    for (void* ptr : allocated_data) {
-      root()->Free(ptr);
-    }
-
-    return bucket_index;
-  }
-
-  void FillThreadCacheWithMemory(size_t target_cached_memory) {
-    for (int batch : {1, 2, 4, 8, 16}) {
-      for (size_t raw_size = root()->AdjustSizeForExtrasAdd(1);
-           raw_size <= ThreadCache::kLargeSizeThreshold; raw_size++) {
-        FillThreadCacheAndReturnIndex(raw_size, batch);
-
-        if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
-          return;
-        }
-      }
-    }
-
-    ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
-  }
-
-  std::unique_ptr<PartitionAllocatorForTesting> allocator_;
-  internal::ThreadCacheProcessScopeForTesting scope_;
-};
-
-INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
-                         PartitionAllocThreadCacheTest,
-                         ::testing::Values(BucketDistribution::kNeutral,
-                                           BucketDistribution::kDenser));
-
-TEST_P(PartitionAllocThreadCacheTest, Simple) {
-  // There is a cache.
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_TRUE(tcache);
-  DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
-
-  void* ptr =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
-  ASSERT_TRUE(ptr);
-
-  uint16_t index = SizeToIndex(kSmallSize);
-  EXPECT_EQ(kFillCountForSmallBucket - 1,
-            tcache->bucket_count_for_testing(index));
-
-  root()->Free(ptr);
-  // Freeing fills the thread cache.
-  EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
-
-  void* ptr2 =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
-  // MTE-untag, because Free() changes tag.
-  EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
-  // Allocated from the thread cache.
-  EXPECT_EQ(kFillCountForSmallBucket - 1,
-            tcache->bucket_count_for_testing(index));
-
-  EXPECT_EQ(1u, batch_fill_counter.Delta());
-
-  root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
-  void* ptr =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
-  ASSERT_TRUE(ptr);
-
-  // There is a cache.
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_TRUE(tcache);
-
-  uint16_t index = SizeToIndex(kSmallSize);
-  EXPECT_EQ(kFillCountForSmallBucket - 1,
-            tcache->bucket_count_for_testing(index));
-
-  root()->Free(ptr);
-  // Freeing fills the thread cache.
-  EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
-
-  void* ptr2 =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize + 1), "");
-  // MTE-untag, because Free() changes tag.
-  EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
-  // Allocated from the thread cache.
-  EXPECT_EQ(kFillCountForSmallBucket - 1,
-            tcache->bucket_count_for_testing(index));
-  root()->Free(ptr2);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
-  auto* tcache = root()->thread_cache_for_testing();
-  DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
-  size_t bucket_index =
-      FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
-  EXPECT_EQ(2 * kFillCountForMediumBucket,
-            tcache->bucket_count_for_testing(bucket_index));
-  // 2 batches, since there were more than |kFillCountForMediumBucket|
-  // allocations.
-  EXPECT_EQ(2u, batch_fill_counter.Delta());
-}
-
-TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
-  size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, Purge) {
-  size_t allocations = 10;
-  size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_EQ(
-      (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
-      tcache->bucket_count_for_testing(bucket_index));
-  tcache->Purge();
-  EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
-}
-
-TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
-  PartitionAllocatorForTesting allocator(PartitionOptions{
-      .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-      .star_scan_quarantine = PartitionOptions::StarScanQuarantine::kAllowed,
-  });
-
-  size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
-  void* ptr = allocator.root()->Alloc(
-      allocator.root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
-  ASSERT_TRUE(ptr);
-
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_EQ(kFillCountForSmallBucket,
-            tcache->bucket_count_for_testing(bucket_index));
-
-  allocator.root()->Free(ptr);
-  EXPECT_EQ(kFillCountForSmallBucket,
-            tcache->bucket_count_for_testing(bucket_index));
-}
-
-// Required to record hits and misses.
-#if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
-TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
-  auto* tcache = root()->thread_cache_for_testing();
-  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
-  DeltaCounter alloc_miss_too_large_counter{
-      tcache->stats_.alloc_miss_too_large};
-  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
-  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
-
-  FillThreadCacheAndReturnIndex(100 * 1024);
-  tcache = root()->thread_cache_for_testing();
-  EXPECT_EQ(1u, alloc_miss_counter.Delta());
-  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
-  EXPECT_EQ(1u, cache_fill_counter.Delta());
-  EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
-}
-#endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
-
-TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
-  FillThreadCacheAndReturnIndex(1024 * 1024);
-  // The line above would crash due to out of bounds access if this wasn't
-  // properly handled.
-}
-
-// This tests that Realloc properly handles bookkeeping, specifically the path
-// that reallocates in place.
-TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
-  root()->ResetBookkeepingForTesting();
-
-  size_t expected_allocated_size = root()->get_total_size_of_allocated_bytes();
-
-  EXPECT_EQ(expected_allocated_size,
-            root()->get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
-
-  void* ptr = root()->Alloc(
-      root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed), "");
-
-  EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
-            root()->get_total_size_of_allocated_bytes());
-
-  void* ptr2 = root()->Realloc(
-      ptr, root()->AdjustSizeForExtrasSubtract(9 * internal::kMaxBucketed), "");
-
-  ASSERT_EQ(ptr, ptr2);
-  EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
-            root()->get_total_size_of_allocated_bytes());
-
-  ptr2 = root()->Realloc(
-      ptr, root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed),
-      "");
-
-  ASSERT_EQ(ptr, ptr2);
-  EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
-            root()->get_total_size_of_allocated_bytes());
-
-  root()->Free(ptr);
-}
-
-namespace {
-
-size_t FillThreadCacheAndReturnIndex(PartitionRoot* root,
-                                     size_t size,
-                                     BucketDistribution bucket_distribution,
-                                     size_t count = 1) {
-  uint16_t bucket_index =
-      PartitionRoot::SizeToBucketIndex(size, bucket_distribution);
-  std::vector<void*> allocated_data;
-
-  for (size_t i = 0; i < count; ++i) {
-    allocated_data.push_back(
-        root->Alloc(root->AdjustSizeForExtrasSubtract(size), ""));
-  }
-  for (void* ptr : allocated_data) {
-    root->Free(ptr);
-  }
-
-  return bucket_index;
-}
-
-// TODO(1151236): To remove callback from partition allocator's DEPS,
-// rewrite the tests without BindLambdaForTesting and RepeatingClosure.
-// However, this makes it a little annoying to add more tests using their
-// own threads. We need to support an easier way to implement tests using
-// PlatformThreadForTesting::Create().
-class ThreadDelegateForMultipleThreadCaches
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
-                                        PartitionRoot* root,
-                                        BucketDistribution bucket_distribution)
-      : parent_thread_tcache_(parent_thread_cache),
-        root_(root),
-        bucket_distribution_(bucket_distribution) {}
-
-  void ThreadMain() override {
-    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
-    FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
-    auto* tcache = root_->thread_cache_for_testing();
-    EXPECT_TRUE(tcache);
-
-    EXPECT_NE(parent_thread_tcache_, tcache);
-  }
-
- private:
-  ThreadCache* parent_thread_tcache_ = nullptr;
-  PartitionRoot* root_ = nullptr;
-  PartitionRoot::BucketDistribution bucket_distribution_;
-};
-
-}  // namespace
-
-TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
-  FillThreadCacheAndReturnIndex(kMediumSize);
-  auto* parent_thread_tcache = root()->thread_cache_for_testing();
-  ASSERT_TRUE(parent_thread_tcache);
-
-  ThreadDelegateForMultipleThreadCaches delegate(parent_thread_tcache, root(),
-                                                 GetParam());
-
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-}
-
-namespace {
-
-class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForThreadCacheReclaimedWhenThreadExits(PartitionRoot* root,
-                                                       void*& other_thread_ptr)
-      : root_(root), other_thread_ptr_(other_thread_ptr) {}
-
-  void ThreadMain() override {
-    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
-    other_thread_ptr_ =
-        root_->Alloc(root_->AdjustSizeForExtrasSubtract(kMediumSize), "");
-    root_->Free(other_thread_ptr_);
-    // |other_thread_ptr| is now in the thread cache.
-  }
-
- private:
-  PartitionRoot* root_ = nullptr;
-  void*& other_thread_ptr_;
-};
-
-}  // namespace
-
-TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
-  // Make sure that there is always at least one object allocated in the test
-  // bucket, so that the PartitionPage is not reclaimed.
-  //
-  // Allocate enough objects to force a cache fill at the next allocation.
-  std::vector<void*> tmp;
-  for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
-    tmp.push_back(
-        root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), ""));
-  }
-
-  void* other_thread_ptr = nullptr;
-  ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
-      root(), other_thread_ptr);
-
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-
-  void* this_thread_ptr =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
-  // |other_thread_ptr| was returned to the central allocator, and is returned
-  // here, as it comes from the freelist.
-  EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
-  root()->Free(other_thread_ptr);
-
-  for (void* ptr : tmp) {
-    root()->Free(ptr);
-  }
-}
-
-namespace {
-
-class ThreadDelegateForThreadCacheRegistry
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
-                                       PartitionRoot* root,
-                                       BucketDistribution bucket_distribution)
-      : parent_thread_tcache_(parent_thread_cache),
-        root_(root),
-        bucket_distribution_(bucket_distribution) {}
-
-  void ThreadMain() override {
-    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
-    FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
-    auto* tcache = root_->thread_cache_for_testing();
-    EXPECT_TRUE(tcache);
-
-    internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
-    EXPECT_EQ(tcache->prev_for_testing(), nullptr);
-    EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
-  }
-
- private:
-  ThreadCache* parent_thread_tcache_ = nullptr;
-  PartitionRoot* root_ = nullptr;
-  BucketDistribution bucket_distribution_;
-};
-
-}  // namespace
-
-TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
-  auto* parent_thread_tcache = root()->thread_cache_for_testing();
-  ASSERT_TRUE(parent_thread_tcache);
-
-#if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
-      BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
-    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  // iOS and MacOS 15 create worker threads internally (start_wqthread).
-  // So thread caches are created for the worker threads, because the threads
-  // allocate memory for initialization (_dispatch_calloc is invoked).
-  // We cannot assume that there is only 1 thread cache here.
-
-  // Regarding Linux, ChromeOS and Android, some other tests may create
-  // non-joinable threads. E.g. FilePathWatcherTest will create a
-  // non-joinable thread in InotifyReader::StartThread(). The thread will
-  // still be running after the tests are finished, and will break
-  // the assumption that only the main thread exists here.
-  {
-    internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
-    EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
-    EXPECT_EQ(parent_thread_tcache->next_, nullptr);
-  }
-#endif
-
-  ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root(),
-                                                GetParam());
-
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-
-#if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
-      BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
-    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
-  EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
-  EXPECT_EQ(parent_thread_tcache->next_, nullptr);
-#endif
-}
-
-#if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
-TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
-  auto* tcache = root()->thread_cache_for_testing();
-  DeltaCounter alloc_counter{tcache->stats_.alloc_count};
-  DeltaCounter alloc_hits_counter{tcache->stats_.alloc_hits};
-  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
-
-  DeltaCounter alloc_miss_empty_counter{tcache->stats_.alloc_miss_empty};
-
-  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
-  DeltaCounter cache_fill_hits_counter{tcache->stats_.cache_fill_hits};
-  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
-
-  // Cache has been purged, first allocation is a miss.
-  void* data =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
-  EXPECT_EQ(1u, alloc_counter.Delta());
-  EXPECT_EQ(1u, alloc_miss_counter.Delta());
-  EXPECT_EQ(0u, alloc_hits_counter.Delta());
-
-  // Cache fill worked.
-  root()->Free(data);
-  EXPECT_EQ(1u, cache_fill_counter.Delta());
-  EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
-  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
-
-  tcache->Purge();
-  cache_fill_counter.Reset();
-  // Buckets are never full, fill always succeeds.
-  size_t allocations = 10;
-  size_t bucket_index = FillThreadCacheAndReturnIndex(
-      kMediumSize, kDefaultCountForMediumBucket + allocations);
-  EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
-            cache_fill_counter.Delta());
-  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
-
-  // Memory footprint.
-  ThreadCacheStats stats;
-  ThreadCacheRegistry::Instance().DumpStats(true, &stats);
-  // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
-  // above the limit (-1), then refilled by batches (1 + floor(allocations /
-  // kFillCountForMediumBucket) times).
-  size_t expected_count =
-      kDefaultCountForMediumBucket / 2 - 1 +
-      (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
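-  // With allocations == 10, expected_count is
-  // 64 / 2 - 1 + (1 + 10 / 8) * 8 = 31 + 16 = 47 slots.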
-  EXPECT_EQ(root()->buckets[bucket_index].slot_size * expected_count,
-            stats.bucket_total_memory);
-  EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
-}
-
-namespace {
-
-class ThreadDelegateForMultipleThreadCachesAccounting
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForMultipleThreadCachesAccounting(
-      PartitionRoot* root,
-      const ThreadCacheStats& wqthread_stats,
-      int alloc_count,
-      BucketDistribution bucket_distribution)
-      : root_(root),
-        bucket_distribution_(bucket_distribution),
-        wqthread_stats_(wqthread_stats),
-        alloc_count_(alloc_count) {}
-
-  void ThreadMain() override {
-    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
-    size_t bucket_index =
-        FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
-
-    ThreadCacheStats stats;
-    ThreadCacheRegistry::Instance().DumpStats(false, &stats);
-    // 2* for this thread and the parent one.
-    EXPECT_EQ(
-        2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
-        stats.bucket_total_memory - wqthread_stats_.bucket_total_memory);
-    EXPECT_EQ(2 * sizeof(ThreadCache),
-              stats.metadata_overhead - wqthread_stats_.metadata_overhead);
-
-    ThreadCacheStats this_thread_cache_stats{};
-    root_->thread_cache_for_testing()->AccumulateStats(
-        &this_thread_cache_stats);
-    EXPECT_EQ(alloc_count_ + this_thread_cache_stats.alloc_count,
-              stats.alloc_count - wqthread_stats_.alloc_count);
-  }
-
- private:
-  PartitionRoot* root_ = nullptr;
-  BucketDistribution bucket_distribution_;
-  const ThreadCacheStats wqthread_stats_;
-  const int alloc_count_;
-};
-
-}  // namespace
-
-TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
-  ThreadCacheStats wqthread_stats{0};
-#if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) ||   \
-      BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
-    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  {
-    // iOS and macOS 15 create worker threads internally (start_wqthread).
-    // Thread caches are created for those worker threads because they
-    // allocate memory during initialization (_dispatch_calloc is invoked).
-    // We need to count the worker threads created by the iOS and macOS
-    // system.
-
-    // Regarding Linux, ChromeOS and Android, some other tests may create
-    // non-joinable threads. E.g. FilePathWatcherTest will create a
-    // non-joinable thread in InotifyReader::StartThread(). That thread will
-    // still be running after the tests are finished. We need to count
-    // the joinable threads here.
-    ThreadCacheRegistry::Instance().DumpStats(false, &wqthread_stats);
-
-    // Remove this thread's thread cache stats from wqthread_stats.
-    ThreadCacheStats this_stats;
-    ThreadCacheRegistry::Instance().DumpStats(true, &this_stats);
-
-    wqthread_stats.alloc_count -= this_stats.alloc_count;
-    wqthread_stats.metadata_overhead -= this_stats.metadata_overhead;
-    wqthread_stats.bucket_total_memory -= this_stats.bucket_total_memory;
-  }
-#endif
-  FillThreadCacheAndReturnIndex(kMediumSize);
-  uint64_t alloc_count = root()->thread_cache_for_testing()->stats_.alloc_count;
-
-  ThreadDelegateForMultipleThreadCachesAccounting delegate(
-      root(), wqthread_stats, alloc_count, GetParam());
-
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-}
-
-#endif  // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
-
-// TODO(https://crbug.com/1287799): Flaky on IOS.
-#if BUILDFLAG(IS_IOS)
-#define MAYBE_PurgeAll DISABLED_PurgeAll
-#else
-#define MAYBE_PurgeAll PurgeAll
-#endif
-
-namespace {
-
-class ThreadDelegateForPurgeAll
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForPurgeAll(PartitionRoot* root,
-                            ThreadCache*& other_thread_tcache,
-                            std::atomic<bool>& other_thread_started,
-                            std::atomic<bool>& purge_called,
-                            int bucket_index,
-                            BucketDistribution bucket_distribution)
-      : root_(root),
-        other_thread_tcache_(other_thread_tcache),
-        other_thread_started_(other_thread_started),
-        purge_called_(purge_called),
-        bucket_index_(bucket_index),
-        bucket_distribution_(bucket_distribution) {}
-
-  void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
-    FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
-    other_thread_tcache_ = root_->thread_cache_for_testing();
-
-    other_thread_started_.store(true, std::memory_order_release);
-    while (!purge_called_.load(std::memory_order_acquire)) {
-    }
-
-    // Purge() was not triggered from the other thread.
-    EXPECT_EQ(kFillCountForSmallBucket,
-              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
-    // Allocations do not trigger Purge().
-    void* data =
-        root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
-    EXPECT_EQ(kFillCountForSmallBucket - 1,
-              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
-    // But deallocations do.
-    root_->Free(data);
-    EXPECT_EQ(0u,
-              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
-  }
-
- private:
-  PartitionRoot* root_ = nullptr;
-  ThreadCache*& other_thread_tcache_;
-  std::atomic<bool>& other_thread_started_;
-  std::atomic<bool>& purge_called_;
-  const int bucket_index_;
-  BucketDistribution bucket_distribution_;
-};
-
-}  // namespace
-
-TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
-PA_NO_THREAD_SAFETY_ANALYSIS {
-  std::atomic<bool> other_thread_started{false};
-  std::atomic<bool> purge_called{false};
-
-  size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
-  ThreadCache* this_thread_tcache = root()->thread_cache_for_testing();
-  ThreadCache* other_thread_tcache = nullptr;
-
-  ThreadDelegateForPurgeAll delegate(root(), other_thread_tcache,
-                                     other_thread_started, purge_called,
-                                     bucket_index, GetParam());
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-
-  while (!other_thread_started.load(std::memory_order_acquire)) {
-  }
-
-  EXPECT_EQ(kFillCountForSmallBucket,
-            this_thread_tcache->bucket_count_for_testing(bucket_index));
-  EXPECT_EQ(kFillCountForSmallBucket,
-            other_thread_tcache->bucket_count_for_testing(bucket_index));
-
-  ThreadCacheRegistry::Instance().PurgeAll();
-  // This thread is synchronously purged.
-  EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
-  // Not the other one.
-  EXPECT_EQ(kFillCountForSmallBucket,
-            other_thread_tcache->bucket_count_for_testing(bucket_index));
-
-  purge_called.store(true, std::memory_order_release);
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
-  auto& registry = ThreadCacheRegistry::Instance();
-  auto NextInterval = [&registry]() {
-    return internal::base::Microseconds(
-        registry.GetPeriodicPurgeNextIntervalInMicroseconds());
-  };
-
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
-
-  // Small amount of memory, the period gets longer.
-  auto* tcache = ThreadCache::Get();
-  ASSERT_LT(tcache->CachedMemory(),
-            ThreadCacheRegistry::kMinCachedMemoryForPurging);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
-
-  // Check that the purge interval is clamped at the maximum value.
-  while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
-    registry.RunPeriodicPurge();
-  }
-  registry.RunPeriodicPurge();
-
-  // Not enough memory to decrease the interval.
-  FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging +
-                            1);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
-
-  FillThreadCacheWithMemory(
-      2 * ThreadCacheRegistry::kMinCachedMemoryForPurging + 1);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
-
-  // Enough memory, interval doesn't change.
-  FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
-
-  // No cached memory, increase the interval.
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
-
-  // Cannot test the very large size with only one thread; this is tested
-  // below in the multiple threads test.
-}
-
-namespace {
-
-void FillThreadCacheWithMemory(PartitionRoot* root,
-                               size_t target_cached_memory,
-                               BucketDistribution bucket_distribution) {
-  for (int batch : {1, 2, 4, 8, 16}) {
-    for (size_t allocation_size = 1;
-         allocation_size <= ThreadCache::kLargeSizeThreshold;
-         allocation_size++) {
-      FillThreadCacheAndReturnIndex(
-          root, root->AdjustSizeForExtrasAdd(allocation_size),
-          bucket_distribution, batch);
-
-      if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
-        return;
-      }
-    }
-  }
-
-  ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
-}
-
-class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
-      PartitionRoot* root,
-      std::atomic<int>& allocations_done,
-      std::atomic<bool>& can_finish,
-      BucketDistribution bucket_distribution)
-      : root_(root),
-        allocations_done_(allocations_done),
-        can_finish_(can_finish),
-        bucket_distribution_(bucket_distribution) {}
-
-  void ThreadMain() override {
-    FillThreadCacheWithMemory(
-        root_, 5 * ThreadCacheRegistry::kMinCachedMemoryForPurging,
-        bucket_distribution_);
-    allocations_done_.fetch_add(1, std::memory_order_release);
-
-    // This thread needs to be alive when the next periodic purge task runs.
-    while (!can_finish_.load(std::memory_order_acquire)) {
-    }
-  }
-
- private:
-  PartitionRoot* root_ = nullptr;
-  std::atomic<int>& allocations_done_;
-  std::atomic<bool>& can_finish_;
-  BucketDistribution bucket_distribution_;
-};
-
-}  // namespace
-
-// Disabled due to flakiness: crbug.com/1220371
-TEST_P(PartitionAllocThreadCacheTest,
-       DISABLED_PeriodicPurgeSumsOverAllThreads) {
-  auto& registry = ThreadCacheRegistry::Instance();
-  auto NextInterval = [&registry]() {
-    return internal::base::Microseconds(
-        registry.GetPeriodicPurgeNextIntervalInMicroseconds());
-  };
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
-
-  // Small amount of memory, the period gets longer.
-  auto* tcache = ThreadCache::Get();
-  ASSERT_LT(tcache->CachedMemory(),
-            ThreadCacheRegistry::kMinCachedMemoryForPurging);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
-
-  // Check that the purge interval is clamped at the maximum value.
-  while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
-    registry.RunPeriodicPurge();
-  }
-  registry.RunPeriodicPurge();
-
-  // Not enough memory on this thread to decrease the interval.
-  FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging /
-                            2);
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
-
-  std::atomic<int> allocations_done{0};
-  std::atomic<bool> can_finish{false};
-  ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
-      root(), allocations_done, can_finish, GetParam());
-
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-  internal::base::PlatformThreadHandle thread_handle_2;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle_2);
-
-  while (allocations_done.load(std::memory_order_acquire) != 2) {
-    internal::base::PlatformThreadForTesting::YieldCurrentThread();
-  }
-
-  // Many allocations on the other thread.
-  registry.RunPeriodicPurge();
-  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
-
-  can_finish.store(true, std::memory_order_release);
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-  internal::base::PlatformThreadForTesting::Join(thread_handle_2);
-}
-
-// TODO(https://crbug.com/1287799): Flaky on IOS.
-#if BUILDFLAG(IS_IOS)
-#define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
-#else
-#define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
-#endif
-TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
-  auto* tcache = root()->thread_cache_for_testing();
-  size_t bucket_index =
-      FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);
-
-  EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
-
-  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-      ThreadCache::kDefaultMultiplier / 2);
-  // No immediate batch deallocation.
-  EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
-  void* data =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
-  // Not triggered by allocations.
-  EXPECT_EQ(kDefaultCountForMediumBucket - 1,
-            tcache->buckets_[bucket_index].count);
-
-  // Free() triggers the purge within limits.
-  root()->Free(data);
-  EXPECT_LE(tcache->buckets_[bucket_index].count,
-            kDefaultCountForMediumBucket / 2);
-
-  // Won't go above anymore.
-  FillThreadCacheAndReturnIndex(kMediumSize, 1000);
-  EXPECT_LE(tcache->buckets_[bucket_index].count,
-            kDefaultCountForMediumBucket / 2);
-
-  // Limit can be raised.
-  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-      ThreadCache::kDefaultMultiplier * 2);
-  FillThreadCacheAndReturnIndex(kMediumSize, 1000);
-  EXPECT_GT(tcache->buckets_[bucket_index].count,
-            kDefaultCountForMediumBucket / 2);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
-  auto* tcache = root()->thread_cache_for_testing();
-
-  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-      ThreadCache::kDefaultMultiplier / 1000.);
-  for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
-    // Invalid bucket.
-    if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
-      EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
-      continue;
-    }
-    EXPECT_GE(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 1u);
-  }
-
-  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-      ThreadCache::kDefaultMultiplier * 1000.);
-  for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
-    // Invalid bucket.
-    if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
-      EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
-      continue;
-    }
-    EXPECT_LT(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 0xff);
-  }
-}
-
-// TODO(https://crbug.com/1287799): Flaky on IOS.
-#if BUILDFLAG(IS_IOS)
-#define MAYBE_DynamicCountPerBucketMultipleThreads \
-  DISABLED_DynamicCountPerBucketMultipleThreads
-#else
-#define MAYBE_DynamicCountPerBucketMultipleThreads \
-  DynamicCountPerBucketMultipleThreads
-#endif
-
-namespace {
-
-class ThreadDelegateForDynamicCountPerBucketMultipleThreads
-    : public internal::base::PlatformThreadForTesting::Delegate {
- public:
-  ThreadDelegateForDynamicCountPerBucketMultipleThreads(
-      PartitionRoot* root,
-      std::atomic<bool>& other_thread_started,
-      std::atomic<bool>& threshold_changed,
-      int bucket_index,
-      BucketDistribution bucket_distribution)
-      : root_(root),
-        other_thread_started_(other_thread_started),
-        threshold_changed_(threshold_changed),
-        bucket_index_(bucket_index),
-        bucket_distribution_(bucket_distribution) {}
-
-  void ThreadMain() override {
-    FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_,
-                                  kDefaultCountForSmallBucket + 10);
-    auto* this_thread_tcache = root_->thread_cache_for_testing();
-    // More than the default since the multiplier has changed.
-    EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
-              kDefaultCountForSmallBucket + 10);
-
-    other_thread_started_.store(true, std::memory_order_release);
-    while (!threshold_changed_.load(std::memory_order_acquire)) {
-    }
-
-    void* data =
-        root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
-    // Deallocations trigger limit enforcement.
-    root_->Free(data);
-    // Since the bucket is too full, it gets halved by batched deallocation.
-    EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
-              this_thread_tcache->bucket_count_for_testing(bucket_index_));
-  }
-
- private:
-  PartitionRoot* root_ = nullptr;
-  std::atomic<bool>& other_thread_started_;
-  std::atomic<bool>& threshold_changed_;
-  const int bucket_index_;
-  PartitionRoot::BucketDistribution bucket_distribution_;
-};
-
-}  // namespace
-
-TEST_P(PartitionAllocThreadCacheTest,
-       MAYBE_DynamicCountPerBucketMultipleThreads) {
-  std::atomic<bool> other_thread_started{false};
-  std::atomic<bool> threshold_changed{false};
-
-  auto* tcache = root()->thread_cache_for_testing();
-  size_t bucket_index =
-      FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
-  EXPECT_EQ(kDefaultCountForSmallBucket, tcache->buckets_[bucket_index].count);
-
-  // Change the ratio before starting the threads, checking that it will be
-  // applied to newly-created threads.
-  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
-      ThreadCache::kDefaultMultiplier + 1);
-
-  ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
-      root(), other_thread_started, threshold_changed, bucket_index,
-      GetParam());
-
-  internal::base::PlatformThreadHandle thread_handle;
-  internal::base::PlatformThreadForTesting::Create(0, &delegate,
-                                                   &thread_handle);
-
-  while (!other_thread_started.load(std::memory_order_acquire)) {
-  }
-
-  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
-  threshold_changed.store(true, std::memory_order_release);
-
-  internal::base::PlatformThreadForTesting::Join(thread_handle);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
-  auto* tcache = root()->thread_cache_for_testing();
-  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
-  DeltaCounter alloc_miss_too_large_counter{
-      tcache->stats_.alloc_miss_too_large};
-  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
-  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
-
-  // Default threshold at first.
-  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
-  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
-
-  EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
-  EXPECT_EQ(1u, cache_fill_counter.Delta());
-
-  // Too large to be cached.
-  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
-  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
-
-  // Increase.
-  ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
-  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
-  // No new miss.
-  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
-
-  // Lower.
-  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
-  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
-  EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());
-
-  // Value is clamped.
-  size_t too_large = 1024 * 1024;
-  ThreadCache::SetLargestCachedSize(too_large);
-  FillThreadCacheAndReturnIndex(too_large);
-  EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
-}
-
-// Disabled due to flakiness: crbug.com/1287811
-TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
-  auto* tcache = root()->thread_cache_for_testing();
-  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
-  DeltaCounter alloc_miss_too_large_counter{
-      tcache->stats_.alloc_miss_too_large};
-  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
-  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
-
-  // Cache large allocations.
-  size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
-  ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
-  size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
-  EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
-
-  // Lower.
-  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
-  FillThreadCacheAndReturnIndex(large_allocation_size);
-  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
-
-  // There is memory trapped in the cache bucket.
-  EXPECT_GT(tcache->buckets_[index].count, 0u);
-
-  // Which is reclaimed by Purge().
-  tcache->Purge();
-  EXPECT_EQ(0u, tcache->buckets_[index].count);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
-  auto count_items = [](ThreadCache* tcache, size_t index) {
-    uint8_t count = 0;
-    auto* head = tcache->buckets_[index].freelist_head;
-    while (head) {
-      head =
-          head->GetNextForThreadCache<true>(tcache->buckets_[index].slot_size);
-      count++;
-    }
-    return count;
-  };
-
-  auto* tcache = root()->thread_cache_for_testing();
-  size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
-  ASSERT_GE(count_items(tcache, index), 10);
-  void* head = tcache->buckets_[index].freelist_head;
-
-  for (size_t limit : {8, 3, 1}) {
-    tcache->ClearBucket(tcache->buckets_[index], limit);
-    EXPECT_EQ(head, static_cast<void*>(tcache->buckets_[index].freelist_head));
-    EXPECT_EQ(count_items(tcache, index), limit);
-  }
-  tcache->ClearBucket(tcache->buckets_[index], 0);
-  EXPECT_EQ(nullptr, static_cast<void*>(tcache->buckets_[index].freelist_head));
-}
-
-// TODO(https://crbug.com/1287799): Flaky on IOS.
-#if BUILDFLAG(IS_IOS)
-#define MAYBE_Bookkeeping DISABLED_Bookkeeping
-#else
-#define MAYBE_Bookkeeping Bookkeeping
-#endif
-TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
-  void* arr[kFillCountForMediumBucket] = {};
-  auto* tcache = root()->thread_cache_for_testing();
-
-  root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
-                      PurgeFlags::kDiscardUnusedSystemPages);
-  root()->ResetBookkeepingForTesting();
-
-  // The ThreadCache is allocated before we change buckets, so its size is
-  // always based on the neutral distribution.
-  size_t tc_bucket_index = root()->SizeToBucketIndex(
-      sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
-  auto* tc_bucket = &root()->buckets[tc_bucket_index];
-  size_t expected_allocated_size =
-      tc_bucket->slot_size;  // For the ThreadCache itself.
-  size_t expected_committed_size = kUseLazyCommit
-                                       ? internal::SystemPageSize()
-                                       : tc_bucket->get_bytes_per_span();
-
-  EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
-  EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
-  EXPECT_EQ(expected_allocated_size,
-            root()->get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
-
-  void* ptr =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
-
-  auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
-  size_t medium_alloc_size = medium_bucket->slot_size;
-  expected_allocated_size += medium_alloc_size;
-  expected_committed_size += kUseLazyCommit
-                                 ? internal::SystemPageSize()
-                                 : medium_bucket->get_bytes_per_span();
-
-  EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
-  EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
-  EXPECT_EQ(expected_allocated_size,
-            root()->get_total_size_of_allocated_bytes());
-  EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
-
-  expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;
-
-  // These allocations all come from the thread-cache.
-  for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
-    arr[i] =
-        root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
-    EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
-    EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
-    EXPECT_EQ(expected_allocated_size,
-              root()->get_total_size_of_allocated_bytes());
-    EXPECT_EQ(expected_allocated_size,
-              root()->get_max_size_of_allocated_bytes());
-    EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
-              tcache->CachedMemory());
-  }
-
-  EXPECT_EQ(0U, tcache->CachedMemory());
-
-  root()->Free(ptr);
-
-  for (auto*& el : arr) {
-    root()->Free(el);
-  }
-  EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
-            expected_allocated_size);
-  tcache->Purge();
-  EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
-            GetBucketSizeForThreadCache());
-}
-
-TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
-  auto* tcache = root()->thread_cache_for_testing();
-  tcache->TryPurge();
-}
-
-TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
-  auto* tcache = root()->thread_cache_for_testing();
-
-  void* ptr =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
-
-  auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
-
-  auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
-  curr = curr->GetNextForThreadCache<true>(kMediumSize);
-  curr->CorruptNextForTesting(0x12345678);
-  tcache->TryPurge();
-  curr->SetNext(nullptr);
-  root()->Free(ptr);
-}
-
-TEST(AlternateBucketDistributionTest, SizeToIndex) {
-  using internal::BucketIndexLookup;
-
-  // The first 12 buckets are the same as the default bucket index.
-  for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
-    for (size_t offset = 0; offset < 4; offset++) {
-      size_t n = i * (4 + offset) / 4;
-      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
-                BucketIndexLookup::GetIndexForNeutralBuckets(n));
-    }
-  }
-
-  // The alternate bucket distribution is different in the middle values.
-  //
-  // For each order, the top two buckets are removed compared with the default
-  // distribution. Values that would be allocated in those two buckets are
-  // instead allocated in the next power of two bucket.
-  //
-  // The first two buckets (each power of two and the next bucket up) remain
-  // the same between the two bucket distributions.
-  size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
-  for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
-       i <<= 1) {
-    // The first two buckets in the order should match up to the normal bucket
-    // distribution.
-    for (size_t offset = 0; offset < 2; offset++) {
-      size_t n = i * (4 + offset) / 4;
-      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
-                BucketIndexLookup::GetIndexForNeutralBuckets(n));
-      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
-      expected_index += 2;
-    }
-    // The last two buckets in the order are "rounded up" to the same bucket
-    // as the next power of two.
-    expected_index += 4;
-    for (size_t offset = 2; offset < 4; offset++) {
-      size_t n = i * (4 + offset) / 4;
-      // These two are rounded up in the alternate distribution, so we expect
-      // the bucket index to be larger than the bucket index for the same
-      // allocation under the default distribution.
-      EXPECT_GT(BucketIndexLookup::GetIndex(n),
-                BucketIndexLookup::GetIndexForNeutralBuckets(n));
-      // We expect both allocations in this loop to be rounded up to the next
-      // power of two bucket.
-      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
-    }
-  }
-
-  // The rest of the buckets all match up exactly with the existing
-  // bucket distribution.
-  for (size_t i = internal::kHighThresholdForAlternateDistribution;
-       i < internal::kMaxBucketed; i <<= 1) {
-    for (size_t offset = 0; offset < 4; offset++) {
-      size_t n = i * (4 + offset) / 4;
-      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
-                BucketIndexLookup::GetIndexForNeutralBuckets(n));
-    }
-  }
-}
-
-TEST_P(PartitionAllocThreadCacheTest, AllocationRecording) {
-  // There is a cache.
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_TRUE(tcache);
-  tcache->ResetPerThreadAllocationStatsForTesting();
-
-  constexpr size_t kBucketedNotCached = 1 << 12;
-  constexpr size_t kDirectMapped = 4 * (1 << 20);
-  // Not a "nice" size on purpose, to check that the raw size accounting works.
-  const size_t kSingleSlot = internal::PartitionPageSize() + 1;
-
-  size_t expected_total_size = 0;
-  void* ptr =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
-  ASSERT_TRUE(ptr);
-  expected_total_size += root()->GetUsableSize(ptr);
-  void* ptr2 = root()->Alloc(
-      root()->AdjustSizeForExtrasSubtract(kBucketedNotCached), "");
-  ASSERT_TRUE(ptr2);
-  expected_total_size += root()->GetUsableSize(ptr2);
-  void* ptr3 =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kDirectMapped), "");
-  ASSERT_TRUE(ptr3);
-  expected_total_size += root()->GetUsableSize(ptr3);
-  void* ptr4 =
-      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSingleSlot), "");
-  ASSERT_TRUE(ptr4);
-  expected_total_size += root()->GetUsableSize(ptr4);
-
-  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
-  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
-
-  root()->Free(ptr);
-  root()->Free(ptr2);
-  root()->Free(ptr3);
-  root()->Free(ptr4);
-
-  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
-  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
-  EXPECT_EQ(4u, tcache->thread_alloc_stats().dealloc_count);
-  EXPECT_EQ(expected_total_size,
-            tcache->thread_alloc_stats().dealloc_total_size);
-
-  auto stats = internal::GetAllocStatsForCurrentThread();
-  EXPECT_EQ(4u, stats.alloc_count);
-  EXPECT_EQ(expected_total_size, stats.alloc_total_size);
-  EXPECT_EQ(4u, stats.dealloc_count);
-  EXPECT_EQ(expected_total_size, stats.dealloc_total_size);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingAligned) {
-  // There is a cache.
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_TRUE(tcache);
-  tcache->ResetPerThreadAllocationStatsForTesting();
-
-  // Aligned allocations take different paths depending on whether they are (in
-  // the same order as the test cases below):
-  // - Not really aligned (since alignment is always good-enough)
-  // - Already satisfied by PA's alignment guarantees
-  // - Requiring extra padding
-  // - Already satisfied by PA's alignment guarantees
-  // - In need of a special slot span (very large alignment)
-  // - Direct-mapped with large alignment
-  size_t alloc_count = 0;
-  size_t total_size = 0;
-  size_t size_alignments[][2] = {{128, 4},
-                                 {128, 128},
-                                 {1024, 128},
-                                 {128, 1024},
-                                 {128, 2 * internal::PartitionPageSize()},
-                                 {(4 << 20) + 1, 1 << 19}};
-  for (auto [requested_size, alignment] : size_alignments) {
-    void* ptr = root()->AlignedAllocWithFlags(0, alignment, requested_size);
-    ASSERT_TRUE(ptr);
-    alloc_count++;
-    total_size += root()->GetUsableSize(ptr);
-    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
-    EXPECT_EQ(total_size, tcache->thread_alloc_stats().alloc_total_size);
-    root()->Free(ptr);
-    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().dealloc_count);
-    EXPECT_EQ(total_size, tcache->thread_alloc_stats().dealloc_total_size);
-  }
-
-  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
-            tcache->thread_alloc_stats().dealloc_total_size);
-
-  auto stats = internal::GetAllocStatsForCurrentThread();
-  EXPECT_EQ(alloc_count, stats.alloc_count);
-  EXPECT_EQ(total_size, stats.alloc_total_size);
-  EXPECT_EQ(alloc_count, stats.dealloc_count);
-  EXPECT_EQ(total_size, stats.dealloc_total_size);
-}
-
-TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingRealloc) {
-  // There is a cache.
-  auto* tcache = root()->thread_cache_for_testing();
-  EXPECT_TRUE(tcache);
-  tcache->ResetPerThreadAllocationStatsForTesting();
-
-  size_t alloc_count = 0;
-  size_t dealloc_count = 0;
-  size_t total_alloc_size = 0;
-  size_t total_dealloc_size = 0;
-  size_t size_new_sizes[][2] = {
-      {16, 15},
-      {16, 64},
-      {16, internal::PartitionPageSize() + 1},
-      {4 << 20, 8 << 20},
-      {8 << 20, 4 << 20},
-      {(8 << 20) - internal::SystemPageSize(), 8 << 20}};
-  for (auto [size, new_size] : size_new_sizes) {
-    void* ptr = root()->Alloc(size, "");
-    ASSERT_TRUE(ptr);
-    alloc_count++;
-    size_t usable_size = root()->GetUsableSize(ptr);
-    total_alloc_size += usable_size;
-
-    ptr = root()->Realloc(ptr, new_size, "");
-    ASSERT_TRUE(ptr);
-    total_dealloc_size += usable_size;
-    dealloc_count++;
-    usable_size = root()->GetUsableSize(ptr);
-    total_alloc_size += usable_size;
-    alloc_count++;
-
-    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
-    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
-    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
-    EXPECT_EQ(total_dealloc_size,
-              tcache->thread_alloc_stats().dealloc_total_size)
-        << new_size;
-
-    root()->Free(ptr);
-    dealloc_count++;
-    total_dealloc_size += usable_size;
-
-    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
-    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
-    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
-    EXPECT_EQ(total_dealloc_size,
-              tcache->thread_alloc_stats().dealloc_total_size);
-  }
-  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
-            tcache->thread_alloc_stats().dealloc_total_size);
-}
-
-// This test makes sure it's safe to switch to the alternate bucket
-// distribution at runtime. This is intended to happen once, early during
-// Chrome startup, once features have been enabled.
-TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
-  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
-  PartitionRoot* root = allocator->root();
-
-  root->SwitchToDenserBucketDistribution();
-  constexpr size_t n = (1 << 12) * 3 / 2;
-  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
-            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));
-
-  void* ptr = root->Alloc(n, "");
-
-  root->ResetBucketDistributionForTesting();
-
-  root->Free(ptr);
-}
-
-// This test makes sure it's safe to switch to the alternate bucket
-// distribution at runtime. This is intended to happen once, early during
-// Chrome startup, once features have been enabled.
-TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
-  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
-  constexpr size_t n = (1 << 12) * 3 / 2;
-  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
-            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));
-
-  PartitionRoot* root = allocator->root();
-  void* ptr = root->Alloc(n, "");
-
-  root->SwitchToDenserBucketDistribution();
-
-  void* ptr2 = root->Alloc(n, "");
-
-  root->Free(ptr2);
-  root->Free(ptr);
-}
-
-}  // namespace partition_alloc
-
-#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
-        // PA_CONFIG(THREAD_CACHE_SUPPORTED)
diff --git a/base/allocator/partition_allocator/thread_isolation/alignment.h b/base/allocator/partition_allocator/thread_isolation/alignment.h
deleted file mode 100644
index 493a1bd..0000000
--- a/base/allocator/partition_allocator/thread_isolation/alignment.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_ALIGNMENT_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_ALIGNMENT_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-
-#define PA_THREAD_ISOLATED_ALIGN_SZ partition_alloc::internal::SystemPageSize()
-#define PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK (PA_THREAD_ISOLATED_ALIGN_SZ - 1)
-#define PA_THREAD_ISOLATED_ALIGN_BASE_MASK \
-  (~PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK)
-#define PA_THREAD_ISOLATED_ALIGN alignas(PA_THREAD_ISOLATED_ALIGN_SZ)
-
-#define PA_THREAD_ISOLATED_FILL_PAGE_SZ(size)        \
-  ((PA_THREAD_ISOLATED_ALIGN_SZ -                    \
-    ((size)&PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK)) % \
-   PA_THREAD_ISOLATED_ALIGN_SZ)
-// Calculate the required padding so that the last element of a page-aligned
-// array lands on a page boundary. In other words, calculate the padding so
-// that the size of (count-1) elements is a multiple of the page size.
-// The offset parameter additionally skips bytes in the object, i.e.
-// object+offset will be page-aligned.
-#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(Type, count, offset) \
-  PA_THREAD_ISOLATED_FILL_PAGE_SZ(sizeof(Type) * (count - 1) + offset)
-
-#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ(Type, count) \
-  PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(Type, count, 0)
-
-#else  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#define PA_THREAD_ISOLATED_ALIGN
-#define PA_THREAD_ISOLATED_FILL_PAGE_SZ(size) 0
-#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ(Type, size) 0
-#define PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(Type, size, offset) 0
-
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_ALIGNMENT_H_
diff --git a/base/allocator/partition_allocator/thread_isolation/pkey.cc b/base/allocator/partition_allocator/thread_isolation/pkey.cc
deleted file mode 100644
index 9f2f792..0000000
--- a/base/allocator/partition_allocator/thread_isolation/pkey.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/thread_isolation/pkey.h"
-
-#if BUILDFLAG(ENABLE_PKEYS)
-
-#include <errno.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-
-#if !BUILDFLAG(IS_LINUX)
-#error "This pkey code is currently only supported on Linux"
-#endif
-
-namespace partition_alloc::internal {
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool CPUHasPkeySupport() {
-  return base::CPU::GetInstanceNoAllocation().has_pku();
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-int PkeyMprotect(void* addr, size_t len, int prot, int pkey) {
-  return syscall(SYS_pkey_mprotect, addr, len, prot, pkey);
-}
-
-void TagMemoryWithPkey(int pkey, void* address, size_t size) {
-  PA_DCHECK((reinterpret_cast<uintptr_t>(address) &
-             PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK) == 0);
-  PA_PCHECK(PkeyMprotect(address,
-                         (size + PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK) &
-                             PA_THREAD_ISOLATED_ALIGN_BASE_MASK,
-                         PROT_READ | PROT_WRITE, pkey) == 0);
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-int PkeyAlloc(int access_rights) {
-  return syscall(SYS_pkey_alloc, 0, access_rights);
-}
-
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-void PkeyFree(int pkey) {
-  PA_PCHECK(syscall(SYS_pkey_free, pkey) == 0);
-}
-
-uint32_t Rdpkru() {
-  uint32_t pkru;
-  asm volatile(".byte 0x0f,0x01,0xee\n" : "=a"(pkru) : "c"(0), "d"(0));
-  return pkru;
-}
-
-void Wrpkru(uint32_t pkru) {
-  asm volatile(".byte 0x0f,0x01,0xef\n" : : "a"(pkru), "c"(0), "d"(0));
-}
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
-    : saved_pkey_value_(kDefaultPkeyValue) {
-  if (!ThreadIsolationSettings::settings.enabled) {
-    return;
-  }
-  saved_pkey_value_ = Rdpkru();
-  if (saved_pkey_value_ != kDefaultPkeyValue) {
-    Wrpkru(kAllowAllPkeyValue);
-  }
-}
-
-LiftPkeyRestrictionsScope::~LiftPkeyRestrictionsScope() {
-  if (!ThreadIsolationSettings::settings.enabled) {
-    return;
-  }
-  if (Rdpkru() != saved_pkey_value_) {
-    Wrpkru(saved_pkey_value_);
-  }
-}
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(ENABLE_PKEYS)
diff --git a/base/allocator/partition_allocator/thread_isolation/pkey.h b/base/allocator/partition_allocator/thread_isolation/pkey.h
deleted file mode 100644
index 418c70c..0000000
--- a/base/allocator/partition_allocator/thread_isolation/pkey.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_PKEY_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_PKEY_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(ENABLE_PKEYS)
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-#include "base/allocator/partition_allocator/thread_isolation/alignment.h"
-
-#include <cstddef>
-#include <cstdint>
-
-namespace partition_alloc::internal {
-
-constexpr int kDefaultPkey = 0;
-constexpr int kInvalidPkey = -1;
-
-// Check if the CPU supports pkeys.
-bool CPUHasPkeySupport();
-
-// A wrapper around the pkey_mprotect syscall.
-[[nodiscard]] int PkeyMprotect(void* addr, size_t len, int prot, int pkey);
-
-void TagMemoryWithPkey(int pkey, void* address, size_t size);
-
-int PkeyAlloc(int access_rights);
-
-void PkeyFree(int pkey);
-
-// Read the pkru register (the current pkey state).
-uint32_t Rdpkru();
-
-// Write the pkru register (the current pkey state).
-void Wrpkru(uint32_t pkru);
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LiftPkeyRestrictionsScope {
- public:
-  static constexpr uint32_t kDefaultPkeyValue = 0x55555554;
-  static constexpr uint32_t kAllowAllPkeyValue = 0x0;
-
-  LiftPkeyRestrictionsScope();
-  ~LiftPkeyRestrictionsScope();
-
- private:
-  uint32_t saved_pkey_value_;
-};
-
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(ENABLE_PKEYS)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_PKEY_H_
diff --git a/base/allocator/partition_allocator/thread_isolation/pkey_unittest.cc b/base/allocator/partition_allocator/thread_isolation/pkey_unittest.cc
deleted file mode 100644
index c9fa911..0000000
--- a/base/allocator/partition_allocator/thread_isolation/pkey_unittest.cc
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-
-#if BUILDFLAG(ENABLE_PKEYS)
-
-#include <link.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-
-#include "base/allocator/partition_allocator/address_space_stats.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/thread_isolation/pkey.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#define ISOLATED_FUNCTION extern "C" __attribute__((used))
-constexpr size_t kIsolatedThreadStackSize = 64 * 1024;
-constexpr int kNumPkey = 16;
-constexpr size_t kTestReturnValue = 0x8765432187654321llu;
-constexpr uint32_t kPKRUAllowAccessNoWrite = 0b10101010101010101010101010101000;
-
-namespace partition_alloc::internal {
-
-struct PA_THREAD_ISOLATED_ALIGN IsolatedGlobals {
-  int pkey = kInvalidPkey;
-  void* stack;
-  partition_alloc::internal::base::NoDestructor<
-      partition_alloc::PartitionAllocator>
-      allocator{};
-} isolated_globals;
-
-int ProtFromSegmentFlags(ElfW(Word) flags) {
-  int prot = 0;
-  if (flags & PF_R) {
-    prot |= PROT_READ;
-  }
-  if (flags & PF_W) {
-    prot |= PROT_WRITE;
-  }
-  if (flags & PF_X) {
-    prot |= PROT_EXEC;
-  }
-  return prot;
-}
-
-int ProtectROSegments(struct dl_phdr_info* info, size_t info_size, void* data) {
-  if (!strcmp(info->dlpi_name, "linux-vdso.so.1")) {
-    return 0;
-  }
-  for (int i = 0; i < info->dlpi_phnum; i++) {
-    const ElfW(Phdr)* phdr = &info->dlpi_phdr[i];
-    if (phdr->p_type != PT_LOAD && phdr->p_type != PT_GNU_RELRO) {
-      continue;
-    }
-    if (phdr->p_flags & PF_W) {
-      continue;
-    }
-    uintptr_t start = info->dlpi_addr + phdr->p_vaddr;
-    uintptr_t end = start + phdr->p_memsz;
-    uintptr_t start_page = RoundDownToSystemPage(start);
-    uintptr_t end_page = RoundUpToSystemPage(end);
-    uintptr_t size = end_page - start_page;
-    PA_PCHECK(PkeyMprotect(reinterpret_cast<void*>(start_page), size,
-                           ProtFromSegmentFlags(phdr->p_flags),
-                           isolated_globals.pkey) == 0);
-  }
-  return 0;
-}
-
-class PkeyTest : public testing::Test {
- protected:
-  static void PkeyProtectMemory() {
-    PA_PCHECK(dl_iterate_phdr(ProtectROSegments, nullptr) == 0);
-
-    PA_PCHECK(PkeyMprotect(&isolated_globals, sizeof(isolated_globals),
-                           PROT_READ | PROT_WRITE, isolated_globals.pkey) == 0);
-
-    PA_PCHECK(PkeyMprotect(isolated_globals.stack, kIsolatedThreadStackSize,
-                           PROT_READ | PROT_WRITE, isolated_globals.pkey) == 0);
-  }
-
-  static void InitializeIsolatedThread() {
-    isolated_globals.stack =
-        mmap(nullptr, kIsolatedThreadStackSize, PROT_READ | PROT_WRITE,
-             MAP_ANONYMOUS | MAP_PRIVATE | MAP_STACK, -1, 0);
-    PA_PCHECK(isolated_globals.stack != MAP_FAILED);
-
-    PkeyProtectMemory();
-  }
-
-  void SetUp() override {
-    // SetUp only once, but we can't do it in SetUpTestSuite since that runs
-    // before other PartitionAlloc initialization has happened.
-    if (isolated_globals.pkey != kInvalidPkey) {
-      return;
-    }
-
-    int pkey = PkeyAlloc(0);
-    if (pkey == -1) {
-      return;
-    }
-    isolated_globals.pkey = pkey;
-
-    isolated_globals.allocator->init(PartitionOptions{
-        .aligned_alloc = PartitionOptions::AlignedAlloc::kAllowed,
-        .thread_isolation = ThreadIsolationOption(isolated_globals.pkey),
-    });
-
-    InitializeIsolatedThread();
-
-    Wrpkru(kPKRUAllowAccessNoWrite);
-  }
-
-  static void TearDownTestSuite() {
-    if (isolated_globals.pkey == kInvalidPkey) {
-      return;
-    }
-    PA_PCHECK(PkeyMprotect(&isolated_globals, sizeof(isolated_globals),
-                           PROT_READ | PROT_WRITE, kDefaultPkey) == 0);
-    isolated_globals.pkey = kDefaultPkey;
-    InitializeIsolatedThread();
-    PkeyFree(isolated_globals.pkey);
-  }
-};
-
-// This code will run with access limited to pkey 1, no default pkey access.
-// Note that we're stricter than required, for debugging purposes.
-// In the final use, we'll likely allow at least read access to the default
-// pkey.
-ISOLATED_FUNCTION uint64_t IsolatedAllocFree(void* arg) {
-  char* buf = (char*)isolated_globals.allocator->root()->AllocWithFlagsNoHooks(
-      0, 1024, partition_alloc::PartitionPageSize());
-  if (!buf) {
-    return 0xffffffffffffffffllu;
-  }
-  isolated_globals.allocator->root()->FreeNoHooks(buf);
-
-  return kTestReturnValue;
-}
-
-// This test is a bit complicated. We want to ensure that the code
-// allocating/freeing from the pkey pool doesn't *unexpectedly* access memory
-// tagged with the default pkey (pkey 0). This could be a security issue since
-// in our CFI threat model that memory might be attacker controlled.
-// To test for this, we run alloc/free without access to the default pkey. In
-// order to do this, we need to tag all global read-only memory with our pkey
-// as well as switch to a pkey-tagged stack.
-TEST_F(PkeyTest, AllocWithoutDefaultPkey) {
-  if (isolated_globals.pkey == kInvalidPkey) {
-    return;
-  }
-
-  uint64_t ret;
-  uint32_t pkru_value = 0;
-  for (int pkey = 0; pkey < kNumPkey; pkey++) {
-    if (pkey != isolated_globals.pkey) {
-      pkru_value |= (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) << (2 * pkey);
-    }
-  }
-
-  // Switch to the safe stack with inline assembly.
-  //
-  // The simple solution would be to use one asm statement as a prologue to
-  // switch to the protected stack and a second one to switch it back. However,
-  // that doesn't work since inline assembly doesn't support a clobbered stack
-  // register. So instead, we switch the stack, perform a function call to
-  // the actual code and switch back afterwards.
-  //
-  // The inline asm docs mention that special care must be taken
-  // when calling a function in inline assembly. I.e. we will
-  // need to make sure that we follow the ABI of the platform.
-  // In this example, we use the System-V ABI.
-  //
-  // == Caller-saved registers ==
-  // We had two ideas for handling caller-saved registers. Option 1 was chosen,
-  // but I'll describe both to show why option 2 didn't work out:
-  // * Option 1) mark all caller-saved registers as clobbered. This should be
-  //             in line with how the compiler would create the function call.
-  //             Problem: future additions to caller-saved registers can break
-  //             this.
-  // * Option 2) use attribute no_caller_saved_registers. This prohibits use of
-  //             sse/mmx/x87. We can disable sse/mmx with a "target" attribute,
-  //             but I couldn't find a way to disable x87.
-  //             The docs tell you to use -mgeneral-regs-only. Maybe we
-  //             could move the isolated code to a separate file and then
-  //             use that flag for compiling that file only.
-  //             !!! This doesn't work: the inner function can call out to code
-  //             that uses caller-saved registers and won't save
-  //             them itself.
-  //
-  // == stack alignment ==
-  // The ABI requires us to have a 16 byte aligned rsp on function
-  // entry. We push one qword onto the stack so we need to subtract
-  // an additional 8 bytes from the stack pointer.
-  //
-  // == additional clobbering ==
-  // As described above, we need to clobber everything besides
-  // callee-saved registers. The ABI requires all x87 registers to
-  // be set to empty on fn entry / return,
-  // so we should tell the compiler that this is the case. As I understand the
-  // docs, this is done by marking them as clobbered. Worst case, we'll notice
-  // any issues quickly and can fix them if it turns out to be false.
-  //
-  // == direction flag ==
-  // Theoretically, the DF flag could be set to 1 at asm entry. If this
-  // leads to problems, we might have to zero it before the fn call and
-  // restore it afterwards. I would have assumed that marking flags as
-  // clobbered would require the compiler to reset the DF before the next fn
-  // call, but that doesn't seem to be the case.
-  asm volatile(
-      // Set pkru to only allow access to pkey 1 memory.
-      ".byte 0x0f,0x01,0xef\n"  // wrpkru
-
-      // Move to the isolated stack and store the old value
-      "xchg %4, %%rsp\n"
-      "push %4\n"
-      "call IsolatedAllocFree\n"
-      // We need rax below, so move the return value to the stack
-      "push %%rax\n"
-
-      // Set pkru to only allow access to pkey 0 memory.
-      "mov $0b10101010101010101010101010101000, %%rax\n"
-      "xor %%rcx, %%rcx\n"
-      "xor %%rdx, %%rdx\n"
-      ".byte 0x0f,0x01,0xef\n"  // wrpkru
-
-      // Pop the return value
-      "pop %0\n"
-      // Restore the original stack
-      "pop %%rsp\n"
-
-      : "=r"(ret)
-      : "a"(pkru_value), "c"(0), "d"(0),
-        "r"(reinterpret_cast<uintptr_t>(isolated_globals.stack) +
-            kIsolatedThreadStackSize - 8)
-      : "memory", "cc", "r8", "r9", "r10", "r11", "xmm0", "xmm1", "xmm2",
-        "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10",
-        "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "flags", "fpsr", "st",
-        "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)");
-
-  ASSERT_EQ(ret, kTestReturnValue);
-}
-
-class MockAddressSpaceStatsDumper : public AddressSpaceStatsDumper {
- public:
-  MockAddressSpaceStatsDumper() = default;
-  void DumpStats(const AddressSpaceStats* address_space_stats) override {}
-};
-
-TEST_F(PkeyTest, DumpPkeyPoolStats) {
-  if (isolated_globals.pkey == kInvalidPkey) {
-    return;
-  }
-
-  MockAddressSpaceStatsDumper mock_stats_dumper;
-  partition_alloc::internal::AddressPoolManager::GetInstance().DumpStats(
-      &mock_stats_dumper);
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(ENABLE_PKEYS)
diff --git a/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc b/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc
deleted file mode 100644
index 3d0c888..0000000
--- a/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h"
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-
-#if BUILDFLAG(ENABLE_PKEYS)
-#include "base/allocator/partition_allocator/thread_isolation/pkey.h"
-#endif
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-ThreadIsolationSettings ThreadIsolationSettings::settings;
-#endif
-
-void WriteProtectThreadIsolatedMemory(ThreadIsolationOption thread_isolation,
-                                      void* address,
-                                      size_t size) {
-  PA_DCHECK((reinterpret_cast<uintptr_t>(address) &
-             PA_THREAD_ISOLATED_ALIGN_OFFSET_MASK) == 0);
-#if BUILDFLAG(ENABLE_PKEYS)
-  partition_alloc::internal::TagMemoryWithPkey(
-      thread_isolation.enabled ? thread_isolation.pkey : kDefaultPkey, address,
-      size);
-#else
-#error unexpected thread isolation mode
-#endif
-}
-
-template <typename T>
-void WriteProtectThreadIsolatedVariable(ThreadIsolationOption thread_isolation,
-                                        T& var,
-                                        size_t offset = 0) {
-  WriteProtectThreadIsolatedMemory(thread_isolation, (char*)&var + offset,
-                                   sizeof(T) - offset);
-}
-
-int MprotectWithThreadIsolation(void* addr,
-                                size_t len,
-                                int prot,
-                                ThreadIsolationOption thread_isolation) {
-#if BUILDFLAG(ENABLE_PKEYS)
-  return PkeyMprotect(addr, len, prot, thread_isolation.pkey);
-#endif
-}
-
-void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption thread_isolation) {
-  WriteProtectThreadIsolatedVariable(thread_isolation,
-                                     PartitionAddressSpace::setup_);
-
-  AddressPoolManager::Pool* pool =
-      AddressPoolManager::GetInstance().GetPool(kThreadIsolatedPoolHandle);
-  WriteProtectThreadIsolatedVariable(
-      thread_isolation, *pool,
-      offsetof(AddressPoolManager::Pool, alloc_bitset_));
-
-  uint16_t* pkey_reservation_offset_table =
-      GetReservationOffsetTable(kThreadIsolatedPoolHandle);
-  WriteProtectThreadIsolatedMemory(
-      thread_isolation, pkey_reservation_offset_table,
-      ReservationOffsetTable::kReservationOffsetTableLength);
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-  WriteProtectThreadIsolatedVariable(thread_isolation,
-                                     ThreadIsolationSettings::settings);
-#endif
-}
-
-void UnprotectThreadIsolatedGlobals() {
-  WriteProtectThreadIsolatedGlobals(ThreadIsolationOption(false));
-}
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
diff --git a/base/allocator/partition_allocator/thread_isolation/thread_isolation.h b/base/allocator/partition_allocator/thread_isolation/thread_isolation.h
deleted file mode 100644
index 7c74e0a..0000000
--- a/base/allocator/partition_allocator/thread_isolation/thread_isolation.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_THREAD_ISOLATION_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_THREAD_ISOLATION_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-
-#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
-
-#if BUILDFLAG(ENABLE_PKEYS)
-#include "base/allocator/partition_allocator/thread_isolation/pkey.h"
-#endif
-
-#if !BUILDFLAG(HAS_64_BIT_POINTERS)
-#error "thread isolation support requires 64 bit pointers"
-#endif
-
-namespace partition_alloc {
-
-struct ThreadIsolationOption {
-  constexpr ThreadIsolationOption() = default;
-  explicit ThreadIsolationOption(bool enabled) : enabled(enabled) {}
-
-#if BUILDFLAG(ENABLE_PKEYS)
-  explicit ThreadIsolationOption(int pkey) : pkey(pkey) {
-    enabled = pkey != internal::kInvalidPkey;
-  }
-  int pkey = -1;
-#endif  // BUILDFLAG(ENABLE_PKEYS)
-
-  bool enabled = false;
-
-  bool operator==(const ThreadIsolationOption& other) const {
-#if BUILDFLAG(ENABLE_PKEYS)
-    if (pkey != other.pkey) {
-      return false;
-    }
-#endif  // BUILDFLAG(ENABLE_PKEYS)
-    return enabled == other.enabled;
-  }
-};
-
-}  // namespace partition_alloc
-
-namespace partition_alloc::internal {
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-
-struct PA_THREAD_ISOLATED_ALIGN ThreadIsolationSettings {
-  bool enabled = false;
-  static ThreadIsolationSettings settings PA_CONSTINIT;
-};
-
-#if BUILDFLAG(ENABLE_PKEYS)
-
-using LiftThreadIsolationScope = LiftPkeyRestrictionsScope;
-
-#endif  // BUILDFLAG(ENABLE_PKEYS)
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption thread_isolation);
-void UnprotectThreadIsolatedGlobals();
-[[nodiscard]] int MprotectWithThreadIsolation(
-    void* addr,
-    size_t len,
-    int prot,
-    ThreadIsolationOption thread_isolation);
-
-}  // namespace partition_alloc::internal
-
-#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_THREAD_ISOLATION_H_
diff --git a/base/allocator/partition_allocator/yield_processor.h b/base/allocator/partition_allocator/yield_processor.h
deleted file mode 100644
index ff15107..0000000
--- a/base/allocator/partition_allocator/yield_processor.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_
-
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "build/build_config.h"
-
-// The PA_YIELD_PROCESSOR macro wraps an architecture-specific instruction that
-// informs the processor we're in a busy wait, so it can handle the branch more
-// intelligently and e.g. reduce power to our core or give more resources to the
-// other hyper-thread on this core. See the following for context:
-// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
-
-#if PA_CONFIG(IS_NONCLANG_MSVC)
-
-// MSVC is in its own assemblyless world (crbug.com/1351310#c6).
-#include <windows.h>
-#define PA_YIELD_PROCESSOR (YieldProcessor())
-
-#else
-
-#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
-#define PA_YIELD_PROCESSOR __asm__ __volatile__("pause")
-#elif (defined(ARCH_CPU_ARMEL) && __ARM_ARCH >= 6) || defined(ARCH_CPU_ARM64)
-#define PA_YIELD_PROCESSOR __asm__ __volatile__("yield")
-#elif defined(ARCH_CPU_MIPSEL)
-// The MIPS32 docs state that the PAUSE instruction is a no-op on older
-// architectures (first added in MIPS32r2). To avoid assembler errors when
-// targeting pre-r2, we must encode the instruction manually.
-#define PA_YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
-#elif defined(ARCH_CPU_MIPS64EL) && __mips_isa_rev >= 2
-// Don't bother using .word here since r2 is the lowest mips64 revision that
-// Chromium supports.
-#define PA_YIELD_PROCESSOR __asm__ __volatile__("pause")
-#elif defined(ARCH_CPU_PPC64_FAMILY)
-#define PA_YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
-#elif defined(ARCH_CPU_S390_FAMILY)
-// just do nothing
-#define PA_YIELD_PROCESSOR ((void)0)
-#endif  // ARCH
-
-#ifndef PA_YIELD_PROCESSOR
-#define PA_YIELD_PROCESSOR ((void)0)
-#endif
-
-#endif  // PA_CONFIG(IS_NONCLANG_MSVC)
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_
diff --git a/base/android/apk_assets.cc b/base/android/apk_assets.cc
index 3953b68..347516c 100644
--- a/base/android/apk_assets.cc
+++ b/base/android/apk_assets.cc
@@ -10,6 +10,8 @@
 #include "base/android/jni_string.h"
 #include "base/android/scoped_java_ref.h"
 #include "base/base_jni/ApkAssets_jni.h"
+#include "base/debug/crash_logging.h"
+#include "base/debug/dump_without_crashing.h"
 #include "base/file_descriptor_store.h"
 
 namespace base {
@@ -51,5 +53,17 @@
   return true;
 }
 
+void DumpLastOpenApkAssetFailure() {
+  JNIEnv* env = base::android::AttachCurrentThread();
+  base::android::ScopedJavaLocalRef<jstring> error =
+      Java_ApkAssets_takeLastErrorString(env);
+  if (!error) {
+    return;
+  }
+  SCOPED_CRASH_KEY_STRING256("base", "OpenApkAssetError",
+                             ConvertJavaStringToUTF8(env, error));
+  base::debug::DumpWithoutCrashing();
+}
+
 }  // namespace android
 }  // namespace base
diff --git a/base/android/apk_assets.h b/base/android/apk_assets.h
index 6990968..6389eb0 100644
--- a/base/android/apk_assets.h
+++ b/base/android/apk_assets.h
@@ -37,6 +37,10 @@
     const std::string& key,
     const base::FilePath& file_path);
 
+// If one of the above methods fails, call this to perform a
+// `DumpWithoutCrashing` that includes error details relevant to the failure.
+BASE_EXPORT void DumpLastOpenApkAssetFailure();
+
 }  // namespace android
 }  // namespace base
 
diff --git a/base/android/base_features.cc b/base/android/base_features.cc
index 4050c5a..3254c5b 100644
--- a/base/android/base_features.cc
+++ b/base/android/base_features.cc
@@ -22,7 +22,7 @@
 // fresh when next used, hopefully resolving the issue.
 BASE_FEATURE(kCrashBrowserOnChildMismatchIfBrowserChanged,
              "CrashBrowserOnChildMismatchIfBrowserChanged",
-             FEATURE_DISABLED_BY_DEFAULT);
+             FEATURE_ENABLED_BY_DEFAULT);
 
 // Crash the browser process if a child process is created which does not match
 // the browser process regardless of whether the browser package appears to have
diff --git a/base/android/build_info.h b/base/android/build_info.h
index 076029c..12e42d1 100644
--- a/base/android/build_info.h
+++ b/base/android/build_info.h
@@ -87,10 +87,21 @@
     return gms_version_code_;
   }
 
+  // The package name of the host app which has loaded WebView, retrieved from
+  // the application context. In the context of the SDK Runtime, the package
+  // name of the app that owns this particular instance of the SDK Runtime will
+  // also be included. e.g.
+  // com.google.android.sdksandbox:com:com.example.myappwithads
   const char* host_package_name() const { return host_package_name_; }
 
+  // By default: same as versionCode. For WebView: versionCode of the embedding
+  // app. In the context of the SDK Runtime, this is the versionCode of the app
+  // that owns this particular instance of the SDK Runtime.
   const char* host_version_code() const { return host_version_code_; }
 
+  // The application name (e.g. "Chrome"). For WebView, this is the name of the
+  // embedding app. In the context of the SDK Runtime, this is the name of the
+  // app that owns this particular instance of the SDK Runtime.
   const char* host_package_label() const { return host_package_label_; }
 
   const char* package_version_code() const {
diff --git a/base/android/callback_android.cc b/base/android/callback_android.cc
index df609db..b7d3b2f 100644
--- a/base/android/callback_android.cc
+++ b/base/android/callback_android.cc
@@ -33,7 +33,7 @@
 
 void RunTimeCallbackAndroid(const JavaRef<jobject>& callback, base::Time time) {
   Java_Helper_onTimeResultFromNative(AttachCurrentThread(), callback,
-                                     time.ToJavaTime());
+                                     time.InMillisecondsSinceUnixEpoch());
 }
 
 void RunStringCallbackAndroid(const JavaRef<jobject>& callback,
diff --git a/base/android/jank_metric_uma_recorder.cc b/base/android/jank_metric_uma_recorder.cc
index 85bc834..2c9c1b5 100644
--- a/base/android/jank_metric_uma_recorder.cc
+++ b/base/android/jank_metric_uma_recorder.cc
@@ -21,34 +21,87 @@
     int64_t reporting_interval_start_time,
     int64_t reporting_interval_duration,
     uint64_t janky_frame_count,
-    uint64_t non_janky_frame_count) {
-  if (reporting_interval_start_time < 0) {
+    uint64_t non_janky_frame_count,
+    int scenario) {
+  if (reporting_interval_start_time <= 0) {
     return;
   }
 
   // The following code does nothing if base tracing is disabled.
-  [[maybe_unused]] auto t =
-      perfetto::Track(static_cast<uint64_t>(reporting_interval_start_time));
+  [[maybe_unused]] auto t = perfetto::Track(
+      static_cast<uint64_t>(reporting_interval_start_time + scenario));
   TRACE_EVENT_BEGIN(
-      "android_webview.timeline", "JankMetricsReportingInterval", t,
+      "android_webview.timeline,android.ui.jank",
+      "JankMetricsReportingInterval", t,
       base::TimeTicks::FromUptimeMillis(reporting_interval_start_time),
       "janky_frames", janky_frame_count, "non_janky_frames",
-      non_janky_frame_count);
+      non_janky_frame_count, "scenario", scenario);
   TRACE_EVENT_END(
-      "android_webview.timeline", t,
+      "android_webview.timeline,android.ui.jank", t,
       base::TimeTicks::FromUptimeMillis(
           (reporting_interval_start_time + reporting_interval_duration)));
 }
 
 }  // namespace
 
-// These values are persisted to logs. Entries should not be renumbered and
-// numeric values should never be reused.
-enum class FrameJankStatus {
-  kJanky = 0,
-  kNonJanky = 1,
-  kMaxValue = kNonJanky,
-};
+const char* GetAndroidFrameTimelineJankHistogramName(JankScenario scenario) {
+#define HISTOGRAM_NAME(x) "Android.FrameTimelineJank.FrameJankStatus." #x
+  switch (scenario) {
+    case JankScenario::PERIODIC_REPORTING:
+      return HISTOGRAM_NAME(Total);
+    case JankScenario::OMNIBOX_FOCUS:
+      return HISTOGRAM_NAME(OmniboxFocus);
+    case JankScenario::NEW_TAB_PAGE:
+      return HISTOGRAM_NAME(NewTabPage);
+    case JankScenario::STARTUP:
+      return HISTOGRAM_NAME(Startup);
+    case JankScenario::TAB_SWITCHER:
+      return HISTOGRAM_NAME(TabSwitcher);
+    case JankScenario::OPEN_LINK_IN_NEW_TAB:
+      return HISTOGRAM_NAME(OpenLinkInNewTab);
+    case JankScenario::START_SURFACE_HOMEPAGE:
+      return HISTOGRAM_NAME(StartSurfaceHomepage);
+    case JankScenario::START_SURFACE_TAB_SWITCHER:
+      return HISTOGRAM_NAME(StartSurfaceTabSwitcher);
+    case JankScenario::FEED_SCROLLING:
+      return HISTOGRAM_NAME(FeedScrolling);
+    case JankScenario::WEBVIEW_SCROLLING:
+      return HISTOGRAM_NAME(WebviewScrolling);
+    default:
+      return HISTOGRAM_NAME(UNKNOWN);
+  }
+#undef HISTOGRAM_NAME
+}
+
+const char* GetAndroidFrameTimelineDurationHistogramName(
+    JankScenario scenario) {
+#define HISTOGRAM_NAME(x) "Android.FrameTimelineJank.Duration." #x
+  switch (scenario) {
+    case JankScenario::PERIODIC_REPORTING:
+      return HISTOGRAM_NAME(Total);
+    case JankScenario::OMNIBOX_FOCUS:
+      return HISTOGRAM_NAME(OmniboxFocus);
+    case JankScenario::NEW_TAB_PAGE:
+      return HISTOGRAM_NAME(NewTabPage);
+    case JankScenario::STARTUP:
+      return HISTOGRAM_NAME(Startup);
+    case JankScenario::TAB_SWITCHER:
+      return HISTOGRAM_NAME(TabSwitcher);
+    case JankScenario::OPEN_LINK_IN_NEW_TAB:
+      return HISTOGRAM_NAME(OpenLinkInNewTab);
+    case JankScenario::START_SURFACE_HOMEPAGE:
+      return HISTOGRAM_NAME(StartSurfaceHomepage);
+    case JankScenario::START_SURFACE_TAB_SWITCHER:
+      return HISTOGRAM_NAME(StartSurfaceTabSwitcher);
+    case JankScenario::FEED_SCROLLING:
+      return HISTOGRAM_NAME(FeedScrolling);
+    case JankScenario::WEBVIEW_SCROLLING:
+      return HISTOGRAM_NAME(WebviewScrolling);
+    default:
+      return HISTOGRAM_NAME(UNKNOWN);
+  }
+#undef HISTOGRAM_NAME
+}
 
 // This function is called from Java with JNI, it's declared in
 // base/base_jni/JankMetricUMARecorder_jni.h which is an autogenerated
@@ -59,10 +112,11 @@
     const base::android::JavaParamRef<jlongArray>& java_durations_ns,
     const base::android::JavaParamRef<jbooleanArray>& java_jank_status,
     jlong java_reporting_interval_start_time,
-    jlong java_reporting_interval_duration) {
+    jlong java_reporting_interval_duration,
+    jint java_scenario_enum) {
   RecordJankMetrics(env, java_durations_ns, java_jank_status,
                     java_reporting_interval_start_time,
-                    java_reporting_interval_duration);
+                    java_reporting_interval_duration, java_scenario_enum);
 }
 
 void RecordJankMetrics(
@@ -70,15 +124,20 @@
     const base::android::JavaParamRef<jlongArray>& java_durations_ns,
     const base::android::JavaParamRef<jbooleanArray>& java_jank_status,
     jlong java_reporting_interval_start_time,
-    jlong java_reporting_interval_duration) {
+    jlong java_reporting_interval_duration,
+    jint java_scenario_enum) {
   std::vector<int64_t> durations_ns;
   JavaLongArrayToInt64Vector(env, java_durations_ns, &durations_ns);
 
   std::vector<bool> jank_status;
   JavaBooleanArrayToBoolVector(env, java_jank_status, &jank_status);
 
-  std::string frame_duration_histogram_name = "Android.Jank.FrameDuration";
-  std::string janky_frames_histogram_name = "Android.Jank.FrameJankStatus";
+  JankScenario scenario = static_cast<JankScenario>(java_scenario_enum);
+
+  const char* frame_duration_histogram_name =
+      GetAndroidFrameTimelineDurationHistogramName(scenario);
+  const char* janky_frames_per_scenario_histogram_name =
+      GetAndroidFrameTimelineJankHistogramName(scenario);
 
   for (const int64_t frame_duration_ns : durations_ns) {
     base::UmaHistogramTimes(frame_duration_histogram_name,
@@ -89,7 +148,7 @@
 
   for (bool is_janky : jank_status) {
     base::UmaHistogramEnumeration(
-        janky_frames_histogram_name,
+        janky_frames_per_scenario_histogram_name,
         is_janky ? FrameJankStatus::kJanky : FrameJankStatus::kNonJanky);
     if (is_janky) {
       ++janky_frame_count;
@@ -98,7 +157,8 @@
 
   RecordJankMetricReportingIntervalTraceEvent(
       java_reporting_interval_start_time, java_reporting_interval_duration,
-      janky_frame_count, jank_status.size() - janky_frame_count);
+      janky_frame_count, jank_status.size() - janky_frame_count,
+      java_scenario_enum);
 }
 
 }  // namespace base::android
diff --git a/base/android/jank_metric_uma_recorder.h b/base/android/jank_metric_uma_recorder.h
index f6011fe..18fb1d4 100644
--- a/base/android/jank_metric_uma_recorder.h
+++ b/base/android/jank_metric_uma_recorder.h
@@ -7,14 +7,49 @@
 
 #include "base/android/jni_android.h"
 #include "base/base_export.h"
+#include "base/feature_list.h"
 
 namespace base::android {
 
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class FrameJankStatus {
+  kJanky = 0,
+  kNonJanky = 1,
+  kMaxValue = kNonJanky,
+};
+
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class JankScenario {
+  PERIODIC_REPORTING = 1,
+  OMNIBOX_FOCUS = 2,
+  NEW_TAB_PAGE = 3,
+  STARTUP = 4,
+  TAB_SWITCHER = 5,
+  OPEN_LINK_IN_NEW_TAB = 6,
+  START_SURFACE_HOMEPAGE = 7,
+  START_SURFACE_TAB_SWITCHER = 8,
+  FEED_SCROLLING = 9,
+  WEBVIEW_SCROLLING = 10,
+  // This value should always be last; it is not persisted to logs and is
+  // exposed only for testing.
+  MAX_VALUE = WEBVIEW_SCROLLING + 1
+};
+
+// Resolves the given scenario to its frame jank status histogram name.
+BASE_EXPORT const char* GetAndroidFrameTimelineJankHistogramName(
+    JankScenario scenario);
+// Resolves the given scenario to its frame duration histogram name.
+BASE_EXPORT const char* GetAndroidFrameTimelineDurationHistogramName(
+    JankScenario scenario);
+
 BASE_EXPORT void RecordJankMetrics(
     JNIEnv* env,
     const base::android::JavaParamRef<jlongArray>& java_durations_ns,
     const base::android::JavaParamRef<jbooleanArray>& java_jank_status,
     jlong java_reporting_interval_start_time,
-    jlong java_reporting_interval_duration);
+    jlong java_reporting_interval_duration,
+    jint java_scenario_enum);
 }  // namespace base::android
 #endif  // BASE_ANDROID_JANK_METRIC_UMA_RECORDER_H_
diff --git a/base/android/jank_metric_uma_recorder_unittest.cc b/base/android/jank_metric_uma_recorder_unittest.cc
index 0dcc152..38bb1b0 100644
--- a/base/android/jank_metric_uma_recorder_unittest.cc
+++ b/base/android/jank_metric_uma_recorder_unittest.cc
@@ -18,6 +18,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 using ::testing::ElementsAre;
+using ::testing::IsEmpty;
 
 namespace base::android {
 namespace {
@@ -57,13 +58,11 @@
 const bool kJankStatus[] = {
     false, false, true, false, true, false, false, false,
 };
-
 const size_t kJankStatusLen = kDurationsLen;
 
 }  // namespace
 
 TEST(JankMetricUMARecorder, TestUMARecording) {
-  HistogramTester histogram_tester;
 
   JNIEnv* env = AttachCurrentThread();
 
@@ -73,21 +72,55 @@
   jbooleanArray java_jank_status =
       GenerateJavaBooleanArray(env, kJankStatus, kJankStatusLen);
 
-  RecordJankMetrics(
-      env,
-      /* java_durations_ns= */
-      base::android::JavaParamRef<jlongArray>(env, java_durations),
-      /* java_jank_status = */
-      base::android::JavaParamRef<jbooleanArray>(env, java_jank_status),
-      /* java_reporting_interval_start_time = */ 0,
-      /* java_reporting_interval_duration = */ 1000);
+  const int kMinScenario = static_cast<int>(JankScenario::PERIODIC_REPORTING);
+  const int kMaxScenario = static_cast<int>(JankScenario::MAX_VALUE);
+  // Keep one HistogramTester outside the loop to ensure that each scenario
+  // records to a different histogram rather than to the same name over and over.
+  HistogramTester complete_histogram_tester;
+  size_t total_histograms = 0;
+  for (int i = kMinScenario; i < kMaxScenario; ++i) {
+    // HistogramTester takes a snapshot of currently incremented counters so
+    // everything is scoped to just this iteration of the for loop.
+    HistogramTester histogram_tester;
 
-  EXPECT_THAT(histogram_tester.GetAllSamples("Android.Jank.FrameDuration"),
-              ElementsAre(Bucket(1, 3), Bucket(2, 1), Bucket(10, 1),
-                          Bucket(20, 1), Bucket(29, 1), Bucket(57, 1)));
+    RecordJankMetrics(
+        env,
+        /* java_durations_ns= */
+        base::android::JavaParamRef<jlongArray>(env, java_durations),
+        /* java_jank_status = */
+        base::android::JavaParamRef<jbooleanArray>(env, java_jank_status),
+        /* java_reporting_interval_start_time = */ 0,
+        /* java_reporting_interval_duration = */ 1000,
+        /* java_scenario_enum = */ i);
 
-  EXPECT_THAT(histogram_tester.GetAllSamples("Android.Jank.FrameJankStatus"),
-              ElementsAre(Bucket(0, 2), Bucket(1, 6)));
+    const std::string kDurationName =
+        GetAndroidFrameTimelineDurationHistogramName(
+            static_cast<JankScenario>(i));
+    const std::string kJankyName =
+        GetAndroidFrameTimelineJankHistogramName(static_cast<JankScenario>(i));
+
+    // Only one Duration and one Jank scenario should be incremented.
+    base::HistogramTester::CountsMap count_map =
+        histogram_tester.GetTotalCountsForPrefix("Android.FrameTimelineJank.");
+    EXPECT_EQ(count_map.size(), 2ul);
+    EXPECT_EQ(count_map[kDurationName], 8) << kDurationName;
+    EXPECT_EQ(count_map[kJankyName], 8) << kJankyName;
+    // And we should be two more than the last iteration, but don't do any other
+    // verification because each iteration does its own.
+    base::HistogramTester::CountsMap total_count_map =
+        complete_histogram_tester.GetTotalCountsForPrefix(
+            "Android.FrameTimelineJank.");
+    EXPECT_EQ(total_count_map.size(), total_histograms + 2);
+    total_histograms += 2;
+
+    EXPECT_THAT(histogram_tester.GetAllSamples(kDurationName),
+                ElementsAre(Bucket(1, 3), Bucket(2, 1), Bucket(10, 1),
+                            Bucket(20, 1), Bucket(29, 1), Bucket(57, 1)))
+        << kDurationName;
+    EXPECT_THAT(histogram_tester.GetAllSamples(kJankyName),
+                ElementsAre(Bucket(FrameJankStatus::kJanky, 2),
+                            Bucket(FrameJankStatus::kNonJanky, 6)))
+        << kJankyName;
+  }
 }
-
 }  // namespace base::android
diff --git a/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java b/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java
index baeb953..753b8e4 100644
--- a/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java
+++ b/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java
@@ -29,13 +29,9 @@
 import android.view.View;
 import android.view.textclassifier.TextClassifier;
 import android.widget.TextView;
-
 import androidx.annotation.NonNull;
 import androidx.annotation.Nullable;
-import androidx.annotation.OptIn;
 import androidx.annotation.RequiresApi;
-import androidx.core.os.BuildCompat;
-
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -290,10 +286,9 @@
      * passed to Chrome from a backgrounded app.
      * @param options {@ActivityOptions} to set the required mode to.
      */
-    @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
     public static void setActivityOptionsBackgroundActivityStartMode(
             @NonNull ActivityOptions options) {
-        if (!BuildCompat.isAtLeastU()) return;
+        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.UPSIDE_DOWN_CAKE) return;
         options.setPendingIntentBackgroundActivityStartMode(
                 ActivityOptions.MODE_BACKGROUND_ACTIVITY_START_ALLOWED);
     }
@@ -303,11 +298,10 @@
      * See https://crbug.com/1427112
      * @param view The view on which to set the handwriting bounds.
      */
-    @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
     public static void clearHandwritingBoundsOffsetBottom(View view) {
         // TODO(crbug.com/1427112): Replace uses of this method with direct calls once the API is
         // available.
-        if (!BuildCompat.isAtLeastU()) return;
+        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.UPSIDE_DOWN_CAKE) return;
         // Set the bottom handwriting bounds offset to 0 so that the view doesn't intercept
         // stylus events meant for the web contents.
         try {
diff --git a/base/android/java/src/org/chromium/base/ApkAssets.java b/base/android/java/src/org/chromium/base/ApkAssets.java
index 407438a..5f9577c 100644
--- a/base/android/java/src/org/chromium/base/ApkAssets.java
+++ b/base/android/java/src/org/chromium/base/ApkAssets.java
@@ -8,10 +8,9 @@
 import android.content.res.AssetFileDescriptor;
 import android.content.res.AssetManager;
 import android.text.TextUtils;
-import android.util.Log;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
 
 import java.io.IOException;
 
@@ -22,10 +21,15 @@
  */
 @JNINamespace("base::android")
 public class ApkAssets {
-    private static final String LOGTAG = "ApkAssets";
+    private static final String TAG = "ApkAssets";
+
+    // This isn't thread-safe, but that's OK because it's only used for debugging.
+    // Note that reference operations are atomic, so there is no security issue.
+    private static String sLastError;
 
     @CalledByNative
     public static long[] open(String fileName, String splitName) {
+        sLastError = null;
         AssetFileDescriptor afd = null;
         try {
             Context context = ContextUtils.getApplicationContext();
@@ -37,6 +41,7 @@
             return new long[] {afd.getParcelFileDescriptor().detachFd(), afd.getStartOffset(),
                     afd.getLength()};
         } catch (IOException e) {
+            sLastError = "Error while loading asset " + fileName + " from " + splitName + ": " + e;
             // As a general rule there's no point logging here because the caller should handle
             // receiving an fd of -1 sensibly, and the log message is either mirrored later, or
             // unwanted (in the case where a missing file is expected), or wanted but will be
@@ -48,7 +53,7 @@
             // informative (Android framework passes the filename as the message on actual file not
             // found, and the empty string also wouldn't give any useful information for debugging).
             if (!e.getMessage().equals("") && !e.getMessage().equals(fileName)) {
-                Log.e(LOGTAG, "Error while loading asset " + fileName + ": " + e);
+                Log.e(TAG, sLastError);
             }
             return new long[] {-1, -1, -1};
         } finally {
@@ -57,8 +62,15 @@
                     afd.close();
                 }
             } catch (IOException e2) {
-                Log.e(LOGTAG, "Unable to close AssetFileDescriptor", e2);
+                Log.e(TAG, "Unable to close AssetFileDescriptor", e2);
             }
         }
     }
+
+    @CalledByNative
+    private static String takeLastErrorString() {
+        String rv = sLastError;
+        sLastError = null;
+        return rv;
+    }
 }
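Note: the new sLastError / takeLastErrorString pair above implements a simple take-and-clear handoff: Java records the most recent asset-open failure, and native code (DumpLastOpenApkAssetFailure in apk_assets.cc) consumes it exactly once as a crash key. A minimal, self-contained Java sketch of that pattern follows; all names here are hypothetical and only the pattern mirrors the change.

    // Illustrative sketch of the take-and-clear error pattern used by ApkAssets above.
    final class LastErrorHolder {
        // Not thread-safe; acceptable for best-effort debug reporting, as noted above.
        private static String sLastError;

        static void recordFailure(String assetName, Exception e) {
            sLastError = "Error while loading asset " + assetName + ": " + e;
        }

        // Returns the last error once and clears it, so a later report cannot
        // attribute a stale error to an unrelated failure.
        static String takeLastError() {
            String error = sLastError;
            sLastError = null;
            return error;
        }

        public static void main(String[] args) {
            recordFailure("icudtl.dat", new java.io.IOException("not found"));
            System.out.println(takeLastError()); // prints the recorded error
            System.out.println(takeLastError()); // prints null: already consumed
        }
    }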
diff --git a/base/android/java/src/org/chromium/base/ApplicationStatus.java b/base/android/java/src/org/chromium/base/ApplicationStatus.java
index b95c95d..9f609c0 100644
--- a/base/android/java/src/org/chromium/base/ApplicationStatus.java
+++ b/base/android/java/src/org/chromium/base/ApplicationStatus.java
@@ -17,9 +17,10 @@
 import androidx.annotation.Nullable;
 import androidx.annotation.VisibleForTesting;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.build.BuildConfig;
 
 import java.lang.reflect.Field;
diff --git a/base/android/java/src/org/chromium/base/BaseFeatureMap.java b/base/android/java/src/org/chromium/base/BaseFeatureMap.java
index 1f7ac9b..3fc1c7f 100644
--- a/base/android/java/src/org/chromium/base/BaseFeatureMap.java
+++ b/base/android/java/src/org/chromium/base/BaseFeatureMap.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * Java accessor for base::Features listed in {@link BaseFeatures}
diff --git a/base/android/java/src/org/chromium/base/BuildInfo.java b/base/android/java/src/org/chromium/base/BuildInfo.java
index f6ca0cb..f4247a0 100644
--- a/base/android/java/src/org/chromium/base/BuildInfo.java
+++ b/base/android/java/src/org/chromium/base/BuildInfo.java
@@ -15,12 +15,11 @@
 import android.content.res.Configuration;
 import android.os.Build;
 import android.os.Build.VERSION_CODES;
+import android.os.Process;
 import android.text.TextUtils;
 
-import androidx.annotation.OptIn;
-import androidx.core.os.BuildCompat;
+import org.jni_zero.CalledByNative;
 
-import org.chromium.base.annotations.CalledByNative;
 import org.chromium.base.compat.ApiHelperForP;
 import org.chromium.build.BuildConfig;
 
@@ -36,11 +35,29 @@
     private static ApplicationInfo sBrowserApplicationInfo;
     private static boolean sInitialized;
 
-    /** The application name (e.g. "Chrome"). For WebView, this is name of the embedding app. */
+    /**
+     * The package name of the host app which has loaded WebView, retrieved from the application
+     * context. In the context of the SDK Runtime, the package name of the app that owns this
+     * particular instance of the SDK Runtime will also be included.
+     * e.g. com.google.android.sdksandbox:com:com.example.myappwithads
+     */
+    public final String hostPackageName;
+    /**
+     * The application name (e.g. "Chrome"). For WebView, this is the name of the embedding app.
+     * In the context of the SDK Runtime, this is the name of the app that owns this particular
+     * instance of the SDK Runtime.
+     */
     public final String hostPackageLabel;
-    /** By default: same as versionCode. For WebView: versionCode of the embedding app. */
+    /**
+     * By default: same as versionCode. For WebView: versionCode of the embedding app.
+     * In the context of the SDK Runtime, this is the versionCode of the app that owns this
+     * particular instance of the SDK Runtime.
+     */
     public final long hostVersionCode;
-    /** The packageName of Chrome/WebView. Use application context for host app packageName. */
+    /**
+     * The packageName of Chrome/WebView. Use application context for host app packageName.
+     * Same as the host information within any child process.
+     */
     public final String packageName;
     /** The versionCode of the apk. */
     public final long versionCode;
@@ -62,6 +79,10 @@
     public final boolean isTV;
     /** Whether we're running on an Android Automotive OS device or not. */
     public final boolean isAutomotive;
+
+    /** Whether we're running on a foldable Android device or not. */
+    public final boolean isFoldable;
+
     /**
      * version of the FEATURE_VULKAN_DEQP_LEVEL, if available. Queried only on Android T or above
      */
@@ -77,43 +98,41 @@
     }
 
     /** Returns a serialized string array of all properties of this class. */
-    @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
     private String[] getAllProperties() {
-        String hostPackageName = ContextUtils.getApplicationContext().getPackageName();
         // This implementation needs to be kept in sync with the native BuildInfo constructor.
         return new String[] {
-                Build.BRAND,
-                Build.DEVICE,
-                Build.ID,
-                Build.MANUFACTURER,
-                Build.MODEL,
-                String.valueOf(Build.VERSION.SDK_INT),
-                Build.TYPE,
-                Build.BOARD,
-                hostPackageName,
-                String.valueOf(hostVersionCode),
-                hostPackageLabel,
-                packageName,
-                String.valueOf(versionCode),
-                versionName,
-                androidBuildFingerprint,
-                gmsVersionCode,
-                installerPackageName,
-                abiString,
-                customThemes,
-                resourcesVersion,
-                String.valueOf(
-                        ContextUtils.getApplicationContext().getApplicationInfo().targetSdkVersion),
-                isDebugAndroid() ? "1" : "0",
-                isTV ? "1" : "0",
-                Build.VERSION.INCREMENTAL,
-                Build.HARDWARE,
-                isAtLeastT() ? "1" : "0",
-                isAutomotive ? "1" : "0",
-                BuildCompat.isAtLeastU() ? "1" : "0",
-                targetsAtLeastU() ? "1" : "0",
-                Build.VERSION.CODENAME,
-                String.valueOf(vulkanDeqpLevel),
+            Build.BRAND,
+            Build.DEVICE,
+            Build.ID,
+            Build.MANUFACTURER,
+            Build.MODEL,
+            String.valueOf(Build.VERSION.SDK_INT),
+            Build.TYPE,
+            Build.BOARD,
+            hostPackageName,
+            String.valueOf(hostVersionCode),
+            hostPackageLabel,
+            packageName,
+            String.valueOf(versionCode),
+            versionName,
+            androidBuildFingerprint,
+            gmsVersionCode,
+            installerPackageName,
+            abiString,
+            customThemes,
+            resourcesVersion,
+            String.valueOf(
+                    ContextUtils.getApplicationContext().getApplicationInfo().targetSdkVersion),
+            isDebugAndroid() ? "1" : "0",
+            isTV ? "1" : "0",
+            Build.VERSION.INCREMENTAL,
+            Build.HARDWARE,
+            Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU ? "1" : "0",
+            isAutomotive ? "1" : "0",
+            Build.VERSION.SDK_INT >= VERSION_CODES.UPSIDE_DOWN_CAKE ? "1" : "0",
+            targetsAtLeastU() ? "1" : "0",
+            Build.VERSION.CODENAME,
+            String.valueOf(vulkanDeqpLevel),
         };
     }
 
@@ -160,25 +179,102 @@
     private BuildInfo() {
         sInitialized = true;
         Context appContext = ContextUtils.getApplicationContext();
-        String hostPackageName = appContext.getPackageName();
+        String appContextPackageName = appContext.getPackageName();
         PackageManager pm = appContext.getPackageManager();
-        PackageInfo pi = PackageUtils.getPackageInfo(hostPackageName, 0);
-        hostVersionCode = packageVersionCode(pi);
-        if (sBrowserPackageInfo != null) {
-            packageName = sBrowserPackageInfo.packageName;
-            versionCode = packageVersionCode(sBrowserPackageInfo);
-            versionName = nullToEmpty(sBrowserPackageInfo.versionName);
-            sBrowserApplicationInfo = sBrowserPackageInfo.applicationInfo;
-            sBrowserPackageInfo = null;
-        } else {
-            packageName = hostPackageName;
-            versionCode = hostVersionCode;
-            versionName = nullToEmpty(pi.versionName);
-            sBrowserApplicationInfo = appContext.getApplicationInfo();
+
+        String providedHostPackageName = null;
+        String providedHostPackageLabel = null;
+        String providedPackageName = null;
+        String providedPackageVersionName = null;
+        Long providedHostVersionCode = null;
+        Long providedPackageVersionCode = null;
+
+        // Child processes run in an isolated process, so they can't retrieve much of the
+        // package information in the way we normally would. To get around this, we feed the
+        // information in as command line switches.
+        if (CommandLine.isInitialized()) {
+            CommandLine commandLine = CommandLine.getInstance();
+            providedHostPackageName = commandLine.getSwitchValue(BaseSwitches.HOST_PACKAGE_NAME);
+            providedHostPackageLabel = commandLine.getSwitchValue(BaseSwitches.HOST_PACKAGE_LABEL);
+            providedPackageName = commandLine.getSwitchValue(BaseSwitches.PACKAGE_NAME);
+            providedPackageVersionName =
+                    commandLine.getSwitchValue(BaseSwitches.PACKAGE_VERSION_NAME);
+
+            if (commandLine.hasSwitch(BaseSwitches.HOST_VERSION_CODE)) {
+                providedHostVersionCode =
+                        Long.parseLong(commandLine.getSwitchValue(BaseSwitches.HOST_VERSION_CODE));
+            }
+
+            if (commandLine.hasSwitch(BaseSwitches.PACKAGE_VERSION_CODE)) {
+                providedPackageVersionCode = Long.parseLong(
+                        commandLine.getSwitchValue(BaseSwitches.PACKAGE_VERSION_CODE));
+            }
         }
 
-        hostPackageLabel = nullToEmpty(pm.getApplicationLabel(pi.applicationInfo));
-        installerPackageName = nullToEmpty(pm.getInstallerPackageName(packageName));
+        boolean hostInformationProvided = providedHostPackageName != null
+                && providedHostPackageLabel != null && providedHostVersionCode != null
+                && providedPackageName != null && providedPackageVersionName != null
+                && providedPackageVersionCode != null;
+
+        // We want to retrieve the originally installed package to verify the host package name.
+        // In the case of the SDK Runtime, we would like to retrieve the package name of the app
+        // loading the SDK.
+        String appInstalledPackageName = appContextPackageName;
+
+        if (hostInformationProvided) {
+            hostPackageName = providedHostPackageName;
+            hostPackageLabel = providedHostPackageLabel;
+            hostVersionCode = providedHostVersionCode;
+            versionName = providedPackageVersionName;
+            packageName = providedPackageName;
+            versionCode = providedPackageVersionCode;
+
+            sBrowserApplicationInfo = appContext.getApplicationInfo();
+        } else {
+            // The SDK-qualified package name carries the same information as
+            // appInstalledPackageName but is prefixed with the SDK sandbox package so that we can
+            // tell SDK Runtime data apart from regular data in our logs and metrics.
+            String sdkQualifiedName = appInstalledPackageName;
+
+            // TODO(bewise): There isn't currently an official API to grab the host package name
+            // with the SDK Runtime. We can work around this because SDKs loaded in the SDK
+            // Runtime have the host UID + 10000. This should be updated if a public API comes
+            // along that we can use.
+            // You can see more about this in the Android source:
+            // https://cs.android.com/android/platform/superproject/main/+/main:frameworks/base/core/java/android/os/Process.java;l=292;drc=47fffdd53115a9af1820e3f89d8108745be4b55d
+            if (ContextUtils.isSdkSandboxProcess()) {
+                final int hostId = Process.myUid() - 10000;
+                final String[] packageNames = pm.getPackagesForUid(hostId);
+
+                if (packageNames.length > 0) {
+                    // We could end up with more than one package name if the app used a
+                    // sharedUserId, but sharedUserIds are deprecated, so it is safe to rely on
+                    // the first package name.
+                    appInstalledPackageName = packageNames[0];
+                    sdkQualifiedName += ":" + appInstalledPackageName;
+                }
+            }
+
+            PackageInfo pi = PackageUtils.getPackageInfo(appInstalledPackageName, 0);
+            hostPackageName = sdkQualifiedName;
+            hostPackageLabel = nullToEmpty(pm.getApplicationLabel(pi.applicationInfo));
+            hostVersionCode = packageVersionCode(pi);
+
+            if (sBrowserPackageInfo != null) {
+                packageName = sBrowserPackageInfo.packageName;
+                versionCode = packageVersionCode(sBrowserPackageInfo);
+                versionName = nullToEmpty(sBrowserPackageInfo.versionName);
+                sBrowserApplicationInfo = sBrowserPackageInfo.applicationInfo;
+                sBrowserPackageInfo = null;
+            } else {
+                packageName = appContextPackageName;
+                versionCode = hostVersionCode;
+                versionName = nullToEmpty(pi.versionName);
+                sBrowserApplicationInfo = appContext.getApplicationInfo();
+            }
+        }
+
+        installerPackageName = nullToEmpty(pm.getInstallerPackageName(appInstalledPackageName));
 
         PackageInfo gmsPackageInfo = PackageUtils.getPackageInfo("com.google.android.gms", 0);
         gmsVersionCode = gmsPackageInfo != null ? String.valueOf(packageVersionCode(gmsPackageInfo))
@@ -206,11 +302,7 @@
         }
         resourcesVersion = currentResourcesVersion;
 
-        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
-            abiString = TextUtils.join(", ", Build.SUPPORTED_ABIS);
-        } else {
-            abiString = String.format("ABI1: %s, ABI2: %s", Build.CPU_ABI, Build.CPU_ABI2);
-        }
+        abiString = TextUtils.join(", ", Build.SUPPORTED_ABIS);
 
         // The value is truncated, as this is used for crash and UMA reporting.
         androidBuildFingerprint = Build.FINGERPRINT.substring(
@@ -234,6 +326,11 @@
         }
         this.isAutomotive = isAutomotive;
 
+        // Detect whether device is foldable.
+        this.isFoldable =
+                Build.VERSION.SDK_INT >= VERSION_CODES.R
+                        && pm.hasSystemFeature(PackageManager.FEATURE_SENSOR_HINGE_ANGLE);
+
         int vulkanLevel = 0;
         if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
             FeatureInfo[] features = pm.getSystemAvailableFeatures();
@@ -275,17 +372,6 @@
     }
 
     /**
-     * @deprecated For most callers, just replace with an inline check:
-     * if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU)
-     * For Robolectric just set the SDK level to VERSION_CODES.TIRAMISU
-     */
-    @Deprecated
-    @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
-    public static boolean isAtLeastT() {
-        return BuildCompat.isAtLeastT();
-    }
-
-    /**
      * Checks if the application targets the T SDK or later.
      * @deprecated Chrome callers should just remove this test - Chrome targets T or later now.
      * WebView callers should just inline the logic below to check the target level of the embedding
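Note: the SDK Runtime branch added to the BuildInfo constructor above relies on the fact that an SDK sandbox process runs with the host app's UID plus 10000, so the host package can be recovered via PackageManager. A hedged, self-contained sketch of that lookup follows; the class and method names are illustrative, and it assumes it is called from an SDK sandbox process (otherwise the UID arithmetic is meaningless).

    import android.content.pm.PackageManager;
    import android.os.Process;

    // Illustrative sketch of the host-package lookup described in the BuildInfo change above.
    final class SdkSandboxHostPackage {
        static String resolveHostPackage(PackageManager pm, String fallbackPackageName) {
            // SDK sandbox processes run as host UID + 10000 (see the TODO in the change).
            final int hostUid = Process.myUid() - 10000;
            final String[] packageNames = pm.getPackagesForUid(hostUid);
            if (packageNames != null && packageNames.length > 0) {
                // A sharedUserId can yield several names, but it is deprecated, so the
                // first entry is a reasonable choice.
                return packageNames[0];
            }
            return fallbackPackageName;
        }
    }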
diff --git a/base/android/java/src/org/chromium/base/BundleUtils.java b/base/android/java/src/org/chromium/base/BundleUtils.java
index 632cbb2..256b01a 100644
--- a/base/android/java/src/org/chromium/base/BundleUtils.java
+++ b/base/android/java/src/org/chromium/base/BundleUtils.java
@@ -22,7 +22,8 @@
 import dalvik.system.BaseDexClassLoader;
 import dalvik.system.PathClassLoader;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
+
 import org.chromium.base.compat.ApiHelperForO;
 import org.chromium.base.metrics.RecordHistogram;
 import org.chromium.build.BuildConfig;
diff --git a/base/android/java/src/org/chromium/base/Callback.java b/base/android/java/src/org/chromium/base/Callback.java
index 5cf653c..00aef7c 100644
--- a/base/android/java/src/org/chromium/base/Callback.java
+++ b/base/android/java/src/org/chromium/base/Callback.java
@@ -4,7 +4,9 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.CalledByNative;
+import androidx.annotation.Nullable;
+
+import org.jni_zero.CalledByNative;
 
 /**
  * A simple single-argument callback to handle the result of a computation.
@@ -31,6 +33,18 @@
     }
 
     /**
+     * Runs the callback, first checking whether it is null.
+     *
+     * <p>Can be used as syntactic sugar for: if (callback != null) callback.onResult(object);
+     *
+     * @param callback The {@link Callback} to run.
+     * @param object The payload to provide to the callback (may be null).
+     */
+    static <T> void runNullSafe(@Nullable Callback<T> callback, @Nullable T object) {
+        if (callback != null) callback.onResult(object);
+    }
+
+    /**
      * JNI Generator does not know how to target static methods on interfaces
      * (which is new in Java 8, and requires desugaring).
      */
diff --git a/base/android/java/src/org/chromium/base/CommandLine.java b/base/android/java/src/org/chromium/base/CommandLine.java
index 6d51a87..72091a0 100644
--- a/base/android/java/src/org/chromium/base/CommandLine.java
+++ b/base/android/java/src/org/chromium/base/CommandLine.java
@@ -9,8 +9,7 @@
 import androidx.annotation.Nullable;
 import androidx.annotation.VisibleForTesting;
 
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.NativeMethods;
 
 import java.io.File;
 import java.io.FileReader;
@@ -27,7 +26,6 @@
  * file at a specific location early during startup. Applications each define their own files, e.g.,
  * ContentShellApplication.COMMAND_LINE_FILE.
 **/
-@MainDex
 public abstract class CommandLine {
     // Public abstract interface, implemented in derived classes.
     // All these methods reflect their native-side counterparts.
@@ -158,8 +156,7 @@
      * Resets both the java proxy and the native command lines. This allows the entire
      * command line initialization to be re-run including the call to onJniLoaded.
      */
-    @VisibleForTesting
-    public static void reset() {
+    static void resetForTesting() {
         setInstance(null);
     }
 
@@ -173,8 +170,8 @@
      */
     @VisibleForTesting
     static String[] tokenizeQuotedArguments(char[] buffer) {
-        // Just field trials can take up to 10K of command line.
-        if (buffer.length > 64 * 1024) {
+        // Field trials alone can take over 60K of command line.
+        if (buffer.length > 96 * 1024) {
             // Check that our test runners are setting a reasonable number of flags.
             throw new RuntimeException("Flags file too big: " + buffer.length);
         }
@@ -243,14 +240,6 @@
     }
 
     /**
-     * Set {@link CommandLine} for testing.
-     * @param commandLine The {@link CommandLine} to use.
-     */
-    public static void setInstanceForTesting(CommandLine commandLine) {
-        setInstance(commandLine);
-    }
-
-    /**
      * @param fileName the file to read in.
      * @return Array of chars read from the file, or null if the file cannot be read.
      */
@@ -268,7 +257,8 @@
 
     private CommandLine() {}
 
-    private static class JavaCommandLine extends CommandLine {
+    @VisibleForTesting
+    static class JavaCommandLine extends CommandLine {
         private HashMap<String, String> mSwitches = new HashMap<String, String>();
         private ArrayList<String> mArgs = new ArrayList<String>();
 
diff --git a/base/android/java/src/org/chromium/base/CommandLineInitUtil.java b/base/android/java/src/org/chromium/base/CommandLineInitUtil.java
index ccb10b1..b0cdcc3 100644
--- a/base/android/java/src/org/chromium/base/CommandLineInitUtil.java
+++ b/base/android/java/src/org/chromium/base/CommandLineInitUtil.java
@@ -34,10 +34,22 @@
      */
     private static final String COMMAND_LINE_FILE_PATH_DEBUG_APP = "/data/local/tmp";
 
+    /**
+     * Overrides the name of the command line file to pull arguments from; used only in tests.
+     */
+    private static String sFilenameOverrideForTesting;
+
     private CommandLineInitUtil() {
     }
 
     /**
+     * Sets the command line file name override to use in tests.
+     */
+    public static void setFilenameOverrideForTesting(String value) {
+        sFilenameOverrideForTesting = value;
+    }
+
+    /**
      * Initializes the CommandLine class, pulling command line arguments from {@code fileName}.
      * @param fileName The name of the command line file to pull arguments from.
      */
@@ -52,6 +64,9 @@
      */
     public static void initCommandLine(
             String fileName, @Nullable Supplier<Boolean> shouldUseDebugFlags) {
+        if (sFilenameOverrideForTesting != null) {
+            fileName = sFilenameOverrideForTesting;
+        }
         assert !CommandLine.isInitialized();
         File commandLineFile = new File(COMMAND_LINE_FILE_PATH_DEBUG_APP, fileName);
         // shouldUseDebugCommandLine() uses IPC, so don't bother calling it if no flags file exists.
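Note: the override hook added above lets tests point command line initialization at a different flags file. A hedged usage sketch follows; the file name strings are hypothetical, and only setFilenameOverrideForTesting() and the two-argument initCommandLine() shown in this diff are assumed.

    // Illustrative test-side usage of the override added above.
    public class CommandLineFileOverrideExample {
        static void initForTest() {
            // Must run before initCommandLine() so the override is picked up.
            CommandLineInitUtil.setFilenameOverrideForTesting("test-cmdline-file");
            // shouldUseDebugFlags may be null per the @Nullable annotation in the signature above.
            CommandLineInitUtil.initCommandLine("chrome-command-line", null);
        }
    }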
diff --git a/base/android/java/src/org/chromium/base/ContentUriUtils.java b/base/android/java/src/org/chromium/base/ContentUriUtils.java
index 2750131..a1eb000 100644
--- a/base/android/java/src/org/chromium/base/ContentUriUtils.java
+++ b/base/android/java/src/org/chromium/base/ContentUriUtils.java
@@ -18,7 +18,7 @@
 
 import androidx.annotation.Nullable;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/base/android/java/src/org/chromium/base/ContextUtils.java b/base/android/java/src/org/chromium/base/ContextUtils.java
index c91ab41..fbb8718 100644
--- a/base/android/java/src/org/chromium/base/ContextUtils.java
+++ b/base/android/java/src/org/chromium/base/ContextUtils.java
@@ -20,7 +20,8 @@
 
 import androidx.annotation.Nullable;
 
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.JNINamespace;
+
 import org.chromium.base.compat.ApiHelperForM;
 import org.chromium.base.compat.ApiHelperForO;
 import org.chromium.build.BuildConfig;
diff --git a/base/android/java/src/org/chromium/base/CpuFeatures.java b/base/android/java/src/org/chromium/base/CpuFeatures.java
index 1533caf..a98460a 100644
--- a/base/android/java/src/org/chromium/base/CpuFeatures.java
+++ b/base/android/java/src/org/chromium/base/CpuFeatures.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 // The only purpose of this class is to allow sending CPU properties
 // from the browser process to sandboxed renderer processes. This is
diff --git a/base/android/java/src/org/chromium/base/EarlyTraceEvent.java b/base/android/java/src/org/chromium/base/EarlyTraceEvent.java
index c0faa45..d6df3af 100644
--- a/base/android/java/src/org/chromium/base/EarlyTraceEvent.java
+++ b/base/android/java/src/org/chromium/base/EarlyTraceEvent.java
@@ -10,10 +10,9 @@
 
 import androidx.annotation.VisibleForTesting;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -38,7 +37,6 @@
  * final String| class member. Otherwise NoDynamicStringsInTraceEventCheck error will be thrown.
  */
 @JNINamespace("base::android")
-@MainDex
 public class EarlyTraceEvent {
     /** Single trace event. */
     @VisibleForTesting
@@ -242,10 +240,13 @@
      */
     @CalledByNative
     static void setBackgroundStartupTracingFlag(boolean enabled) {
-        ContextUtils.getAppSharedPreferences()
-                .edit()
-                .putBoolean(BACKGROUND_STARTUP_TRACING_ENABLED_KEY, enabled)
-                .apply();
+        // Setting preferences might cause a disk write
+        try (StrictModeContext ignored = StrictModeContext.allowDiskWrites()) {
+            ContextUtils.getAppSharedPreferences()
+                    .edit()
+                    .putBoolean(BACKGROUND_STARTUP_TRACING_ENABLED_KEY, enabled)
+                    .apply();
+        }
     }
 
     /**
diff --git a/base/android/java/src/org/chromium/base/EventLog.java b/base/android/java/src/org/chromium/base/EventLog.java
index ec7d05a..1804add 100644
--- a/base/android/java/src/org/chromium/base/EventLog.java
+++ b/base/android/java/src/org/chromium/base/EventLog.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
 
 /**
  * A simple interface to Android's EventLog to be used by native code.
diff --git a/base/android/java/src/org/chromium/base/FeatureList.java b/base/android/java/src/org/chromium/base/FeatureList.java
index 707f506..319074b 100644
--- a/base/android/java/src/org/chromium/base/FeatureList.java
+++ b/base/android/java/src/org/chromium/base/FeatureList.java
@@ -10,10 +10,10 @@
 import androidx.annotation.Nullable;
 import androidx.annotation.VisibleForTesting;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.library_loader.LibraryLoader;
-import org.chromium.build.annotations.MainDex;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -22,7 +22,6 @@
  * Provides shared capabilities for feature flag support.
  */
 @JNINamespace("base::android")
-@MainDex
 public class FeatureList {
     /**
      * Test value overrides for tests without native.
diff --git a/base/android/java/src/org/chromium/base/FeatureMap.java b/base/android/java/src/org/chromium/base/FeatureMap.java
index bfe35e9..379e123 100644
--- a/base/android/java/src/org/chromium/base/FeatureMap.java
+++ b/base/android/java/src/org/chromium/base/FeatureMap.java
@@ -4,9 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -24,7 +23,6 @@
  * access to the list of base::Features passed to the base::android::FeatureMap.
  */
 @JNINamespace("base::android")
-@MainDex
 public abstract class FeatureMap {
     private long mNativeMapPtr;
     protected FeatureMap() {}
diff --git a/base/android/java/src/org/chromium/base/Features.java b/base/android/java/src/org/chromium/base/Features.java
index a84a7c2..88178dd 100644
--- a/base/android/java/src/org/chromium/base/Features.java
+++ b/base/android/java/src/org/chromium/base/Features.java
@@ -4,9 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * A class that serves as a bridge to native code to check the status of feature switches.
@@ -15,7 +14,6 @@
  * single C++ Feature.
  */
 @JNINamespace("base::android")
-@MainDex
 public abstract class Features {
     private final String mName;
 
diff --git a/base/android/java/src/org/chromium/base/FieldTrialList.java b/base/android/java/src/org/chromium/base/FieldTrialList.java
index 8787bea..7cb5f4c 100644
--- a/base/android/java/src/org/chromium/base/FieldTrialList.java
+++ b/base/android/java/src/org/chromium/base/FieldTrialList.java
@@ -4,13 +4,11 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.NativeMethods;
 
 /**
  * Helper to get field trial information.
  */
-@MainDex
 public class FieldTrialList {
 
     private FieldTrialList() {}
diff --git a/base/android/java/src/org/chromium/base/FileUtils.java b/base/android/java/src/org/chromium/base/FileUtils.java
index 4e962a6..627f494 100644
--- a/base/android/java/src/org/chromium/base/FileUtils.java
+++ b/base/android/java/src/org/chromium/base/FileUtils.java
@@ -13,8 +13,8 @@
 import androidx.annotation.NonNull;
 import androidx.annotation.Nullable;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
diff --git a/base/android/java/src/org/chromium/base/Flag.java b/base/android/java/src/org/chromium/base/Flag.java
index 83c5191..a496bf8 100644
--- a/base/android/java/src/org/chromium/base/Flag.java
+++ b/base/android/java/src/org/chromium/base/Flag.java
@@ -26,7 +26,9 @@
     protected Boolean mValue;
 
     protected Flag(String featureName) {
-        assert !sFlagsCreated.containsKey(featureName);
+        assert !sFlagsCreated.containsKey(featureName)
+            : "Duplicate flag creation for feature: "
+                + featureName;
         mFeatureName = featureName;
         sFlagsCreated.put(mFeatureName, this);
     }
diff --git a/base/android/java/src/org/chromium/base/ImportantFileWriterAndroid.java b/base/android/java/src/org/chromium/base/ImportantFileWriterAndroid.java
index a6bf37d..22f4c9a 100644
--- a/base/android/java/src/org/chromium/base/ImportantFileWriterAndroid.java
+++ b/base/android/java/src/org/chromium/base/ImportantFileWriterAndroid.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * This class provides an interface to the native class for writing
diff --git a/base/android/java/src/org/chromium/base/IntStringCallback.java b/base/android/java/src/org/chromium/base/IntStringCallback.java
index ed0cc8b..2022d94 100644
--- a/base/android/java/src/org/chromium/base/IntStringCallback.java
+++ b/base/android/java/src/org/chromium/base/IntStringCallback.java
@@ -4,7 +4,7 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
 
 /**
  * A simple 2-argument callback with an int and a String as arguments.
diff --git a/base/android/java/src/org/chromium/base/IntentUtils.java b/base/android/java/src/org/chromium/base/IntentUtils.java
index 99cd4b0..24a3c2d 100644
--- a/base/android/java/src/org/chromium/base/IntentUtils.java
+++ b/base/android/java/src/org/chromium/base/IntentUtils.java
@@ -20,7 +20,6 @@
 import android.text.TextUtils;
 
 import androidx.annotation.Nullable;
-import androidx.core.app.BundleCompat;
 
 import org.chromium.base.compat.ApiHelperForM;
 import org.chromium.base.compat.ApiHelperForS;
@@ -343,12 +342,16 @@
     }
 
     /**
-     * Just like {@link BundleCompat#getBinder()}, but doesn't throw exceptions.
+     * Returns the value associated with the given name, or null if no mapping of the desired type
+     * exists for the given name or a null value is explicitly associated with the name.
+     *
+     * @param name a key string
+     * @return an IBinder value, or null
      */
     public static IBinder safeGetBinder(Bundle bundle, String name) {
         if (bundle == null) return null;
         try {
-            return BundleCompat.getBinder(bundle, name);
+            return bundle.getBinder(name);
         } catch (Throwable t) {
             // Catches un-parceling exceptions.
             Log.e(TAG, "getBinder failed on bundle " + bundle);
@@ -371,8 +374,6 @@
     /**
      * Inserts a {@link Binder} value into an Intent as an extra.
      *
-     * Uses {@link BundleCompat#putBinder()}, but doesn't throw exceptions.
-     *
      * @param intent Intent to put the binder into.
      * @param name Key.
      * @param binder Binder object.
@@ -381,7 +382,7 @@
         if (intent == null) return;
         Bundle bundle = new Bundle();
         try {
-            BundleCompat.putBinder(bundle, name, binder);
+            bundle.putBinder(name, binder);
         } catch (Throwable t) {
             // Catches parceling exceptions.
             Log.e(TAG, "putBinder failed on bundle " + bundle);
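
IntentUtils now calls Bundle's binder accessors directly instead of going through BundleCompat. A minimal sketch of the same pattern, assuming an IBinder named binder obtained elsewhere (the key string is illustrative):

    Bundle bundle = new Bundle();
    try {
        bundle.putBinder("exampleBinderKey", binder); // Direct framework call, no BundleCompat.
    } catch (Throwable t) {
        // Parceling failures are swallowed, matching the methods above.
    }
    IBinder restored = bundle.getBinder("exampleBinderKey"); // Null if absent or of the wrong type.
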
diff --git a/base/android/java/src/org/chromium/base/JNIUtils.java b/base/android/java/src/org/chromium/base/JNIUtils.java
index 4497e0d..c38d578 100644
--- a/base/android/java/src/org/chromium/base/JNIUtils.java
+++ b/base/android/java/src/org/chromium/base/JNIUtils.java
@@ -4,15 +4,13 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.CalledByNative;
 
 import java.util.Map;
 
 /**
  * This class provides JNI-related methods to the native library.
  */
-@MainDex
 public class JNIUtils {
     private static final String TAG = "JNIUtils";
     private static ClassLoader sJniClassLoader;
diff --git a/base/android/java/src/org/chromium/base/JavaExceptionReporter.java b/base/android/java/src/org/chromium/base/JavaExceptionReporter.java
index 4021441..21292ff 100644
--- a/base/android/java/src/org/chromium/base/JavaExceptionReporter.java
+++ b/base/android/java/src/org/chromium/base/JavaExceptionReporter.java
@@ -6,10 +6,9 @@
 
 import androidx.annotation.UiThread;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * This UncaughtExceptionHandler will create a breakpad minidump when there is an uncaught
@@ -19,7 +18,6 @@
  * to be reported in the same way as other native crashes.
  */
 @JNINamespace("base::android")
-@MainDex
 public class JavaExceptionReporter implements Thread.UncaughtExceptionHandler {
     private final Thread.UncaughtExceptionHandler mParent;
     private final boolean mCrashAfterReport;
diff --git a/base/android/java/src/org/chromium/base/JavaHandlerThread.java b/base/android/java/src/org/chromium/base/JavaHandlerThread.java
index 6723ea9..acb6bd0 100644
--- a/base/android/java/src/org/chromium/base/JavaHandlerThread.java
+++ b/base/android/java/src/org/chromium/base/JavaHandlerThread.java
@@ -8,10 +8,9 @@
 import android.os.HandlerThread;
 import android.os.Looper;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 import java.lang.Thread.UncaughtExceptionHandler;
 
@@ -19,7 +18,6 @@
  * Thread in Java with an Android Handler. This class is not thread safe.
  */
 @JNINamespace("base::android")
-@MainDex
 public class JavaHandlerThread {
     private final HandlerThread mThread;
 
diff --git a/base/android/java/src/org/chromium/base/JniException.java b/base/android/java/src/org/chromium/base/JniException.java
deleted file mode 100644
index 8efab26..0000000
--- a/base/android/java/src/org/chromium/base/JniException.java
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base;
-
-/**
- *  Error when calling native methods.
- */
-public class JniException extends RuntimeException {
-    public JniException(String msg) {
-        super(msg);
-    }
-}
diff --git a/base/android/java/src/org/chromium/base/JniStaticTestMocker.java b/base/android/java/src/org/chromium/base/JniStaticTestMocker.java
deleted file mode 100644
index 11ba144..0000000
--- a/base/android/java/src/org/chromium/base/JniStaticTestMocker.java
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base;
-
-/**
- * Implemented by the TEST_HOOKS field in JNI wrapper classes that are generated
- * by the JNI annotation processor. Used in tests for setting the mock
- * implementation of a {@link org.chromium.base.annotations.NativeMethods} interface.
- * @param <T> The interface annotated with {@link org.chromium.base.annotations.NativeMethods}
- */
-public interface JniStaticTestMocker<T> { void setInstanceForTesting(T instance); }
diff --git a/base/android/java/src/org/chromium/base/LocaleUtils.java b/base/android/java/src/org/chromium/base/LocaleUtils.java
index fcef115..a56ad5e 100644
--- a/base/android/java/src/org/chromium/base/LocaleUtils.java
+++ b/base/android/java/src/org/chromium/base/LocaleUtils.java
@@ -13,7 +13,7 @@
 import androidx.annotation.RequiresApi;
 import androidx.annotation.VisibleForTesting;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
 
 import java.util.ArrayList;
 import java.util.Locale;
@@ -106,39 +106,12 @@
 
     /**
      * This function creates a Locale object from xx-XX style string where xx is language code
-     * and XX is a country code. This works for API level lower than 21.
-     * @return the locale that best represents the language tag.
-     */
-    public static Locale forLanguageTagCompat(String languageTag) {
-        String[] tag = languageTag.split("-");
-        if (tag.length == 0) {
-            return new Locale("");
-        }
-        String language = getUpdatedLanguageForAndroid(tag[0]);
-        if ((language.length() != 2 && language.length() != 3)) {
-            return new Locale("");
-        }
-        if (tag.length == 1) {
-            return new Locale(language);
-        }
-        String country = tag[1];
-        if (country.length() != 2 && country.length() != 3) {
-            return new Locale(language);
-        }
-        return new Locale(language, country);
-    }
-
-    /**
-     * This function creates a Locale object from xx-XX style string where xx is language code
      * and XX is a country code.
      * @return the locale that best represents the language tag.
      */
     public static Locale forLanguageTag(String languageTag) {
-        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
-            Locale locale = Locale.forLanguageTag(languageTag);
-            return getUpdatedLocaleForAndroid(locale);
-        }
-        return forLanguageTagCompat(languageTag);
+        Locale locale = Locale.forLanguageTag(languageTag);
+        return getUpdatedLocaleForAndroid(locale);
     }
 
     /**
diff --git a/base/android/java/src/org/chromium/base/MemoryPressureListener.java b/base/android/java/src/org/chromium/base/MemoryPressureListener.java
index 9500e25..6fa06ef 100644
--- a/base/android/java/src/org/chromium/base/MemoryPressureListener.java
+++ b/base/android/java/src/org/chromium/base/MemoryPressureListener.java
@@ -7,10 +7,10 @@
 import android.app.Activity;
 import android.content.ComponentCallbacks2;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.memory.MemoryPressureCallback;
-import org.chromium.build.annotations.MainDex;
 
 /**
  * This class is Java equivalent of base::MemoryPressureListener: it distributes pressure
@@ -25,7 +25,6 @@
  * NOTE: this class should only be used on UiThread as defined by ThreadUtils (which is
  *       Android main thread for Chrome, but can be some other thread for WebView).
  */
-@MainDex
 public class MemoryPressureListener {
     /**
      * Sending an intent with this action to Chrome will cause it to issue a call to onLowMemory
diff --git a/base/android/java/src/org/chromium/base/NativeLibraryLoadedStatus.java b/base/android/java/src/org/chromium/base/NativeLibraryLoadedStatus.java
deleted file mode 100644
index e876c3c..0000000
--- a/base/android/java/src/org/chromium/base/NativeLibraryLoadedStatus.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base;
-
-import org.chromium.build.BuildConfig;
-
-/**
- * Exposes native library loading status.
- */
-public class NativeLibraryLoadedStatus {
-    /**
-     * Interface for querying native method availability.
-     */
-    public interface NativeLibraryLoadedStatusProvider {
-        boolean areNativeMethodsReady();
-    }
-
-    private static NativeLibraryLoadedStatusProvider sProvider;
-
-    public static void checkLoaded() {
-        // Necessary to make sure all of these calls are stripped in release builds.
-        if (!BuildConfig.ENABLE_ASSERTS) return;
-
-        if (sProvider == null) return;
-
-        if (!sProvider.areNativeMethodsReady()) {
-            throw new JniException(
-                    String.format("Native method called before the native library was ready."));
-        }
-    }
-
-    public static void setProvider(NativeLibraryLoadedStatusProvider statusProvider) {
-        sProvider = statusProvider;
-    }
-
-    public static NativeLibraryLoadedStatusProvider getProviderForTesting() {
-        return sProvider;
-    }
-}
diff --git a/base/android/java/src/org/chromium/base/PathService.java b/base/android/java/src/org/chromium/base/PathService.java
index b3136bc..f058e72 100644
--- a/base/android/java/src/org/chromium/base/PathService.java
+++ b/base/android/java/src/org/chromium/base/PathService.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * This class provides java side access to the native PathService.
diff --git a/base/android/java/src/org/chromium/base/PathUtils.java b/base/android/java/src/org/chromium/base/PathUtils.java
index ad2f625..b2910b7 100644
--- a/base/android/java/src/org/chromium/base/PathUtils.java
+++ b/base/android/java/src/org/chromium/base/PathUtils.java
@@ -4,7 +4,6 @@
 
 package org.chromium.base;
 
-import android.annotation.SuppressLint;
 import android.content.Context;
 import android.content.pm.ApplicationInfo;
 import android.os.Build;
@@ -17,12 +16,12 @@
 import androidx.annotation.NonNull;
 import androidx.annotation.RequiresApi;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
+
 import org.chromium.base.compat.ApiHelperForM;
 import org.chromium.base.compat.ApiHelperForQ;
 import org.chromium.base.compat.ApiHelperForR;
 import org.chromium.base.task.AsyncTask;
-import org.chromium.build.annotations.MainDex;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -35,7 +34,6 @@
 /**
  * This class provides the path related methods for the native library.
  */
-@MainDex
 public abstract class PathUtils {
     private static final String TAG = "PathUtils";
     private static final String THUMBNAIL_DIRECTORY_NAME = "textures";
@@ -88,11 +86,7 @@
         }
     }
 
-    @SuppressLint("NewApi")
     private static void chmod(String path, int mode) {
-        // Both Os.chmod and ErrnoException require SDK >= 21. But while Dalvik on < 21 tolerates
-        // Os.chmod, it throws VerifyError for ErrnoException, so catch Exception instead.
-        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) return;
         try {
             Os.chmod(path, mode);
         } catch (Exception e) {
diff --git a/base/android/java/src/org/chromium/base/PiiElider.java b/base/android/java/src/org/chromium/base/PiiElider.java
index 8d32c6b..7652671 100644
--- a/base/android/java/src/org/chromium/base/PiiElider.java
+++ b/base/android/java/src/org/chromium/base/PiiElider.java
@@ -7,7 +7,7 @@
 import android.text.TextUtils;
 import android.util.Patterns;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
 
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
diff --git a/base/android/java/src/org/chromium/base/PowerMonitor.java b/base/android/java/src/org/chromium/base/PowerMonitor.java
index 5047788..6c61d9d 100644
--- a/base/android/java/src/org/chromium/base/PowerMonitor.java
+++ b/base/android/java/src/org/chromium/base/PowerMonitor.java
@@ -12,9 +12,10 @@
 import android.os.Build;
 import android.os.PowerManager;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.compat.ApiHelperForQ;
 
 /**
@@ -95,9 +96,6 @@
 
     @CalledByNative
     private static int getRemainingBatteryCapacity() {
-        // BatteryManager's property for charge level is only supported since Lollipop.
-        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) return 0;
-
         // Creation of the PowerMonitor can be deferred based on the browser startup path.  If the
         // battery power is requested prior to the browser triggering the creation, force it to be
         // created now.
diff --git a/base/android/java/src/org/chromium/base/RadioUtils.java b/base/android/java/src/org/chromium/base/RadioUtils.java
index 563a9a4..cbf4e27 100644
--- a/base/android/java/src/org/chromium/base/RadioUtils.java
+++ b/base/android/java/src/org/chromium/base/RadioUtils.java
@@ -17,8 +17,9 @@
 
 import androidx.annotation.RequiresApi;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+
 import org.chromium.base.compat.ApiHelperForM;
 import org.chromium.base.compat.ApiHelperForP;
 
diff --git a/base/android/java/src/org/chromium/base/StrictModeContext.java b/base/android/java/src/org/chromium/base/StrictModeContext.java
index 0eeec4b..e7b91fe 100644
--- a/base/android/java/src/org/chromium/base/StrictModeContext.java
+++ b/base/android/java/src/org/chromium/base/StrictModeContext.java
@@ -27,6 +27,9 @@
     private final StrictMode.VmPolicy mVmPolicy;
 
     private StrictModeContext(StrictMode.ThreadPolicy threadPolicy, StrictMode.VmPolicy vmPolicy) {
+        // TODO(crbug/1475610): After auditing StrictModeContext usage, determine whether to keep
+        // or remove these trace events.
+        TraceEvent.startAsync("StrictModeContext", hashCode());
         mThreadPolicy = threadPolicy;
         mVmPolicy = vmPolicy;
     }
@@ -45,9 +48,11 @@
      *     https://developer.android.com/reference/android/os/StrictMode.VmPolicy.Builder.html
      */
     public static StrictModeContext allowAllVmPolicies() {
-        StrictMode.VmPolicy oldPolicy = StrictMode.getVmPolicy();
-        StrictMode.setVmPolicy(StrictMode.VmPolicy.LAX);
-        return new StrictModeContext(oldPolicy);
+        try (TraceEvent e = TraceEvent.scoped("StrictModeContext.allowAllVmPolicies")) {
+            StrictMode.VmPolicy oldPolicy = StrictMode.getVmPolicy();
+            StrictMode.setVmPolicy(StrictMode.VmPolicy.LAX);
+            return new StrictModeContext(oldPolicy);
+        }
     }
 
     /**
@@ -56,35 +61,43 @@
      *     https://developer.android.com/reference/android/os/StrictMode.ThreadPolicy.Builder.html
      */
     public static StrictModeContext allowAllThreadPolicies() {
-        StrictMode.ThreadPolicy oldPolicy = StrictMode.getThreadPolicy();
-        StrictMode.setThreadPolicy(StrictMode.ThreadPolicy.LAX);
-        return new StrictModeContext(oldPolicy);
+        try (TraceEvent e = TraceEvent.scoped("StrictModeContext.allowAllThreadPolicies")) {
+            StrictMode.ThreadPolicy oldPolicy = StrictMode.getThreadPolicy();
+            StrictMode.setThreadPolicy(StrictMode.ThreadPolicy.LAX);
+            return new StrictModeContext(oldPolicy);
+        }
     }
 
     /**
      * Convenience method for disabling StrictMode for disk-writes with try-with-resources.
      */
     public static StrictModeContext allowDiskWrites() {
-        StrictMode.ThreadPolicy oldPolicy = StrictMode.allowThreadDiskWrites();
-        return new StrictModeContext(oldPolicy);
+        try (TraceEvent e = TraceEvent.scoped("StrictModeContext.allowDiskWrites")) {
+            StrictMode.ThreadPolicy oldPolicy = StrictMode.allowThreadDiskWrites();
+            return new StrictModeContext(oldPolicy);
+        }
     }
 
     /**
      * Convenience method for disabling StrictMode for disk-reads with try-with-resources.
      */
     public static StrictModeContext allowDiskReads() {
-        StrictMode.ThreadPolicy oldPolicy = StrictMode.allowThreadDiskReads();
-        return new StrictModeContext(oldPolicy);
+        try (TraceEvent e = TraceEvent.scoped("StrictModeContext.allowDiskReads")) {
+            StrictMode.ThreadPolicy oldPolicy = StrictMode.allowThreadDiskReads();
+            return new StrictModeContext(oldPolicy);
+        }
     }
 
     /**
      * Convenience method for disabling StrictMode for slow calls with try-with-resources.
      */
     public static StrictModeContext allowSlowCalls() {
-        StrictMode.ThreadPolicy oldPolicy = StrictMode.getThreadPolicy();
-        StrictMode.setThreadPolicy(
-                new StrictMode.ThreadPolicy.Builder(oldPolicy).permitCustomSlowCalls().build());
-        return new StrictModeContext(oldPolicy);
+        try (TraceEvent e = TraceEvent.scoped("StrictModeContext.allowSlowCalls")) {
+            StrictMode.ThreadPolicy oldPolicy = StrictMode.getThreadPolicy();
+            StrictMode.setThreadPolicy(
+                    new StrictMode.ThreadPolicy.Builder(oldPolicy).permitCustomSlowCalls().build());
+            return new StrictModeContext(oldPolicy);
+        }
     }
 
     /**
@@ -94,12 +107,15 @@
      * because StrictMode.ThreadPolicy.Builder#permitUnbufferedIo is added in API level 26.
      */
     public static StrictModeContext allowUnbufferedIo() {
-        StrictMode.ThreadPolicy oldPolicy = StrictMode.getThreadPolicy();
-        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
-            StrictMode.setThreadPolicy(
-                    new StrictMode.ThreadPolicy.Builder(oldPolicy).permitUnbufferedIo().build());
+        try (TraceEvent e = TraceEvent.scoped("StrictModeContext.allowUnbufferedIo")) {
+            StrictMode.ThreadPolicy oldPolicy = StrictMode.getThreadPolicy();
+            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+                StrictMode.setThreadPolicy(new StrictMode.ThreadPolicy.Builder(oldPolicy)
+                                                   .permitUnbufferedIo()
+                                                   .build());
+            }
+            return new StrictModeContext(oldPolicy);
         }
-        return new StrictModeContext(oldPolicy);
     }
 
     @Override
@@ -110,5 +126,6 @@
         if (mVmPolicy != null) {
             StrictMode.setVmPolicy(mVmPolicy);
         }
+        TraceEvent.finishAsync("StrictModeContext", hashCode());
     }
 }
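
The StrictModeContext factory methods above are meant to be used with try-with-resources; closing the context restores the saved policies and now also finishes the async trace event started in the constructor. A usage sketch (the body of the try block is hypothetical):

    try (StrictModeContext ignored = StrictModeContext.allowDiskWrites()) {
        // A disk write that would otherwise trigger a StrictMode ThreadPolicy violation.
        writePrefsToDiskForIllustration();
    }
    // On close, the previous ThreadPolicy is restored and the "StrictModeContext" async
    // trace event is finished.
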
diff --git a/base/android/java/src/org/chromium/base/SysUtils.java b/base/android/java/src/org/chromium/base/SysUtils.java
index 10a20b6..47686b5 100644
--- a/base/android/java/src/org/chromium/base/SysUtils.java
+++ b/base/android/java/src/org/chromium/base/SysUtils.java
@@ -13,10 +13,11 @@
 import android.os.StrictMode;
 import android.util.Log;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
+import org.chromium.build.BuildConfig;
 
 import java.io.BufferedReader;
 import java.io.FileReader;
@@ -27,7 +28,6 @@
  * Exposes system related information about the current device.
  */
 @JNINamespace("base::android")
-@MainDex
 public class SysUtils {
     // A device reporting strictly more total memory in megabytes cannot be considered 'low-end'.
     private static final int ANDROID_LOW_MEMORY_DEVICE_THRESHOLD_MB = 512;
@@ -112,7 +112,8 @@
      */
     @CalledByNative
     public static boolean isLowEndDevice() {
-        if (sLowEndDevice == null) {
+        // Do not cache in tests since command-line flags can change.
+        if (sLowEndDevice == null || BuildConfig.IS_FOR_TEST) {
             sLowEndDevice = detectLowEndDevice();
         }
         return sLowEndDevice.booleanValue();
@@ -138,16 +139,14 @@
                 (ActivityManager) ContextUtils.getApplicationContext().getSystemService(
                         Context.ACTIVITY_SERVICE);
         ActivityManager.MemoryInfo info = new ActivityManager.MemoryInfo();
-        am.getMemoryInfo(info);
-        return info.lowMemory;
-    }
-
-    /**
-     * Resets the cached value, if any.
-     */
-    public static void resetForTesting() {
-        sLowEndDevice = null;
-        sAmountOfPhysicalMemoryKB = null;
+        try {
+            am.getMemoryInfo(info);
+            return info.lowMemory;
+        } catch (SecurityException e) {
+            // Occurs on Redmi devices when called from isolated processes.
+            // https://crbug.com/1480655
+            return false;
+        }
     }
 
     public static boolean hasCamera(final Context context) {
@@ -165,14 +164,14 @@
         }
 
         // If this logic changes, update the comments above base::SysUtils::IsLowEndDevice.
-        sAmountOfPhysicalMemoryKB = detectAmountOfPhysicalMemoryKB();
+        int physicalMemoryKb = amountOfPhysicalMemoryKB();
         boolean isLowEnd = true;
-        if (sAmountOfPhysicalMemoryKB <= 0) {
+        if (physicalMemoryKb <= 0) {
             isLowEnd = false;
         } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
-            isLowEnd = sAmountOfPhysicalMemoryKB / 1024 <= ANDROID_O_LOW_MEMORY_DEVICE_THRESHOLD_MB;
+            isLowEnd = physicalMemoryKb / 1024 <= ANDROID_O_LOW_MEMORY_DEVICE_THRESHOLD_MB;
         } else {
-            isLowEnd = sAmountOfPhysicalMemoryKB / 1024 <= ANDROID_LOW_MEMORY_DEVICE_THRESHOLD_MB;
+            isLowEnd = physicalMemoryKb / 1024 <= ANDROID_LOW_MEMORY_DEVICE_THRESHOLD_MB;
         }
 
         return isLowEnd;
@@ -208,14 +207,6 @@
         return false;
     }
 
-    public static void setAmountOfPhysicalMemoryKBForTesting(int physicalMemoryKB) {
-        sAmountOfPhysicalMemoryKB = physicalMemoryKB;
-        ResettersForTesting.register(() -> {
-            sLowEndDevice = null;
-            sAmountOfPhysicalMemoryKB = null;
-        });
-    }
-
     /**
      * @return Whether this device is running Android Go. This is assumed when we're running Android
      * O or later and we're on a low-end device.
diff --git a/base/android/java/src/org/chromium/base/ThreadUtils.java b/base/android/java/src/org/chromium/base/ThreadUtils.java
index c2e6eb2..78a8662 100644
--- a/base/android/java/src/org/chromium/base/ThreadUtils.java
+++ b/base/android/java/src/org/chromium/base/ThreadUtils.java
@@ -8,7 +8,8 @@
 import android.os.Looper;
 import android.os.Process;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
+
 import org.chromium.base.task.PostTask;
 import org.chromium.base.task.TaskTraits;
 
diff --git a/base/android/java/src/org/chromium/base/TimezoneUtils.java b/base/android/java/src/org/chromium/base/TimezoneUtils.java
index dca2aa3..3ea44b0 100644
--- a/base/android/java/src/org/chromium/base/TimezoneUtils.java
+++ b/base/android/java/src/org/chromium/base/TimezoneUtils.java
@@ -6,14 +6,12 @@
 
 import android.os.StrictMode;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.build.annotations.MainDex;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
 
 import java.util.TimeZone;
 
 @JNINamespace("base::android")
-@MainDex
 class TimezoneUtils {
     /**
      * Guards this class from being instantiated.
diff --git a/base/android/java/src/org/chromium/base/TraceEvent.java b/base/android/java/src/org/chromium/base/TraceEvent.java
index aabc934..2a6608c 100644
--- a/base/android/java/src/org/chromium/base/TraceEvent.java
+++ b/base/android/java/src/org/chromium/base/TraceEvent.java
@@ -15,12 +15,12 @@
 
 import androidx.annotation.VisibleForTesting;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.task.PostTask;
 import org.chromium.base.task.TaskTraits;
-import org.chromium.build.annotations.MainDex;
 
 import java.util.ArrayList;
 
@@ -41,7 +41,6 @@
  * @see EarlyTraceEvent for details.
  */
 @JNINamespace("base::android")
-@MainDex
 public class TraceEvent implements AutoCloseable {
     private static volatile boolean sEnabled; // True when tracing into Chrome's tracing service.
     private static volatile boolean sUiThreadReady;
diff --git a/base/android/java/src/org/chromium/base/UnguessableToken.java b/base/android/java/src/org/chromium/base/UnguessableToken.java
index 019311b..e93fb85 100644
--- a/base/android/java/src/org/chromium/base/UnguessableToken.java
+++ b/base/android/java/src/org/chromium/base/UnguessableToken.java
@@ -9,7 +9,7 @@
 
 import androidx.annotation.Nullable;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
 
 /**
  * This class mirrors unguessable_token.h .  Since tokens are passed by value,
diff --git a/base/android/java/src/org/chromium/base/annotations/AccessedByNative.java b/base/android/java/src/org/chromium/base/annotations/AccessedByNative.java
deleted file mode 100644
index 24acba5..0000000
--- a/base/android/java/src/org/chromium/base/annotations/AccessedByNative.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Applied to fields that are accessed from native via JNI. Causes R8 to not
- * rename them.
- */
-@Target(ElementType.FIELD)
-@Retention(RetentionPolicy.CLASS)
-public @interface AccessedByNative {
-    public String value() default "";
-}
diff --git a/base/android/java/src/org/chromium/base/annotations/CalledByNative.java b/base/android/java/src/org/chromium/base/annotations/CalledByNative.java
deleted file mode 100644
index 5da03a2..0000000
--- a/base/android/java/src/org/chromium/base/annotations/CalledByNative.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Used by the JNI generator to create the necessary JNI bindings and expose this method to native
- * code.
- *
- * <p>Any uncaught Java exceptions will crash the current process. This is generally the desired
- * behavior, since most exceptions indicate an unexpected error. If your java method expects an
- * exception, we recommend refactoring to catch exceptions and indicate errors with special return
- * values instead. If this is not possible, see {@link CalledByNativeUnchecked} instead.
- */
-@Target({ElementType.CONSTRUCTOR, ElementType.METHOD})
-@Retention(RetentionPolicy.CLASS)
-public @interface CalledByNative {
-    /*
-     *  If present, tells which inner class the method belongs to.
-     */
-    public String value() default "";
-}
diff --git a/base/android/java/src/org/chromium/base/annotations/CalledByNativeForTesting.java b/base/android/java/src/org/chromium/base/annotations/CalledByNativeForTesting.java
deleted file mode 100644
index b2c7a85..0000000
--- a/base/android/java/src/org/chromium/base/annotations/CalledByNativeForTesting.java
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Used by the JNI generator to create the necessary JNI bindings and expose this method to native
- * test-only code.
- *
- * Any method annotated by this will be kept around for tests only. If you wish to call your method
- * from non-test code, see {@link CalledByNative} instead.
- */
-@Target({ElementType.CONSTRUCTOR, ElementType.METHOD})
-@Retention(RetentionPolicy.CLASS)
-public @interface CalledByNativeForTesting {
-    /*
-     *  If present, tells which inner class the method belongs to.
-     */
-    public String value() default "";
-}
diff --git a/base/android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java b/base/android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java
deleted file mode 100644
index 563d8c4..0000000
--- a/base/android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Similar to {@link CalledByNative}, this also exposes JNI bindings to native code. The main
- * difference is this <b>will not</b> crash the browser process if the Java method throws an
- * exception. However, the C++ caller <b>must</b> handle and clear the exception before calling into
- * any other Java code, otherwise the next Java method call will crash (with the previous call's
- * exception, which leads to a very confusing debugging experience).
- *
- * <p>Usage of this annotation should be very rare; due to the complexity of correctly handling
- * exceptions in C++, prefer using {@link CalledByNative}.
- */
-@Target(ElementType.METHOD)
-@Retention(RetentionPolicy.CLASS)
-public @interface CalledByNativeUnchecked {
-    /*
-     *  If present, tells which inner class the method belongs to.
-     */
-    public String value() default "";
-}
diff --git a/base/android/java/src/org/chromium/base/annotations/JNINamespace.java b/base/android/java/src/org/chromium/base/annotations/JNINamespace.java
deleted file mode 100644
index 7d98448..0000000
--- a/base/android/java/src/org/chromium/base/annotations/JNINamespace.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * @JNINamespace is used by the JNI generator to create the necessary JNI
- * bindings and expose this method to native code using the specified namespace.
- */
-@Target(ElementType.TYPE)
-@Retention(RetentionPolicy.RUNTIME)
-public @interface JNINamespace {
-    public String value();
-}
diff --git a/base/android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java b/base/android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java
deleted file mode 100644
index d13e051..0000000
--- a/base/android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * @NativeClassQualifiedName is used by the JNI generator to create the necessary JNI
- * bindings to call into the specified native class name.
- */
-@Target(ElementType.METHOD)
-@Retention(RetentionPolicy.RUNTIME)
-public @interface NativeClassQualifiedName {
-    /*
-     * Tells which native class the method is going to be bound to.
-     * The first parameter of the annotated method must be an int nativePtr pointing to
-     * an instance of this class.
-     */
-    public String value();
-}
diff --git a/base/android/java/src/org/chromium/base/annotations/NativeMethods.java b/base/android/java/src/org/chromium/base/annotations/NativeMethods.java
deleted file mode 100644
index edf6930..0000000
--- a/base/android/java/src/org/chromium/base/annotations/NativeMethods.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-package org.chromium.base.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Target(ElementType.TYPE)
-@Retention(RetentionPolicy.SOURCE)
-public @interface NativeMethods {
-    /**
-     * Tells the build system to call a different GEN_JNI, prefixed by the value we put here. This
-     * should only be used for feature modules where we need a different GEN_JNI. For example, if
-     * you did @NativeMethods("dfmname"), this would call into dfmname_GEN_JNI.java.
-     */
-    public String value() default "";
-}
diff --git a/base/android/java/src/org/chromium/base/compat/ApiHelperForM.java b/base/android/java/src/org/chromium/base/compat/ApiHelperForM.java
index 6c54203..fc1fdc8 100644
--- a/base/android/java/src/org/chromium/base/compat/ApiHelperForM.java
+++ b/base/android/java/src/org/chromium/base/compat/ApiHelperForM.java
@@ -99,8 +99,9 @@
         return userManager.isSystemUser();
     }
 
-    /*
+    /**
      * See {@link ActionMode#invalidateContentRect()}.
+     *
      * @param actionMode
      */
     public static void invalidateContentRectOnActionMode(ActionMode actionMode) {
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsListener.java b/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsListener.java
index dadbcd7..51493ac 100644
--- a/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsListener.java
+++ b/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsListener.java
@@ -5,16 +5,16 @@
 package org.chromium.base.jank_tracker;
 
 import android.os.Build.VERSION_CODES;
-import android.os.SystemClock;
 import android.view.FrameMetrics;
 import android.view.Window;
 import android.view.Window.OnFrameMetricsAvailableListener;
 
 import androidx.annotation.RequiresApi;
 
-import org.chromium.base.ThreadUtils;
 import org.chromium.base.TraceEvent;
 
+import java.util.concurrent.atomic.AtomicBoolean;
+
 /**
  * This class receives OnFrameMetricsAvailableListener.onFrameMetricsAvailable() callbacks and
  * records frame durations in a FrameMetricsStore instance.
@@ -22,14 +22,7 @@
 @RequiresApi(api = VERSION_CODES.N)
 public class FrameMetricsListener implements OnFrameMetricsAvailableListener {
     private final FrameMetricsStore mFrameMetricsStore;
-    private boolean mIsRecording;
-
-    // The reporting interval start and duration are passed to the reporting code and used in the
-    // 'JankMetricsReportingInterval' trace event.
-    private long mReportingIntervalStartTime;
-    private long mReportingIntervalDurationMillis;
-
-    private final ThreadUtils.ThreadChecker mThreadChecker = new ThreadUtils.ThreadChecker();
+    private AtomicBoolean mIsRecording = new AtomicBoolean(false);
 
     public FrameMetricsListener(FrameMetricsStore frameMetricsStore) {
         mFrameMetricsStore = frameMetricsStore;
@@ -41,42 +34,26 @@
      * @param isRecording
      */
     public void setIsListenerRecording(boolean isRecording) {
-        mThreadChecker.assertOnValidThread();
-        mIsRecording = isRecording;
-        if (isRecording && mReportingIntervalStartTime == 0) {
-            mReportingIntervalStartTime = SystemClock.uptimeMillis();
-        } else if (!isRecording) {
-            if (mReportingIntervalStartTime != 0) {
-                mReportingIntervalDurationMillis =
-                        SystemClock.uptimeMillis() - mReportingIntervalStartTime;
-            }
-            reportMetrics();
-        }
+        mIsRecording.set(isRecording);
     }
 
     @RequiresApi(api = VERSION_CODES.N)
     @Override
     public void onFrameMetricsAvailable(
             Window window, FrameMetrics frameMetrics, int dropCountSinceLastInvocation) {
-        mThreadChecker.assertOnValidThread();
-        if (!mIsRecording) {
+        if (!mIsRecording.get()) {
             return;
         }
 
         long frameTotalDurationNs = frameMetrics.getMetric(FrameMetrics.TOTAL_DURATION);
+        long frameStartVsyncTs = frameMetrics.getMetric(FrameMetrics.VSYNC_TIMESTAMP);
 
         try (TraceEvent e = TraceEvent.scoped(
                      "onFrameMetricsAvailable", Long.toString(frameTotalDurationNs))) {
             long deadlineNs = frameMetrics.getMetric(FrameMetrics.DEADLINE);
             boolean isJanky = frameTotalDurationNs >= deadlineNs;
-            mFrameMetricsStore.addFrameMeasurement(frameTotalDurationNs, isJanky);
+            mFrameMetricsStore.addFrameMeasurement(
+                    frameTotalDurationNs, isJanky, frameStartVsyncTs);
         }
     }
-
-    private void reportMetrics() {
-        JankMetricUMARecorder.recordJankMetricsToUMA(mFrameMetricsStore.takeMetrics(),
-                mReportingIntervalStartTime, mReportingIntervalDurationMillis);
-        mReportingIntervalStartTime = 0;
-        mReportingIntervalDurationMillis = 0;
-    }
 }
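
FrameMetricsListener now only gates recording with an AtomicBoolean and forwards each frame's total duration, jank flag, and vsync timestamp to the store. A sketch of how such a listener is typically wired up on API 24+; the window and handler are assumed to come from the owning controller (e.g. JankTrackerStateController's startMetricCollection), not from this file:

    FrameMetricsStore store = new FrameMetricsStore();
    FrameMetricsListener listener = new FrameMetricsListener(store);
    window.addOnFrameMetricsAvailableListener(listener, handler); // Callbacks delivered on handler.
    listener.setIsListenerRecording(true);
    // ... later, when collection stops:
    listener.setIsListenerRecording(false);
    window.removeOnFrameMetricsAvailableListener(listener);
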
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsStore.java b/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsStore.java
index bc8ad2b..45f3ea4 100644
--- a/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsStore.java
+++ b/base/android/java/src/org/chromium/base/jank_tracker/FrameMetricsStore.java
@@ -4,52 +4,255 @@
 
 package org.chromium.base.jank_tracker;
 
+import org.chromium.base.ThreadUtils.ThreadChecker;
+import org.chromium.base.TraceEvent;
+import org.chromium.build.BuildConfig;
+
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
 
 /**
  * This class stores relevant metrics from FrameMetrics between the calls to UMA reporting methods.
  */
 public class FrameMetricsStore {
+    // FrameMetricsStore can only be accessed on the handler thread (from the
+    // JankReportingScheduler.getOrCreateHandler() method). However, construction occurs on a
+    // separate thread, so the ThreadChecker is instead constructed later.
+    private ThreadChecker mThreadChecker;
+    // An arbitrary value from which to create a trace event async track. The only risk if this
+    // clashes with another track is that trace events will show up on both, potentially looking
+    // weird in the tracing UI. No other issue will occur.
+    private static final long TRACE_EVENT_TRACK_ID = 84186319646187624L;
+    // Android FrameMetrics promises in-order frame metrics, so this is just the latest timestamp.
+    private long mMaxTimestamp = -1;
+    // Array of timestamps stored in nanoseconds, they represent the moment when each frame
+    // began (VSYNC_TIMESTAMP), must always be the same size as mTotalDurationsNs.
+    private final ArrayList<Long> mTimestampsNs = new ArrayList<>();
     // Array of total durations stored in nanoseconds, they represent how long each frame took to
     // draw.
     private final ArrayList<Long> mTotalDurationsNs = new ArrayList<>();
-
     // Array of boolean values denoting whether a given frame is janky or not. Must always be the
     // same size as mTotalDurationsNs.
     private final ArrayList<Boolean> mIsJanky = new ArrayList<>();
+    // Stores the timestamp (nanoseconds) of the most recent frame metric when a scenario started.
+    // Zero if no FrameMetrics have been received.
+    private final HashMap<Integer, Long> mScenarioPreviousFrameTimestampNs = new HashMap<>();
+
+    // Converts an enum value to a string to use as a UMA histogram name; changes to these strings
+    // should be reflected in android/histograms.xml and base/android/jank_
+    public static String scenarioToString(@JankScenario int scenario) {
+        switch (scenario) {
+            case JankScenario.PERIODIC_REPORTING:
+                return "Total";
+            case JankScenario.OMNIBOX_FOCUS:
+                return "OmniboxFocus";
+            case JankScenario.NEW_TAB_PAGE:
+                return "NewTabPage";
+            case JankScenario.STARTUP:
+                return "Startup";
+            case JankScenario.TAB_SWITCHER:
+                return "TabSwitcher";
+            case JankScenario.OPEN_LINK_IN_NEW_TAB:
+                return "OpenLinkInNewTab";
+            case JankScenario.START_SURFACE_HOMEPAGE:
+                return "StartSurfaceHomepage";
+            case JankScenario.START_SURFACE_TAB_SWITCHER:
+                return "StartSurfaceTabSwitcher";
+            case JankScenario.FEED_SCROLLING:
+                return "FeedScrolling";
+            case JankScenario.WEBVIEW_SCROLLING:
+                return "WebviewScrolling";
+            default:
+                throw new IllegalArgumentException("Invalid scenario value");
+        }
+    }
+
+    /**
+     * initialize() is the first entry point that runs on the HandlerThread, so thread checking is
+     * set up here.
+     */
+    void initialize() {
+        mThreadChecker = new ThreadChecker();
+    }
 
     /**
      * Records the total draw duration and jankiness for a single frame.
      */
-    void addFrameMeasurement(long totalDurationNs, boolean isJanky) {
+    void addFrameMeasurement(long totalDurationNs, boolean isJanky, long frameStartVsyncTs) {
+        mThreadChecker.assertOnValidThread();
         mTotalDurationsNs.add(totalDurationNs);
         mIsJanky.add(isJanky);
+        mTimestampsNs.add(frameStartVsyncTs);
+        mMaxTimestamp = frameStartVsyncTs;
     }
 
-    /**
-     * Returns a copy of accumulated metrics and clears the internal storage.
-     */
-    JankMetrics takeMetrics() {
-        Long[] longDurations;
-        Boolean[] booleanIsJanky;
+    @SuppressWarnings("NoDynamicStringsInTraceEventCheck")
+    void startTrackingScenario(@JankScenario int scenario) {
+        try (TraceEvent e =
+                        TraceEvent.scoped("startTrackingScenario: " + scenarioToString(scenario))) {
+            mThreadChecker.assertOnValidThread();
+            // Ignore multiple calls to startTrackingScenario without corresponding
+            // stopTrackingScenario calls.
+            if (mScenarioPreviousFrameTimestampNs.containsKey(scenario)) {
+                return;
+            }
+            // Make a unique ID for each scenario for tracing.
+            TraceEvent.startAsync(
+                    "JankCUJ:" + scenarioToString(scenario), TRACE_EVENT_TRACK_ID + scenario);
 
-        longDurations = mTotalDurationsNs.toArray(new Long[mTotalDurationsNs.size()]);
-        mTotalDurationsNs.clear();
+            // Scenarios are tracked based on the latest stored timestamp to allow fast lookups
+            // (find index of [timestamp] vs find first index that's >= [timestamp]). If there
+            // are no stored timestamps, we hardcode the scenario's starting timestamp to 0L;
+            // this is handled as a special case in stopTrackingScenario by returning all stored
+            // frames.
+            Long startingTimestamp = 0L;
+            if (!mTimestampsNs.isEmpty()) {
+                startingTimestamp = mTimestampsNs.get(mTimestampsNs.size() - 1);
+            }
 
-        booleanIsJanky = mIsJanky.toArray(new Boolean[mIsJanky.size()]);
-        mIsJanky.clear();
+            mScenarioPreviousFrameTimestampNs.put(scenario, startingTimestamp);
+        }
+    }
 
-        long[] durations = new long[longDurations.length];
-        for (int i = 0; i < longDurations.length; i++) {
-            durations[i] = longDurations[i].longValue();
+    boolean hasReceivedMetricsPast(long endScenarioTimeNs) {
+        mThreadChecker.assertOnValidThread();
+        return mMaxTimestamp > endScenarioTimeNs;
+    }
+
+    JankMetrics stopTrackingScenario(@JankScenario int scenario) {
+        return stopTrackingScenario(scenario, -1);
+    }
+
+    // The string added is a static string.
+    @SuppressWarnings("NoDynamicStringsInTraceEventCheck")
+    JankMetrics stopTrackingScenario(@JankScenario int scenario, long endScenarioTimeNs) {
+        try (TraceEvent e =
+                        TraceEvent.scoped("finishTrackingScenario: " + scenarioToString(scenario),
+                                Long.toString(endScenarioTimeNs))) {
+            mThreadChecker.assertOnValidThread();
+            TraceEvent.finishAsync(
+                    "JankCUJ:" + scenarioToString(scenario), TRACE_EVENT_TRACK_ID + scenario);
+            // Get the timestamp of the latest frame before startTrackingScenario was called. This
+            // can be null if tracking never started for the scenario, or 0L if tracking started
+            // when no frames were stored.
+            Long previousFrameTimestamp = mScenarioPreviousFrameTimestampNs.remove(scenario);
+
+            // If stopTrackingScenario is called without a corresponding startTrackingScenario then
+            // return an empty JankMetrics object.
+            if (previousFrameTimestamp == null) {
+                removeUnusedFrames();
+                return new JankMetrics();
+            }
+
+            int startingIndex;
+            // Starting timestamp may be 0 if a scenario starts without any frames stored; in this
+            // case, return all frames.
+            if (previousFrameTimestamp == 0) {
+                startingIndex = 0;
+            } else {
+                startingIndex = mTimestampsNs.indexOf(previousFrameTimestamp);
+                // The scenario starts with the frame after the tracking timestamp.
+                startingIndex++;
+
+                // If startingIndex is out of bounds then we haven't recorded any frames since
+                // tracking started; return an empty JankMetrics object.
+                if (startingIndex >= mTimestampsNs.size()) {
+                    return new JankMetrics();
+                }
+            }
+
+            // Ending index is exclusive, so this is not out of bounds.
+            int endingIndex = mTimestampsNs.size();
+            if (endScenarioTimeNs > 0) {
+                // binarySearch returns
+                // index of the search key (non-negative value) or (-(insertion point) - 1).
+                // The insertion point is defined as the index of the first element greater than the
+                // key, or a.length if all elements in the array are less than the specified key.
+                endingIndex = Collections.binarySearch(mTimestampsNs, endScenarioTimeNs);
+                if (endingIndex < 0) {
+                    endingIndex = -1 * (endingIndex + 1);
+                } else {
+                    endingIndex = Math.min(endingIndex + 1, mTimestampsNs.size());
+                }
+                if (endingIndex <= startingIndex) {
+                    // Something went wrong; reset.
+                    TraceEvent.instant("FrameMetricsStore invalid endScenarioTimeNs");
+                    endingIndex = mTimestampsNs.size();
+                }
+            }
+
+            JankMetrics jankMetrics =
+                    convertArraysToJankMetrics(mTimestampsNs.subList(startingIndex, endingIndex),
+                            mTotalDurationsNs.subList(startingIndex, endingIndex),
+                            mIsJanky.subList(startingIndex, endingIndex));
+            removeUnusedFrames();
+
+            return jankMetrics;
+        }
+    }
+
+    private void removeUnusedFrames() {
+        if (mScenarioPreviousFrameTimestampNs.isEmpty()) {
+            TraceEvent.instant("removeUnusedFrames", Long.toString(mTimestampsNs.size()));
+            mTimestampsNs.clear();
+            mTotalDurationsNs.clear();
+            mIsJanky.clear();
+            return;
         }
 
-        boolean[] isJanky = new boolean[booleanIsJanky.length];
-        for (int i = 0; i < booleanIsJanky.length; i++) {
-            isJanky[i] = booleanIsJanky[i].booleanValue();
+        long firstUsedTimestamp = findFirstUsedTimestamp();
+        // If the earliest timestamp tracked is 0 then that scenario contains every frame
+        // stored, so we shouldn't delete anything.
+        if (firstUsedTimestamp == 0L) {
+            return;
         }
 
-        JankMetrics jankMetrics = new JankMetrics(durations, isJanky);
+        int firstUsedIndex = mTimestampsNs.indexOf(firstUsedTimestamp);
+        if (firstUsedIndex == -1) {
+            if (BuildConfig.ENABLE_ASSERTS) {
+                throw new IllegalStateException("Timestamp for tracked scenario not found");
+            }
+            // This shouldn't happen.
+            return;
+        }
+        TraceEvent.instant("removeUnusedFrames", Long.toString(firstUsedIndex));
+
+        mTimestampsNs.subList(0, firstUsedIndex).clear();
+        mTotalDurationsNs.subList(0, firstUsedIndex).clear();
+        mIsJanky.subList(0, firstUsedIndex).clear();
+    }
+
+    private long findFirstUsedTimestamp() {
+        long firstTimestamp = Long.MAX_VALUE;
+        for (long timestamp : mScenarioPreviousFrameTimestampNs.values()) {
+            if (timestamp < firstTimestamp) {
+                firstTimestamp = timestamp;
+            }
+        }
+
+        return firstTimestamp;
+    }
+
+    private JankMetrics convertArraysToJankMetrics(
+            List<Long> longTimestampsNs, List<Long> longDurations, List<Boolean> booleanIsJanky) {
+        long[] timestamps = new long[longTimestampsNs.size()];
+        for (int i = 0; i < longTimestampsNs.size(); i++) {
+            timestamps[i] = longTimestampsNs.get(i).longValue();
+        }
+
+        long[] durations = new long[longDurations.size()];
+        for (int i = 0; i < longDurations.size(); i++) {
+            durations[i] = longDurations.get(i).longValue();
+        }
+
+        boolean[] isJanky = new boolean[booleanIsJanky.size()];
+        for (int i = 0; i < booleanIsJanky.size(); i++) {
+            isJanky[i] = booleanIsJanky.get(i).booleanValue();
+        }
+
+        JankMetrics jankMetrics = new JankMetrics(timestamps, durations, isJanky);
         return jankMetrics;
     }
 }
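
stopTrackingScenario() above relies on Collections.binarySearch's insertion-point encoding to find the last frame at or before endScenarioTimeNs. A small worked example with made-up timestamps (java.util imports omitted):

    List<Long> timestampsNs = Arrays.asList(10L, 20L, 30L);
    int endingIndex = Collections.binarySearch(timestampsNs, 25L); // Not found: returns -(2) - 1 = -3.
    if (endingIndex < 0) {
        endingIndex = -1 * (endingIndex + 1); // Insertion point 2: index of the first value > 25.
    } else {
        endingIndex = Math.min(endingIndex + 1, timestampsNs.size()); // Exact hit: include that frame.
    }
    // Frames in [startingIndex, endingIndex) -- here the ones stamped 10 and 20 -- fall inside
    // the scenario's end time.
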
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankActivityTracker.java b/base/android/java/src/org/chromium/base/jank_tracker/JankActivityTracker.java
new file mode 100644
index 0000000..b664fc1
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankActivityTracker.java
@@ -0,0 +1,89 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import android.app.Activity;
+import android.os.Build.VERSION_CODES;
+
+import androidx.annotation.RequiresApi;
+
+import org.chromium.base.ActivityState;
+import org.chromium.base.ApplicationStatus;
+import org.chromium.base.ApplicationStatus.ActivityStateListener;
+import org.chromium.base.ThreadUtils.ThreadChecker;
+import org.chromium.base.lifetime.DestroyChecker;
+
+import java.lang.ref.WeakReference;
+
+/**
+ * This class takes an Activity and attaches a FrameMetricsListener to it; in addition, it
+ * controls periodic jank metric reporting and frame metric recording based on the Activity's
+ * lifecycle events.
+ */
+@RequiresApi(api = VERSION_CODES.N)
+class JankActivityTracker extends JankTrackerStateController implements ActivityStateListener {
+    private final ThreadChecker mThreadChecker = new ThreadChecker();
+    private final DestroyChecker mDestroyChecker = new DestroyChecker();
+
+    private WeakReference<Activity> mActivityReference;
+
+    JankActivityTracker(Activity context, FrameMetricsListener listener,
+            JankReportingScheduler reportingScheduler) {
+        super(listener, reportingScheduler);
+        mActivityReference = new WeakReference<>(context);
+    }
+
+    @Override
+    public void initialize() {
+        assertValidState();
+        Activity activity = mActivityReference.get();
+        if (activity != null) {
+            ApplicationStatus.registerStateListenerForActivity(this, activity);
+            @ActivityState
+            int activityState = ApplicationStatus.getStateForActivity(activity);
+            onActivityStateChange(activity, activityState);
+            startMetricCollection(activity.getWindow());
+        }
+    }
+
+    @Override
+    public void destroy() {
+        mThreadChecker.assertOnValidThread();
+        ApplicationStatus.unregisterActivityStateListener(this);
+        stopPeriodicReporting();
+        Activity activity = mActivityReference.get();
+        if (activity != null) {
+            stopMetricCollection(activity.getWindow());
+        }
+        mDestroyChecker.destroy();
+    }
+
+    private void assertValidState() {
+        mThreadChecker.assertOnValidThread();
+        mDestroyChecker.checkNotDestroyed();
+    }
+
+    @Override
+    public void onActivityStateChange(Activity activity, @ActivityState int newState) {
+        assertValidState();
+        switch (newState) {
+            case ActivityState.STARTED: // Intentional fallthrough.
+            case ActivityState.RESUMED:
+                startPeriodicReporting();
+                startMetricCollection(null);
+                break;
+            case ActivityState.PAUSED:
+                // This method can be called safely at any moment; we want to report metrics even
+                // when the activity is paused.
+                startPeriodicReporting();
+                stopMetricCollection(null);
+                break;
+            case ActivityState.STOPPED:
+                stopPeriodicReporting();
+                stopMetricCollection(null);
+                break;
+        }
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankEndScenarioTime.java b/base/android/java/src/org/chromium/base/jank_tracker/JankEndScenarioTime.java
new file mode 100644
index 0000000..d2d1a9e
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankEndScenarioTime.java
@@ -0,0 +1,26 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+/**
+ * A simple data structure that holds an uptimeNanos value up to which we want frame data, and a
+ * delay to wait for that data to arrive from the Android FrameMetrics API.
+ */
+public final class JankEndScenarioTime {
+    public final long endScenarioTimeNs;
+    // 100ms should be long enough to receive the frame metrics unless frames have been dropped.
+    public final long timeoutDelayMs = 100;
+
+    public static JankEndScenarioTime endAt(long uptimeNanos) {
+        if (uptimeNanos <= 0) {
+            return null;
+        }
+        return new JankEndScenarioTime(uptimeNanos);
+    }
+
+    private JankEndScenarioTime(long uptimeNanos) {
+        endScenarioTimeNs = uptimeNanos;
+    }
+}
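For orientation, a minimal sketch of how this holder might be consumed; the scroll hook, class, and field names below are illustrative and not part of this change, and System.nanoTime() is assumed to share a clock base with the FrameMetrics vsync timestamps.

```java
import org.chromium.base.jank_tracker.JankEndScenarioTime;
import org.chromium.base.jank_tracker.JankReportingScheduler;
import org.chromium.base.jank_tracker.JankScenario;

class ScrollJankReporter {
    private final JankReportingScheduler mReportingScheduler;

    ScrollJankReporter(JankReportingScheduler reportingScheduler) {
        mReportingScheduler = reportingScheduler;
    }

    void onScrollFinished() {
        // endAt() returns null for non-positive timestamps.
        JankEndScenarioTime endTime = JankEndScenarioTime.endAt(System.nanoTime());
        if (endTime == null) return;
        // Waits up to timeoutDelayMs (100 ms) for frame data up to endScenarioTimeNs.
        mReportingScheduler.finishTrackingScenario(JankScenario.WEBVIEW_SCROLLING, endTime);
    }
}
```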
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankMetricUMARecorder.java b/base/android/java/src/org/chromium/base/jank_tracker/JankMetricUMARecorder.java
index 9bb6892..fb3ee8a 100644
--- a/base/android/java/src/org/chromium/base/jank_tracker/JankMetricUMARecorder.java
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankMetricUMARecorder.java
@@ -4,27 +4,26 @@
 
 package org.chromium.base.jank_tracker;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * Sends Android jank metrics to native to be recorded using UMA.
  */
 @JNINamespace("base::android")
 public class JankMetricUMARecorder {
-    public static void recordJankMetricsToUMA(
-            JankMetrics metric, long reportingIntervalStartTime, long reportingIntervalDuration) {
+    public static void recordJankMetricsToUMA(JankMetrics metric, long reportingIntervalStartTime,
+            long reportingIntervalDuration, @JankScenario int scenario) {
         if (metric == null) {
             return;
         }
-
         JankMetricUMARecorderJni.get().recordJankMetrics(metric.durationsNs, metric.isJanky,
-                reportingIntervalStartTime, reportingIntervalDuration);
+                reportingIntervalStartTime, reportingIntervalDuration, scenario);
     }
 
     @NativeMethods
     public interface Natives {
         void recordJankMetrics(long[] durationsNs, boolean[] jankStatus,
-                long reportingIntervalStartTime, long reportingIntervalDuration);
+                long reportingIntervalStartTime, long reportingIntervalDuration, int scenario);
     }
 }
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankMetrics.java b/base/android/java/src/org/chromium/base/jank_tracker/JankMetrics.java
index 6c94177..36b84e9 100644
--- a/base/android/java/src/org/chromium/base/jank_tracker/JankMetrics.java
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankMetrics.java
@@ -9,10 +9,16 @@
  * to UMA.
  */
 class JankMetrics {
+    public final long[] timestampsNs;
     public final long[] durationsNs;
     public final boolean[] isJanky;
-
-    public JankMetrics(long[] durationsNs, boolean[] isJanky) {
+    public JankMetrics() {
+        timestampsNs = new long[0];
+        durationsNs = new long[0];
+        isJanky = new boolean[0];
+    }
+    public JankMetrics(long[] timestampsNs, long[] durationsNs, boolean[] isJanky) {
+        this.timestampsNs = timestampsNs;
         this.durationsNs = durationsNs;
         this.isJanky = isJanky;
     }
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankReportingRunnable.java b/base/android/java/src/org/chromium/base/jank_tracker/JankReportingRunnable.java
new file mode 100644
index 0000000..272b56d
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankReportingRunnable.java
@@ -0,0 +1,98 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import android.os.Handler;
+
+import org.chromium.base.TraceEvent;
+/**
+ * This runnable receives a FrameMetricsStore instance and starts/stops tracking a given scenario.
+ * When a scenario stops, it takes its metrics and sends them to native to be recorded in UMA.
+ * This is executed by JankReportingScheduler on its own thread.
+ */
+class JankReportingRunnable implements Runnable {
+    private final FrameMetricsStore mMetricsStore;
+    private final @JankScenario int mScenario;
+    private final boolean mIsStartingTracking;
+    // This must be the same handler that this is running on.
+    private final Handler mHandler;
+    // If metrics should be collected based on the state (scrolling) specify a
+    // JankEndScenarioTime.
+    private final JankEndScenarioTime mJankEndScenarioTime;
+
+    // When a JankEndScenarioTime is specified we don't necessarily collect the metrics right
+    // away; we may post this runnable with a delay instead. To share code between the delayed
+    // and non-delayed paths we use the same runnable, running it directly when there is no delay.
+    private class FinalReportingRunnable implements Runnable {
+        @Override
+        public void run() {
+            try (TraceEvent e = TraceEvent.scoped("ReportingCUJScenarioData", mScenario)) {
+                JankMetrics metrics;
+                if (mJankEndScenarioTime == null) {
+                    metrics = mMetricsStore.stopTrackingScenario(mScenario);
+                } else {
+                    // The data has arrived or the timeout elapsed, so collect whatever is stored.
+                    metrics = mMetricsStore.stopTrackingScenario(
+                            mScenario, mJankEndScenarioTime.endScenarioTimeNs);
+                }
+
+                if (metrics == null || metrics.timestampsNs.length == 0) {
+                    TraceEvent.instant("no metrics");
+                    return;
+                }
+
+                long startTime = metrics.timestampsNs[0] / 1000000;
+                long lastTime = metrics.timestampsNs[metrics.timestampsNs.length - 1] / 1000000;
+                long lastDuration = metrics.durationsNs[metrics.durationsNs.length - 1] / 1000000;
+                // The interval covered by the metrics runs from the first VSYNC_TIMESTAMP
+                // (startTime) to when the last frame finished (lastTime + lastDuration).
+                long endTime = lastTime - startTime + lastDuration;
+
+                // Confirm that the current call context is valid.
+                // Debug builds will assert and fail; release builds will optimize this out.
+                JankMetricUMARecorderJni.get();
+                // TODO(salg@): Cache metrics in case native takes >30s to initialize.
+                JankMetricUMARecorder.recordJankMetricsToUMA(
+                        metrics, startTime, endTime, mScenario);
+            }
+        }
+    }
+
+    JankReportingRunnable(FrameMetricsStore metricsStore, @JankScenario int scenario,
+            boolean isStartingTracking, Handler handler, JankEndScenarioTime endScenarioTime) {
+        mMetricsStore = metricsStore;
+        mScenario = scenario;
+        mIsStartingTracking = isStartingTracking;
+        mHandler = handler;
+        mJankEndScenarioTime = endScenarioTime;
+    }
+
+    @Override
+    public void run() {
+        try (TraceEvent e = TraceEvent.scoped("StartingOrStoppingJankScenario",
+                     "StartingScenario:" + Boolean.toString(mIsStartingTracking)
+                             + ",Scenario:" + Integer.toString(mScenario))) {
+            if (mIsStartingTracking) {
+                if (mMetricsStore == null) {
+                    TraceEvent.instant("StartTrackingScenario metrics store null");
+                    return;
+                }
+                mMetricsStore.startTrackingScenario(mScenario);
+                return;
+            }
+            // Data is ready when no end time was requested or when metrics past the requested
+            // end time have already been received.
+            boolean dataIsReady = mJankEndScenarioTime == null
+                    || mMetricsStore.hasReceivedMetricsPast(mJankEndScenarioTime.endScenarioTimeNs);
+
+            if (dataIsReady) {
+                new FinalReportingRunnable().run();
+            } else {
+                mHandler.postDelayed(
+                        new FinalReportingRunnable(), mJankEndScenarioTime.timeoutDelayMs);
+            }
+        }
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankReportingScheduler.java b/base/android/java/src/org/chromium/base/jank_tracker/JankReportingScheduler.java
new file mode 100644
index 0000000..a781677
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankReportingScheduler.java
@@ -0,0 +1,116 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import android.os.Handler;
+import android.os.HandlerThread;
+
+import androidx.annotation.Nullable;
+
+import java.util.HashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * This class receives requests to start and stop jank scenario tracking and runs them in a
+ * HandlerThread it creates. In addition it handles the recording of periodic jank metrics.
+ */
+public class JankReportingScheduler {
+    private static final long PERIODIC_METRIC_DELAY_MS = 5_000;
+    private final FrameMetricsStore mFrameMetricsStore;
+    private final HashMap<Integer, JankReportingRunnable> mRunnableStore;
+
+    public JankReportingScheduler(FrameMetricsStore frameMetricsStore) {
+        mFrameMetricsStore = frameMetricsStore;
+        mRunnableStore = new HashMap<Integer, JankReportingRunnable>();
+    }
+
+    private final Runnable mPeriodicMetricReporter = new Runnable() {
+        @Override
+        public void run() {
+            // Record the metrics collected during the periodic window that just ended.
+            finishTrackingScenario(JankScenario.PERIODIC_REPORTING);
+
+            if (mIsPeriodicReporterLooping.get()) {
+                // The stop task above was posted to the handler first, so posting the next start
+                // task and the next delayed report here keeps everything running in order.
+                startTrackingScenario(JankScenario.PERIODIC_REPORTING);
+                getOrCreateHandler().postDelayed(mPeriodicMetricReporter, PERIODIC_METRIC_DELAY_MS);
+            }
+        }
+    };
+
+    @Nullable
+    protected HandlerThread mHandlerThread;
+    @Nullable
+    private Handler mHandler;
+    private final AtomicBoolean mIsPeriodicReporterLooping = new AtomicBoolean(false);
+
+    public void startTrackingScenario(@JankScenario int scenario) {
+        // Check whether a stop task was already queued for this scenario and, if so, attempt to
+        // cancel it. We post the start runnable regardless: if the stop task was cancelled in
+        // time the start is ignored, and if the stop task already ran this simply starts a new
+        // scenario.
+        JankReportingRunnable stopTask = mRunnableStore.get(scenario);
+        if (stopTask != null) {
+            getOrCreateHandler().removeCallbacks(stopTask);
+            mRunnableStore.remove(scenario);
+        }
+        getOrCreateHandler().post(new JankReportingRunnable(
+                mFrameMetricsStore, scenario, /* isStartingTracking= */ true, mHandler, null));
+    }
+
+    public void finishTrackingScenario(@JankScenario int scenario) {
+        finishTrackingScenario(scenario, -1);
+    }
+
+    public void finishTrackingScenario(@JankScenario int scenario, long endScenarioTimeNs) {
+        finishTrackingScenario(scenario, JankEndScenarioTime.endAt(endScenarioTimeNs));
+    }
+
+    public void finishTrackingScenario(
+            @JankScenario int scenario, JankEndScenarioTime endScenarioTime) {
+        // We store the stop task in case the delay is greater than zero and we start this scenario
+        // again.
+        JankReportingRunnable runnable = mRunnableStore.getOrDefault(scenario,
+                new JankReportingRunnable(mFrameMetricsStore, scenario,
+                        /* isStartingTracking= */ false, mHandler, endScenarioTime));
+        getOrCreateHandler().post(runnable);
+    }
+
+    public Handler getOrCreateHandler() {
+        if (mHandler == null) {
+            mHandlerThread = new HandlerThread("Jank-Tracker");
+            mHandlerThread.start();
+            mHandler = new Handler(mHandlerThread.getLooper());
+            mHandler.post(new Runnable() {
+                @Override
+                public void run() {
+                    mFrameMetricsStore.initialize();
+                }
+            });
+        }
+        return mHandler;
+    }
+
+    public void startReportingPeriodicMetrics() {
+        // If mIsPeriodicReporterLooping was already true then there's no need to post another task.
+        if (mIsPeriodicReporterLooping.getAndSet(true)) {
+            return;
+        }
+        startTrackingScenario(JankScenario.PERIODIC_REPORTING);
+        getOrCreateHandler().postDelayed(mPeriodicMetricReporter, PERIODIC_METRIC_DELAY_MS);
+    }
+
+    public void stopReportingPeriodicMetrics() {
+        // Disable mPeriodicMetricReporter looping, and return early if it was already disabled.
+        if (!mIsPeriodicReporterLooping.getAndSet(false)) {
+            return;
+        }
+        // Remove any existing mPeriodicMetricReporter delayed tasks.
+        getOrCreateHandler().removeCallbacks(mPeriodicMetricReporter);
+        // Run mPeriodicMetricReporter one last time immediately.
+        getOrCreateHandler().post(mPeriodicMetricReporter);
+    }
+}
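A rough usage sketch of the scheduler, assuming a FrameMetricsListener elsewhere feeds frame data into the store; the wrapper class and wiring shown here are illustrative.

```java
import org.chromium.base.jank_tracker.FrameMetricsStore;
import org.chromium.base.jank_tracker.JankReportingScheduler;
import org.chromium.base.jank_tracker.JankScenario;

class SchedulerSketch {
    static void trackOmniboxFocus() {
        FrameMetricsStore store = new FrameMetricsStore();
        JankReportingScheduler scheduler = new JankReportingScheduler(store);

        // Start/stop tasks run on the scheduler's "Jank-Tracker" HandlerThread.
        scheduler.startTrackingScenario(JankScenario.OMNIBOX_FOCUS);
        // ... the user journey runs; a FrameMetricsListener records frames into |store| ...
        scheduler.finishTrackingScenario(JankScenario.OMNIBOX_FOCUS);

        // Periodic reporting can run alongside explicit scenarios.
        scheduler.startReportingPeriodicMetrics();
    }
}
```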
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankScenario.java b/base/android/java/src/org/chromium/base/jank_tracker/JankScenario.java
new file mode 100644
index 0000000..ef789ce
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankScenario.java
@@ -0,0 +1,36 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import androidx.annotation.IntDef;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+/**
+ * A list of jank scenarios to be tracked. Each scenario corresponds to a specific user journey,
+ * except for PERIODIC_REPORTING, which runs constantly and is reported periodically.
+ *
+ * IMPORTANT: This must be kept up to date with the histograms.xml histograms
+ * (Android.FrameTimelineJank..*) and the JankScenario C++ enum in
+ * //base/android/jank_metric_uma_recorder.cc.
+ */
+@IntDef({JankScenario.PERIODIC_REPORTING, JankScenario.OMNIBOX_FOCUS, JankScenario.NEW_TAB_PAGE,
+        JankScenario.STARTUP, JankScenario.TAB_SWITCHER, JankScenario.OPEN_LINK_IN_NEW_TAB,
+        JankScenario.START_SURFACE_HOMEPAGE, JankScenario.START_SURFACE_TAB_SWITCHER,
+        JankScenario.FEED_SCROLLING, JankScenario.WEBVIEW_SCROLLING})
+@Retention(RetentionPolicy.SOURCE)
+public @interface JankScenario {
+    int PERIODIC_REPORTING = 1;
+    int OMNIBOX_FOCUS = 2;
+    int NEW_TAB_PAGE = 3;
+    int STARTUP = 4;
+    int TAB_SWITCHER = 5;
+    int OPEN_LINK_IN_NEW_TAB = 6;
+    int START_SURFACE_HOMEPAGE = 7;
+    int START_SURFACE_TAB_SWITCHER = 8;
+    int FEED_SCROLLING = 9;
+    int WEBVIEW_SCROLLING = 10;
+}
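Since @JankScenario is an @IntDef, scenario values are plain ints that lint checks at call sites; a small sketch (the helper method and return strings are illustrative).

```java
import org.chromium.base.jank_tracker.JankScenario;

class ScenarioSketch {
    // Lint verifies that only JankScenario constants are passed here.
    static String labelForTracing(@JankScenario int scenario) {
        switch (scenario) {
            case JankScenario.TAB_SWITCHER:
                return "TabSwitcher";
            case JankScenario.OMNIBOX_FOCUS:
                return "OmniboxFocus";
            default:
                return "Other";
        }
    }
}
```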
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankTracker.java b/base/android/java/src/org/chromium/base/jank_tracker/JankTracker.java
new file mode 100644
index 0000000..80ea847
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankTracker.java
@@ -0,0 +1,35 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+/**
+ * Interface for Android UI jank tracking.
+ */
+public interface JankTracker {
+    /**
+     * Starts tracking UI jank for a specific use scenario (e.g. Tab switcher, Omnibox, etc.).
+     * Calling this method more than once without calling {@code finishTrackingScenario} in
+     * between does nothing.
+     * @param scenario A value from {@link JankScenario} that specifies a use scenario.
+     */
+    void startTrackingScenario(@JankScenario int scenario);
+
+    /**
+     * Finishes tracking UI jank for a use scenario (e.g. Tab switcher, Omnibox, etc.). Histograms
+     * for that scenario (e.g. Android.Jank.FrameDuration.Omnibox) are recorded immediately after
+     * calling this method. Calling this method without calling {@code startTrackingScenario}
+     * beforehand won't do anything.
+     * @param scenario A value from {@link JankScenario} that specifies a use scenario.
+     * @param endScenarioTimeNs A value that determines the maximum frame metric (based on vsync
+     *         time, in nanoseconds) that should be included.
+     */
+    void finishTrackingScenario(@JankScenario int scenario, long endScenarioTimeNs);
+    void finishTrackingScenario(@JankScenario int scenario);
+
+    /**
+     * To be called when the jank tracker should stop listening to changes.
+     */
+    void destroy();
+}
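A sketch of how an embedder might bracket a user journey with this interface; the wrapper class and method names are illustrative.

```java
import org.chromium.base.jank_tracker.JankScenario;
import org.chromium.base.jank_tracker.JankTracker;

class OmniboxJankSketch {
    private final JankTracker mJankTracker;

    OmniboxJankSketch(JankTracker jankTracker) {
        mJankTracker = jankTracker;
    }

    void onFocused() {
        mJankTracker.startTrackingScenario(JankScenario.OMNIBOX_FOCUS);
    }

    void onUnfocused() {
        // Histograms for the scenario are recorded right after this call.
        mJankTracker.finishTrackingScenario(JankScenario.OMNIBOX_FOCUS);
    }
}
```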
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankTrackerImpl.java b/base/android/java/src/org/chromium/base/jank_tracker/JankTrackerImpl.java
new file mode 100644
index 0000000..59f8518
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankTrackerImpl.java
@@ -0,0 +1,89 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import android.app.Activity;
+import android.os.Build;
+
+/**
+ * Class for recording janky frame metrics for a specific Activity.
+ *
+ * It should be constructed when the activity is created; recording starts and stops automatically
+ * based on activity state. When the activity is being destroyed, {@link #destroy()} should be
+ * called to clear the activity state observer. All methods should be called from the UI thread.
+ */
+public class JankTrackerImpl implements JankTracker {
+    // We use the DEADLINE field in the Android FrameMetrics which was added in S.
+    private static final boolean IS_TRACKING_ENABLED =
+            Build.VERSION.SDK_INT >= Build.VERSION_CODES.S;
+
+    private JankTrackerStateController mController;
+    private JankReportingScheduler mReportingScheduler;
+
+    /**
+     * Creates a new JankTracker instance tracking UI rendering of an activity. Metric recording
+     * starts when the activity starts, and it's paused when the activity stops.
+     */
+    public JankTrackerImpl(Activity activity) {
+        FrameMetricsStore metricsStore = new FrameMetricsStore();
+        if (!constructInternalPreController(new JankReportingScheduler(metricsStore))) return;
+
+        constructInternalFinal(new JankActivityTracker(
+                activity, new FrameMetricsListener(metricsStore), mReportingScheduler));
+    }
+
+    /**
+     * Creates a new JankTracker which allows the controller to determine when it should start and
+     * stop metric scenarios/collection.
+     */
+    public JankTrackerImpl(JankTrackerStateController controller) {
+        if (!constructInternalPreController(controller.mReportingScheduler)) return;
+        constructInternalFinal(controller);
+    }
+
+    private boolean constructInternalPreController(JankReportingScheduler scheduler) {
+        if (!IS_TRACKING_ENABLED) {
+            mReportingScheduler = null;
+            mController = null;
+            return false;
+        }
+        mReportingScheduler = scheduler;
+        return true;
+    }
+
+    private void constructInternalFinal(JankTrackerStateController controller) {
+        mController = controller;
+        mController.initialize();
+    }
+
+    @Override
+    public void startTrackingScenario(@JankScenario int scenario) {
+        if (!IS_TRACKING_ENABLED) return;
+
+        mReportingScheduler.startTrackingScenario(scenario);
+    }
+
+    @Override
+    public void finishTrackingScenario(@JankScenario int scenario) {
+        finishTrackingScenario(scenario, -1);
+    }
+
+    @Override
+    public void finishTrackingScenario(@JankScenario int scenario, long endScenarioTimeNs) {
+        if (!IS_TRACKING_ENABLED) return;
+
+        mReportingScheduler.finishTrackingScenario(scenario, endScenarioTimeNs);
+    }
+
+    /**
+     * Stops listening for Activity state changes.
+     */
+    @Override
+    public void destroy() {
+        if (!IS_TRACKING_ENABLED) return;
+
+        mController.destroy();
+    }
+}
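The expected Activity wiring, sketched under the assumption that the embedder owns the Activity lifecycle; the Activity subclass below is illustrative.

```java
import android.app.Activity;
import android.os.Bundle;

import org.chromium.base.jank_tracker.JankTracker;
import org.chromium.base.jank_tracker.JankTrackerImpl;

public class ExampleActivity extends Activity {
    private JankTracker mJankTracker;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // On pre-S devices this becomes a no-op internally (IS_TRACKING_ENABLED is false).
        mJankTracker = new JankTrackerImpl(this);
    }

    @Override
    protected void onDestroy() {
        // Unregisters the ApplicationStatus observer added by JankActivityTracker.
        mJankTracker.destroy();
        super.onDestroy();
    }
}
```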
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/JankTrackerStateController.java b/base/android/java/src/org/chromium/base/jank_tracker/JankTrackerStateController.java
new file mode 100644
index 0000000..b73883d
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/JankTrackerStateController.java
@@ -0,0 +1,60 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import android.view.Window;
+
+import org.chromium.base.Log;
+
+/**
+ * A simple holder class that makes it easy to start and stop metric listening as well as periodic
+ * reporting. It is used by JankTrackerImpl to hold the listener reference; the class should be
+ * hooked up to something that decides when to start and stop listening and when to run periodic
+ * reporting.
+ */
+public class JankTrackerStateController {
+    private static final String TAG = "JankTracker";
+    protected final FrameMetricsListener mFrameMetricsListener;
+    protected final JankReportingScheduler mReportingScheduler;
+
+    public JankTrackerStateController(
+            FrameMetricsListener listener, JankReportingScheduler scheduler) {
+        mFrameMetricsListener = listener;
+        mReportingScheduler = scheduler;
+    }
+
+    public void startPeriodicReporting() {
+        mReportingScheduler.startReportingPeriodicMetrics();
+    }
+
+    public void stopPeriodicReporting() {
+        mReportingScheduler.stopReportingPeriodicMetrics();
+    }
+
+    public void startMetricCollection(Window window) {
+        mFrameMetricsListener.setIsListenerRecording(true);
+        if (window != null) {
+            window.addOnFrameMetricsAvailableListener(
+                    mFrameMetricsListener, mReportingScheduler.getOrCreateHandler());
+        }
+    }
+
+    public void stopMetricCollection(Window window) {
+        mFrameMetricsListener.setIsListenerRecording(false);
+        if (window != null) {
+            try {
+                window.removeOnFrameMetricsAvailableListener(mFrameMetricsListener);
+            } catch (IllegalArgumentException e) {
+                // Adding the listener failed for whatever reason, so it could not be unregistered.
+                Log.e(TAG, "Could not remove listener %s from window %s", mFrameMetricsListener,
+                        window);
+            }
+        }
+    }
+
+    // Extra methods for subclasses that need to perform extra work on initialization/destruction.
+    public void initialize() {}
+    public void destroy() {}
+}
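Besides JankActivityTracker above, embedders can subclass this holder to drive collection from their own signals; a sketch under that assumption (the subclass and its hooks are illustrative).

```java
import android.view.Window;

import org.chromium.base.jank_tracker.FrameMetricsListener;
import org.chromium.base.jank_tracker.JankReportingScheduler;
import org.chromium.base.jank_tracker.JankTrackerStateController;

class ManualJankController extends JankTrackerStateController {
    ManualJankController(FrameMetricsListener listener, JankReportingScheduler scheduler) {
        super(listener, scheduler);
    }

    void onForegrounded(Window window) {
        startPeriodicReporting();
        startMetricCollection(window);
    }

    void onBackgrounded(Window window) {
        stopPeriodicReporting();
        stopMetricCollection(window);
    }
}
```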
diff --git a/base/android/java/src/org/chromium/base/jank_tracker/PlaceholderJankTracker.java b/base/android/java/src/org/chromium/base/jank_tracker/PlaceholderJankTracker.java
new file mode 100644
index 0000000..a6a2aba
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/jank_tracker/PlaceholderJankTracker.java
@@ -0,0 +1,22 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+/**
+ * Placeholder implementation of JankTracker.
+ */
+public class PlaceholderJankTracker implements JankTracker {
+    @Override
+    public void startTrackingScenario(int scenario) {}
+
+    @Override
+    public void finishTrackingScenario(int scenario) {}
+
+    @Override
+    public void finishTrackingScenario(int scenario, long endScenarioTimeNs) {}
+
+    @Override
+    public void destroy() {}
+}
diff --git a/base/android/java/src/org/chromium/base/library_loader/LibraryLoader.java b/base/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
index ac1eeaf..57dc74c 100644
--- a/base/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
+++ b/base/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
@@ -15,24 +15,25 @@
 import androidx.annotation.IntDef;
 import androidx.annotation.VisibleForTesting;
 
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeLibraryLoadedStatus;
+import org.jni_zero.NativeLibraryLoadedStatus.NativeLibraryLoadedStatusProvider;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.BaseSwitches;
+import org.chromium.base.Callback;
 import org.chromium.base.CommandLine;
 import org.chromium.base.ContextUtils;
 import org.chromium.base.Log;
-import org.chromium.base.NativeLibraryLoadedStatus;
-import org.chromium.base.NativeLibraryLoadedStatus.NativeLibraryLoadedStatusProvider;
 import org.chromium.base.ResettersForTesting;
 import org.chromium.base.StrictModeContext;
 import org.chromium.base.TimeUtils.CurrentThreadTimeMillisTimer;
 import org.chromium.base.TimeUtils.UptimeMillisTimer;
 import org.chromium.base.TraceEvent;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
 import org.chromium.base.metrics.RecordHistogram;
 import org.chromium.base.metrics.UmaRecorderHolder;
 import org.chromium.build.BuildConfig;
 import org.chromium.build.NativeLibraries;
-import org.chromium.build.annotations.MainDex;
 
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
@@ -53,7 +54,6 @@
  * See also base/android/library_loader/library_loader_hooks.cc, which contains
  * the native counterpart to this class.
  */
-@MainDex
 @JNINamespace("base::android")
 public class LibraryLoader {
     private static final String TAG = "LibraryLoader";
@@ -161,6 +161,14 @@
         int CHILD_WITHOUT_ZYGOTE = 2;
     }
 
+    // Used by tests to ensure that sLoadFailedCallback is called; also referenced by
+    // SplitCompatApplication.
+    @VisibleForTesting
+    public static boolean sOverrideNativeLibraryCannotBeLoadedForTesting;
+
+    // Allow embedders to register a callback to handle native library load failures.
+    public static Callback<UnsatisfiedLinkError> sLoadFailedCallback;
+
     // Returns true when sharing RELRO between the browser process and the app zygote should *not*
     // be attempted.
     public static boolean mainProcessIntendsToProvideRelroFd() {
@@ -753,6 +761,10 @@
             UptimeMillisTimer uptimeTimer = new UptimeMillisTimer();
             CurrentThreadTimeMillisTimer threadTimeTimer = new CurrentThreadTimeMillisTimer();
 
+            if (sOverrideNativeLibraryCannotBeLoadedForTesting) {
+                throw new UnsatisfiedLinkError();
+            }
+
             if (useChromiumLinker() && !mFallbackToSystemLinker) {
                 if (DEBUG) Log.i(TAG, "Loading with the Chromium linker.");
                 // See base/android/linker/config.gni, the chromium linker is only enabled when
@@ -776,7 +788,11 @@
             getMediator().recordLoadTimeHistogram(loadTimeMs);
             getMediator().recordLoadThreadTimeHistogram(threadTimeTimer.getElapsedMillis());
         } catch (UnsatisfiedLinkError e) {
-            throw new ProcessInitException(LoaderErrors.NATIVE_LIBRARY_LOAD_FAILED, e);
+            if (sLoadFailedCallback != null) {
+                sLoadFailedCallback.onResult(e);
+            } else {
+                throw new ProcessInitException(LoaderErrors.NATIVE_LIBRARY_LOAD_FAILED, e);
+            }
         }
     }
 
@@ -822,7 +838,6 @@
         assert libraryProcessType == mLibraryProcessType;
     }
 
-    // Invoke base::android::LibraryLoaded in library_loader_hooks.cc
     @GuardedBy("mLock")
     private void initializeAlreadyLocked() {
         if (mInitialized) {
@@ -854,6 +869,8 @@
 
         ensureCommandLineSwitchedAlreadyLocked();
 
+        // Invoke content::LibraryLoaded() in //content/app/android/library_loader_hooks.cc
+        // via a hook stored in //base/android/library_loader/library_loader_hooks.cc.
         if (!LibraryLoaderJni.get().libraryLoaded(mLibraryProcessType)) {
             Log.e(TAG, "error calling LibraryLoaderJni.get().libraryLoaded");
             throw new ProcessInitException(LoaderErrors.FAILED_TO_REGISTER_JNI);
@@ -933,6 +950,16 @@
         }
     }
 
+    public static void setOverrideNativeLibraryCannotBeLoadedForTesting() {
+        sOverrideNativeLibraryCannotBeLoadedForTesting = true;
+        ResettersForTesting.register(() -> sOverrideNativeLibraryCannotBeLoadedForTesting = false);
+    }
+
+    public static void setLoadFailedCallbackForTesting(Callback<UnsatisfiedLinkError> callback) {
+        sLoadFailedCallback = callback;
+        ResettersForTesting.register(() -> sLoadFailedCallback = null);
+    }
+
     public static void setBrowserProcessStartupBlockedForTesting() {
         sBrowserStartupBlockedForTesting = true;
     }
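A sketch of how a test could exercise the new failure hooks; the JUnit scaffolding and the use of ensureInitialized() as the trigger are assumptions rather than part of this change.

```java
import org.junit.Assert;
import org.junit.Test;

import org.chromium.base.library_loader.LibraryLoader;

import java.util.concurrent.atomic.AtomicBoolean;

public class LibraryLoaderFailureSketchTest {
    @Test
    public void loadFailureIsRoutedToCallback() {
        // Force the load path to throw UnsatisfiedLinkError.
        LibraryLoader.setOverrideNativeLibraryCannotBeLoadedForTesting();

        AtomicBoolean callbackRan = new AtomicBoolean();
        LibraryLoader.setLoadFailedCallbackForTesting(error -> callbackRan.set(true));

        // With the callback registered, the failure is reported to it instead of being rethrown
        // as a ProcessInitException.
        LibraryLoader.getInstance().ensureInitialized();

        Assert.assertTrue(callbackRan.get());
    }
}
```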
diff --git a/base/android/java/src/org/chromium/base/library_loader/LibraryPrefetcher.java b/base/android/java/src/org/chromium/base/library_loader/LibraryPrefetcher.java
index e2a2107..cd2353c 100644
--- a/base/android/java/src/org/chromium/base/library_loader/LibraryPrefetcher.java
+++ b/base/android/java/src/org/chromium/base/library_loader/LibraryPrefetcher.java
@@ -4,16 +4,16 @@
 
 package org.chromium.base.library_loader;
 
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.CommandLine;
 import org.chromium.base.ContextUtils;
 import org.chromium.base.SysUtils;
 import org.chromium.base.TraceEvent;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
 import org.chromium.base.metrics.RecordHistogram;
 import org.chromium.base.task.PostTask;
 import org.chromium.base.task.TaskTraits;
-import org.chromium.build.annotations.MainDex;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -23,7 +23,6 @@
  * See also base/android/library_loader/library_prefetcher_hooks.cc, which contains
  * the native counterpart to this class.
  */
-@MainDex
 @JNINamespace("base::android")
 public class LibraryPrefetcher {
 
diff --git a/base/android/java/src/org/chromium/base/library_loader/Linker.java b/base/android/java/src/org/chromium/base/library_loader/Linker.java
index 588b1d8..379a724 100644
--- a/base/android/java/src/org/chromium/base/library_loader/Linker.java
+++ b/base/android/java/src/org/chromium/base/library_loader/Linker.java
@@ -14,16 +14,12 @@
 import androidx.annotation.NonNull;
 import androidx.annotation.VisibleForTesting;
 
+import org.jni_zero.AccessedByNative;
+
 import org.chromium.base.Log;
 import org.chromium.base.StreamUtil;
-import org.chromium.base.TimeUtils.UptimeMillisTimer;
-import org.chromium.base.annotations.AccessedByNative;
 import org.chromium.base.metrics.RecordHistogram;
 
-import java.io.BufferedReader;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 
@@ -150,16 +146,6 @@
     @State
     private int mState = State.UNINITIALIZED;
 
-    private static final String DETAILED_LOAD_TIME_HISTOGRAM_PREFIX =
-            "ChromiumAndroidLinker.ModernLinkerDetailedLoadTime.";
-
-    private static final String DETAILED_LOAD_TIME_HISTOGRAM_PREFIX_BLKIO_CGROUP =
-            "ChromiumAndroidLinker.ModernLinkerDetailedLoadTimeByBlkioCgroup.";
-
-    private static final String SUFFIX_UNKNOWN = "Unknown";
-
-    private static final String SELF_CGROUP_FILE_NAME = "/proc/self/cgroup";
-
     void pretendLibraryIsLoadedForTesting() {
         synchronized (mLock) {
             mState = State.DONE;
@@ -379,9 +365,9 @@
     final void loadLibrary(String library) {
         synchronized (mLock) {
             try {
-                // Normally Chrome/Webview/Weblayer processes initialize when they choose whether to
-                // produce or consume the shared relocations. Initialization here is the last resort
-                // to choose the load address in tests that forget to decide whether they are a
+                // Normally Chrome/WebView processes initialize when they choose whether to produce
+                // or consume the shared relocations. Initialization here is the last resort to
+                // choose the load address in tests that forget to decide whether they are a
                 // producer or a consumer.
                 ensureInitializedImplicitlyAsLastResort();
 
@@ -521,17 +507,10 @@
         }
         assert mState == State.INITIALIZED; // Only one successful call.
 
-        // Determine whether library loading starts in a foreground or a background cgroup for the
-        // 'blkio' controller.
-        String backgroundStateBeforeLoad = readBackgroundStateFromCgroups();
-
         // Load or declare fallback to System.loadLibrary.
-        UptimeMillisTimer timer = new UptimeMillisTimer();
         String libFilePath = System.mapLibraryName(library);
-        boolean performedModernLoad = true;
         if (relroMode == RelroSharingMode.NO_SHARING) {
             // System.loadLibrary() below implements the fallback.
-            performedModernLoad = false;
             mState = State.DONE;
         } else if (relroMode == RelroSharingMode.PRODUCE) {
             loadAndProduceSharedRelro(libFilePath); // Throws on a failed load.
@@ -545,27 +524,6 @@
             mState = State.DONE;
         }
 
-        // The app can change the bg/fg state while loading the native library, but mostly only
-        // once. To reduce the likelihood of a foreground sample to be affected by partially
-        // backgrounded state, move the mixed samples to a separate category. The data collected may
-        // help proving this hypothesis: "The Linker is not a lot slower than the system
-        // linker when running in foreground".
-        String backgroundStateAfterLoad = readBackgroundStateFromCgroups();
-        if (!backgroundStateBeforeLoad.equals(backgroundStateAfterLoad)) {
-            if (backgroundStateBeforeLoad.equals(SUFFIX_UNKNOWN)
-                    || backgroundStateAfterLoad.equals(SUFFIX_UNKNOWN)) {
-                backgroundStateBeforeLoad = SUFFIX_UNKNOWN;
-            } else {
-                backgroundStateBeforeLoad = "Mixed";
-            }
-        }
-
-        if (performedModernLoad) {
-            recordDetailedLoadTimeSince(timer,
-                    relroMode == RelroSharingMode.PRODUCE ? "Produce" : "Consume",
-                    backgroundStateBeforeLoad);
-        }
-
         // Load the library a second time, in order to keep using lazy JNI registration. When
         // loading the library with the Chromium linker, ART doesn't know about our library, so
         // cannot resolve JNI methods lazily. Loading the library a second time makes sure it
@@ -573,14 +531,11 @@
         //
         // This is not wasteful though, as libraries are reference-counted, and as a consequence the
         // library is not really loaded a second time, and we keep relocation sharing.
-        timer = new UptimeMillisTimer();
         try {
             System.loadLibrary(library);
         } catch (UnsatisfiedLinkError e) {
             resetAndThrow("Failed at System.loadLibrary()", e);
         }
-        recordDetailedLoadTimeSince(
-                timer, performedModernLoad ? "Second" : "NoSharing", backgroundStateBeforeLoad);
     }
 
     /**
@@ -642,56 +597,6 @@
                 /* asRelroProducer= */ true, PreferAddress.RESERVE_RANDOM, /* addressHint= */ 0);
     }
 
-    private static String extractBlkioCgroupFromLine(String line) {
-        // The contents of /proc/self/cgroup for a background app looks like this:
-        // 5:schedtune:/background
-        // 4:memory:/
-        // 3:cpuset:/background
-        // 2:cpu:/system
-        // 1:blkio:/background
-        // 0::/uid_10179/pid_11869
-        //
-        // For a foreground app the relevant line looks like this:
-        // 1:blkio:/
-        int blkioStartsAt = line.indexOf(":blkio:");
-        if (blkioStartsAt == -1) return "";
-        return line.substring(blkioStartsAt + 7);
-    }
-
-    private String readBackgroundStateFromCgroups() {
-        String groupName = null;
-        try (BufferedReader reader = new BufferedReader(
-                     new InputStreamReader(new FileInputStream(SELF_CGROUP_FILE_NAME)));) {
-            String line;
-            while ((line = reader.readLine()) != null) {
-                groupName = extractBlkioCgroupFromLine(line);
-                if (!groupName.equals("")) break;
-            }
-            if (groupName == null || groupName.equals("")) return SUFFIX_UNKNOWN;
-        } catch (IOException e) {
-            Log.e(TAG, "IOException while reading %s", SELF_CGROUP_FILE_NAME);
-            return SUFFIX_UNKNOWN;
-        }
-        if (groupName.equals("/")) {
-            return "Foreground";
-        }
-        if (groupName.equals("/background")) {
-            return "Background";
-        }
-        Log.e(TAG, "blkio cgroup with unexpected name: '%s'", groupName);
-        return SUFFIX_UNKNOWN;
-    }
-
-    private void recordDetailedLoadTimeSince(
-            UptimeMillisTimer timer, String suffix, String backgroundStateSuffix) {
-        long durationMs = timer.getElapsedMillis();
-        RecordHistogram.recordTimesHistogram(
-                DETAILED_LOAD_TIME_HISTOGRAM_PREFIX + suffix, durationMs);
-        RecordHistogram.recordTimesHistogram(DETAILED_LOAD_TIME_HISTOGRAM_PREFIX_BLKIO_CGROUP
-                        + suffix + "." + backgroundStateSuffix,
-                durationMs);
-    }
-
     @GuardedBy("mLock")
     private void resetAndThrow(String message, UnsatisfiedLinkError cause) {
         mState = State.INITIALIZED;
diff --git a/base/android/java/src/org/chromium/base/memory/JavaHeapDumpGenerator.java b/base/android/java/src/org/chromium/base/memory/JavaHeapDumpGenerator.java
index 71a2258..d23da76 100644
--- a/base/android/java/src/org/chromium/base/memory/JavaHeapDumpGenerator.java
+++ b/base/android/java/src/org/chromium/base/memory/JavaHeapDumpGenerator.java
@@ -6,10 +6,12 @@
 
 import android.os.Debug;
 
+import org.jni_zero.CalledByNative;
+
 import org.chromium.base.Log;
-import org.chromium.base.annotations.CalledByNative;
 
 import java.io.IOException;
+
 /**
  * Enables the generation of hprof files from heap dumps.
  */
diff --git a/base/android/java/src/org/chromium/base/memory/MemoryInfoBridge.java b/base/android/java/src/org/chromium/base/memory/MemoryInfoBridge.java
index 35819f1..7f39289 100644
--- a/base/android/java/src/org/chromium/base/memory/MemoryInfoBridge.java
+++ b/base/android/java/src/org/chromium/base/memory/MemoryInfoBridge.java
@@ -11,8 +11,9 @@
 
 import androidx.annotation.Nullable;
 
+import org.jni_zero.CalledByNative;
+
 import org.chromium.base.ContextUtils;
-import org.chromium.base.annotations.CalledByNative;
 
 /**
  * Allows calling ActivityManager#getProcessMemoryInfo() from native.
diff --git a/base/android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java b/base/android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java
index 3aa7a2a..800ac36 100644
--- a/base/android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java
+++ b/base/android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java
@@ -16,7 +16,6 @@
 import org.chromium.base.ResettersForTesting;
 import org.chromium.base.ThreadUtils;
 import org.chromium.base.supplier.Supplier;
-import org.chromium.build.annotations.MainDex;
 
 /**
  * This class monitors memory pressure and reports it to the native side.
@@ -71,7 +70,6 @@
  * NOTE: This class should only be used on UiThread as defined by ThreadUtils (which is
  *       Android main thread for Chrome, but can be some other thread for WebView).
  */
-@MainDex
 public class MemoryPressureMonitor {
     private static final int DEFAULT_THROTTLING_INTERVAL_MS = 60 * 1000;
 
diff --git a/base/android/java/src/org/chromium/base/metrics/NativeUmaRecorder.java b/base/android/java/src/org/chromium/base/metrics/NativeUmaRecorder.java
index 87cd31a..fb7b7be 100644
--- a/base/android/java/src/org/chromium/base/metrics/NativeUmaRecorder.java
+++ b/base/android/java/src/org/chromium/base/metrics/NativeUmaRecorder.java
@@ -4,11 +4,11 @@
 
 package org.chromium.base.metrics;
 
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.Callback;
 import org.chromium.base.TimeUtils;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
-import org.chromium.build.annotations.MainDex;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -23,7 +23,6 @@
  * code.
  */
 @JNINamespace("base::android")
-@MainDex
 /* package */ final class NativeUmaRecorder implements UmaRecorder {
     /**
      * Internally, histograms objects are cached on the Java side by their pointer
diff --git a/base/android/java/src/org/chromium/base/metrics/RecordHistogram.java b/base/android/java/src/org/chromium/base/metrics/RecordHistogram.java
index 25f3500..6cb80f7 100644
--- a/base/android/java/src/org/chromium/base/metrics/RecordHistogram.java
+++ b/base/android/java/src/org/chromium/base/metrics/RecordHistogram.java
@@ -6,14 +6,11 @@
 
 import android.text.format.DateUtils;
 
-import org.chromium.build.annotations.MainDex;
-
 import java.util.List;
 
 /**
  * Java API for recording UMA histograms.
  * */
-@MainDex
 public class RecordHistogram {
     /**
      * Records a sample in a boolean UMA histogram of the given name. Boolean histogram has two
diff --git a/base/android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java b/base/android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java
index 20943e9..63f936c 100644
--- a/base/android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java
+++ b/base/android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base.metrics;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * Java API which exposes the registered histograms on the native side as
diff --git a/base/android/java/src/org/chromium/base/process_launcher/ChildProcessConnection.java b/base/android/java/src/org/chromium/base/process_launcher/ChildProcessConnection.java
index 1a41b09..47543e3 100644
--- a/base/android/java/src/org/chromium/base/process_launcher/ChildProcessConnection.java
+++ b/base/android/java/src/org/chromium/base/process_launcher/ChildProcessConnection.java
@@ -798,33 +798,49 @@
         assert isRunningOnLauncherThread();
         assert !mUnbound;
 
-        boolean success;
+        boolean success = bindUsingExistingBindings(useStrongBinding);
         boolean usedFallback = sAlwaysFallback && mFallbackServiceName != null;
-        if (useStrongBinding) {
-            mStrongBindingCount++;
-            success = mStrongBinding.bindServiceConnection();
-        } else {
-            mVisibleBindingCount++;
-            success = mVisibleBinding.bindServiceConnection();
-        }
-        if (!success) {
+        boolean canFallback = !sAlwaysFallback && mFallbackServiceName != null;
+        if (!success && !usedFallback && canFallback) {
             // Note this error condition is generally transient so `sAlwaysFallback` is
             // not set in this code path.
-            if (!usedFallback && mFallbackServiceName != null && retireBindingsAndBindFallback()) {
-                usedFallback = true;
-            } else {
-                return false;
-            }
+            retireAndCreateFallbackBindings();
+            success = bindUsingExistingBindings(useStrongBinding);
+            usedFallback = true;
+            canFallback = false;
         }
 
-        if (!usedFallback && mFallbackServiceName != null) {
+        if (success && !usedFallback && canFallback) {
             mLauncherHandler.postDelayed(
                     this::checkBindTimeOut, FALLBACK_TIMEOUT_IN_SECONDS * 1000);
         }
 
-        mWaivedBinding.bindServiceConnection();
-        updateBindingState();
-        return true;
+        return success;
+    }
+
+    private boolean bindUsingExistingBindings(boolean useStrongBinding) {
+        assert isRunningOnLauncherThread();
+
+        boolean success;
+        if (useStrongBinding) {
+            success = mStrongBinding.bindServiceConnection();
+            if (success) {
+                mStrongBindingCount++;
+            }
+        } else {
+            success = mVisibleBinding.bindServiceConnection();
+            if (success) {
+                mVisibleBindingCount++;
+            }
+        }
+
+        if (success) {
+            boolean result = mWaivedBinding.bindServiceConnection();
+            // One binding already succeeded. Waived binding should succeed too.
+            assert result;
+            updateBindingState();
+        }
+        return success;
     }
 
     private void checkBindTimeOut() {
@@ -842,19 +858,12 @@
 
     private boolean retireBindingsAndBindFallback() {
         assert mFallbackServiceName != null;
-        Log.w(TAG, "Fallback to %s", mFallbackServiceName);
         boolean isStrongBindingBound = mStrongBinding.isBound();
         boolean isVisibleBindingBound = mVisibleBinding.isBound();
         boolean isNotPerceptibleBindingBound =
                 supportNotPerceptibleBinding() && mNotPerceptibleBinding.isBound();
         boolean isWaivedBindingBound = mWaivedBinding.isBound();
-        mStrongBinding.retire();
-        mVisibleBinding.retire();
-        if (supportNotPerceptibleBinding()) {
-            mNotPerceptibleBinding.retire();
-        }
-        mWaivedBinding.retire();
-        createBindings(mFallbackServiceName);
+        retireAndCreateFallbackBindings();
         // Expect all bindings to succeed or fail together. So early out as soon as
         // one binding fails.
         if (isStrongBindingBound) {
@@ -880,6 +889,18 @@
         return true;
     }
 
+    private void retireAndCreateFallbackBindings() {
+        assert mFallbackServiceName != null;
+        Log.w(TAG, "Fallback to %s", mFallbackServiceName);
+        mStrongBinding.retire();
+        mVisibleBinding.retire();
+        if (supportNotPerceptibleBinding()) {
+            mNotPerceptibleBinding.retire();
+        }
+        mWaivedBinding.retire();
+        createBindings(mFallbackServiceName);
+    }
+
     @VisibleForTesting
     protected void unbind() {
         assert isRunningOnLauncherThread();
diff --git a/base/android/java/src/org/chromium/base/process_launcher/ChildProcessService.java b/base/android/java/src/org/chromium/base/process_launcher/ChildProcessService.java
index 28daeb7..45b83c2 100644
--- a/base/android/java/src/org/chromium/base/process_launcher/ChildProcessService.java
+++ b/base/android/java/src/org/chromium/base/process_launcher/ChildProcessService.java
@@ -21,6 +21,9 @@
 import android.text.TextUtils;
 import android.util.SparseArray;
 
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.BaseSwitches;
 import org.chromium.base.CommandLine;
 import org.chromium.base.ContextUtils;
@@ -28,13 +31,10 @@
 import org.chromium.base.Log;
 import org.chromium.base.MemoryPressureLevel;
 import org.chromium.base.ThreadUtils;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
 import org.chromium.base.compat.ApiHelperForN;
 import org.chromium.base.library_loader.LibraryLoader;
 import org.chromium.base.memory.MemoryPressureMonitor;
 import org.chromium.base.metrics.RecordHistogram;
-import org.chromium.build.annotations.MainDex;
 
 import java.util.List;
 
@@ -63,7 +63,6 @@
  * implementation which cannot directly inherit from this class (e.g. for WebLayer child services).
  */
 @JNINamespace("base::android")
-@MainDex
 public class ChildProcessService {
     private static final String MAIN_THREAD_NAME = "ChildProcessMain";
     private static final String TAG = "ChildProcessService";
diff --git a/base/android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java b/base/android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java
index 8eaa3d7..62f2f1f 100644
--- a/base/android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java
+++ b/base/android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java
@@ -8,7 +8,6 @@
 import android.os.ParcelFileDescriptor;
 import android.os.Parcelable;
 
-import org.chromium.build.annotations.MainDex;
 import org.chromium.build.annotations.UsedByReflection;
 
 import javax.annotation.concurrent.Immutable;
@@ -18,7 +17,6 @@
  * be passed to child processes.
  */
 @Immutable
-@MainDex
 @UsedByReflection("child_process_launcher_helper_android.cc")
 public final class FileDescriptorInfo implements Parcelable {
     public final int id;
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/KeyPrefix.java b/base/android/java/src/org/chromium/base/shared_preferences/KeyPrefix.java
new file mode 100644
index 0000000..825dc58
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/KeyPrefix.java
@@ -0,0 +1,43 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+/**
+ * A prefix for a range of SharedPreferences keys generated dynamically.
+ *
+ * Instances should be declared as keys in the PreferenceKeys registry.
+ */
+public class KeyPrefix {
+    private final String mPrefix;
+
+    public KeyPrefix(String pattern) {
+        // More thorough checking is performed in ChromePreferenceKeysTest.
+        assert pattern.endsWith("*");
+        mPrefix = pattern.substring(0, pattern.length() - 1);
+    }
+
+    /**
+     * @param stem A non-empty string. The '*' character is reserved.
+     * @return The complete SharedPreferences key to be passed to {@link SharedPreferencesManager}.
+     */
+    public String createKey(String stem) {
+        return mPrefix + stem;
+    }
+    /**
+     * @param index An int to generate a unique key.
+     * @return The complete SharedPreferences key to be passed to {@link SharedPreferencesManager}.
+     */
+    public String createKey(int index) {
+        return mPrefix + index;
+    }
+
+    public String pattern() {
+        return mPrefix + "*";
+    }
+
+    public boolean hasGenerated(String key) {
+        return key.startsWith(mPrefix);
+    }
+}
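A short sketch of the intended usage; the key names below are illustrative and not real registry entries.

```java
import org.chromium.base.shared_preferences.KeyPrefix;

class KeyPrefixSketch {
    // Declared alongside other keys in a PreferenceKeyRegistry.
    static final KeyPrefix RETRY_COUNT_PREFIX = new KeyPrefix("Example.Download.RetryCount.*");

    static void demo() {
        // "Example.Download.RetryCount.someDownloadGuid"
        String byId = RETRY_COUNT_PREFIX.createKey("someDownloadGuid");
        // "Example.Download.RetryCount.7"
        String byIndex = RETRY_COUNT_PREFIX.createKey(7);
        // true: the key was generated from this prefix.
        boolean owned = RETRY_COUNT_PREFIX.hasGenerated(byId);
    }
}
```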
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistries.java b/base/android/java/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistries.java
new file mode 100644
index 0000000..80f4a51
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistries.java
@@ -0,0 +1,94 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import com.google.common.collect.Sets;
+
+import org.chromium.base.ResettersForTesting;
+import org.chromium.build.BuildConfig;
+import org.chromium.build.annotations.CheckDiscard;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Ensures that all {@link PreferenceKeyRegistry}s used are known.
+ *
+ * A complement to ChromePreferenceKeysTest, which ensures that preference keys across all known
+ * registries are unique.
+ *
+ * This checking is done in tests in which |initializeKnownRegistries()| is called, which happens
+ * during browser process initialization.
+ */
+@CheckDiscard("Preference key checking should only happen on build with asserts")
+public class KnownPreferenceKeyRegistries {
+    private static Set<PreferenceKeyRegistry> sKnownRegistries;
+    private static Set<PreferenceKeyRegistry> sRegistriesUsedBeforeInitialization = new HashSet<>();
+
+    public static void onRegistryUsed(PreferenceKeyRegistry registry) {
+        if (!BuildConfig.ENABLE_ASSERTS) {
+            return;
+        }
+
+        if (sKnownRegistries == null) {
+            // Before initialization, keep track of registries used.
+            sRegistriesUsedBeforeInitialization.add(registry);
+        } else {
+            // After initialization, check if registry is known.
+            if (!sKnownRegistries.contains(registry)) {
+                String message =
+                        "An unknown registry was used, PreferenceKeyRegistries must be declared as "
+                        + "known in AllPreferenceKeyRegistries: "
+                        + String.join(",", registry.toDebugString());
+                assert false : message;
+            }
+        }
+    }
+
+    public static void initializeKnownRegistries(Set<PreferenceKeyRegistry> knownRegistries) {
+        if (!BuildConfig.ENABLE_ASSERTS) {
+            return;
+        }
+
+        if (sKnownRegistries != null) {
+            // Double initialization; make sure new known registries are the same.
+            assert sKnownRegistries.equals(knownRegistries);
+            return;
+        }
+
+        // Check that each registry already used is known; assert otherwise.
+        Set<PreferenceKeyRegistry> unknownRegistries =
+                Sets.difference(sRegistriesUsedBeforeInitialization, knownRegistries);
+        if (!unknownRegistries.isEmpty()) {
+            List<String> unknownRegistryNames = new ArrayList<>();
+            for (PreferenceKeyRegistry unknownRegistry : unknownRegistries) {
+                unknownRegistryNames.add(unknownRegistry.toDebugString());
+            }
+            String message =
+                    "Unknown registries were used, PreferenceKeyRegistries must be declared as "
+                    + "known in AllPreferenceKeyRegistries: "
+                    + String.join(",", unknownRegistryNames);
+            assert false : message;
+        }
+
+        sKnownRegistries = knownRegistries;
+        sRegistriesUsedBeforeInitialization = null;
+    }
+
+    static void clearForTesting() {
+        Set<PreferenceKeyRegistry> previousKnownRegistries = sKnownRegistries;
+        Set<PreferenceKeyRegistry> registriesUsedBeforeInitialization =
+                sRegistriesUsedBeforeInitialization;
+
+        ResettersForTesting.register(() -> {
+            sKnownRegistries = previousKnownRegistries;
+            sRegistriesUsedBeforeInitialization = registriesUsedBeforeInitialization;
+        });
+        sKnownRegistries = null;
+        sRegistriesUsedBeforeInitialization = new HashSet<>();
+    }
+}
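
A hedged usage sketch (not part of the imported patch) of the ordering contract between the two entry points above; the class and parameter names below are illustrative only:

package org.chromium.base.shared_preferences;

import java.util.Set;

// Illustrative only: real callers live in the embedder's startup code.
class KnownRegistriesUsageSketch {
    static void browserStartup(
            PreferenceKeyRegistry earlyRegistry, Set<PreferenceKeyRegistry> allKnownRegistries) {
        // A registry used before initialization is only buffered, not checked yet.
        KnownPreferenceKeyRegistries.onRegistryUsed(earlyRegistry);

        // Initialization validates everything buffered so far; with asserts enabled,
        // any buffered registry missing from |allKnownRegistries| trips an assert.
        KnownPreferenceKeyRegistries.initializeKnownRegistries(allKnownRegistries);

        // From now on, each onRegistryUsed() call is checked immediately.
        KnownPreferenceKeyRegistries.onRegistryUsed(earlyRegistry);
    }
}
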
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/NoOpPreferenceKeyChecker.java b/base/android/java/src/org/chromium/base/shared_preferences/NoOpPreferenceKeyChecker.java
new file mode 100644
index 0000000..344d8e4
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/NoOpPreferenceKeyChecker.java
@@ -0,0 +1,20 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+/**
+ * A placeholder key checker that never throws exceptions. Used in production builds.
+ */
+class NoOpPreferenceKeyChecker implements PreferenceKeyChecker {
+    @Override
+    public void checkIsKeyInUse(String key) {
+        // No-op.
+    }
+
+    @Override
+    public void checkIsPrefixInUse(KeyPrefix prefix) {
+        // No-op.
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/PreferenceKeyChecker.java b/base/android/java/src/org/chromium/base/shared_preferences/PreferenceKeyChecker.java
new file mode 100644
index 0000000..a372d66
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/PreferenceKeyChecker.java
@@ -0,0 +1,19 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+/**
+ * A SharedPreferences key checker that may check if the key is in use.
+ *
+ * In official builds, {@link NoOpPreferenceKeyChecker} is used, which is a no-op.
+ * In debug builds, {@link StrictPreferenceKeyChecker} is used, which checks if a key is registered.
+ */
+interface PreferenceKeyChecker {
+    // Asserts that the SharedPreferences |key| is registered as "in use".
+    void checkIsKeyInUse(String key);
+
+    // Asserts that the SharedPreferences KeyPrefix |prefix| is registered as "in use".
+    void checkIsPrefixInUse(KeyPrefix prefix);
+}
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/PreferenceKeyRegistry.java b/base/android/java/src/org/chromium/base/shared_preferences/PreferenceKeyRegistry.java
new file mode 100644
index 0000000..410ea2a
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/PreferenceKeyRegistry.java
@@ -0,0 +1,35 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import androidx.annotation.NonNull;
+
+import org.chromium.build.annotations.CheckDiscard;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+
+@CheckDiscard("Preference key checking should only happen on build with asserts")
+public class PreferenceKeyRegistry {
+    private final String mModule;
+    public final HashSet<String> mKeysInUse;
+    public final HashSet<String> mLegacyFormatKeys;
+    public final List<KeyPrefix> mLegacyPrefixes;
+
+    public PreferenceKeyRegistry(String module, List<String> keysInUse, List<String> legacyKeys,
+            List<KeyPrefix> legacyPrefixes) {
+        mModule = module;
+        mKeysInUse = new HashSet<>(keysInUse);
+        mLegacyFormatKeys = new HashSet<>(legacyKeys);
+        mLegacyPrefixes = legacyPrefixes;
+    }
+
+    @NonNull
+    public String toDebugString() {
+        return String.format(Locale.getDefault(), "%s (%d in use, %d legacy, %d legacy prefixes)",
+                mModule, mKeysInUse.size(), mLegacyFormatKeys.size(), mLegacyPrefixes.size());
+    }
+}
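
For reference, a minimal construction sketch (not part of the patch); the module name and keys are invented for illustration:

package org.chromium.base.shared_preferences;

import java.util.Arrays;
import java.util.Collections;

// Illustrative only: shows how a registry's in-use, legacy, and prefix lists fit together.
class PreferenceKeyRegistryExample {
    static PreferenceKeyRegistry createExampleRegistry() {
        return new PreferenceKeyRegistry(
                "example_module",
                // Keys in use: plain keys plus one ".*" pattern for dynamically suffixed keys.
                Arrays.asList(
                        "Chrome.ExampleFeature.Enabled",
                        "Chrome.ExampleFeature.LastUsedMs",
                        "Chrome.ExampleFeature.Counter.*"),
                // Keys that predate the "Chrome.[Feature].[Key]" format.
                Arrays.asList("example_legacy_pref"),
                // No legacy KeyPrefixes in this sketch.
                Collections.emptyList());
    }
}
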
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/SharedPreferencesManager.java b/base/android/java/src/org/chromium/base/shared_preferences/SharedPreferencesManager.java
new file mode 100644
index 0000000..d5bd240
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/SharedPreferencesManager.java
@@ -0,0 +1,541 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import android.content.SharedPreferences;
+import android.content.SharedPreferences.Editor;
+
+import androidx.annotation.GuardedBy;
+import androidx.annotation.Nullable;
+import androidx.annotation.VisibleForTesting;
+
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+
+import org.chromium.base.ContextUtils;
+import org.chromium.base.ResettersForTesting;
+import org.chromium.build.BuildConfig;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Layer over android {@link SharedPreferences}.
+ */
+@JNINamespace("base::android")
+@SuppressWarnings("UseSharedPreferencesManagerFromChromeCheck")
+public class SharedPreferencesManager {
+    @GuardedBy("sInstances")
+    public static Map<PreferenceKeyRegistry, SharedPreferencesManager> sInstances = new HashMap<>();
+
+    private PreferenceKeyChecker mKeyChecker;
+
+    protected SharedPreferencesManager(PreferenceKeyRegistry registry) {
+        mKeyChecker = BuildConfig.ENABLE_ASSERTS ? new StrictPreferenceKeyChecker(registry)
+                                                 : new NoOpPreferenceKeyChecker();
+    }
+
+    @VisibleForTesting
+    SharedPreferencesManager(PreferenceKeyChecker keyChecker) {
+        mKeyChecker = keyChecker;
+    }
+
+    /**
+     * @param registry registry of supported and deprecated preference keys.
+     *                 Should be null when ENABLE_ASSERTS = false.
+     * @return a {@link SharedPreferencesManager} that operates on SharedPreferences keys registered
+     *         in the passed |registry|
+     */
+    public static SharedPreferencesManager getInstanceForRegistry(
+            @Nullable PreferenceKeyRegistry registry) {
+        SharedPreferencesManager manager;
+        synchronized (sInstances) {
+            manager = sInstances.get(registry);
+            if (manager == null) {
+                manager = new SharedPreferencesManager(registry);
+                sInstances.put(registry, manager);
+            }
+        }
+        return manager;
+    }
+
+    public void disableKeyCheckerForTesting() {
+        PreferenceKeyChecker swappedOut = mKeyChecker;
+        mKeyChecker = new NoOpPreferenceKeyChecker();
+        ResettersForTesting.register(() -> mKeyChecker = swappedOut);
+    }
+
+    /**
+     * Reads set of String values from preferences.
+     *
+     * If no value was set for the |key|, returns an unmodifiable empty set.
+     *
+     * @return unmodifiable Set with the values
+     */
+    public Set<String> readStringSet(String key) {
+        return readStringSet(key, Collections.emptySet());
+    }
+
+    /**
+     * Reads set of String values from preferences.
+     *
+     * If no value was set for the |key|, returns an unmodifiable view of |defaultValue|.
+     *
+     * @return unmodifiable Set with the values
+     */
+    @Nullable
+    public Set<String> readStringSet(String key, @Nullable Set<String> defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        Set<String> values = ContextUtils.getAppSharedPreferences().getStringSet(key, defaultValue);
+        return (values != null) ? Collections.unmodifiableSet(values) : null;
+    }
+
+    /**
+     * Adds a value to string set in shared preferences.
+     */
+    public void addToStringSet(String key, String value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        // Construct a new set so it can be modified safely. See crbug.com/568369.
+        Set<String> values = new HashSet<>(
+                ContextUtils.getAppSharedPreferences().getStringSet(key, Collections.emptySet()));
+        values.add(value);
+        writeStringSetUnchecked(key, values);
+    }
+
+    /**
+     * Removes value from string set in shared preferences.
+     */
+    public void removeFromStringSet(String key, String value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        // Construct a new set so it can be modified safely. See crbug.com/568369.
+        Set<String> values = new HashSet<>(
+                ContextUtils.getAppSharedPreferences().getStringSet(key, Collections.emptySet()));
+        if (values.remove(value)) {
+            writeStringSetUnchecked(key, values);
+        }
+    }
+
+    /**
+     * Writes string set to shared preferences.
+     */
+    public void writeStringSet(String key, Set<String> values) {
+        mKeyChecker.checkIsKeyInUse(key);
+        writeStringSetUnchecked(key, values);
+    }
+
+    /**
+     * Writes string set to shared preferences.
+     */
+    private void writeStringSetUnchecked(String key, Set<String> values) {
+        Editor editor = ContextUtils.getAppSharedPreferences().edit().putStringSet(key, values);
+        editor.apply();
+    }
+
+    /**
+     * Writes the given string set to the named shared preference and immediately commits to disk.
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     * @return Whether the operation succeeded.
+     */
+    public boolean writeStringSetSync(String key, Set<String> value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        Editor editor = ContextUtils.getAppSharedPreferences().edit().putStringSet(key, value);
+        return editor.commit();
+    }
+
+    /**
+     * Writes the given int value to the named shared preference.
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     */
+    public void writeInt(String key, int value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        writeIntUnchecked(key, value);
+    }
+
+    private void writeIntUnchecked(String key, int value) {
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putInt(key, value);
+        ed.apply();
+    }
+
+    /**
+     * Writes the given int value to the named shared preference and immediately commits to disk.
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     * @return Whether the operation succeeded.
+     */
+    public boolean writeIntSync(String key, int value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putInt(key, value);
+        return ed.commit();
+    }
+
+    /**
+     * Reads the given int value from the named shared preference, defaulting to 0 if not found.
+     * @param key The name of the preference to return.
+     * @return The value of the preference.
+     */
+    public int readInt(String key) {
+        return readInt(key, 0);
+    }
+
+    /**
+     * Reads the given int value from the named shared preference.
+     * @param key The name of the preference to return.
+     * @param defaultValue The default value to return if the preference is not set.
+     * @return The value of the preference.
+     */
+    @CalledByNative
+    public int readInt(String key, int defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        return ContextUtils.getAppSharedPreferences().getInt(key, defaultValue);
+    }
+
+    /**
+     * Reads all int values associated with keys with the given prefix.
+     *
+     * @param prefix The key prefix for which all values should be returned.
+     * @return Map from the keys (in full, not just stem) to Integer values.
+     */
+    public Map<String, Integer> readIntsWithPrefix(KeyPrefix prefix) {
+        return readAllWithPrefix(prefix);
+    }
+
+    /**
+     * Increments the integer value specified by the given key.  If no initial value is present then
+     * an initial value of 0 is assumed and incremented, so a new value of 1 is set.
+     * @param key The key specifying which integer value to increment.
+     * @return The newly incremented value.
+     */
+    public int incrementInt(String key) {
+        int value = readInt(key, 0);
+        writeIntUnchecked(key, ++value);
+        return value;
+    }
+
+    /**
+     * Writes the given long to the named shared preference.
+     *
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     */
+    public void writeLong(String key, long value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putLong(key, value);
+        ed.apply();
+    }
+
+    /**
+     * Writes the given long value to the named shared preference and immediately commits to disk.
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     * @return Whether the operation succeeded.
+     */
+    public boolean writeLongSync(String key, long value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putLong(key, value);
+        return ed.commit();
+    }
+
+    /**
+     * Reads the given long value from the named shared preference, defaulting to 0 if not found.
+     *
+     * @param key The name of the preference to return.
+     * @return The value of the preference if stored; 0 otherwise.
+     */
+    public long readLong(String key) {
+        return readLong(key, 0);
+    }
+
+    /**
+     * Reads the given long value from the named shared preference.
+     *
+     * @param key The name of the preference to return.
+     * @param defaultValue The default value to return if there's no value stored.
+     * @return The value of the preference if stored; defaultValue otherwise.
+     */
+    public long readLong(String key, long defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        return ContextUtils.getAppSharedPreferences().getLong(key, defaultValue);
+    }
+
+    /**
+     * Reads all long values associated with keys with the given prefix.
+     *
+     * @param prefix The key prefix for which all values should be returned.
+     * @return Map from the keys (in full, not just stem) to Long values.
+     */
+    public Map<String, Long> readLongsWithPrefix(KeyPrefix prefix) {
+        return readAllWithPrefix(prefix);
+    }
+
+    /**
+     * Writes the given float to the named shared preference.
+     *
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     */
+    public void writeFloat(String key, float value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putFloat(key, value);
+        ed.apply();
+    }
+
+    /**
+     * Writes the given float value to the named shared preference and immediately commits to disk.
+     *
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     * @return Whether the operation succeeded.
+     */
+    public boolean writeFloatSync(String key, float value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putFloat(key, value);
+        return ed.commit();
+    }
+
+    /**
+     * Reads the given float value from the named shared preference.
+     *
+     * @param key The name of the preference to return.
+     * @param defaultValue The default value to return if there's no value stored.
+     * @return The value of the preference if stored; defaultValue otherwise.
+     */
+    public float readFloat(String key, float defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        return ContextUtils.getAppSharedPreferences().getFloat(key, defaultValue);
+    }
+
+    /**
+     * Reads all float values associated with keys with the given prefix.
+     *
+     * @param prefix The key prefix for which all values should be returned.
+     * @return Map from the keys (in full, not just stem) to Float values.
+     */
+    public Map<String, Float> readFloatsWithPrefix(KeyPrefix prefix) {
+        return readAllWithPrefix(prefix);
+    }
+
+    /**
+     * Writes the given double value to the named shared preference.
+     *
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     */
+    public void writeDouble(String key, double value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        long ieee754LongValue = Double.doubleToRawLongBits(value);
+        ed.putLong(key, ieee754LongValue);
+        ed.apply();
+    }
+
+    /**
+     * Reads the given double value from the named shared preference.
+     *
+     * @param key The name of the preference to return.
+     * @param defaultValue The default value to return if there's no value stored.
+     * @return The value of the preference if stored; defaultValue otherwise.
+     */
+    public Double readDouble(String key, double defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences prefs = ContextUtils.getAppSharedPreferences();
+        if (!prefs.contains(key)) {
+            return defaultValue;
+        }
+        long ieee754LongValue = prefs.getLong(key, 0L);
+        return Double.longBitsToDouble(ieee754LongValue);
+    }
+
+    /**
+     * Reads all double values associated with keys with the given prefix.
+     *
+     * @param prefix The key prefix for which all values should be returned.
+     * @return Map from the keys (in full, not just stem) to Double values.
+     */
+    public Map<String, Double> readDoublesWithPrefix(KeyPrefix prefix) {
+        Map<String, Long> longMap = readLongsWithPrefix(prefix);
+        Map<String, Double> doubleMap = new HashMap<>();
+
+        for (Map.Entry<String, Long> longEntry : longMap.entrySet()) {
+            long ieee754LongValue = longEntry.getValue();
+            double doubleValue = Double.longBitsToDouble(ieee754LongValue);
+            doubleMap.put(longEntry.getKey(), doubleValue);
+        }
+        return doubleMap;
+    }
+
+    /**
+     * Writes the given boolean to the named shared preference.
+     *
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     */
+    public void writeBoolean(String key, boolean value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putBoolean(key, value);
+        ed.apply();
+    }
+
+    /**
+     * Writes the given boolean value to the named shared preference and immediately commits to disk.
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     * @return Whether the operation succeeded.
+     */
+    public boolean writeBooleanSync(String key, boolean value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putBoolean(key, value);
+        return ed.commit();
+    }
+
+    /**
+     * Reads the given boolean value from the named shared preference.
+     *
+     * @param key The name of the preference to return.
+     * @param defaultValue The default value to return if there's no value stored.
+     * @return The value of the preference if stored; defaultValue otherwise.
+     */
+    @CalledByNative
+    public boolean readBoolean(String key, boolean defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        return ContextUtils.getAppSharedPreferences().getBoolean(key, defaultValue);
+    }
+
+    /**
+     * Reads all boolean values associated with keys with the given prefix.
+     *
+     * @param prefix The key prefix for which all values should be returned.
+     * @return Map from the keys (in full, not just stem) to Boolean values.
+     */
+    public Map<String, Boolean> readBooleansWithPrefix(KeyPrefix prefix) {
+        return readAllWithPrefix(prefix);
+    }
+
+    /**
+     * Writes the given string to the named shared preference.
+     *
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     */
+    @CalledByNative
+    public void writeString(String key, String value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putString(key, value);
+        ed.apply();
+    }
+
+    /**
+     * Writes the given string value to the named shared preference and immediately commits to disk.
+     * @param key The name of the preference to modify.
+     * @param value The new value for the preference.
+     * @return Whether the operation succeeded.
+     */
+    public boolean writeStringSync(String key, String value) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.putString(key, value);
+        return ed.commit();
+    }
+
+    /**
+     * Reads the given String value from the named shared preference.
+     *
+     * @param key The name of the preference to return.
+     * @param defaultValue The default value to return if there's no value stored.
+     * @return The value of the preference if stored; defaultValue otherwise.
+     */
+    @CalledByNative
+    @Nullable
+    public String readString(String key, @Nullable String defaultValue) {
+        mKeyChecker.checkIsKeyInUse(key);
+        return ContextUtils.getAppSharedPreferences().getString(key, defaultValue);
+    }
+
+    /**
+     * Reads all String values associated with keys with the given prefix.
+     *
+     * @param prefix The key prefix for which all values should be returned.
+     * @return Map from the keys (in full, not just stem) to String values.
+     */
+    public Map<String, String> readStringsWithPrefix(KeyPrefix prefix) {
+        return readAllWithPrefix(prefix);
+    }
+
+    /**
+     * Removes the shared preference entry.
+     *
+     * @param key The key of the preference to remove.
+     */
+    @CalledByNative
+    public void removeKey(String key) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.remove(key);
+        ed.apply();
+    }
+
+    public boolean removeKeySync(String key) {
+        mKeyChecker.checkIsKeyInUse(key);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        ed.remove(key);
+        return ed.commit();
+    }
+
+    /**
+     * Removes all shared preference entries with the given prefix.
+     *
+     * @param prefix The KeyPrefix for which all entries should be removed.
+     */
+    public void removeKeysWithPrefix(KeyPrefix prefix) {
+        mKeyChecker.checkIsPrefixInUse(prefix);
+        SharedPreferences.Editor ed = ContextUtils.getAppSharedPreferences().edit();
+        Map<String, ?> allPrefs = ContextUtils.getAppSharedPreferences().getAll();
+        for (Map.Entry<String, ?> pref : allPrefs.entrySet()) {
+            String key = pref.getKey();
+            if (prefix.hasGenerated(key)) {
+                ed.remove(key);
+            }
+        }
+        ed.apply();
+    }
+
+    /**
+     * Checks whether any value was written for the given key in shared preferences.
+     *
+     * @param key The key of the preference to check.
+     * @return Whether any value was written for that key.
+     */
+    @CalledByNative
+    public boolean contains(String key) {
+        mKeyChecker.checkIsKeyInUse(key);
+        return ContextUtils.getAppSharedPreferences().contains(key);
+    }
+
+    private <T> Map<String, T> readAllWithPrefix(KeyPrefix prefix) {
+        mKeyChecker.checkIsPrefixInUse(prefix);
+        Map<String, ?> allPrefs = ContextUtils.getAppSharedPreferences().getAll();
+        Map<String, T> allPrefsWithPrefix = new HashMap<>();
+        for (Map.Entry<String, ?> pref : allPrefs.entrySet()) {
+            String key = pref.getKey();
+            if (prefix.hasGenerated(key)) {
+                allPrefsWithPrefix.put(key, (T) pref.getValue());
+            }
+        }
+        return allPrefsWithPrefix;
+    }
+}
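
A hedged usage sketch, not part of the patch: |registry| stands in for whatever PreferenceKeyRegistry the embedder passes, and the key names below are illustrative and would have to be registered as in use for the strict checker to accept them.

package org.chromium.base.shared_preferences;

// Illustrative only: typical reads and writes through the manager.
class SharedPreferencesManagerSketch {
    static void recordVisit(PreferenceKeyRegistry registry) {
        SharedPreferencesManager prefs =
                SharedPreferencesManager.getInstanceForRegistry(registry);

        // Each call first runs the key through the PreferenceKeyChecker.
        prefs.writeBoolean("Chrome.ExampleFeature.Enabled", true);
        int visits = prefs.incrementInt("Chrome.ExampleFeature.VisitCount");

        // Doubles are stored as raw IEEE 754 bits in a long, so they round-trip exactly.
        prefs.writeDouble("Chrome.ExampleFeature.Score", 0.1 + 0.2);
        double score = prefs.readDouble("Chrome.ExampleFeature.Score", 0.0);
    }
}
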
diff --git a/base/android/java/src/org/chromium/base/shared_preferences/StrictPreferenceKeyChecker.java b/base/android/java/src/org/chromium/base/shared_preferences/StrictPreferenceKeyChecker.java
new file mode 100644
index 0000000..3b84739
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/shared_preferences/StrictPreferenceKeyChecker.java
@@ -0,0 +1,102 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import android.text.TextUtils;
+
+import org.chromium.build.annotations.CheckDiscard;
+
+import java.util.Arrays;
+import java.util.regex.Pattern;
+
+/**
+ * Class that checks if given Strings are valid SharedPreferences keys to use.
+ *
+ * Checks that:
+ * 1. Keys are registered as "in use".
+ * 2. The key format is valid, either:
+ *   - "Chrome.[Feature].[Key]"
+ *   - "Chrome.[Feature].[KeyPrefix].[Suffix]"
+ *   - Legacy key prior to this restriction
+ */
+@CheckDiscard("Validation is performed in tests and in debug builds.")
+class StrictPreferenceKeyChecker implements PreferenceKeyChecker {
+    // The dynamic part cannot be empty, but otherwise it is anything that does not contain
+    // stars.
+    private static final Pattern DYNAMIC_PART_PATTERN = Pattern.compile("[^\\*]+");
+
+    private final PreferenceKeyRegistry mRegistry;
+
+    StrictPreferenceKeyChecker(PreferenceKeyRegistry registry) {
+        mRegistry = registry;
+    }
+
+    /**
+     * Check that the |key| passed is in use.
+     * @throws RuntimeException if the key is not in use.
+     */
+    @Override
+    public void checkIsKeyInUse(String key) {
+        if (!isKeyInUse(key)) {
+            throw new RuntimeException("SharedPreferences key \"" + key
+                    + "\" is not registered in PreferenceKeyRegistry.mKeysInUse");
+        }
+        KnownPreferenceKeyRegistries.onRegistryUsed(mRegistry);
+    }
+
+    /**
+     * @return Whether |key| is in use.
+     */
+    private boolean isKeyInUse(String key) {
+        // For non-dynamic legacy keys, a simple map check is enough.
+        if (mRegistry.mLegacyFormatKeys.contains(key)) {
+            return true;
+        }
+
+        // For dynamic legacy keys, each legacy prefix has to be checked.
+        for (KeyPrefix prefix : mRegistry.mLegacyPrefixes) {
+            if (prefix.hasGenerated(key)) {
+                return true;
+            }
+        }
+
+        // If not a format-legacy key, assume it follows the format and find out if it is
+        // a prefixed key.
+        String[] parts = key.split("\\.", 4);
+        if (parts.length < 3) return false;
+        boolean isPrefixed = parts.length >= 4;
+
+        if (isPrefixed) {
+            // Key with prefix in format "Chrome.[Feature].[KeyPrefix].[Suffix]".
+
+            // Check if its prefix is registered in |mKeysInUse|.
+            String prefixFormat =
+                    TextUtils.join(".", Arrays.asList(parts[0], parts[1], parts[2], "*"));
+            if (!mRegistry.mKeysInUse.contains(prefixFormat)) return false;
+
+            // Check if the dynamic part is correctly formed.
+            String dynamicPart = parts[3];
+            return DYNAMIC_PART_PATTERN.matcher(dynamicPart).matches();
+        } else {
+            // Regular key in format "Chrome.[Feature].[Key]".
+            // Just check if it is registered in |mKeysInUse|.
+            return mRegistry.mKeysInUse.contains(key);
+        }
+    }
+
+    @Override
+    public void checkIsPrefixInUse(KeyPrefix prefix) {
+        if (mRegistry.mLegacyPrefixes.contains(prefix)) {
+            return;
+        }
+
+        if (mRegistry.mKeysInUse.contains(prefix.pattern())) {
+            return;
+        }
+
+        throw new RuntimeException("SharedPreferences KeyPrefix \"" + prefix.pattern()
+                + "\" is not registered in PreferenceKeyRegistry.mKeysInUse()");
+    }
+}
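
To make the accepted key formats concrete, a hedged sketch (not part of the patch) using an invented registry; the final call is expected to throw:

package org.chromium.base.shared_preferences;

import java.util.Arrays;
import java.util.Collections;

// Illustrative only: which keys the strict checker accepts for a made-up registry.
class StrictKeyCheckerSketch {
    static void demo() {
        PreferenceKeyRegistry registry = new PreferenceKeyRegistry(
                "example_module",
                Arrays.asList(
                        "Chrome.ExampleFeature.Enabled", "Chrome.ExampleFeature.Counter.*"),
                Collections.emptyList(),
                Collections.emptyList());
        StrictPreferenceKeyChecker checker = new StrictPreferenceKeyChecker(registry);

        // Registered plain key: accepted.
        checker.checkIsKeyInUse("Chrome.ExampleFeature.Enabled");
        // Prefixed key whose "Chrome.ExampleFeature.Counter.*" pattern is registered
        // and whose dynamic suffix contains no '*': accepted.
        checker.checkIsKeyInUse("Chrome.ExampleFeature.Counter.tab42");
        // Unregistered key: throws RuntimeException.
        checker.checkIsKeyInUse("Chrome.OtherFeature.Enabled");
    }
}
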
diff --git a/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java
new file mode 100644
index 0000000..ce9ba8f
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java
@@ -0,0 +1,37 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Callback;
+
+/**
+ * Wraps a lazy-loaded nullable object, notifying observers a single time when the dependency
+ * becomes available. This intentionally doesn't extend {@link OneshotSupplier} to support the
+ * supplied value being null.
+ *
+ * @param <T> The type of the wrapped object.
+ */
+public interface LazyOneshotSupplier<T> {
+    /**
+     * Add a callback that's called when the object owned by this supplier is available. If the
+     * object is already available, the callback will be called at the end of the current message
+     * loop.
+     *
+     * @param callback The callback to be called.
+     */
+    void onAvailable(Callback<T> callback);
+
+    /**
+     * Returns the value currently held or <code>null</code> when none is held. Use {@link
+     * #hasValue} to tell if the value is intentionally null.
+     */
+    @Nullable
+    T get();
+
+    /** Returns whether the supplier holds a value currently. */
+    boolean hasValue();
+}
diff --git a/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java
new file mode 100644
index 0000000..a2fab0f
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java
@@ -0,0 +1,100 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Callback;
+import org.chromium.base.Promise;
+import org.chromium.base.ThreadUtils;
+
+/**
+ * Abstract implementation of {@link LazyOneshotSupplier} to be used by classes providing it as a
+ * dependency to others. A call to {@link LazyOneshotSupplier#get()} will attempt to set the
+ * supplied object via {@link LazyOneshotSupplier#doSet()}. Additionally, {@link
+ * LazyOneshotSupplier#onAvailable(Callback<T>)} will not call {@link LazyOneshotSupplier#get()}
+ * unless it already has a value, to prevent eager initialization. The supplied value can be null;
+ * {@link LazyOneshotSupplier#hasValue} should be used to differentiate between unset and set states.
+ *
+ * <p>If eager initialization in response to {@link LazyOneshotSupplier#onAvailable(Callback<T>)} is
+ * required then a call to {@link LazyOneshotSupplier#get()} can be made just before attaching the
+ * callback.
+ *
+ * <p>Instances of this class must only be accessed from the thread they were created on.
+ *
+ * <p>To use:
+ *
+ * <ol>
+ *   <li>Create a new {@code LazyOneshotSupplier<T>} to pass as a dependency.
+ *   <li>Override {@link #doSet()} to invoke {@link #set(T)}. This will be invoked when {@link
+ *       #get()} is invoked if {@link #hasValue()} returns false. Note that invoking {@link
+ *       #doSet()} does not have to invoke {@link #set(T)} if there is reason not to such as
+ *       awaiting an async dependency. However, if this is the case clients of the supplier need to
+ *       be careful to properly understand the initialization lifecycle.
+ * </ol>
+ *
+ * @param <T> The type of the wrapped object.
+ */
+public abstract class LazyOneshotSupplierImpl<T> implements LazyOneshotSupplier<T> {
+    private final Promise<T> mPromise = new Promise<>();
+    private final ThreadUtils.ThreadChecker mThreadChecker = new ThreadUtils.ThreadChecker();
+
+    private boolean mDoSetCalled;
+
+    /**
+     * Lazily invokes the callback the first time {@link #set(T)} is invoked or immediately if
+     * already available.
+     */
+    @Override
+    public void onAvailable(Callback<T> callback) {
+        mThreadChecker.assertOnValidThread();
+        mPromise.then(callback);
+    }
+
+    /**
+     * Return the value of the supplier. Calling this the first time will initialize the value in
+     * the supplier via {@link #doSet()}.
+     *
+     * @return the value that was provided in {@link #set(T)} or null.
+     */
+    @Override
+    public @Nullable T get() {
+        mThreadChecker.assertOnValidThread();
+        if (!hasValue()) {
+            tryDoSet();
+        }
+        return hasValue() ? mPromise.getResult() : null;
+    }
+
+    /** Returns whether a value is set in the supplier. */
+    @Override
+    public boolean hasValue() {
+        return mPromise.isFulfilled();
+    }
+
+    /**
+     * Sets the value upon first {@link #get()}. Implementers should override this to invoke {@link
+     * #set(T)}.
+     */
+    public abstract void doSet();
+
+    /**
+     * Set the object supplied by this supplier. This will notify registered callbacks that the
+     * dependency is available. If set() has already been called, this method will assert.
+     *
+     * @param object The object to supply.
+     */
+    public void set(@Nullable T object) {
+        mThreadChecker.assertOnValidThread();
+        assert !mPromise.isFulfilled();
+        mPromise.fulfill(object);
+    }
+
+    private void tryDoSet() {
+        if (mDoSetCalled) return;
+        doSet();
+        mDoSetCalled = true;
+    }
+}
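
A hedged usage sketch (not part of the patch) of the subclassing pattern the Javadoc above describes; the deferred computation and names are illustrative:

package org.chromium.base.supplier;

// Illustrative only: the expensive computation stands in for whatever the real
// owner would defer until the first get().
class LazyOneshotSupplierSketch {
    static LazyOneshotSupplier<String> createLazyName() {
        return new LazyOneshotSupplierImpl<String>() {
            @Override
            public void doSet() {
                // Invoked at most once, by the first get() that finds no value yet.
                set(computeExpensiveName());
            }
        };
    }

    static void use() {
        LazyOneshotSupplier<String> supplier = createLazyName();
        // Registering a callback does not force creation of the value.
        supplier.onAvailable(name -> consume(name));
        // The first get() triggers doSet(), which calls set(); per the interface
        // contract, the callback above then runs at the end of the current message loop.
        String name = supplier.get();
    }

    private static String computeExpensiveName() {
        return "expensive-result";
    }

    private static void consume(String name) {}
}
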
diff --git a/base/android/java/src/org/chromium/base/supplier/ObservableSupplierImpl.java b/base/android/java/src/org/chromium/base/supplier/ObservableSupplierImpl.java
index 7a82072..10a629f 100644
--- a/base/android/java/src/org/chromium/base/supplier/ObservableSupplierImpl.java
+++ b/base/android/java/src/org/chromium/base/supplier/ObservableSupplierImpl.java
@@ -12,6 +12,8 @@
 import org.chromium.base.ObserverList;
 import org.chromium.base.ResettersForTesting;
 
+import java.util.Objects;
+
 /**
  * Concrete implementation of {@link ObservableSupplier} to be used by classes owning the
  * ObservableSupplier and providing it as a dependency to others.
@@ -64,12 +66,16 @@
 
     /**
      * Set the object supplied by this supplier. This will notify registered callbacks that the
-     * dependency is available.
+     * dependency is available if the object changes. Object equality is used when deciding if the
+     * object has changed, not reference equality.
+     *
      * @param object The object to supply.
      */
     public void set(E object) {
         checkThread();
-        if (object == mObject) return;
+        if (Objects.equals(object, mObject)) {
+            return;
+        }
 
         mObject = object;
 
@@ -84,6 +90,11 @@
         return mObject;
     }
 
+    /** Returns if there are any observers currently. */
+    public boolean hasObservers() {
+        return !mObservers.isEmpty();
+    }
+
     private void checkThread() {
         assert sIgnoreThreadChecksForTesting
                 || mThread
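
The change above makes set() compare with Objects.equals() rather than reference identity; a hedged sketch (not part of the patch) of the behavioral difference:

package org.chromium.base.supplier;

// Illustrative only: equal-but-distinct objects no longer re-notify observers.
class ObservableSupplierEqualitySketch {
    static void demo() {
        ObservableSupplierImpl<String> supplier = new ObservableSupplierImpl<>();
        supplier.addObserver(value -> { /* react to changes */ });

        supplier.set(new String("abc")); // Notifies: value changed from null.
        supplier.set(new String("abc")); // equals() is true despite the new reference,
                                         // so with Objects.equals() this is a no-op;
                                         // the old == check would have re-notified.
        supplier.set("abcd");            // Not equal: notifies again.
    }
}
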
diff --git a/base/android/java/src/org/chromium/base/supplier/OneshotSupplierImpl.java b/base/android/java/src/org/chromium/base/supplier/OneshotSupplierImpl.java
index 5695656..234f29f 100644
--- a/base/android/java/src/org/chromium/base/supplier/OneshotSupplierImpl.java
+++ b/base/android/java/src/org/chromium/base/supplier/OneshotSupplierImpl.java
@@ -44,8 +44,9 @@
     }
 
     /**
-     * Set the object supplied by this supplier. This will notify registered callbacks that the
-     * dependency is available. If set() has already been called, this method will assert.
+     * Set the object supplied by this supplier. This will post notifications to registered
+     * callbacks that the dependency is available. If set() has already been called, this method
+     * will assert.
      *
      * @param object The object to supply.
      */
diff --git a/base/android/java/src/org/chromium/base/supplier/SyncOneshotSupplier.java b/base/android/java/src/org/chromium/base/supplier/SyncOneshotSupplier.java
new file mode 100644
index 0000000..6ad7f81
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/supplier/SyncOneshotSupplier.java
@@ -0,0 +1,40 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Callback;
+
+/**
+ * SyncOneshotSupplier wraps an asynchronously provided, non-null object {@code T}, synchronously
+ * notifying observers a single time when the dependency becomes available. Note that null is the
+ * sentinel value; a fulfilled supplier will never have a null value.
+ *
+ * <p>See {@link OneshotSupplier} for more details on when this might be useful. The key distinction
+ * between the two interfaces is that the callbacks registered to {@link #onAvailable(Callback)} are
+ * called synchronously when the object becomes available. This is critical in some applications
+ * where the value might be needed immediately and the {@link Callback} cannot be posted. However,
+ * generally prefer {@link OneshotSupplier} if either will work to avoid main thread congestion.
+ *
+ * <p>This class must only be accessed from a single thread. Unless a particular thread designation
+ * is given by the owner of the OneshotSupplier, clients should assume it must only be accessed on
+ * the UI thread.
+ *
+ * <p>If you want to create a supplier, see an implementation in {@link SyncOneshotSupplierImpl}.
+ *
+ * @param <T> The type of the wrapped object.
+ */
+public interface SyncOneshotSupplier<T> extends Supplier<T> {
+    /**
+     * Add a callback that's synchronously called when the object owned by this supplier is
+     * available. If the object is already available, the callback will be called immediately.
+     *
+     * @param callback The callback to be called.
+     * @return The value for this supplier if already available. Null otherwise.
+     */
+    @Nullable
+    T onAvailable(Callback<T> callback);
+}
diff --git a/base/android/java/src/org/chromium/base/supplier/SyncOneshotSupplierImpl.java b/base/android/java/src/org/chromium/base/supplier/SyncOneshotSupplierImpl.java
new file mode 100644
index 0000000..287ad42
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/supplier/SyncOneshotSupplierImpl.java
@@ -0,0 +1,80 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import androidx.annotation.NonNull;
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Callback;
+import org.chromium.base.ThreadUtils;
+
+import java.util.ArrayList;
+
+/**
+ * A concrete implementation of {@link SyncOneshotSupplier} to be used when callbacks need to be called
+ * synchronously when a value is set. This differs from {@link OneshotSupplierImpl} in that the
+ * pending {@link Callback}s are not posted when {@link #set(T)} is invoked.
+ *
+ * <p>Instances of this class must only be accessed from the thread they were created on.
+ *
+ * <p>To use:
+ *
+ * <ol>
+ *   <li>Create a new {@code SyncOneshotSupplierImpl<T>} to pass as a dependency.
+ *   <li>Call {@link #set(Object)} when the object becomes available. {@link #set(Object)} may only
+ *       be called once.
+ * </ol>
+ *
+ * @param <T> The type of the wrapped object.
+ */
+public class SyncOneshotSupplierImpl<T> implements SyncOneshotSupplier<T> {
+    private final ThreadUtils.ThreadChecker mThreadChecker = new ThreadUtils.ThreadChecker();
+
+    /** Lazily created list of pending callbacks to invoke when an object is set. */
+    private @Nullable ArrayList<Callback<T>> mPendingCallbacks;
+
+    private @Nullable T mObject;
+
+    @Override
+    public @Nullable T onAvailable(Callback<T> callback) {
+        mThreadChecker.assertOnValidThread();
+        T object = get();
+        if (object != null) {
+            callback.onResult(object);
+        } else {
+            if (mPendingCallbacks == null) {
+                mPendingCallbacks = new ArrayList<Callback<T>>();
+            }
+            mPendingCallbacks.add(callback);
+        }
+        return object;
+    }
+
+    @Override
+    public @Nullable T get() {
+        mThreadChecker.assertOnValidThread();
+        return mObject;
+    }
+
+    /**
+     * Set the object supplied by this supplier. This will synchronously notify registered callbacks
+     * that the dependency is available. If {@link #set(Object)} has already been called, this
+     * method will assert.
+     *
+     * @param object The object to supply.
+     */
+    public void set(@NonNull T object) {
+        mThreadChecker.assertOnValidThread();
+        assert mObject == null;
+        assert object != null;
+        mObject = object;
+        if (mPendingCallbacks == null) return;
+
+        for (Callback<T> callback : mPendingCallbacks) {
+            callback.onResult(object);
+        }
+        mPendingCallbacks = null;
+    }
+}
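
A hedged sketch (not part of the patch) contrasting the synchronous notification described above with the posted notification of OneshotSupplierImpl; values and handlers are illustrative:

package org.chromium.base.supplier;

// Illustrative only: callbacks registered before set() run synchronously inside set().
class SyncOneshotSupplierSketch {
    static void demo() {
        SyncOneshotSupplierImpl<Integer> supplier = new SyncOneshotSupplierImpl<>();

        // Not available yet, so onAvailable() returns null and buffers the callback.
        Integer early = supplier.onAvailable(value -> handle(value));
        assert early == null;

        // set() invokes the buffered callback before returning; nothing is posted.
        supplier.set(42);

        // Already available, so this callback runs immediately and 42 is returned.
        Integer now = supplier.onAvailable(value -> handle(value));
        assert now != null && now == 42;
    }

    private static void handle(int value) {}
}
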
diff --git a/base/android/java/src/org/chromium/base/supplier/TransitiveObservableSupplier.java b/base/android/java/src/org/chromium/base/supplier/TransitiveObservableSupplier.java
new file mode 100644
index 0000000..80b4107
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/supplier/TransitiveObservableSupplier.java
@@ -0,0 +1,120 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import androidx.annotation.NonNull;
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Callback;
+
+import java.util.function.Function;
+
+/**
+ * Useful when two observable suppliers are chained together. The client class may only want to know
+ * the value of the second, or "target", supplier. But to track this the client needs to observe the
+ * first, or "parent", supplier, and then [un]observe the current target. Instead this class is a
+ * single observable supplier that holds the current target value, greatly simplifying the client's
+ * job.
+ *
+ * <p>Attempts to only maintain observers on the relevant suppliers when there's an observer on this
+ * class. Clients should still remove themselves as observers from this class when done.
+ *
+ * @param <P> The parent object that's holding a reference to the target.
+ * @param <T> The target type that the client wants to observe.
+ */
+public class TransitiveObservableSupplier<P, T> implements ObservableSupplier<T> {
+    // Used to hold observers and current state. However the current value is only valid when there
+    // are observers; otherwise it may be stale.
+    private final @NonNull ObservableSupplierImpl<T> mDelegateSupplier =
+            new ObservableSupplierImpl<>();
+
+    private final @NonNull Callback<P> mOnParentSupplierChangeCallback =
+            this::onParentSupplierChange;
+    private final @NonNull Callback<T> mOnTargetSupplierChangeCallback =
+            this::onTargetSupplierChange;
+    private final @NonNull ObservableSupplier<P> mParentSupplier;
+    private final @NonNull Function<P, ObservableSupplier<T>> mUnwrapFunction;
+
+    // When this is set, then mOnTargetSupplierChangeCallback is an observer of the object
+    // referenced by mCurrentTargetSupplier. When this value is changed, the observer must be
+    // removed.
+    private @Nullable ObservableSupplier<T> mCurrentTargetSupplier;
+
+    public TransitiveObservableSupplier(
+            ObservableSupplier<P> parentSupplier,
+            Function<P, ObservableSupplier<T>> unwrapFunction) {
+        mParentSupplier = parentSupplier;
+        mUnwrapFunction = unwrapFunction;
+    }
+
+    @Override
+    public T addObserver(Callback<T> obs) {
+        if (!mDelegateSupplier.hasObservers()) {
+            onParentSupplierChange(mParentSupplier.addObserver(mOnParentSupplierChangeCallback));
+        }
+        return mDelegateSupplier.addObserver(obs);
+    }
+
+    @Override
+    public void removeObserver(Callback<T> obs) {
+        mDelegateSupplier.removeObserver(obs);
+        if (!mDelegateSupplier.hasObservers()) {
+            mParentSupplier.removeObserver(mOnParentSupplierChangeCallback);
+            if (mCurrentTargetSupplier != null) {
+                mCurrentTargetSupplier.removeObserver(mOnTargetSupplierChangeCallback);
+                mCurrentTargetSupplier = null;
+            }
+        }
+    }
+
+    @Override
+    public @Nullable T get() {
+        if (mDelegateSupplier.hasObservers()) {
+            return mDelegateSupplier.get();
+        }
+
+        // If we have no observers, the value stored by mDelegateSupplier might not be current.
+        P parentValue = mParentSupplier.get();
+        if (parentValue != null) {
+            ObservableSupplier<T> targetSupplier = mUnwrapFunction.apply(parentValue);
+            if (targetSupplier != null) {
+                return targetSupplier.get();
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Conceptually this just removes our observer from the old target supplier and adds our observer
+     * to the new target supplier. In practice this is full of null checks. We also have to make
+     * sure we keep our delegate supplier's value up to date, which is also what drives client
+     * observations.
+     */
+    private void onParentSupplierChange(@Nullable P parentValue) {
+        if (mCurrentTargetSupplier != null) {
+            mCurrentTargetSupplier.removeObserver(mOnTargetSupplierChangeCallback);
+        }
+
+        // Keep track of the current target supplier, because if this ever changes, we'll need to
+        // remove our observer from it.
+        mCurrentTargetSupplier = parentValue == null ? null : mUnwrapFunction.apply(parentValue);
+
+        if (mCurrentTargetSupplier == null) {
+            onTargetSupplierChange(null);
+        } else {
+            // While addObserver will call us if a value is already set, we do not want to depend on
+            // that for two reasons. If there is no value set, we need to null out our supplier now.
+            // And if there is a value set, we're going to get invoked asynchronously, which means
+            // our delegate supplier could be in an intermediately incorrect state. By just setting
+            // our delegate eagerly we avoid both problems.
+            onTargetSupplierChange(
+                    mCurrentTargetSupplier.addObserver(mOnTargetSupplierChangeCallback));
+        }
+    }
+
+    private void onTargetSupplierChange(@Nullable T targetValue) {
+        mDelegateSupplier.set(targetValue);
+    }
+}
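
A hedged sketch (not part of the patch) of the chained-supplier pattern this class simplifies; the Tab class and title supplier below are hypothetical stand-ins:

package org.chromium.base.supplier;

import org.chromium.base.Callback;

// Illustrative only: a parent object that itself exposes an ObservableSupplier
// for the value the client actually cares about.
class TransitiveSupplierSketch {
    static class Tab {
        final ObservableSupplierImpl<String> mTitleSupplier = new ObservableSupplierImpl<>();

        ObservableSupplier<String> getTitleSupplier() {
            return mTitleSupplier;
        }
    }

    static void demo() {
        ObservableSupplierImpl<Tab> currentTab = new ObservableSupplierImpl<>();

        // Observes the title of whichever Tab is current, following tab switches.
        TransitiveObservableSupplier<Tab, String> currentTitle =
                new TransitiveObservableSupplier<>(currentTab, Tab::getTitleSupplier);

        Callback<String> observer = title -> { /* react to title changes */ };
        currentTitle.addObserver(observer);

        Tab tab = new Tab();
        currentTab.set(tab);             // Retargets onto tab's title supplier.
        tab.mTitleSupplier.set("Hello"); // Observer is notified with "Hello".

        currentTitle.removeObserver(observer); // Also detaches from the inner suppliers.
    }
}
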
diff --git a/base/android/java/src/org/chromium/base/task/AsyncTask.java b/base/android/java/src/org/chromium/base/task/AsyncTask.java
index 5f92d6e..e45d508 100644
--- a/base/android/java/src/org/chromium/base/task/AsyncTask.java
+++ b/base/android/java/src/org/chromium/base/task/AsyncTask.java
@@ -472,6 +472,17 @@
             try (TraceEvent e = TraceEvent.scoped(
                          "AsyncTask.run: " + mFuture.getBlamedClass().getName())) {
                 super.run();
+            } finally {
+                // Clear the interrupt on this background thread, if there is one, as it likely
+                // came from cancelling the FutureTask. It is possible this was already cleared
+                // in run() if something was listening for an interrupt; however, if it wasn't
+                // then the interrupt may still be around. By clearing it here the thread is in
+                // a clean state for the next task. See: crbug/1473731.
+
+                // This is safe and prevents future leaks because the state of the FutureTask
+                // should now be >= COMPLETING. Any future calls to cancel() will not trigger
+                // an interrupt.
+                Thread.interrupted();
             }
         }
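
The comment in the hunk above relies on Thread.interrupted() both reporting and clearing the calling thread's interrupt status; a small hedged sketch of that standard JDK behavior, not part of the patch:

// Illustrative only: Thread.interrupted() reports and clears the flag in one call,
// which is why invoking it in the finally block leaves the pooled thread clean.
class InterruptStatusSketch {
    static void demo() {
        Thread.currentThread().interrupt();            // Simulate a late cancel(true).
        boolean wasInterrupted = Thread.interrupted(); // true, and clears the flag.
        boolean stillInterrupted = Thread.currentThread().isInterrupted(); // false.
        assert wasInterrupted && !stillInterrupted;
    }
}
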
 
diff --git a/base/android/java/src/org/chromium/base/task/PostTask.java b/base/android/java/src/org/chromium/base/task/PostTask.java
index dff73a0..f4a3774 100644
--- a/base/android/java/src/org/chromium/base/task/PostTask.java
+++ b/base/android/java/src/org/chromium/base/task/PostTask.java
@@ -6,11 +6,12 @@
 
 import android.os.Handler;
 
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+
 import org.chromium.base.Log;
 import org.chromium.base.ResettersForTesting;
 import org.chromium.base.ThreadUtils;
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
 
 import java.util.ArrayList;
 import java.util.List;
diff --git a/base/android/java/src/org/chromium/base/task/SingleThreadTaskRunnerImpl.java b/base/android/java/src/org/chromium/base/task/SingleThreadTaskRunnerImpl.java
index b024366..e999d83 100644
--- a/base/android/java/src/org/chromium/base/task/SingleThreadTaskRunnerImpl.java
+++ b/base/android/java/src/org/chromium/base/task/SingleThreadTaskRunnerImpl.java
@@ -8,7 +8,7 @@
 
 import androidx.annotation.Nullable;
 
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.JNINamespace;
 
 /**
  * Implementation of the abstract class {@link SingleThreadTaskRunner}. Before native initialization
diff --git a/base/android/java/src/org/chromium/base/task/TaskRunnerImpl.java b/base/android/java/src/org/chromium/base/task/TaskRunnerImpl.java
index 0ab9c39..b525e09 100644
--- a/base/android/java/src/org/chromium/base/task/TaskRunnerImpl.java
+++ b/base/android/java/src/org/chromium/base/task/TaskRunnerImpl.java
@@ -9,9 +9,10 @@
 
 import androidx.annotation.Nullable;
 
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
 import org.chromium.base.TraceEvent;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
 
 import java.lang.ref.ReferenceQueue;
 import java.lang.ref.WeakReference;
diff --git a/base/android/javatests/src/org/chromium/base/AdvancedMockContextTest.java b/base/android/javatests/src/org/chromium/base/AdvancedMockContextTest.java
index fadb10c..c98fffa 100644
--- a/base/android/javatests/src/org/chromium/base/AdvancedMockContextTest.java
+++ b/base/android/javatests/src/org/chromium/base/AdvancedMockContextTest.java
@@ -20,9 +20,7 @@
 import org.chromium.base.test.BaseJUnit4ClassRunner;
 import org.chromium.base.test.util.AdvancedMockContext;
 
-/**
- * Tests for {@link org.chromium.base.test.util.AdvancedMockContext}.
- */
+/** Tests for {@link org.chromium.base.test.util.AdvancedMockContext}. */
 @RunWith(BaseJUnit4ClassRunner.class)
 public class AdvancedMockContextTest {
     private static class Callback1 implements ComponentCallbacks {
@@ -66,13 +64,19 @@
 
         Configuration configuration = new Configuration();
         targetApplication.onConfigurationChanged(configuration);
-        Assert.assertEquals("onConfigurationChanged should have been called.", configuration,
+        Assert.assertEquals(
+                "onConfigurationChanged should have been called.",
+                configuration,
                 callback1.mConfiguration);
-        Assert.assertEquals("onConfigurationChanged should have been called.", configuration,
+        Assert.assertEquals(
+                "onConfigurationChanged should have been called.",
+                configuration,
                 callback2.mConfiguration);
 
         targetApplication.onTrimMemory(ComponentCallbacks2.TRIM_MEMORY_MODERATE);
-        Assert.assertEquals("onTrimMemory should have been called.",
-                ComponentCallbacks2.TRIM_MEMORY_MODERATE, callback2.mLevel);
+        Assert.assertEquals(
+                "onTrimMemory should have been called.",
+                ComponentCallbacks2.TRIM_MEMORY_MODERATE,
+                callback2.mLevel);
     }
 }
diff --git a/base/android/javatests/src/org/chromium/base/CommandLineFlagsTest.java b/base/android/javatests/src/org/chromium/base/CommandLineFlagsTest.java
index 5c3f6ae..100bad2 100644
--- a/base/android/javatests/src/org/chromium/base/CommandLineFlagsTest.java
+++ b/base/android/javatests/src/org/chromium/base/CommandLineFlagsTest.java
@@ -24,13 +24,14 @@
 
 import java.util.List;
 
-/**
- * Test class for {@link CommandLineFlags}.
- */
+/** Test class for {@link CommandLineFlags}. */
 @RunWith(CommandLineFlagsTest.ClassRunner.class)
 @Batch(Batch.UNIT_TESTS)
-@CommandLineFlags.
-Add({CommandLineFlagsTest.FLAG_1, "flagwithvalue=foo", "enable-features=feature1,feature2"})
[email protected]({
+    CommandLineFlagsTest.FLAG_1,
+    "flagwithvalue=foo",
+    "enable-features=feature1,feature2"
+})
 public class CommandLineFlagsTest {
     public static class ClassRunner extends BaseJUnit4ClassRunner {
         public ClassRunner(final Class<?> klass) throws InitializationError {
@@ -40,23 +41,28 @@
         // Verify class-level modifications are reset after class finishes.
         @Override
         protected List<ClassHook> getPostClassHooks() {
-            return addToList(ClassRunner.super.getPostClassHooks(), (targetContext, testClass) -> {
-                verifyCommandLine(false, false, false, false, false, false, false);
-                Assert.assertFalse(CommandLine.getInstance().hasSwitch("flagwithvalue"));
-                String enabledFeatures =
-                        CommandLine.getInstance().getSwitchValue("enable-features");
-                if (enabledFeatures != null) {
-                    Assert.assertFalse(enabledFeatures.contains("feature1"));
-                    Assert.assertFalse(enabledFeatures.contains("feature2"));
-                }
-            });
+            return addToList(
+                    ClassRunner.super.getPostClassHooks(),
+                    (targetContext, testClass) -> {
+                        verifyCommandLine(false, false, false, false, false, false, false);
+                        Assert.assertFalse(CommandLine.getInstance().hasSwitch("flagwithvalue"));
+                        String enabledFeatures =
+                                CommandLine.getInstance().getSwitchValue("enable-features");
+                        if (enabledFeatures != null) {
+                            Assert.assertFalse(enabledFeatures.contains("feature1"));
+                            Assert.assertFalse(enabledFeatures.contains("feature2"));
+                        }
+                    });
         }
 
         // Verify that after each test, flags are reset to class-level state.
         @Override
         protected List<TestHook> getPostTestHooks() {
-            return addToList(ClassRunner.super.getPostTestHooks(),
-                    (targetContext, testMethod) -> { verifyClassLevelStateOnly(); });
+            return addToList(
+                    ClassRunner.super.getPostTestHooks(),
+                    (targetContext, testMethod) -> {
+                        verifyClassLevelStateOnly();
+                    });
         }
     }
 
@@ -85,20 +91,24 @@
         @CommandLineFlags.Add(FLAG_5)
         private static class UnusedRule extends EmptyRule {}
 
-        @Rule
-        public InnerRule mInnerRule = new InnerRule();
+        @Rule public InnerRule mInnerRule = new InnerRule();
     }
 
-    @Rule
-    public MyRule mRule = new MyRule();
+    @Rule public MyRule mRule = new MyRule();
 
     @Before
     public void setUp() {
         LibraryLoader.getInstance().ensureInitialized();
     }
 
-    private static void verifyCommandLine(boolean flag1, boolean flag2, boolean flag3,
-            boolean flag4, boolean flag5, boolean flag6, boolean flag7) {
+    private static void verifyCommandLine(
+            boolean flag1,
+            boolean flag2,
+            boolean flag3,
+            boolean flag4,
+            boolean flag5,
+            boolean flag6,
+            boolean flag7) {
         CommandLine cmdLine = CommandLine.getInstance();
         Assert.assertEquals(flag1, cmdLine.hasSwitch(FLAG_1));
         Assert.assertEquals(flag2, cmdLine.hasSwitch(FLAG_2));
diff --git a/base/android/javatests/src/org/chromium/base/CommandLineTest.java b/base/android/javatests/src/org/chromium/base/CommandLineTest.java
index b8a0bc2..6b853bb 100644
--- a/base/android/javatests/src/org/chromium/base/CommandLineTest.java
+++ b/base/android/javatests/src/org/chromium/base/CommandLineTest.java
@@ -1,39 +1,44 @@
-// Copyright 2017 The Chromium Authors
+// Copyright 2013 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 package org.chromium.base;
 
-import androidx.test.filters.SmallTest;
+import androidx.test.filters.MediumTest;
 
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
+import org.chromium.base.library_loader.LibraryLoader;
 import org.chromium.base.test.BaseJUnit4ClassRunner;
+import org.chromium.base.test.util.DoNotBatch;
 import org.chromium.base.test.util.Feature;
 
-import java.util.Map;
-
-/**
- * Tests for {@link CommandLine}.
- * TODO(bauerb): Convert to local JUnit test
- */
+/** Test class for command lines. */
 @RunWith(BaseJUnit4ClassRunner.class)
+@DoNotBatch(reason = "Tests java -> native transition")
 public class CommandLineTest {
     // A reference command line. Note that switch2 is [brea\d], switch3 is [and "butter"],
     // and switch4 is [a "quoted" 'food'!]
-    static final String INIT_SWITCHES[] = { "init_command", "--SWITCH", "Arg",
-        "--switch2=brea\\d", "--switch3=and \"butter\"",
+    static final String[] INIT_SWITCHES = {
+        "init_command",
+        "--switch",
+        "Arg",
+        "--switch2=brea\\d",
+        "--switch3=and \"butter\"",
         "--switch4=a \"quoted\" 'food'!",
-        "--", "--actually_an_arg" };
+        "--",
+        "--actually_an_arg"
+    };
 
     // The same command line, but in quoted string format.
-    static final char INIT_SWITCHES_BUFFER[] =
-        ("init_command --SWITCH Arg --switch2=brea\\d --switch3=\"and \\\"butt\"er\\\"   "
-        + "--switch4='a \"quoted\" \\'food\\'!' "
-        + "-- --actually_an_arg").toCharArray();
+    static final char[] INIT_SWITCHES_BUFFER =
+            ("init_command --switch Arg --switch2=brea\\d --switch3=\"and \\\"butt\"er\\\"   "
+                            + "--switch4='a \"quoted\" \\'food\\'!' "
+                            + "-- --actually_an_arg")
+                    .toCharArray();
 
     static final String CL_ADDED_SWITCH = "zappo-dappo-doggy-trainer";
     static final String CL_ADDED_SWITCH_2 = "username";
@@ -41,25 +46,30 @@
 
     @Before
     public void setUp() {
-        CommandLine.reset();
+        CommandLine.resetForTesting();
+    }
+
+    void loadJni() {
+        Assert.assertFalse(CommandLine.getInstance().isNativeImplementation());
+        LibraryLoader.getInstance().ensureInitialized();
+        Assert.assertTrue(CommandLine.getInstance().isNativeImplementation());
     }
 
     void checkInitSwitches() {
         CommandLine cl = CommandLine.getInstance();
         Assert.assertFalse(cl.hasSwitch("init_command"));
-        Assert.assertFalse(cl.hasSwitch("switch"));
-        Assert.assertTrue(cl.hasSwitch("SWITCH"));
-        Assert.assertFalse(cl.hasSwitch("--SWITCH"));
-        Assert.assertFalse(cl.hasSwitch("Arg"));
+        Assert.assertTrue(cl.hasSwitch("switch"));
+        Assert.assertFalse(cl.hasSwitch("--switch"));
+        Assert.assertFalse(cl.hasSwitch("arg"));
         Assert.assertFalse(cl.hasSwitch("actually_an_arg"));
         Assert.assertEquals("brea\\d", cl.getSwitchValue("switch2"));
         Assert.assertEquals("and \"butter\"", cl.getSwitchValue("switch3"));
         Assert.assertEquals("a \"quoted\" 'food'!", cl.getSwitchValue("switch4"));
-        Assert.assertNull(cl.getSwitchValue("SWITCH"));
+        Assert.assertNull(cl.getSwitchValue("switch"));
         Assert.assertNull(cl.getSwitchValue("non-existant"));
     }
 
-    void checkSettingThenGettingThenRemoving() {
+    void checkSettingThenGetting() {
         CommandLine cl = CommandLine.getInstance();
 
         // Add a plain switch.
@@ -71,124 +81,61 @@
         Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH_2));
         Assert.assertNull(cl.getSwitchValue(CL_ADDED_SWITCH_2));
         cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, CL_ADDED_VALUE_2);
-        Assert.assertEquals(CL_ADDED_VALUE_2, cl.getSwitchValue(CL_ADDED_SWITCH_2));
-
-        // Update a switch's value.
-        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, "updatedValue");
-        Assert.assertEquals("updatedValue", cl.getSwitchValue(CL_ADDED_SWITCH_2));
+        Assert.assertTrue(CL_ADDED_VALUE_2.equals(cl.getSwitchValue(CL_ADDED_SWITCH_2)));
 
         // Append a few new things.
-        final String switchesAndArgs[] = { "dummy", "--superfast", "--speed=turbo" };
-        Assert.assertFalse(cl.hasSwitch("dummy"));
+        final String[] switchesAndArgs = {"thing", "--superfast", "--speed=turbo"};
+        Assert.assertFalse(cl.hasSwitch("thing"));
         Assert.assertFalse(cl.hasSwitch("superfast"));
         Assert.assertNull(cl.getSwitchValue("speed"));
         cl.appendSwitchesAndArguments(switchesAndArgs);
-        Assert.assertFalse(cl.hasSwitch("dummy"));
+        Assert.assertFalse(cl.hasSwitch("thing"));
         Assert.assertFalse(cl.hasSwitch("command"));
         Assert.assertTrue(cl.hasSwitch("superfast"));
-        Assert.assertEquals("turbo", cl.getSwitchValue("speed"));
-
-        // Get all switches
-        Map<String, String> switches = cl.getSwitches();
-        Assert.assertTrue(switches.containsKey(CL_ADDED_SWITCH));
-        Assert.assertTrue(switches.containsKey(CL_ADDED_SWITCH_2));
-
-        // Remove a plain switch.
-        cl.removeSwitch(CL_ADDED_SWITCH);
-        Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH));
-
-        // Remove a switch with a value.
-        cl.removeSwitch(CL_ADDED_SWITCH_2);
-        Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH_2));
-        Assert.assertNull(cl.getSwitchValue(CL_ADDED_SWITCH_2));
-
-        // Get all switches again to verify it updated.
-        switches = cl.getSwitches();
-        Assert.assertFalse(switches.containsKey(CL_ADDED_SWITCH));
-        Assert.assertFalse(switches.containsKey(CL_ADDED_SWITCH_2));
+        Assert.assertTrue("turbo".equals(cl.getSwitchValue("speed")));
     }
 
-    void checkTokenizer(String[] expected, String toParse) {
-        String[] actual = CommandLine.tokenizeQuotedArguments(toParse.toCharArray());
-        Assert.assertEquals(expected.length, actual.length);
-        for (int i = 0; i < expected.length; ++i) {
-            Assert.assertEquals("comparing element " + i, expected[i], actual[i]);
-        }
+    void checkAppendedSwitchesPassedThrough() {
+        CommandLine cl = CommandLine.getInstance();
+        Assert.assertTrue(cl.hasSwitch(CL_ADDED_SWITCH));
+        Assert.assertTrue(cl.hasSwitch(CL_ADDED_SWITCH_2));
+        Assert.assertTrue(CL_ADDED_VALUE_2.equals(cl.getSwitchValue(CL_ADDED_SWITCH_2)));
     }
 
     @Test
-    @SmallTest
+    @MediumTest
     @Feature({"Android-AppBase"})
-    public void testJavaInitialization() {
+    public void testJavaNativeTransition() {
         CommandLine.init(INIT_SWITCHES);
         checkInitSwitches();
-        checkSettingThenGettingThenRemoving();
-    }
-
-    @Test
-    @SmallTest
-    @Feature({"Android-AppBase"})
-    public void testBufferInitialization() {
-        CommandLine.init(CommandLine.tokenizeQuotedArguments(INIT_SWITCHES_BUFFER));
+        loadJni();
         checkInitSwitches();
-        checkSettingThenGettingThenRemoving();
+        checkSettingThenGetting();
     }
 
     @Test
-    @SmallTest
+    @MediumTest
     @Feature({"Android-AppBase"})
-    public void testArgumentTokenizer() {
-        String toParse = " a\"\\bc de\\\"f g\"\\h ij    k\" \"lm";
-        String[] expected = { "a\\bc de\"f g\\h",
-                              "ij",
-                              "k lm" };
-        checkTokenizer(expected, toParse);
-
-        toParse = "";
-        expected = new String[0];
-        checkTokenizer(expected, toParse);
-
-        toParse = " \t\n";
-        checkTokenizer(expected, toParse);
-
-        toParse = " \"a'b\" 'c\"d' \"e\\\"f\" 'g\\'h' \"i\\'j\" 'k\\\"l'"
-                + " m\"n\\'o\"p q'r\\\"s't";
-        expected = new String[] { "a'b",
-                                  "c\"d",
-                                  "e\"f",
-                                  "g'h",
-                                  "i\\'j",
-                                  "k\\\"l",
-                                  "mn\\'op",
-                                  "qr\\\"st"};
-        checkTokenizer(expected, toParse);
+    public void testJavaNativeTransitionAfterAppends() {
+        CommandLine.init(INIT_SWITCHES);
+        checkInitSwitches();
+        checkSettingThenGetting();
+        loadJni();
+        checkInitSwitches();
+        checkAppendedSwitchesPassedThrough();
     }
 
     @Test
-    @SmallTest
+    @MediumTest
     @Feature({"Android-AppBase"})
-    public void testUpdatingArgList() {
+    public void testNativeInitialization() {
         CommandLine.init(null);
-        CommandLine cl = CommandLine.getInstance();
-        cl.appendSwitch(CL_ADDED_SWITCH);
-        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, CL_ADDED_VALUE_2);
-        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, "updatedValue");
-
-        final String[] expectedValueForBothSwitches = {
-                "",
-                "--" + CL_ADDED_SWITCH,
-                "--" + CL_ADDED_SWITCH_2 + "=" + CL_ADDED_VALUE_2,
-                "--" + CL_ADDED_SWITCH_2 + "=updatedValue",
-        };
-        Assert.assertArrayEquals("Appending a switch multiple times should add multiple args",
-                expectedValueForBothSwitches, CommandLine.getJavaSwitchesOrNull());
-
-        cl.removeSwitch(CL_ADDED_SWITCH_2);
-        final String[] expectedValueWithSecondSwitchRemoved = {
-                "",
-                "--" + CL_ADDED_SWITCH,
-        };
-        Assert.assertArrayEquals("Removing a switch should remove all its args",
-                expectedValueWithSecondSwitchRemoved, CommandLine.getJavaSwitchesOrNull());
+        loadJni();
+        // Drop the program name for use with appendSwitchesAndArguments.
+        String[] args = new String[INIT_SWITCHES.length - 1];
+        System.arraycopy(INIT_SWITCHES, 1, args, 0, args.length);
+        CommandLine.getInstance().appendSwitchesAndArguments(args);
+        checkInitSwitches();
+        checkSettingThenGetting();
     }
 }
diff --git a/base/android/javatests/src/org/chromium/base/EarlyTraceEventTest.java b/base/android/javatests/src/org/chromium/base/EarlyTraceEventTest.java
index 47e572d..db5e50b 100644
--- a/base/android/javatests/src/org/chromium/base/EarlyTraceEventTest.java
+++ b/base/android/javatests/src/org/chromium/base/EarlyTraceEventTest.java
@@ -48,8 +48,8 @@
         long myThreadId = Process.myTid();
         long beforeNanos = System.nanoTime();
         long beforeThreadMillis = SystemClock.currentThreadTimeMillis();
-        EarlyTraceEvent.begin(EVENT_NAME, false /*isToplevel*/);
-        EarlyTraceEvent.end(EVENT_NAME, false /*isToplevel*/);
+        EarlyTraceEvent.begin(EVENT_NAME, /* isToplevel= */ false);
+        EarlyTraceEvent.end(EVENT_NAME, /* isToplevel= */ false);
         Assert.assertTrue(EarlyTraceEvent.enabled());
         long afterNanos = System.nanoTime();
         long afterThreadMillis = SystemClock.currentThreadTimeMillis();
@@ -131,7 +131,7 @@
     @Feature({"Android-AppBase"})
     public void testIncompleteEvent() {
         EarlyTraceEvent.enable();
-        EarlyTraceEvent.begin(EVENT_NAME, true /*isToplevel*/);
+        EarlyTraceEvent.begin(EVENT_NAME, /* isToplevel= */ true);
 
         List<Event> matchingEvents =
                 EarlyTraceEvent.getMatchingCompletedEventsForTesting(EVENT_NAME);
@@ -145,8 +145,8 @@
     @SmallTest
     @Feature({"Android-AppBase"})
     public void testIgnoreEventsWhenDisabled() {
-        EarlyTraceEvent.begin(EVENT_NAME, false /*isToplevel*/);
-        EarlyTraceEvent.end(EVENT_NAME, false /*isToplevel*/);
+        EarlyTraceEvent.begin(EVENT_NAME, /* isToplevel= */ false);
+        EarlyTraceEvent.end(EVENT_NAME, /* isToplevel= */ false);
         try (TraceEvent e = TraceEvent.scoped(EVENT_NAME2)) {
             // Required comment to pass presubmit checks.
         }
@@ -171,8 +171,8 @@
     @Feature({"Android-AppBase"})
     public void testCannotBeReenabledOnceFinished() {
         EarlyTraceEvent.enable();
-        EarlyTraceEvent.begin(EVENT_NAME, false /*isToplevel*/);
-        EarlyTraceEvent.end(EVENT_NAME, false /*isToplevel*/);
+        EarlyTraceEvent.begin(EVENT_NAME, /* isToplevel= */ false);
+        EarlyTraceEvent.end(EVENT_NAME, /* isToplevel= */ false);
         EarlyTraceEvent.disable();
         Assert.assertEquals(EarlyTraceEvent.STATE_FINISHED, EarlyTraceEvent.sState);
 
@@ -187,14 +187,15 @@
         EarlyTraceEvent.enable();
         final long[] threadId = {0};
 
-        Thread thread = new Thread() {
-            @Override
-            public void run() {
-                TraceEvent.begin(EVENT_NAME);
-                threadId[0] = Process.myTid();
-                TraceEvent.end(EVENT_NAME);
-            }
-        };
+        Thread thread =
+                new Thread() {
+                    @Override
+                    public void run() {
+                        TraceEvent.begin(EVENT_NAME);
+                        threadId[0] = Process.myTid();
+                        TraceEvent.end(EVENT_NAME);
+                    }
+                };
         thread.start();
         thread.join();
 
diff --git a/base/android/javatests/src/org/chromium/base/IntentUtilsTest.java b/base/android/javatests/src/org/chromium/base/IntentUtilsTest.java
index c89141d..5dc6c74 100644
--- a/base/android/javatests/src/org/chromium/base/IntentUtilsTest.java
+++ b/base/android/javatests/src/org/chromium/base/IntentUtilsTest.java
@@ -24,17 +24,13 @@
 import org.chromium.base.test.util.Batch;
 import org.chromium.build.BuildConfig;
 
-/**
- * Tests for {@link IntentUtils}.
- */
+/** Tests for {@link IntentUtils}. */
 @RunWith(BaseJUnit4ClassRunner.class)
 @Batch(Batch.UNIT_TESTS)
 public class IntentUtilsTest {
-    @Mock
-    private Context mContext;
+    @Mock private Context mContext;
 
-    @Rule
-    public MockitoRule mMockitoRule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS);
+    @Rule public MockitoRule mMockitoRule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS);
 
     private void assertTargetsSelf(boolean targetsSelf, Intent intent, boolean expectAssertion) {
         boolean asserted = false;
diff --git a/base/android/javatests/src/org/chromium/base/LocaleUtilsTest.java b/base/android/javatests/src/org/chromium/base/LocaleUtilsTest.java
index bc869dc..64f1144 100644
--- a/base/android/javatests/src/org/chromium/base/LocaleUtilsTest.java
+++ b/base/android/javatests/src/org/chromium/base/LocaleUtilsTest.java
@@ -59,7 +59,6 @@
     // This is also a part of test for toLanguageTags when API level is 24 or higher
     @Test
     @SmallTest
-    @MinAndroidSdkLevel(Build.VERSION_CODES.LOLLIPOP)
     public void testGetUpdatedLocaleForChromium() {
         Locale locale = new Locale("jp");
         Locale updatedLocale = LocaleUtils.getUpdatedLocaleForChromium(locale);
@@ -102,7 +101,6 @@
     // This is also a part of test for forLanguageTag when API level is 21 or higher
     @Test
     @SmallTest
-    @MinAndroidSdkLevel(Build.VERSION_CODES.LOLLIPOP)
     public void testGetUpdatedLocaleForAndroid() {
         Locale locale = new Locale("jp");
         Locale updatedLocale = LocaleUtils.getUpdatedLocaleForAndroid(locale);
@@ -194,80 +192,6 @@
         Assert.assertEquals("nn-NO", localeString);
     }
 
-    // Test for forLanguageTag when API level is lower than 21
-    @Test
-    @SmallTest
-    public void testForLanguageTagCompat() {
-        String languageTag = "";
-        Locale locale = new Locale("");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "und";
-        locale = new Locale("");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "en";
-        locale = new Locale("en");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "mas";
-        locale = new Locale("mas");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "en-GB";
-        locale = new Locale("en", "GB");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "es-419";
-        locale = new Locale("es", "419");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        // Tests if updated Chromium language code and deprecated language code
-        // are pointing to the same Locale Object.
-        languageTag = "he";
-        locale = new Locale("iw");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "iw";
-        locale = new Locale("he");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "ji";
-        locale = new Locale("yi");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "yi";
-        locale = new Locale("ji");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "in";
-        locale = new Locale("id");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "id";
-        locale = new Locale("in");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        // Tests for Tagalog/Filipino if updated Chromium language code and
-        // language code are pointing to the same Locale Object.
-        languageTag = "tl";
-        locale = new Locale("tl");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "fil";
-        locale = new Locale("tl");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        // Test with invalid inputs.
-        languageTag = "notValidLanguage";
-        locale = new Locale("");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-
-        languageTag = "en-notValidCountry";
-        locale = new Locale("en");
-        Assert.assertEquals(locale, LocaleUtils.forLanguageTagCompat(languageTag));
-    }
-
     // Test for toLanguage.
     @Test
     @SmallTest
diff --git a/base/android/javatests/src/org/chromium/base/PackageUtilsTest.java b/base/android/javatests/src/org/chromium/base/PackageUtilsTest.java
index 7bb53ea..a3bc3d8 100644
--- a/base/android/javatests/src/org/chromium/base/PackageUtilsTest.java
+++ b/base/android/javatests/src/org/chromium/base/PackageUtilsTest.java
@@ -25,22 +25,29 @@
 import java.util.Collections;
 import java.util.List;
 
-/**
- * Tests for {@link PackageUtils}.
- */
+/** Tests for {@link PackageUtils}. */
 @RunWith(BaseJUnit4ClassRunner.class)
 @Batch(Batch.UNIT_TESTS)
 public class PackageUtilsTest {
-    private static final byte[] BYTE_ARRAY = new byte[] {(byte) 0xaa, (byte) 0xbb, (byte) 0xcc,
-            (byte) 0x10, (byte) 0x20, (byte) 0x30, (byte) 0x01, (byte) 0x02};
+    private static final byte[] BYTE_ARRAY =
+            new byte[] {
+                (byte) 0xaa,
+                (byte) 0xbb,
+                (byte) 0xcc,
+                (byte) 0x10,
+                (byte) 0x20,
+                (byte) 0x30,
+                (byte) 0x01,
+                (byte) 0x02
+            };
     private static final String STRING_ARRAY = "AA:BB:CC:10:20:30:01:02";
 
     private static final String SHA_256_FINGERPRINT_PUBLIC =
             "32:A2:FC:74:D7:31:10:58:59:E5:A8:5D:F1:6D:95:F1:02:D8:5B"
-            + ":22:09:9B:80:64:C5:D8:91:5C:61:DA:D1:E0";
+                    + ":22:09:9B:80:64:C5:D8:91:5C:61:DA:D1:E0";
     private static final String SHA_256_FINGERPRINT_OFFICIAL =
             "19:75:B2:F1:71:77:BC:89:A5:DF:F3:1F:9E:64:A6:CA:E2:81:A5"
-            + ":3D:C1:D1:D5:9B:1D:14:7F:E1:C8:2A:FA:00";
+                    + ":3D:C1:D1:D5:9B:1D:14:7F:E1:C8:2A:FA:00";
     private static final String PACKAGE_NAME =
             ContextUtils.getApplicationContext().getPackageName();
 
@@ -56,8 +63,10 @@
         PackageManager pm = ContextUtils.getApplicationContext().getPackageManager();
         List<String> fingerprints = getCertificateSHA256FingerprintForPackage(PACKAGE_NAME);
 
-        assertThat(fingerprints,
-                anyOf(is(Collections.singletonList(SHA_256_FINGERPRINT_PUBLIC)),
+        assertThat(
+                fingerprints,
+                anyOf(
+                        is(Collections.singletonList(SHA_256_FINGERPRINT_PUBLIC)),
                         is(Collections.singletonList(SHA_256_FINGERPRINT_OFFICIAL))));
     }
 }
diff --git a/base/android/javatests/src/org/chromium/base/StrictModeContextTest.java b/base/android/javatests/src/org/chromium/base/StrictModeContextTest.java
index 5967130..314fdc6 100644
--- a/base/android/javatests/src/org/chromium/base/StrictModeContextTest.java
+++ b/base/android/javatests/src/org/chromium/base/StrictModeContextTest.java
@@ -21,9 +21,7 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 
-/**
- * Tests for the StrictModeContext class.
- */
+/** Tests for the StrictModeContext class. */
 @RunWith(BaseJUnit4ClassRunner.class)
 public class StrictModeContextTest {
     private StrictMode.ThreadPolicy mOldThreadPolicy;
@@ -48,11 +46,12 @@
     private void enableStrictMode() {
         mOldThreadPolicy = StrictMode.getThreadPolicy();
         mOldVmPolicy = StrictMode.getVmPolicy();
-        StrictMode.setThreadPolicy(new StrictMode.ThreadPolicy.Builder()
-                                           .detectAll()
-                                           .penaltyLog()
-                                           .penaltyDeath()
-                                           .build());
+        StrictMode.setThreadPolicy(
+                new StrictMode.ThreadPolicy.Builder()
+                        .detectAll()
+                        .penaltyLog()
+                        .penaltyDeath()
+                        .build());
         StrictMode.setVmPolicy(
                 new StrictMode.VmPolicy.Builder().detectAll().penaltyLog().penaltyDeath().build());
     }
diff --git a/base/android/javatests/src/org/chromium/base/library_loader/EarlyNativeTest.java b/base/android/javatests/src/org/chromium/base/library_loader/EarlyNativeTest.java
index 2126301..14f67a3 100644
--- a/base/android/javatests/src/org/chromium/base/library_loader/EarlyNativeTest.java
+++ b/base/android/javatests/src/org/chromium/base/library_loader/EarlyNativeTest.java
@@ -6,28 +6,23 @@
 
 import androidx.test.filters.SmallTest;
 
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeLibraryLoadedStatus;
+import org.jni_zero.NativeMethods;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
-import org.chromium.base.JniException;
-import org.chromium.base.NativeLibraryLoadedStatus;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
 import org.chromium.base.test.BaseJUnit4ClassRunner;
 import org.chromium.base.test.util.Batch;
 import org.chromium.base.test.util.CallbackHelper;
 import org.chromium.build.BuildConfig;
-import org.chromium.build.annotations.MainDex;
 
-/**
- * Tests for early JNI initialization.
- */
+/** Tests for early JNI initialization. */
 @RunWith(BaseJUnit4ClassRunner.class)
 @JNINamespace("base")
-@MainDex
 @Batch(Batch.UNIT_TESTS)
 public class EarlyNativeTest {
     private boolean mWasInitialized;
@@ -53,6 +48,7 @@
     @NativeMethods
     interface Natives {
         boolean isCommandLineInitialized();
+
         boolean isProcessNameEmpty();
     }
 
@@ -95,7 +91,7 @@
         try {
             EarlyNativeTestJni.get().isCommandLineInitialized();
             Assert.fail("Using JNI before the library is loaded should throw an exception.");
-        } catch (JniException e) {
+        } catch (NativeLibraryLoadedStatus.NativeNotLoadedException e) {
         }
     }
 }
diff --git a/base/android/javatests/src/org/chromium/base/profiler/TestSupport.java b/base/android/javatests/src/org/chromium/base/profiler/TestSupport.java
index 4bbf3f2..be6f003 100644
--- a/base/android/javatests/src/org/chromium/base/profiler/TestSupport.java
+++ b/base/android/javatests/src/org/chromium/base/profiler/TestSupport.java
@@ -4,9 +4,9 @@
 
 package org.chromium.base.profiler;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * Helper to run code through JNI layer to test JNI unwinding.
diff --git a/base/android/javatests/src/org/chromium/base/task/AsyncTaskTest.java b/base/android/javatests/src/org/chromium/base/task/AsyncTaskTest.java
index 17fe56f..f33c00d 100644
--- a/base/android/javatests/src/org/chromium/base/task/AsyncTaskTest.java
+++ b/base/android/javatests/src/org/chromium/base/task/AsyncTaskTest.java
@@ -50,8 +50,7 @@
 
     private static final int QUEUE_SIZE = 40;
 
-    @Rule
-    public ExpectedException thrown = ExpectedException.none();
+    @Rule public ExpectedException thrown = ExpectedException.none();
 
     /**
      * Test filling the queue with basic Runnables, then add a final AsyncTask to overfill it, and
@@ -60,19 +59,26 @@
     @Test
     @SmallTest
     public void testChromeThreadPoolExecutorRunnables() {
-        Executor executor = new ChromeThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS,
-                new ArrayBlockingQueue<Runnable>(QUEUE_SIZE), new ThreadFactory() {
-                    @Override
-                    public Thread newThread(@NonNull Runnable r) {
-                        return null;
-                    }
-                });
+        Executor executor =
+                new ChromeThreadPoolExecutor(
+                        1,
+                        1,
+                        1,
+                        TimeUnit.SECONDS,
+                        new ArrayBlockingQueue<Runnable>(QUEUE_SIZE),
+                        new ThreadFactory() {
+                            @Override
+                            public Thread newThread(@NonNull Runnable r) {
+                                return null;
+                            }
+                        });
         for (int i = 0; i < QUEUE_SIZE; i++) {
             executor.execute(new SpecialRunnable());
         }
         thrown.expect(RejectedExecutionException.class);
-        thrown.expectMessage(CoreMatchers.containsString(
-                "org.chromium.base.task.AsyncTaskTest$SpecialRunnable"));
+        thrown.expectMessage(
+                CoreMatchers.containsString(
+                        "org.chromium.base.task.AsyncTaskTest$SpecialRunnable"));
         thrown.expectMessage(
                 CoreMatchers.not(CoreMatchers.containsString("SpecialChromeAsyncTask")));
         new SpecialChromeAsyncTask().executeOnExecutor(executor);
@@ -85,19 +91,26 @@
     @Test
     @SmallTest
     public void testChromeThreadPoolExecutorChromeAsyncTask() {
-        Executor executor = new ChromeThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS,
-                new ArrayBlockingQueue<Runnable>(QUEUE_SIZE), new ThreadFactory() {
-                    @Override
-                    public Thread newThread(@NonNull Runnable r) {
-                        return null;
-                    }
-                });
+        Executor executor =
+                new ChromeThreadPoolExecutor(
+                        1,
+                        1,
+                        1,
+                        TimeUnit.SECONDS,
+                        new ArrayBlockingQueue<Runnable>(QUEUE_SIZE),
+                        new ThreadFactory() {
+                            @Override
+                            public Thread newThread(@NonNull Runnable r) {
+                                return null;
+                            }
+                        });
         for (int i = 0; i < QUEUE_SIZE; i++) {
             new SpecialChromeAsyncTask().executeOnExecutor(executor);
         }
         thrown.expect(RejectedExecutionException.class);
-        thrown.expectMessage(CoreMatchers.containsString(
-                "org.chromium.base.task.AsyncTaskTest$SpecialChromeAsyncTask"));
+        thrown.expectMessage(
+                CoreMatchers.containsString(
+                        "org.chromium.base.task.AsyncTaskTest$SpecialChromeAsyncTask"));
         thrown.expectMessage(CoreMatchers.not(CoreMatchers.containsString("SpecialOsAsyncTask")));
         new SpecialOsAsyncTask().executeOnExecutor(executor);
     }
@@ -109,19 +122,26 @@
     @Test
     @SmallTest
     public void testChromeThreadPoolExecutorOsAsyncTask() {
-        Executor executor = new ChromeThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS,
-                new ArrayBlockingQueue<Runnable>(QUEUE_SIZE), new ThreadFactory() {
-                    @Override
-                    public Thread newThread(@NonNull Runnable r) {
-                        return null;
-                    }
-                });
+        Executor executor =
+                new ChromeThreadPoolExecutor(
+                        1,
+                        1,
+                        1,
+                        TimeUnit.SECONDS,
+                        new ArrayBlockingQueue<Runnable>(QUEUE_SIZE),
+                        new ThreadFactory() {
+                            @Override
+                            public Thread newThread(@NonNull Runnable r) {
+                                return null;
+                            }
+                        });
         for (int i = 0; i < QUEUE_SIZE; i++) {
             new SpecialOsAsyncTask().executeOnExecutor(executor);
         }
         thrown.expect(RejectedExecutionException.class);
-        thrown.expectMessage(CoreMatchers.containsString(
-                "org.chromium.base.task.AsyncTaskTest$SpecialOsAsyncTask"));
+        thrown.expectMessage(
+                CoreMatchers.containsString(
+                        "org.chromium.base.task.AsyncTaskTest$SpecialOsAsyncTask"));
         thrown.expectMessage(
                 CoreMatchers.not(CoreMatchers.containsString("SpecialChromeAsyncTask")));
         new SpecialChromeAsyncTask().executeOnExecutor(executor);
diff --git a/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java b/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java
index d836455..baed132 100644
--- a/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java
+++ b/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java
@@ -36,15 +36,17 @@
         // This test should not timeout.
         final Object lock = new Object();
         final AtomicBoolean taskExecuted = new AtomicBoolean();
-        PostTask.postTask(TaskTraits.USER_BLOCKING, new Runnable() {
-            @Override
-            public void run() {
-                synchronized (lock) {
-                    taskExecuted.set(true);
-                    lock.notify();
-                }
-            }
-        });
+        PostTask.postTask(
+                TaskTraits.USER_BLOCKING,
+                new Runnable() {
+                    @Override
+                    public void run() {
+                        synchronized (lock) {
+                            taskExecuted.set(true);
+                            lock.notify();
+                        }
+                    }
+                });
         synchronized (lock) {
             try {
                 while (!taskExecuted.get()) {
diff --git a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java
index 336aa0f..2903e15 100644
--- a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java
+++ b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java
@@ -22,8 +22,6 @@
  */
 public class HistogramWatcherTestBase {
     protected static final String TIMES_HISTOGRAM_1 = "TimesHistogram1";
-    protected static final String TIMES_HISTOGRAM_2 = "TimesHistogram2";
-    protected static final String TIMES_HISTOGRAM_3 = "TimesHistogram3";
     protected static final String BOOLEAN_HISTOGRAM = "BooleanHistogram";
     protected static final String EXACT_LINEAR_HISTOGRAM_1 = "ExactLinearHistogram"; // max 10
     protected static final String EXACT_LINEAR_HISTOGRAM_2 = "ExactLinearHistogram2"; // max 20
diff --git a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java
index a173484..50e75b9 100644
--- a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java
+++ b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java
@@ -169,11 +169,12 @@
     @MediumTest
     public void testOutOfOrderExpectations_success() {
         // Arrange
-        mWatcher = HistogramWatcher.newBuilder()
-                           .expectIntRecord(TIMES_HISTOGRAM_1, 8000)
-                           .expectIntRecord(TIMES_HISTOGRAM_1, 6000)
-                           .expectIntRecord(TIMES_HISTOGRAM_1, 7000)
-                           .build();
+        mWatcher =
+                HistogramWatcher.newBuilder()
+                        .expectIntRecord(TIMES_HISTOGRAM_1, 8000)
+                        .expectIntRecord(TIMES_HISTOGRAM_1, 6000)
+                        .expectIntRecord(TIMES_HISTOGRAM_1, 7000)
+                        .build();
 
         // Act
         RecordHistogram.recordTimesHistogram(TIMES_HISTOGRAM_1, 6000);
@@ -188,11 +189,12 @@
     @MediumTest
     public void testOutOfOrderExpectations_failure() {
         // Arrange
-        mWatcher = HistogramWatcher.newBuilder()
-                           .expectIntRecord(TIMES_HISTOGRAM_1, 8000)
-                           .expectIntRecord(TIMES_HISTOGRAM_1, 6000)
-                           .expectIntRecord(TIMES_HISTOGRAM_1, 7000)
-                           .build();
+        mWatcher =
+                HistogramWatcher.newBuilder()
+                        .expectIntRecord(TIMES_HISTOGRAM_1, 8000)
+                        .expectIntRecord(TIMES_HISTOGRAM_1, 6000)
+                        .expectIntRecord(TIMES_HISTOGRAM_1, 7000)
+                        .build();
 
         // Act
         RecordHistogram.recordTimesHistogram(TIMES_HISTOGRAM_1, 7000);
@@ -214,9 +216,10 @@
     @MediumTest
     public void testZeroCountExpectations_failure() {
         try {
-            mWatcher = HistogramWatcher.newBuilder()
-                               .expectIntRecordTimes(TIMES_HISTOGRAM_1, 1, 0)
-                               .build();
+            mWatcher =
+                    HistogramWatcher.newBuilder()
+                            .expectIntRecordTimes(TIMES_HISTOGRAM_1, 1, 0)
+                            .build();
         } catch (IllegalArgumentException e) {
             assertContains("zero", e.getMessage());
             return;
@@ -228,13 +231,33 @@
     @MediumTest
     public void testNegativeCountExpectations_failure() {
         try {
-            mWatcher = HistogramWatcher.newBuilder()
-                               .expectIntRecordTimes(TIMES_HISTOGRAM_1, 1, -1)
-                               .build();
+            mWatcher =
+                    HistogramWatcher.newBuilder()
+                            .expectIntRecordTimes(TIMES_HISTOGRAM_1, 1, -1)
+                            .build();
         } catch (IllegalArgumentException e) {
             assertContains("negative", e.getMessage());
             return;
         }
         Assert.fail("Expected IllegalArgumentException");
     }
+
+    @Test
+    @MediumTest
+    public void testTryWithResources_success() {
+        try (HistogramWatcher ignored = HistogramWatcher.newSingleRecordWatcher(ENUM_HISTOGRAM)) {
+            RecordHistogram.recordEnumeratedHistogram(ENUM_HISTOGRAM, 0, 10);
+        }
+    }
+
+    @Test
+    @MediumTest
+    public void testTryWithResources_failure() {
+        try (HistogramWatcher ignored = HistogramWatcher.newSingleRecordWatcher(ENUM_HISTOGRAM)) {
+        } catch (AssertionError e) {
+            assertContains(ENUM_HISTOGRAM, e.getMessage());
+            return;
+        }
+        Assert.fail("Expected AssertionError");
+    }
 }
diff --git a/base/android/jni_android.cc b/base/android/jni_android.cc
index 704c4ac..74eb3cb 100644
--- a/base/android/jni_android.cc
+++ b/base/android/jni_android.cc
@@ -10,6 +10,7 @@
 #include "base/android/java_exception_reporter.h"
 #include "base/android/jni_string.h"
 #include "base/android/jni_utils.h"
+#include "base/android_runtime_jni_headers/Throwable_jni.h"
 #include "base/base_jni/PiiElider_jni.h"
 #include "base/debug/debugging_buildflags.h"
 #include "base/logging.h"
@@ -24,10 +25,6 @@
 jobject g_class_loader = nullptr;
 jmethodID g_class_loader_load_class_method_id = 0;
 
-#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
-ABSL_CONST_INIT thread_local void* stack_frame_pointer = nullptr;
-#endif
-
 bool g_fatal_exception_occurred = false;
 
 ScopedJavaLocalRef<jclass> GetClassInternal(JNIEnv* env,
@@ -129,6 +126,10 @@
   return g_jvm;
 }
 
+void DisableJvmForTesting() {
+  g_jvm = nullptr;
+}
+
 void InitGlobalClassLoader(JNIEnv* env) {
   DCHECK(g_class_loader == nullptr);
 
@@ -267,8 +268,8 @@
   if (!HasException(env))
     return;
 
-  jthrowable java_throwable = env->ExceptionOccurred();
-  if (java_throwable) {
+  ScopedJavaLocalRef<jthrowable> throwable(env, env->ExceptionOccurred());
+  if (throwable) {
     // Clear the pending exception, since a local reference is now held.
     env->ExceptionDescribe();
     env->ExceptionClear();
@@ -281,7 +282,7 @@
       g_fatal_exception_occurred = true;
       // RVO should avoid any extra copies of the exception string.
       base::android::SetJavaException(
-          GetJavaExceptionInfo(env, java_throwable).c_str());
+          GetJavaExceptionInfo(env, throwable).c_str());
     }
   }
 
@@ -289,26 +290,37 @@
   LOG(FATAL) << "Please include Java exception stack in crash report";
 }
 
-std::string GetJavaExceptionInfo(JNIEnv* env, jthrowable java_throwable) {
+std::string GetJavaExceptionInfo(JNIEnv* env,
+                                 const JavaRef<jthrowable>& throwable) {
   ScopedJavaLocalRef<jstring> sanitized_exception_string =
-      Java_PiiElider_getSanitizedStacktrace(
-          env, ScopedJavaLocalRef(env, java_throwable));
+      Java_PiiElider_getSanitizedStacktrace(env, throwable);
 
   return ConvertJavaStringToUTF8(sanitized_exception_string);
 }
 
-#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
-
-JNIStackFrameSaver::JNIStackFrameSaver(void* current_fp)
-    : resetter_(&stack_frame_pointer, current_fp) {}
-
-JNIStackFrameSaver::~JNIStackFrameSaver() = default;
-
-void* JNIStackFrameSaver::SavedFrame() {
-  return stack_frame_pointer;
+std::string GetJavaStackTraceIfPresent() {
+  JNIEnv* env = nullptr;
+  if (g_jvm) {
+    g_jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_2);
+  }
+  if (!env) {
+    // JNI has not been initialized on this thread.
+    return {};
+  }
+  ScopedJavaLocalRef<jthrowable> throwable =
+      JNI_Throwable::Java_Throwable_Constructor(env);
+  std::string ret = GetJavaExceptionInfo(env, throwable);
+  // Strip the exception message and leave only the "at" lines. Example:
+  // java.lang.Throwable:
+  // {tab}at Clazz.method(Clazz.java:111)
+  // {tab}at ...
+  size_t newline_idx = ret.find('\n');
+  if (newline_idx == std::string::npos) {
+    // There are no java frames.
+    return {};
+  }
+  return ret.substr(newline_idx + 1);
 }
 
-#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
-
 }  // namespace android
 }  // namespace base
diff --git a/base/android/jni_android.h b/base/android/jni_android.h
index 81888ca..b7db68b 100644
--- a/base/android/jni_android.h
+++ b/base/android/jni_android.h
@@ -18,37 +18,6 @@
 #include "base/debug/debugging_buildflags.h"
 #include "base/debug/stack_trace.h"
 
-#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
-
-// When profiling is enabled (enable_profiling=true) this macro is added to
-// all generated JNI stubs so that it becomes the last thing that runs before
-// control goes into Java.
-//
-// This macro saves stack frame pointer of the current function. Saved value
-// used later by JNI_LINK_SAVED_FRAME_POINTER.
-#define JNI_SAVE_FRAME_POINTER \
-  base::android::JNIStackFrameSaver jni_frame_saver(__builtin_frame_address(0))
-
-// When profiling is enabled (enable_profiling=true) this macro is added to
-// all generated JNI callbacks so that it becomes the first thing that runs
-// after control returns from Java.
-//
-// This macro links stack frame of the current function to the stack frame
-// saved by JNI_SAVE_FRAME_POINTER, allowing frame-based unwinding
-// (used by the heap profiler) to produce complete traces.
-#define JNI_LINK_SAVED_FRAME_POINTER                    \
-  base::debug::ScopedStackFrameLinker jni_frame_linker( \
-      __builtin_frame_address(0),                       \
-      base::android::JNIStackFrameSaver::SavedFrame())
-
-#else
-
-// Frame-based stack unwinding is not supported, do nothing.
-#define JNI_SAVE_FRAME_POINTER
-#define JNI_LINK_SAVED_FRAME_POINTER
-
-#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
-
 namespace base {
 namespace android {
 
@@ -83,6 +52,12 @@
 // Returns the global JVM, or nullptr if it has not been initialized.
 BASE_EXPORT JavaVM* GetVM();
 
+// Do not allow any future native->java calls.
+// This is necessary in gtest DEATH_TESTS to prevent
+// GetJavaStackTraceIfPresent() from accessing a defunct JVM (due to fork()).
+// https://crbug.com/1484834
+BASE_EXPORT void DisableJvmForTesting();
+
 // Initializes the global ClassLoader used by the GetClass and LazyGetClass
 // methods. This is needed because JNI will use the base ClassLoader when there
 // is no Java code on the stack. The base ClassLoader doesn't know about any of
@@ -154,28 +129,11 @@
 BASE_EXPORT void CheckException(JNIEnv* env);
 
 // This returns a string representation of the java stack trace.
-BASE_EXPORT std::string GetJavaExceptionInfo(JNIEnv* env,
-                                             jthrowable java_throwable);
-
-#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
-
-// Saves caller's PC and stack frame in a thread-local variable.
-// Implemented only when profiling is enabled (enable_profiling=true).
-class BASE_EXPORT JNIStackFrameSaver {
- public:
-  JNIStackFrameSaver(void* current_fp);
-
-  JNIStackFrameSaver(const JNIStackFrameSaver&) = delete;
-  JNIStackFrameSaver& operator=(const JNIStackFrameSaver&) = delete;
-
-  ~JNIStackFrameSaver();
-  static void* SavedFrame();
-
- private:
-  const AutoReset<void*> resetter_;
-};
-
-#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+BASE_EXPORT std::string GetJavaExceptionInfo(
+    JNIEnv* env,
+    const JavaRef<jthrowable>& throwable);
+// This returns a string representation of the java stack trace.
+BASE_EXPORT std::string GetJavaStackTraceIfPresent();
 
 }  // namespace android
 }  // namespace base
diff --git a/base/android/jni_android_unittest.cc b/base/android/jni_android_unittest.cc
index 8f04d82..a6032b4 100644
--- a/base/android/jni_android_unittest.cc
+++ b/base/android/jni_android_unittest.cc
@@ -6,9 +6,13 @@
 
 #include "base/at_exit.h"
 #include "base/logging.h"
+#include "base/threading/thread.h"
 #include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+using ::testing::StartsWith;
+
 namespace base {
 namespace android {
 
@@ -57,6 +61,33 @@
   LOG(ERROR) << "JNI " << o;
 }
 
+TEST(JNIAndroidTest, GetJavaStackTraceIfPresent) {
+  // The main thread should always have Java frames in it.
+  EXPECT_THAT(GetJavaStackTraceIfPresent(), StartsWith("\tat"));
+
+  class HelperThread : public Thread {
+   public:
+    HelperThread()
+        : Thread("TestThread"), java_stack_1_("X"), java_stack_2_("X") {}
+
+    void Init() override {
+      // Test without a JNIEnv.
+      java_stack_1_ = GetJavaStackTraceIfPresent();
+
+      // Test with a JNIEnv but no Java frames.
+      AttachCurrentThread();
+      java_stack_2_ = GetJavaStackTraceIfPresent();
+    }
+
+    std::string java_stack_1_;
+    std::string java_stack_2_;
+  };
+
+  HelperThread t;
+  t.StartAndWaitForTesting();
+  EXPECT_EQ(t.java_stack_1_, "");
+  EXPECT_EQ(t.java_stack_2_, "");
+}
 
 }  // namespace android
 }  // namespace base
diff --git a/base/android/jni_array.cc b/base/android/jni_array.cc
index 2de7862..6690bd8 100644
--- a/base/android/jni_array.cc
+++ b/base/android/jni_array.cc
@@ -174,7 +174,7 @@
 BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
     JNIEnv* env,
     base::span<const ScopedJavaLocalRef<jobject>> v,
-    ScopedJavaLocalRef<jclass> type) {
+    const JavaRef<jclass>& type) {
   jobjectArray joa =
       env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
   CheckException(env);
@@ -188,7 +188,7 @@
 BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
     JNIEnv* env,
     base::span<const ScopedJavaGlobalRef<jobject>> v,
-    ScopedJavaLocalRef<jclass> type) {
+    const JavaRef<jclass>& type) {
   jobjectArray joa =
       env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
   CheckException(env);
diff --git a/base/android/jni_array.h b/base/android/jni_array.h
index ee3bbb5..0b0f318 100644
--- a/base/android/jni_array.h
+++ b/base/android/jni_array.h
@@ -101,11 +101,11 @@
 BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
     JNIEnv* env,
     base::span<const ScopedJavaLocalRef<jobject>> v,
-    ScopedJavaLocalRef<jclass> type);
+    const JavaRef<jclass>& type);
 BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
     JNIEnv* env,
     base::span<const ScopedJavaGlobalRef<jobject>> v,
-    ScopedJavaLocalRef<jclass> type);
+    const JavaRef<jclass>& type);
 
 // Returns a array of Java byte array converted from |v|.
 BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
diff --git a/base/android/jni_generator/AndroidManifest.xml b/base/android/jni_generator/AndroidManifest.xml
deleted file mode 100644
index 8fd983c..0000000
--- a/base/android/jni_generator/AndroidManifest.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
-  Copyright 2017 The Chromium Authors
-  Use of this source code is governed by a BSD-style license that can be
-  found in the LICENSE file.
--->
-
-<manifest xmlns:android="http://schemas.android.com/apk/res/android" package="org.jni.generator">
-
-    <application></application>
-
-</manifest>
diff --git a/base/android/jni_generator/BUILD.gn b/base/android/jni_generator/BUILD.gn
deleted file mode 100644
index ca97c24..0000000
--- a/base/android/jni_generator/BUILD.gn
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2016 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/android/rules.gni")
-import("//testing/test.gni")
-
-generate_jni("jni_sample_header") {
-  sources = [
-    "//third_party/jni_zero/java/src/org/chromium/example/jni_generator/SampleForAnnotationProcessor.java",
-    "//third_party/jni_zero/java/src/org/chromium/example/jni_generator/SampleForTests.java",
-  ]
-}
-
-android_library("jni_sample_java") {
-  srcjar_deps = [ ":jni_sample_header" ]
-  sources = [
-    "//third_party/jni_zero/java/src/org/chromium/example/jni_generator/SampleForAnnotationProcessor.java",
-    "//third_party/jni_zero/java/src/org/chromium/example/jni_generator/SampleForTests.java",
-  ]
-
-  deps = [
-    "//base:jni_java",
-    "//build/android:build_java",
-  ]
-}
-
-source_set("jni_sample_native_side") {
-  deps = [
-    ":jni_sample_header",
-    "//base",
-  ]
-  sources = [
-    "sample_for_tests.cc",
-    "sample_for_tests.h",
-  ]
-}
-
-generate_jni_registration("jni_registration") {
-  java_targets = [ ":jni_sample_java" ]
-  manual_jni_registration = true
-}
-
-shared_library("jni_sample_lib") {
-  sources = [ "sample_entry_point.cc" ]
-
-  deps = [
-    ":jni_registration",
-    ":jni_sample_native_side",
-    "//base",
-  ]
-}
-
-android_apk("sample_jni_apk") {
-  apk_name = "SampleJni"
-  android_manifest = "AndroidManifest.xml"
-  deps = [ ":jni_sample_java" ]
-  shared_libraries = [ ":jni_sample_lib" ]
-}
-
-# Serves to test that generated bindings compile properly.
-group("jni_generator_tests") {
-  deps = [ ":sample_jni_apk" ]
-}
diff --git a/base/android/jni_generator/DEPS b/base/android/jni_generator/DEPS
new file mode 100644
index 0000000..9410601
--- /dev/null
+++ b/base/android/jni_generator/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+third_party/jni_zero",
+]
diff --git a/base/android/jni_generator/DIR_METADATA b/base/android/jni_generator/DIR_METADATA
deleted file mode 100644
index 9601000..0000000
--- a/base/android/jni_generator/DIR_METADATA
+++ /dev/null
@@ -1,4 +0,0 @@
-monorail {
-  component: "Build"
-}
-team_email: "[email protected]"
diff --git a/base/android/jni_generator/README.md b/base/android/jni_generator/README.md
index 0a9fbf6..ead2b77 100644
--- a/base/android/jni_generator/README.md
+++ b/base/android/jni_generator/README.md
@@ -1,279 +1 @@
-# Overview
-JNI (Java Native Interface) is the mechanism that enables Java code to call
-native functions, and native code to call Java functions.
-
- * Native code calls into Java using APIs from `<jni.h>`, which basically mirror
-   Java's reflection APIs.
- * Java code calls native functions by declaring body-less functions with the
-  `native` keyword, and then calling them as normal Java functions.
-
-`jni_generator` generates boiler-plate code with the goal of making our code:
- 1. easier to write, and
- 2. typesafe.
-
-`jni_generator` uses regular expressions to parse .Java files, so don't do
-anything too fancy. E.g.:
- * Classes must be either explicitly imported, or are assumed to be in
-the same package. To use `java.lang` classes, add an explicit import.
- * Inner classes need to be referenced through the outer class. E.g.:
-   `void call(Outer.Inner inner)`
-
-The presence of any JNI within a class will cause ProGuard obfuscation for
-the class to be disabled.
-
-### Exposing Native Methods
-
-There are two ways to have native methods be found by Java:
-1) Explicitly register the name -> function pointer mapping using JNI's
-   `RegisterNatives()` function.
-2) Export the symbols from the shared library, and let the runtime resolve them
-   on-demand (using `dlsym()`) the first time a native method is called.
-
-2) Is generally preferred due to a smaller code size and less up-front work, but
-1) is sometimes required (e.g. when OS bugs prevent `dlsym()` from working).
-Both ways are supported by this tool.
-
-### Exposing Java Methods
-
-Java methods just need to be annotated with `@CalledByNative`. The generated
-functions can be put into a namespace using `@JNINamespace("your_namespace")`.
-
-## Usage
-
-Because the generator does not generate any source files, generated headers must
-not be `#included` by multiple sources. If there are Java functions that need to
-be called by multiple sources, one source should be chosen to expose the
-functions to the others via additional wrapper functions.
-
-### Calling Java -> Native
-
-- Declare methods using a nested interface annotated with `@NativeMethods`.
-- The JNI annotation processor generates a class named `${OriginalClassName}Jni`
-  with a `get()` method that returns an implementation of the annotated
-  interface. The C++ function that it routes to is the same as if it would be
-  in the legacy method.
-- For each JNI method:
-  - C++ stubs are generated that forward to C++ functions that you must write.
-  - If the first parameter is a C++ object (e.g.
-    `long native${OriginalClassName}`), then the bindings will generate the
-    appropriate cast and call into C++ code.
-
-To add JNI to a class:
-
-1. Enable the JNI processor by adding to your `android_library` target:
-   ```python
-   annotation_processor_deps = [ "//base/android/jni_generator:jni_processor" ]
-   deps = [ "//base:jni_java" ]
-   ```
-2. Create a nested-interface annotated with `@NativeMethods` that contains
-   the declaration of the corresponding static methods you wish to have
-   implemented.
-3. Call native functions using `${OriginalClassName}Jni.get().${method}`
-4. In C++ code, #include the header `${OriginalClassName}_jni.h`. (The path will
-   depend on the location of the `generate_jni` BUILD rule that lists your Java
-   source code.) Only include this header from a single `.cc` file as the
-   header defines functions. That `.cc` must implement your native code by
-   defining non-member functions named `JNI_${OriginalClassName}_${UpperCamelCaseMethod}`
-   for static methods and member functions named `${OriginalClassName}::${UpperCamelCaseMethod}`
-   for non-static methods. Member functions need be declared in the header
-   file as well.
-
-Example:
-#### Java
-```java
-class MyClass {
-  // Cannot be private. Must be package or public.
-  @NativeMethods
-  /* package */ interface Natives {
-    void foo();
-    double bar(int a, int b);
-    // Either the |MyClass| part of the |nativeMyClass| parameter name must
-    // match the native class name exactly, or the method annotation
-    // @NativeClassQualifiedName("MyClass") must be used.
-    //
-    // If the native class is nested, use
-    // @NativeClassQualifiedName("FooClassName::BarClassName") and call the
-    // parameter |nativePointer|.
-    void nonStatic(long nativeMyClass);
-  }
-
-  void callNatives() {
-    // MyClassJni is generated by the JNI annotation processor.
-    // Storing MyClassJni.get() in a field defeats some of the desired R8
-    // optimizations, but local variables are fine.
-    Natives jni = MyClassJni.get();
-    jni.foo();
-    jni.bar(1,2);
-    jni.nonStatic(mNativePointer);
-  }
-}
-```
-#### C++
-```c++
-#include "base/android/jni_android.h"
-#include "<path to BUILD.gn>/<generate_jni target name>/MyClass_jni.h"
-
-class MyClass {
-public:
-  void NonStatic(JNIEnv* env);
-}
-
-// Notice that unlike Java, function names are capitalized in C++.
-// Static function names should follow this format and don't need to be declared.
-void JNI_MyClass_Foo(JNIEnv* env) { ... }
-void JNI_MyClass_Bar(JNIEnv* env, jint a, jint b) { ... }
-
-// Member functions need to be declared.
-void MyClass::NonStatic(JNIEnv* env) { ... }
-```
-
-**Using the 'native' keyword**
-
-- The binding generator also looks for `native` JNI method declarations and
-  generates stubs for them. This used to be the norm, but is now obsolete.
-
-#### Testing Mockable Natives
-
-1. Add the `JniMocker` rule to your test.
-2. Call `JniMocker#mock` in a `setUp()` method for each interface you want to
-   stub out.
-
-`JniMocker` will reset the stubs during `tearDown()`.
-
-```java
-/**
- * Tests for {@link AnimationFrameTimeHistogram}
- */
-@RunWith(BaseRobolectricTestRunner.class)
-@Config(manifest = Config.NONE)
-public class AnimationFrameTimeHistogramTest {
-    @Rule
-    public JniMocker mocker = new JniMocker();
-
-    @Mock
-    AnimationFrameTimeHistogram.Natives mNativeMock;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        mocker.mock(AnimationFrameTimeHistogramJni.TEST_HOOKS, mNativeMock);
-    }
-
-    @Test
-    public void testNatives() {
-        AnimationFrameTimeHistogram hist = new AnimationFrameTimeHistogram("histName");
-        hist.startRecording();
-        hist.endRecording();
-        verify(mNativeMock).saveHistogram(eq("histName"), any(long[].class), anyInt());
-    }
-}
-```
-
-If a native method is called without setting a mock in a unit test, an
-`UnsupportedOperationException` will be thrown.
-
-#### Special case: DFMs
-DFMs have their own generated `GEN_JNI`s, named `<module_name>_GEN_JNI`. In
-order to get your DFM's JNI to use the `<module_name>` prefix, you must pass
-your module name as the argument of the `@NativeMethods` annotation.
-
-So, for example, say your module was named `test_module`. You would annotate
-your `Natives` interface with `@NativeMethods("test_module")`, and this would
-result in `test_module_GEN_JNI`.
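-
-As a rough sketch (the class and method names below are purely illustrative),
-the annotated interface for a DFM named `test_module` might look like:
-
-```java
-class TestModuleFeature {
-  // Passing "test_module" routes these calls through test_module_GEN_JNI
-  // instead of the base GEN_JNI.
-  @NativeMethods("test_module")
-  interface Natives {
-    void doSomething();
-  }
-
-  void callNative() {
-    // TestModuleFeatureJni is generated by the JNI annotation processor.
-    TestModuleFeatureJni.get().doSomething();
-  }
-}
-```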
-
-
-### Testing for readiness: use `get()`
-
-The JNI generator automatically produces asserts that verify that the Natives interface can be
-safely called. These checks are compiled out of release builds, making them an excellent way to
-determine whether your code is called safely.
-
-![Check Flow](doc/jni-check-flow.svg)
-
-Calling `<Class>Jni.get()` is not sufficient, however, to guarantee that native is initialized - it
-is only a debugging tool that asserts you are using native after native has been loaded.
-
-If you expect your code to be called by an external caller, it's often helpful to know _ahead of
-time_ that the context is valid (i.e. either the native libraries are loaded or mocks are
-installed). In this case, call the `get()` method, which performs all of the debug checks listed
-above but does not instantiate a new object for interfacing with the native libraries.
-Note that the unused value returned by `get()` is optimized away in release builds, so there is no
-harm in ignoring it.
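-
-For example (a sketch reusing the illustrative `MyClass` from above), an entry
-point exposed to external callers can assert readiness up front:
-
-```java
-class MyClass {
-  void onExternalCall() {
-    // Runs the debug-only checks described above; the unused return value is
-    // optimized away in release builds.
-    MyClassJni.get();
-
-    // ... proceed, possibly calling natives further down ...
-  }
-}
-```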
-
-#### Addressing `Jni.get()` exceptions.
-
-When you identify a scenario leading to an exception, relocate (or defer) the offending call to a
-place where (or a time when) you know the native libraries have been initialized (e.g.
-`onStartWithNative`, `onNativeInitialized`, etc.).
-
-Please avoid calling `LibraryLoader.isInitialized()` / `LibraryLoader.isLoaded()` in new code.
-Using `LibraryLoader` calls makes unit-testing more difficult:
-* these calls cannot verify whether a mock object is installed, making the use of mocks more
-complicated,
-* using `LibraryLoader.setLibrariesLoadedForNativeTests()` alters the state for subsequently
-executed tests, causing flakiness and failures to be inaccurately reported for these victim tests,
-* introducing `LibraryLoader.is*()` calls in your code immediately affects all callers, forcing
-the authors of the code up the call stack to override `LibraryLoader`'s internal state in order to
-be able to unit-test their code.
-
-However, if your code is going to be called both before and after native is initialized, you are
-forced to call `LibraryLoader.isInitialized()` to be able to differentiate. Calling
-`<Class>Jni.get()` only provides assertions, and will fail in debug builds if you call it when
-native isn't ready.
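-
-A minimal sketch of that "called both before and after native init" case
-(class and method names are illustrative, and the `LibraryLoader` call is
-written in the shorthand used above):
-
-```java
-class MetricsRecorder {
-  private boolean mPendingRecord;
-
-  void maybeRecord() {
-    if (!LibraryLoader.isInitialized()) {
-      // Native isn't ready yet; remember the work instead of calling natives.
-      mPendingRecord = true;
-      return;
-    }
-    MetricsRecorderJni.get().record();
-  }
-
-  // Called once native is ready, e.g. from onNativeInitialized().
-  void onNativeReady() {
-    if (mPendingRecord) {
-      MetricsRecorderJni.get().record();
-      mPendingRecord = false;
-    }
-  }
-}
-```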
-
-### Calling Native -> Java
-
- * Methods annotated with `@CalledByNative` will have stubs generated for them
-   (see the sketch after this list).
-   * Inner class methods must provide the inner class name explicitly
-     (ex. `@CalledByNative("InnerClassName")`)
- * Just call the generated stubs defined in generated `.h` files.
- * For test-only methods you want to call from native, use
-   `@CalledByNativeForTesting` which will ensure that it is stripped in our
-   release binaries.
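-
-For illustration (hypothetical class and method names), the Java side of a
-native-to-Java call might look like:
-
-```java
-class MyClass {
-  @CalledByNative
-  static int addValues(int a, int b) {
-    return a + b;
-  }
-
-  static class InnerClass {
-    // Inner class methods must name the inner class explicitly.
-    @CalledByNative("InnerClass")
-    static void onEvent() {}
-  }
-
-  // Stripped from release binaries.
-  @CalledByNativeForTesting
-  static void resetForTesting() {}
-}
-```
-
-On the C++ side, the generated stubs follow the `Java_MyClass_addValues(env, ...)`
-naming seen in the sample code further down.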
-
-### Java Objects and Garbage Collection
-
-All pointers to Java objects must be registered with JNI in order to prevent
-garbage collection from invalidating them.
-
-For Strings & Arrays - it's common practice to use the `//base/android/jni_*`
-helpers to convert them to `std::vectors` and `std::strings` as soon as
-possible.
-
-For other objects - use smart pointers to store them:
- * `ScopedJavaLocalRef<>` - When lifetime is the current function's scope.
- * `ScopedJavaGlobalRef<>` - When lifetime is longer than the current function's
-   scope.
- * `JavaObjectWeakGlobalRef<>` - Weak reference (do not prevent garbage
-   collection).
- * `JavaParamRef<>` - Use to accept any of the above as a parameter to a
-   function without creating a redundant registration.
-
-### Additional Guidelines / Advice
-
-Minimize the surface API between the two sides. Rather than calling multiple
-functions across boundaries, call only one (and then on the other side, call as
-many little functions as required).
-
-If a Java object "owns" a native one, store the pointer via
-`"long mNativeClassName"`. Be sure to eventually call a native method to delete
-the object. For example, have a `close()` that deletes the native object.
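-
-A sketch of this ownership pattern (all names here are illustrative):
-
-```java
-class DatabaseBridge {
-  // Owns the native peer; 0 means "already destroyed".
-  private long mNativeDatabaseBridge;
-
-  DatabaseBridge() {
-    mNativeDatabaseBridge = DatabaseBridgeJni.get().init(this);
-  }
-
-  void close() {
-    if (mNativeDatabaseBridge != 0) {
-      DatabaseBridgeJni.get().destroy(mNativeDatabaseBridge);
-      mNativeDatabaseBridge = 0;
-    }
-  }
-
-  @NativeMethods
-  interface Natives {
-    long init(DatabaseBridge caller);
-    // |nativeDatabaseBridge| matches the (hypothetical) native class name.
-    void destroy(long nativeDatabaseBridge);
-  }
-}
-```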
-
-The best way to pass "compound" types across in either direction is to
-create an inner class with PODs and a factory function. If possible, mark
-all the fields as "final".
-
-## Build Rules
-
- * `generate_jni` - Generates a header file with stubs for given `.java` files
- * `generate_jar_jni` - Generates a header file with stubs for a given `.jar`
-   file
- * `generate_jni_registration` - Generates a header file with functions to
-   register native-side JNI methods.
-
-Refer to [//build/config/android/rules.gni](https://cs.chromium.org/chromium/src/build/config/android/rules.gni)
-for more about the GN templates.
-
-## Changing `jni_generator`
-
- * Python tests live in `integration_tests.py`
- * A working demo app exists as `//base/android/jni_generator:sample_jni_apk`
+Moved to [//third_party/jni_zero](/third_party/jni_zero/README.md).
diff --git a/base/android/jni_generator/doc/jni-check-flow.puml b/base/android/jni_generator/doc/jni-check-flow.puml
deleted file mode 100755
index 861ec21..0000000
--- a/base/android/jni_generator/doc/jni-check-flow.puml
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/env plantuml -tsvg
-
-@startuml
-scale 1
-
-skinparam shadowing false
-skinparam monochrome true
-
-if (\nTesting enabled?\n) then (Yes)
-  -[#black,dotted]->
-
-partition "//Debug builds only//" #ddd {
-  if (\nMocks installed?\n) then (Yes)
-    :**Return mocks**;
-    detach
-  elseif (\nMocks required?\n) then (Yes)
-    #ffa0a0:throw;
-    detach
-  elseif (\nNative libraries loaded?\n) then (No)
-    #ffa0a0:throw;
-    detach
-  endif
-}
-
-endif
-
-:**Return live instance**;
-
-@enduml
diff --git a/base/android/jni_generator/doc/jni-check-flow.svg b/base/android/jni_generator/doc/jni-check-flow.svg
deleted file mode 100644
index 1ac86e6..0000000
--- a/base/android/jni_generator/doc/jni-check-flow.svg
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" contentScriptType="application/ecmascript" contentStyleType="text/css" height="337px" preserveAspectRatio="none" style="width:500px;height:337px;" version="1.1" viewBox="0 0 500 337" width="500px" zoomAndPan="magnify"><defs/><g><rect fill="#DDDDDD" height="156.4844" style="stroke: #000000; stroke-width: 2.0;" width="454" x="10" y="72.0205"/><path d="M144,73.0205 L144,81.3174 L134,91.3174 L10,91.3174 " fill="none" style="stroke: #000000; stroke-width: 2.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" font-style="italic" lengthAdjust="spacingAndGlyphs" textLength="124" x="13" y="86.0156">Debug builds only</text><polygon fill="#F8F8F8" points="32,111.3174,125,111.3174,137,130.5244,125,149.7314,32,149.7314,20,130.5244,32,111.3174" style="stroke: #383838; stroke-width: 1.5;"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="21" x="82.5" y="159.9419">Yes</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="36" y="121.5278"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="93" x="32" y="134.3325">Mocks installed?</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="36" y="147.1372"/><rect fill="#F8F8F8" height="33.9688" rx="12.5" ry="12.5" style="stroke: #383838; stroke-width: 1.5;" width="116" x="20.5" y="182.5361"/><text fill="#000000" font-family="sans-serif" font-size="12" font-weight="bold" lengthAdjust="spacingAndGlyphs" textLength="96" x="30.5" y="203.6748">Return mocks</text><polygon fill="#F8F8F8" points="169,111.3174,262,111.3174,274,130.5244,262,149.7314,169,149.7314,157,130.5244,169,111.3174" style="stroke: #383838; stroke-width: 1.5;"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="21" x="219.5" y="159.9419">Yes</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="173" y="121.5278"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="93" x="169" y="134.3325">Mocks required?</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="173" y="147.1372"/><rect fill="#BCBCBC" height="33.9688" rx="12.5" ry="12.5" style="stroke: #383838; stroke-width: 1.5;" width="55" x="188" y="182.5361"/><text fill="#000000" font-family="sans-serif" font-size="12" lengthAdjust="spacingAndGlyphs" textLength="35" x="198" y="203.6748">throw</text><polygon fill="#F8F8F8" points="306,111.3174,440,111.3174,452,130.5244,440,149.7314,306,149.7314,294,130.5244,306,111.3174" style="stroke: #383838; stroke-width: 1.5;"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="15" x="377" y="159.9419">No</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="310" y="121.5278"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="134" x="306" y="134.3325">Native libraries loaded?</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="310" y="147.1372"/><rect fill="#BCBCBC" 
height="33.9688" rx="12.5" ry="12.5" style="stroke: #383838; stroke-width: 1.5;" width="55" x="345.5" y="182.5361"/><text fill="#000000" font-family="sans-serif" font-size="12" lengthAdjust="spacingAndGlyphs" textLength="35" x="355.5" y="203.6748">throw</text><polygon fill="#F8F8F8" points="212.5,11.0942,309.5,11.0942,321.5,30.3013,309.5,49.5083,212.5,49.5083,200.5,30.3013,212.5,11.0942" style="stroke: #383838; stroke-width: 1.5;"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="21" x="265" y="59.7188">Yes</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="216.5" y="21.3047"/><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="97" x="212.5" y="34.1094">Testing enabled?</text><text fill="#000000" font-family="sans-serif" font-size="11" lengthAdjust="spacingAndGlyphs" textLength="0" x="216.5" y="46.9141"/><polygon fill="#F8F8F8" points="261,248.5049,273,260.5049,261,272.5049,249,260.5049,261,248.5049" style="stroke: #383838; stroke-width: 1.5;"/><rect fill="#F8F8F8" height="33.9688" rx="12.5" ry="12.5" style="stroke: #383838; stroke-width: 1.5;" width="161" x="180.5" y="292.5049"/><text fill="#000000" font-family="sans-serif" font-size="12" font-weight="bold" lengthAdjust="spacingAndGlyphs" textLength="141" x="190.5" y="313.6436">Return live instance</text><line style="stroke: #383838; stroke-width: 1.5;" x1="78.5" x2="78.5" y1="149.7314" y2="182.5361"/><polygon fill="#383838" points="74.5,172.5361,78.5,182.5361,82.5,172.5361,78.5,176.5361" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="215.5" x2="215.5" y1="149.7314" y2="182.5361"/><polygon fill="#383838" points="211.5,172.5361,215.5,182.5361,219.5,172.5361,215.5,176.5361" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="373" x2="373" y1="149.7314" y2="182.5361"/><polygon fill="#383838" points="369,172.5361,373,182.5361,377,172.5361,373,176.5361" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="137" x2="157" y1="130.5244" y2="130.5244"/><polygon fill="#383838" points="147,126.5244,157,130.5244,147,134.5244,151,130.5244" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="274" x2="294" y1="130.5244" y2="130.5244"/><polygon fill="#383838" points="284,126.5244,294,130.5244,284,134.5244,288,130.5244" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #000000; stroke-width: 1.5; stroke-dasharray: 1.0,3.0;" x1="261" x2="261" y1="49.5083" y2="96.3174"/><line style="stroke: #000000; stroke-width: 1.5; stroke-dasharray: 1.0,3.0;" x1="261" x2="78.5" y1="96.3174" y2="96.3174"/><line style="stroke: #000000; stroke-width: 1.5; stroke-dasharray: 1.0,3.0;" x1="78.5" x2="78.5" y1="96.3174" y2="111.3174"/><polygon fill="#000000" points="74.5,101.3174,78.5,111.3174,82.5,101.3174,78.5,105.3174" style="stroke: #000000; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="452" x2="457" y1="130.5244" y2="130.5244"/><line style="stroke: #383838; stroke-width: 1.5;" x1="457" x2="457" y1="130.5244" y2="221.5049"/><line style="stroke: #383838; stroke-width: 1.5;" x1="457" x2="261" y1="221.5049" y2="221.5049"/><line style="stroke: #383838; stroke-width: 1.5;" x1="261" x2="261" y1="221.5049" y2="248.5049"/><polygon fill="#383838" 
points="257,238.5049,261,248.5049,265,238.5049,261,242.5049" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="321.5" x2="474" y1="30.3013" y2="30.3013"/><polygon fill="#383838" points="470,167.1592,474,177.1592,478,167.1592,474,171.1592" style="stroke: #383838; stroke-width: 1.5;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="474" x2="474" y1="30.3013" y2="260.5049"/><line style="stroke: #383838; stroke-width: 1.5;" x1="474" x2="273" y1="260.5049" y2="260.5049"/><polygon fill="#383838" points="283,256.5049,273,260.5049,283,264.5049,279,260.5049" style="stroke: #383838; stroke-width: 1.0;"/><line style="stroke: #383838; stroke-width: 1.5;" x1="261" x2="261" y1="272.5049" y2="292.5049"/><polygon fill="#383838" points="257,282.5049,261,292.5049,265,282.5049,261,286.5049" style="stroke: #383838; stroke-width: 1.0;"/><!--MD5=[cbe3f124b10974ef39a80a87295eedae]
-@startuml

-scale 1

-

-skinparam shadowing false

-skinparam monochrome true

-

-if (\nTesting enabled?\n) then (Yes)

-  -[#black,dotted]->

-

-partition "//Debug builds only//" #ddd {

-  if (\nMocks installed?\n) then (Yes)

-    :**Return mocks**;

-    detach

-  elseif (\nMocks required?\n) then (Yes)

-    #ffa0a0:throw;

-    detach

-  elseif (\nNative libraries loaded?\n) then (No)

-    #ffa0a0:throw;

-    detach

-  endif

-}

-

-endif

-

-:**Return live instance**;

-

-@enduml

-
-PlantUML version 1.2020.02(Sun Mar 01 02:22:07 PST 2020)
-(GPL source distribution)
-Java Runtime: OpenJDK Runtime Environment
-JVM: OpenJDK 64-Bit Server VM
-Java Version: 11.0.16+8-post-Debian-1
-Operating System: Linux
-Default Encoding: UTF-8
-Language: en
-Country: US
---></g></svg>
\ No newline at end of file
diff --git a/base/android/jni_generator/jni_generator_helper.h b/base/android/jni_generator/jni_generator_helper.h
deleted file mode 100644
index 59bdabb..0000000
--- a/base/android/jni_generator/jni_generator_helper.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ANDROID_JNI_GENERATOR_JNI_GENERATOR_HELPER_H_
-#define BASE_ANDROID_JNI_GENERATOR_JNI_GENERATOR_HELPER_H_
-
-#include <jni.h>
-
-#include "base/android/jni_android.h"
-#include "base/android/jni_int_wrapper.h"
-#include "base/android/scoped_java_ref.h"
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/memory/raw_ptr.h"
-#include "build/build_config.h"
-
-// Project-specific macros used by the header files generated by
-// jni_generator.py. Different projects can then specify their own
-// implementation for this file.
-#define CHECK_NATIVE_PTR(env, jcaller, native_ptr, method_name, ...) \
-  DCHECK(native_ptr) << method_name;
-
-#define CHECK_CLAZZ(env, jcaller, clazz, ...) DCHECK(clazz);
-
-#if defined(ARCH_CPU_X86)
-// Dalvik JIT generated code doesn't guarantee 16-byte stack alignment on
-// x86 - use force_align_arg_pointer to realign the stack at the JNI
-// boundary. crbug.com/655248
-#define JNI_GENERATOR_EXPORT \
-  extern "C" __attribute__((visibility("default"), force_align_arg_pointer))
-#else
-#define JNI_GENERATOR_EXPORT extern "C" __attribute__((visibility("default")))
-#endif
-
-// Used to export JNI registration functions.
-#if defined(COMPONENT_BUILD)
-#define JNI_REGISTRATION_EXPORT __attribute__((visibility("default")))
-#else
-#define JNI_REGISTRATION_EXPORT
-#endif
-
-namespace jni_generator {
-
-inline void HandleRegistrationError(JNIEnv* env,
-                                    jclass clazz,
-                                    const char* filename) {
-  LOG(ERROR) << "RegisterNatives failed in " << filename;
-}
-
-inline void CheckException(JNIEnv* env) {
-  base::android::CheckException(env);
-}
-
-// A 32-bit number could collide with an address already on the stack. A random
-// 64-bit marker is much less likely to already be present on the stack.
-constexpr uint64_t kJniStackMarkerValue = 0xbdbdef1bebcade1b;
-
-// Context about the JNI call, with exceptions left unchecked, to be stored on
-// the stack.
-struct BASE_EXPORT JniJavaCallContextUnchecked {
-  ALWAYS_INLINE JniJavaCallContextUnchecked() {
-// TODO(ssid): Implement for other architectures.
-#if defined(__arm__) || defined(__aarch64__)
-    // This assumes that this method does not increment the stack pointer.
-    asm volatile("mov %0, sp" : "=r"(sp));
-#else
-    sp = 0;
-#endif
-  }
-
-  // Force no inline to reduce code size.
-  template <base::android::MethodID::Type type>
-  NOINLINE void Init(JNIEnv* env,
-                     jclass clazz,
-                     const char* method_name,
-                     const char* jni_signature,
-                     std::atomic<jmethodID>* atomic_method_id) {
-    env1 = env;
-
-    // Make sure compiler doesn't optimize out the assignment.
-    memcpy(&marker, &kJniStackMarkerValue, sizeof(kJniStackMarkerValue));
-    // Gets PC of the calling function.
-    pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-
-    method_id = base::android::MethodID::LazyGet<type>(
-        env, clazz, method_name, jni_signature, atomic_method_id);
-  }
-
-  NOINLINE ~JniJavaCallContextUnchecked() {
-    // Reset so that spurious marker finds are avoided.
-    memset(&marker, 0, sizeof(marker));
-  }
-
-  uint64_t marker;
-  uintptr_t sp;
-  uintptr_t pc;
-
-  raw_ptr<JNIEnv> env1;
-  jmethodID method_id;
-};
-
-// Context about the JNI call, with exceptions checked, to be stored on the
-// stack.
-struct BASE_EXPORT JniJavaCallContextChecked {
-  // Force no inline to reduce code size.
-  template <base::android::MethodID::Type type>
-  NOINLINE void Init(JNIEnv* env,
-                     jclass clazz,
-                     const char* method_name,
-                     const char* jni_signature,
-                     std::atomic<jmethodID>* atomic_method_id) {
-    base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id);
-    // Reset |pc| to correct caller.
-    base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-  }
-
-  NOINLINE ~JniJavaCallContextChecked() {
-    jni_generator::CheckException(base.env1);
-  }
-
-  JniJavaCallContextUnchecked base;
-};
-
-static_assert(sizeof(JniJavaCallContextChecked) ==
-                  sizeof(JniJavaCallContextUnchecked),
-              "Stack unwinder cannot work with structs of different sizes.");
-
-}  // namespace jni_generator
-
-#endif  // BASE_ANDROID_JNI_GENERATOR_JNI_GENERATOR_HELPER_H_
diff --git a/base/android/jni_generator/sample_entry_point.cc b/base/android/jni_generator/sample_entry_point.cc
deleted file mode 100644
index cc2d50b..0000000
--- a/base/android/jni_generator/sample_entry_point.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/android/jni_android.h"
-#include "base/android/jni_generator/jni_registration_generated.h"
-#include "base/android/jni_utils.h"
-
-// This is called by the VM when the shared library is first loaded.
-JNI_EXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved) {
-  // By default, all JNI methods are registered. However, since render processes
-  // don't need very much Java code, we enable selective JNI registration on the
-  // Java side and only register a subset of JNI methods.
-  base::android::InitVM(vm);
-  JNIEnv* env = base::android::AttachCurrentThread();
-
-  if (!RegisterNatives(env)) {
-    return -1;
-  }
-  return JNI_VERSION_1_4;
-}
diff --git a/base/android/jni_generator/sample_for_tests.cc b/base/android/jni_generator/sample_for_tests.cc
deleted file mode 100644
index aeedf9f..0000000
--- a/base/android/jni_generator/sample_for_tests.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <iostream>
-
-#include "base/android/jni_generator/sample_for_tests.h"
-
-#include "base/android/jni_android.h"
-#include "base/android/jni_string.h"
-#include "base/android/scoped_java_ref.h"
-// Generated file for JNI bindings from C++ to Java @CalledByNative methods.
-// Only to be included in one .cc file.
-// Name is based on the java file name: *.java -> jni/*_jni.h
-#include "base/android/jni_generator/jni_sample_header/SampleForAnnotationProcessor_jni.h"  // Generated by JNI.
-#include "base/android/jni_generator/jni_sample_header/SampleForTests_jni.h"  // Generated by JNI.
-
-using base::android::AttachCurrentThread;
-using base::android::ConvertJavaStringToUTF8;
-using base::android::ConvertUTF8ToJavaString;
-using base::android::ScopedJavaLocalRef;
-
-namespace base {
-namespace android {
-
-jdouble CPPClass::InnerClass::MethodOtherP0(
-    JNIEnv* env,
-    const JavaParamRef<jobject>& caller) {
-  return 0.0;
-}
-
-CPPClass::CPPClass() {
-}
-
-CPPClass::~CPPClass() {
-}
-
-// static
-void CPPClass::Destroy(JNIEnv* env, const JavaParamRef<jobject>& caller) {
-  delete this;
-}
-
-jint CPPClass::Method(JNIEnv* env, const JavaParamRef<jobject>& caller) {
-  return 0;
-}
-
-void CPPClass::AddStructB(JNIEnv* env,
-                          const JavaParamRef<jobject>& caller,
-                          const JavaParamRef<jobject>& structb) {
-  long key = Java_InnerStructB_getKey(env, structb);
-  std::string value =
-      ConvertJavaStringToUTF8(env, Java_InnerStructB_getValue(env, structb));
-  map_[key] = value;
-}
-
-void CPPClass::IterateAndDoSomethingWithStructB(
-    JNIEnv* env,
-    const JavaParamRef<jobject>& caller) {
-  // Iterate over the elements and do something with them.
-  for (std::map<long, std::string>::const_iterator it = map_.begin();
-       it != map_.end(); ++it) {
-    long key = it->first;
-    std::string value = it->second;
-    std::cout << key << value;
-  }
-  map_.clear();
-}
-
-ScopedJavaLocalRef<jstring> CPPClass::ReturnAString(
-    JNIEnv* env,
-    const JavaParamRef<jobject>& caller) {
-  return ConvertUTF8ToJavaString(env, "test");
-}
-
-// Static free functions declared and called directly from java.
-static jlong JNI_SampleForTests_Init(JNIEnv* env,
-                                     const JavaParamRef<jobject>& caller,
-                                     const JavaParamRef<jstring>& param) {
-  return 0;
-}
-
-static jdouble JNI_SampleForTests_GetDoubleFunction(
-    JNIEnv*,
-    const JavaParamRef<jobject>&) {
-  return 0;
-}
-
-static jfloat JNI_SampleForTests_GetFloatFunction(JNIEnv*) {
-  return 0;
-}
-
-static void JNI_SampleForTests_SetNonPODDatatype(JNIEnv*,
-                                                 const JavaParamRef<jobject>&,
-                                                 const JavaParamRef<jobject>&) {
-}
-
-static ScopedJavaLocalRef<jobject> JNI_SampleForTests_GetNonPODDatatype(
-    JNIEnv*,
-    const JavaParamRef<jobject>&) {
-  return ScopedJavaLocalRef<jobject>();
-}
-
-static ScopedJavaLocalRef<jstring> JNI_SampleForTests_GetNonPODDatatype(
-    JNIEnv*,
-    const JavaParamRef<jstring>&) {
-  return ScopedJavaLocalRef<jstring>();
-}
-
-static ScopedJavaLocalRef<jobjectArray> JNI_SampleForTests_GetNonPODDatatype(
-    JNIEnv*,
-    const JavaParamRef<jobjectArray>&) {
-  return ScopedJavaLocalRef<jobjectArray>();
-}
-
-static base::android::ScopedJavaLocalRef<jclass> JNI_SampleForTests_GetClass(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jclass>& arg0) {
-  return ScopedJavaLocalRef<jclass>();
-}
-
-static base::android::ScopedJavaLocalRef<jthrowable>
-JNI_SampleForTests_GetThrowable(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jthrowable>& arg0) {
-  return ScopedJavaLocalRef<jthrowable>();
-}
-
-} // namespace android
-} // namespace base
-
-// Proxy natives.
-static void JNI_SampleForAnnotationProcessor_Foo(JNIEnv* env) {}
-
-static base::android::ScopedJavaLocalRef<jobject>
-JNI_SampleForAnnotationProcessor_Bar(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jobject>& sample) {
-  return JNI_SampleForTests_GetNonPODDatatype(env, sample);
-}
-
-static base::android::ScopedJavaLocalRef<jstring>
-JNI_SampleForAnnotationProcessor_RevString(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jstring>& stringToReverse) {
-  return JNI_SampleForTests_GetNonPODDatatype(env, stringToReverse);
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_SendToNative(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jobjectArray>& strs) {
-  return JNI_SampleForTests_GetNonPODDatatype(env, strs);
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_SendSamplesToNative(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jobjectArray>& strs) {
-  return JNI_SampleForTests_GetNonPODDatatype(env, strs);
-}
-
-static jboolean JNI_SampleForAnnotationProcessor_HasPhalange(JNIEnv* env) {
-  return jboolean(true);
-}
-
-static base::android::ScopedJavaLocalRef<jintArray>
-JNI_SampleForAnnotationProcessor_TestAllPrimitives(
-    JNIEnv* env,
-    jint zint,
-    const base::android::JavaParamRef<jintArray>& ints,
-    jlong zlong,
-    const base::android::JavaParamRef<jlongArray>& longs,
-    jshort zshort,
-    const base::android::JavaParamRef<jshortArray>& shorts,
-    jchar zchar,
-    const base::android::JavaParamRef<jcharArray>& chars,
-    jbyte zbyte,
-    const base::android::JavaParamRef<jbyteArray>& bytes,
-    jdouble zdouble,
-    const base::android::JavaParamRef<jdoubleArray>& doubles,
-    jfloat zfloat,
-    const base::android::JavaParamRef<jfloatArray>& floats,
-    jboolean zbool,
-    const base::android::JavaParamRef<jbooleanArray>& bools) {
-  return ScopedJavaLocalRef<jintArray>(ints);
-}
-
-static void JNI_SampleForAnnotationProcessor_TestSpecialTypes(
-    JNIEnv* env,
-    const base::android::JavaParamRef<jclass>& clazz,
-    const base::android::JavaParamRef<jobjectArray>& classes,
-    const base::android::JavaParamRef<jthrowable>& throwable,
-    const base::android::JavaParamRef<jobjectArray>& throwables,
-    const base::android::JavaParamRef<jstring>& string,
-    const base::android::JavaParamRef<jobjectArray>& strings,
-    const base::android::JavaParamRef<jobject>& tStruct,
-    const base::android::JavaParamRef<jobjectArray>& structs,
-    const base::android::JavaParamRef<jobject>& obj,
-    const base::android::JavaParamRef<jobjectArray>& objects) {}
-
-static base::android::ScopedJavaLocalRef<jthrowable>
-JNI_SampleForAnnotationProcessor_ReturnThrowable(JNIEnv* env) {
-  return ScopedJavaLocalRef<jthrowable>();
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_ReturnThrowables(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobjectArray>();
-}
-
-static base::android::ScopedJavaLocalRef<jclass>
-JNI_SampleForAnnotationProcessor_ReturnClass(JNIEnv* env) {
-  return ScopedJavaLocalRef<jclass>();
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_ReturnClasses(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobjectArray>();
-}
-
-static base::android::ScopedJavaLocalRef<jstring>
-JNI_SampleForAnnotationProcessor_ReturnString(JNIEnv* env) {
-  return ScopedJavaLocalRef<jstring>();
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_ReturnStrings(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobjectArray>();
-}
-
-static base::android::ScopedJavaLocalRef<jobject>
-JNI_SampleForAnnotationProcessor_ReturnStruct(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobject>();
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_ReturnStructs(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobjectArray>();
-}
-
-static base::android::ScopedJavaLocalRef<jobject>
-JNI_SampleForAnnotationProcessor_ReturnObject(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobject>();
-}
-
-static base::android::ScopedJavaLocalRef<jobjectArray>
-JNI_SampleForAnnotationProcessor_ReturnObjects(JNIEnv* env) {
-  return ScopedJavaLocalRef<jobjectArray>();
-}
-
-int main() {
-  // In a regular application, you'd call AttachCurrentThread(). This sample
-  // does not yet link against all the libraries.
-  JNIEnv* env = /* AttachCurrentThread() */ NULL;
-
-  // This is how you call a java static method from C++.
-  bool foo = base::android::Java_SampleForTests_staticJavaMethod(env);
-
-  // This is how you call a java method from C++. Note that you must have
-  // obtained the jobject somehow.
-  ScopedJavaLocalRef<jobject> my_java_object;
-  int bar = base::android::Java_SampleForTests_javaMethod(
-      env, my_java_object, 1, 2);
-
-  base::android::Java_SampleForTests_methodWithGenericParams(
-      env, my_java_object, nullptr, nullptr);
-
-  // This is how you call a java constructor method from C++.
-  ScopedJavaLocalRef<jobject> my_created_object =
-      base::android::Java_SampleForTests_Constructor(env, 1, 2);
-
-  std::cout << foo << bar;
-
-  for (int i = 0; i < 10; ++i) {
-    // Creates a "struct" that will then be used by the java side.
-    ScopedJavaLocalRef<jobject> struct_a =
-        base::android::Java_InnerStructA_create(
-            env, 0, 1, ConvertUTF8ToJavaString(env, "test"));
-    base::android::Java_SampleForTests_addStructA(env, my_java_object,
-                                                  struct_a);
-  }
-  base::android::Java_SampleForTests_iterateAndDoSomething(env, my_java_object);
-  base::android::Java_SampleForTests_packagePrivateJavaMethod(env,
-                                                              my_java_object);
-  base::android::Java_SampleForTests_methodThatThrowsException(env,
-                                                               my_java_object);
-  base::android::Java_SampleForTests_javaMethodWithAnnotatedParam(
-      env, my_java_object, 42, 13, -1, 99);
-
-  base::android::Java_SampleForTests_getInnerInterface(env);
-  base::android::Java_SampleForTests_getInnerEnum(env);
-  base::android::Java_SampleForTests_getInnerEnum(env, 0);
-
-  base::android::ScopedJavaLocalRef<jthrowable> throwable;
-  throwable = base::android::Java_SampleForTests_getThrowable(
-      env, my_java_object, throwable);
-
-  base::android::ScopedJavaLocalRef<jclass> clazz;
-  clazz =
-      base::android::Java_SampleForTests_getClass(env, my_java_object, clazz);
-
-  return 0;
-}
diff --git a/base/android/jni_generator/sample_for_tests.h b/base/android/jni_generator/sample_for_tests.h
deleted file mode 100644
index 2006332..0000000
--- a/base/android/jni_generator/sample_for_tests.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ANDROID_JNI_GENERATOR_SAMPLE_FOR_TESTS_H_
-#define BASE_ANDROID_JNI_GENERATOR_SAMPLE_FOR_TESTS_H_
-
-#include <jni.h>
-#include <map>
-#include <string>
-
-#include "base/android/jni_android.h"
-
-namespace base {
-namespace android {
-
-// This file is used to:
-// - document the best practices and guidelines on JNI usage.
-// - ensure sample_for_tests_jni.h compiles and the functions declared in it
-//   work as expected.
-//
-// Methods are called directly from Java. More documentation in
-// SampleForTests.java. See BUILD.gn for the build rules necessary for JNI
-// to be used in an APK.
-//
-// For C++ to access Java methods:
-// - GN Build must be configured to generate bindings:
-//  # Add import at top of file:
-//  if (is_android) {
-//    import("//build/config/android/rules.gni")  # For generate_jni().
-//  }
-//  # ...
-//  # An example target that will rely on JNI:
-//  component("foo") {
-//    # ... normal sources, defines, deps.
-//    #     For each jni generated .java -> .h header file in jni_headers
-//    #     target there will be a single .cc file here that includes it.
-//    #
-//    # Add a dep for JNI:
-//    if (is_android) {
-//      deps += [ ":foo_jni" ]
-//    }
-//  }
-//  # ...
-//  # Create target for JNI:
-//  if (is_android) {
-//    generate_jni("jni_headers") {
-//      sources = [
-//        "java/src/org/chromium/example/jni_generator/SampleForTests.java",
-//      ]
-//    }
-//    android_library("java") {
-//      sources = [
-//        "java/src/org/chromium/example/jni_generator/SampleForTests.java",
-//        "java/src/org/chromium/example/jni_generator/NonJniFile.java",
-//      ]
-//    }
-//  }
-// The build rules above are generally all that's needed when adding new
-// JNI methods/files. For a full GN example, see
-// base/android/jni_generator/BUILD.gn
-//
-// For C++ methods to be exposed to Java:
-// - The Java class must be part of an android_apk target that depends on
-//   a generate_jni_registration target. This generate_jni_registration target
-//   automatically generates all necessary registration functions. The
-//   generated header file exposes RegisterNatives() which registers all
-//   methods.
-//
-class CPPClass {
- public:
-  CPPClass();
-
-  CPPClass(const CPPClass&) = delete;
-  CPPClass& operator=(const CPPClass&) = delete;
-
-  ~CPPClass();
-
-  // Java @CalledByNative methods are implicitly available to C++ via the
-  // _jni.h file included in the .cc file.
-
-  class InnerClass {
-   public:
-    jdouble MethodOtherP0(JNIEnv* env,
-                          const base::android::JavaParamRef<jobject>& caller);
-  };
-
-  void Destroy(JNIEnv* env, const base::android::JavaParamRef<jobject>& caller);
-
-  jint Method(JNIEnv* env, const base::android::JavaParamRef<jobject>& caller);
-
-  void AddStructB(JNIEnv* env,
-                  const base::android::JavaParamRef<jobject>& caller,
-                  const base::android::JavaParamRef<jobject>& structb);
-
-  void IterateAndDoSomethingWithStructB(
-      JNIEnv* env,
-      const base::android::JavaParamRef<jobject>& caller);
-
-  base::android::ScopedJavaLocalRef<jstring> ReturnAString(
-      JNIEnv* env,
-      const base::android::JavaParamRef<jobject>& caller);
-
- private:
-  std::map<long, std::string> map_;
-};
-
-}  // namespace android
-}  // namespace base
-
-#endif  // BASE_ANDROID_JNI_GENERATOR_SAMPLE_FOR_TESTS_H_
diff --git a/base/android/jni_int_wrapper.h b/base/android/jni_int_wrapper.h
deleted file mode 100644
index 5c0dcf4..0000000
--- a/base/android/jni_int_wrapper.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ANDROID_JNI_INT_WRAPPER_H_
-#define BASE_ANDROID_JNI_INT_WRAPPER_H_
-
-// Wrapper used to receive int when calling Java from native.
-// The wrapper disallows automatic conversion of long to int.
-// This is to avoid a common anti-pattern where a Java int is used
-// to receive a native pointer. Please use a Java long to receive
-// native pointers, so that the code works on both 32-bit and 64-bit
-// platforms. Note the wrapper allows other lossy conversions into
-// jint that could be considered anti-patterns, such as from size_t.
-
-// Checking is only done in debugging builds.
-
-#ifdef NDEBUG
-
-typedef jint JniIntWrapper;
-
-// This inline is sufficiently trivial that it does not change the
-// final code generated by g++.
-inline jint as_jint(JniIntWrapper wrapper) {
-  return wrapper;
-}
-
-#else
-
-class JniIntWrapper {
- public:
-  JniIntWrapper() : i_(0) {}
-  JniIntWrapper(int i) : i_(i) {}
-  JniIntWrapper(const JniIntWrapper& ji) : i_(ji.i_) {}
-  template <class T> JniIntWrapper(const T& t) : i_(t) {}
-  jint as_jint() const { return i_; }
- private:
-  // If you get an "is private" error at the line below it is because you used
-  // an implicit conversion to convert a long to an int when calling Java.
-  // We disallow this, as a common anti-pattern allows converting a native
-  // pointer (intptr_t) to a Java int. Please use a Java long to represent
-  // a native pointer. If you want a lossy conversion, please use an
-  // explicit conversion in your C++ code. Note an error is only seen when
-  // compiling on a 64-bit platform, as intptr_t is indistinguishable from
-  // int on 32-bit platforms.
-  JniIntWrapper(long);
-  jint i_;
-};
-
-inline jint as_jint(const JniIntWrapper& wrapper) {
-  return wrapper.as_jint();
-}
-
-#endif  // NDEBUG
-
-#endif  // BASE_ANDROID_JNI_INT_WRAPPER_H_
diff --git a/base/android/junit/src/org/chromium/base/ApplicationStatusTest.java b/base/android/junit/src/org/chromium/base/ApplicationStatusTest.java
index 71d1cbb..d09c7ff 100644
--- a/base/android/junit/src/org/chromium/base/ApplicationStatusTest.java
+++ b/base/android/junit/src/org/chromium/base/ApplicationStatusTest.java
@@ -39,7 +39,9 @@
 
 /** Unit tests for {@link ApplicationStatus}. */
 @RunWith(BaseRobolectricTestRunner.class)
-@Config(manifest = Config.NONE, shadows = {ApplicationStatusTest.TrackingShadowActivity.class})
+@Config(
+        manifest = Config.NONE,
+        shadows = {ApplicationStatusTest.TrackingShadowActivity.class})
 public class ApplicationStatusTest {
     private static class WindowCallbackWrapper implements Window.Callback {
         final Window.Callback mWrapped;
@@ -243,20 +245,24 @@
 
     @Test
     public void testSingleWrappedCallback() {
-        Assert.assertTrue(ApplicationStatus.reachesWindowCallback(
-                new WindowCallbackWrapper(createWindowCallbackProxy())));
+        Assert.assertTrue(
+                ApplicationStatus.reachesWindowCallback(
+                        new WindowCallbackWrapper(createWindowCallbackProxy())));
     }
 
     @Test
     public void testDoubleWrappedCallback() {
-        Assert.assertTrue(ApplicationStatus.reachesWindowCallback(
-                new WindowCallbackWrapper(new WindowCallbackWrapper(createWindowCallbackProxy()))));
+        Assert.assertTrue(
+                ApplicationStatus.reachesWindowCallback(
+                        new WindowCallbackWrapper(
+                                new WindowCallbackWrapper(createWindowCallbackProxy()))));
     }
 
     @Test
     public void testSubclassWrappedCallback() {
-        Assert.assertTrue(ApplicationStatus.reachesWindowCallback(
-                new SubclassedCallbackWrapper(createWindowCallbackProxy())));
+        Assert.assertTrue(
+                ApplicationStatus.reachesWindowCallback(
+                        new SubclassedCallbackWrapper(createWindowCallbackProxy())));
     }
 
     @Test
diff --git a/base/android/junit/src/org/chromium/base/BuildInfoTest.java b/base/android/junit/src/org/chromium/base/BuildInfoTest.java
index be8b285..0576e8f 100644
--- a/base/android/junit/src/org/chromium/base/BuildInfoTest.java
+++ b/base/android/junit/src/org/chromium/base/BuildInfoTest.java
@@ -1,4 +1,4 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/android/junit/src/org/chromium/base/CallbackControllerTest.java b/base/android/junit/src/org/chromium/base/CallbackControllerTest.java
index b7dea52..55dfce9 100644
--- a/base/android/junit/src/org/chromium/base/CallbackControllerTest.java
+++ b/base/android/junit/src/org/chromium/base/CallbackControllerTest.java
@@ -15,17 +15,14 @@
 
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * Test class for {@link CallbackController}, which also describes typical usage.
- */
+/** Test class for {@link CallbackController}, which also describes typical usage. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class CallbackControllerTest {
-    /**
-     * Callbacks in this test act on {@code CallbackTarget}.
-     */
+    /** Callbacks in this test act on {@code CallbackTarget}. */
     private static class CallbackTarget {
         public void runnableTarget() {}
+
         public void callbackTarget(boolean arg) {}
     }
 
diff --git a/base/android/junit/src/org/chromium/base/CommandLineTest.java b/base/android/junit/src/org/chromium/base/CommandLineTest.java
new file mode 100644
index 0000000..d305683
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/CommandLineTest.java
@@ -0,0 +1,200 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import androidx.test.filters.SmallTest;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+import org.chromium.base.test.util.Feature;
+
+import java.util.Map;
+
+/** Tests for {@link CommandLine}. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE)
+public class CommandLineTest {
+    // A reference command line. Note that switch2 is [brea\d], switch3 is [and "butter"],
+    // and switch4 is [a "quoted" 'food'!]
+    static final String[] INIT_SWITCHES = {
+        "init_command",
+        "--SWITCH",
+        "Arg",
+        "--switch2=brea\\d",
+        "--switch3=and \"butter\"",
+        "--switch4=a \"quoted\" 'food'!",
+        "--",
+        "--actually_an_arg"
+    };
+
+    // The same command line, but in quoted string format.
+    static final char[] INIT_SWITCHES_BUFFER =
+            ("init_command --SWITCH Arg --switch2=brea\\d --switch3=\"and \\\"butt\"er\\\"   "
+                            + "--switch4='a \"quoted\" \\'food\\'!' "
+                            + "-- --actually_an_arg")
+                    .toCharArray();
+
+    static final String CL_ADDED_SWITCH = "zappo-dappo-doggy-trainer";
+    static final String CL_ADDED_SWITCH_2 = "username";
+    static final String CL_ADDED_VALUE_2 = "bozo";
+
+    private CommandLine mCommandLine;
+
+    @Before
+    public void setUp() {
+        mCommandLine = new CommandLine.JavaCommandLine(null);
+    }
+
+    void checkInitSwitches() {
+        CommandLine cl = mCommandLine;
+        Assert.assertFalse(cl.hasSwitch("init_command"));
+        Assert.assertFalse(cl.hasSwitch("switch"));
+        Assert.assertTrue(cl.hasSwitch("SWITCH"));
+        Assert.assertFalse(cl.hasSwitch("--SWITCH"));
+        Assert.assertFalse(cl.hasSwitch("Arg"));
+        Assert.assertFalse(cl.hasSwitch("actually_an_arg"));
+        Assert.assertEquals("brea\\d", cl.getSwitchValue("switch2"));
+        Assert.assertEquals("and \"butter\"", cl.getSwitchValue("switch3"));
+        Assert.assertEquals("a \"quoted\" 'food'!", cl.getSwitchValue("switch4"));
+        Assert.assertNull(cl.getSwitchValue("SWITCH"));
+        Assert.assertNull(cl.getSwitchValue("non-existent"));
+    }
+
+    void checkSettingThenGettingThenRemoving() {
+        CommandLine cl = mCommandLine;
+
+        // Add a plain switch.
+        Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH));
+        cl.appendSwitch(CL_ADDED_SWITCH);
+        Assert.assertTrue(cl.hasSwitch(CL_ADDED_SWITCH));
+
+        // Add a switch paired with a value.
+        Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH_2));
+        Assert.assertNull(cl.getSwitchValue(CL_ADDED_SWITCH_2));
+        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, CL_ADDED_VALUE_2);
+        Assert.assertEquals(CL_ADDED_VALUE_2, cl.getSwitchValue(CL_ADDED_SWITCH_2));
+
+        // Update a switch's value.
+        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, "updatedValue");
+        Assert.assertEquals("updatedValue", cl.getSwitchValue(CL_ADDED_SWITCH_2));
+
+        // Append a few new things.
+        final String[] switchesAndArgs = {"thing", "--superfast", "--speed=turbo"};
+        Assert.assertFalse(cl.hasSwitch("thing"));
+        Assert.assertFalse(cl.hasSwitch("superfast"));
+        Assert.assertNull(cl.getSwitchValue("speed"));
+        cl.appendSwitchesAndArguments(switchesAndArgs);
+        Assert.assertFalse(cl.hasSwitch("thing"));
+        Assert.assertFalse(cl.hasSwitch("command"));
+        Assert.assertTrue(cl.hasSwitch("superfast"));
+        Assert.assertEquals("turbo", cl.getSwitchValue("speed"));
+
+        // Get all switches
+        Map<String, String> switches = cl.getSwitches();
+        Assert.assertTrue(switches.containsKey(CL_ADDED_SWITCH));
+        Assert.assertTrue(switches.containsKey(CL_ADDED_SWITCH_2));
+
+        // Remove a plain switch.
+        cl.removeSwitch(CL_ADDED_SWITCH);
+        Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH));
+
+        // Remove a switch with a value.
+        cl.removeSwitch(CL_ADDED_SWITCH_2);
+        Assert.assertFalse(cl.hasSwitch(CL_ADDED_SWITCH_2));
+        Assert.assertNull(cl.getSwitchValue(CL_ADDED_SWITCH_2));
+
+        // Get all switches again to verify it updated.
+        switches = cl.getSwitches();
+        Assert.assertFalse(switches.containsKey(CL_ADDED_SWITCH));
+        Assert.assertFalse(switches.containsKey(CL_ADDED_SWITCH_2));
+    }
+
+    void checkTokenizer(String[] expected, String toParse) {
+        String[] actual = CommandLine.tokenizeQuotedArguments(toParse.toCharArray());
+        Assert.assertEquals(expected.length, actual.length);
+        for (int i = 0; i < expected.length; ++i) {
+            Assert.assertEquals("comparing element " + i, expected[i], actual[i]);
+        }
+    }
+
+    @Test
+    @SmallTest
+    @Feature({"Android-AppBase"})
+    public void testJavaInitialization() {
+        mCommandLine = new CommandLine.JavaCommandLine(INIT_SWITCHES);
+        checkInitSwitches();
+        checkSettingThenGettingThenRemoving();
+    }
+
+    @Test
+    @SmallTest
+    @Feature({"Android-AppBase"})
+    public void testBufferInitialization() {
+        mCommandLine =
+                new CommandLine.JavaCommandLine(
+                        CommandLine.tokenizeQuotedArguments(INIT_SWITCHES_BUFFER));
+        checkInitSwitches();
+        checkSettingThenGettingThenRemoving();
+    }
+
+    @Test
+    @SmallTest
+    @Feature({"Android-AppBase"})
+    public void testArgumentTokenizer() {
+        String toParse = " a\"\\bc de\\\"f g\"\\h ij    k\" \"lm";
+        String[] expected = {"a\\bc de\"f g\\h", "ij", "k lm"};
+        checkTokenizer(expected, toParse);
+
+        toParse = "";
+        expected = new String[0];
+        checkTokenizer(expected, toParse);
+
+        toParse = " \t\n";
+        checkTokenizer(expected, toParse);
+
+        toParse =
+                " \"a'b\" 'c\"d' \"e\\\"f\" 'g\\'h' \"i\\'j\" 'k\\\"l'" + " m\"n\\'o\"p q'r\\\"s't";
+        expected =
+                new String[] {
+                    "a'b", "c\"d", "e\"f", "g'h", "i\\'j", "k\\\"l", "mn\\'op", "qr\\\"st"
+                };
+        checkTokenizer(expected, toParse);
+    }
+
+    @Test
+    @SmallTest
+    @Feature({"Android-AppBase"})
+    public void testUpdatingArgList() {
+        CommandLine cl = mCommandLine;
+        cl.appendSwitch(CL_ADDED_SWITCH);
+        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, CL_ADDED_VALUE_2);
+        cl.appendSwitchWithValue(CL_ADDED_SWITCH_2, "updatedValue");
+
+        final String[] expectedValueForBothSwitches = {
+            "",
+            "--" + CL_ADDED_SWITCH,
+            "--" + CL_ADDED_SWITCH_2 + "=" + CL_ADDED_VALUE_2,
+            "--" + CL_ADDED_SWITCH_2 + "=updatedValue",
+        };
+        Assert.assertArrayEquals(
+                "Appending a switch multiple times should add multiple args",
+                expectedValueForBothSwitches,
+                mCommandLine.getCommandLineArguments());
+
+        cl.removeSwitch(CL_ADDED_SWITCH_2);
+        final String[] expectedValueWithSecondSwitchRemoved = {
+            "", "--" + CL_ADDED_SWITCH,
+        };
+        Assert.assertArrayEquals(
+                "Removing a switch should remove all its args",
+                expectedValueWithSecondSwitchRemoved,
+                mCommandLine.getCommandLineArguments());
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/FeatureListUnitTest.java b/base/android/junit/src/org/chromium/base/FeatureListUnitTest.java
index 6a706db..3cd7d5c 100644
--- a/base/android/junit/src/org/chromium/base/FeatureListUnitTest.java
+++ b/base/android/junit/src/org/chromium/base/FeatureListUnitTest.java
@@ -17,8 +17,7 @@
 /** Unit tests for {@link FeatureList}. */
 @RunWith(BaseRobolectricTestRunner.class)
 public class FeatureListUnitTest {
-    @Rule
-    public JUnitProcessor mFeaturesProcessor = new JUnitProcessor();
+    @Rule public JUnitProcessor mFeaturesProcessor = new JUnitProcessor();
 
     private static final String FEATURE_A = "FeatureA";
     private static final String FEATURE_A_PARAM_1 = "Param1InFeatureA";
@@ -27,7 +26,8 @@
 
     @Test
     public void test_getTestValueForFeature_noOverride_throwsException() {
-        Assert.assertThrows(IllegalArgumentException.class,
+        Assert.assertThrows(
+                IllegalArgumentException.class,
                 () -> FeatureList.getTestValueForFeature(FEATURE_A));
     }
 
@@ -63,7 +63,8 @@
         testValues.addFieldTrialParamOverride(FEATURE_A, FEATURE_A_PARAM_1, "paramValue");
         FeatureList.setTestValues(testValues);
 
-        Assert.assertEquals("paramValue",
+        Assert.assertEquals(
+                "paramValue",
                 FeatureList.getTestValueForFieldTrialParam(FEATURE_A, FEATURE_A_PARAM_1));
 
         // Other params should still return null
@@ -76,7 +77,8 @@
         testValues.addFeatureFlagOverride(FEATURE_A, true);
         FeatureList.setTestValues(testValues);
 
-        Assert.assertThrows(IllegalArgumentException.class,
+        Assert.assertThrows(
+                IllegalArgumentException.class,
                 () -> FeatureList.getTestValueForFeature(FEATURE_B));
     }
 
@@ -95,9 +97,11 @@
 
         Assert.assertEquals(false, FeatureList.getTestValueForFeature(FEATURE_A));
         Assert.assertEquals(true, FeatureList.getTestValueForFeature(FEATURE_B));
-        Assert.assertEquals("paramValue1",
+        Assert.assertEquals(
+                "paramValue1",
                 FeatureList.getTestValueForFieldTrialParam(FEATURE_A, FEATURE_A_PARAM_1));
-        Assert.assertEquals("paramValue2",
+        Assert.assertEquals(
+                "paramValue2",
                 FeatureList.getTestValueForFieldTrialParam(FEATURE_A, FEATURE_A_PARAM_2));
     }
 
diff --git a/base/android/junit/src/org/chromium/base/FileUtilsTest.java b/base/android/junit/src/org/chromium/base/FileUtilsTest.java
index 7609e11..e0c698d 100644
--- a/base/android/junit/src/org/chromium/base/FileUtilsTest.java
+++ b/base/android/junit/src/org/chromium/base/FileUtilsTest.java
@@ -54,10 +54,11 @@
 
 /** Unit tests for {@link Log}. */
 @RunWith(BaseRobolectricTestRunner.class)
-@Config(manifest = Config.NONE, shadows = {FileUtilsTest.FakeShadowBitmapFactory.class})
+@Config(
+        manifest = Config.NONE,
+        shadows = {FileUtilsTest.FakeShadowBitmapFactory.class})
 public class FileUtilsTest {
-    @Rule
-    public final TemporaryFolder temporaryFolder = new TemporaryFolder();
+    @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder();
 
     private Context mContext;
 
@@ -76,23 +77,26 @@
     private String listAllPaths(Path rootDir) {
         ArrayList<String> pathList = new ArrayList<String>();
         try {
-            Files.walkFileTree(rootDir, new SimpleFileVisitor<Path>() {
-                @Override
-                public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs)
-                        throws IOException {
-                    String relPathString = rootDir.relativize(path).toString();
-                    if (!relPathString.isEmpty()) { // Exclude |rootDir|.
-                        pathList.add(relPathString + "/");
-                    }
-                    return FileVisitResult.CONTINUE;
-                }
-                @Override
-                public FileVisitResult visitFile(Path path, BasicFileAttributes attrs)
-                        throws IOException {
-                    pathList.add(rootDir.relativize(path).toString());
-                    return FileVisitResult.CONTINUE;
-                }
-            });
+            Files.walkFileTree(
+                    rootDir,
+                    new SimpleFileVisitor<Path>() {
+                        @Override
+                        public FileVisitResult preVisitDirectory(
+                                Path path, BasicFileAttributes attrs) throws IOException {
+                            String relPathString = rootDir.relativize(path).toString();
+                            if (!relPathString.isEmpty()) { // Exclude |rootDir|.
+                                pathList.add(relPathString + "/");
+                            }
+                            return FileVisitResult.CONTINUE;
+                        }
+
+                        @Override
+                        public FileVisitResult visitFile(Path path, BasicFileAttributes attrs)
+                                throws IOException {
+                            pathList.add(rootDir.relativize(path).toString());
+                            return FileVisitResult.CONTINUE;
+                        }
+                    });
         } catch (IOException e) {
         }
 
@@ -221,12 +225,14 @@
     @Ignore
     @Test
     public void testRecursivelyDeleteFileWithCanDelete() throws IOException {
-        Function<String, Boolean> canDeleteIfEndsWith1 = (String filepath) -> {
-            return filepath.endsWith("1");
-        };
-        Function<String, Boolean> canDeleteIfEndsWith2 = (String filepath) -> {
-            return filepath.endsWith("2");
-        };
+        Function<String, Boolean> canDeleteIfEndsWith1 =
+                (String filepath) -> {
+                    return filepath.endsWith("1");
+                };
+        Function<String, Boolean> canDeleteIfEndsWith2 =
+                (String filepath) -> {
+                    return filepath.endsWith("2");
+                };
 
         prepareMixedFilesTestCase();
         assertFileList("a1/; a1/b1/; a1/b1/c; a1/b1/c2; a1/b2/; a1/b2/c/; a1/b3; a2/; c");
@@ -271,19 +277,20 @@
 
     @Test
     public void testGetFileSize() throws IOException {
-        Function<byte[], Boolean> runCase = (byte[] inputBytes) -> {
-            ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
-            byte[] fileBytes;
-            long size;
-            try {
-                File tempFile = temporaryFolder.newFile();
-                FileUtils.copyStreamToFile(inputStream, tempFile);
-                size = FileUtils.getFileSizeBytes(tempFile);
-            } catch (IOException e) {
-                return false;
-            }
-            return inputBytes.length == size;
-        };
+        Function<byte[], Boolean> runCase =
+                (byte[] inputBytes) -> {
+                    ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
+                    byte[] fileBytes;
+                    long size;
+                    try {
+                        File tempFile = temporaryFolder.newFile();
+                        FileUtils.copyStreamToFile(inputStream, tempFile);
+                        size = FileUtils.getFileSizeBytes(tempFile);
+                    } catch (IOException e) {
+                        return false;
+                    }
+                    return inputBytes.length == size;
+                };
 
         assertTrue(runCase.apply(new byte[] {}));
         assertTrue(runCase.apply(new byte[] {3, 1, 4, 1, 5, 9, 2, 6, 5}));
@@ -308,17 +315,18 @@
 
     @Test
     public void testCopyStream() {
-        Function<byte[], Boolean> runCase = (byte[] inputBytes) -> {
-            ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
-            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-            try {
-                FileUtils.copyStream(inputStream, outputStream);
-            } catch (IOException e) {
-                return false;
-            }
-            byte[] outputBytes = outputStream.toByteArray();
-            return Arrays.equals(inputBytes, outputBytes);
-        };
+        Function<byte[], Boolean> runCase =
+                (byte[] inputBytes) -> {
+                    ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
+                    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+                    try {
+                        FileUtils.copyStream(inputStream, outputStream);
+                    } catch (IOException e) {
+                        return false;
+                    }
+                    byte[] outputBytes = outputStream.toByteArray();
+                    return Arrays.equals(inputBytes, outputBytes);
+                };
 
         assertTrue(runCase.apply(new byte[] {}));
         assertTrue(runCase.apply(new byte[] {3, 1, 4, 1, 5, 9, 2, 6, 5}));
@@ -329,26 +337,27 @@
 
     @Test
     public void testCopyStreamToFile() {
-        Function<byte[], Boolean> runCase = (byte[] inputBytes) -> {
-            ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
-            ByteArrayOutputStream verifyStream = new ByteArrayOutputStream();
-            byte[] fileBytes;
-            try {
-                File tempFile = temporaryFolder.newFile();
-                FileUtils.copyStreamToFile(inputStream, tempFile);
-                byte[] buffer = new byte[6543]; // Use weird size.
-                try (InputStream is = new FileInputStream(tempFile)) {
-                    int amountRead;
-                    while ((amountRead = is.read(buffer)) != -1) {
-                        verifyStream.write(buffer, 0, amountRead);
+        Function<byte[], Boolean> runCase =
+                (byte[] inputBytes) -> {
+                    ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
+                    ByteArrayOutputStream verifyStream = new ByteArrayOutputStream();
+                    byte[] fileBytes;
+                    try {
+                        File tempFile = temporaryFolder.newFile();
+                        FileUtils.copyStreamToFile(inputStream, tempFile);
+                        byte[] buffer = new byte[6543]; // Use weird size.
+                        try (InputStream is = new FileInputStream(tempFile)) {
+                            int amountRead;
+                            while ((amountRead = is.read(buffer)) != -1) {
+                                verifyStream.write(buffer, 0, amountRead);
+                            }
+                        }
+                    } catch (IOException e) {
+                        return false;
                     }
-                }
-            } catch (IOException e) {
-                return false;
-            }
-            byte[] outputBytes = verifyStream.toByteArray();
-            return Arrays.equals(inputBytes, outputBytes);
-        };
+                    byte[] outputBytes = verifyStream.toByteArray();
+                    return Arrays.equals(inputBytes, outputBytes);
+                };
 
         assertTrue(runCase.apply(new byte[] {}));
         assertTrue(runCase.apply(new byte[] {3, 1, 4, 1, 5, 9, 2, 6, 5}));
@@ -359,16 +368,17 @@
 
     @Test
     public void testReadStream() {
-        Function<byte[], Boolean> runCase = (byte[] inputBytes) -> {
-            ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
-            byte[] verifyBytes;
-            try {
-                verifyBytes = FileUtils.readStream(inputStream);
-            } catch (IOException e) {
-                return false;
-            }
-            return Arrays.equals(inputBytes, verifyBytes);
-        };
+        Function<byte[], Boolean> runCase =
+                (byte[] inputBytes) -> {
+                    ByteArrayInputStream inputStream = new ByteArrayInputStream(inputBytes);
+                    byte[] verifyBytes;
+                    try {
+                        verifyBytes = FileUtils.readStream(inputStream);
+                    } catch (IOException e) {
+                        return false;
+                    }
+                    return Arrays.equals(inputBytes, verifyBytes);
+                };
 
         assertTrue(runCase.apply(new byte[] {}));
         assertTrue(runCase.apply(new byte[] {3, 1, 4, 1, 5, 9, 2, 6, 5}));
@@ -381,29 +391,33 @@
     public void testGetUriForFileWithContentUri() {
         // ContentUriUtils needs to be initialized for "content://" URL to work. Use a fake
         // version to avoid dealing with Android innards, and to provide consistent results.
-        ContentUriUtils.setFileProviderUtil(new ContentUriUtils.FileProviderUtil() {
-            @Override
-            public Uri getContentUriFromFile(File file) {
-                Uri.Builder builder = new Uri.Builder();
-                String fileString = file.toString();
-                if (fileString.startsWith("/")) {
-                    fileString = fileString.substring(1);
-                }
-                builder.scheme("content").authority("org.chromium.test");
-                for (String path : fileString.split("/")) {
-                    builder.appendPath(path);
-                }
-                return builder.build();
-            }
-        });
+        ContentUriUtils.setFileProviderUtil(
+                new ContentUriUtils.FileProviderUtil() {
+                    @Override
+                    public Uri getContentUriFromFile(File file) {
+                        Uri.Builder builder = new Uri.Builder();
+                        String fileString = file.toString();
+                        if (fileString.startsWith("/")) {
+                            fileString = fileString.substring(1);
+                        }
+                        builder.scheme("content").authority("org.chromium.test");
+                        for (String path : fileString.split("/")) {
+                            builder.appendPath(path);
+                        }
+                        return builder.build();
+                    }
+                });
 
         assertEquals(
                 "content://org.chromium.test/", FileUtils.getUriForFile(new File("/")).toString());
-        assertEquals("content://org.chromium.test/foo.bar",
+        assertEquals(
+                "content://org.chromium.test/foo.bar",
                 FileUtils.getUriForFile(new File("/foo.bar")).toString());
-        assertEquals("content://org.chromium.test/path1/path2/filename.ext",
+        assertEquals(
+                "content://org.chromium.test/path1/path2/filename.ext",
                 FileUtils.getUriForFile(new File("/path1/path2/filename.ext")).toString());
-        assertEquals("content://org.chromium.test/../../..",
+        assertEquals(
+                "content://org.chromium.test/../../..",
                 FileUtils.getUriForFile(new File("/../../..")).toString());
     }
 
@@ -413,7 +427,8 @@
         // Only test using absolute path. Otherwise cwd would be included into results.
         assertEquals("file:///", FileUtils.getUriForFile(new File("/")).toString());
         assertEquals("file:///foo.bar", FileUtils.getUriForFile(new File("/foo.bar")).toString());
-        assertEquals("file:///path1/path2/filename.ext",
+        assertEquals(
+                "file:///path1/path2/filename.ext",
                 FileUtils.getUriForFile(new File("/path1/path2/filename.ext")).toString());
         assertEquals("file:///../../..", FileUtils.getUriForFile(new File("/../../..")).toString());
     }
@@ -484,7 +499,11 @@
         }
 
         @Override
-        public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs,
+        public Cursor query(
+                Uri uri,
+                String[] projection,
+                String selection,
+                String[] selectionArgs,
                 String sortOrder) {
             return null;
         }
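
The getUriForFile() cases above hinge on whether ContentUriUtils has a FileProviderUtil registered: with a provider set, FileUtils.getUriForFile() produces content:// URIs, and without one it falls back to file:// URIs. A minimal standalone sketch of that contrast, reusing the fake provider from the test; the sketch's class and method names are illustrative and not part of the tree:

    import android.net.Uri;

    import java.io.File;

    import org.chromium.base.ContentUriUtils;
    import org.chromium.base.FileUtils;

    class UriForFileSketch {
        static void demo() {
            File file = new File("/path1/path2/filename.ext");

            // No provider registered yet: falls back to "file:///path1/path2/filename.ext".
            Uri fileUri = FileUtils.getUriForFile(file);

            ContentUriUtils.setFileProviderUtil(
                    new ContentUriUtils.FileProviderUtil() {
                        @Override
                        public Uri getContentUriFromFile(File f) {
                            Uri.Builder builder =
                                    new Uri.Builder()
                                            .scheme("content")
                                            .authority("org.chromium.test");
                            // Drop the leading '/' before splitting into path segments.
                            for (String segment : f.toString().substring(1).split("/")) {
                                builder.appendPath(segment);
                            }
                            return builder.build();
                        }
                    });

            // The same call now yields "content://org.chromium.test/path1/path2/filename.ext".
            Uri contentUri = FileUtils.getUriForFile(file);
        }
    }
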
diff --git a/base/android/junit/src/org/chromium/base/LifetimeAssertTest.java b/base/android/junit/src/org/chromium/base/LifetimeAssertTest.java
index e3d9579..0dc28c0 100644
--- a/base/android/junit/src/org/chromium/base/LifetimeAssertTest.java
+++ b/base/android/junit/src/org/chromium/base/LifetimeAssertTest.java
@@ -14,9 +14,7 @@
 import org.chromium.base.test.BaseRobolectricTestRunner;
 import org.chromium.build.BuildConfig;
 
-/**
- * junit tests for {@link LifetimeAssert}.
- */
+/** junit tests for {@link LifetimeAssert}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class LifetimeAssertTest {
@@ -40,15 +38,16 @@
         mTargetRef = mTestClass.mLifetimeAssert.mWrapper;
         mFound = false;
         mHookMessage = null;
-        LifetimeAssert.sTestHook = (ref, msg) -> {
-            if (ref == mTargetRef) {
-                synchronized (mLock) {
-                    mFound = true;
-                    mHookMessage = msg;
-                    mLock.notify();
-                }
-            }
-        };
+        LifetimeAssert.sTestHook =
+                (ref, msg) -> {
+                    if (ref == mTargetRef) {
+                        synchronized (mLock) {
+                            mFound = true;
+                            mHookMessage = msg;
+                            mLock.notify();
+                        }
+                    }
+                };
     }
 
     @After
diff --git a/base/android/junit/src/org/chromium/base/LogTest.java b/base/android/junit/src/org/chromium/base/LogTest.java
index ee68663..8636de9 100644
--- a/base/android/junit/src/org/chromium/base/LogTest.java
+++ b/base/android/junit/src/org/chromium/base/LogTest.java
@@ -28,7 +28,9 @@
 
         List<ShadowLog.LogItem> logs = ShadowLog.getLogs();
 
-        assertTrue("The origin of the log message (" + logs.get(logs.size() - 1).msg
+        assertTrue(
+                "The origin of the log message ("
+                        + logs.get(logs.size() - 1).msg
                         + ") looks wrong.",
                 logs.get(logs.size() - 1).msg.matches("\\[LogTest.java:\\d+\\].*"));
     }
@@ -41,19 +43,21 @@
     /** Tests that exceptions provided to the log functions are properly recognized and printed. */
     @Test
     public void exceptionLoggingTest() {
-        Throwable t = new Throwable() {
-            @Override
-            public String toString() {
-                return "MyThrowable";
-            }
-        };
+        Throwable t =
+                new Throwable() {
+                    @Override
+                    public String toString() {
+                        return "MyThrowable";
+                    }
+                };
 
-        Throwable t2 = new Throwable() {
-            @Override
-            public String toString() {
-                return "MyOtherThrowable";
-            }
-        };
+        Throwable t2 =
+                new Throwable() {
+                    @Override
+                    public String toString() {
+                        return "MyOtherThrowable";
+                    }
+                };
 
         List<ShadowLog.LogItem> logs;
 
diff --git a/base/android/junit/src/org/chromium/base/MathUtilsTest.java b/base/android/junit/src/org/chromium/base/MathUtilsTest.java
index fda0361..9190da9 100644
--- a/base/android/junit/src/org/chromium/base/MathUtilsTest.java
+++ b/base/android/junit/src/org/chromium/base/MathUtilsTest.java
@@ -71,6 +71,7 @@
         Assert.assertEquals(CLAMP_FAILURE, 9f, MathUtils.clamp(10.9f, min, max), EPSILON);
         Assert.assertEquals(CLAMP_FAILURE, 9f, MathUtils.clamp(30.1f, max, min), EPSILON);
     }
+
     @Test
     public void testPositiveModulo() {
         Assert.assertEquals(MODULO_FAILURE, 1, MathUtils.positiveModulo(3, 2));
@@ -85,4 +86,4 @@
         Assert.assertEquals(SMOOTH_STEP_FAILURE, 0.648f, MathUtils.smoothstep(0.6f), EPSILON);
         Assert.assertEquals(SMOOTH_STEP_FAILURE, 0.216f, MathUtils.smoothstep(0.3f), EPSILON);
     }
-}
\ No newline at end of file
+}
diff --git a/base/android/junit/src/org/chromium/base/PiiEliderTest.java b/base/android/junit/src/org/chromium/base/PiiEliderTest.java
index 585ca3b..50b63eb 100644
--- a/base/android/junit/src/org/chromium/base/PiiEliderTest.java
+++ b/base/android/junit/src/org/chromium/base/PiiEliderTest.java
@@ -12,9 +12,7 @@
 
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * junit tests for {@link PiiElider}.
- */
+/** junit tests for {@link PiiElider}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class PiiEliderTest {
@@ -76,38 +74,46 @@
 
     @Test
     public void testElideUrl8() {
-        String original = "exception at org.chromium.chrome.browser.compositor.scene_layer."
-                + "TabListSceneLayer.nativeUpdateLayer(Native Method)";
+        String original =
+                "exception at org.chromium.chrome.browser.compositor.scene_layer."
+                        + "TabListSceneLayer.nativeUpdateLayer(Native Method)";
         assertEquals(original, PiiElider.elideUrl(original));
     }
 
     @Test
     public void testElideUrl9() {
-        String original = "I/dalvikvm( 5083): at org.chromium.chrome.browser.compositor."
-                + "scene_layer.TabListSceneLayer.nativeUpdateLayer(Native Method)";
+        String original =
+                "I/dalvikvm( 5083): at org.chromium.chrome.browser.compositor."
+                        + "scene_layer.TabListSceneLayer.nativeUpdateLayer(Native Method)";
         assertEquals(original, PiiElider.elideUrl(original));
     }
 
     @Test
     public void testElideUrl10() {
-        String original = "Caused by: java.lang.ClassNotFoundException: Didn't find class "
-                + "\"org.chromium.components.browser_ui.widget.SurfaceColorOvalView\"";
+        String original =
+                "Caused by: java.lang.ClassNotFoundException: Didn't find class "
+                        + "\"org.chromium.components.browser_ui.widget.SurfaceColorOvalView\"";
         assertEquals(original, PiiElider.elideUrl(original));
     }
 
     @Test
     public void testElideUrl11() {
-        String original = "java.lang.RuntimeException: Unable to start activity "
-                + "ComponentInfo{com.chrome.dev/org.chromium.chrome.browser.ChromeTabbedActivity}: "
-                + "android.view.InflateException: Binary XML file line #20 in "
-                + "com.chrome.dev:layout/0_resource_name_obfuscated:";
+        String original =
+                """
+                java.lang.RuntimeException: Unable to start activity
+                ComponentInfo{com.chrome.dev/org.chromium.chrome.browser.ChromeTabbedActivity}:
+                android.view.InflateException: Binary XML file line #20 in
+                com.chrome.dev:layout/0_resource_name_obfuscated:
+                """
+                        .replaceAll("\n", " ");
         assertEquals(original, PiiElider.elideUrl(original));
     }
 
     @Test
     public void testElideUrl12() {
-        String original = "System.err: at kH.onAnimationEnd"
-                + "(chromium-TrichromeChromeGoogle6432.aab-canary-530200034:42)";
+        String original =
+                "System.err: at kH.onAnimationEnd"
+                        + "(chromium-TrichromeChromeGoogle6432.aab-canary-530200034:42)";
         assertEquals(original, PiiElider.elideUrl(original));
     }
 
@@ -126,8 +132,9 @@
 
     @Test
     public void testDontElideAndroidPermission() {
-        String original = "java.lang.SecurityException: get package info: "
-                + "Neither user 1210041 nor current process has android.permission.READ_LOGS";
+        String original =
+                "java.lang.SecurityException: get package info: Neither user 1210041 nor current"
+                        + " process has android.permission.READ_LOGS";
         assertEquals(original, PiiElider.elideUrl(original));
     }
 
@@ -161,21 +168,25 @@
 
     @Test
     public void testElideUrlInStacktrace() {
-        String original = "java.lang.RuntimeException: Outer Exception crbug.com/12345\n"
-                + "  at org.chromium.base.PiiElider.sanitizeStacktrace (PiiElider.java:120)\n"
-                + "Caused by: java.lang.NullPointerException: Inner Exception shorturl.com/bxyj5";
-        String expected = "java.lang.RuntimeException: Outer Exception HTTP://WEBADDRESS.ELIDED\n"
-                + "  at org.chromium.base.PiiElider.sanitizeStacktrace (PiiElider.java:120)\n"
-                + "Caused by: java.lang.NullPointerException: Inner Exception "
-                + "HTTP://WEBADDRESS.ELIDED";
+        String original =
+                "java.lang.RuntimeException: Outer Exception crbug.com/12345\n"
+                    + "  at org.chromium.base.PiiElider.sanitizeStacktrace (PiiElider.java:120)\n"
+                    + "Caused by: java.lang.NullPointerException: Inner Exception"
+                    + " shorturl.com/bxyj5";
+        String expected =
+                "java.lang.RuntimeException: Outer Exception HTTP://WEBADDRESS.ELIDED\n"
+                    + "  at org.chromium.base.PiiElider.sanitizeStacktrace (PiiElider.java:120)\n"
+                    + "Caused by: java.lang.NullPointerException: Inner Exception "
+                    + "HTTP://WEBADDRESS.ELIDED";
         assertEquals(expected, PiiElider.sanitizeStacktrace(original));
     }
 
     @Test
     public void testDoesNotElideMethodNameInStacktrace() {
-        String original = "java.lang.NullPointerException: Attempt to invoke virtual method 'int "
-                + "org.robolectric.internal.AndroidSandbox.getBackStackEntryCount()' on a null "
-                + "object reference";
+        String original =
+                "java.lang.NullPointerException: Attempt to invoke virtual method 'int"
+                    + " org.robolectric.internal.AndroidSandbox.getBackStackEntryCount()' on a null"
+                    + " object reference";
         assertEquals(original, PiiElider.sanitizeStacktrace(original));
     }
 }
diff --git a/base/android/junit/src/org/chromium/base/PromiseTest.java b/base/android/junit/src/org/chromium/base/PromiseTest.java
index 4d480cd..c689e16 100644
--- a/base/android/junit/src/org/chromium/base/PromiseTest.java
+++ b/base/android/junit/src/org/chromium/base/PromiseTest.java
@@ -58,9 +58,10 @@
         final Value value = new Value();
 
         Promise<Integer> promise = new Promise<>();
-        Callback<Integer> callback = unusedArg -> {
-            value.set(value.get() + 1);
-        };
+        Callback<Integer> callback =
+                unusedArg -> {
+                    value.set(value.get() + 1);
+                };
         promise.then(callback);
         promise.then(callback);
 
@@ -91,7 +92,10 @@
 
         promise.then((Integer arg) -> arg.toString())
                 .then((String arg) -> arg + arg)
-                .then(result -> { value.set(result.length()); });
+                .then(
+                        result -> {
+                            value.set(result.length());
+                        });
 
         promise.fulfill(123);
         ShadowLooper.runUiThreadTasksIncludingDelayedTasks();
@@ -106,7 +110,11 @@
 
         final Promise<String> innerPromise = new Promise<>();
 
-        promise.then(arg -> innerPromise).then(result -> { value.set(result.length()); });
+        promise.then(arg -> innerPromise)
+                .then(
+                        result -> {
+                            value.set(result.length());
+                        });
 
         assertEquals(0, value.get());
 
@@ -205,7 +213,11 @@
     public void rejectOnThrow() {
         Value value = new Value();
         Promise<Integer> promise = new Promise<>();
-        promise.then((Function) (unusedArg -> { throw new IllegalArgumentException(); }))
+        promise.then(
+                        (Function)
+                                (unusedArg -> {
+                                    throw new IllegalArgumentException();
+                                }))
                 .then(PromiseTest.pass(), PromiseTest.setValue(value, 5));
 
         promise.fulfill(0);
@@ -220,9 +232,11 @@
         Value value = new Value();
         Promise<Integer> promise = new Promise<>();
 
-        promise.then((Promise.AsyncFunction) (unusedArg -> {
-                   throw new IllegalArgumentException();
-               }))
+        promise.then(
+                        (Promise.AsyncFunction)
+                                (unusedArg -> {
+                                    throw new IllegalArgumentException();
+                                }))
                 .then(PromiseTest.pass(), PromiseTest.setValue(value, 5));
 
         promise.fulfill(0);
diff --git a/base/android/junit/src/org/chromium/base/ResettersForTestingTest.java b/base/android/junit/src/org/chromium/base/ResettersForTestingTest.java
index 455dd37..7694c13 100644
--- a/base/android/junit/src/org/chromium/base/ResettersForTestingTest.java
+++ b/base/android/junit/src/org/chromium/base/ResettersForTestingTest.java
@@ -18,14 +18,13 @@
 
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * Unit tests for {@link ResettersForTesting}.
- */
+/** Unit tests for {@link ResettersForTesting}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class ResettersForTestingTest {
     private static class ResetsToNull {
         public static String str;
+
         public static void setStrForTesting(String newStr) {
             str = newStr;
             ResettersForTesting.register(() -> str = null);
@@ -48,20 +47,22 @@
 
         public static void setStrForTesting(String newStr) {
             str = newStr;
-            ResettersForTesting.register(() -> {
-                str = null;
-                resetCount++;
-            });
+            ResettersForTesting.register(
+                    () -> {
+                        str = null;
+                        resetCount++;
+                    });
         }
     }
 
     private static class ResetsToNullAndIncrementsWithOneShotResetter {
         public static String str;
         public static int resetCount;
-        private static Runnable sResetter = () -> {
-            str = null;
-            resetCount++;
-        };
+        private static Runnable sResetter =
+                () -> {
+                    str = null;
+                    resetCount++;
+                };
 
         public static void setStrForTesting(String newStr) {
             str = newStr;
diff --git a/base/android/junit/src/org/chromium/base/TimeUtilsTest.java b/base/android/junit/src/org/chromium/base/TimeUtilsTest.java
index 8ce45f2..26990c5 100644
--- a/base/android/junit/src/org/chromium/base/TimeUtilsTest.java
+++ b/base/android/junit/src/org/chromium/base/TimeUtilsTest.java
@@ -19,14 +19,11 @@
 import org.chromium.base.TimeUtils.UptimeMillisTimer;
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * Unit tests for {@link TimeUtils}.
- */
+/** Unit tests for {@link TimeUtils}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class TimeUtilsTest {
-    @Rule
-    public FakeTimeTestRule mFakeTime = new FakeTimeTestRule();
+    @Rule public FakeTimeTestRule mFakeTime = new FakeTimeTestRule();
 
     @Test
     @SmallTest
diff --git a/base/android/junit/src/org/chromium/base/TraceEventTest.java b/base/android/junit/src/org/chromium/base/TraceEventTest.java
index 49b9860..6981681 100644
--- a/base/android/junit/src/org/chromium/base/TraceEventTest.java
+++ b/base/android/junit/src/org/chromium/base/TraceEventTest.java
@@ -20,16 +20,12 @@
 import org.chromium.base.test.util.Feature;
 import org.chromium.base.test.util.JniMocker;
 
-/**
- * Tests for {@link TraceEvent}.
- */
+/** Tests for {@link TraceEvent}. */
 @RunWith(BaseRobolectricTestRunner.class)
 public class TraceEventTest {
-    @Rule
-    public JniMocker mocker = new JniMocker();
+    @Rule public JniMocker mocker = new JniMocker();
 
-    @Mock
-    TraceEvent.Natives mNativeMock;
+    @Mock TraceEvent.Natives mNativeMock;
 
     @Before
     public void setUp() {
@@ -62,14 +58,17 @@
 
         // Input string format:
         // ">>>>> Finished to (TARGET) {HASH_CODE} TARGET_NAME: WHAT"
-        String realEventName = ">>>>> Finished to (org.chromium.myClass.myMethod) "
-                + "{HASH_CODE} org.chromium.myOtherClass.instance: message";
+        String realEventName =
+                ">>>>> Finished to (org.chromium.myClass.myMethod) "
+                        + "{HASH_CODE} org.chromium.myOtherClass.instance: message";
 
         // Output string format:
         // "{TraceEvent.BasicLooperMonitor.LOOPER_TASK_PREFIX} TARGET(TARGET_NAME)"
-        String realEventNameExpected = TraceEvent.BasicLooperMonitor.LOOPER_TASK_PREFIX
-                + "org.chromium.myClass.myMethod(org.chromium.myOtherClass.instance)";
-        Assert.assertEquals(TraceEvent.BasicLooperMonitor.getTraceEventName(realEventName),
+        String realEventNameExpected =
+                TraceEvent.BasicLooperMonitor.LOOPER_TASK_PREFIX
+                        + "org.chromium.myClass.myMethod(org.chromium.myOtherClass.instance)";
+        Assert.assertEquals(
+                TraceEvent.BasicLooperMonitor.getTraceEventName(realEventName),
                 realEventNameExpected);
     }
 
@@ -80,9 +79,11 @@
         TraceEvent.setEventNameFilteringEnabled(true);
         Assert.assertTrue(TraceEvent.eventNameFilteringEnabled());
 
-        String realEventName = TraceEvent.BasicLooperMonitor.LOOPER_TASK_PREFIX
-                + "org.chromium.myClass.myMethod(org.chromium.myOtherClass.instance)";
-        Assert.assertEquals(TraceEvent.BasicLooperMonitor.getTraceEventName(realEventName),
+        String realEventName =
+                TraceEvent.BasicLooperMonitor.LOOPER_TASK_PREFIX
+                        + "org.chromium.myClass.myMethod(org.chromium.myOtherClass.instance)";
+        Assert.assertEquals(
+                TraceEvent.BasicLooperMonitor.getTraceEventName(realEventName),
                 TraceEvent.BasicLooperMonitor.FILTERED_EVENT_NAME);
     }
 
@@ -92,8 +93,7 @@
     public void testScopedTraceEventWithIntArg() {
         TraceEvent.setEnabled(true);
         // Only string literals are allowed in Java event names.
-        try (TraceEvent event = TraceEvent.scoped("TestEvent", 15)) {
-        }
+        try (TraceEvent event = TraceEvent.scoped("TestEvent", 15)) {}
         verify(mNativeMock).beginWithIntArg("TestEvent", 15);
         TraceEvent.setEnabled(false);
     }
diff --git a/base/android/junit/src/org/chromium/base/UnownedUserDataHostTest.java b/base/android/junit/src/org/chromium/base/UnownedUserDataHostTest.java
index 434cf65..5bc5d6d 100644
--- a/base/android/junit/src/org/chromium/base/UnownedUserDataHostTest.java
+++ b/base/android/junit/src/org/chromium/base/UnownedUserDataHostTest.java
@@ -33,17 +33,18 @@
     @Test
     public void testUnpreparedLooper() throws InterruptedException {
         AtomicBoolean illegalStateExceptionThrown = new AtomicBoolean();
-        Thread t = new Thread() {
-            @Override
-            public void run() {
-                try {
-                    // The Looper on this thread is still unprepared, so this should fail.
-                    new UnownedUserDataHost();
-                } catch (IllegalStateException e) {
-                    illegalStateExceptionThrown.set(true);
-                }
-            }
-        };
+        Thread t =
+                new Thread() {
+                    @Override
+                    public void run() {
+                        try {
+                            // The Looper on this thread is still unprepared, so this should fail.
+                            new UnownedUserDataHost();
+                        } catch (IllegalStateException e) {
+                            illegalStateExceptionThrown.set(true);
+                        }
+                    }
+                };
         t.start();
         t.join();
 
diff --git a/base/android/junit/src/org/chromium/base/UnownedUserDataKeyTest.java b/base/android/junit/src/org/chromium/base/UnownedUserDataKeyTest.java
index 1dd1a6a..6943144 100644
--- a/base/android/junit/src/org/chromium/base/UnownedUserDataKeyTest.java
+++ b/base/android/junit/src/org/chromium/base/UnownedUserDataKeyTest.java
@@ -32,9 +32,7 @@
 import java.util.List;
 import java.util.concurrent.FutureTask;
 
-/**
- * Test class for {@link UnownedUserDataKey}, which also describes typical usage.
- */
+/** Test class for {@link UnownedUserDataKey}, which also describes typical usage. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class UnownedUserDataKeyTest {
@@ -57,7 +55,8 @@
 
         @Override
         public void onDetachedFromHost(UnownedUserDataHost host) {
-            assertTrue("Should not detach when informOnDetachmentFromHost() return false.",
+            assertTrue(
+                    "Should not detach when informOnDetachmentFromHost() returns false.",
                     informOnDetachment);
             mDetachedHosts.add(host);
         }
@@ -93,6 +92,7 @@
     private static class Foo extends TestUnownedUserData {
         public static final UnownedUserDataKey<Foo> KEY = new UnownedUserDataKey<>(Foo.class);
     }
+
     private static class Bar extends TestUnownedUserData {
         public static final UnownedUserDataKey<Bar> KEY = new UnownedUserDataKey<>(Bar.class);
     }
@@ -671,7 +671,7 @@
 
     @Test
     public void
-    testTwoSimilarItemsMultipleHosts_destroyShouldOnlyRemoveFromCurrentHostWithMultipleKeys() {
+            testTwoSimilarItemsMultipleHosts_destroyShouldOnlyRemoveFromCurrentHostWithMultipleKeys() {
         Foo foo1 = new Foo();
         Foo foo2 = new Foo();
 
@@ -833,8 +833,9 @@
     public void testSingleThreadPolicy() throws Exception {
         Foo.KEY.attachToHost(mHost1, mFoo);
 
-        FutureTask<Void> getTask = new FutureTask<>(
-                () -> assertAsserts(() -> Foo.KEY.retrieveDataFromHost(mHost1)), null);
+        FutureTask<Void> getTask =
+                new FutureTask<>(
+                        () -> assertAsserts(() -> Foo.KEY.retrieveDataFromHost(mHost1)), null);
         PostTask.postTask(TaskTraits.USER_VISIBLE, getTask);
         getTask.get();
 
diff --git a/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsListenerTest.java b/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsListenerTest.java
index 5bd2466..14d4a13 100644
--- a/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsListenerTest.java
+++ b/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsListenerTest.java
@@ -17,39 +17,45 @@
 
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- *  Tests for FrameMetricsListener.
- */
+/** Tests for FrameMetricsListener. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class FrameMetricsListenerTest {
     @Test
     public void testMetricRecording_OffByDefault() {
         FrameMetricsStore store = new FrameMetricsStore();
+        store.initialize();
+
         FrameMetricsListener metricsListener = new FrameMetricsListener(store);
         FrameMetrics frameMetrics = mock(FrameMetrics.class);
 
         when(frameMetrics.getMetric(FrameMetrics.TOTAL_DURATION)).thenReturn(10_000_000L);
+        store.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
 
         metricsListener.onFrameMetricsAvailable(null, frameMetrics, 0);
 
         // By default metrics shouldn't be logged.
-        Assert.assertEquals(0, store.takeMetrics().durationsNs.length);
+        Assert.assertEquals(
+                0, store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE).durationsNs.length);
         verifyNoMoreInteractions(frameMetrics);
     }
 
     @Test
     public void testMetricRecording_EnableRecording() {
         FrameMetricsStore store = new FrameMetricsStore();
+        store.initialize();
 
         FrameMetricsListener metricsListener = new FrameMetricsListener(store);
         FrameMetrics frameMetrics = mock(FrameMetrics.class);
 
         when(frameMetrics.getMetric(FrameMetrics.TOTAL_DURATION)).thenReturn(10_000_000L);
 
+        store.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
         metricsListener.setIsListenerRecording(true);
         metricsListener.onFrameMetricsAvailable(null, frameMetrics, 0);
 
-        Assert.assertArrayEquals(new long[] {10_000_000L}, store.takeMetrics().durationsNs);
+        Assert.assertArrayEquals(
+                new long[] {10_000_000L},
+                store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE).durationsNs);
     }
 }
diff --git a/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsStoreTest.java b/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsStoreTest.java
index 931ce7c..fceffc9 100644
--- a/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsStoreTest.java
+++ b/base/android/junit/src/org/chromium/base/jank_tracker/FrameMetricsStoreTest.java
@@ -13,36 +13,78 @@
 
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- *  Tests for FrameMetricsStore.
- */
+/** Tests for FrameMetricsStore. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class FrameMetricsStoreTest {
     @Test
     public void addFrameMeasurementTest() {
         FrameMetricsStore store = new FrameMetricsStore();
+        store.initialize();
 
-        store.addFrameMeasurement(10_000_000L, false);
-        store.addFrameMeasurement(12_000_000L, false);
-        store.addFrameMeasurement(20_000_000L, true);
-        store.addFrameMeasurement(8_000_000L, true);
+        store.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
 
-        JankMetrics metrics = store.takeMetrics();
+        long frame_start_vsync_ts = 0;
+        store.addFrameMeasurement(10_000_000L, false, frame_start_vsync_ts);
+        store.addFrameMeasurement(12_000_000L, false, frame_start_vsync_ts);
+        store.addFrameMeasurement(20_000_000L, true, frame_start_vsync_ts);
+        store.addFrameMeasurement(8_000_000L, true, frame_start_vsync_ts);
 
-        assertArrayEquals(new long[] {10_000_000L, 12_000_000L, 20_000_000L, 8_000_000L},
+        JankMetrics metrics = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
+
+        assertArrayEquals(
+                new long[] {10_000_000L, 12_000_000L, 20_000_000L, 8_000_000L},
                 metrics.durationsNs);
         assertArrayEquals(new boolean[] {false, false, true, true}, metrics.isJanky);
 
-        metrics = store.takeMetrics();
+        metrics = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
         assertEquals(0, metrics.durationsNs.length);
     }
 
     @Test
     public void takeMetrics_getMetricsWithoutAnyFrames() {
         FrameMetricsStore store = new FrameMetricsStore();
-        JankMetrics metrics = store.takeMetrics();
+        store.initialize();
+        store.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
+        JankMetrics metrics = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
 
         assertEquals(0, metrics.durationsNs.length);
     }
+
+    @Test
+    public void concurrentScenarios() {
+        // We want to test 2 things.
+        // 1) that concurrent scenarios get the correct frames
+        // 2) that the deletion logic runs correctly. Note, however, that the deletion logic is
+        // not public behaviour; this test just exercises it explicitly to uncover potential
+        // bugs.
+        FrameMetricsStore store = new FrameMetricsStore();
+        store.initialize();
+
+        store.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
+
+        long frame_start_vsync_ts = 1_000_000L;
+        store.addFrameMeasurement(10_000_000L, false, frame_start_vsync_ts);
+        store.addFrameMeasurement(12_000_000L, false, frame_start_vsync_ts + 1);
+        store.startTrackingScenario(JankScenario.FEED_SCROLLING);
+        store.addFrameMeasurement(20_000_000L, true, frame_start_vsync_ts + 2);
+        store.addFrameMeasurement(8_000_000L, true, frame_start_vsync_ts + 3);
+
+        // Stop NEW_TAB_PAGE; after this the first two frames will be deleted from the
+        // FrameMetricsStore.
+        JankMetrics metrics = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
+
+        assertArrayEquals(
+                new long[] {10_000_000L, 12_000_000L, 20_000_000L, 8_000_000L},
+                metrics.durationsNs);
+        assertArrayEquals(new boolean[] {false, false, true, true}, metrics.isJanky);
+
+        metrics = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
+        assertEquals(0, metrics.durationsNs.length);
+
+        // Only after that do we stop FEED_SCROLLING, and we should see only the last two frames.
+        metrics = store.stopTrackingScenario(JankScenario.FEED_SCROLLING);
+        assertArrayEquals(new long[] {20_000_000L, 8_000_000L}, metrics.durationsNs);
+        assertArrayEquals(new boolean[] {true, true}, metrics.isJanky);
+    }
 }
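
These tests replace the old takeMetrics() flow with per-scenario tracking. A minimal sketch of the call sequence they exercise, written in the same package so the calls resolve; the class name and the argument-name comments are illustrative, not part of the tree:

    package org.chromium.base.jank_tracker;

    class ScenarioTrackingSketch {
        static void demo() {
            FrameMetricsStore store = new FrameMetricsStore();
            store.initialize();

            // Frames are only attributed to scenarios that are currently being tracked.
            store.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
            store.addFrameMeasurement(10_000_000L, /* isJanky */ false, /* vsyncTimeNs */ 0L);
            store.addFrameMeasurement(20_000_000L, /* isJanky */ true, /* vsyncTimeNs */ 1L);

            // Stopping a scenario returns the frames recorded while it was active...
            JankMetrics metrics = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
            // metrics.durationsNs -> {10_000_000L, 20_000_000L}
            // metrics.isJanky     -> {false, true}

            // ...and stopping it again returns an empty result.
            JankMetrics empty = store.stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
            // empty.durationsNs.length -> 0
        }
    }
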
diff --git a/base/android/junit/src/org/chromium/base/jank_tracker/JankActivityTrackerTest.java b/base/android/junit/src/org/chromium/base/jank_tracker/JankActivityTrackerTest.java
new file mode 100644
index 0000000..c4c6ad4
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/jank_tracker/JankActivityTrackerTest.java
@@ -0,0 +1,152 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import android.app.Activity;
+import android.view.Window;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+
+import org.chromium.base.ActivityState;
+import org.chromium.base.ApplicationStatus;
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Tests for JankActivityTracker. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class JankActivityTrackerTest {
+    @Mock private Activity mActivity;
+
+    @Mock private Window mWindow;
+
+    @Mock private FrameMetricsListener mFrameMetricsListener;
+
+    @Mock private JankReportingScheduler mJankReportingScheduler;
+
+    JankActivityTracker createJankActivityTracker(Activity activity) {
+        JankActivityTracker tracker =
+                new JankActivityTracker(activity, mFrameMetricsListener, mJankReportingScheduler);
+
+        return tracker;
+    }
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+
+        when(mActivity.getWindow()).thenReturn(mWindow);
+
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.CREATED);
+    }
+
+    @Test
+    public void jankTrackerTest_TestInitialize() {
+        JankActivityTracker jankActivityTracker = createJankActivityTracker(mActivity);
+        jankActivityTracker.initialize();
+
+        // Verify that we are listening to frame metrics.
+        // Initialize also starts listening to activity lifecycle events, but that's harder to
+        // verify.
+        verify(mWindow).addOnFrameMetricsAvailableListener(any(), any());
+    }
+
+    @Test
+    public void jankTrackerTest_TestActivityResume() {
+        JankActivityTracker jankActivityTracker = createJankActivityTracker(mActivity);
+        jankActivityTracker.initialize();
+
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.STARTED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.RESUMED);
+
+        // When an activity resumes we start reporting periodic metrics.
+        verify(mJankReportingScheduler, atLeastOnce()).startReportingPeriodicMetrics();
+        verify(mJankReportingScheduler, never()).stopReportingPeriodicMetrics();
+
+        // When an activity resumes we start recording metrics.
+        verify(mFrameMetricsListener, atLeastOnce()).setIsListenerRecording(true);
+    }
+
+    @Test
+    public void jankTrackerTest_TestActivityPause() {
+        JankActivityTracker jankActivityTracker = createJankActivityTracker(mActivity);
+        jankActivityTracker.initialize();
+
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.STARTED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.RESUMED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.PAUSED);
+
+        // When an activity pauses the reporting task should still be looping.
+        verify(mJankReportingScheduler, atLeastOnce()).startReportingPeriodicMetrics();
+        verify(mJankReportingScheduler, never()).stopReportingPeriodicMetrics();
+
+        InOrder orderVerifier = Mockito.inOrder(mFrameMetricsListener);
+
+        orderVerifier.verify(mFrameMetricsListener, atLeastOnce()).setIsListenerRecording(true);
+        // When an activity pauses we stop recording metrics.
+        orderVerifier.verify(mFrameMetricsListener).setIsListenerRecording(false);
+    }
+
+    @Test
+    public void jankTrackerTest_TestActivityStop() {
+        JankActivityTracker jankActivityTracker = createJankActivityTracker(mActivity);
+        jankActivityTracker.initialize();
+
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.STARTED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.RESUMED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.PAUSED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.STOPPED);
+
+        // When an activity stops we stop reporting periodic metrics.
+        InOrder schedulerOrderVerifier = Mockito.inOrder(mJankReportingScheduler);
+        schedulerOrderVerifier
+                .verify(mJankReportingScheduler, atLeastOnce())
+                .startReportingPeriodicMetrics();
+        schedulerOrderVerifier.verify(mJankReportingScheduler).stopReportingPeriodicMetrics();
+    }
+
+    @Test
+    public void jankTrackerTest_TestAttachTrackerOnResumedActivity() {
+        // Modify the activity's state before attaching JankActivityTracker.
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.STARTED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.RESUMED);
+
+        JankActivityTracker jankActivityTracker = createJankActivityTracker(mActivity);
+        jankActivityTracker.initialize();
+
+        // Verify that JankActivityTracker is running as expected for the Resumed state.
+        // Periodic metric reporting should be enabled.
+        verify(mJankReportingScheduler).startReportingPeriodicMetrics();
+        // Metric recording should be enabled.
+        verify(mFrameMetricsListener, atLeastOnce()).setIsListenerRecording(true);
+    }
+
+    @Test
+    public void jankTrackerTest_TestOutOfOrderStateChange() {
+        JankActivityTracker jankActivityTracker = createJankActivityTracker(mActivity);
+        jankActivityTracker.initialize();
+
+        // Move the activity from STOPPED to RESUMED without calling STARTED.
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.STOPPED);
+        ApplicationStatus.onStateChangeForTesting(mActivity, ActivityState.RESUMED);
+
+        // Verify that JankActivityTracker is running as expected for the Resumed state.
+        // Reporting task should be running and looping.
+        verify(mJankReportingScheduler).startReportingPeriodicMetrics();
+        // Metric recording should be enabled.
+        verify(mFrameMetricsListener, atLeastOnce()).setIsListenerRecording(true);
+        verify(mFrameMetricsListener, atLeastOnce()).setIsListenerRecording(false);
+    }
+}
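
The verifications in this new test pin down how the tracker should react to activity lifecycle changes. A compact sketch of that mapping, inferred only from the expectations above; this is not the tracker's actual implementation, and the class, field, and method names here are illustrative:

    package org.chromium.base.jank_tracker;

    import android.app.Activity;

    import org.chromium.base.ActivityState;

    class LifecycleMappingSketch {
        private final FrameMetricsListener mFrameMetricsListener;
        private final JankReportingScheduler mJankReportingScheduler;

        LifecycleMappingSketch(FrameMetricsListener listener, JankReportingScheduler scheduler) {
            mFrameMetricsListener = listener;
            mJankReportingScheduler = scheduler;
        }

        void onActivityStateChange(Activity activity, int newState) {
            switch (newState) {
                case ActivityState.RESUMED:
                    // Resuming starts periodic reporting and frame recording.
                    mJankReportingScheduler.startReportingPeriodicMetrics();
                    mFrameMetricsListener.setIsListenerRecording(true);
                    break;
                case ActivityState.PAUSED:
                    // Pausing stops frame recording but leaves the reporting loop running.
                    mFrameMetricsListener.setIsListenerRecording(false);
                    break;
                case ActivityState.STOPPED:
                    // Stopping also ends periodic reporting.
                    mJankReportingScheduler.stopReportingPeriodicMetrics();
                    break;
                default:
                    break;
            }
        }
    }
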
diff --git a/base/android/junit/src/org/chromium/base/jank_tracker/JankMetricUMARecorderTest.java b/base/android/junit/src/org/chromium/base/jank_tracker/JankMetricUMARecorderTest.java
index e8b1f0f..414afd1 100644
--- a/base/android/junit/src/org/chromium/base/jank_tracker/JankMetricUMARecorderTest.java
+++ b/base/android/junit/src/org/chromium/base/jank_tracker/JankMetricUMARecorderTest.java
@@ -19,17 +19,13 @@
 import org.chromium.base.test.BaseRobolectricTestRunner;
 import org.chromium.base.test.util.JniMocker;
 
-/**
- *  Tests for JankMetricUMARecorder.
- */
+/** Tests for JankMetricUMARecorder. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class JankMetricUMARecorderTest {
-    @Rule
-    public JniMocker mocker = new JniMocker();
+    @Rule public JniMocker mocker = new JniMocker();
 
-    @Mock
-    JankMetricUMARecorder.Natives mNativeMock;
+    @Mock JankMetricUMARecorder.Natives mNativeMock;
 
     @Before
     public void setUp() {
@@ -39,22 +35,27 @@
 
     @Test
     public void testRecordMetricsToNative() {
+        long[] timestampsNs = new long[] {5L, 8L, 3L};
         long[] durationsNs = new long[] {5_000_000L, 8_000_000L, 30_000_000L};
         boolean[] jankyFrames = new boolean[] {false, false, true};
 
-        JankMetrics metric = new JankMetrics(durationsNs, jankyFrames);
+        JankMetrics metric = new JankMetrics(timestampsNs, durationsNs, jankyFrames);
 
-        JankMetricUMARecorder.recordJankMetricsToUMA(metric, 0, 1000);
+        JankMetricUMARecorder.recordJankMetricsToUMA(metric, 0, 1000, 1);
 
         // Ensure that the relevant fields are sent down to native.
-        verify(mNativeMock).recordJankMetrics(durationsNs, jankyFrames, 0, 1000);
+        verify(mNativeMock).recordJankMetrics(durationsNs, jankyFrames, 0, 1000, 1);
     }
 
     @Test
     public void testRecordNullMetrics() {
-        JankMetricUMARecorder.recordJankMetricsToUMA(null, 0, 0);
+        JankMetricUMARecorder.recordJankMetricsToUMA(null, 0, 0, 1);
         verify(mNativeMock, never())
-                .recordJankMetrics(ArgumentMatchers.any(), ArgumentMatchers.any(),
-                        ArgumentMatchers.anyLong(), ArgumentMatchers.anyLong());
+                .recordJankMetrics(
+                        ArgumentMatchers.any(),
+                        ArgumentMatchers.any(),
+                        ArgumentMatchers.anyLong(),
+                        ArgumentMatchers.anyLong(),
+                        ArgumentMatchers.anyInt());
     }
 }
diff --git a/base/android/junit/src/org/chromium/base/jank_tracker/JankReportingRunnableTest.java b/base/android/junit/src/org/chromium/base/jank_tracker/JankReportingRunnableTest.java
new file mode 100644
index 0000000..b377a07
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/jank_tracker/JankReportingRunnableTest.java
@@ -0,0 +1,185 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+import android.os.Handler;
+import android.os.Looper;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.robolectric.shadows.ShadowLooper;
+
+import org.chromium.base.TimeUtils;
+import org.chromium.base.test.BaseRobolectricTestRunner;
+import org.chromium.base.test.util.JniMocker;
+
+/** Tests for JankReportingRunnable. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class JankReportingRunnableTest {
+    ShadowLooper mShadowLooper;
+    Handler mHandler;
+    @Rule public JniMocker mocker = new JniMocker();
+
+    @Mock JankMetricUMARecorder.Natives mNativeMock;
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+        mocker.mock(JankMetricUMARecorderJni.TEST_HOOKS, mNativeMock);
+        mShadowLooper = ShadowLooper.shadowMainLooper();
+        mHandler = new Handler(Looper.getMainLooper());
+    }
+
+    @Test
+    public void testStartTracking() {
+        FrameMetricsStore metricsStore = Mockito.spy(new FrameMetricsStore());
+        metricsStore.initialize();
+
+        JankReportingRunnable reportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ true,
+                        mHandler,
+                        null);
+        reportingRunnable.run();
+
+        verify(metricsStore).initialize();
+        verify(metricsStore).startTrackingScenario(JankScenario.TAB_SWITCHER);
+        verifyNoMoreInteractions(metricsStore);
+    }
+
+    @Test
+    public void testStopTracking_withoutDelay() {
+        FrameMetricsStore metricsStore = Mockito.spy(new FrameMetricsStore());
+        metricsStore.initialize();
+
+        JankReportingRunnable startReportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ true,
+                        mHandler,
+                        null);
+        startReportingRunnable.run();
+
+        metricsStore.addFrameMeasurement(1_000_000L, true, 1);
+
+        JankReportingRunnable stopReportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ false,
+                        mHandler,
+                        null);
+        stopReportingRunnable.run();
+
+        verify(metricsStore).initialize();
+        verify(metricsStore).startTrackingScenario(JankScenario.TAB_SWITCHER);
+        verify(metricsStore).stopTrackingScenario(JankScenario.TAB_SWITCHER);
+
+        verify(mNativeMock)
+                .recordJankMetrics(
+                        new long[] {1_000_000L},
+                        new boolean[] {true},
+                        0L,
+                        1L,
+                        JankScenario.TAB_SWITCHER);
+    }
+
+    @Test
+    public void testStopTracking_withDelay() {
+        final long frameTime = 50L * TimeUtils.NANOSECONDS_PER_MILLISECOND;
+        FrameMetricsStore metricsStore = Mockito.spy(new FrameMetricsStore());
+        metricsStore.initialize();
+
+        JankEndScenarioTime endScenarioTime = JankEndScenarioTime.endAt(frameTime);
+        Assert.assertTrue(endScenarioTime != null);
+        Assert.assertEquals(endScenarioTime.endScenarioTimeNs, frameTime);
+
+        JankReportingRunnable startReportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ true,
+                        mHandler,
+                        endScenarioTime);
+        startReportingRunnable.run();
+
+        metricsStore.addFrameMeasurement(
+                1_000_000L, true, 1 * TimeUtils.NANOSECONDS_PER_MILLISECOND);
+
+        JankReportingRunnable stopReportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ false,
+                        mHandler,
+                        endScenarioTime);
+        stopReportingRunnable.run();
+
+        // Add two frames, one added before the frame time of 50ms above and one after. The first
+        // should be included and the second ignored.
+        metricsStore.addFrameMeasurement(
+                1_000_001L, false, 5 * TimeUtils.NANOSECONDS_PER_MILLISECOND);
+        metricsStore.addFrameMeasurement(
+                1_000_002L, true, (frameTime + 5) * TimeUtils.NANOSECONDS_PER_MILLISECOND);
+
+        mShadowLooper.runOneTask();
+
+        verify(metricsStore).initialize();
+        verify(metricsStore).startTrackingScenario(JankScenario.TAB_SWITCHER);
+        verify(metricsStore).stopTrackingScenario(JankScenario.TAB_SWITCHER, frameTime);
+
+        verify(mNativeMock)
+                .recordJankMetrics(
+                        new long[] {1_000_000L, 1_000_001L},
+                        new boolean[] {true, false},
+                        1L,
+                        5L,
+                        JankScenario.TAB_SWITCHER);
+    }
+
+    @Test
+    public void testStopTracking_emptyStoreShouldntRecordAnything() {
+        // Create a store but don't add any measurements.
+        FrameMetricsStore metricsStore = Mockito.spy(new FrameMetricsStore());
+        metricsStore.initialize();
+
+        JankReportingRunnable startReportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ true,
+                        mHandler,
+                        null);
+        startReportingRunnable.run();
+
+        JankReportingRunnable stopReportingRunnable =
+                new JankReportingRunnable(
+                        metricsStore,
+                        JankScenario.TAB_SWITCHER,
+                        /* isStartingTracking= */ false,
+                        mHandler,
+                        null);
+        stopReportingRunnable.run();
+
+        verify(metricsStore).initialize();
+        verify(metricsStore).startTrackingScenario(JankScenario.TAB_SWITCHER);
+        verify(metricsStore).stopTrackingScenario(JankScenario.TAB_SWITCHER);
+
+        // Native shouldn't be called when there are no measurements.
+        verifyNoMoreInteractions(mNativeMock);
+    }
+}
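
These tests drive JankReportingRunnable directly rather than through JankReportingScheduler. A minimal sketch of the start/stop pairing they exercise, using only the constructor arguments and store calls visible above; written in the same package so the calls resolve, and purely illustrative:

    package org.chromium.base.jank_tracker;

    import android.os.Handler;
    import android.os.Looper;

    class ReportingRunnableSketch {
        static void demo() {
            FrameMetricsStore store = new FrameMetricsStore();
            store.initialize();
            Handler handler = new Handler(Looper.getMainLooper());

            // Start tracking TAB_SWITCHER; the runnable forwards to startTrackingScenario().
            new JankReportingRunnable(
                            store, JankScenario.TAB_SWITCHER, /* isStartingTracking */ true,
                            handler, /* endScenarioTime */ null)
                    .run();

            // A frame recorded while the scenario is active.
            store.addFrameMeasurement(1_000_000L, /* isJanky */ true, /* vsyncTimeNs */ 1);

            // Stop tracking; with measurements present the collected metrics are passed to
            // JankMetricUMARecorder, which is what the mocked recordJankMetrics() verifies.
            new JankReportingRunnable(
                            store, JankScenario.TAB_SWITCHER, /* isStartingTracking */ false,
                            handler, /* endScenarioTime */ null)
                    .run();
        }
    }
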
diff --git a/base/android/junit/src/org/chromium/base/jank_tracker/JankReportingSchedulerTest.java b/base/android/junit/src/org/chromium/base/jank_tracker/JankReportingSchedulerTest.java
new file mode 100644
index 0000000..7b8902f
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/jank_tracker/JankReportingSchedulerTest.java
@@ -0,0 +1,161 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.jank_tracker;
+
+import static org.mockito.Mockito.verify;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.robolectric.annotation.LooperMode;
+import org.robolectric.shadow.api.Shadow;
+import org.robolectric.shadows.ShadowLooper;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Tests for JankReportingScheduler. */
+@RunWith(BaseRobolectricTestRunner.class)
+@LooperMode(LooperMode.Mode.LEGACY)
+public class JankReportingSchedulerTest {
+    ShadowLooper mShadowLooper;
+
+    @Mock private FrameMetricsStore mFrameMetricsStore;
+
+    JankReportingScheduler createJankReportingScheduler() {
+        JankReportingScheduler scheduler = new JankReportingScheduler(mFrameMetricsStore);
+        mShadowLooper = Shadow.extract(scheduler.getOrCreateHandler().getLooper());
+
+        return scheduler;
+    }
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+    }
+
+    @Test
+    public void jankScenarioTracking_startTracking() {
+        JankReportingScheduler jankReportingScheduler = createJankReportingScheduler();
+
+        jankReportingScheduler.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
+
+        // When first getting the handler we need to run its posted initialization task.
+        mShadowLooper.runOneTask();
+        // Starting tracking posts a task to begin recording metrics in FrameMetricsStore.
+        mShadowLooper.runOneTask();
+
+        verify(mFrameMetricsStore).initialize();
+        verify(mFrameMetricsStore).startTrackingScenario(JankScenario.NEW_TAB_PAGE);
+    }
+
+    @Test
+    public void jankScenarioTracking_startAndStopTracking() {
+        JankReportingScheduler jankReportingScheduler = createJankReportingScheduler();
+
+        jankReportingScheduler.startTrackingScenario(JankScenario.NEW_TAB_PAGE);
+        jankReportingScheduler.finishTrackingScenario(JankScenario.NEW_TAB_PAGE);
+
+        // The first use of the handler posts an initialization task that must run first.
+        mShadowLooper.runOneTask();
+        // Starting tracking posts a task to begin recording metrics in FrameMetricsStore.
+        mShadowLooper.runOneTask();
+        // Stopping tracking posts a task to finish tracking and upload the calculated metrics.
+        mShadowLooper.runOneTask();
+
+        InOrder orderVerifier = Mockito.inOrder(mFrameMetricsStore);
+
+        // After these tasks we should have started and stopped tracking the NEW_TAB_PAGE
+        // scenario.
+        orderVerifier.verify(mFrameMetricsStore).initialize();
+        orderVerifier.verify(mFrameMetricsStore).startTrackingScenario(JankScenario.NEW_TAB_PAGE);
+        orderVerifier.verify(mFrameMetricsStore).stopTrackingScenario(JankScenario.NEW_TAB_PAGE);
+
+        Assert.assertFalse(mShadowLooper.getScheduler().areAnyRunnable());
+    }
+
+    @Test
+    public void jankReportingSchedulerTest_StartPeriodicReporting() {
+        JankReportingScheduler jankReportingScheduler = createJankReportingScheduler();
+
+        jankReportingScheduler.startReportingPeriodicMetrics();
+
+        // The first use of the handler posts an initialization task that must run first.
+        mShadowLooper.runOneTask();
+        // When periodic reporting is enabled a task is immediately posted to begin tracking.
+        mShadowLooper.runOneTask();
+        // Then a delayed task is posted for the reporting loop.
+        mShadowLooper.runOneTask();
+        // The reporting loop task posts an immediate task to stop tracking and record the data.
+        mShadowLooper.runOneTask();
+
+        InOrder orderVerifier = Mockito.inOrder(mFrameMetricsStore);
+
+        // After these tasks we should have started and stopped tracking the periodic reporting
+        // scenario.
+        orderVerifier.verify(mFrameMetricsStore).initialize();
+        orderVerifier
+                .verify(mFrameMetricsStore)
+                .startTrackingScenario(JankScenario.PERIODIC_REPORTING);
+        orderVerifier
+                .verify(mFrameMetricsStore)
+                .stopTrackingScenario(JankScenario.PERIODIC_REPORTING);
+
+        // There should be another task posted to continue the loop.
+        Assert.assertTrue(mShadowLooper.getScheduler().areAnyRunnable());
+    }
+
+    @Test
+    public void jankReportingSchedulerTest_StopPeriodicReporting() {
+        JankReportingScheduler jankReportingScheduler = createJankReportingScheduler();
+
+        jankReportingScheduler.startReportingPeriodicMetrics();
+
+        // The first use of the handler posts an initialization task that must run first.
+        mShadowLooper.runOneTask();
+        // Run tracking initialization task.
+        mShadowLooper.runOneTask();
+        // Run the first reporting loop (delayed 30s).
+        mShadowLooper.runOneTask();
+        // Run task to stop tracking 1st loop and record data.
+        mShadowLooper.runOneTask();
+        // Run task to start tracking the 2nd reporting loop.
+        mShadowLooper.runOneTask();
+
+        jankReportingScheduler.stopReportingPeriodicMetrics();
+
+        // Stopping periodic metric recording posts a reporting loop task immediately to stop
+        // tracking and record results.
+        mShadowLooper.runOneTask();
+        // The reporting loop task posts another immediate task to stop tracking and report data.
+        mShadowLooper.runOneTask();
+
+        InOrder orderVerifier = Mockito.inOrder(mFrameMetricsStore);
+
+        // This start/stop pair corresponds to the first reporting period.
+        orderVerifier.verify(mFrameMetricsStore).initialize();
+        orderVerifier
+                .verify(mFrameMetricsStore)
+                .startTrackingScenario(JankScenario.PERIODIC_REPORTING);
+        orderVerifier
+                .verify(mFrameMetricsStore)
+                .stopTrackingScenario(JankScenario.PERIODIC_REPORTING);
+
+        // Stopping reporting forces an immediate report of recorded frames, if any.
+        orderVerifier
+                .verify(mFrameMetricsStore)
+                .startTrackingScenario(JankScenario.PERIODIC_REPORTING);
+        orderVerifier
+                .verify(mFrameMetricsStore)
+                .stopTrackingScenario(JankScenario.PERIODIC_REPORTING);
+
+        // There should not be another task posted to continue the loop.
+        Assert.assertFalse(mShadowLooper.getScheduler().areAnyRunnable());
+    }
+}
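
The scheduler test above advances a background Handler one task at a time via Robolectric's legacy ShadowLooper. A stand-alone sketch of that mechanism under the same @LooperMode(LEGACY) assumption (class, thread, and variable names are illustrative; only the Android and Robolectric calls are real):

    import static org.junit.Assert.assertEquals;

    import android.os.Handler;
    import android.os.HandlerThread;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.robolectric.RobolectricTestRunner;
    import org.robolectric.annotation.LooperMode;
    import org.robolectric.shadow.api.Shadow;
    import org.robolectric.shadows.ShadowLooper;

    import java.util.ArrayList;
    import java.util.List;

    @RunWith(RobolectricTestRunner.class)
    @LooperMode(LooperMode.Mode.LEGACY)
    public class ShadowLooperSketchTest {
        @Test
        public void tasksRunOnlyWhenTheShadowLooperIsAdvanced() {
            HandlerThread thread = new HandlerThread("SketchThread");
            thread.start();
            Handler handler = new Handler(thread.getLooper());
            ShadowLooper shadowLooper = Shadow.extract(handler.getLooper());

            List<String> log = new ArrayList<>();
            handler.post(() -> log.add("first"));
            handler.post(() -> log.add("second"));

            // In legacy looper mode, background tasks queue until explicitly run.
            shadowLooper.runOneTask();
            assertEquals(1, log.size());
            shadowLooper.runOneTask();
            assertEquals(2, log.size());
        }
    }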
diff --git a/base/android/junit/src/org/chromium/base/library_loader/LinkerTest.java b/base/android/junit/src/org/chromium/base/library_loader/LinkerTest.java
index 7596e39..9e67a11 100644
--- a/base/android/junit/src/org/chromium/base/library_loader/LinkerTest.java
+++ b/base/android/junit/src/org/chromium/base/library_loader/LinkerTest.java
@@ -27,18 +27,14 @@
 import org.chromium.base.metrics.UmaRecorderHolder;
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- *  Tests for {@link Linker}.
- */
+/** Tests for {@link Linker}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 @SuppressWarnings("GuardedBy") // doNothing().when(...).methodLocked() cannot resolve |mLock|.
 public class LinkerTest {
-    @Mock
-    Linker.Natives mNativeMock;
+    @Mock Linker.Natives mNativeMock;
 
-    @Rule
-    public MockitoRule mMockitoRule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS);
+    @Rule public MockitoRule mMockitoRule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS);
 
     @Before
     public void setUp() {
@@ -180,7 +176,8 @@
         linker.takeSharedRelrosFromBundle(b);
 
         // Verify.
-        Assert.assertEquals(1,
+        Assert.assertEquals(
+                1,
                 RecordHistogram.getHistogramTotalCountForTesting(
                         "ChromiumAndroidLinker.RelroSharingStatus2"));
     }
diff --git a/base/android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java b/base/android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java
index 20fa05c..47367b0 100644
--- a/base/android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java
+++ b/base/android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java
@@ -24,9 +24,7 @@
 
 import java.util.concurrent.TimeUnit;
 
-/**
- * Test for MemoryPressureMonitor.
- */
+/** Test for MemoryPressureMonitor. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class MemoryPressureMonitorTest {
@@ -109,24 +107,25 @@
     @Test
     @SmallTest
     public void testTrimLevelTranslation() {
-        Integer[][] trimLevelToPressureMap = {//
-                // Levels >= TRIM_MEMORY_COMPLETE map to CRITICAL.
-                {ComponentCallbacks2.TRIM_MEMORY_COMPLETE + 1, MemoryPressureLevel.CRITICAL},
-                {ComponentCallbacks2.TRIM_MEMORY_COMPLETE, MemoryPressureLevel.CRITICAL},
+        Integer[][] trimLevelToPressureMap = { //
+            // Levels >= TRIM_MEMORY_COMPLETE map to CRITICAL.
+            {ComponentCallbacks2.TRIM_MEMORY_COMPLETE + 1, MemoryPressureLevel.CRITICAL},
+            {ComponentCallbacks2.TRIM_MEMORY_COMPLETE, MemoryPressureLevel.CRITICAL},
 
-                // TRIM_MEMORY_RUNNING_CRITICAL maps to CRITICAL.
-                {ComponentCallbacks2.TRIM_MEMORY_RUNNING_CRITICAL, MemoryPressureLevel.CRITICAL},
+            // TRIM_MEMORY_RUNNING_CRITICAL maps to CRITICAL.
+            {ComponentCallbacks2.TRIM_MEMORY_RUNNING_CRITICAL, MemoryPressureLevel.CRITICAL},
 
-                // Levels < TRIM_MEMORY_COMPLETE && >= TRIM_MEMORY_BACKGROUND map to MODERATE.
-                {ComponentCallbacks2.TRIM_MEMORY_COMPLETE - 1, MemoryPressureLevel.MODERATE},
-                {ComponentCallbacks2.TRIM_MEMORY_BACKGROUND + 1, MemoryPressureLevel.MODERATE},
-                {ComponentCallbacks2.TRIM_MEMORY_BACKGROUND, MemoryPressureLevel.MODERATE},
+            // Levels < TRIM_MEMORY_COMPLETE && >= TRIM_MEMORY_BACKGROUND map to MODERATE.
+            {ComponentCallbacks2.TRIM_MEMORY_COMPLETE - 1, MemoryPressureLevel.MODERATE},
+            {ComponentCallbacks2.TRIM_MEMORY_BACKGROUND + 1, MemoryPressureLevel.MODERATE},
+            {ComponentCallbacks2.TRIM_MEMORY_BACKGROUND, MemoryPressureLevel.MODERATE},
 
-                // Other levels are not mapped.
-                {ComponentCallbacks2.TRIM_MEMORY_BACKGROUND - 1, null},
-                {ComponentCallbacks2.TRIM_MEMORY_RUNNING_LOW, null},
-                {ComponentCallbacks2.TRIM_MEMORY_RUNNING_MODERATE, null},
-                {ComponentCallbacks2.TRIM_MEMORY_UI_HIDDEN, null}};
+            // Other levels are not mapped.
+            {ComponentCallbacks2.TRIM_MEMORY_BACKGROUND - 1, null},
+            {ComponentCallbacks2.TRIM_MEMORY_RUNNING_LOW, null},
+            {ComponentCallbacks2.TRIM_MEMORY_RUNNING_MODERATE, null},
+            {ComponentCallbacks2.TRIM_MEMORY_UI_HIDDEN, null}
+        };
         for (Integer[] trimLevelAndPressure : trimLevelToPressureMap) {
             int trimLevel = trimLevelAndPressure[0];
             Integer expectedPressure = trimLevelAndPressure[1];
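
The table above is effectively a specification of the trim-level-to-pressure translation the test exercises. A compact, illustrative rendering of that mapping (the MODERATE/CRITICAL constants are local stand-ins rather than the real MemoryPressureLevel values, and this is a sketch, not the Chromium implementation):

    import android.content.ComponentCallbacks2;

    // Illustrative translation of the table above; not the production code.
    final class TrimLevelTranslationSketch {
        static final Integer MODERATE = 1; // stand-ins for MemoryPressureLevel constants
        static final Integer CRITICAL = 2;

        // Returns null for trim levels that do not map to a pressure signal.
        static Integer pressureFromTrimLevel(int level) {
            if (level >= ComponentCallbacks2.TRIM_MEMORY_COMPLETE
                    || level == ComponentCallbacks2.TRIM_MEMORY_RUNNING_CRITICAL) {
                return CRITICAL;
            }
            if (level >= ComponentCallbacks2.TRIM_MEMORY_BACKGROUND) {
                return MODERATE;
            }
            return null;
        }

        private TrimLevelTranslationSketch() {}
    }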
diff --git a/base/android/junit/src/org/chromium/base/memory/MemoryPurgeManagerTest.java b/base/android/junit/src/org/chromium/base/memory/MemoryPurgeManagerTest.java
index 0b2b517..bc03e9b 100644
--- a/base/android/junit/src/org/chromium/base/memory/MemoryPurgeManagerTest.java
+++ b/base/android/junit/src/org/chromium/base/memory/MemoryPurgeManagerTest.java
@@ -28,18 +28,16 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
-/**
- * Tests for MemoryPurgeManager.
- */
+/** Tests for MemoryPurgeManager. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class MemoryPurgeManagerTest {
-    @Rule
-    public FakeTimeTestRule mFakeTimeTestRule = new FakeTimeTestRule();
-    private Callable<Integer> mGetCount = () -> {
-        return RecordHistogram.getHistogramTotalCountForTesting(
-                MemoryPurgeManager.BACKGROUND_DURATION_HISTOGRAM_NAME);
-    };
+    @Rule public FakeTimeTestRule mFakeTimeTestRule = new FakeTimeTestRule();
+    private Callable<Integer> mGetCount =
+            () -> {
+                return RecordHistogram.getHistogramTotalCountForTesting(
+                        MemoryPurgeManager.BACKGROUND_DURATION_HISTOGRAM_NAME);
+            };
 
     private class MemoryPurgeManagerForTest extends MemoryPurgeManager {
         public MemoryPurgeManagerForTest(int initialState) {
diff --git a/base/android/junit/src/org/chromium/base/metrics/CachingUmaRecorderTest.java b/base/android/junit/src/org/chromium/base/metrics/CachingUmaRecorderTest.java
index 772a0fc..56632b5 100644
--- a/base/android/junit/src/org/chromium/base/metrics/CachingUmaRecorderTest.java
+++ b/base/android/junit/src/org/chromium/base/metrics/CachingUmaRecorderTest.java
@@ -38,8 +38,7 @@
 @RunWith(BaseRobolectricTestRunner.class)
 @SuppressWarnings("DoNotMock") // Ok to mock UmaRecorder since this is testing metrics.
 public final class CachingUmaRecorderTest {
-    @Mock
-    UmaRecorder mUmaRecorder;
+    @Mock UmaRecorder mUmaRecorder;
 
     @Before
     public void initMocks() {
@@ -66,13 +65,16 @@
         cachingUmaRecorder.recordBooleanHistogram(
                 "cachingUmaRecorderTest.recordBooleanHistogram", false);
 
-        assertEquals(3,
+        assertEquals(
+                3,
                 cachingUmaRecorder.getHistogramTotalCountForTesting(
                         "cachingUmaRecorderTest.recordBooleanHistogram"));
-        assertEquals(2,
+        assertEquals(
+                2,
                 cachingUmaRecorder.getHistogramValueCountForTesting(
                         "cachingUmaRecorderTest.recordBooleanHistogram", 1));
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramValueCountForTesting(
                         "cachingUmaRecorderTest.recordBooleanHistogram", 0));
 
@@ -90,10 +92,12 @@
 
         cachingUmaRecorder.recordExponentialHistogram(
                 "cachingUmaRecorderTest.recordExponentialHistogram", 72, 1, 1000, 50);
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramTotalCountForTesting(
                         "cachingUmaRecorderTest.recordExponentialHistogram"));
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramValueCountForTesting(
                         "cachingUmaRecorderTest.recordExponentialHistogram", 72));
         cachingUmaRecorder.setDelegate(mUmaRecorder);
@@ -109,10 +113,12 @@
 
         cachingUmaRecorder.recordLinearHistogram(
                 "cachingUmaRecorderTest.recordLinearHistogram", 72, 1, 1000, 50);
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramTotalCountForTesting(
                         "cachingUmaRecorderTest.recordLinearHistogram"));
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramValueCountForTesting(
                         "cachingUmaRecorderTest.recordLinearHistogram", 72));
 
@@ -129,10 +135,12 @@
 
         cachingUmaRecorder.recordSparseHistogram(
                 "cachingUmaRecorderTest.recordSparseHistogram", 72);
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramTotalCountForTesting(
                         "cachingUmaRecorderTest.recordSparseHistogram"));
-        assertEquals(1,
+        assertEquals(
+                1,
                 cachingUmaRecorder.getHistogramValueCountForTesting(
                         "cachingUmaRecorderTest.recordSparseHistogram", 72));
         cachingUmaRecorder.setDelegate(mUmaRecorder);
@@ -278,14 +286,20 @@
 
         blockingUmaRecorder.lock.lock();
         try {
-            recordingThread = new Thread(() -> {
-                cachingUmaRecorder.recordSparseHistogram(
-                        "CachingUmaRecorderTest.blockUntilRecordingDone", 16);
-            });
+            recordingThread =
+                    new Thread(
+                            () -> {
+                                cachingUmaRecorder.recordSparseHistogram(
+                                        "CachingUmaRecorderTest.blockUntilRecordingDone", 16);
+                            });
             recordingThread.start();
             awaitThreadBlocked(recordingThread, Duration.ofSeconds(1));
 
-            swappingThread = new Thread(() -> { cachingUmaRecorder.setDelegate(mUmaRecorder); });
+            swappingThread =
+                    new Thread(
+                            () -> {
+                                cachingUmaRecorder.setDelegate(mUmaRecorder);
+                            });
             swappingThread.start();
             awaitThreadBlocked(swappingThread, Duration.ofSeconds(1));
         } finally {
@@ -309,10 +323,11 @@
                 case TIMED_WAITING:
                     return;
                 case NEW:
-                case RUNNABLE: {
-                    Thread.yield();
-                    continue;
-                }
+                case RUNNABLE:
+                    {
+                        Thread.yield();
+                        continue;
+                    }
                 case TERMINATED:
                     fail("Thread unexpectedly terminated.");
             }
@@ -420,13 +435,15 @@
     @SuppressWarnings("ThreadPriorityCheck")
     private static Thread startHistogramRecordingThread(
             int sample, int count, UmaRecorder recorder) {
-        Thread thread = new Thread(() -> {
-            for (int i = 0; i < count; i++) {
-                recorder.recordSparseHistogram("StressTest", sample);
-                // Make it more likely this thread will be preempted.
-                Thread.yield();
-            }
-        });
+        Thread thread =
+                new Thread(
+                        () -> {
+                            for (int i = 0; i < count; i++) {
+                                recorder.recordSparseHistogram("StressTest", sample);
+                                // Make it more likely this thread will be preempted.
+                                Thread.yield();
+                            }
+                        });
         thread.start();
         return thread;
     }
@@ -532,13 +549,15 @@
     @SuppressWarnings("ThreadPriorityCheck")
     private static Thread startUserActionRecordingThread(
             int sample, int count, UmaRecorder recorder) {
-        Thread thread = new Thread(() -> {
-            for (int i = 0; i < count; i++) {
-                recorder.recordUserAction("StressTestUserAction." + i, sample);
-                // Make it more likely this thread will be preempted.
-                Thread.yield();
-            }
-        });
+        Thread thread =
+                new Thread(
+                        () -> {
+                            for (int i = 0; i < count; i++) {
+                                recorder.recordUserAction("StressTestUserAction." + i, sample);
+                                // Make it more likely this thread will be preempted.
+                                Thread.yield();
+                            }
+                        });
         thread.start();
         return thread;
     }
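
Several hunks above reflow a helper that spins until a worker thread has parked (BLOCKED, WAITING, or TIMED_WAITING) before the delegate swap continues. A self-contained sketch of that Thread.getState() polling idea (the class name and the timeout/exception handling are this sketch's own choices):

    import java.time.Duration;

    final class ThreadParkingSketch {
        // Spin until |thread| parks, or give up once |timeout| has elapsed.
        static void awaitParked(Thread thread, Duration timeout) {
            long deadlineNanos = System.nanoTime() + timeout.toNanos();
            while (System.nanoTime() < deadlineNanos) {
                switch (thread.getState()) {
                    case BLOCKED:
                    case WAITING:
                    case TIMED_WAITING:
                        return;
                    case TERMINATED:
                        throw new IllegalStateException("Thread ended before parking.");
                    default:
                        Thread.yield(); // NEW or RUNNABLE: let it make progress.
                }
            }
            throw new IllegalStateException("Timed out waiting for the thread to park.");
        }

        private ThreadParkingSketch() {}
    }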
diff --git a/base/android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java b/base/android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java
index ce7e2dc..c071fc6 100644
--- a/base/android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java
+++ b/base/android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java
@@ -54,8 +54,7 @@
     private static final int FREE_CONNECTION_TEST_CALLBACK_START_FAILED = 1;
     private static final int FREE_CONNECTION_TEST_CALLBACK_PROCESS_DIED = 2;
 
-    @Mock
-    private ChildProcessConnection.ServiceCallback mServiceCallback;
+    @Mock private ChildProcessConnection.ServiceCallback mServiceCallback;
 
     static class TestConnectionFactory implements ChildConnectionAllocator.ConnectionFactory {
         private ComponentName mLastServiceName;
@@ -66,22 +65,29 @@
         private ChildProcessConnection.ServiceCallback mConnectionServiceCallback;
 
         @Override
-        public ChildProcessConnection createConnection(Context context, ComponentName serviceName,
-                ComponentName fallbackServiceName, boolean bindToCaller,
-                boolean bindAsExternalService, Bundle serviceBundle, String instanceName) {
+        public ChildProcessConnection createConnection(
+                Context context,
+                ComponentName serviceName,
+                ComponentName fallbackServiceName,
+                boolean bindToCaller,
+                boolean bindAsExternalService,
+                Bundle serviceBundle,
+                String instanceName) {
             mLastServiceName = serviceName;
             mLastInstanceName = instanceName;
             if (mConnection == null) {
                 mConnection = mock(ChildProcessConnection.class);
                 // Retrieve the ServiceCallback so we can simulate the service process dying.
-                doAnswer(new Answer() {
-                    @Override
-                    public Object answer(InvocationOnMock invocation) {
-                        mConnectionServiceCallback =
-                                (ChildProcessConnection.ServiceCallback) invocation.getArgument(1);
-                        return null;
-                    }
-                })
+                doAnswer(
+                                new Answer() {
+                                    @Override
+                                    public Object answer(InvocationOnMock invocation) {
+                                        mConnectionServiceCallback =
+                                                (ChildProcessConnection.ServiceCallback)
+                                                        invocation.getArgument(1);
+                                        return null;
+                                    }
+                                })
                         .when(mConnection)
                         .start(anyBoolean(), any(ChildProcessConnection.ServiceCallback.class));
             }
@@ -102,27 +108,31 @@
 
         // Use this method to have a callback invoked when the connection is started on the next
         // created connection.
-        public void invokeCallbackOnConnectionStart(final boolean onChildStarted,
-                final boolean onStartFailed, final boolean onChildProcessDied) {
+        public void invokeCallbackOnConnectionStart(
+                final boolean onChildStarted,
+                final boolean onStartFailed,
+                final boolean onChildProcessDied) {
             final ChildProcessConnection connection = mock(ChildProcessConnection.class);
             mConnection = connection;
-            doAnswer(new Answer() {
-                @Override
-                public Object answer(InvocationOnMock invocation) {
-                    ChildProcessConnection.ServiceCallback serviceCallback =
-                            (ChildProcessConnection.ServiceCallback) invocation.getArgument(1);
-                    if (onChildStarted) {
-                        serviceCallback.onChildStarted();
-                    }
-                    if (onStartFailed) {
-                        serviceCallback.onChildStartFailed(connection);
-                    }
-                    if (onChildProcessDied) {
-                        serviceCallback.onChildProcessDied(connection);
-                    }
-                    return null;
-                }
-            })
+            doAnswer(
+                            new Answer() {
+                                @Override
+                                public Object answer(InvocationOnMock invocation) {
+                                    ChildProcessConnection.ServiceCallback serviceCallback =
+                                            (ChildProcessConnection.ServiceCallback)
+                                                    invocation.getArgument(1);
+                                    if (onChildStarted) {
+                                        serviceCallback.onChildStarted();
+                                    }
+                                    if (onStartFailed) {
+                                        serviceCallback.onChildStartFailed(connection);
+                                    }
+                                    if (onChildProcessDied) {
+                                        serviceCallback.onChildProcessDied(connection);
+                                    }
+                                    return null;
+                                }
+                            })
                     .when(mConnection)
                     .start(anyBoolean(), any(ChildProcessConnection.ServiceCallback.class));
         }
@@ -146,21 +156,39 @@
     public void setUp() {
         MockitoAnnotations.initMocks(this);
 
-        mAllocator = ChildConnectionAllocator.createFixedForTesting(null, TEST_PACKAGE_NAME,
-                "AllocatorTest", MAX_CONNECTION_NUMBER, true /* bindToCaller */,
-                false /* bindAsExternalService */, false /* useStrongBinding */);
+        mAllocator =
+                ChildConnectionAllocator.createFixedForTesting(
+                        null,
+                        TEST_PACKAGE_NAME,
+                        "AllocatorTest",
+                        MAX_CONNECTION_NUMBER,
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* useStrongBinding= */ false);
         mAllocator.setConnectionFactoryForTesting(mTestConnectionFactory);
 
-        mVariableSizeAllocator = ChildConnectionAllocator.createVariableSizeForTesting(
-                new Handler(), TEST_PACKAGE_NAME, null /* freeSlotCallback */, "AllocatorTest",
-                true /* bindTocall */, false /* bindAsExternalService */,
-                false /* useStrongBinding */, 10);
+        mVariableSizeAllocator =
+                ChildConnectionAllocator.createVariableSizeForTesting(
+                        new Handler(),
+                        TEST_PACKAGE_NAME,
+                        /* freeSlotCallback= */ null,
+                        "AllocatorTest",
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* useStrongBinding= */ false,
+                        10);
         mVariableSizeAllocator.setConnectionFactoryForTesting(mTestConnectionFactory);
 
-        mWorkaroundAllocator = ChildConnectionAllocator.createWorkaroundForTesting(new Handler(),
-                TEST_PACKAGE_NAME, null /* freeSlotCallback */, "AllocatorTest",
-                true /* bindTocall */, false /* bindAsExternalService */,
-                false /* useStrongBinding */, 10);
+        mWorkaroundAllocator =
+                ChildConnectionAllocator.createWorkaroundForTesting(
+                        new Handler(),
+                        TEST_PACKAGE_NAME,
+                        /* freeSlotCallback= */ null,
+                        "AllocatorTest",
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* useStrongBinding= */ false,
+                        10);
         mWorkaroundAllocator.setConnectionFactoryForTesting(mTestConnectionFactory);
     }
 
@@ -171,11 +199,14 @@
         assertEquals(MAX_CONNECTION_NUMBER, mAllocator.getNumberOfServices());
 
         ChildProcessConnection connection =
-                mAllocator.allocate(null /* context */, null /* serviceBundle */, mServiceCallback);
+                mAllocator.allocate(
+                        /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
         assertNotNull(connection);
 
         verify(connection, times(1))
-                .start(eq(false) /* useStrongBinding */,
+                .start(
+                        /* useStrongBinding= */ eq(false),
                         any(ChildProcessConnection.ServiceCallback.class));
         assertTrue(mAllocator.anyConnectionAllocated());
     }
@@ -187,25 +218,33 @@
         assertTrue(mAllocator.isFreeConnectionAvailable());
         Set<ComponentName> serviceNames = new HashSet<>();
         for (int i = 0; i < MAX_CONNECTION_NUMBER; i++) {
-            ChildProcessConnection connection = mAllocator.allocate(
-                    null /* context */, null /* serviceBundle */, mServiceCallback);
+            ChildProcessConnection connection =
+                    mAllocator.allocate(
+                            /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
             assertNotNull(connection);
             ComponentName serviceName = mTestConnectionFactory.getAndResetLastServiceName();
             assertFalse(serviceNames.contains(serviceName));
             serviceNames.add(serviceName);
         }
         assertFalse(mAllocator.isFreeConnectionAvailable());
-        assertNull(mAllocator.allocate(
-                null /* context */, null /* serviceBundle */, mServiceCallback));
+        assertNull(
+                mAllocator.allocate(
+                        /* context= */ null, /* serviceBundle= */ null, mServiceCallback));
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testQueueAllocation() {
         Runnable freeConnectionCallback = mock(Runnable.class);
-        mAllocator = ChildConnectionAllocator.createFixedForTesting(freeConnectionCallback,
-                TEST_PACKAGE_NAME, "AllocatorTest", 1, true /* bindToCaller */,
-                false /* bindAsExternalService */, false /* useStrongBinding */);
+        mAllocator =
+                ChildConnectionAllocator.createFixedForTesting(
+                        freeConnectionCallback,
+                        TEST_PACKAGE_NAME,
+                        "AllocatorTest",
+                        1,
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* useStrongBinding= */ false);
         doTestQueueAllocation(mAllocator, freeConnectionCallback);
     }
 
@@ -213,10 +252,16 @@
     @Feature({"ProcessManagement"})
     public void testQueueAllocationVariableSize() {
         Runnable freeConnectionCallback = mock(Runnable.class);
-        mVariableSizeAllocator = ChildConnectionAllocator.createVariableSizeForTesting(
-                new Handler(), TEST_PACKAGE_NAME, freeConnectionCallback, "AllocatorTest",
-                true /* bindToCaller */, false /* bindAsExternalService */,
-                false /* useStrongBinding */, 1);
+        mVariableSizeAllocator =
+                ChildConnectionAllocator.createVariableSizeForTesting(
+                        new Handler(),
+                        TEST_PACKAGE_NAME,
+                        freeConnectionCallback,
+                        "AllocatorTest",
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* useStrongBinding= */ false,
+                        1);
         doTestQueueAllocation(mVariableSizeAllocator, freeConnectionCallback);
     }
 
@@ -224,9 +269,16 @@
     @Feature({"ProcessManagement"})
     public void testQueueAllocationWorkaround() {
         Runnable freeConnectionCallback = mock(Runnable.class);
-        mWorkaroundAllocator = ChildConnectionAllocator.createWorkaroundForTesting(new Handler(),
-                TEST_PACKAGE_NAME, freeConnectionCallback, "AllocatorTest", true /* bindToCaller */,
-                false /* bindAsExternalService */, false /* useStrongBinding */, 1);
+        mWorkaroundAllocator =
+                ChildConnectionAllocator.createWorkaroundForTesting(
+                        new Handler(),
+                        TEST_PACKAGE_NAME,
+                        freeConnectionCallback,
+                        "AllocatorTest",
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* useStrongBinding= */ false,
+                        1);
         doTestQueueAllocation(mWorkaroundAllocator, freeConnectionCallback);
     }
 
@@ -235,19 +287,28 @@
         allocator.setConnectionFactoryForTesting(mTestConnectionFactory);
         // Occupy all slots.
         ChildProcessConnection connection =
-                allocator.allocate(null /* context */, null /* serviceBundle */, mServiceCallback);
+                allocator.allocate(
+                        /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
         assertNotNull(connection);
         assertEquals(1, allocator.allocatedConnectionsCountForTesting());
 
         final ChildProcessConnection newConnection[] = new ChildProcessConnection[2];
-        Runnable allocate1 = () -> {
-            newConnection[0] = allocator.allocate(
-                    null /* context */, null /* serviceBundle */, mServiceCallback);
-        };
-        Runnable allocate2 = () -> {
-            newConnection[1] = allocator.allocate(
-                    null /* context */, null /* serviceBundle */, mServiceCallback);
-        };
+        Runnable allocate1 =
+                () -> {
+                    newConnection[0] =
+                            allocator.allocate(
+                                    /* context= */ null,
+                                    /* serviceBundle= */ null,
+                                    mServiceCallback);
+                };
+        Runnable allocate2 =
+                () -> {
+                    newConnection[1] =
+                            allocator.allocate(
+                                    /* context= */ null,
+                                    /* serviceBundle= */ null,
+                                    mServiceCallback);
+                };
         allocator.queueAllocation(allocate1);
         allocator.queueAllocation(allocate2);
         verify(freeConnectionCallback, times(1)).run();
@@ -271,12 +332,19 @@
     @Feature({"ProcessManagement"})
     public void testStrongBindingParam() {
         for (boolean useStrongBinding : new boolean[] {true, false}) {
-            ChildConnectionAllocator allocator = ChildConnectionAllocator.createFixedForTesting(
-                    null, TEST_PACKAGE_NAME, "AllocatorTest", MAX_CONNECTION_NUMBER,
-                    true /* bindToCaller */, false /* bindAsExternalService */, useStrongBinding);
+            ChildConnectionAllocator allocator =
+                    ChildConnectionAllocator.createFixedForTesting(
+                            null,
+                            TEST_PACKAGE_NAME,
+                            "AllocatorTest",
+                            MAX_CONNECTION_NUMBER,
+                            /* bindToCaller= */ true,
+                            /* bindAsExternalService= */ false,
+                            useStrongBinding);
             allocator.setConnectionFactoryForTesting(mTestConnectionFactory);
-            ChildProcessConnection connection = allocator.allocate(
-                    null /* context */, null /* serviceBundle */, mServiceCallback);
+            ChildProcessConnection connection =
+                    allocator.allocate(
+                            /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
             verify(connection, times(0)).start(useStrongBinding, mServiceCallback);
         }
     }
@@ -285,14 +353,18 @@
      * Tests that the various ServiceCallbacks are propagated and posted, so they happen after the
      * ChildProcessAllocator,allocate() method has returned.
      */
-    public void runTestWithConnectionCallbacks(ChildConnectionAllocator allocator,
-            boolean onChildStarted, boolean onChildStartFailed, boolean onChildProcessDied) {
+    public void runTestWithConnectionCallbacks(
+            ChildConnectionAllocator allocator,
+            boolean onChildStarted,
+            boolean onChildStartFailed,
+            boolean onChildProcessDied) {
         // We have to pause the Robolectric looper or it'll execute the posted tasks synchronously.
         ShadowLooper.pauseMainLooper();
         mTestConnectionFactory.invokeCallbackOnConnectionStart(
                 onChildStarted, onChildStartFailed, onChildProcessDied);
         ChildProcessConnection connection =
-                allocator.allocate(null /* context */, null /* serviceBundle */, mServiceCallback);
+                allocator.allocate(
+                        /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
         assertNotNull(connection);
 
         // Callbacks are posted.
@@ -309,78 +381,106 @@
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildStartedCallback() {
-        runTestWithConnectionCallbacks(mAllocator, true /* onChildStarted */,
-                false /* onChildStartFailed */, false /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mAllocator,
+                /* onChildStarted= */ true,
+                /* onChildStartFailed= */ false,
+                /* onChildProcessDied= */ false);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildStartedCallbackVariableSize() {
-        runTestWithConnectionCallbacks(mVariableSizeAllocator, true /* onChildStarted */,
-                false /* onChildStartFailed */, false /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mVariableSizeAllocator,
+                /* onChildStarted= */ true,
+                /* onChildStartFailed= */ false,
+                /* onChildProcessDied= */ false);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildStartedCallbackWorkaround() {
-        runTestWithConnectionCallbacks(mWorkaroundAllocator, true /* onChildStarted */,
-                false /* onChildStartFailed */, false /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mWorkaroundAllocator,
+                /* onChildStarted= */ true,
+                /* onChildStartFailed= */ false,
+                /* onChildProcessDied= */ false);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildStartFailedCallback() {
-        runTestWithConnectionCallbacks(mAllocator, false /* onChildStarted */,
-                true /* onChildStartFailed */, false /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mAllocator,
+                /* onChildStarted= */ false,
+                /* onChildStartFailed= */ true,
+                /* onChildProcessDied= */ false);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildStartFailedCallbackVariableSize() {
-        runTestWithConnectionCallbacks(mVariableSizeAllocator, false /* onChildStarted */,
-                true /* onChildStartFailed */, false /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mVariableSizeAllocator,
+                /* onChildStarted= */ false,
+                /* onChildStartFailed= */ true,
+                /* onChildProcessDied= */ false);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildStartFailedCallbackWorkaround() {
-        runTestWithConnectionCallbacks(mWorkaroundAllocator, false /* onChildStarted */,
-                true /* onChildStartFailed */, false /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mWorkaroundAllocator,
+                /* onChildStarted= */ false,
+                /* onChildStartFailed= */ true,
+                /* onChildProcessDied= */ false);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildProcessDiedCallback() {
-        runTestWithConnectionCallbacks(mAllocator, false /* onChildStarted */,
-                false /* onChildStartFailed */, true /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mAllocator,
+                /* onChildStarted= */ false,
+                /* onChildStartFailed= */ false,
+                /* onChildProcessDied= */ true);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildProcessDiedCallbackWithVariableSize() {
-        runTestWithConnectionCallbacks(mVariableSizeAllocator, false /* onChildStarted */,
-                false /* onChildStartFailed */, true /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mVariableSizeAllocator,
+                /* onChildStarted= */ false,
+                /* onChildStartFailed= */ false,
+                /* onChildProcessDied= */ true);
     }
 
     @Test
     @Feature({"ProcessManagement"})
     public void testOnChildProcessDiedCallbackWorkaround() {
-        runTestWithConnectionCallbacks(mWorkaroundAllocator, false /* onChildStarted */,
-                false /* onChildStartFailed */, true /* onChildProcessDied */);
+        runTestWithConnectionCallbacks(
+                mWorkaroundAllocator,
+                /* onChildStarted= */ false,
+                /* onChildStartFailed= */ false,
+                /* onChildProcessDied= */ true);
     }
 
-    /**
-     * Tests that the allocator clears the connection when it fails to bind/process dies.
-     */
+    /** Tests that the allocator clears the connection when it fails to bind/process dies. */
     private void testFreeConnection(ChildConnectionAllocator allocator, int callbackType) {
         ChildProcessConnection connection =
-                allocator.allocate(null /* context */, null /* serviceBundle */, mServiceCallback);
+                allocator.allocate(
+                        /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
 
         assertNotNull(connection);
         ComponentName serviceName = mTestConnectionFactory.getAndResetLastServiceName();
         String instanceName = mTestConnectionFactory.getAndResetLastInstanceName();
         verify(connection, times(1))
-                .start(eq(false) /* useStrongBinding */,
+                .start(
+                        /* useStrongBinding= */ eq(false),
                         any(ChildProcessConnection.ServiceCallback.class));
         assertTrue(allocator.anyConnectionAllocated());
         int onChildStartFailedExpectedCount = 0;
@@ -408,7 +508,8 @@
 
         // Allocate a new connection to make sure we are not getting the same connection.
         connection =
-                allocator.allocate(null /* context */, null /* serviceBundle */, mServiceCallback);
+                allocator.allocate(
+                        /* context= */ null, /* serviceBundle= */ null, mServiceCallback);
         assertNotNull(connection);
         if (instanceName == null) {
             assertNotEquals(mTestConnectionFactory.getAndResetLastServiceName(), serviceName);
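
The allocator tests above repeatedly use Mockito's doAnswer to stash the ServiceCallback handed to a mocked start(), so the test can later invoke it to simulate events such as the child process dying. A minimal stand-alone version of that capture idiom (the Service and Callback interfaces are hypothetical stand-ins, not Chromium types):

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import org.junit.Assert;
    import org.junit.Test;

    public class DoAnswerCaptureSketchTest {
        public interface Callback {
            void onStarted();
        }

        public interface Service {
            void start(Callback callback);
        }

        @Test
        public void doAnswerStashesTheCallbackForLaterUse() {
            Service service = mock(Service.class);
            Callback[] captured = new Callback[1];

            // Stash the callback argument when start() is invoked on the mock.
            doAnswer(invocation -> {
                        captured[0] = invocation.getArgument(0);
                        return null;
                    })
                    .when(service)
                    .start(any(Callback.class));

            service.start(() -> {});
            Assert.assertNotNull(captured[0]);

            // The test can now drive the stashed callback directly.
            captured[0].onStarted();
        }
    }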
diff --git a/base/android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java b/base/android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java
index 1a2ad5d..c2b504d 100644
--- a/base/android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java
+++ b/base/android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java
@@ -67,7 +67,7 @@
 
         @Override
         public boolean bindServiceConnection() {
-            mBound = true;
+            mBound = mBindResult;
             return mBindResult;
         }
 
@@ -115,13 +115,17 @@
         public int getImportanceInGroup() {
             return mImportanceInGroup;
         }
-    };
+    }
 
     private final ChildServiceConnectionFactory mServiceConnectionFactory =
             new ChildServiceConnectionFactory() {
                 @Override
-                public ChildServiceConnection createConnection(Intent bindIntent, int bindFlags,
-                        ChildServiceConnectionDelegate delegate, String instanceName) {
+                public ChildServiceConnection createConnection(
+                        Intent bindIntent,
+                        int bindFlags,
+                        ChildServiceConnectionDelegate delegate,
+                        String instanceName) {
                     ChildServiceConnectionMock connection =
                             spy(new ChildServiceConnectionMock(bindIntent, delegate));
                     if (mFirstServiceConnection == null) {
@@ -132,14 +136,11 @@
                 }
             };
 
-    @Mock
-    private ChildProcessConnection.ServiceCallback mServiceCallback;
+    @Mock private ChildProcessConnection.ServiceCallback mServiceCallback;
 
-    @Mock
-    private ChildProcessConnection.ConnectionCallback mConnectionCallback;
+    @Mock private ChildProcessConnection.ConnectionCallback mConnectionCallback;
 
-    @Mock
-    private ChildProcessConnection.ZygoteInfoCallback mZygoteInfoCallback;
+    @Mock private ChildProcessConnection.ZygoteInfoCallback mZygoteInfoCallback;
 
     private IChildProcessService mIChildProcessService;
 
@@ -160,14 +161,16 @@
         ApplicationInfo appInfo = BuildInfo.getInstance().getBrowserApplicationInfo();
         when(mIChildProcessService.getAppInfo()).thenReturn(appInfo);
         // Capture the parameters passed to the IChildProcessService.setupConnection() call.
-        doAnswer(new Answer<Void>() {
-            @Override
-            public Void answer(InvocationOnMock invocation) {
-                mConnectionBundle = (Bundle) invocation.getArgument(0);
-                mConnectionParentProcess = (IParentProcess) invocation.getArgument(1);
-                return null;
-            }
-        })
+        doAnswer(
+                        new Answer<Void>() {
+                            @Override
+                            public Void answer(InvocationOnMock invocation) {
+                                mConnectionBundle = (Bundle) invocation.getArgument(0);
+                                mConnectionParentProcess =
+                                        (IParentProcess) invocation.getArgument(1);
+                                return null;
+                            }
+                        })
                 .when(mIChildProcessService)
                 .setupConnection(
                         or(isNull(), any(Bundle.class)), or(isNull(), any()), or(isNull(), any()));
@@ -178,35 +181,48 @@
     }
 
     private ChildProcessConnection createDefaultTestConnection() {
-        return createTestConnection(false /* bindToCaller */, false /* bindAsExternalService */,
-                null /* serviceBundle */, false /* useFallback */);
+        return createTestConnection(
+                /* bindToCaller= */ false,
+                /* bindAsExternalService= */ false,
+                /* serviceBundle= */ null,
+                /* useFallback= */ false);
     }
 
-    private ChildProcessConnection createTestConnection(boolean bindToCaller,
-            boolean bindAsExternalService, Bundle serviceBundle, boolean useFallback) {
+    private ChildProcessConnection createTestConnection(
+            boolean bindToCaller,
+            boolean bindAsExternalService,
+            Bundle serviceBundle,
+            boolean useFallback) {
         String packageName = "org.chromium.test";
         String serviceName = "TestService";
         String fallbackServiceName = "TestFallbackService";
-        return new ChildProcessConnection(null /* context */,
+        return new ChildProcessConnection(
+                /* context= */ null,
                 new ComponentName(packageName, serviceName),
                 useFallback ? new ComponentName(packageName, fallbackServiceName) : null,
-                bindToCaller, bindAsExternalService, serviceBundle, mServiceConnectionFactory,
-                null /* instanceName */);
+                bindToCaller,
+                bindAsExternalService,
+                serviceBundle,
+                mServiceConnectionFactory,
+                /* instanceName= */ null);
     }
 
     private void sendPid(int pid) throws RemoteException {
         mConnectionParentProcess.finishSetupConnection(
-                pid, 0 /* zygotePid */, -1 /* zygoteStartupTimeMillis */, null /* relroBundle */);
+                pid,
+                /* zygotePid= */ 0,
+                /* zygoteStartupTimeMillis= */ -1,
+                /* relroBundle= */ null);
     }
 
     @Test
     public void testStrongBinding() {
         ChildProcessConnection connection = createDefaultTestConnection();
-        connection.start(true /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ true, /* serviceCallback= */ null);
         assertTrue(connection.isStrongBindingBound());
 
         connection = createDefaultTestConnection();
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
         assertFalse(connection.isStrongBindingBound());
     }
 
@@ -221,10 +237,14 @@
         String stringValue = "thirty four";
         serviceBundle.putString(stringKey, stringValue);
 
-        ChildProcessConnection connection = createTestConnection(false /* bindToCaller */,
-                false /* bindAsExternalService */, serviceBundle, false /* useFallback */);
+        ChildProcessConnection connection =
+                createTestConnection(
+                        /* bindToCaller= */ false,
+                        /* bindAsExternalService= */ false,
+                        serviceBundle,
+                        /* useFallback= */ false);
         // Start the connection without the ChildServiceConnection connecting.
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
         assertNotNull(mFirstServiceConnection);
         Intent bindIntent = mFirstServiceConnection.getBindIntent();
         assertNotNull(bindIntent);
@@ -236,7 +256,7 @@
     public void testServiceStartsSuccessfully() {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
         Assert.assertTrue(connection.isVisibleBindingBound());
         Assert.assertFalse(connection.didOnServiceConnectedForTesting());
         verify(mServiceCallback, never()).onChildStarted();
@@ -258,7 +278,7 @@
         // Note we use doReturn so the actual bindServiceConnection() method is not called (as it
         // would be with when(mFirstServiceConnection.bindServiceConnection()).thenReturn(false)).
         doReturn(false).when(mFirstServiceConnection).bindServiceConnection();
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
 
         Assert.assertFalse(connection.isVisibleBindingBound());
         Assert.assertFalse(connection.didOnServiceConnectedForTesting());
@@ -271,7 +291,7 @@
     public void testServiceStops() {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         connection.stop();
         verify(mServiceCallback, times(1)).onChildStarted();
@@ -283,7 +303,7 @@
     public void testServiceDisconnects() {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         mFirstServiceConnection.notifyServiceDisconnected();
         verify(mServiceCallback, times(1)).onChildStarted();
@@ -294,10 +314,13 @@
     @Test
     public void testNotBoundToCaller() throws RemoteException {
         ChildProcessConnection connection =
-                createTestConnection(false /* bindToCaller */, false /* bindAsExternalService */,
-                        null /* serviceBundle */, false /* useFallback */);
+                createTestConnection(
+                        /* bindToCaller= */ false,
+                        /* bindAsExternalService= */ false,
+                        /* serviceBundle= */ null,
+                        /* useFallback= */ false);
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         // Service is started and bindToCallback is not called.
         verify(mServiceCallback, times(1)).onChildStarted();
@@ -309,10 +332,13 @@
     @Test
     public void testBoundToCallerSuccess() throws RemoteException {
         ChildProcessConnection connection =
-                createTestConnection(true /* bindToCaller */, false /* bindAsExternalService */,
-                        null /* serviceBundle */, false /* useFallback */);
+                createTestConnection(
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* serviceBundle= */ null,
+                        /* useFallback= */ false);
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
         when(mIChildProcessService.bindToCaller(any())).thenReturn(true);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         // Service is started and bindToCallback is called.
@@ -325,10 +351,13 @@
     @Test
     public void testBoundToCallerFailure() throws RemoteException {
         ChildProcessConnection connection =
-                createTestConnection(true /* bindToCaller */, false /* bindAsExternalService */,
-                        null /* serviceBundle */, false /* useFallback */);
+                createTestConnection(
+                        /* bindToCaller= */ true,
+                        /* bindAsExternalService= */ false,
+                        /* serviceBundle= */ null,
+                        /* useFallback= */ false);
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
         // Pretend bindToCaller returns false, i.e. the service is already bound to a different
         // service.
         when(mIChildProcessService.bindToCaller(any())).thenReturn(false);
@@ -344,9 +373,12 @@
     public void testSetupConnectionBeforeServiceConnected() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         verify(mConnectionCallback, never()).onConnected(any());
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
@@ -359,9 +391,12 @@
     public void testSendPidOnlyWorksOnce() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         verify(mConnectionCallback, never()).onConnected(any());
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
@@ -377,16 +412,23 @@
     public void testZygotePidSaved() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         verify(mConnectionCallback, never()).onConnected(any());
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
 
-        mConnectionParentProcess.finishSetupConnection(123 /* pid */, 456 /* zygotePid = */,
-                789 /* zygoteStartupTimeMillis */, null /* relroBundle */);
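+        // Reporting a non-zero zygote PID is expected to mark the zygote info as usable, which
+        // the assertions below verify via hasUsableZygoteInfo() and getZygotePid().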
+        mConnectionParentProcess.finishSetupConnection(
+                /* pid= */ 123,
+                /* zygotePid= */ 456,
+                /* zygoteStartupTimeMillis= */ 789,
+                /* relroBundle= */ null);
         assertTrue(connection.hasUsableZygoteInfo());
         assertEquals(456, connection.getZygotePid());
     }
@@ -396,15 +438,21 @@
         // Set up |connection1|.
         ChildProcessConnection connection1 = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection1.start(true /* useStrongBinding */, null /* serviceCallback */);
-        connection1.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection1.start(/* useStrongBinding= */ true, /* serviceCallback= */ null);
+        connection1.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
         assertNotNull(mFirstServiceConnection);
-        mConnectionParentProcess.finishSetupConnection(125 /* pid */, 0 /* zygotePid */,
-                -1 /* zygoteStartupTimeMillis */, null /* relroBundle */);
+        mConnectionParentProcess.finishSetupConnection(
+                /* pid= */ 125,
+                /* zygotePid= */ 0,
+                /* zygoteStartupTimeMillis= */ -1,
+                /* relroBundle= */ null);
 
         // Allow the following setupConnection() to create a new service connection for
         // |connection2|.
@@ -413,16 +461,22 @@
         // Set up |connection2|.
         ChildProcessConnection connection2 = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection2.start(false /* useStrongBinding */, null /* serviceCallback */);
-        connection2.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection2.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
+        connection2.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
         assertNotNull(mFirstServiceConnection);
 
-        mConnectionParentProcess.finishSetupConnection(126, /* zygotePid = */ 300,
-                /* zygoteStartupTimeMillis = */ -1, /* relroBundle = */ null);
+        mConnectionParentProcess.finishSetupConnection(
+                /* pid= */ 126,
+                /* zygotePid= */ 300,
+                /* zygoteStartupTimeMillis= */ -1,
+                /* relroBundle= */ null);
         assertTrue(connection2.hasUsableZygoteInfo());
         assertEquals(300, connection2.getZygotePid());
         assertFalse(connection1.hasUsableZygoteInfo());
@@ -432,17 +486,24 @@
     public void testInvokesZygoteCallback() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, mZygoteInfoCallback);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                mZygoteInfoCallback);
         verify(mConnectionCallback, never()).onConnected(any());
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
 
         Bundle relroBundle = new Bundle();
-        mConnectionParentProcess.finishSetupConnection(123 /* pid */, 456 /* zygotePid = */,
-                789 /* zygoteStartupTimeMillis */, relroBundle);
+        mConnectionParentProcess.finishSetupConnection(
+                /* pid= */ 123,
+                /* zygotePid= */ 456,
+                /* zygoteStartupTimeMillis= */ 789,
+                relroBundle);
         assertTrue(connection.hasUsableZygoteInfo());
         assertEquals(456, connection.getZygotePid());
         verify(mZygoteInfoCallback, times(1)).onReceivedZygoteInfo(connection, relroBundle);
@@ -455,16 +516,23 @@
     public void testConsumeZygoteBundle() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, mZygoteInfoCallback);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                mZygoteInfoCallback);
         verify(mConnectionCallback, never()).onConnected(any());
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
         Bundle relroBundle = new Bundle();
-        mConnectionParentProcess.finishSetupConnection(123 /* pid */, 456 /* zygotePid = */,
-                789 /* zygoteStartupTimeMillis */, relroBundle);
+        mConnectionParentProcess.finishSetupConnection(
+                /* pid= */ 123,
+                /* zygotePid= */ 456,
+                /* zygoteStartupTimeMillis= */ 789,
+                relroBundle);
 
         verify(mIChildProcessService, never()).consumeRelroBundle(any());
         connection.consumeZygoteBundle(relroBundle);
@@ -475,10 +543,13 @@
     public void testSetupConnectionAfterServiceConnected() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         verify(mConnectionCallback, never()).onConnected(any());
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
@@ -490,10 +561,13 @@
     public void testKill() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         verify(mConnectionCallback, never()).onConnected(any());
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
@@ -523,7 +597,7 @@
     @Test
     public void testUpdateGroupImportanceSmoke() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
         when(mIChildProcessService.bindToCaller(any())).thenReturn(true);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         connection.updateGroupImportance(1, 2);
@@ -540,10 +614,13 @@
     public void testExceptionDuringInit() throws RemoteException {
         ChildProcessConnection connection = createDefaultTestConnection();
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, null /* serviceCallback */);
+        connection.start(/* useStrongBinding= */ false, /* serviceCallback= */ null);
         mFirstServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
         verify(mConnectionCallback, never()).onConnected(any());
         ShadowLooper.runUiThreadTasks();
         assertNotNull(mConnectionParentProcess);
@@ -564,10 +641,14 @@
         final int intValue = 34;
         serviceBundle.putInt(intKey, intValue);
 
-        ChildProcessConnection connection = createTestConnection(false /* bindToCaller */,
-                false /* bindAsExternalService */, serviceBundle, true /* useFallback */);
+        ChildProcessConnection connection =
+                createTestConnection(
+                        /* bindToCaller= */ false,
+                        /* bindAsExternalService= */ false,
+                        serviceBundle,
+                        /* useFallback= */ true);
         assertNotNull(mFirstServiceConnection);
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
 
         Assert.assertEquals(3, mMockConnections.size());
         boolean anyServiceConnectionBound = false;
@@ -585,8 +666,11 @@
             Assert.assertEquals("TestService", bindIntent.getComponent().getClassName());
         }
 
-        connection.setupConnection(null /* connectionBundle */, null /* callback */,
-                mConnectionCallback, null /* zygoteInfoCallback */);
+        connection.setupConnection(
+                /* connectionBundle= */ null,
+                /* callback= */ null,
+                mConnectionCallback,
+                /* zygoteInfoCallback= */ null);
 
         // Do not call onServiceConnected. Simulate timeout with ShadowLooper.
         ShadowLooper.runUiThreadTasksIncludingDelayedTasks();
@@ -627,12 +711,16 @@
         final int intValue = 34;
         serviceBundle.putInt(intKey, intValue);
 
-        ChildProcessConnection connection = createTestConnection(false /* bindToCaller */,
-                false /* bindAsExternalService */, serviceBundle, true /* useFallback */);
+        ChildProcessConnection connection =
+                createTestConnection(
+                        /* bindToCaller= */ false,
+                        /* bindAsExternalService= */ false,
+                        serviceBundle,
+                        /* useFallback= */ true);
         assertNotNull(mFirstServiceConnection);
         mFirstServiceConnection.setBindResult(false);
 
-        connection.start(false /* useStrongBinding */, mServiceCallback);
+        connection.start(/* useStrongBinding= */ false, mServiceCallback);
 
         verify(mServiceCallback, never()).onChildStarted();
         verify(mServiceCallback, never()).onChildStartFailed(any());
@@ -646,9 +734,11 @@
         }
         // New connection for fallback service should be bound.
         ChildServiceConnectionMock boundServiceConnection = null;
+        int boundConnectionCount = 0;
         for (int i = 3; i < 6; ++i) {
             if (mMockConnections.get(i).isBound()) {
                 boundServiceConnection = mMockConnections.get(i);
+                boundConnectionCount++;
             }
             Intent bindIntent = mMockConnections.get(i).getBindIntent();
             assertNotNull(bindIntent);
@@ -656,6 +746,9 @@
             Assert.assertEquals("TestFallbackService", bindIntent.getComponent().getClassName());
         }
 
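+        // After falling back, more than one binding (assumed to be at least the waived and
+        // visible ones) should be bound to the fallback service.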
+        Assert.assertTrue(boundConnectionCount >= 2);
+        Assert.assertTrue(connection.isVisibleBindingBound());
+
         // Complete connection.
         boundServiceConnection.notifyServiceConnected(mChildProcessServiceBinder);
         verify(mServiceCallback, times(1)).onChildStarted();
diff --git a/base/android/junit/src/org/chromium/base/shared_preferences/KeyPrefixTest.java b/base/android/junit/src/org/chromium/base/shared_preferences/KeyPrefixTest.java
new file mode 100644
index 0000000..ecc30d6
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/shared_preferences/KeyPrefixTest.java
@@ -0,0 +1,63 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import androidx.test.filters.SmallTest;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/**
+ * Unit tests for {@link KeyPrefix}.
+ */
+@RunWith(BaseRobolectricTestRunner.class)
+public class KeyPrefixTest {
+    @Test
+    @SmallTest
+    public void testSuccess_validPattern() {
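+        // A KeyPrefix represents a family of keys sharing a "Prefix.*" pattern: createKey()
+        // substitutes the stem (or an int) and hasGenerated() checks whether a full key belongs
+        // to the prefix.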
+        KeyPrefix prefix = new KeyPrefix("Chrome.Feature.KP.*");
+
+        assertEquals(prefix.pattern(), "Chrome.Feature.KP.*");
+
+        assertEquals(prefix.createKey("DynamicKey"), "Chrome.Feature.KP.DynamicKey");
+        assertEquals(prefix.createKey("Level.DynamicKey"), "Chrome.Feature.KP.Level.DynamicKey");
+        assertEquals(prefix.createKey(42), "Chrome.Feature.KP.42");
+
+        assertTrue(prefix.hasGenerated("Chrome.Feature.KP.DynamicKey"));
+        assertTrue(prefix.hasGenerated("Chrome.Feature.KP.Level.DynamicKey"));
+        assertTrue(prefix.hasGenerated("Chrome.Feature.KP.42"));
+        assertFalse(prefix.hasGenerated("OtherKey"));
+    }
+
+    @Test
+    @SmallTest
+    public void testSuccess_validLegacyPattern() {
+        KeyPrefix prefix = new KeyPrefix("legacy_pattern_*");
+
+        assertEquals(prefix.pattern(), "legacy_pattern_*");
+        assertEquals(prefix.createKey("DynamicKey"), "legacy_pattern_DynamicKey");
+
+        assertTrue(prefix.hasGenerated("legacy_pattern_DynamicKey"));
+        assertFalse(prefix.hasGenerated("OtherKey"));
+    }
+
+    @Test(expected = AssertionError.class)
+    @SmallTest
+    public void testError_missingPeriod() {
+        new KeyPrefix("Chrome.Feature.KP");
+    }
+
+    @Test(expected = AssertionError.class)
+    @SmallTest
+    public void testError_missingStar() {
+        new KeyPrefix("Chrome.Feature.KP.");
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistriesTest.java b/base/android/junit/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistriesTest.java
new file mode 100644
index 0000000..deb10a0
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/shared_preferences/KnownPreferenceKeyRegistriesTest.java
@@ -0,0 +1,99 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import static org.junit.Assert.fail;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+/** Unit tests for {@link KnownPreferenceKeyRegistries}. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class KnownPreferenceKeyRegistriesTest {
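+    // Registries passed to initializeKnownRegistries() are "known"; using any other registry
+    // through SharedPreferencesManager should trigger an assertion, whether the usage happens
+    // before or after initialization.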
+    private static final String KEY_1 = "Chrome.Feature.Key1";
+    private static final PreferenceKeyRegistry KNOWN_1 =
+            createRegistryWithOneKey("known_registry1", KEY_1);
+
+    private static final String KEY_2 = "Chrome.Feature.Key2";
+    private static final PreferenceKeyRegistry KNOWN_2 =
+            createRegistryWithOneKey("known_registry2", KEY_2);
+
+    private static final String KEY_3 = "Chrome.Feature.Key3";
+    private static final PreferenceKeyRegistry UNKNOWN =
+            createRegistryWithOneKey("unknown_registry", KEY_3);
+
+    @Before
+    public void setUp() {
+        KnownPreferenceKeyRegistries.clearForTesting();
+    }
+
+    @Test
+    public void testOnlyKnownUsedAfterInit_noAssertion() {
+        KnownPreferenceKeyRegistries.initializeKnownRegistries(Set.of(KNOWN_1, KNOWN_2));
+
+        SharedPreferencesManager.getInstanceForRegistry(KNOWN_1).writeInt(KEY_1, 42);
+        SharedPreferencesManager.getInstanceForRegistry(KNOWN_2).writeInt(KEY_2, 43);
+    }
+
+    @Test
+    public void testOnlyKnownUsedBeforeInit_noAssertion() {
+        SharedPreferencesManager.getInstanceForRegistry(KNOWN_1).writeInt(KEY_1, 42);
+        SharedPreferencesManager.getInstanceForRegistry(KNOWN_2).writeInt(KEY_2, 43);
+
+        KnownPreferenceKeyRegistries.initializeKnownRegistries(Set.of(KNOWN_1, KNOWN_2));
+    }
+
+    @Test
+    public void testUnknownUsedAfterInit_assertion() {
+        KnownPreferenceKeyRegistries.initializeKnownRegistries(Set.of(KNOWN_1, KNOWN_2));
+
+        try {
+            SharedPreferencesManager.getInstanceForRegistry(UNKNOWN).writeInt(KEY_3, 42);
+        } catch (AssertionError e) {
+            assertContains("An unknown registry was used", e.getMessage());
+            assertContains("unknown_registry", e.getMessage());
+            return;
+        }
+        fail("Expected AssertionError");
+    }
+
+    @Test
+    public void testUnknownUsedBeforeInit_assertion() {
+        SharedPreferencesManager.getInstanceForRegistry(UNKNOWN).writeInt(KEY_3, 42);
+
+        try {
+            KnownPreferenceKeyRegistries.initializeKnownRegistries(Set.of(KNOWN_1, KNOWN_2));
+        } catch (AssertionError e) {
+            assertContains("Unknown registries were used", e.getMessage());
+            assertContains("unknown_registry", e.getMessage());
+            return;
+        }
+        fail("Expected AssertionError");
+    }
+
+    private static PreferenceKeyRegistry createRegistryWithOneKey(String name, String key) {
+        return new PreferenceKeyRegistry(
+                name, List.of(key), Collections.emptyList(), Collections.emptyList());
+    }
+
+    // TODO: Unify with HistogramWatcherTestBase's version.
+    protected static void assertContains(String expectedSubstring, String actualString) {
+        Assert.assertNotNull(actualString);
+        if (!actualString.contains(expectedSubstring)) {
+            fail(
+                    String.format(
+                            "Substring <%s> not found in string <%s>",
+                            expectedSubstring, actualString));
+        }
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/shared_preferences/SharedPreferencesManagerTest.java b/base/android/junit/src/org/chromium/base/shared_preferences/SharedPreferencesManagerTest.java
new file mode 100644
index 0000000..5648833
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/shared_preferences/SharedPreferencesManagerTest.java
@@ -0,0 +1,569 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import androidx.test.filters.SmallTest;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/** Unit tests for {@link SharedPreferencesManager}. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class SharedPreferencesManagerTest {
+    @Mock private PreferenceKeyChecker mChecker;
+
+    private static final KeyPrefix TEST_PREFIX = new KeyPrefix("TestPrefix.*");
+    private static final String PREFIXED_KEY_1 = TEST_PREFIX.createKey("stemA");
+    private static final String PREFIXED_KEY_2 = TEST_PREFIX.createKey("stemB");
+    private static final String PREFIXED_KEY_3 = TEST_PREFIX.createKey(33);
+
+    private SharedPreferencesManager mSubject;
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+        mSubject = new SharedPreferencesManager(mChecker);
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadInt() {
+        // Verify default return values when no value is written.
+        assertEquals(0, mSubject.readInt("int_key"));
+        assertEquals(987, mSubject.readInt("int_key", 987));
+        assertFalse(mSubject.contains("int_key"));
+
+        // Write a value.
+        mSubject.writeInt("int_key", 123);
+
+        // Verify value written can be read.
+        assertEquals(123, mSubject.readInt("int_key"));
+        assertEquals(123, mSubject.readInt("int_key", 987));
+        assertTrue(mSubject.contains("int_key"));
+
+        // Remove the value.
+        mSubject.removeKey("int_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals(0, mSubject.readInt("int_key"));
+        assertFalse(mSubject.contains("int_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testIncrementInt() {
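+        // incrementInt() is expected to add 1 to the stored value (treating a missing key as 0)
+        // and return the new value.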
+        mSubject.writeInt("int_key", 100);
+        int result = mSubject.incrementInt("int_key");
+
+        assertEquals(101, result);
+        assertEquals(101, mSubject.readInt("int_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testIncrementIntDefault() {
+        int result = mSubject.incrementInt("int_key");
+
+        assertEquals(1, result);
+        assertEquals(1, mSubject.readInt("int_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadBoolean() {
+        // Verify default return values when no value is written.
+        assertEquals(false, mSubject.readBoolean("bool_key", false));
+        assertEquals(true, mSubject.readBoolean("bool_key", true));
+        assertFalse(mSubject.contains("bool_key"));
+
+        // Write a value.
+        mSubject.writeBoolean("bool_key", true);
+
+        // Verify value written can be read.
+        assertEquals(true, mSubject.readBoolean("bool_key", false));
+        assertEquals(true, mSubject.readBoolean("bool_key", true));
+        assertTrue(mSubject.contains("bool_key"));
+
+        // Remove the value.
+        mSubject.removeKey("bool_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals(false, mSubject.readBoolean("bool_key", false));
+        assertEquals(true, mSubject.readBoolean("bool_key", true));
+        assertFalse(mSubject.contains("bool_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadString() {
+        // Verify default return values when no value is written.
+        assertEquals("default", mSubject.readString("string_key", "default"));
+        assertFalse(mSubject.contains("string_key"));
+
+        // Write a value.
+        mSubject.writeString("string_key", "foo");
+
+        // Verify value written can be read.
+        assertEquals("foo", mSubject.readString("string_key", "default"));
+        assertTrue(mSubject.contains("string_key"));
+
+        // Remove the value.
+        mSubject.removeKey("string_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals("default", mSubject.readString("string_key", "default"));
+        assertFalse(mSubject.contains("string_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadLong() {
+        // Verify default return values when no value is written.
+        assertEquals(0, mSubject.readLong("long_key"));
+        assertEquals(9876543210L, mSubject.readLong("long_key", 9876543210L));
+        assertFalse(mSubject.contains("long_key"));
+
+        // Write a value.
+        mSubject.writeLong("long_key", 9999999999L);
+
+        // Verify value written can be read.
+        assertEquals(9999999999L, mSubject.readLong("long_key"));
+        assertEquals(9999999999L, mSubject.readLong("long_key", 9876543210L));
+        assertTrue(mSubject.contains("long_key"));
+
+        // Remove the value.
+        mSubject.removeKey("long_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals(0, mSubject.readLong("long_key"));
+        assertFalse(mSubject.contains("long_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadFloat() {
+        // Verify default return values when no value is written.
+        assertEquals(1.5f, mSubject.readFloat("float_key", 1.5f), 0.001f);
+        assertFalse(mSubject.contains("float_key"));
+
+        // Write a value.
+        mSubject.writeFloat("float_key", 42.42f);
+
+        // Verify value written can be read.
+        assertEquals(42.42f, mSubject.readFloat("float_key", 1.5f), 0.001f);
+        assertTrue(mSubject.contains("float_key"));
+
+        // Remove the value.
+        mSubject.removeKey("float_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals(1.5f, mSubject.readFloat("float_key", 1.5f), 0.001f);
+        assertFalse(mSubject.contains("float_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadDouble() {
+        // Verify default return values when no value is written.
+        assertEquals(1.5d, mSubject.readDouble("double_key", 1.5d), 0.001f);
+        assertFalse(mSubject.contains("double_key"));
+
+        // Write a value.
+        mSubject.writeDouble("double_key", 42.42);
+
+        // Verify value written can be read.
+        assertEquals(42.42d, mSubject.readDouble("double_key", 1.5d), 0.001f);
+        assertTrue(mSubject.contains("double_key"));
+
+        // Remove the value.
+        mSubject.removeKey("double_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals(1.5d, mSubject.readDouble("double_key", 1.5d), 0.001f);
+        assertFalse(mSubject.contains("double_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteReadStringSet() {
+        Set<String> defaultStringSet = new HashSet<>(Arrays.asList("a", "b", "c"));
+        Set<String> exampleStringSet = new HashSet<>(Arrays.asList("d", "e"));
+
+        // Verify default return values when no value is written.
+        assertEquals(Collections.emptySet(), mSubject.readStringSet("string_set_key"));
+        assertEquals(defaultStringSet, mSubject.readStringSet("string_set_key", defaultStringSet));
+        assertNull(mSubject.readStringSet("string_set_key", null));
+        assertFalse(mSubject.contains("string_set_key"));
+
+        // Write a value.
+        mSubject.writeStringSet("string_set_key", exampleStringSet);
+
+        // Verify value written can be read.
+        assertEquals(exampleStringSet, mSubject.readStringSet("string_set_key"));
+        assertEquals(exampleStringSet, mSubject.readStringSet("string_set_key", defaultStringSet));
+        assertEquals(exampleStringSet, mSubject.readStringSet("string_set_key", null));
+        assertTrue(mSubject.contains("string_set_key"));
+
+        // Remove the value.
+        mSubject.removeKey("string_set_key");
+
+        // Verify the removed value is not returned anymore.
+        assertEquals(Collections.emptySet(), mSubject.readStringSet("string_set_key"));
+        assertFalse(mSubject.contains("string_set_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testAddToStringSet() {
+        mSubject.writeStringSet("string_set_key", new HashSet<>(Collections.singletonList("bar")));
+        mSubject.addToStringSet("string_set_key", "foo");
+
+        assertEquals(
+                new HashSet<>(Arrays.asList("foo", "bar")),
+                mSubject.readStringSet("string_set_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testAddToStringSetDefault() {
+        mSubject.addToStringSet("string_set_key", "foo");
+
+        assertEquals(
+                new HashSet<>(Collections.singletonList("foo")),
+                mSubject.readStringSet("string_set_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testRemoveFromStringSet() {
+        mSubject.writeStringSet("string_set_key", new HashSet<>(Arrays.asList("foo", "bar")));
+        mSubject.removeFromStringSet("string_set_key", "foo");
+
+        assertEquals(
+                new HashSet<>(Collections.singletonList("bar")),
+                mSubject.readStringSet("string_set_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testRemoveFromStringSetDefault() {
+        mSubject.removeFromStringSet("string_set_key", "foo");
+
+        assertEquals(Collections.emptySet(), mSubject.readStringSet("string_set_key"));
+    }
+
+    @Test(expected = UnsupportedOperationException.class)
+    @SmallTest
+    public void testReadStringSet_nonEmpty_returnsUnmodifiable() {
+        Set<String> exampleStringSet = new HashSet<>(Arrays.asList("d", "e"));
+        mSubject.writeStringSet("string_set_key", exampleStringSet);
+
+        Set<String> unmodifiableSet = mSubject.readStringSet("string_set_key");
+
+        // Should throw an exception
+        unmodifiableSet.add("f");
+    }
+
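+    // The *Sync write/remove variants are expected to persist synchronously (presumably via
+    // commit() rather than apply()), so a value written on a background thread is visible
+    // immediately after join().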
+    @Test
+    @SmallTest
+    public void testWriteIntSync() throws InterruptedException {
+        // Verify default return values when no value is written.
+        assertEquals(0, mSubject.readInt("int_key"));
+
+        // Write a value on a background thread.
+        Thread t = new Thread(() -> mSubject.writeIntSync("int_key", 123));
+        t.start();
+        t.join();
+
+        // Verify value written can be read.
+        assertEquals(123, mSubject.readInt("int_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteBooleanSync() throws InterruptedException {
+        // Verify default return values when no value is written.
+        assertEquals(false, mSubject.readBoolean("bool_key", false));
+
+        // Write a value on a background thread.
+        Thread t = new Thread(() -> mSubject.writeBooleanSync("bool_key", true));
+        t.start();
+        t.join();
+
+        // Verify value written can be read.
+        assertEquals(true, mSubject.readBoolean("bool_key", false));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteStringSync() throws InterruptedException {
+        // Verify default return values when no value is written.
+        assertEquals("default", mSubject.readString("string_key", "default"));
+
+        // Write a value on a background thread.
+        Thread t = new Thread(() -> mSubject.writeStringSync("string_key", "foo"));
+        t.start();
+        t.join();
+
+        // Verify value written can be read.
+        assertEquals("foo", mSubject.readString("string_key", "default"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteLongSync() throws InterruptedException {
+        // Verify default return values when no value is written.
+        assertEquals(0, mSubject.readLong("long_key"));
+
+        // Write a value on a background thread.
+        Thread t = new Thread(() -> mSubject.writeLongSync("long_key", 9999999999L));
+        t.start();
+        t.join();
+
+        // Verify value written can be read.
+        assertEquals(9999999999L, mSubject.readLong("long_key"));
+    }
+
+    @Test
+    @SmallTest
+    public void testWriteFloatSync() throws InterruptedException {
+        // Verify default return values when no value is written.
+        assertEquals(0f, mSubject.readFloat("float_key", 0f), 0f);
+
+        // Write a value on a background thread.
+        Thread t = new Thread(() -> mSubject.writeFloatSync("float_key", 42.42f));
+        t.start();
+        t.join();
+
+        // Verify value written can be read.
+        assertEquals(42.42f, mSubject.readFloat("float_key", 1.5f), 0.001f);
+    }
+
+    @Test
+    @SmallTest
+    public void testRemoveKeySync() throws InterruptedException {
+        // Write a value.
+        mSubject.writeInt("int_key", 123);
+        assertEquals(123, mSubject.readInt("int_key", 999));
+
+        // Remove the key on a background thread.
+        Thread t = new Thread(() -> mSubject.removeKeySync("int_key"));
+        t.start();
+        t.join();
+
+        // Verify value was removed.
+        assertEquals(999, mSubject.readInt("int_key", 999));
+    }
+
+    @Test
+    @SmallTest
+    public void testRemoveKeys() {
+        KeyPrefix otherPrefix = new KeyPrefix("OtherPrefix.*");
+
+        // Write some values, both prefixes and not prefixed.
+        mSubject.writeInt(PREFIXED_KEY_1, 111);
+        mSubject.writeInt(PREFIXED_KEY_2, 222);
+        mSubject.writeInt(PREFIXED_KEY_3, 333);
+        mSubject.writeInt(otherPrefix.createKey("stemA"), 444);
+        mSubject.writeInt("OtherKey", 555);
+
+        // Remove them
+        mSubject.removeKeysWithPrefix(TEST_PREFIX);
+
+        // Verify only values for the given prefix were removed.
+        assertEquals(0, mSubject.readInt(PREFIXED_KEY_1, 0));
+        assertEquals(0, mSubject.readInt(PREFIXED_KEY_2, 0));
+        assertEquals(0, mSubject.readInt(PREFIXED_KEY_3, 0));
+        assertEquals(444, mSubject.readInt(otherPrefix.createKey("stemA"), 0));
+        assertEquals(555, mSubject.readInt("OtherKey", 0));
+    }
+
+    @Test
+    @SmallTest
+    public void testReadStringsWithPrefix() {
+        // Write some values.
+        mSubject.writeString(PREFIXED_KEY_1, "first");
+        mSubject.writeString(PREFIXED_KEY_2, "second");
+        mSubject.writeString(PREFIXED_KEY_3, "third");
+        mSubject.writeString("OtherKey", "fourth");
+
+        // Verify values written are read with readStringsWithPrefix().
+        Map<String, String> result = mSubject.readStringsWithPrefix(TEST_PREFIX);
+        assertEquals(3, result.size());
+
+        assertEquals("first", result.get(PREFIXED_KEY_1));
+        assertEquals("second", result.get(PREFIXED_KEY_2));
+        assertEquals("third", result.get(PREFIXED_KEY_3));
+    }
+
+    @Test
+    @SmallTest
+    public void testReadIntsWithPrefix() {
+        // Write some values.
+        mSubject.writeInt(PREFIXED_KEY_1, 1);
+        mSubject.writeInt(PREFIXED_KEY_2, 2);
+        mSubject.writeInt(PREFIXED_KEY_3, 3);
+        mSubject.writeInt("OtherKey", 4);
+
+        // Verify values written are read with readIntsWithPrefix().
+        Map<String, Integer> result = mSubject.readIntsWithPrefix(TEST_PREFIX);
+        assertEquals(3, result.size());
+        assertEquals(1, result.get(PREFIXED_KEY_1).intValue());
+        assertEquals(2, result.get(PREFIXED_KEY_2).intValue());
+        assertEquals(3, result.get(PREFIXED_KEY_3).intValue());
+    }
+
+    @Test
+    @SmallTest
+    public void testReadLongsWithPrefix() {
+        // Write some values.
+        mSubject.writeLong(PREFIXED_KEY_1, 21474836470001L);
+        mSubject.writeLong(PREFIXED_KEY_2, 21474836470002L);
+        mSubject.writeLong(PREFIXED_KEY_3, 21474836470003L);
+        mSubject.writeLong("OtherKey", 21474836470004L);
+
+        // Verify values written are read with readLongsWithPrefix().
+        Map<String, Long> result = mSubject.readLongsWithPrefix(TEST_PREFIX);
+        assertEquals(3, result.size());
+        assertEquals(21474836470001L, result.get(PREFIXED_KEY_1).longValue());
+        assertEquals(21474836470002L, result.get(PREFIXED_KEY_2).longValue());
+        assertEquals(21474836470003L, result.get(PREFIXED_KEY_3).longValue());
+    }
+
+    @Test
+    @SmallTest
+    public void testReadFloatsWithPrefix() {
+        // Write some values.
+        mSubject.writeFloat(PREFIXED_KEY_1, 1.0f);
+        mSubject.writeFloat(PREFIXED_KEY_2, 2.5f);
+        mSubject.writeFloat(PREFIXED_KEY_3, 3.5f);
+        mSubject.writeFloat("OtherKey", 4.0f);
+
+        // Verify values written are read with readFloatsWithPrefix().
+        Map<String, Float> result = mSubject.readFloatsWithPrefix(TEST_PREFIX);
+        assertEquals(3, result.size());
+        assertEquals(1.0f, result.get(PREFIXED_KEY_1), 1e-10);
+        assertEquals(2.5f, result.get(PREFIXED_KEY_2), 1e-10);
+        assertEquals(3.5f, result.get(PREFIXED_KEY_3), 1e-10);
+    }
+
+    @Test
+    @SmallTest
+    public void testReadDoublesWithPrefix() {
+        // Write some values.
+        mSubject.writeDouble(PREFIXED_KEY_1, 1.0);
+        mSubject.writeDouble(PREFIXED_KEY_2, 2.5);
+        mSubject.writeDouble(PREFIXED_KEY_3, 3.5);
+        mSubject.writeDouble("OtherKey", 4.0);
+
+        // Verify values written are read with readDoublesWithPrefix().
+        Map<String, Double> result = mSubject.readDoublesWithPrefix(TEST_PREFIX);
+        assertEquals(3, result.size());
+        assertEquals(1.0, result.get(PREFIXED_KEY_1), 1e-10);
+        assertEquals(2.5, result.get(PREFIXED_KEY_2), 1e-10);
+        assertEquals(3.5, result.get(PREFIXED_KEY_3).doubleValue(), 1e-10);
+    }
+
+    @Test
+    @SmallTest
+    public void testReadBooleansWithPrefix() {
+        // Write some values.
+        mSubject.writeBoolean(PREFIXED_KEY_1, true);
+        mSubject.writeBoolean(PREFIXED_KEY_2, false);
+        mSubject.writeBoolean(PREFIXED_KEY_3, true);
+        mSubject.writeBoolean("OtherKey", true);
+
+        // Verify values written are read with readBooleansWithPrefix().
+        Map<String, Boolean> result = mSubject.readBooleansWithPrefix(TEST_PREFIX);
+        assertEquals(3, result.size());
+        assertTrue(result.get(PREFIXED_KEY_1));
+        assertFalse(result.get(PREFIXED_KEY_2));
+        assertTrue(result.get(PREFIXED_KEY_3));
+    }
+
+    @Test
+    @SmallTest
+    public void testCheckerIsCalled() {
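+        // Every key-based operation should route through checkIsKeyInUse() and every
+        // prefix-based operation through checkIsPrefixInUse(); the verify counts below are
+        // cumulative per key.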
+        mSubject.writeInt("int_key", 123);
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("int_key"));
+        mSubject.readInt("int_key");
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("int_key"));
+        mSubject.incrementInt("int_key");
+        verify(mChecker, times(3)).checkIsKeyInUse(eq("int_key"));
+
+        mSubject.writeBoolean("bool_key", true);
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("bool_key"));
+        mSubject.readBoolean("bool_key", false);
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("bool_key"));
+
+        mSubject.writeString("string_key", "foo");
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("string_key"));
+        mSubject.readString("string_key", "");
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("string_key"));
+
+        mSubject.writeLong("long_key", 999L);
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("long_key"));
+        mSubject.readLong("long_key");
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("long_key"));
+
+        mSubject.writeFloat("float_key", 2.5f);
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("float_key"));
+        mSubject.readFloat("float_key", 0f);
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("float_key"));
+
+        mSubject.writeDouble("double_key", 2.5d);
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("double_key"));
+        mSubject.readDouble("double_key", 0d);
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("double_key"));
+
+        mSubject.writeStringSet("string_set_key", new HashSet<>());
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("string_set_key"));
+        mSubject.readStringSet("string_set_key");
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("string_set_key"));
+        mSubject.addToStringSet("string_set_key", "bar");
+        verify(mChecker, times(3)).checkIsKeyInUse(eq("string_set_key"));
+        mSubject.removeFromStringSet("string_set_key", "bar");
+        verify(mChecker, times(4)).checkIsKeyInUse(eq("string_set_key"));
+
+        mSubject.removeKey("some_key");
+        verify(mChecker, times(1)).checkIsKeyInUse(eq("some_key"));
+        mSubject.contains("some_key");
+        verify(mChecker, times(2)).checkIsKeyInUse(eq("some_key"));
+
+        mSubject.readBooleansWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(1)).checkIsPrefixInUse(eq(TEST_PREFIX));
+        mSubject.readIntsWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(2)).checkIsPrefixInUse(eq(TEST_PREFIX));
+        mSubject.readLongsWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(3)).checkIsPrefixInUse(eq(TEST_PREFIX));
+        mSubject.readFloatsWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(4)).checkIsPrefixInUse(eq(TEST_PREFIX));
+        mSubject.readDoublesWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(5)).checkIsPrefixInUse(eq(TEST_PREFIX));
+        mSubject.readStringsWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(6)).checkIsPrefixInUse(eq(TEST_PREFIX));
+        mSubject.removeKeysWithPrefix(TEST_PREFIX);
+        verify(mChecker, times(7)).checkIsPrefixInUse(eq(TEST_PREFIX));
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/shared_preferences/StrictPreferenceKeyCheckerTest.java b/base/android/junit/src/org/chromium/base/shared_preferences/StrictPreferenceKeyCheckerTest.java
new file mode 100644
index 0000000..52a1335
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/shared_preferences/StrictPreferenceKeyCheckerTest.java
@@ -0,0 +1,107 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.shared_preferences;
+
+import androidx.test.filters.SmallTest;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.Arrays;
+import java.util.List;
+
+/** Unit tests for {@link StrictPreferenceKeyChecker}. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class StrictPreferenceKeyCheckerTest {
+    private static final String KEY1_IN_USE = "Chrome.Feature.Key1";
+    private static final String KEY2_IN_USE = "Chrome.Feature.Key2";
+    private static final String KEY3_NOT_IN_USE = "Chrome.Feature.Key3";
+    private static final KeyPrefix KEY_PREFIX1_IN_USE =
+            new KeyPrefix("Chrome.Feature.KeyPrefix1.*");
+    private static final KeyPrefix KEY_PREFIX2_IN_USE =
+            new KeyPrefix("Chrome.Feature.KeyPrefix2.*");
+    private static final KeyPrefix KEY_PREFIX3_NOT_IN_USE =
+            new KeyPrefix("Chrome.Feature.KeyPrefix3.*");
+    private static final String LEGACY_KEY_IN_USE = "legacykey";
+    private static final String LEGACY_PREFIX_IN_USE = "legacyprefix_";
+
+    private StrictPreferenceKeyChecker mSubject;
+
+    @Before
+    public void setUp() {
+        List<String> keysInUse =
+                Arrays.asList(
+                        KEY1_IN_USE,
+                        KEY2_IN_USE,
+                        KEY_PREFIX1_IN_USE.pattern(),
+                        KEY_PREFIX2_IN_USE.pattern());
+        List<String> legacyKeys = Arrays.asList(LEGACY_KEY_IN_USE);
+        List<KeyPrefix> legacyPrefixes = Arrays.asList(new KeyPrefix(LEGACY_PREFIX_IN_USE + "*"));
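+        // The registry lists the module name, the keys and prefix patterns in use, plus legacy
+        // keys and legacy prefixes that remain allowed despite not following the
+        // "Chrome.Feature.*" naming format.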
+        PreferenceKeyRegistry registry =
+                new PreferenceKeyRegistry("testModule", keysInUse, legacyKeys, legacyPrefixes);
+        mSubject = new StrictPreferenceKeyChecker(registry);
+    }
+
+    @Test
+    @SmallTest
+    public void testRegularKeys_registered_noException() {
+        mSubject.checkIsKeyInUse(KEY1_IN_USE);
+        mSubject.checkIsKeyInUse(KEY2_IN_USE);
+        mSubject.checkIsKeyInUse(LEGACY_KEY_IN_USE);
+        mSubject.checkIsKeyInUse(LEGACY_PREFIX_IN_USE + "restofkey");
+    }
+
+    @Test(expected = RuntimeException.class)
+    @SmallTest
+    public void testRegularKeys_notRegistered_throwsException() {
+        mSubject.checkIsKeyInUse(KEY3_NOT_IN_USE);
+    }
+
+    @Test
+    @SmallTest
+    public void testPrefixedKeys_noException() {
+        mSubject.checkIsKeyInUse(KEY_PREFIX1_IN_USE.createKey("restofkey"));
+    }
+
+    @Test
+    @SmallTest
+    public void testPrefixedKeys_multipleLevels_noException() {
+        mSubject.checkIsKeyInUse(
+                KEY_PREFIX2_IN_USE.createKey("ExtraLevel.DynamicallyGenerated98765"));
+    }
+
+    @Test(expected = RuntimeException.class)
+    @SmallTest
+    public void testPrefixedKeys_noPrefixMatch_throwsException() {
+        mSubject.checkIsKeyInUse(KEY_PREFIX3_NOT_IN_USE.createKey("restofkey"));
+    }
+
+    @Test(expected = RuntimeException.class)
+    @SmallTest
+    public void testPrefixedKeys_matchOnlyPrefix_throwsException() {
+        mSubject.checkIsKeyInUse(KEY_PREFIX1_IN_USE.createKey(""));
+    }
+
+    @Test(expected = RuntimeException.class)
+    @SmallTest
+    public void testPrefixedKeys_matchPattern_throwsException() {
+        mSubject.checkIsKeyInUse(KEY_PREFIX1_IN_USE.createKey("*"));
+    }
+
+    @Test
+    @SmallTest
+    public void testPrefix_inUse_noException() {
+        mSubject.checkIsPrefixInUse(KEY_PREFIX2_IN_USE);
+    }
+
+    @Test(expected = RuntimeException.class)
+    @SmallTest
+    public void testPrefix_notInUse_throwsException() {
+        mSubject.checkIsPrefixInUse(KEY_PREFIX3_NOT_IN_USE);
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java b/base/android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java
new file mode 100644
index 0000000..b8fdfd6
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java
@@ -0,0 +1,72 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+import org.robolectric.annotation.Config;
+import org.robolectric.annotation.LooperMode;
+import org.robolectric.shadows.ShadowProcess;
+
+import org.chromium.base.Callback;
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Unit tests for {@link LazyOneshotSupplierImpl}. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(shadows = {ShadowProcess.class})
+@LooperMode(LooperMode.Mode.LEGACY)
+public class LazyOneshotSupplierImplTest {
+    @Spy
+    private LazyOneshotSupplierImpl<String> mSupplier =
+            new LazyOneshotSupplierImpl<>() {
+                @Override
+                public void doSet() {
+                    set("answer");
+                }
+            };
+
+    @Spy private Callback<String> mCallback1;
+    @Spy private Callback<String> mCallback2;
+
+    @Before
+    public void setup() {
+        MockitoAnnotations.initMocks(this);
+    }
+
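+    // LazyOneshotSupplierImpl is expected to invoke doSet() lazily on the first get(); calling
+    // set() directly beforehand satisfies the supplier without doSet() ever running.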
+    @Test
+    public void testSetBeforeDoSet() {
+        assertFalse(mSupplier.hasValue());
+        mSupplier.set("answer");
+
+        mSupplier.onAvailable(mCallback1);
+        mSupplier.onAvailable(mCallback2);
+
+        assertTrue(mSupplier.hasValue());
+        verify(mCallback1).onResult("answer");
+        verify(mCallback2).onResult("answer");
+        verify(mSupplier, times(0)).doSet();
+    }
+
+    @Test
+    public void testDoSetCalledOnce() {
+        mSupplier.onAvailable(mCallback1);
+        verify(mSupplier, times(0)).doSet();
+
+        assertEquals("answer", mSupplier.get());
+        assertEquals("answer", mSupplier.get());
+
+        verify(mCallback1).onResult("answer");
+        verify(mSupplier).doSet();
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/supplier/ObservableSupplierImplTest.java b/base/android/junit/src/org/chromium/base/supplier/ObservableSupplierImplTest.java
index 7b19a1a..fc4a1a7 100644
--- a/base/android/junit/src/org/chromium/base/supplier/ObservableSupplierImplTest.java
+++ b/base/android/junit/src/org/chromium/base/supplier/ObservableSupplierImplTest.java
@@ -4,9 +4,13 @@
 
 package org.chromium.base.supplier;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+
 import android.os.Handler;
 
-import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.robolectric.annotation.Config;
@@ -14,9 +18,7 @@
 import org.chromium.base.Callback;
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * Unit tests for {@link ObservableSupplierImpl}.
- */
+/** Unit tests for {@link ObservableSupplierImpl}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class ObservableSupplierImplTest {
@@ -29,10 +31,11 @@
 
     @Test
     public void testObserverNotification_SetMultiple() {
-        Callback<String> supplierObserver = result -> {
-            mCallCount++;
-            mLastSuppliedString = result;
-        };
+        Callback<String> supplierObserver =
+                result -> {
+                    mCallCount++;
+                    mLastSuppliedString = result;
+                };
 
         mSupplier.addObserver(supplierObserver);
         checkState(0, null, null, "before setting first string.");
@@ -49,10 +52,11 @@
 
     @Test
     public void testObserverNotification_SetSame() {
-        Callback<String> supplierObserver = result -> {
-            mCallCount++;
-            mLastSuppliedString = result;
-        };
+        Callback<String> supplierObserver =
+                result -> {
+                    mCallCount++;
+                    mLastSuppliedString = result;
+                };
 
         mSupplier.addObserver(supplierObserver);
         checkState(0, null, null, "before setting first string.");
@@ -62,14 +66,24 @@
 
         mSupplier.set(TEST_STRING_1);
         checkState(1, TEST_STRING_1, TEST_STRING_1, "after resetting first string.");
+
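+        // Setting a value that is equal but not the same instance should not re-notify
+        // observers; ObservableSupplierImpl is assumed to compare values with equals().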
+        // Need to trick Java to not intern our new string.
+        String anotherTestString1 = new String(new char[] {'T', 'e', 's', 't'});
+        assertNotSame(TEST_STRING_1, anotherTestString1);
+        mSupplier.set(anotherTestString1);
+        // Don't use checkState, as the string arguments do not really make sense.
+        assertEquals(
+                "Incorrect call count after setting a different but equal string.", 1, mCallCount);
     }
 
     @Test
     public void testObserverNotification_RemoveObserver() {
-        Callback<String> supplierObserver = result -> {
-            mCallCount++;
-            mLastSuppliedString = result;
-        };
+        Callback<String> supplierObserver =
+                result -> {
+                    mCallCount++;
+                    mLastSuppliedString = result;
+                };
 
         mSupplier.addObserver(supplierObserver);
         checkState(0, null, null, "before setting first string.");
@@ -86,22 +100,21 @@
     @Test
     public void testObserverNotification_RegisterObserverAfterSet() {
         Handler handler = new Handler();
-        handler.post(() -> {
-            mSupplier.set(TEST_STRING_1);
-            checkState(0, null, TEST_STRING_1, "after setting first string.");
+        handler.post(
+                () -> {
+                    mSupplier.set(TEST_STRING_1);
+                    checkState(0, null, TEST_STRING_1, "after setting first string.");
 
-            Callback<String> supplierObserver = new Callback<String>() {
-                @Override
-                public void onResult(String result) {
-                    mCallCount++;
-                    mLastSuppliedString = result;
-                }
-            };
+                    Callback<String> supplierObserver =
+                            (String result) -> {
+                                mCallCount++;
+                                mLastSuppliedString = result;
+                            };
 
-            mSupplier.addObserver(supplierObserver);
+                    mSupplier.addObserver(supplierObserver);
 
-            checkState(0, null, TEST_STRING_1, "after setting observer.");
-        });
+                    checkState(0, null, TEST_STRING_1, "after setting observer.");
+                });
 
         handler.post(() -> checkState(1, TEST_STRING_1, TEST_STRING_1, "in second message loop."));
     }
@@ -109,25 +122,24 @@
     @Test
     public void testObserverNotification_RegisterObserverAfterSetThenSetAgain() {
         Handler handler = new Handler();
-        handler.post(() -> {
-            mSupplier.set(TEST_STRING_1);
-            checkState(0, null, TEST_STRING_1, "after setting first string.");
+        handler.post(
+                () -> {
+                    mSupplier.set(TEST_STRING_1);
+                    checkState(0, null, TEST_STRING_1, "after setting first string.");
 
-            Callback<String> supplierObserver = new Callback<String>() {
-                @Override
-                public void onResult(String result) {
-                    mCallCount++;
-                    mLastSuppliedString = result;
-                }
-            };
+                    Callback<String> supplierObserver =
+                            (String result) -> {
+                                mCallCount++;
+                                mLastSuppliedString = result;
+                            };
 
-            mSupplier.addObserver(supplierObserver);
+                    mSupplier.addObserver(supplierObserver);
 
-            checkState(0, null, TEST_STRING_1, "after setting observer.");
+                    checkState(0, null, TEST_STRING_1, "after setting observer.");
 
-            mSupplier.set(TEST_STRING_2);
-            checkState(1, TEST_STRING_2, TEST_STRING_2, "after setting second string.");
-        });
+                    mSupplier.set(TEST_STRING_2);
+                    checkState(1, TEST_STRING_2, TEST_STRING_2, "after setting second string.");
+                });
 
         handler.post(() -> checkState(1, TEST_STRING_2, TEST_STRING_2, "in second message loop."));
     }
@@ -135,38 +147,38 @@
     @Test
     public void testObserverNotification_RegisterObserverAfterSetThenRemove() {
         Handler handler = new Handler();
-        handler.post(() -> {
-            mSupplier.set(TEST_STRING_1);
-            checkState(0, null, TEST_STRING_1, "after setting first string.");
+        handler.post(
+                () -> {
+                    mSupplier.set(TEST_STRING_1);
+                    checkState(0, null, TEST_STRING_1, "after setting first string.");
 
-            Callback<String> supplierObserver = new Callback<String>() {
-                @Override
-                public void onResult(String result) {
-                    mCallCount++;
-                    mLastSuppliedString = result;
-                }
-            };
+                    Callback<String> supplierObserver =
+                            (String result) -> {
+                                mCallCount++;
+                                mLastSuppliedString = result;
+                            };
 
-            mSupplier.addObserver(supplierObserver);
+                    mSupplier.addObserver(supplierObserver);
 
-            checkState(0, null, TEST_STRING_1, "after setting observer.");
+                    checkState(0, null, TEST_STRING_1, "after setting observer.");
 
-            mSupplier.removeObserver(supplierObserver);
-        });
+                    mSupplier.removeObserver(supplierObserver);
+                });
 
         handler.post(() -> checkState(0, null, TEST_STRING_1, "in second message loop."));
     }
 
     @Test
     public void testObserverNotification_RemoveObserverInsideCallback() {
-        Callback<String> supplierObserver = new Callback<String>() {
-            @Override
-            public void onResult(String result) {
-                mCallCount++;
-                mLastSuppliedString = result;
-                mSupplier.removeObserver(this);
-            }
-        };
+        Callback<String> supplierObserver =
+                new Callback<>() {
+                    @Override
+                    public void onResult(String result) {
+                        mCallCount++;
+                        mLastSuppliedString = result;
+                        mSupplier.removeObserver(this);
+                    }
+                };
 
         mSupplier.addObserver(supplierObserver);
         checkState(0, null, null, "before setting first string.");
@@ -178,13 +190,48 @@
         checkState(1, TEST_STRING_1, TEST_STRING_2, "after setting second string.");
     }
 
-    private void checkState(int expectedCallCount, String expectedLastSuppliedString,
-            String expectedStringFromGet, String assertDescription) {
-        Assert.assertEquals(
-                "Incorrect call count " + assertDescription, expectedCallCount, mCallCount);
-        Assert.assertEquals("Incorrect last supplied string " + assertDescription,
-                expectedLastSuppliedString, mLastSuppliedString);
-        Assert.assertEquals(
+    @Test
+    public void testHasObservers() {
+        Callback<String> observer1 = (ignored) -> {};
+        Callback<String> observer2 = (ignored) -> {};
+
+        assertFalse("No observers yet", mSupplier.hasObservers());
+
+        mSupplier.addObserver(observer1);
+        assertTrue("Should have observer1", mSupplier.hasObservers());
+
+        mSupplier.addObserver(observer1);
+        assertTrue("Adding observer1 twice shouldn't break anything", mSupplier.hasObservers());
+
+        mSupplier.removeObserver(observer1);
+        assertFalse(
+                "observer1 should be entirely removed with one remove", mSupplier.hasObservers());
+
+        mSupplier.addObserver(observer1);
+        mSupplier.addObserver(observer2);
+        assertTrue("Should have multiple observers", mSupplier.hasObservers());
+
+        mSupplier.removeObserver(observer1);
+        assertTrue("Should still have observer2", mSupplier.hasObservers());
+
+        mSupplier.removeObserver(observer1);
+        assertTrue("Removing observer1 twice shouldn't break anything", mSupplier.hasObservers());
+
+        mSupplier.removeObserver(observer2);
+        assertFalse("Both observers should be gone", mSupplier.hasObservers());
+    }
+
+    private void checkState(
+            int expectedCallCount,
+            String expectedLastSuppliedString,
+            String expectedStringFromGet,
+            String assertDescription) {
+        assertEquals("Incorrect call count " + assertDescription, expectedCallCount, mCallCount);
+        assertEquals(
+                "Incorrect last supplied string " + assertDescription,
+                expectedLastSuppliedString,
+                mLastSuppliedString);
+        assertEquals(
                 "Incorrect #get() " + assertDescription, expectedStringFromGet, mSupplier.get());
     }
 }
diff --git a/base/android/junit/src/org/chromium/base/supplier/OneShotCallbackTest.java b/base/android/junit/src/org/chromium/base/supplier/OneShotCallbackTest.java
index d6554f6..ed8e285 100644
--- a/base/android/junit/src/org/chromium/base/supplier/OneShotCallbackTest.java
+++ b/base/android/junit/src/org/chromium/base/supplier/OneShotCallbackTest.java
@@ -27,11 +27,9 @@
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class OneShotCallbackTest {
-    @Rule
-    public MockitoRule mMockitoRule = MockitoJUnit.rule();
+    @Rule public MockitoRule mMockitoRule = MockitoJUnit.rule();
 
-    @Mock
-    Callback<Integer> mCallbackMock;
+    @Mock Callback<Integer> mCallbackMock;
 
     @Test
     public void testNotCalledWithNoValue() {
@@ -40,7 +38,10 @@
 
         handler.post(() -> new OneShotCallback<>(supplier, mCallbackMock));
 
-        handler.post(() -> { verify(mCallbackMock, never()).onResult(any()); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, never()).onResult(any());
+                });
     }
 
     @Test
@@ -49,9 +50,15 @@
         ObservableSupplierImpl<Integer> supplier = new ObservableSupplierImpl<>();
         supplier.set(5);
 
-        handler.post(() -> { new OneShotCallback<>(supplier, mCallbackMock); });
+        handler.post(
+                () -> {
+                    new OneShotCallback<>(supplier, mCallbackMock);
+                });
 
-        handler.post(() -> { verify(mCallbackMock, times(1)).onResult(5); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, times(1)).onResult(5);
+                });
     }
 
     @Test
@@ -60,10 +67,16 @@
         ObservableSupplierImpl<Integer> supplier = new ObservableSupplierImpl<>();
 
         handler.post(() -> new OneShotCallback<>(supplier, mCallbackMock));
-        handler.post(() -> { verify(mCallbackMock, never()).onResult(any()); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, never()).onResult(any());
+                });
 
         supplier.set(5);
-        handler.post(() -> { verify(mCallbackMock, times(1)).onResult(5); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, times(1)).onResult(5);
+                });
     }
 
     @Test
@@ -73,9 +86,15 @@
         supplier.set(5);
         supplier.set(10);
 
-        handler.post(() -> { new OneShotCallback<>(supplier, mCallbackMock); });
+        handler.post(
+                () -> {
+                    new OneShotCallback<>(supplier, mCallbackMock);
+                });
 
-        handler.post(() -> { verify(mCallbackMock, times(1)).onResult(10); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, times(1)).onResult(10);
+                });
     }
 
     @Test
@@ -84,12 +103,18 @@
         ObservableSupplierImpl<Integer> supplier = new ObservableSupplierImpl<>();
 
         handler.post(() -> new OneShotCallback<>(supplier, mCallbackMock));
-        handler.post(() -> { verify(mCallbackMock, never()).onResult(any()); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, never()).onResult(any());
+                });
 
         supplier.set(5);
-        handler.post(() -> { verify(mCallbackMock, times(1)).onResult(5); });
+        handler.post(
+                () -> {
+                    verify(mCallbackMock, times(1)).onResult(5);
+                });
 
         supplier.set(10);
         verifyNoMoreInteractions(mCallbackMock);
     }
-}
\ No newline at end of file
+}
diff --git a/base/android/junit/src/org/chromium/base/supplier/OneshotSupplierImplTest.java b/base/android/junit/src/org/chromium/base/supplier/OneshotSupplierImplTest.java
index b00cb18..5de8d32 100644
--- a/base/android/junit/src/org/chromium/base/supplier/OneshotSupplierImplTest.java
+++ b/base/android/junit/src/org/chromium/base/supplier/OneshotSupplierImplTest.java
@@ -20,19 +20,15 @@
 import org.chromium.base.Callback;
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * Unit tests for {@link OneshotSupplierImpl}.
- */
+/** Unit tests for {@link OneshotSupplierImpl}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(shadows = {ShadowProcess.class})
 @LooperMode(LooperMode.Mode.LEGACY)
 public class OneshotSupplierImplTest {
     private OneshotSupplierImpl<String> mSupplier = new OneshotSupplierImpl<>();
 
-    @Spy
-    private Callback<String> mCallback1;
-    @Spy
-    private Callback<String> mCallback2;
+    @Spy private Callback<String> mCallback1;
+    @Spy private Callback<String> mCallback2;
 
     @Before
     public void setup() {
diff --git a/base/android/junit/src/org/chromium/base/supplier/SyncOneshotSupplierImplTest.java b/base/android/junit/src/org/chromium/base/supplier/SyncOneshotSupplierImplTest.java
new file mode 100644
index 0000000..80a1e59
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/supplier/SyncOneshotSupplierImplTest.java
@@ -0,0 +1,79 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import androidx.test.filters.SmallTest;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/** Unit tests for {@link SyncOneshotSupplierImpl}. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class SyncOneshotSupplierImplTest {
+    private SyncOneshotSupplierImpl<Integer> mSupplier = new SyncOneshotSupplierImpl<>();
+
+    private AtomicInteger mValue1 = new AtomicInteger();
+    private AtomicInteger mValue2 = new AtomicInteger();
+
+    @Test
+    @SmallTest
+    public void testGet() {
+        final int expectedValue = 5;
+        assertNull(mSupplier.get());
+        mSupplier.set(expectedValue);
+        assertEquals(expectedValue, (int) mSupplier.get());
+    }
+
+    @Test
+    @SmallTest
+    public void testSet() {
+        final int expectedValue = 5;
+        assertNull(mSupplier.onAvailable(mValue1::set));
+        assertNull(mSupplier.onAvailable(mValue2::set));
+
+        assertEquals(0, mValue1.get());
+        assertEquals(0, mValue2.get());
+
+        mSupplier.set(expectedValue);
+
+        assertEquals(expectedValue, mValue1.get());
+        assertEquals(expectedValue, mValue2.get());
+    }
+
+    @Test
+    @SmallTest
+    public void testSetBeforeOnAvailable() {
+        final int expectedValue = 10;
+        mSupplier.set(expectedValue);
+
+        assertEquals(expectedValue, (int) mSupplier.onAvailable(mValue1::set));
+        assertEquals(expectedValue, (int) mSupplier.onAvailable(mValue2::set));
+
+        assertEquals(expectedValue, mValue1.get());
+        assertEquals(expectedValue, mValue2.get());
+    }
+
+    @Test
+    @SmallTest
+    public void testSetInterleaved() {
+        final int expectedValue = 20;
+        assertNull(mSupplier.onAvailable(mValue1::set));
+
+        mSupplier.set(expectedValue);
+        assertEquals(expectedValue, mValue1.get());
+
+        assertEquals(expectedValue, (int) mSupplier.onAvailable(mValue2::set));
+
+        assertEquals(expectedValue, mValue1.get());
+        assertEquals(expectedValue, mValue2.get());
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/supplier/TransitiveObservableSupplierTest.java b/base/android/junit/src/org/chromium/base/supplier/TransitiveObservableSupplierTest.java
new file mode 100644
index 0000000..f4d349d
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/supplier/TransitiveObservableSupplierTest.java
@@ -0,0 +1,167 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoInteractions;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnit;
+import org.mockito.junit.MockitoRule;
+import org.robolectric.shadows.ShadowLooper;
+
+import org.chromium.base.Callback;
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.function.Function;
+
+/** Unit tests for {@link TransitiveObservableSupplier}. */
+@RunWith(BaseRobolectricTestRunner.class)
+public class TransitiveObservableSupplierTest {
+    // Shared singleton lambda to satisfy all callers. Type erasure makes it all equivalent at
+    // runtime and we still get compile time type safety.
+    private static final Function<?, ?> SHARED_TRAMPOLINE = arg -> arg;
+
+    public @Rule MockitoRule mMockitoRule = MockitoJUnit.rule();
+
+    private @Mock Callback<Object> mOnChangeCallback;
+    // While nothing is verified on these, mocks have good toString methods by default.
+    private @Mock Object mObject1;
+    private @Mock Object mObject2;
+    private @Mock Object mObject3;
+    private @Mock Object mObject4;
+
+    /**
+     * Convenience helper when the parent value needs no unwrapping. These methods should be moved
+     * to the implementation file if any client needs them.
+     */
+    private static <Z> TransitiveObservableSupplier<ObservableSupplier<Z>, Z> make(
+            ObservableSupplier<ObservableSupplier<Z>> parentSupplier) {
+        return new TransitiveObservableSupplier(parentSupplier, trampoline());
+    }
+
+    private static <T> Function<T, T> trampoline() {
+        return (Function<T, T>) SHARED_TRAMPOLINE;
+    }
+
+    @Test
+    public void testGetWithoutObservers() {
+        ObservableSupplierImpl<ObservableSupplier<Object>> parentSupplier =
+                new ObservableSupplierImpl<>();
+        ObservableSupplierImpl<Object> targetSupplier1 = new ObservableSupplierImpl<>();
+
+        ObservableSupplier<Object> transitiveSupplier = make(parentSupplier);
+        assertNull(transitiveSupplier.get());
+
+        parentSupplier.set(targetSupplier1);
+        assertNull(transitiveSupplier.get());
+
+        targetSupplier1.set(mObject1);
+        assertEquals(mObject1, transitiveSupplier.get());
+
+        targetSupplier1.set(null);
+        assertNull(transitiveSupplier.get());
+
+        targetSupplier1.set(mObject2);
+        assertEquals(mObject2, transitiveSupplier.get());
+
+        parentSupplier.set(null);
+        assertNull(transitiveSupplier.get());
+
+        targetSupplier1.set(mObject3);
+        assertNull(transitiveSupplier.get());
+
+        parentSupplier.set(targetSupplier1);
+        assertEquals(mObject3, transitiveSupplier.get());
+    }
+
+    @Test
+    public void testGetWithObserver() {
+        ObservableSupplierImpl<ObservableSupplier<Object>> parentSupplier =
+                new ObservableSupplierImpl<>();
+        ObservableSupplierImpl<Object> targetSupplier1 = new ObservableSupplierImpl<>();
+        ObservableSupplierImpl<Object> targetSupplier2 = new ObservableSupplierImpl<>();
+
+        ObservableSupplier<Object> transitiveSupplier = make(parentSupplier);
+        assertNull(transitiveSupplier.get());
+
+        assertNull(transitiveSupplier.addObserver(mOnChangeCallback));
+        verifyNoInteractions(mOnChangeCallback);
+
+        parentSupplier.set(targetSupplier1);
+        assertNull(transitiveSupplier.get());
+        verifyNoInteractions(mOnChangeCallback);
+
+        targetSupplier1.set(mObject1);
+        assertEquals(mObject1, transitiveSupplier.get());
+        verify(mOnChangeCallback).onResult(eq(mObject1));
+
+        targetSupplier1.set(mObject2);
+        assertEquals(mObject2, transitiveSupplier.get());
+        verify(mOnChangeCallback).onResult(eq(mObject2));
+
+        targetSupplier1.set(null);
+        assertEquals(null, transitiveSupplier.get());
+        verify(mOnChangeCallback).onResult(eq(null));
+
+        targetSupplier2.set(mObject3);
+        parentSupplier.set(targetSupplier2);
+        assertEquals(mObject3, transitiveSupplier.get());
+        verify(mOnChangeCallback).onResult(eq(mObject3));
+
+        transitiveSupplier.removeObserver(mOnChangeCallback);
+        targetSupplier2.set(mObject4);
+        assertEquals(mObject4, transitiveSupplier.get());
+        verify(mOnChangeCallback, never()).onResult(eq(mObject4));
+    }
+
+    @Test
+    public void testSameObserver() {
+        ObservableSupplierImpl<ObservableSupplier<Object>> parentSupplier =
+                new ObservableSupplierImpl<>();
+        ObservableSupplierImpl<Object> targetSupplier = new ObservableSupplierImpl<>();
+        parentSupplier.set(targetSupplier);
+
+        ObservableSupplier<Object> transitiveSupplier = make(parentSupplier);
+        assertEquals(null, transitiveSupplier.addObserver(mOnChangeCallback));
+        assertTrue(parentSupplier.hasObservers());
+        assertTrue(targetSupplier.hasObservers());
+
+        targetSupplier.set(mObject1);
+        assertEquals(mObject1, transitiveSupplier.get());
+        verify(mOnChangeCallback).onResult(eq(mObject1));
+
+        assertEquals(mObject1, transitiveSupplier.addObserver(mOnChangeCallback));
+        transitiveSupplier.removeObserver(mOnChangeCallback);
+        assertFalse(parentSupplier.hasObservers());
+        assertFalse(targetSupplier.hasObservers());
+    }
+
+    @Test
+    public void testAlreadyHasValueWhenObserverAdded() {
+        ObservableSupplierImpl<ObservableSupplier<Object>> parentSupplier =
+                new ObservableSupplierImpl<>();
+        ObservableSupplierImpl<Object> targetSupplier = new ObservableSupplierImpl<>();
+        parentSupplier.set(targetSupplier);
+        targetSupplier.set(mObject1);
+
+        ObservableSupplier<Object> transitiveSupplier = make(parentSupplier);
+        assertEquals(mObject1, transitiveSupplier.get());
+
+        assertEquals(mObject1, transitiveSupplier.addObserver(mOnChangeCallback));
+        assertEquals(mObject1, transitiveSupplier.get());
+        ShadowLooper.idleMainLooper();
+        verify(mOnChangeCallback).onResult(eq(mObject1));
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/supplier/UnownedUserDataSupplierTest.java b/base/android/junit/src/org/chromium/base/supplier/UnownedUserDataSupplierTest.java
index 1d7912c..0472029 100644
--- a/base/android/junit/src/org/chromium/base/supplier/UnownedUserDataSupplierTest.java
+++ b/base/android/junit/src/org/chromium/base/supplier/UnownedUserDataSupplierTest.java
@@ -17,9 +17,7 @@
 import org.chromium.base.UnownedUserDataKey;
 import org.chromium.base.test.BaseRobolectricTestRunner;
 
-/**
- * Unit tests for {@link ObservableSupplierImpl}.
- */
+/** Unit tests for {@link ObservableSupplierImpl}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class UnownedUserDataSupplierTest {
@@ -102,11 +100,11 @@
         TestUnownedUserDataSupplier secondarySupplier = new TestUnownedUserDataSupplier();
         secondarySupplier.attach(mHost);
         Assert.assertFalse(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToAnyHost(
-                        mSupplier));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToAnyHost(mSupplier));
         Assert.assertTrue(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToAnyHost(
-                        secondarySupplier));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToAnyHost(secondarySupplier));
         secondarySupplier.destroy();
     }
 
@@ -115,26 +113,26 @@
         UnownedUserDataHost secondaryHost = new UnownedUserDataHost();
         mSupplier.attach(secondaryHost);
         Assert.assertTrue(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToAnyHost(
-                        mSupplier));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToAnyHost(mSupplier));
         mSupplier.destroy();
         mIsDestroyed = true;
         Assert.assertFalse(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToAnyHost(
-                        mSupplier));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToAnyHost(mSupplier));
     }
 
     @Test
     public void testDestroy() {
         Assert.assertTrue(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToAnyHost(
-                        mSupplier));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToAnyHost(mSupplier));
 
         mSupplier.destroy();
         Assert.assertNull(TestUnownedUserDataSupplier.from(mHost));
         Assert.assertFalse(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToAnyHost(
-                        mSupplier));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToAnyHost(mSupplier));
         mIsDestroyed = true;
     }
 
@@ -145,13 +143,13 @@
 
         TestUnownedUserDataSupplier secondarySupplier = new TestUnownedUserDataSupplier();
         Assert.assertFalse(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToHost(
-                        mHost));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToHost(mHost));
 
         secondarySupplier.destroy();
         Assert.assertFalse(
-                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting().isAttachedToHost(
-                        mHost));
+                TestUnownedUserDataSupplier.getUnownedUserDataKeyForTesting()
+                        .isAttachedToHost(mHost));
         mIsDestroyed = true;
     }
 
@@ -165,4 +163,4 @@
         }
         mIsDestroyed = true;
     }
-}
\ No newline at end of file
+}
diff --git a/base/android/junit/src/org/chromium/base/task/AsyncTaskThreadTest.java b/base/android/junit/src/org/chromium/base/task/AsyncTaskThreadTest.java
index 2cbdf74..696f04f 100644
--- a/base/android/junit/src/org/chromium/base/task/AsyncTaskThreadTest.java
+++ b/base/android/junit/src/org/chromium/base/task/AsyncTaskThreadTest.java
@@ -33,9 +33,7 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
-/**
- * Tests for {@link AsyncTask}.
- */
+/** Tests for {@link AsyncTask}. */
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 @LooperMode(LooperMode.Mode.LEGACY)
@@ -96,8 +94,7 @@
     private final RoboExecutorService mRoboExecutorService = new RoboExecutorService();
     private final Scheduler mBackgroundScheduler = Robolectric.getBackgroundThreadScheduler();
 
-    @Rule
-    public ExpectedException thrown = ExpectedException.none();
+    @Rule public ExpectedException thrown = ExpectedException.none();
 
     public AsyncTaskThreadTest() {
         if (DEBUG) ShadowLog.stream = System.out;
@@ -129,7 +126,7 @@
         mBackgroundScheduler.runOneTask();
 
         // Cannot cancel. The task is already run.
-        assertFalse(mTask.cancel(false /* mayInterruptIfRunning */));
+        assertFalse(mTask.cancel(/* mayInterruptIfRunning= */ false));
         assertTrue(mTask.get());
         assertEquals(Boolean.TRUE, mTask.getPostExecuteResult());
 
@@ -174,7 +171,7 @@
 
         // This reflects FutureTask#cancel() behavior. Note that the task is
         // started but cancel can still return true.
-        assertTrue(mTask.cancel(false /* mayInterruptIfRunning */));
+        assertTrue(mTask.cancel(/* mayInterruptIfRunning= */ false));
 
         // Continue the task.
         mTask.feedData(true);
@@ -205,7 +202,7 @@
         mBackgroundScheduler.runOneTask();
 
         // Cannot cancel. The task is already run.
-        assertFalse(mTask.cancel(true /* mayInterruptIfRunning */));
+        assertFalse(mTask.cancel(/* mayInterruptIfRunning= */ true));
         assertTrue(mTask.get());
         assertEquals(Boolean.TRUE, mTask.getPostExecuteResult());
 
@@ -226,7 +223,7 @@
         assertEquals(Status.RUNNING, mTask.getStatus());
 
         // Cancel and interrupt the current task.
-        assertTrue(mTask.cancel(true /* mayInterruptIfRunning */));
+        assertTrue(mTask.cancel(/* mayInterruptIfRunning= */ true));
 
         // Do not feed data here because task may finish before it gets interrupted.
 
diff --git a/base/android/junit/src/org/chromium/base/task/SequencedTaskRunnerTaskMigrationTest.java b/base/android/junit/src/org/chromium/base/task/SequencedTaskRunnerTaskMigrationTest.java
index 79b29f2..cc6f011 100644
--- a/base/android/junit/src/org/chromium/base/task/SequencedTaskRunnerTaskMigrationTest.java
+++ b/base/android/junit/src/org/chromium/base/task/SequencedTaskRunnerTaskMigrationTest.java
@@ -30,8 +30,7 @@
 @RunWith(BaseRobolectricTestRunner.class)
 @Config(manifest = Config.NONE)
 public class SequencedTaskRunnerTaskMigrationTest {
-    @Rule
-    public JniMocker mMocker = new JniMocker();
+    @Rule public JniMocker mMocker = new JniMocker();
 
     // It might be tempting to use fake executor similar to Robolectric's scheduler that is driven
     // from the test's main thread. Unfortunately this approach means that only two states of the
@@ -54,7 +53,8 @@
         Assert.assertTrue("Some task is stuck in thread pool queue", queuedRunnables.isEmpty());
         // Termination will be immediate if tests aren't broken. Generous timeout prevents test
         // from being stuck forever.
-        Assert.assertTrue("Some task is stuck in thread pool",
+        Assert.assertTrue(
+                "Some task is stuck in thread pool",
                 mConcurrentExecutor.awaitTermination(10, TimeUnit.SECONDS));
     }
 
@@ -76,7 +76,8 @@
         preNativeTask.awaitTaskStarted();
         taskRunner.initNativeTaskRunner();
 
-        Assert.assertFalse("Native task should not start before java task completion",
+        Assert.assertFalse(
+                "Native task should not start before java task completion",
                 fakeTaskRunnerNatives.hasReceivedTasks());
     }
 
@@ -104,7 +105,8 @@
         // runner and checking the state of the latter in assertion below.
         nativeTask.awaitTaskStarted();
 
-        Assert.assertTrue("Second task should run on the native pool",
+        Assert.assertTrue(
+                "Second task should run on the native pool",
                 fakeTaskRunnerNatives.hasReceivedTasks());
     }
 
@@ -131,7 +133,8 @@
         try {
             // Generous timeout prevents test from being stuck forever. Actual delay is going to
             // be a few milliseconds.
-            Assert.assertTrue("Timed out waiting for latch to count down",
+            Assert.assertTrue(
+                    "Timed out waiting for latch to count down",
                     taskLatch.await(10, TimeUnit.SECONDS));
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
diff --git a/base/android/library_loader/library_loader_hooks.h b/base/android/library_loader/library_loader_hooks.h
index 6fe749b..c81f7a4 100644
--- a/base/android/library_loader/library_loader_hooks.h
+++ b/base/android/library_loader/library_loader_hooks.h
@@ -29,12 +29,8 @@
   PROCESS_WEBVIEW = 3,
   // Shared library is running in child process as part of webview.
   PROCESS_WEBVIEW_CHILD = 4,
-  // Shared library is running in the app that uses weblayer.
-  PROCESS_WEBLAYER = 5,
-  // Shared library is running in child process as part of weblayer.
-  PROCESS_WEBLAYER_CHILD = 6,
   // Shared library is running in a non-embedded WebView process.
-  PROCESS_WEBVIEW_NONEMBEDDED = 7,
+  PROCESS_WEBVIEW_NONEMBEDDED = 5,
 };
 
 // Returns the library process type this library was loaded for.
diff --git a/base/android/linker/BUILD.gn b/base/android/linker/BUILD.gn
index c433af9..e7b6870 100644
--- a/base/android/linker/BUILD.gn
+++ b/base/android/linker/BUILD.gn
@@ -15,7 +15,10 @@
     "linker_minimal_libcxx.cc",
   ]
 
-  deps = [ "//build:buildflag_header_h" ]
+  deps = [
+    "//build:buildflag_header_h",
+    "//third_party/jni_zero:jni_export",
+  ]
 
   # Export JNI symbols.
   configs -= [ "//build/config/android:hide_all_but_jni_onload" ]
diff --git a/base/android/linker/DEPS b/base/android/linker/DEPS
index 7cefa77..bcf221f 100644
--- a/base/android/linker/DEPS
+++ b/base/android/linker/DEPS
@@ -3,6 +3,7 @@
   # directory.
   "-base",
   "+base/android/linker",
+  "+third_party/jni_zero/jni_export.h",
 ]
 
 # Allow using //base in unittests. These unittests are included in targets that
diff --git a/base/android/linker/linker_jni.cc b/base/android/linker/linker_jni.cc
index ec7538e..f44c82e 100644
--- a/base/android/linker/linker_jni.cc
+++ b/base/android/linker/linker_jni.cc
@@ -710,7 +710,7 @@
   return functions.IsWorking();
 }
 
-JNI_GENERATOR_EXPORT void
+JNI_BOUNDARY_EXPORT void
 Java_org_chromium_base_library_1loader_LinkerJni_nativeFindMemoryRegionAtRandomAddress(
     JNIEnv* env,
     jclass clazz,
@@ -722,7 +722,7 @@
   s_lib_info_fields.SetLoadInfo(env, lib_info_obj, address, size);
 }
 
-JNI_GENERATOR_EXPORT void
+JNI_BOUNDARY_EXPORT void
 Java_org_chromium_base_library_1loader_LinkerJni_nativeReserveMemoryForLibrary(
     JNIEnv* env,
     jclass clazz,
@@ -735,7 +735,7 @@
   s_lib_info_fields.SetLoadInfo(env, lib_info_obj, address, size);
 }
 
-JNI_GENERATOR_EXPORT jboolean
+JNI_BOUNDARY_EXPORT jboolean
 Java_org_chromium_base_library_1loader_LinkerJni_nativeFindRegionReservedByWebViewZygote(
     JNIEnv* env,
     jclass clazz,
@@ -749,7 +749,7 @@
   return true;
 }
 
-JNI_GENERATOR_EXPORT jboolean
+JNI_BOUNDARY_EXPORT jboolean
 Java_org_chromium_base_library_1loader_LinkerJni_nativeLoadLibrary(
     JNIEnv* env,
     jclass clazz,
@@ -770,7 +770,7 @@
   return true;
 }
 
-JNI_GENERATOR_EXPORT jboolean
+JNI_BOUNDARY_EXPORT jboolean
 Java_org_chromium_base_library_1loader_LinkerJni_nativeUseRelros(
     JNIEnv* env,
     jclass clazz,
@@ -796,7 +796,7 @@
   return true;
 }
 
-JNI_GENERATOR_EXPORT jint
+JNI_BOUNDARY_EXPORT jint
 Java_org_chromium_base_library_1loader_LinkerJni_nativeGetRelroSharingResult(
     JNIEnv* env,
     jclass clazz) {
diff --git a/base/android/linker/linker_jni.h b/base/android/linker/linker_jni.h
index 93076bd..4f8b681 100644
--- a/base/android/linker/linker_jni.h
+++ b/base/android/linker/linker_jni.h
@@ -22,6 +22,7 @@
 #include <stdlib.h>
 
 #include "build/build_config.h"
+#include "third_party/jni_zero/jni_export.h"
 
 // Set this to 1 to enable debug traces to the Android log.
 // Note that LOG() from "base/logging.h" cannot be used, since it is
@@ -43,16 +44,6 @@
 #define PLOG_ERROR(FORMAT, ...) \
   LOG_ERROR(FORMAT ": %s", ##__VA_ARGS__, strerror(errno))
 
-#if defined(ARCH_CPU_X86)
-// Dalvik JIT generated code doesn't guarantee 16-byte stack alignment on
-// x86 - use force_align_arg_pointer to realign the stack at the JNI
-// boundary. https://crbug.com/655248
-#define JNI_GENERATOR_EXPORT \
-  extern "C" __attribute__((visibility("default"), force_align_arg_pointer))
-#else
-#define JNI_GENERATOR_EXPORT extern "C" __attribute__((visibility("default")))
-#endif
-
 #if defined(__arm__) && defined(__ARM_ARCH_7A__)
 #define CURRENT_ABI "armeabi-v7a"
 #elif defined(__arm__)
diff --git a/base/android/meminfo_dump_provider.cc b/base/android/meminfo_dump_provider.cc
index aeaa62e..347f50c 100644
--- a/base/android/meminfo_dump_provider.cc
+++ b/base/android/meminfo_dump_provider.cc
@@ -53,7 +53,7 @@
   // would confuse data in UMA. In particular, the background/foreground session
   // filter would no longer be accurate.
   if (stale_data && args.level_of_detail !=
-                        base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
+                        base::trace_event::MemoryDumpLevelOfDetail::kDetailed) {
     return true;
   }
 
diff --git a/base/android/meminfo_dump_provider_unittest.cc b/base/android/meminfo_dump_provider_unittest.cc
index 99b8266..d22ff18 100644
--- a/base/android/meminfo_dump_provider_unittest.cc
+++ b/base/android/meminfo_dump_provider_unittest.cc
@@ -30,7 +30,7 @@
   auto& instance = MeminfoDumpProvider::Initialize();
 
   base::trace_event::MemoryDumpArgs args{};
-  args.level_of_detail = base::trace_event::MemoryDumpLevelOfDetail::DETAILED;
+  args.level_of_detail = base::trace_event::MemoryDumpLevelOfDetail::kDetailed;
   base::trace_event::ProcessMemoryDump first_pmd{args};
 
   bool success = instance.OnMemoryDump(args, &first_pmd);
@@ -80,7 +80,8 @@
   // First dump, data may or may not be stale.
   {
     base::trace_event::MemoryDumpArgs args{};
-    args.level_of_detail = base::trace_event::MemoryDumpLevelOfDetail::DETAILED;
+    args.level_of_detail =
+        base::trace_event::MemoryDumpLevelOfDetail::kDetailed;
     base::trace_event::ProcessMemoryDump pmd{args};
     ASSERT_TRUE(instance.OnMemoryDump(args, &pmd));
   }
@@ -89,7 +90,7 @@
   {
     base::trace_event::MemoryDumpArgs args{};
     args.level_of_detail =
-        base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND;
+        base::trace_event::MemoryDumpLevelOfDetail::kBackground;
     base::trace_event::ProcessMemoryDump pmd{args};
     ASSERT_TRUE(instance.OnMemoryDump(args, &pmd));
     base::trace_event::MemoryAllocatorDump* dump =
diff --git a/base/android/proguard/chromium_apk.flags b/base/android/proguard/chromium_apk.flags
index 16023ab..981acae 100644
--- a/base/android/proguard/chromium_apk.flags
+++ b/base/android/proguard/chromium_apk.flags
@@ -61,3 +61,8 @@
 -assumenosideeffects class android.**, java.** {
   static <fields>;
 }
+
+# Keep the names of exception types, to make it easier to understand stack
+# traces in contexts where it's not trivial to deobfuscate them - for example
+# when reported to app developers who are using WebView.
+-keepnames class ** extends java.lang.Throwable {}
diff --git a/base/android/scoped_java_ref.h b/base/android/scoped_java_ref.h
index 3245d06..326e012 100644
--- a/base/android/scoped_java_ref.h
+++ b/base/android/scoped_java_ref.h
@@ -125,7 +125,7 @@
   // template parameter.
   template <typename ElementType,
             typename T_ = T,
-            typename = std::enable_if_t<std::is_same<T_, jobjectArray>::value>>
+            typename = std::enable_if_t<std::is_same_v<T_, jobjectArray>>>
   JavaObjectArrayReader<ElementType> ReadElements() const {
     return JavaObjectArrayReader<ElementType>(*this);
   }
@@ -192,7 +192,7 @@
 
   // Copy conversion constructor.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaLocalRef(const ScopedJavaLocalRef<U>& other) : env_(other.env_) {
     JavaRef<T>::SetNewLocalRef(env_, other.obj());
   }
@@ -205,7 +205,7 @@
 
   // Move conversion constructor.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaLocalRef(ScopedJavaLocalRef<U>&& other) : env_(other.env_) {
     JavaRef<T>::steal(std::move(other));
   }
@@ -235,7 +235,7 @@
 
   // Copy conversion assignment.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaLocalRef& operator=(const ScopedJavaLocalRef<U>& other) {
     Reset(other);
     return *this;
@@ -243,7 +243,7 @@
 
   // Move assignment.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaLocalRef& operator=(ScopedJavaLocalRef<U>&& other) {
     env_ = other.env_;
     Reset();
@@ -260,7 +260,7 @@
   void Reset() { JavaRef<T>::ResetLocalRef(env_); }
 
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   void Reset(const ScopedJavaLocalRef<U>& other) {
     // We can copy over env_ here as |other| instance must be from the same
     // thread as |this| local ref. (See class comment for multi-threading
@@ -316,7 +316,7 @@
 
   // Copy conversion constructor.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaGlobalRef(const ScopedJavaGlobalRef<U>& other) {
     Reset(other);
   }
@@ -329,7 +329,7 @@
 
   // Move conversion constructor.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaGlobalRef(ScopedJavaGlobalRef<U>&& other) {
     JavaRef<T>::steal(std::move(other));
   }
@@ -357,7 +357,7 @@
 
   // Copy conversion assignment.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaGlobalRef& operator=(const ScopedJavaGlobalRef<U>& other) {
     Reset(other);
     return *this;
@@ -365,7 +365,7 @@
 
   // Move assignment.
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   ScopedJavaGlobalRef& operator=(ScopedJavaGlobalRef<U>&& other) {
     Reset();
     JavaRef<T>::steal(std::move(other));
@@ -381,7 +381,7 @@
   void Reset() { JavaRef<T>::ResetGlobalRef(); }
 
   template <typename U,
-            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+            typename = std::enable_if_t<std::is_convertible_v<U, T>>>
   void Reset(const ScopedJavaGlobalRef<U>& other) {
     Reset(nullptr, other.obj());
   }
diff --git a/base/android/scoped_java_ref_unittest.cc b/base/android/scoped_java_ref_unittest.cc
index b50a5d9..8afc11e 100644
--- a/base/android/scoped_java_ref_unittest.cc
+++ b/base/android/scoped_java_ref_unittest.cc
@@ -162,11 +162,11 @@
     EXPECT_SAME_OBJECT(new_global, str);
     new_global = str;
     EXPECT_SAME_OBJECT(new_local, str);
-    static_assert(!std::is_convertible<ScopedJavaLocalRef<jobject>,
-                                       ScopedJavaGlobalRef<jobject>>::value,
+    static_assert(!std::is_convertible_v<ScopedJavaLocalRef<jobject>,
+                                         ScopedJavaGlobalRef<jobject>>,
                   "");
-    static_assert(!std::is_convertible<ScopedJavaGlobalRef<jobject>,
-                                       ScopedJavaLocalRef<jobject>>::value,
+    static_assert(!std::is_convertible_v<ScopedJavaGlobalRef<jobject>,
+                                         ScopedJavaLocalRef<jobject>>,
                   "");
   }
 
@@ -284,18 +284,18 @@
   JavaObjectArrayReader<jobject> reader(array_);
   It i = reader.begin();
 
-  EXPECT_TRUE(std::is_copy_constructible<It>::value);
+  EXPECT_TRUE(std::is_copy_constructible_v<It>);
   It copy = i;
   EXPECT_EQ(copy, i);
   EXPECT_EQ(It(i), i);
 
-  EXPECT_TRUE(std::is_copy_assignable<It>::value);
+  EXPECT_TRUE(std::is_copy_assignable_v<It>);
   It assign = reader.end();
   It& assign2 = (assign = i);
   EXPECT_EQ(assign, i);
   EXPECT_EQ(assign2, assign);
 
-  EXPECT_TRUE(std::is_destructible<It>::value);
+  EXPECT_TRUE(std::is_destructible_v<It>);
 
   // Swappable
   It left = reader.begin(), right = reader.end();
@@ -304,8 +304,8 @@
   EXPECT_EQ(right, reader.begin());
 
   // Basic check that iterator_traits works
-  bool same_type = std::is_same<std::iterator_traits<It>::iterator_category,
-                                std::input_iterator_tag>::value;
+  bool same_type = std::is_same_v<std::iterator_traits<It>::iterator_category,
+                                  std::input_iterator_tag>;
   EXPECT_TRUE(same_type);
 
   // Comparisons
diff --git a/base/android/shared_preferences/shared_preferences_manager.cc b/base/android/shared_preferences/shared_preferences_manager.cc
new file mode 100644
index 0000000..ba0a80c
--- /dev/null
+++ b/base/android/shared_preferences/shared_preferences_manager.cc
@@ -0,0 +1,77 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/android/shared_preferences/shared_preferences_manager.h"
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_string.h"
+#include "base/base_shared_preferences_jni/SharedPreferencesManager_jni.h"
+#include "base/check.h"
+
+namespace base::android {
+
+SharedPreferencesManager::SharedPreferencesManager(const JavaRef<jobject>& jobj,
+                                                   JNIEnv* env)
+    : java_obj_(jobj), env_(env) {}
+
+SharedPreferencesManager::SharedPreferencesManager(
+    const SharedPreferencesManager& other)
+    : java_obj_(other.java_obj_), env_(other.env_) {}
+
+SharedPreferencesManager::~SharedPreferencesManager() {}
+
+void SharedPreferencesManager::RemoveKey(
+    const std::string& shared_preference_key) {
+  ScopedJavaLocalRef<jstring> jkey =
+      ConvertUTF8ToJavaString(env_, shared_preference_key);
+  Java_SharedPreferencesManager_removeKey(env_, java_obj_, jkey);
+}
+
+bool SharedPreferencesManager::ContainsKey(
+    const std::string& shared_preference_key) {
+  ScopedJavaLocalRef<jstring> jkey =
+      ConvertUTF8ToJavaString(env_, shared_preference_key);
+  return Java_SharedPreferencesManager_contains(env_, java_obj_, jkey);
+}
+
+bool SharedPreferencesManager::ReadBoolean(
+    const std::string& shared_preference_key,
+    bool default_value) {
+  ScopedJavaLocalRef<jstring> jkey =
+      ConvertUTF8ToJavaString(env_, shared_preference_key);
+  return Java_SharedPreferencesManager_readBoolean(env_, java_obj_, jkey,
+                                                   default_value);
+}
+
+int SharedPreferencesManager::ReadInt(const std::string& shared_preference_key,
+                                      int default_value) {
+  ScopedJavaLocalRef<jstring> jkey =
+      ConvertUTF8ToJavaString(env_, shared_preference_key);
+  return Java_SharedPreferencesManager_readInt(env_, java_obj_, jkey,
+                                               default_value);
+}
+
+std::string SharedPreferencesManager::ReadString(
+    const std::string& shared_preference_key,
+    const std::string& default_value) {
+  ScopedJavaLocalRef<jstring> jkey =
+      ConvertUTF8ToJavaString(env_, shared_preference_key);
+  ScopedJavaLocalRef<jstring> jdefault_value =
+      ConvertUTF8ToJavaString(env_, default_value);
+  ScopedJavaLocalRef<jstring> java_result =
+      Java_SharedPreferencesManager_readString(env_, java_obj_, jkey,
+                                               jdefault_value);
+  return ConvertJavaStringToUTF8(java_result);
+}
+
+void SharedPreferencesManager::WriteString(
+    const std::string& shared_preference_key,
+    const std::string& value) {
+  ScopedJavaLocalRef<jstring> jkey =
+      ConvertUTF8ToJavaString(env_, shared_preference_key);
+  ScopedJavaLocalRef<jstring> jvalue = ConvertUTF8ToJavaString(env_, value);
+  Java_SharedPreferencesManager_writeString(env_, java_obj_, jkey, jvalue);
+}
+
+}  // namespace base::android
diff --git a/base/android/shared_preferences/shared_preferences_manager.h b/base/android/shared_preferences/shared_preferences_manager.h
new file mode 100644
index 0000000..2c491cc
--- /dev/null
+++ b/base/android/shared_preferences/shared_preferences_manager.h
@@ -0,0 +1,40 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ANDROID_SHARED_PREFERENCES_SHARED_PREFERENCES_MANAGER_H_
+#define BASE_ANDROID_SHARED_PREFERENCES_SHARED_PREFERENCES_MANAGER_H_
+
+#include "base/base_export.h"
+
+#include "base/android/jni_android.h"
+
+namespace base::android {
+
+// A SharedPreferencesManager that provides access to Android SharedPreferences
+// with uniqueness key checking.
+class BASE_EXPORT SharedPreferencesManager {
+ public:
+  explicit SharedPreferencesManager(const JavaRef<jobject>& jobj, JNIEnv* env);
+  SharedPreferencesManager(const SharedPreferencesManager&);
+  SharedPreferencesManager& operator=(const SharedPreferencesManager&) = delete;
+  ~SharedPreferencesManager();
+
+  void RemoveKey(const std::string& shared_preference_key);
+  bool ContainsKey(const std::string& shared_preference_key);
+  bool ReadBoolean(const std::string& shared_preference_key,
+                   bool default_value);
+  int ReadInt(const std::string& shared_preference_key, int default_value);
+  std::string ReadString(const std::string& shared_preference_key,
+                         const std::string& default_value);
+  void WriteString(const std::string& shared_preference_key,
+                   const std::string& value);
+
+ private:
+  ScopedJavaLocalRef<jobject> java_obj_;
+  raw_ptr<JNIEnv> env_;
+};
+
+}  // namespace base::android
+
+#endif  // BASE_ANDROID_SHARED_PREFERENCES_SHARED_PREFERENCES_MANAGER_H_
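A minimal usage sketch of the SharedPreferencesManager API declared above, assuming the Java-side manager object (|jobj|) has already been obtained and that the key names are placeholders:

  // Sketch only: |jobj| wraps a Java SharedPreferencesManager; key names are
  // illustrative.
  #include <string>

  #include "base/android/shared_preferences/shared_preferences_manager.h"

  void ExampleUse(JNIEnv* env, const base::android::JavaRef<jobject>& jobj) {
    base::android::SharedPreferencesManager prefs(jobj, env);

    // Reads fall back to the supplied default when the key is absent.
    prefs.WriteString("Example.Key", "hello");
    std::string value = prefs.ReadString("Example.Key", "");
    bool flag = prefs.ReadBoolean("Example.Flag", false);
    int count = prefs.ReadInt("Example.Count", 0);

    if (prefs.ContainsKey("Example.Key")) {
      prefs.RemoveKey("Example.Key");
    }
  }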
diff --git a/base/android/trace_event_binding.cc b/base/android/trace_event_binding.cc
index 1ba5b1d..824e5a4 100644
--- a/base/android/trace_event_binding.cc
+++ b/base/android/trace_event_binding.cc
@@ -43,7 +43,9 @@
         args.config->chrome_config().trace_config());
     event_name_filtering_per_session_[args.internal_instance_index] =
         trace_config.IsEventPackageNameFilterEnabled();
+  }
 
+  void OnStart(const perfetto::DataSourceBase::StartArgs&) override {
     JNIEnv* env = base::android::AttachCurrentThread();
     base::android::Java_TraceEvent_setEnabled(env, true);
     base::android::Java_TraceEvent_setEventNameFilteringEnabled(
diff --git a/base/apple/DEPS b/base/apple/DEPS
new file mode 100644
index 0000000..816f056
--- /dev/null
+++ b/base/apple/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+  # base/apple is a base on which base/mac and base/ios build, so don't allow
+  # dependencies on them.
+  "-mac",
+  "-ios",
+]
diff --git a/base/apple/backup_util.mm b/base/apple/backup_util.mm
index aeb34b7..ebd0ff8 100644
--- a/base/apple/backup_util.mm
+++ b/base/apple/backup_util.mm
@@ -6,9 +6,9 @@
 
 #import <Foundation/Foundation.h>
 
+#include "base/apple/foundation_util.h"
 #include "base/files/file_path.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
 #include "base/strings/sys_string_conversions.h"
 #include "base/threading/scoped_blocking_call.h"
 
@@ -18,7 +18,7 @@
   base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
                                                 base::BlockingType::MAY_BLOCK);
 
-  NSURL* file_url = mac::FilePathToNSURL(file_path);
+  NSURL* file_url = apple::FilePathToNSURL(file_path);
   DCHECK([file_url checkPromisedItemIsReachableAndReturnError:nil]);
 
   NSError* error = nil;
@@ -40,7 +40,7 @@
   base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
                                                 base::BlockingType::MAY_BLOCK);
 
-  NSURL* file_url = mac::FilePathToNSURL(file_path);
+  NSURL* file_url = apple::FilePathToNSURL(file_path);
   DCHECK([file_url checkPromisedItemIsReachableAndReturnError:nil]);
 
   NSError* error = nil;
diff --git a/base/apple/backup_util_unittest.mm b/base/apple/backup_util_unittest.mm
index 219a997..60f5c22 100644
--- a/base/apple/backup_util_unittest.mm
+++ b/base/apple/backup_util_unittest.mm
@@ -7,11 +7,11 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include "base/apple/foundation_util.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/numerics/safe_conversions.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -44,8 +44,8 @@
   ScopedTempDir temp_dir_;
   ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
   FilePath excluded_file_path = temp_dir_.GetPath().Append("excluded");
-  base::ScopedCFTypeRef<CFURLRef> excluded_url =
-      mac::FilePathToCFURL(excluded_file_path);
+  ScopedCFTypeRef<CFURLRef> excluded_url =
+      apple::FilePathToCFURL(excluded_file_path);
 
   constexpr char placeholder_data[] = "All your base are belong to us!";
   ASSERT_EQ(checked_cast<int>(std::size(placeholder_data)),
diff --git a/base/apple/bind_objc_block_unittest.mm b/base/apple/bind_objc_block_unittest.mm
new file mode 100644
index 0000000..80572e1
--- /dev/null
+++ b/base/apple/bind_objc_block_unittest.mm
@@ -0,0 +1,143 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/functional/bind.h"
+#include "base/functional/callback.h"
+#include "base/functional/callback_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gtest_mac.h"
+
+namespace {
+
+TEST(BindObjcBlockTest, TestScopedClosureRunnerExitScope) {
+  int run_count = 0;
+  int* ptr = &run_count;
+  {
+    base::ScopedClosureRunner runner(base::BindOnce(^{
+      (*ptr)++;
+    }));
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTest, TestScopedClosureRunnerRelease) {
+  int run_count = 0;
+  int* ptr = &run_count;
+  base::OnceClosure c;
+  {
+    base::ScopedClosureRunner runner(base::BindOnce(^{
+      (*ptr)++;
+    }));
+    c = runner.Release();
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(0, run_count);
+  std::move(c).Run();
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTest, TestReturnValue) {
+  const int kReturnValue = 42;
+  base::OnceCallback<int(void)> c = base::BindOnce(^{
+    return kReturnValue;
+  });
+  EXPECT_EQ(kReturnValue, std::move(c).Run());
+}
+
+TEST(BindObjcBlockTest, TestArgument) {
+  const int kArgument = 42;
+  base::OnceCallback<int(int)> c = base::BindOnce(^(int a) {
+    return a + 1;
+  });
+  EXPECT_EQ(kArgument + 1, std::move(c).Run(kArgument));
+}
+
+TEST(BindObjcBlockTest, TestTwoArguments) {
+  std::string result;
+  std::string* ptr = &result;
+  base::OnceCallback<void(const std::string&, const std::string&)> c =
+      base::BindOnce(^(const std::string& a, const std::string& b) {
+        *ptr = a + b;
+      });
+  std::move(c).Run("forty", "two");
+  EXPECT_EQ(result, "fortytwo");
+}
+
+TEST(BindObjcBlockTest, TestThreeArguments) {
+  std::string result;
+  std::string* ptr = &result;
+  base::OnceCallback<void(const std::string&, const std::string&,
+                          const std::string&)>
+      cb = base::BindOnce(
+          ^(const std::string& a, const std::string& b, const std::string& c) {
+            *ptr = a + b + c;
+          });
+  std::move(cb).Run("six", "times", "nine");
+  EXPECT_EQ(result, "sixtimesnine");
+}
+
+TEST(BindObjcBlockTest, TestSixArguments) {
+  std::string result1;
+  std::string* ptr = &result1;
+  int result2;
+  int* ptr2 = &result2;
+  base::OnceCallback<void(int, int, const std::string&, const std::string&, int,
+                          const std::string&)>
+      cb = base::BindOnce(^(int a, int b, const std::string& c,
+                            const std::string& d, int e, const std::string& f) {
+        *ptr = c + d + f;
+        *ptr2 = a + b + e;
+      });
+  std::move(cb).Run(1, 2, "infinite", "improbability", 3, "drive");
+  EXPECT_EQ(result1, "infiniteimprobabilitydrive");
+  EXPECT_EQ(result2, 6);
+}
+
+TEST(BindObjcBlockTest, TestBlockMoveable) {
+  base::OnceClosure c;
+  __block BOOL invoked_block = NO;
+  @autoreleasepool {
+    c = base::BindOnce(
+        ^(std::unique_ptr<BOOL> v) {
+          invoked_block = *v;
+        },
+        std::make_unique<BOOL>(YES));
+  }
+  std::move(c).Run();
+  EXPECT_TRUE(invoked_block);
+}
+
+// Tests that the bound block is retained until the end of its execution, even
+// if the callback itself is destroyed during the invocation. It was found that
+// some code depends on this behaviour (see https://crbug.com/845687).
+TEST(BindObjcBlockTest, TestBlockDeallocation) {
+  base::RepeatingClosure closure;
+  __block BOOL invoked_block = NO;
+  closure = base::BindRepeating(
+      ^(base::RepeatingClosure* this_closure) {
+        *this_closure = base::RepeatingClosure();
+        invoked_block = YES;
+      },
+      &closure);
+  closure.Run();
+  EXPECT_TRUE(invoked_block);
+}
+
+TEST(BindObjcBlockTest, TestBlockReleased) {
+  __weak NSObject* weak_nsobject;
+  @autoreleasepool {
+    NSObject* nsobject = [[NSObject alloc] init];
+    weak_nsobject = nsobject;
+
+    auto callback = base::BindOnce(^{
+      [nsobject description];
+    });
+  }
+  EXPECT_NSEQ(nil, weak_nsobject);
+}
+
+}  // namespace
diff --git a/base/apple/bridging.h b/base/apple/bridging.h
index 1a9284b..19911dc 100644
--- a/base/apple/bridging.h
+++ b/base/apple/bridging.h
@@ -8,9 +8,9 @@
 #include <CoreText/CoreText.h>
 #import <Foundation/Foundation.h>
 
+#include "base/apple/scoped_cftyperef.h"
 #include "base/base_export.h"
 #include "base/check.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/types/always_false.h"
 #include "build/build_config.h"
 
@@ -144,7 +144,7 @@
 namespace base::apple {
 
 template <typename CFT>
-id _Nullable CFToNSOwnershipCast(base::ScopedCFTypeRef<CFT>) {
+id _Nullable CFToNSOwnershipCast(ScopedCFTypeRef<CFT>) {
   static_assert(
       AlwaysFalse<CFT>,
       "Error: Do not pass a ScopedCFTypeRef to CFToNSOwnershipCast. "
diff --git a/base/apple/bundle_locations.mm b/base/apple/bundle_locations.mm
index f3b8090..a875fdc 100644
--- a/base/apple/bundle_locations.mm
+++ b/base/apple/bundle_locations.mm
@@ -4,8 +4,8 @@
 
 #include "base/apple/bundle_locations.h"
 
+#include "base/apple/foundation_util.h"
 #include "base/check.h"
-#include "base/mac/foundation_util.h"
 #include "base/strings/sys_string_conversions.h"
 
 namespace base::apple {
@@ -26,7 +26,7 @@
 }
 
 FilePath MainBundlePath() {
-  return mac::NSStringToFilePath(MainBundle().bundlePath);
+  return apple::NSStringToFilePath(MainBundle().bundlePath);
 }
 
 NSBundle* OuterBundle() {
@@ -41,7 +41,7 @@
 }
 
 FilePath OuterBundlePath() {
-  return mac::NSStringToFilePath(OuterBundle().bundlePath);
+  return apple::NSStringToFilePath(OuterBundle().bundlePath);
 }
 
 NSBundle* FrameworkBundle() {
@@ -52,7 +52,7 @@
 }
 
 FilePath FrameworkBundlePath() {
-  return mac::NSStringToFilePath(FrameworkBundle().bundlePath);
+  return apple::NSStringToFilePath(FrameworkBundle().bundlePath);
 }
 
 namespace {
@@ -62,7 +62,7 @@
     return nil;
   }
 
-  NSBundle* bundle = [NSBundle bundleWithURL:mac::FilePathToNSURL(file_path)];
+  NSBundle* bundle = [NSBundle bundleWithURL:apple::FilePathToNSURL(file_path)];
   CHECK(bundle) << "Failed to load the bundle at " << file_path.value();
 
   return bundle;
diff --git a/base/apple/call_with_eh_frame.cc b/base/apple/call_with_eh_frame.cc
new file mode 100644
index 0000000..a97c0fc
--- /dev/null
+++ b/base/apple/call_with_eh_frame.cc
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/call_with_eh_frame.h"
+
+#include <stdint.h>
+#include <unwind.h>
+
+namespace base::apple {
+
+#if defined(__x86_64__) || defined(__aarch64__)
+extern "C" _Unwind_Reason_Code __gxx_personality_v0(int,
+                                                    _Unwind_Action,
+                                                    uint64_t,
+                                                    struct _Unwind_Exception*,
+                                                    struct _Unwind_Context*);
+
+_Unwind_Reason_Code CxxPersonalityRoutine(
+    int version,
+    _Unwind_Action actions,
+    uint64_t exception_class,
+    struct _Unwind_Exception* exception_object,
+    struct _Unwind_Context* context) {
+  // Unwinding is a two-phase process: phase one searches for an exception
+  // handler, and phase two performs cleanup. For phase one, this custom
+  // personality will terminate the search. For phase two, this should delegate
+  // back to the standard personality routine.
+
+  if ((actions & _UA_SEARCH_PHASE) != 0) {
+    // Tell libunwind that this is the end of the stack. When it encounters the
+    // CallWithEHFrame, it will stop searching for an exception handler. The
+    // result is that no exception handler has been found higher on the stack,
+    // and any that are lower on the stack (e.g. in CFRunLoopRunSpecific), will
+    // now be skipped. Since this is reporting the end of the stack, and no
+    // exception handler will have been found, std::terminate() will be called.
+    return _URC_END_OF_STACK;
+  }
+
+  return __gxx_personality_v0(version, actions, exception_class,
+                              exception_object, context);
+}
+#else   // !defined(__x86_64__) && !defined(__aarch64__)
+// No assembly implementation exists for this architecture, so just call the
+// block directly.
+void CallWithEHFrame(void (^block)(void)) {
+  block();
+}
+#endif  // defined(__x86_64__) || defined(__aarch64__)
+}  // namespace base::apple
diff --git a/base/apple/call_with_eh_frame.h b/base/apple/call_with_eh_frame.h
new file mode 100644
index 0000000..b4fc00f
--- /dev/null
+++ b/base/apple/call_with_eh_frame.h
@@ -0,0 +1,24 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_CALL_WITH_EH_FRAME_H_
+#define BASE_APPLE_CALL_WITH_EH_FRAME_H_
+
+#include "base/base_export.h"
+
+namespace base::apple {
+
+// Invokes the specified block in a stack frame with a special exception
+// handler. This function creates an exception handling stack frame that
+// specifies a custom C++ exception personality routine, which terminates the
+// search for an exception handler at this frame.
+//
+// The purpose of this function is to prevent a try/catch statement in system
+// libraries, acting as a global exception handler, from handling exceptions
+// in a way that disrupts the generation of useful stack traces.
+void BASE_EXPORT CallWithEHFrame(void (^block)(void));
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_CALL_WITH_EH_FRAME_H_
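A minimal usage sketch for the declaration above (hypothetical caller; only
CallWithEHFrame() itself comes from this header): wrap work that may run under
a catch-all @try/@catch installed by a system library, so that an escaping
exception terminates at this frame with the original throw site still on the
stack.

#include "base/apple/call_with_eh_frame.h"

// Hypothetical run-loop observer body. If |work| throws, unwinding stops at
// CallWithEHFrame() and std::terminate() runs, instead of the exception being
// swallowed by a broader handler lower on the stack (e.g. in
// CFRunLoopRunSpecific, as noted in call_with_eh_frame.cc).
void RunObserverWork(void (^work)(void)) {
  base::apple::CallWithEHFrame(work);
}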
diff --git a/base/apple/call_with_eh_frame_asm.S b/base/apple/call_with_eh_frame_asm.S
new file mode 100644
index 0000000..04e13ba
--- /dev/null
+++ b/base/apple/call_with_eh_frame_asm.S
@@ -0,0 +1,133 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(__x86_64__) || defined(__aarch64__)
+
+// base::apple::CallWithEHFrame(void () block_pointer)
+#define CALL_WITH_EH_FRAME __ZN4base5apple15CallWithEHFrameEU13block_pointerFvvE
+
+  .section __TEXT,__text,regular,pure_instructions
+#if !defined(COMPONENT_BUILD)
+  .private_extern CALL_WITH_EH_FRAME
+#endif
+  .globl CALL_WITH_EH_FRAME
+  .p2align 2
+CALL_WITH_EH_FRAME:
+
+  .cfi_startproc
+
+  // Configure the C++ exception handler personality routine. Normally the
+  // compiler would emit ___gxx_personality_v0 here. The purpose of this
+  // function is to use a custom personality routine.
+  .cfi_personality 155, __ZN4base5apple21CxxPersonalityRoutineEi14_Unwind_ActionyP17_Unwind_ExceptionP15_Unwind_Context
+  .cfi_lsda 16, CallWithEHFrame_exception_table
+
+#if defined(__x86_64__)
+Lfunction_start:
+  pushq %rbp
+  .cfi_def_cfa_offset 16
+  .cfi_offset %rbp, -16
+  movq %rsp, %rbp
+  .cfi_def_cfa_register %rbp
+
+  // Load the function pointer from the block descriptor.
+  movq 16(%rdi), %rax
+
+  // Execute the block in the context of a C++ try{}.
+Ltry_start:
+  callq *%rax
+Ltry_end:
+  popq %rbp
+  ret
+
+  // Landing pad for the exception handler. This should never be called, since
+  // the personality routine will stop the search for an exception handler,
+  // which will cause the runtime to invoke the default terminate handler.
+Lcatch:
+  movq %rax, %rdi
+  callq ___cxa_begin_catch  // The ABI requires a call to the catch handler.
+  ud2  // In the event this is called, make it fatal.
+
+#elif defined(__aarch64__)
+Lfunction_start:
+  stp x29, x30, [sp, #-16]!
+  mov x29, sp
+  .cfi_def_cfa w29, 16
+  .cfi_offset w30, -8
+  .cfi_offset w29, -16
+
+  // Load the function pointer from the block descriptor.
+  ldr x8, [x0, #16]
+
+  // Execute the block in the context of a C++ try{}.
+Ltry_start:
+  blr x8
+Ltry_end:
+  ldp x29, x30, [sp], #16
+  ret
+
+  // Landing pad for the exception handler. This should never be called, since
+  // the personality routine will stop the search for an exception handler,
+  // which will cause the runtime to invoke the default terminate handler.
+Lcatch:
+  bl ___cxa_begin_catch  // The ABI requires a call to the catch handler.
+  brk #0x1  // In the event this is called, make it fatal.
+#endif
+
+Lfunction_end:
+  .cfi_endproc
+
+  // The C++ exception table that is used to identify this frame as an
+  // exception handler. See https://llvm.org/docs/ExceptionHandling.html,
+  // https://itanium-cxx-abi.github.io/cxx-abi/exceptions.pdf and
+  // https://www.airs.com/blog/archives/464.
+  .section __TEXT,__gcc_except_tab
+  .p2align 2
+CallWithEHFrame_exception_table:
+  .byte 255  // DW_EH_PE_omit
+  .byte 155  // DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4
+  // The number of bytes in this table
+  .uleb128 Ltypes_table_base - Ltypes_table_ref_base
+
+Ltypes_table_ref_base:
+  .byte 1  // DW_EH_PE_uleb128
+  // Callsite table length.
+  .uleb128 Lcall_site_table_end - Lcall_site_table_start
+
+Lcall_site_table_start:
+// First callsite.
+CS1_begin = Ltry_start - Lfunction_start
+  .uleb128 CS1_begin
+CS1_end = Ltry_end - Ltry_start
+  .uleb128 CS1_end
+
+// First landing pad.
+LP1 = Lcatch - Lfunction_start
+  .uleb128 LP1
+  .uleb128 1  // Action record.
+
+// Second callsite.
+CS2_begin = Ltry_end - Lfunction_start
+  .uleb128 CS2_begin
+CS2_end = Lfunction_end - Ltry_end
+  .uleb128 CS2_end
+
+  // Second landing pad (none).
+  .uleb128 0
+  .uleb128 0  // No action.
+
+Lcall_site_table_end:
+  // Action table.
+  // Action record 1.
+  .uleb128 1  // Type filter -1.
+  .uleb128 0  // No further action to take.
+
+  // Types table.
+  .p2align 2
+  .long 0  // Type filter -1: no type filter for this catch(){} clause.
+
+Ltypes_table_base:
+  .p2align 2
+
+#endif  // defined(__x86_64__) || defined(__aarch64__)
diff --git a/base/apple/call_with_eh_frame_unittest.mm b/base/apple/call_with_eh_frame_unittest.mm
new file mode 100644
index 0000000..eaf07f0
--- /dev/null
+++ b/base/apple/call_with_eh_frame_unittest.mm
@@ -0,0 +1,53 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/call_with_eh_frame.h"
+
+#import <Foundation/Foundation.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base::apple {
+namespace {
+
+class CallWithEHFrameTest : public testing::Test {
+ protected:
+  void ThrowException() {
+    @throw [NSException exceptionWithName:@"TestException"
+                                   reason:@"Testing exceptions"
+                                 userInfo:nil];
+  }
+};
+
+// Catching from within the EHFrame is allowed.
+TEST_F(CallWithEHFrameTest, CatchExceptionHigher) {
+  bool __block saw_exception = false;
+  base::apple::CallWithEHFrame(^{
+    @try {
+      ThrowException();
+    } @catch (NSException* exception) {
+      saw_exception = true;
+    }
+  });
+  EXPECT_TRUE(saw_exception);
+}
+
+// Trying to catch an exception outside the EHFrame is blocked.
+TEST_F(CallWithEHFrameTest, CatchExceptionLower) {
+  auto catch_exception_lower = ^{
+    bool saw_exception = false;
+    @try {
+      base::apple::CallWithEHFrame(^{
+        ThrowException();
+      });
+    } @catch (NSException* exception) {
+      saw_exception = true;
+    }
+    ASSERT_FALSE(saw_exception);
+  };
+  EXPECT_DEATH(catch_exception_lower(), "");
+}
+
+}  // namespace
+}  // namespace base::apple
diff --git a/base/apple/dispatch_source_mach.cc b/base/apple/dispatch_source_mach.cc
new file mode 100644
index 0000000..e155bee
--- /dev/null
+++ b/base/apple/dispatch_source_mach.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/dispatch_source_mach.h"
+
+#include "base/apple/scoped_dispatch_object.h"
+
+namespace base::apple {
+
+struct DispatchSourceMach::Storage {
+  // The dispatch queue used to service the source_.
+  ScopedDispatchObject<dispatch_queue_t> queue;
+
+  // A MACH_RECV dispatch source.
+  ScopedDispatchObject<dispatch_source_t> source;
+
+  // Semaphore used to wait on the |source_|'s cancellation in the destructor.
+  ScopedDispatchObject<dispatch_semaphore_t> source_canceled;
+};
+
+DispatchSourceMach::DispatchSourceMach(const char* name,
+                                       mach_port_t port,
+                                       void (^event_handler)())
+    : DispatchSourceMach(dispatch_queue_create(name, DISPATCH_QUEUE_SERIAL),
+                         port,
+                         event_handler) {
+  // Since the queue was created above in the delegated constructor, and it was
+  // subsequently retained, release it here.
+  dispatch_release(storage_->queue.get());
+}
+
+DispatchSourceMach::DispatchSourceMach(dispatch_queue_t queue,
+                                       mach_port_t port,
+                                       void (^event_handler)())
+    : storage_(std::make_unique<Storage>()) {
+  storage_->queue.reset(queue, base::scoped_policy::RETAIN);
+  storage_->source.reset(dispatch_source_create(
+      DISPATCH_SOURCE_TYPE_MACH_RECV, port, 0, storage_->queue.get()));
+  storage_->source_canceled.reset(dispatch_semaphore_create(0));
+
+  dispatch_source_set_event_handler(storage_->source.get(), event_handler);
+  dispatch_source_set_cancel_handler(storage_->source.get(), ^{
+    dispatch_semaphore_signal(storage_->source_canceled.get());
+  });
+}
+
+DispatchSourceMach::~DispatchSourceMach() {
+  // Cancel the source and wait for the semaphore to be signaled. This will
+  // ensure the source managed by this class is not used after it is freed.
+  dispatch_source_cancel(storage_->source.get());
+  storage_->source.reset();
+
+  dispatch_semaphore_wait(storage_->source_canceled.get(),
+                          DISPATCH_TIME_FOREVER);
+}
+
+void DispatchSourceMach::Resume() {
+  dispatch_resume(storage_->source.get());
+}
+
+dispatch_queue_t DispatchSourceMach::Queue() const {
+  return storage_->queue.get();
+}
+
+}  // namespace base::apple
diff --git a/base/apple/dispatch_source_mach.h b/base/apple/dispatch_source_mach.h
new file mode 100644
index 0000000..e6289ba
--- /dev/null
+++ b/base/apple/dispatch_source_mach.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_DISPATCH_SOURCE_MACH_H_
+#define BASE_APPLE_DISPATCH_SOURCE_MACH_H_
+
+#include <dispatch/dispatch.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+
+namespace base::apple {
+
+// This class encapsulates a MACH_RECV dispatch source. When this object is
+// destroyed, the source will be cancelled and it will wait for the source
+// to stop executing work. The source can run on either a user-supplied queue,
+// or it can create its own for the source.
+class BASE_EXPORT DispatchSourceMach {
+ public:
+  // Creates a new dispatch source for the |port| and schedules it on a new
+  // queue that will be created with |name|. When a Mach message is received,
+  // the |event_handler| will be called.
+  DispatchSourceMach(const char* name,
+                     mach_port_t port,
+                     void (^event_handler)());
+
+  // Creates a new dispatch source with the same semantics as above, but rather
+  // than creating a new queue, it schedules the source on |queue|.
+  DispatchSourceMach(dispatch_queue_t queue,
+                     mach_port_t port,
+                     void (^event_handler)());
+
+  DispatchSourceMach(const DispatchSourceMach&) = delete;
+  DispatchSourceMach& operator=(const DispatchSourceMach&) = delete;
+
+  // Cancels the source and waits for it to become fully cancelled before
+  // releasing the source.
+  ~DispatchSourceMach();
+
+  // Resumes the source. This must be called before any Mach messages will
+  // be received.
+  void Resume();
+
+  dispatch_queue_t Queue() const;
+
+ private:
+  struct Storage;
+  std::unique_ptr<Storage> storage_;
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_DISPATCH_SOURCE_MACH_H_
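A minimal usage sketch for the interface above (hypothetical helper; the queue
name and message handling are made up, but the pattern mirrors the unit test
below):

#include <mach/mach.h>

#include <memory>

#include "base/apple/dispatch_source_mach.h"

// Hypothetical helper: service |port| on a dedicated dispatch queue until the
// returned object is destroyed. Resume() must be called before any messages
// are delivered to the handler.
std::unique_ptr<base::apple::DispatchSourceMach> WatchPort(mach_port_t port) {
  auto source = std::make_unique<base::apple::DispatchSourceMach>(
      "org.example.PortWatcher", port, ^{
        mach_msg_empty_rcv_t msg = {{0}};
        msg.header.msgh_size = sizeof(msg);
        msg.header.msgh_local_port = port;
        mach_msg_receive(&msg.header);
        // Handle the received message here.
      });
  source->Resume();
  return source;
}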
diff --git a/base/apple/dispatch_source_mach_unittest.cc b/base/apple/dispatch_source_mach_unittest.cc
new file mode 100644
index 0000000..9fd7e21
--- /dev/null
+++ b/base/apple/dispatch_source_mach_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/dispatch_source_mach.h"
+
+#include <mach/mach.h>
+
+#include <memory>
+
+#include "base/apple/scoped_mach_port.h"
+#include "base/logging.h"
+#include "base/test/test_timeouts.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base::apple {
+
+class DispatchSourceMachTest : public testing::Test {
+ public:
+  void SetUp() override {
+    mach_port_t port = MACH_PORT_NULL;
+    ASSERT_EQ(KERN_SUCCESS, mach_port_allocate(mach_task_self(),
+                                               MACH_PORT_RIGHT_RECEIVE, &port));
+    receive_right_.reset(port);
+
+    ASSERT_EQ(KERN_SUCCESS, mach_port_insert_right(mach_task_self(), port, port,
+                                                   MACH_MSG_TYPE_MAKE_SEND));
+    send_right_.reset(port);
+  }
+
+  mach_port_t GetPort() { return receive_right_.get(); }
+
+  void WaitForSemaphore(dispatch_semaphore_t semaphore) {
+    dispatch_semaphore_wait(
+        semaphore, dispatch_time(DISPATCH_TIME_NOW,
+                                 TestTimeouts::action_timeout().InSeconds() *
+                                     NSEC_PER_SEC));
+  }
+
+ private:
+  base::apple::ScopedMachReceiveRight receive_right_;
+  base::apple::ScopedMachSendRight send_right_;
+};
+
+TEST_F(DispatchSourceMachTest, ReceiveAfterResume) {
+  dispatch_semaphore_t signal = dispatch_semaphore_create(0);
+  mach_port_t port = GetPort();
+
+  bool __block did_receive = false;
+  DispatchSourceMach source("org.chromium.base.test.ReceiveAfterResume", port,
+                            ^{
+                              mach_msg_empty_rcv_t msg = {{0}};
+                              msg.header.msgh_size = sizeof(msg);
+                              msg.header.msgh_local_port = port;
+                              mach_msg_receive(&msg.header);
+                              did_receive = true;
+
+                              dispatch_semaphore_signal(signal);
+                            });
+
+  mach_msg_empty_send_t msg = {{0}};
+  msg.header.msgh_size = sizeof(msg);
+  msg.header.msgh_remote_port = port;
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
+  ASSERT_EQ(KERN_SUCCESS, mach_msg_send(&msg.header));
+
+  EXPECT_FALSE(did_receive);
+
+  source.Resume();
+
+  WaitForSemaphore(signal);
+  dispatch_release(signal);
+
+  EXPECT_TRUE(did_receive);
+}
+
+TEST_F(DispatchSourceMachTest, NoMessagesAfterDestruction) {
+  mach_port_t port = GetPort();
+
+  std::unique_ptr<int> count(new int(0));
+  int* __block count_ptr = count.get();
+
+  std::unique_ptr<DispatchSourceMach> source(new DispatchSourceMach(
+      "org.chromium.base.test.NoMessagesAfterDestruction", port, ^{
+        mach_msg_empty_rcv_t msg = {{0}};
+        msg.header.msgh_size = sizeof(msg);
+        msg.header.msgh_local_port = port;
+        mach_msg_receive(&msg.header);
+        LOG(INFO) << "Receive " << *count_ptr;
+        ++(*count_ptr);
+      }));
+  source->Resume();
+
+  dispatch_queue_t queue =
+      dispatch_queue_create("org.chromium.base.test.MessageSend", NULL);
+  dispatch_semaphore_t signal = dispatch_semaphore_create(0);
+  for (int i = 0; i < 30; ++i) {
+    dispatch_async(queue, ^{
+      mach_msg_empty_send_t msg = {{0}};
+      msg.header.msgh_size = sizeof(msg);
+      msg.header.msgh_remote_port = port;
+      msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
+      mach_msg_send(&msg.header);
+    });
+
+    // After sending five messages, shut down the source and taint the
+    // pointer the handler dereferences. The test will crash if |count_ptr|
+    // is being used after "free".
+    if (i == 5) {
+      std::unique_ptr<DispatchSourceMach>* source_ptr = &source;
+      dispatch_async(queue, ^{
+        source_ptr->reset();
+        count_ptr = reinterpret_cast<int*>(0xdeaddead);
+        dispatch_semaphore_signal(signal);
+      });
+    }
+  }
+
+  WaitForSemaphore(signal);
+  dispatch_release(signal);
+
+  dispatch_release(queue);
+}
+
+}  // namespace base::apple
diff --git a/base/apple/foundation_util.h b/base/apple/foundation_util.h
new file mode 100644
index 0000000..d1d8e05
--- /dev/null
+++ b/base/apple/foundation_util.h
@@ -0,0 +1,321 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_FOUNDATION_UTIL_H_
+#define BASE_APPLE_FOUNDATION_UTIL_H_
+
+#include <AvailabilityMacros.h>
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreText/CoreText.h>
+#include <Security/Security.h>
+
+#include <string>
+
+#include "base/apple/scoped_cftyperef.h"
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(__OBJC__)
+#import <Foundation/Foundation.h>
+@class NSFont;
+@class UIFont;
+#endif  // __OBJC__
+
+namespace base {
+class FilePath;
+}
+
+namespace base::apple {
+
+// Returns true if the application is running from a bundle
+BASE_EXPORT bool AmIBundled();
+BASE_EXPORT void SetOverrideAmIBundled(bool value);
+
+#if defined(UNIT_TEST)
+// This is required because instantiating some tests requires checking the
+// directory structure, which sets the AmIBundled cache state. Individual tests
+// may or may not be bundled, and this would trip them up if the cache weren't
+// cleared. This should not be called from individual tests, just from test
+// instantiation code that gets a path from PathService.
+BASE_EXPORT void ClearAmIBundledCache();
+#endif
+
+// Returns true if this process is marked as a "Background only process".
+BASE_EXPORT bool IsBackgroundOnlyProcess();
+
+// Returns the path to a resource within the framework bundle.
+BASE_EXPORT FilePath PathForFrameworkBundleResource(const char* resource_name);
+
+// Returns the creator code associated with the CFBundleRef at bundle.
+OSType CreatorCodeForCFBundleRef(CFBundleRef bundle);
+
+// Returns the creator code associated with this application, by calling
+// CreatorCodeForCFBundleRef for the application's main bundle.  If this
+// information cannot be determined, returns kUnknownType ('????').  This
+// does not respect the override app bundle because it's based on CFBundle
+// instead of NSBundle, and because callers probably don't want the override
+// app bundle's creator code anyway.
+BASE_EXPORT OSType CreatorCodeForApplication();
+
+#if defined(__OBJC__)
+
+// Searches for directories for the given key in only the given |domain_mask|.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true.  Otherwise, returns false.
+BASE_EXPORT bool GetSearchPathDirectory(NSSearchPathDirectory directory,
+                                        NSSearchPathDomainMask domain_mask,
+                                        FilePath* result);
+
+// Searches for directories for the given key in only the local domain.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true.  Otherwise, returns false.
+BASE_EXPORT bool GetLocalDirectory(NSSearchPathDirectory directory,
+                                   FilePath* result);
+
+// Searches for directories for the given key in only the user domain.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true.  Otherwise, returns false.
+BASE_EXPORT bool GetUserDirectory(NSSearchPathDirectory directory,
+                                  FilePath* result);
+
+#endif  // __OBJC__
+
+// Returns the ~/Library directory.
+BASE_EXPORT FilePath GetUserLibraryPath();
+
+// Returns the ~/Documents directory.
+BASE_EXPORT FilePath GetUserDocumentPath();
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the outermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
+//   |exec_name| - path to the binary
+//   returns - path to the application bundle, or empty on error
+BASE_EXPORT FilePath GetAppBundlePath(const FilePath& exec_name);
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the innermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces
+// "/Foo/Bar.app/.../Baz.app").
+//   |exec_name| - path to the binary
+//   returns - path to the application bundle, or empty on error
+BASE_EXPORT FilePath GetInnermostAppBundlePath(const FilePath& exec_name);
+
+#define TYPE_NAME_FOR_CF_TYPE_DECL(TypeCF) \
+  BASE_EXPORT std::string TypeNameForCFType(TypeCF##Ref)
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CFArray);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFBag);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFBoolean);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFData);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFDate);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFDictionary);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFNull);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFNumber);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFSet);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFString);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFURL);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFUUID);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CGColor);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CTFont);
+TYPE_NAME_FOR_CF_TYPE_DECL(CTRun);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(SecAccessControl);
+TYPE_NAME_FOR_CF_TYPE_DECL(SecCertificate);
+TYPE_NAME_FOR_CF_TYPE_DECL(SecKey);
+TYPE_NAME_FOR_CF_TYPE_DECL(SecPolicy);
+
+#undef TYPE_NAME_FOR_CF_TYPE_DECL
+
+// Returns the base bundle ID, which can be set by SetBaseBundleID but
+// defaults to a reasonable string. This never returns NULL. BaseBundleID
+// returns a pointer to static storage that must not be freed.
+BASE_EXPORT const char* BaseBundleID();
+
+// Sets the base bundle ID to override the default. The implementation will
+// make its own copy of new_base_bundle_id.
+BASE_EXPORT void SetBaseBundleID(const char* new_base_bundle_id);
+
+// CFCast<>() and CFCastStrict<>() cast a basic CFTypeRef to a more
+// specific CoreFoundation type. The compatibility of the passed
+// object is found by comparing its opaque type against the
+// requested type identifier. If the supplied object is not
+// compatible with the requested return type, CFCast<>() returns
+// NULL and CFCastStrict<>() will DCHECK. Providing a NULL pointer
+// to either variant results in NULL being returned without
+// triggering any DCHECK.
+//
+// Example usage:
+// CFNumberRef some_number = base::apple::CFCast<CFNumberRef>(
+//     CFArrayGetValueAtIndex(array, index));
+//
+// CFTypeRef hello = CFSTR("hello world");
+// CFStringRef some_string = base::apple::CFCastStrict<CFStringRef>(hello);
+
+template <typename T>
+T CFCast(const CFTypeRef& cf_val);
+
+template <typename T>
+T CFCastStrict(const CFTypeRef& cf_val);
+
+#define CF_CAST_DECL(TypeCF)                                            \
+  template <>                                                           \
+  BASE_EXPORT TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val); \
+                                                                        \
+  template <>                                                           \
+  BASE_EXPORT TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val)
+
+CF_CAST_DECL(CFArray);
+CF_CAST_DECL(CFBag);
+CF_CAST_DECL(CFBoolean);
+CF_CAST_DECL(CFData);
+CF_CAST_DECL(CFDate);
+CF_CAST_DECL(CFDictionary);
+CF_CAST_DECL(CFNull);
+CF_CAST_DECL(CFNumber);
+CF_CAST_DECL(CFSet);
+CF_CAST_DECL(CFString);
+CF_CAST_DECL(CFURL);
+CF_CAST_DECL(CFUUID);
+
+CF_CAST_DECL(CGColor);
+
+CF_CAST_DECL(CTFont);
+CF_CAST_DECL(CTFontDescriptor);
+CF_CAST_DECL(CTRun);
+
+CF_CAST_DECL(SecAccessControl);
+CF_CAST_DECL(SecCertificate);
+CF_CAST_DECL(SecKey);
+CF_CAST_DECL(SecPolicy);
+
+#undef CF_CAST_DECL
+
+#if defined(__OBJC__)
+
+// ObjCCast<>() and ObjCCastStrict<>() cast a basic id to a more
+// specific (NSObject-derived) type. The compatibility of the passed
+// object is found by checking if it's a kind of the requested type
+// identifier. If the supplied object is not compatible with the
+// requested return type, ObjCCast<>() returns nil and
+// ObjCCastStrict<>() will DCHECK. Providing a nil pointer to either
+// variant results in nil being returned without triggering any DCHECK.
+//
+// The strict variant is useful when retrieving a value from a
+// collection which only has values of a specific type, e.g. an
+// NSArray of NSStrings. The non-strict variant is useful when
+// retrieving values from data that you can't fully control. For
+// example, a plist read from disk may be beyond your exclusive
+// control, so you'd only want to check that the values you retrieve
+// from it are of the expected types, but not crash if they're not.
+//
+// Example usage:
+// NSString* version = base::apple::ObjCCast<NSString>(
+//     [bundle objectForInfoDictionaryKey:@"CFBundleShortVersionString"]);
+//
+// NSString* str = base::apple::ObjCCastStrict<NSString>(
+//     [ns_arr_of_ns_strs objectAtIndex:0]);
+template <typename T>
+T* ObjCCast(id objc_val) {
+  if ([objc_val isKindOfClass:[T class]]) {
+    return reinterpret_cast<T*>(objc_val);
+  }
+  return nil;
+}
+
+template <typename T>
+T* ObjCCastStrict(id objc_val) {
+  T* rv = ObjCCast<T>(objc_val);
+  DCHECK(objc_val == nil || rv);
+  return rv;
+}
+
+#endif  // defined(__OBJC__)
+
+// Helper function for GetValueFromDictionary to create the error message
+// that appears when a type mismatch is encountered.
+BASE_EXPORT std::string GetValueFromDictionaryErrorMessage(
+    CFStringRef key,
+    const std::string& expected_type,
+    CFTypeRef value);
+
+// Utility function to pull out a value from a dictionary, check its type, and
+// return it. Returns NULL if the key is not present or of the wrong type.
+template <typename T>
+T GetValueFromDictionary(CFDictionaryRef dict, CFStringRef key) {
+  CFTypeRef value = CFDictionaryGetValue(dict, key);
+  T value_specific = CFCast<T>(value);
+
+  if (value && !value_specific) {
+    std::string expected_type = TypeNameForCFType(value_specific);
+    DLOG(WARNING) << GetValueFromDictionaryErrorMessage(key, expected_type,
+                                                        value);
+  }
+
+  return value_specific;
+}
+
+#if defined(__OBJC__)
+
+// Converts |path| to an autoreleased NSURL. Returns nil if |path| is empty.
+BASE_EXPORT NSURL* FilePathToNSURL(const FilePath& path);
+
+// Converts |path| to an autoreleased NSString. Returns nil if |path| is empty.
+BASE_EXPORT NSString* FilePathToNSString(const FilePath& path);
+
+// Converts |str| to a FilePath. Returns an empty path if |str| is nil.
+BASE_EXPORT FilePath NSStringToFilePath(NSString* str);
+
+// Converts |url| to a FilePath. Returns an empty path if |url| is nil or if
+// |url| is not of scheme "file".
+BASE_EXPORT FilePath NSURLToFilePath(NSURL* url);
+
+#endif  // __OBJC__
+
+// Converts a non-null |path| to a CFURLRef. |path| must not be empty.
+//
+// This function only uses manually-owned resources, so it does not depend on an
+// NSAutoreleasePool being set up on the current thread.
+BASE_EXPORT ScopedCFTypeRef<CFURLRef> FilePathToCFURL(const FilePath& path);
+
+#if defined(__OBJC__)
+// Converts |range| to an NSRange, returning the new range in |range_out|.
+// Returns true if conversion was successful, false if the values of |range|
+// could not be converted to NSUIntegers.
+[[nodiscard]] BASE_EXPORT bool CFRangeToNSRange(CFRange range,
+                                                NSRange* range_out);
+#endif  // defined(__OBJC__)
+
+}  // namespace base::apple
+
+// Stream operations for CFTypes. They can be used with Objective-C types as
+// well by using the casting methods in base/apple/bridging.h.
+//
+// For example: LOG(INFO) << base::apple::NSToCFPtrCast(@"foo");
+//
+// operator<<() can not be overloaded for Objective-C types as the compiler
+// cannot distinguish between overloads for id with overloads for void*.
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
+                                            const CFErrorRef err);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
+                                            const CFStringRef str);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, CFRange);
+
+#if defined(__OBJC__)
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, id);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSRange);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, SEL);
+
+#if BUILDFLAG(IS_MAC)
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSPoint);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSRect);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSSize);
+#endif  // IS_MAC
+
+#endif  // __OBJC__
+
+#endif  // BASE_APPLE_FOUNDATION_UTIL_H_
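A minimal sketch combining a few of the helpers declared above (hypothetical
caller; the "Version" key is made up for illustration):

#include "base/apple/foundation_util.h"
#include "base/files/file_path.h"

#import <Foundation/Foundation.h>

// Pull a typed value out of a CFDictionary, and convert a FilePath for use
// with Foundation APIs. GetValueFromDictionary() returns null (and logs a
// warning) if the key is present but holds a value of the wrong type.
void FoundationUtilExample(CFDictionaryRef dict, const base::FilePath& path) {
  CFNumberRef version =
      base::apple::GetValueFromDictionary<CFNumberRef>(dict, CFSTR("Version"));
  NSURL* url = base::apple::FilePathToNSURL(path);  // nil if |path| is empty.
  (void)version;
  (void)url;
}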
diff --git a/base/apple/foundation_util.mm b/base/apple/foundation_util.mm
new file mode 100644
index 0000000..cd637a1
--- /dev/null
+++ b/base/apple/foundation_util.mm
@@ -0,0 +1,479 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/foundation_util.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <vector>
+
+#include "base/apple/bundle_locations.h"
+#include "base/apple/osstatus_logging.h"
+#include "base/containers/adapters.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/notreached.h"
+#include "base/numerics/checked_math.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/ranges/algorithm.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "build/branding_buildflags.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(IS_IOS)
+#import <AppKit/AppKit.h>
+#endif
+
+extern "C" {
+CFTypeID SecKeyGetTypeID();
+}  // extern "C"
+
+namespace base::apple {
+
+namespace {
+
+bool g_cached_am_i_bundled_called = false;
+bool g_cached_am_i_bundled_value = false;
+bool g_override_am_i_bundled = false;
+bool g_override_am_i_bundled_value = false;
+
+bool UncachedAmIBundled() {
+#if BUILDFLAG(IS_IOS)
+  // All apps are bundled on iOS.
+  return true;
+#else
+  if (g_override_am_i_bundled) {
+    return g_override_am_i_bundled_value;
+  }
+
+  // Yes, this is cheap.
+  return [apple::OuterBundle().bundlePath hasSuffix:@".app"];
+#endif
+}
+
+}  // namespace
+
+bool AmIBundled() {
+  // If the return value is not cached, this function will return different
+  // values depending on when it's called. This confuses some client code, see
+  // http://crbug.com/63183 .
+  if (!g_cached_am_i_bundled_called) {
+    g_cached_am_i_bundled_called = true;
+    g_cached_am_i_bundled_value = UncachedAmIBundled();
+  }
+  DCHECK_EQ(g_cached_am_i_bundled_value, UncachedAmIBundled())
+      << "The return value of AmIBundled() changed. This will confuse tests. "
+      << "Call SetAmIBundled() override manually if your test binary "
+      << "delay-loads the framework.";
+  return g_cached_am_i_bundled_value;
+}
+
+void SetOverrideAmIBundled(bool value) {
+#if BUILDFLAG(IS_IOS)
+  // It doesn't make sense not to be bundled on iOS.
+  if (!value) {
+    NOTREACHED();
+  }
+#endif
+  g_override_am_i_bundled = true;
+  g_override_am_i_bundled_value = value;
+}
+
+BASE_EXPORT void ClearAmIBundledCache() {
+  g_cached_am_i_bundled_called = false;
+}
+
+bool IsBackgroundOnlyProcess() {
+  // This function really does want to examine NSBundle's idea of the main
+  // bundle dictionary.  It needs to look at the actual running .app's
+  // Info.plist to access its LSUIElement property.
+  @autoreleasepool {
+    NSDictionary* info_dictionary = [apple::MainBundle() infoDictionary];
+    return [info_dictionary[@"LSUIElement"] boolValue] != NO;
+  }
+}
+
+FilePath PathForFrameworkBundleResource(const char* resource_name) {
+  NSBundle* bundle = apple::FrameworkBundle();
+  NSURL* resource_url = [bundle URLForResource:@(resource_name)
+                                 withExtension:nil];
+  return NSURLToFilePath(resource_url);
+}
+
+OSType CreatorCodeForCFBundleRef(CFBundleRef bundle) {
+  OSType creator = kUnknownType;
+  CFBundleGetPackageInfo(bundle, /*packageType=*/nullptr, &creator);
+  return creator;
+}
+
+OSType CreatorCodeForApplication() {
+  CFBundleRef bundle = CFBundleGetMainBundle();
+  if (!bundle) {
+    return kUnknownType;
+  }
+
+  return CreatorCodeForCFBundleRef(bundle);
+}
+
+bool GetSearchPathDirectory(NSSearchPathDirectory directory,
+                            NSSearchPathDomainMask domain_mask,
+                            FilePath* result) {
+  DCHECK(result);
+  NSArray<NSString*>* dirs =
+      NSSearchPathForDirectoriesInDomains(directory, domain_mask, YES);
+  if (dirs.count < 1) {
+    return false;
+  }
+  *result = NSStringToFilePath(dirs[0]);
+  return true;
+}
+
+bool GetLocalDirectory(NSSearchPathDirectory directory, FilePath* result) {
+  return GetSearchPathDirectory(directory, NSLocalDomainMask, result);
+}
+
+bool GetUserDirectory(NSSearchPathDirectory directory, FilePath* result) {
+  return GetSearchPathDirectory(directory, NSUserDomainMask, result);
+}
+
+FilePath GetUserLibraryPath() {
+  FilePath user_library_path;
+  if (!GetUserDirectory(NSLibraryDirectory, &user_library_path)) {
+    DLOG(WARNING) << "Could not get user library path";
+  }
+  return user_library_path;
+}
+
+FilePath GetUserDocumentPath() {
+  FilePath user_document_path;
+  if (!GetUserDirectory(NSDocumentDirectory, &user_document_path)) {
+    DLOG(WARNING) << "Could not get user document path";
+  }
+  return user_document_path;
+}
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the outermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
+//   |exec_name| - path to the binary
+//   returns - path to the application bundle, or empty on error
+FilePath GetAppBundlePath(const FilePath& exec_name) {
+  const char kExt[] = ".app";
+  const size_t kExtLength = std::size(kExt) - 1;
+
+  // Split the path into components.
+  std::vector<std::string> components = exec_name.GetComponents();
+
+  // It's an error if we don't get any components.
+  if (components.empty()) {
+    return FilePath();
+  }
+
+  // Don't prepend '/' to the first component.
+  std::vector<std::string>::const_iterator it = components.begin();
+  std::string bundle_name = *it;
+  DCHECK_GT(it->length(), 0U);
+  // If the first component ends in ".app", we're already done.
+  if (it->length() > kExtLength &&
+      !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength)) {
+    return FilePath(bundle_name);
+  }
+
+  // The first component may be "/" or "//", etc. Only append '/' if it doesn't
+  // already end in '/'.
+  if (bundle_name.back() != '/') {
+    bundle_name += '/';
+  }
+
+  // Go through the remaining components.
+  for (++it; it != components.end(); ++it) {
+    DCHECK_GT(it->length(), 0U);
+
+    bundle_name += *it;
+
+    // If the current component ends in ".app", we're done.
+    if (it->length() > kExtLength &&
+        !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength)) {
+      return FilePath(bundle_name);
+    }
+
+    // Separate this component from the next one.
+    bundle_name += '/';
+  }
+
+  return FilePath();
+}
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the innermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces
+// "/Foo/Bar.app/.../Baz.app").
+//   |exec_name| - path to the binary
+//   returns - path to the application bundle, or empty on error
+FilePath GetInnermostAppBundlePath(const FilePath& exec_name) {
+  static constexpr char kExt[] = ".app";
+  static constexpr size_t kExtLength = std::size(kExt) - 1;
+
+  // Split the path into components.
+  std::vector<std::string> components = exec_name.GetComponents();
+
+  // It's an error if we don't get any components.
+  if (components.empty()) {
+    return FilePath();
+  }
+
+  auto app = ranges::find_if(
+      Reversed(components), [](const std::string& component) -> bool {
+        return component.size() > kExtLength && EndsWith(component, kExt);
+      });
+
+  if (app == components.rend()) {
+    return FilePath();
+  }
+
+  // Remove all path components after the final ".app" extension.
+  components.erase(app.base(), components.end());
+
+  std::string bundle_path;
+  for (const std::string& component : components) {
+    // Don't prepend a slash if this is the first component or if the
+    // previous component ended with a slash, which can happen when dealing
+    // with an absolute path.
+    if (!bundle_path.empty() && bundle_path.back() != '/') {
+      bundle_path += '/';
+    }
+
+    bundle_path += component;
+  }
+
+  return FilePath(bundle_path);
+}
+
+#define TYPE_NAME_FOR_CF_TYPE_DEFN(TypeCF)     \
+  std::string TypeNameForCFType(TypeCF##Ref) { \
+    return #TypeCF;                            \
+  }
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFArray)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFBag)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFBoolean)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFData)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFDate)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFDictionary)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFNull)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFNumber)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFSet)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFString)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFURL)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFUUID)
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CGColor)
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CTFont)
+TYPE_NAME_FOR_CF_TYPE_DEFN(CTRun)
+
+#if !BUILDFLAG(IS_IOS)
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecAccessControl)
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecCertificate)
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecKey)
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecPolicy)
+#endif
+
+#undef TYPE_NAME_FOR_CF_TYPE_DEFN
+
+static const char* base_bundle_id;
+
+const char* BaseBundleID() {
+  if (base_bundle_id) {
+    return base_bundle_id;
+  }
+
+#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
+  return "com.google.Chrome";
+#else
+  return "org.chromium.Chromium";
+#endif
+}
+
+void SetBaseBundleID(const char* new_base_bundle_id) {
+  if (new_base_bundle_id != base_bundle_id) {
+    free((void*)base_bundle_id);
+    base_bundle_id = new_base_bundle_id ? strdup(new_base_bundle_id) : nullptr;
+  }
+}
+
+#define CF_CAST_DEFN(TypeCF)                                       \
+  template <>                                                      \
+  TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val) {       \
+    if (cf_val == NULL) {                                          \
+      return NULL;                                                 \
+    }                                                              \
+    if (CFGetTypeID(cf_val) == TypeCF##GetTypeID()) {              \
+      return (TypeCF##Ref)(cf_val);                                \
+    }                                                              \
+    return NULL;                                                   \
+  }                                                                \
+                                                                   \
+  template <>                                                      \
+  TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val) { \
+    TypeCF##Ref rv = CFCast<TypeCF##Ref>(cf_val);                  \
+    DCHECK(cf_val == NULL || rv);                                  \
+    return rv;                                                     \
+  }
+
+CF_CAST_DEFN(CFArray)
+CF_CAST_DEFN(CFBag)
+CF_CAST_DEFN(CFBoolean)
+CF_CAST_DEFN(CFData)
+CF_CAST_DEFN(CFDate)
+CF_CAST_DEFN(CFDictionary)
+CF_CAST_DEFN(CFNull)
+CF_CAST_DEFN(CFNumber)
+CF_CAST_DEFN(CFSet)
+CF_CAST_DEFN(CFString)
+CF_CAST_DEFN(CFURL)
+CF_CAST_DEFN(CFUUID)
+
+CF_CAST_DEFN(CGColor)
+
+CF_CAST_DEFN(CTFont)
+CF_CAST_DEFN(CTFontDescriptor)
+CF_CAST_DEFN(CTRun)
+
+CF_CAST_DEFN(SecCertificate)
+
+#if !BUILDFLAG(IS_IOS)
+CF_CAST_DEFN(SecAccessControl)
+CF_CAST_DEFN(SecKey)
+CF_CAST_DEFN(SecPolicy)
+#endif
+
+#undef CF_CAST_DEFN
+
+std::string GetValueFromDictionaryErrorMessage(CFStringRef key,
+                                               const std::string& expected_type,
+                                               CFTypeRef value) {
+  ScopedCFTypeRef<CFStringRef> actual_type_ref(
+      CFCopyTypeIDDescription(CFGetTypeID(value)));
+  return "Expected value for key " + SysCFStringRefToUTF8(key) + " to be " +
+         expected_type + " but it was " +
+         SysCFStringRefToUTF8(actual_type_ref.get()) + " instead";
+}
+
+NSURL* FilePathToNSURL(const FilePath& path) {
+  if (NSString* path_string = FilePathToNSString(path)) {
+    return [NSURL fileURLWithPath:path_string];
+  }
+  return nil;
+}
+
+NSString* FilePathToNSString(const FilePath& path) {
+  if (path.empty()) {
+    return nil;
+  }
+  return @(path.value().c_str());  // @() does UTF8 conversion.
+}
+
+FilePath NSStringToFilePath(NSString* str) {
+  if (!str.length) {
+    return FilePath();
+  }
+  return FilePath(str.fileSystemRepresentation);
+}
+
+FilePath NSURLToFilePath(NSURL* url) {
+  if (!url.fileURL) {
+    return FilePath();
+  }
+  return NSStringToFilePath(url.path);
+}
+
+ScopedCFTypeRef<CFURLRef> FilePathToCFURL(const FilePath& path) {
+  DCHECK(!path.empty());
+
+  // The function's docs promise that it does not require an NSAutoreleasePool.
+  // A straightforward way to accomplish this is to use *Create* functions,
+  // combined with ScopedCFTypeRef.
+  const std::string& path_string = path.value();
+  ScopedCFTypeRef<CFStringRef> path_cfstring(CFStringCreateWithBytes(
+      kCFAllocatorDefault, reinterpret_cast<const UInt8*>(path_string.data()),
+      checked_cast<CFIndex>(path_string.length()), kCFStringEncodingUTF8,
+      /*isExternalRepresentation=*/FALSE));
+  if (!path_cfstring) {
+    return ScopedCFTypeRef<CFURLRef>();
+  }
+
+  return ScopedCFTypeRef<CFURLRef>(CFURLCreateWithFileSystemPath(
+      kCFAllocatorDefault, path_cfstring.get(), kCFURLPOSIXPathStyle,
+      /*isDirectory=*/FALSE));
+}
+
+bool CFRangeToNSRange(CFRange range, NSRange* range_out) {
+  NSUInteger end;
+  if (IsValueInRangeForNumericType<NSUInteger>(range.location) &&
+      IsValueInRangeForNumericType<NSUInteger>(range.length) &&
+      CheckAdd(range.location, range.length).AssignIfValid(&end) &&
+      IsValueInRangeForNumericType<NSUInteger>(end)) {
+    *range_out = NSMakeRange(static_cast<NSUInteger>(range.location),
+                             static_cast<NSUInteger>(range.length));
+    return true;
+  }
+  return false;
+}
+
+}  // namespace base::apple
+
+std::ostream& operator<<(std::ostream& o, const CFStringRef string) {
+  return o << base::SysCFStringRefToUTF8(string);
+}
+
+std::ostream& operator<<(std::ostream& o, const CFErrorRef err) {
+  base::apple::ScopedCFTypeRef<CFStringRef> desc(CFErrorCopyDescription(err));
+  base::apple::ScopedCFTypeRef<CFDictionaryRef> user_info(
+      CFErrorCopyUserInfo(err));
+  CFStringRef errorDesc = nullptr;
+  if (user_info.get()) {
+    errorDesc = reinterpret_cast<CFStringRef>(
+        CFDictionaryGetValue(user_info.get(), kCFErrorDescriptionKey));
+  }
+  o << "Code: " << CFErrorGetCode(err) << " Domain: " << CFErrorGetDomain(err)
+    << " Desc: " << desc.get();
+  if (errorDesc) {
+    o << "(" << errorDesc << ")";
+  }
+  return o;
+}
+
+std::ostream& operator<<(std::ostream& o, CFRange range) {
+  return o << NSStringFromRange(
+             NSMakeRange(static_cast<NSUInteger>(range.location),
+                         static_cast<NSUInteger>(range.length)));
+}
+
+std::ostream& operator<<(std::ostream& o, id obj) {
+  return obj ? o << [obj description].UTF8String : o << "(nil)";
+}
+
+std::ostream& operator<<(std::ostream& o, NSRange range) {
+  return o << NSStringFromRange(range);
+}
+
+std::ostream& operator<<(std::ostream& o, SEL selector) {
+  return o << NSStringFromSelector(selector);
+}
+
+#if !BUILDFLAG(IS_IOS)
+std::ostream& operator<<(std::ostream& o, NSPoint point) {
+  return o << NSStringFromPoint(point);
+}
+std::ostream& operator<<(std::ostream& o, NSRect rect) {
+  return o << NSStringFromRect(rect);
+}
+std::ostream& operator<<(std::ostream& o, NSSize size) {
+  return o << NSStringFromSize(size);
+}
+#endif
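As a worked example of the two bundle-path helpers defined above (hypothetical
concrete path instantiating the "/Foo/Bar.app/.../Baz.app/..." pattern from the
function comments; not taken from a test):

#include "base/apple/foundation_util.h"
#include "base/files/file_path.h"

void BundlePathExample() {
  base::FilePath exec(
      "/Foo/Bar.app/Contents/Frameworks/Baz.app/Contents/MacOS/baz");
  // Outermost bundle containing the binary: "/Foo/Bar.app".
  base::FilePath outer = base::apple::GetAppBundlePath(exec);
  // Innermost bundle: "/Foo/Bar.app/Contents/Frameworks/Baz.app".
  base::FilePath inner = base::apple::GetInnermostAppBundlePath(exec);
  // Both return an empty FilePath when no ".app" component is present.
  (void)outer;
  (void)inner;
}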
diff --git a/base/apple/foundation_util_unittest.mm b/base/apple/foundation_util_unittest.mm
new file mode 100644
index 0000000..51aa215
--- /dev/null
+++ b/base/apple/foundation_util_unittest.mm
@@ -0,0 +1,432 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/foundation_util.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Foundation/Foundation.h>
+#include <limits.h>
+#include <stddef.h>
+
+#include "base/apple/scoped_cftyperef.h"
+#include "base/files/file_path.h"
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#import "testing/gtest_mac.h"
+
+namespace base::apple {
+
+TEST(FoundationUtilTest, CFCast) {
+  // Build out the CF types to be tested as empty containers.
+  ScopedCFTypeRef<CFTypeRef> test_array(
+      CFArrayCreate(nullptr, nullptr, 0, &kCFTypeArrayCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_array_mutable(
+      CFArrayCreateMutable(nullptr, 0, &kCFTypeArrayCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_bag(
+      CFBagCreate(nullptr, nullptr, 0, &kCFTypeBagCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_bag_mutable(
+      CFBagCreateMutable(nullptr, 0, &kCFTypeBagCallBacks));
+  CFTypeRef test_bool = kCFBooleanTrue;
+  ScopedCFTypeRef<CFTypeRef> test_data(CFDataCreate(nullptr, nullptr, 0));
+  ScopedCFTypeRef<CFTypeRef> test_data_mutable(CFDataCreateMutable(nullptr, 0));
+  ScopedCFTypeRef<CFTypeRef> test_date(CFDateCreate(nullptr, 0));
+  ScopedCFTypeRef<CFTypeRef> test_dict(CFDictionaryCreate(
+      nullptr, nullptr, nullptr, 0, &kCFCopyStringDictionaryKeyCallBacks,
+      &kCFTypeDictionaryValueCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_dict_mutable(CFDictionaryCreateMutable(
+      nullptr, 0, &kCFCopyStringDictionaryKeyCallBacks,
+      &kCFTypeDictionaryValueCallBacks));
+  int int_val = 256;
+  ScopedCFTypeRef<CFTypeRef> test_number(
+      CFNumberCreate(nullptr, kCFNumberIntType, &int_val));
+  CFTypeRef test_null = kCFNull;
+  ScopedCFTypeRef<CFTypeRef> test_set(
+      CFSetCreate(nullptr, nullptr, 0, &kCFTypeSetCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_set_mutable(
+      CFSetCreateMutable(nullptr, 0, &kCFTypeSetCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_str(CFStringCreateWithBytes(
+      nullptr, nullptr, 0, kCFStringEncodingASCII, false));
+  CFTypeRef test_str_const = CFSTR("hello");
+  ScopedCFTypeRef<CFTypeRef> test_str_mutable(
+      CFStringCreateMutable(nullptr, 0));
+
+  // Make sure the allocations of CF types are good.
+  EXPECT_TRUE(test_array);
+  EXPECT_TRUE(test_array_mutable);
+  EXPECT_TRUE(test_bag);
+  EXPECT_TRUE(test_bag_mutable);
+  EXPECT_TRUE(test_bool);
+  EXPECT_TRUE(test_data);
+  EXPECT_TRUE(test_data_mutable);
+  EXPECT_TRUE(test_date);
+  EXPECT_TRUE(test_dict);
+  EXPECT_TRUE(test_dict_mutable);
+  EXPECT_TRUE(test_number);
+  EXPECT_TRUE(test_null);
+  EXPECT_TRUE(test_set);
+  EXPECT_TRUE(test_set_mutable);
+  EXPECT_TRUE(test_str);
+  EXPECT_TRUE(test_str_const);
+  EXPECT_TRUE(test_str_mutable);
+
+  // Casting the CFTypeRef objects correctly provides the same pointer.
+  EXPECT_EQ(test_array.get(), CFCast<CFArrayRef>(test_array.get()));
+  EXPECT_EQ(test_array_mutable.get(),
+            CFCast<CFArrayRef>(test_array_mutable.get()));
+  EXPECT_EQ(test_bag.get(), CFCast<CFBagRef>(test_bag.get()));
+  EXPECT_EQ(test_bag_mutable.get(), CFCast<CFBagRef>(test_bag_mutable.get()));
+  EXPECT_EQ(test_bool, CFCast<CFBooleanRef>(test_bool));
+  EXPECT_EQ(test_data.get(), CFCast<CFDataRef>(test_data.get()));
+  EXPECT_EQ(test_data_mutable.get(),
+            CFCast<CFDataRef>(test_data_mutable.get()));
+  EXPECT_EQ(test_date.get(), CFCast<CFDateRef>(test_date.get()));
+  EXPECT_EQ(test_dict.get(), CFCast<CFDictionaryRef>(test_dict.get()));
+  EXPECT_EQ(test_dict_mutable.get(),
+            CFCast<CFDictionaryRef>(test_dict_mutable.get()));
+  EXPECT_EQ(test_number.get(), CFCast<CFNumberRef>(test_number.get()));
+  EXPECT_EQ(test_null, CFCast<CFNullRef>(test_null));
+  EXPECT_EQ(test_set.get(), CFCast<CFSetRef>(test_set.get()));
+  EXPECT_EQ(test_set_mutable.get(), CFCast<CFSetRef>(test_set_mutable.get()));
+  EXPECT_EQ(test_str.get(), CFCast<CFStringRef>(test_str.get()));
+  EXPECT_EQ(test_str_const, CFCast<CFStringRef>(test_str_const));
+  EXPECT_EQ(test_str_mutable.get(),
+            CFCast<CFStringRef>(test_str_mutable.get()));
+
+  // When given an incorrect CF cast, provide nullptr.
+  EXPECT_FALSE(CFCast<CFStringRef>(test_array.get()));
+  EXPECT_FALSE(CFCast<CFStringRef>(test_array_mutable.get()));
+  EXPECT_FALSE(CFCast<CFStringRef>(test_bag.get()));
+  EXPECT_FALSE(CFCast<CFSetRef>(test_bag_mutable.get()));
+  EXPECT_FALSE(CFCast<CFSetRef>(test_bool));
+  EXPECT_FALSE(CFCast<CFNullRef>(test_data.get()));
+  EXPECT_FALSE(CFCast<CFDictionaryRef>(test_data_mutable.get()));
+  EXPECT_FALSE(CFCast<CFDictionaryRef>(test_date.get()));
+  EXPECT_FALSE(CFCast<CFNumberRef>(test_dict.get()));
+  EXPECT_FALSE(CFCast<CFDateRef>(test_dict_mutable.get()));
+  EXPECT_FALSE(CFCast<CFDataRef>(test_number.get()));
+  EXPECT_FALSE(CFCast<CFDataRef>(test_null));
+  EXPECT_FALSE(CFCast<CFBooleanRef>(test_set.get()));
+  EXPECT_FALSE(CFCast<CFBagRef>(test_set_mutable.get()));
+  EXPECT_FALSE(CFCast<CFBagRef>(test_str.get()));
+  EXPECT_FALSE(CFCast<CFArrayRef>(test_str_const));
+  EXPECT_FALSE(CFCast<CFArrayRef>(test_str_mutable.get()));
+
+  // Giving a nullptr provides a nullptr.
+  EXPECT_FALSE(CFCast<CFArrayRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFBagRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFBooleanRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFDataRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFDateRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFDictionaryRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFNullRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFNumberRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFSetRef>(nullptr));
+  EXPECT_FALSE(CFCast<CFStringRef>(nullptr));
+
+  // CFCastStrict: correct cast results in correct pointer being returned.
+  EXPECT_EQ(test_array.get(), CFCastStrict<CFArrayRef>(test_array.get()));
+  EXPECT_EQ(test_array_mutable.get(),
+            CFCastStrict<CFArrayRef>(test_array_mutable.get()));
+  EXPECT_EQ(test_bag.get(), CFCastStrict<CFBagRef>(test_bag.get()));
+  EXPECT_EQ(test_bag_mutable.get(),
+            CFCastStrict<CFBagRef>(test_bag_mutable.get()));
+  EXPECT_EQ(test_bool, CFCastStrict<CFBooleanRef>(test_bool));
+  EXPECT_EQ(test_data.get(), CFCastStrict<CFDataRef>(test_data.get()));
+  EXPECT_EQ(test_data_mutable.get(),
+            CFCastStrict<CFDataRef>(test_data_mutable.get()));
+  EXPECT_EQ(test_date.get(), CFCastStrict<CFDateRef>(test_date.get()));
+  EXPECT_EQ(test_dict.get(), CFCastStrict<CFDictionaryRef>(test_dict.get()));
+  EXPECT_EQ(test_dict_mutable.get(),
+            CFCastStrict<CFDictionaryRef>(test_dict_mutable.get()));
+  EXPECT_EQ(test_number.get(), CFCastStrict<CFNumberRef>(test_number.get()));
+  EXPECT_EQ(test_null, CFCastStrict<CFNullRef>(test_null));
+  EXPECT_EQ(test_set.get(), CFCastStrict<CFSetRef>(test_set.get()));
+  EXPECT_EQ(test_set_mutable.get(),
+            CFCastStrict<CFSetRef>(test_set_mutable.get()));
+  EXPECT_EQ(test_str.get(), CFCastStrict<CFStringRef>(test_str.get()));
+  EXPECT_EQ(test_str_const, CFCastStrict<CFStringRef>(test_str_const));
+  EXPECT_EQ(test_str_mutable.get(),
+            CFCastStrict<CFStringRef>(test_str_mutable.get()));
+
+  // CFCastStrict: Giving a nullptr provides a nullptr.
+  EXPECT_FALSE(CFCastStrict<CFArrayRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFBagRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFBooleanRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFDataRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFDateRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFDictionaryRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFNullRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFNumberRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFSetRef>(nullptr));
+  EXPECT_FALSE(CFCastStrict<CFStringRef>(nullptr));
+}
+
+TEST(FoundationUtilTest, ObjCCast) {
+  @autoreleasepool {
+    id test_array = @[];
+    id test_array_mutable = [NSMutableArray array];
+    id test_data = [NSData data];
+    id test_data_mutable = [NSMutableData dataWithCapacity:10];
+    id test_date = [NSDate date];
+    id test_dict = @{@"meaning" : @42};
+    id test_dict_mutable = [NSMutableDictionary dictionaryWithCapacity:10];
+    id test_number = @42;
+    id test_null = [NSNull null];
+    id test_set = [NSSet setWithObject:@"string object"];
+    id test_set_mutable = [NSMutableSet setWithCapacity:10];
+    id test_str = [NSString string];
+    id test_str_const = @"bonjour";
+    id test_str_mutable = [NSMutableString stringWithCapacity:10];
+
+    // Make sure the allocations of NS types are good.
+    EXPECT_TRUE(test_array);
+    EXPECT_TRUE(test_array_mutable);
+    EXPECT_TRUE(test_data);
+    EXPECT_TRUE(test_data_mutable);
+    EXPECT_TRUE(test_date);
+    EXPECT_TRUE(test_dict);
+    EXPECT_TRUE(test_dict_mutable);
+    EXPECT_TRUE(test_number);
+    EXPECT_TRUE(test_null);
+    EXPECT_TRUE(test_set);
+    EXPECT_TRUE(test_set_mutable);
+    EXPECT_TRUE(test_str);
+    EXPECT_TRUE(test_str_const);
+    EXPECT_TRUE(test_str_mutable);
+
+    // Casting the id correctly provides the same pointer.
+    EXPECT_EQ(test_array, ObjCCast<NSArray>(test_array));
+    EXPECT_EQ(test_array_mutable, ObjCCast<NSArray>(test_array_mutable));
+    EXPECT_EQ(test_data, ObjCCast<NSData>(test_data));
+    EXPECT_EQ(test_data_mutable, ObjCCast<NSData>(test_data_mutable));
+    EXPECT_EQ(test_date, ObjCCast<NSDate>(test_date));
+    EXPECT_EQ(test_dict, ObjCCast<NSDictionary>(test_dict));
+    EXPECT_EQ(test_dict_mutable, ObjCCast<NSDictionary>(test_dict_mutable));
+    EXPECT_EQ(test_number, ObjCCast<NSNumber>(test_number));
+    EXPECT_EQ(test_null, ObjCCast<NSNull>(test_null));
+    EXPECT_EQ(test_set, ObjCCast<NSSet>(test_set));
+    EXPECT_EQ(test_set_mutable, ObjCCast<NSSet>(test_set_mutable));
+    EXPECT_EQ(test_str, ObjCCast<NSString>(test_str));
+    EXPECT_EQ(test_str_const, ObjCCast<NSString>(test_str_const));
+    EXPECT_EQ(test_str_mutable, ObjCCast<NSString>(test_str_mutable));
+
+    // When given an incorrect ObjC cast, provide nil.
+    EXPECT_FALSE(ObjCCast<NSString>(test_array));
+    EXPECT_FALSE(ObjCCast<NSString>(test_array_mutable));
+    EXPECT_FALSE(ObjCCast<NSString>(test_data));
+    EXPECT_FALSE(ObjCCast<NSString>(test_data_mutable));
+    EXPECT_FALSE(ObjCCast<NSSet>(test_date));
+    EXPECT_FALSE(ObjCCast<NSSet>(test_dict));
+    EXPECT_FALSE(ObjCCast<NSNumber>(test_dict_mutable));
+    EXPECT_FALSE(ObjCCast<NSNull>(test_number));
+    EXPECT_FALSE(ObjCCast<NSDictionary>(test_null));
+    EXPECT_FALSE(ObjCCast<NSDictionary>(test_set));
+    EXPECT_FALSE(ObjCCast<NSDate>(test_set_mutable));
+    EXPECT_FALSE(ObjCCast<NSData>(test_str));
+    EXPECT_FALSE(ObjCCast<NSData>(test_str_const));
+    EXPECT_FALSE(ObjCCast<NSArray>(test_str_mutable));
+
+    // Giving a nil provides a nil.
+    EXPECT_FALSE(ObjCCast<NSArray>(nil));
+    EXPECT_FALSE(ObjCCast<NSData>(nil));
+    EXPECT_FALSE(ObjCCast<NSDate>(nil));
+    EXPECT_FALSE(ObjCCast<NSDictionary>(nil));
+    EXPECT_FALSE(ObjCCast<NSNull>(nil));
+    EXPECT_FALSE(ObjCCast<NSNumber>(nil));
+    EXPECT_FALSE(ObjCCast<NSSet>(nil));
+    EXPECT_FALSE(ObjCCast<NSString>(nil));
+
+    // ObjCCastStrict: correct cast results in correct pointer being returned.
+    EXPECT_EQ(test_array, ObjCCastStrict<NSArray>(test_array));
+    EXPECT_EQ(test_array_mutable, ObjCCastStrict<NSArray>(test_array_mutable));
+    EXPECT_EQ(test_data, ObjCCastStrict<NSData>(test_data));
+    EXPECT_EQ(test_data_mutable, ObjCCastStrict<NSData>(test_data_mutable));
+    EXPECT_EQ(test_date, ObjCCastStrict<NSDate>(test_date));
+    EXPECT_EQ(test_dict, ObjCCastStrict<NSDictionary>(test_dict));
+    EXPECT_EQ(test_dict_mutable,
+              ObjCCastStrict<NSDictionary>(test_dict_mutable));
+    EXPECT_EQ(test_number, ObjCCastStrict<NSNumber>(test_number));
+    EXPECT_EQ(test_null, ObjCCastStrict<NSNull>(test_null));
+    EXPECT_EQ(test_set, ObjCCastStrict<NSSet>(test_set));
+    EXPECT_EQ(test_set_mutable, ObjCCastStrict<NSSet>(test_set_mutable));
+    EXPECT_EQ(test_str, ObjCCastStrict<NSString>(test_str));
+    EXPECT_EQ(test_str_const, ObjCCastStrict<NSString>(test_str_const));
+    EXPECT_EQ(test_str_mutable, ObjCCastStrict<NSString>(test_str_mutable));
+
+    // ObjCCastStrict: Giving a nil provides a nil.
+    EXPECT_FALSE(ObjCCastStrict<NSArray>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSData>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSDate>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSDictionary>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSNull>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSNumber>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSSet>(nil));
+    EXPECT_FALSE(ObjCCastStrict<NSString>(nil));
+  }
+}
+
+TEST(FoundationUtilTest, GetValueFromDictionary) {
+  int one = 1, two = 2, three = 3;
+
+  ScopedCFTypeRef<CFNumberRef> cf_one(
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &one));
+  ScopedCFTypeRef<CFNumberRef> cf_two(
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &two));
+  ScopedCFTypeRef<CFNumberRef> cf_three(
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &three));
+
+  CFStringRef keys[] = {CFSTR("one"), CFSTR("two"), CFSTR("three")};
+  CFNumberRef values[] = {cf_one.get(), cf_two.get(), cf_three.get()};
+
+  static_assert(std::size(keys) == std::size(values),
+                "keys and values arrays must have the same size");
+
+  ScopedCFTypeRef<CFDictionaryRef> test_dict(CFDictionaryCreate(
+      kCFAllocatorDefault, reinterpret_cast<const void**>(keys),
+      reinterpret_cast<const void**>(values), std::size(values),
+      &kCFCopyStringDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks));
+
+  // GetValueFromDictionary<>(_, _) should produce the correct
+  // expected output.
+  EXPECT_EQ(values[0],
+            GetValueFromDictionary<CFNumberRef>(test_dict.get(), CFSTR("one")));
+  EXPECT_EQ(values[1],
+            GetValueFromDictionary<CFNumberRef>(test_dict.get(), CFSTR("two")));
+  EXPECT_EQ(values[2], GetValueFromDictionary<CFNumberRef>(test_dict.get(),
+                                                           CFSTR("three")));
+
+  // Bad input should produce bad output.
+  EXPECT_FALSE(
+      GetValueFromDictionary<CFNumberRef>(test_dict.get(), CFSTR("four")));
+  EXPECT_FALSE(
+      GetValueFromDictionary<CFStringRef>(test_dict.get(), CFSTR("one")));
+}
+
+TEST(FoundationUtilTest, FilePathToNSURL) {
+  EXPECT_NSEQ(nil, FilePathToNSURL(FilePath()));
+  EXPECT_NSEQ([NSURL fileURLWithPath:@"/a/b"],
+              FilePathToNSURL(FilePath("/a/b")));
+}
+
+TEST(FoundationUtilTest, FilePathToNSString) {
+  EXPECT_NSEQ(nil, FilePathToNSString(FilePath()));
+  EXPECT_NSEQ(@"/a/b", FilePathToNSString(FilePath("/a/b")));
+}
+
+TEST(FoundationUtilTest, NSStringToFilePath) {
+  EXPECT_EQ(FilePath(), NSStringToFilePath(nil));
+  EXPECT_EQ(FilePath(), NSStringToFilePath(@""));
+  EXPECT_EQ(FilePath("/a/b"), NSStringToFilePath(@"/a/b"));
+}
+
+TEST(FoundationUtilTest, FilePathToCFURL) {
+  ScopedCFTypeRef<CFURLRef> url(CFURLCreateWithFileSystemPath(
+      nullptr, CFSTR("/a/b"), kCFURLPOSIXPathStyle, false));
+  EXPECT_TRUE(CFEqual(url.get(), FilePathToCFURL(FilePath("/a/b")).get()));
+}
+
+TEST(FoundationUtilTest, CFRangeToNSRange) {
+  NSRange range_out;
+  EXPECT_TRUE(CFRangeToNSRange(CFRangeMake(10, 5), &range_out));
+  EXPECT_EQ(10UL, range_out.location);
+  EXPECT_EQ(5UL, range_out.length);
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, 5), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(5, -1), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, -1), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MAX, LONG_MAX), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MIN, LONG_MAX), &range_out));
+}
+
+TEST(StringNumberConversionsTest, FormatNSInteger) {
+  // The PRI[dxu]NS macros assume that NSInteger is a typedef to "int" on
+  // 32-bit architectures and a typedef to "long" on 64-bit architectures
+  // (respectively "unsigned int" and "unsigned long" for NSUInteger). Use
+  // pointer incompatibility to validate this at compilation.
+#if defined(ARCH_CPU_64_BITS)
+  typedef long FormatNSIntegerAsType;
+  typedef unsigned long FormatNSUIntegerAsType;
+#else
+  typedef int FormatNSIntegerAsType;
+  typedef unsigned int FormatNSUIntegerAsType;
+#endif  // defined(ARCH_CPU_64_BITS)
+
+  NSInteger some_nsinteger;
+  [[maybe_unused]] FormatNSIntegerAsType* pointer_to_some_nsinteger =
+      &some_nsinteger;
+
+  NSUInteger some_nsuinteger;
+  [[maybe_unused]] FormatNSUIntegerAsType* pointer_to_some_nsuinteger =
+      &some_nsuinteger;
+
+  // Check that format specifier works correctly for NSInteger.
+  const struct {
+    NSInteger value;
+    const char* expected;
+    const char* expected_hex;
+  } nsinteger_cases[] = {
+#if !defined(ARCH_CPU_64_BITS)
+    {12345678, "12345678", "bc614e"},
+    {-12345678, "-12345678", "ff439eb2"},
+#else
+    {12345678, "12345678", "bc614e"},
+    {-12345678, "-12345678", "ffffffffff439eb2"},
+    {137451299150l, "137451299150", "2000bc614e"},
+    {-137451299150l, "-137451299150", "ffffffdfff439eb2"},
+#endif  // !defined(ARCH_CPU_64_BITS)
+  };
+
+  for (const auto& nsinteger_case : nsinteger_cases) {
+    EXPECT_EQ(nsinteger_case.expected,
+              StringPrintf("%" PRIdNS, nsinteger_case.value));
+    EXPECT_EQ(nsinteger_case.expected_hex,
+              StringPrintf("%" PRIxNS, nsinteger_case.value));
+  }
+
+  // Check that format specifier works correctly for NSUInteger.
+  const struct {
+    NSUInteger value;
+    const char* expected;
+    const char* expected_hex;
+  } nsuinteger_cases[] = {
+#if !defined(ARCH_CPU_64_BITS)
+    {12345678u, "12345678", "bc614e"},
+    {4282621618u, "4282621618", "ff439eb2"},
+#else
+    {12345678u, "12345678", "bc614e"},
+    {4282621618u, "4282621618", "ff439eb2"},
+    {137451299150ul, "137451299150", "2000bc614e"},
+    {18446743936258252466ul, "18446743936258252466", "ffffffdfff439eb2"},
+#endif  // !defined(ARCH_CPU_64_BITS)
+  };
+
+  for (const auto& nsuinteger_case : nsuinteger_cases) {
+    EXPECT_EQ(nsuinteger_case.expected,
+              StringPrintf("%" PRIuNS, nsuinteger_case.value));
+    EXPECT_EQ(nsuinteger_case.expected_hex,
+              StringPrintf("%" PRIxNS, nsuinteger_case.value));
+  }
+}
+
+#define EXPECT_LOG_EQ(expected, val) \
+  EXPECT_EQ(expected, (std::ostringstream() << (val)).str())
+
+TEST(FoundationLoggingTest, ObjCObject) {
+  EXPECT_LOG_EQ("Hello, world!", @"Hello, world!");
+}
+
+TEST(FoundationLoggingTest, ObjCNil) {
+  EXPECT_LOG_EQ("(nil)", static_cast<id>(nil));
+}
+
+TEST(FoundationLoggingTest, CFRange) {
+  EXPECT_LOG_EQ("{0, 100}", CFRangeMake(0, 100));
+}
+
+TEST(FoundationLoggingTest, NSRange) {
+  EXPECT_LOG_EQ("{0, 100}", NSMakeRange(0, 100));
+}
+
+}  // namespace base::apple
diff --git a/base/apple/mach_logging.cc b/base/apple/mach_logging.cc
new file mode 100644
index 0000000..f91b48c
--- /dev/null
+++ b/base/apple/mach_logging.cc
@@ -0,0 +1,86 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/mach_logging.h"
+
+#include <iomanip>
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(USE_BLINK)
+#if BUILDFLAG(IS_IOS)
+#include "base/ios/sim_header_shims.h"
+#else
+#include <servers/bootstrap.h>
+#endif  // BUILDFLAG(IS_IOS)
+#endif  // BUILDFLAG(USE_BLINK)
+
+namespace {
+
+std::string FormatMachErrorNumber(mach_error_t mach_err) {
+  // For the os/kern subsystem, give the error number in decimal as in
+  // <mach/kern_return.h>. Otherwise, give it in hexadecimal to make it easier
+  // to visualize the various bits. See <mach/error.h>.
+  if (mach_err >= 0 && mach_err < KERN_RETURN_MAX) {
+    return base::StringPrintf(" (%d)", mach_err);
+  }
+  return base::StringPrintf(" (0x%08x)", mach_err);
+}
+
+}  // namespace
+
+namespace logging {
+
+MachLogMessage::MachLogMessage(const char* file_path,
+                               int line,
+                               LogSeverity severity,
+                               mach_error_t mach_err)
+    : LogMessage(file_path, line, severity), mach_err_(mach_err) {}
+
+MachLogMessage::~MachLogMessage() {
+  stream() << ": " << mach_error_string(mach_err_)
+           << FormatMachErrorNumber(mach_err_);
+}
+
+#if BUILDFLAG(USE_BLINK)
+
+BootstrapLogMessage::BootstrapLogMessage(const char* file_path,
+                                         int line,
+                                         LogSeverity severity,
+                                         kern_return_t bootstrap_err)
+    : LogMessage(file_path, line, severity), bootstrap_err_(bootstrap_err) {}
+
+BootstrapLogMessage::~BootstrapLogMessage() {
+  stream() << ": " << bootstrap_strerror(bootstrap_err_);
+
+  switch (bootstrap_err_) {
+    case BOOTSTRAP_SUCCESS:
+    case BOOTSTRAP_NOT_PRIVILEGED:
+    case BOOTSTRAP_NAME_IN_USE:
+    case BOOTSTRAP_UNKNOWN_SERVICE:
+    case BOOTSTRAP_SERVICE_ACTIVE:
+    case BOOTSTRAP_BAD_COUNT:
+    case BOOTSTRAP_NO_MEMORY:
+    case BOOTSTRAP_NO_CHILDREN: {
+      // Show known bootstrap errors in decimal because that's how they're
+      // defined in <servers/bootstrap.h>.
+      stream() << " (" << bootstrap_err_ << ")";
+      break;
+    }
+
+    default: {
+      // bootstrap_strerror passes unknown errors to mach_error_string, so
+      // format them as they would be if they were handled by
+      // MachErrorMessage.
+      stream() << FormatMachErrorNumber(bootstrap_err_);
+      break;
+    }
+  }
+}
+
+#endif  // BUILDFLAG(USE_BLINK)
+
+}  // namespace logging
diff --git a/base/apple/mach_logging.h b/base/apple/mach_logging.h
new file mode 100644
index 0000000..d6593ef
--- /dev/null
+++ b/base/apple/mach_logging.h
@@ -0,0 +1,171 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_MACH_LOGGING_H_
+#define BASE_APPLE_MACH_LOGGING_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "build/blink_buildflags.h"
+#include "build/build_config.h"
+
+// Use the MACH_LOG family of macros along with a mach_error_t (kern_return_t)
+// containing a Mach error. The error value will be decoded so that logged
+// messages explain the error.
+//
+// Use the BOOTSTRAP_LOG family of macros specifically for errors that occur
+// while interoperating with the bootstrap subsystem. These errors will first
+// be looked up as bootstrap error messages. If no match is found, they will
+// be treated as generic Mach errors, as in MACH_LOG.
+//
+// Examples:
+//
+//   kern_return_t kr = mach_timebase_info(&info);
+//   if (kr != KERN_SUCCESS) {
+//     MACH_LOG(ERROR, kr) << "mach_timebase_info";
+//   }
+//
+//   kr = vm_deallocate(task, address, size);
+//   MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
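+//
+//   A BOOTSTRAP_LOG sketch (illustrative only; "com.example.service" is a
+//   placeholder service name):
+//
+//   kern_return_t kr = bootstrap_look_up(bootstrap_port,
+//                                        "com.example.service", &port);
+//   BOOTSTRAP_LOG_IF(ERROR, kr != BOOTSTRAP_SUCCESS, kr)
+//       << "bootstrap_look_up";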
+
+namespace logging {
+
+class BASE_EXPORT MachLogMessage : public logging::LogMessage {
+ public:
+  MachLogMessage(const char* file_path,
+                 int line,
+                 LogSeverity severity,
+                 mach_error_t mach_err);
+
+  MachLogMessage(const MachLogMessage&) = delete;
+  MachLogMessage& operator=(const MachLogMessage&) = delete;
+
+  ~MachLogMessage() override;
+
+ private:
+  mach_error_t mach_err_;
+};
+
+}  // namespace logging
+
+#if DCHECK_IS_ON()
+#define MACH_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
+#else
+#define MACH_DVLOG_IS_ON(verbose_level) 0
+#endif
+
+#define MACH_LOG_STREAM(severity, mach_err) \
+    COMPACT_GOOGLE_LOG_EX_ ## severity(MachLogMessage, mach_err).stream()
+#define MACH_VLOG_STREAM(verbose_level, mach_err) \
+    logging::MachLogMessage(__FILE__, __LINE__, \
+                            -verbose_level, mach_err).stream()
+
+#define MACH_LOG(severity, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), LOG_IS_ON(severity))
+#define MACH_LOG_IF(severity, condition, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
+                LOG_IS_ON(severity) && (condition))
+
+#define MACH_VLOG(verbose_level, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                VLOG_IS_ON(verbose_level))
+#define MACH_VLOG_IF(verbose_level, condition, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                VLOG_IS_ON(verbose_level) && (condition))
+
+#define MACH_CHECK(condition, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), !(condition)) \
+    << "Check failed: " # condition << ". "
+
+#define MACH_DLOG(severity, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), DLOG_IS_ON(severity))
+#define MACH_DLOG_IF(severity, condition, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
+                DLOG_IS_ON(severity) && (condition))
+
+#define MACH_DVLOG(verbose_level, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                MACH_DVLOG_IS_ON(verbose_level))
+#define MACH_DVLOG_IF(verbose_level, condition, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                MACH_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define MACH_DCHECK(condition, mach_err)        \
+  LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), \
+              DCHECK_IS_ON() && !(condition))   \
+      << "Check failed: " #condition << ". "
+
+#if BUILDFLAG(USE_BLINK)
+
+namespace logging {
+
+class BASE_EXPORT BootstrapLogMessage : public logging::LogMessage {
+ public:
+  BootstrapLogMessage(const char* file_path,
+                      int line,
+                      LogSeverity severity,
+                      kern_return_t bootstrap_err);
+
+  BootstrapLogMessage(const BootstrapLogMessage&) = delete;
+  BootstrapLogMessage& operator=(const BootstrapLogMessage&) = delete;
+
+  ~BootstrapLogMessage() override;
+
+ private:
+  kern_return_t bootstrap_err_;
+};
+
+}  // namespace logging
+
+#define BOOTSTRAP_DVLOG_IS_ON MACH_DVLOG_IS_ON
+
+#define BOOTSTRAP_LOG_STREAM(severity, bootstrap_err) \
+    COMPACT_GOOGLE_LOG_EX_ ## severity(BootstrapLogMessage, \
+                                       bootstrap_err).stream()
+#define BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err) \
+    logging::BootstrapLogMessage(__FILE__, __LINE__, \
+                                 -verbose_level, bootstrap_err).stream()
+
+#define BOOTSTRAP_LOG(severity, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, \
+                                     bootstrap_err), LOG_IS_ON(severity))
+#define BOOTSTRAP_LOG_IF(severity, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+                LOG_IS_ON(severity) && (condition))
+
+#define BOOTSTRAP_VLOG(verbose_level, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                VLOG_IS_ON(verbose_level))
+#define BOOTSTRAP_VLOG_IF(verbose_level, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                VLOG_IS_ON(verbose_level) && (condition))
+
+#define BOOTSTRAP_CHECK(condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), !(condition)) \
+    << "Check failed: " # condition << ". "
+
+#define BOOTSTRAP_DLOG(severity, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+                DLOG_IS_ON(severity))
+#define BOOTSTRAP_DLOG_IF(severity, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+                DLOG_IS_ON(severity) && (condition))
+
+#define BOOTSTRAP_DVLOG(verbose_level, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                BOOTSTRAP_DVLOG_IS_ON(verbose_level))
+#define BOOTSTRAP_DVLOG_IF(verbose_level, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                BOOTSTRAP_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define BOOTSTRAP_DCHECK(condition, bootstrap_err)        \
+  LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), \
+              DCHECK_IS_ON() && !(condition))             \
+      << "Check failed: " #condition << ". "
+
+#endif  //  BUILDFLAG(USE_BLINK)
+
+#endif  // BASE_APPLE_MACH_LOGGING_H_
diff --git a/base/apple/osstatus_logging.h b/base/apple/osstatus_logging.h
new file mode 100644
index 0000000..f9d34b5
--- /dev/null
+++ b/base/apple/osstatus_logging.h
@@ -0,0 +1,101 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_OSSTATUS_LOGGING_H_
+#define BASE_APPLE_OSSTATUS_LOGGING_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_IOS)
+#include <MacTypes.h>
+#else
+#include <libkern/OSTypes.h>
+#endif
+
+// Use the OSSTATUS_LOG family to log messages related to errors in macOS/iOS
+// system routines that report status via an OSStatus or OSErr value. It is
+// similar to the PLOG family which operates on errno, but because there is no
+// global (or thread-local) OSStatus or OSErr value, the specific error must
+// be supplied as an argument to the OSSTATUS_LOG macro. The message logged
+// will contain the symbolic constant name corresponding to the status value,
+// along with the value itself.
+//
+// OSErr is just an older 16-bit form of the newer 32-bit OSStatus. Despite
+// the name, OSSTATUS_LOG can be used equally well for OSStatus and OSErr.
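+//
+// Illustrative sketch, not part of the original header (any API that reports
+// an OSStatus works the same way):
+//
+//   OSStatus status = AudioObjectGetPropertyData(object, &address, 0, nullptr,
+//                                                &size, &value);
+//   OSSTATUS_DLOG_IF(WARNING, status != noErr, status)
+//       << "AudioObjectGetPropertyData";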
+
+namespace logging {
+
+// Returns a UTF8 description from an OSStatus/OSErr value.
+BASE_EXPORT std::string DescriptionFromOSStatus(OSStatus err);
+
+class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage {
+ public:
+  OSStatusLogMessage(const char* file_path,
+                     int line,
+                     LogSeverity severity,
+                     OSStatus status);
+
+  OSStatusLogMessage(const OSStatusLogMessage&) = delete;
+  OSStatusLogMessage& operator=(const OSStatusLogMessage&) = delete;
+
+  ~OSStatusLogMessage() override;
+
+ private:
+  OSStatus status_;
+};
+
+}  // namespace logging
+
+#if DCHECK_IS_ON()
+#define MAC_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
+#else
+#define MAC_DVLOG_IS_ON(verbose_level) 0
+#endif
+
+#define OSSTATUS_LOG_STREAM(severity, status) \
+  COMPACT_GOOGLE_LOG_EX_##severity(OSStatusLogMessage, status).stream()
+#define OSSTATUS_VLOG_STREAM(verbose_level, status)                       \
+  logging::OSStatusLogMessage(__FILE__, __LINE__, -verbose_level, status) \
+      .stream()
+
+#define OSSTATUS_LOG(severity, status) \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), LOG_IS_ON(severity))
+#define OSSTATUS_LOG_IF(severity, condition, status) \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
+              LOG_IS_ON(severity) && (condition))
+
+#define OSSTATUS_VLOG(verbose_level, status)               \
+  LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+              VLOG_IS_ON(verbose_level))
+#define OSSTATUS_VLOG_IF(verbose_level, condition, status) \
+  LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+              VLOG_IS_ON(verbose_level) && (condition))
+
+#define OSSTATUS_CHECK(condition, status)                       \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), !(condition)) \
+      << "Check failed: " #condition << ". "
+
+#define OSSTATUS_DLOG(severity, status) \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), DLOG_IS_ON(severity))
+#define OSSTATUS_DLOG_IF(severity, condition, status) \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status),  \
+              DLOG_IS_ON(severity) && (condition))
+
+#define OSSTATUS_DVLOG(verbose_level, status)              \
+  LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+              MAC_DVLOG_IS_ON(verbose_level))
+#define OSSTATUS_DVLOG_IF(verbose_level, condition, status) \
+  LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status),  \
+              MAC_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define OSSTATUS_DCHECK(condition, status)        \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), \
+              DCHECK_IS_ON() && !(condition))     \
+      << "Check failed: " #condition << ". "
+
+#endif  // BASE_APPLE_OSSTATUS_LOGGING_H_
diff --git a/base/apple/osstatus_logging.mm b/base/apple/osstatus_logging.mm
new file mode 100644
index 0000000..821dcf1
--- /dev/null
+++ b/base/apple/osstatus_logging.mm
@@ -0,0 +1,31 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/osstatus_logging.h"
+
+#import <Foundation/Foundation.h>
+
+#include <iomanip>
+
+namespace logging {
+
+std::string DescriptionFromOSStatus(OSStatus err) {
+  NSError* error = [NSError errorWithDomain:NSOSStatusErrorDomain
+                                       code:err
+                                   userInfo:nil];
+  return error.description.UTF8String;
+}
+
+OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
+                                       int line,
+                                       LogSeverity severity,
+                                       OSStatus status)
+    : LogMessage(file_path, line, severity), status_(status) {}
+
+OSStatusLogMessage::~OSStatusLogMessage() {
+  stream() << ": " << DescriptionFromOSStatus(status_) << " (" << status_
+           << ")";
+}
+
+}  // namespace logging
diff --git a/base/apple/scoped_cffiledescriptorref.h b/base/apple/scoped_cffiledescriptorref.h
new file mode 100644
index 0000000..bab1189
--- /dev/null
+++ b/base/apple/scoped_cffiledescriptorref.h
@@ -0,0 +1,37 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_CFFILEDESCRIPTORREF_H_
+#define BASE_APPLE_SCOPED_CFFILEDESCRIPTORREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/scoped_generic.h"
+
+namespace base::apple {
+
+namespace internal {
+
+struct ScopedCFFileDescriptorRefTraits {
+  static CFFileDescriptorRef InvalidValue() { return nullptr; }
+  static void Free(CFFileDescriptorRef ref) {
+    CFFileDescriptorInvalidate(ref);
+    CFRelease(ref);
+  }
+};
+
+}  // namespace internal
+
+// ScopedCFFileDescriptorRef is designed after ScopedCFTypeRef<>. On
+// destruction, it will invalidate the file descriptor.
+// ScopedCFFileDescriptorRef (unlike ScopedCFTypeRef<>) does not support RETAIN
+// semantics, copying, or assignment, as doing so would increase the chances
+// that a file descriptor is invalidated while still in use.
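+//
+// A minimal usage sketch (illustrative; |fd| and |callout| are placeholders
+// for a caller-owned descriptor and a CFFileDescriptorCallBack):
+//
+//   ScopedCFFileDescriptorRef scoped_fd(CFFileDescriptorCreate(
+//       kCFAllocatorDefault, fd, /*closeOnInvalidate=*/true, callout,
+//       /*context=*/nullptr));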
+using ScopedCFFileDescriptorRef =
+    ScopedGeneric<CFFileDescriptorRef,
+                  internal::ScopedCFFileDescriptorRefTraits>;
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_CFFILEDESCRIPTORREF_H_
diff --git a/base/apple/scoped_cftyperef.h b/base/apple/scoped_cftyperef.h
new file mode 100644
index 0000000..4fa1901
--- /dev/null
+++ b/base/apple/scoped_cftyperef.h
@@ -0,0 +1,48 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_CFTYPEREF_H_
+#define BASE_APPLE_SCOPED_CFTYPEREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/apple/scoped_typeref.h"
+
+namespace base::apple {
+
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef.  Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
+//
+// By default, ScopedCFTypeRef<> takes ownership of an object (in the
+// constructor or in reset()) by taking over the caller's existing ownership
+// claim.  The caller must own the object it gives to ScopedCFTypeRef<>, and
+// relinquishes an ownership claim to that object.  ScopedCFTypeRef<> does not
+// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
+// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
+// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
+// ownership is not changed.
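+//
+// Illustrative sketch (not from the original header; |borrowed| stands in for
+// a CFStringRef the caller does not own):
+//
+//   // Assumes ownership of the +1 reference returned by CFStringCreateCopy.
+//   ScopedCFTypeRef<CFStringRef> owned(
+//       CFStringCreateCopy(kCFAllocatorDefault, CFSTR("hello")));
+//
+//   // Retains a borrowed reference instead of assuming ownership of it.
+//   ScopedCFTypeRef<CFStringRef> retained(borrowed,
+//                                         base::scoped_policy::RETAIN);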
+
+namespace internal {
+
+template <typename CFT>
+struct ScopedCFTypeRefTraits {
+  static CFT InvalidValue() { return nullptr; }
+  static CFT Retain(CFT object) {
+    CFRetain(object);
+    return object;
+  }
+  static void Release(CFT object) { CFRelease(object); }
+};
+
+}  // namespace internal
+
+template <typename CFT>
+using ScopedCFTypeRef =
+    ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_CFTYPEREF_H_
diff --git a/base/apple/scoped_cftyperef_unittest.cc b/base/apple/scoped_cftyperef_unittest.cc
new file mode 100644
index 0000000..f378a94
--- /dev/null
+++ b/base/apple/scoped_cftyperef_unittest.cc
@@ -0,0 +1,255 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/scoped_cftyperef.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <utility>
+
+#include "base/memory/scoped_policy.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// This is effectively a unit test of ScopedTypeRef rather than ScopedCFTypeRef,
+// but because ScopedTypeRef is parameterized, the CFType version is a great
+// test subject because it uses all the features.
+//
+// Note that CFMutableArray is used for testing, even when subtypes aren't
+// needed, because it is never optimized into immortal constant values, unlike
+// other types.
+
+namespace base::apple {
+namespace {
+
+TEST(ScopedCFTypeRefTest, ConstructionSameType) {
+  CFMutableArrayRef array =
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks);
+  EXPECT_EQ(1, CFGetRetainCount(array));
+
+  ScopedCFTypeRef<CFMutableArrayRef> retain_scoper(array,
+                                                   base::scoped_policy::RETAIN);
+  EXPECT_EQ(array, retain_scoper.get());
+  EXPECT_EQ(2, CFGetRetainCount(array));
+
+  ScopedCFTypeRef<CFMutableArrayRef> assume_scoper(array,
+                                                   base::scoped_policy::ASSUME);
+  EXPECT_EQ(array, assume_scoper.get());
+  EXPECT_EQ(2, CFGetRetainCount(array));
+
+  CFMutableArrayRef array2 =
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks);
+  EXPECT_EQ(1, CFGetRetainCount(array2));
+  ScopedCFTypeRef<CFMutableArrayRef> assume_scoper2(
+      array2 /* with implicit ASSUME */);
+  EXPECT_EQ(array2, assume_scoper2.get());
+  EXPECT_EQ(1, CFGetRetainCount(array2));
+}
+
+TEST(ScopedCFTypeRefTest, ConstructionSubType) {
+  CFMutableArrayRef array =
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks);
+  EXPECT_EQ(1, CFGetRetainCount(array));
+
+  ScopedCFTypeRef<CFArrayRef> scoper(array);
+  EXPECT_EQ(array, scoper.get());
+  EXPECT_EQ(1, CFGetRetainCount(array));
+}
+
+TEST(ScopedCFTypeRefTest, CopyConstructionSameType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> copy(original);
+  EXPECT_EQ(original.get(), copy.get());
+  EXPECT_EQ(2, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, CopyConstructionSubType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFArrayRef> copy(original);
+  EXPECT_EQ(original.get(), copy.get());
+  EXPECT_EQ(2, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, CopyConstructionReturnSubType) {
+  auto subtype_returner = []() -> ScopedCFTypeRef<CFArrayRef> {
+    ScopedCFTypeRef<CFMutableArrayRef> original(
+        CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+    return original;
+  };
+  EXPECT_TRUE(subtype_returner());
+}
+
+TEST(ScopedCFTypeRefTest, CopyAssignmentSameType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> new_object(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(new_object.get()));
+
+  original = new_object;
+  EXPECT_EQ(original.get(), new_object.get());
+  EXPECT_EQ(2, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, CopyAssignmentSubType) {
+  ScopedCFTypeRef<CFArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> new_object(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(new_object.get()));
+
+  original = new_object;
+  EXPECT_EQ(original.get(), new_object.get());
+  EXPECT_EQ(2, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, MoveConstructionSameType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  CFMutableArrayRef original_ref = original.get();
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> copy(std::move(original));
+  EXPECT_EQ(nullptr, original.get());
+  EXPECT_EQ(original_ref, copy.get());
+  EXPECT_EQ(1, CFGetRetainCount(copy.get()));
+}
+
+TEST(ScopedCFTypeRefTest, MoveConstructionSubType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  CFMutableArrayRef original_ref = original.get();
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFArrayRef> copy(std::move(original));
+  EXPECT_EQ(nullptr, original.get());
+  EXPECT_EQ(original_ref, copy.get());
+  EXPECT_EQ(1, CFGetRetainCount(copy.get()));
+}
+
+class MoveConstructionReturnTest {
+ public:
+  MoveConstructionReturnTest()
+      : array_(CFArrayCreateMutable(nullptr,
+                                    /*capacity=*/0,
+                                    &kCFTypeArrayCallBacks)) {}
+
+  base::apple::ScopedCFTypeRef<CFMutableArrayRef> take_array() {
+    return std::move(array_);
+  }
+
+  bool has_array() { return array_.get() != nullptr; }
+
+ private:
+  base::apple::ScopedCFTypeRef<CFMutableArrayRef> array_;
+};
+
+TEST(ScopedCFTypeRefTest, MoveConstructionReturn) {
+  MoveConstructionReturnTest test;
+  ASSERT_TRUE(test.has_array());
+  ASSERT_TRUE(test.take_array());
+  ASSERT_FALSE(test.has_array());
+  ASSERT_FALSE(test.take_array());
+}
+
+TEST(ScopedCFTypeRefTest, MoveAssignmentSameType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> new_object(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  CFMutableArrayRef new_ref = new_object.get();
+  EXPECT_EQ(1, CFGetRetainCount(new_object.get()));
+
+  original = std::move(new_object);
+  EXPECT_EQ(nullptr, new_object.get());
+  EXPECT_EQ(new_ref, original.get());
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, MoveAssignmentSubType) {
+  ScopedCFTypeRef<CFArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> new_object(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  CFMutableArrayRef new_ref = new_object.get();
+  EXPECT_EQ(1, CFGetRetainCount(new_object.get()));
+
+  original = std::move(new_object);
+  EXPECT_EQ(nullptr, new_object.get());
+  EXPECT_EQ(new_ref, original.get());
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, ResetSameType) {
+  CFMutableArrayRef array =
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks);
+  EXPECT_EQ(1, CFGetRetainCount(array));
+
+  ScopedCFTypeRef<CFMutableArrayRef> retain_scoper;
+  retain_scoper.reset(array, base::scoped_policy::RETAIN);
+  EXPECT_EQ(array, retain_scoper.get());
+  EXPECT_EQ(2, CFGetRetainCount(array));
+
+  ScopedCFTypeRef<CFMutableArrayRef> assume_scoper;
+  assume_scoper.reset(array, base::scoped_policy::ASSUME);
+  EXPECT_EQ(array, assume_scoper.get());
+  EXPECT_EQ(2, CFGetRetainCount(array));
+
+  CFMutableArrayRef array2 =
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks);
+  EXPECT_EQ(1, CFGetRetainCount(array2));
+  ScopedCFTypeRef<CFMutableArrayRef> assume_scoper2;
+  assume_scoper2.reset(array2 /* with implicit ASSUME */);
+  EXPECT_EQ(array2, assume_scoper2.get());
+  EXPECT_EQ(1, CFGetRetainCount(array2));
+}
+
+TEST(ScopedCFTypeRefTest, ResetSubType) {
+  CFMutableArrayRef array =
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks);
+  EXPECT_EQ(1, CFGetRetainCount(array));
+
+  ScopedCFTypeRef<CFArrayRef> scoper;
+  scoper.reset(array);
+  EXPECT_EQ(array, scoper.get());
+  EXPECT_EQ(1, CFGetRetainCount(array));
+}
+
+TEST(ScopedCFTypeRefTest, ResetFromScoperSameType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFMutableArrayRef> copy;
+  copy.reset(original);
+  EXPECT_EQ(original.get(), copy.get());
+  EXPECT_EQ(2, CFGetRetainCount(original.get()));
+}
+
+TEST(ScopedCFTypeRefTest, ResetFromScoperSubType) {
+  ScopedCFTypeRef<CFMutableArrayRef> original(
+      CFArrayCreateMutable(nullptr, /*capacity=*/0, &kCFTypeArrayCallBacks));
+  EXPECT_EQ(1, CFGetRetainCount(original.get()));
+
+  ScopedCFTypeRef<CFArrayRef> copy;
+  copy.reset(original);
+  EXPECT_EQ(original.get(), copy.get());
+  EXPECT_EQ(2, CFGetRetainCount(original.get()));
+}
+
+}  // namespace
+}  // namespace base::apple
diff --git a/base/apple/scoped_dispatch_object.h b/base/apple/scoped_dispatch_object.h
new file mode 100644
index 0000000..a39b31a
--- /dev/null
+++ b/base/apple/scoped_dispatch_object.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_DISPATCH_OBJECT_H_
+#define BASE_APPLE_SCOPED_DISPATCH_OBJECT_H_
+
+#include <dispatch/dispatch.h>
+
+#include "base/apple/scoped_typeref.h"
+
+#if __OBJC__
+// In Objective-C ARC, dispatch types are Objective-C types, and must be
+// managed as such with __strong, etc. This header file must not be included
+// in Objective-C code, nor may it be allowed to be transitively included. Use
+// the pimpl pattern to isolate its use in a pure C++ file if needed.
+#error Do not use this file, or allow it to be included, in Objective-C code.
+#endif
+
+namespace base::apple {
+
+namespace internal {
+
+template <typename T>
+struct ScopedDispatchObjectTraits {
+  static constexpr T InvalidValue() { return nullptr; }
+  static T Retain(T object) {
+    dispatch_retain(object);
+    return object;
+  }
+  static void Release(T object) { dispatch_release(object); }
+};
+
+}  // namespace internal
+
+template <typename T>
+using ScopedDispatchObject =
+    ScopedTypeRef<T, internal::ScopedDispatchObjectTraits<T>>;
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_DISPATCH_OBJECT_H_
diff --git a/base/apple/scoped_mach_port.cc b/base/apple/scoped_mach_port.cc
new file mode 100644
index 0000000..c5bb5ca
--- /dev/null
+++ b/base/apple/scoped_mach_port.cc
@@ -0,0 +1,75 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/scoped_mach_port.h"
+
+#include "base/apple/mach_logging.h"
+
+namespace base::apple {
+namespace internal {
+
+// static
+void SendRightTraits::Free(mach_port_t port) {
+  kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
+  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+      << "ScopedMachSendRight mach_port_deallocate";
+}
+
+// static
+void ReceiveRightTraits::Free(mach_port_t port) {
+  kern_return_t kr =
+      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
+  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+      << "ScopedMachReceiveRight mach_port_mod_refs";
+}
+
+// static
+void PortSetTraits::Free(mach_port_t port) {
+  kern_return_t kr =
+      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_PORT_SET, -1);
+  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+      << "ScopedMachPortSet mach_port_mod_refs";
+}
+
+}  // namespace internal
+
+bool CreateMachPort(ScopedMachReceiveRight* receive,
+                    ScopedMachSendRight* send,
+                    absl::optional<mach_port_msgcount_t> queue_limit) {
+  mach_port_options_t options{};
+  options.flags = (send != nullptr ? MPO_INSERT_SEND_RIGHT : 0);
+
+  if (queue_limit.has_value()) {
+    options.flags |= MPO_QLIMIT;
+    options.mpl.mpl_qlimit = *queue_limit;
+  }
+
+  kern_return_t kr =
+      mach_port_construct(mach_task_self(), &options, 0,
+                          ScopedMachReceiveRight::Receiver(*receive).get());
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_port_construct";
+    return false;
+  }
+
+  // Multiple rights are coalesced to the same name in a task, so assign the
+  // send rights to the same name.
+  if (send) {
+    send->reset(receive->get());
+  }
+
+  return true;
+}
+
+ScopedMachSendRight RetainMachSendRight(mach_port_t port) {
+  kern_return_t kr =
+      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1);
+  if (kr == KERN_SUCCESS) {
+    return ScopedMachSendRight(port);
+  }
+  MACH_DLOG(ERROR, kr) << "mach_port_mod_refs +1";
+  return {};
+}
+
+}  // namespace base::apple
diff --git a/base/apple/scoped_mach_port.h b/base/apple/scoped_mach_port.h
new file mode 100644
index 0000000..6b3236e
--- /dev/null
+++ b/base/apple/scoped_mach_port.h
@@ -0,0 +1,79 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_MACH_PORT_H_
+#define BASE_APPLE_SCOPED_MACH_PORT_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/scoped_generic.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace base::apple {
+
+namespace internal {
+
+struct BASE_EXPORT SendRightTraits {
+  static mach_port_t InvalidValue() {
+    return MACH_PORT_NULL;
+  }
+
+  BASE_EXPORT static void Free(mach_port_t port);
+};
+
+struct BASE_EXPORT ReceiveRightTraits {
+  static mach_port_t InvalidValue() {
+    return MACH_PORT_NULL;
+  }
+
+  BASE_EXPORT static void Free(mach_port_t port);
+};
+
+struct PortSetTraits {
+  static mach_port_t InvalidValue() {
+    return MACH_PORT_NULL;
+  }
+
+  BASE_EXPORT static void Free(mach_port_t port);
+};
+
+}  // namespace internal
+
+// A scoper for handling a Mach port that names a send right. Send rights are
+// reference counted, and this takes ownership of the right on construction
+// and then removes a reference to the right on destruction. If the reference
+// is the last one on the right, the right is deallocated.
+using ScopedMachSendRight =
+    ScopedGeneric<mach_port_t, internal::SendRightTraits>;
+
+// A scoper for handling a Mach port's receive right. There is only one
+// receive right per port. This takes ownership of the receive right on
+// construction and then destroys the right on destruction, turning all
+// outstanding send rights into dead names.
+using ScopedMachReceiveRight =
+    ScopedGeneric<mach_port_t, internal::ReceiveRightTraits>;
+
+// A scoper for handling a Mach port set. A port set can have only one
+// reference. This takes ownership of that single reference on construction and
+// destroys the port set on destruction. Destroying a port set does not destroy
+// the receive rights that are members of the port set.
+using ScopedMachPortSet = ScopedGeneric<mach_port_t, internal::PortSetTraits>;
+
+// Constructs a Mach port receive right and places the result in |receive|.
+// If |send| is non-null, a send right will be created as well and stored
+// there. If |queue_limit| is specified, the receive right will be constructed
+// with the specified mpo_qlimit. Returns true on success and false on failure.
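+//
+// A minimal sketch of a typical call (illustrative, not from the original
+// header):
+//
+//   ScopedMachReceiveRight receive;
+//   ScopedMachSendRight send;
+//   if (!CreateMachPort(&receive, &send)) {
+//     return false;
+//   }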
+BASE_EXPORT bool CreateMachPort(
+    ScopedMachReceiveRight* receive,
+    ScopedMachSendRight* send,
+    absl::optional<mach_port_msgcount_t> queue_limit = absl::nullopt);
+
+// Increases the user reference count for MACH_PORT_RIGHT_SEND by 1 and returns
+// a new scoper to manage the additional right.
+BASE_EXPORT ScopedMachSendRight RetainMachSendRight(mach_port_t port);
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_MACH_PORT_H_
diff --git a/base/apple/scoped_mach_vm.cc b/base/apple/scoped_mach_vm.cc
new file mode 100644
index 0000000..1d2a3c6
--- /dev/null
+++ b/base/apple/scoped_mach_vm.cc
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/scoped_mach_vm.h"
+
+#include "base/apple/mach_logging.h"
+
+namespace base::apple {
+
+void ScopedMachVM::reset(vm_address_t address, vm_size_t size) {
+  DCHECK_EQ(address % PAGE_SIZE, 0u);
+  DCHECK_EQ(size % PAGE_SIZE, 0u);
+  reset_unaligned(address, size);
+}
+
+void ScopedMachVM::reset_unaligned(vm_address_t address, vm_size_t size) {
+  if (size_) {
+    if (address_ < address) {
+      kern_return_t kr = vm_deallocate(mach_task_self(), address_,
+                                       std::min(size_, address - address_));
+      MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
+    }
+    if (address_ + size_ > address + size) {
+      vm_address_t deallocate_start = std::max(address_, address + size);
+      kern_return_t kr = vm_deallocate(mach_task_self(), deallocate_start,
+                                       address_ + size_ - deallocate_start);
+      MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
+    }
+  }
+
+  address_ = address;
+  size_ = size;
+}
+
+}  // namespace base::apple
diff --git a/base/apple/scoped_mach_vm.h b/base/apple/scoped_mach_vm.h
new file mode 100644
index 0000000..cb9765d
--- /dev/null
+++ b/base/apple/scoped_mach_vm.h
@@ -0,0 +1,100 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_MACH_VM_H_
+#define BASE_APPLE_SCOPED_MACH_VM_H_
+
+#include <mach/mach.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/check_op.h"
+
+// Use ScopedMachVM to supervise ownership of pages in the current process
+// through the Mach VM subsystem. Pages allocated with vm_allocate can be
+// released when exiting a scope with ScopedMachVM.
+//
+// The Mach VM subsystem operates on a page-by-page basis, and a single VM
+// allocation managed by a ScopedMachVM object may span multiple pages. As far
+// as Mach is concerned, allocated pages may be deallocated individually. This
+// is in contrast to higher-level allocators such as malloc, where the base
+// address of an allocation implies the size of an allocated block.
+// Consequently, it is not sufficient to just pass the base address of an
+// allocation to ScopedMachVM, it also needs to know the size of the
+// allocation. To avoid any confusion, both the base address and size must
+// be page-aligned.
+//
+// When dealing with Mach VM, base addresses will naturally be page-aligned,
+// but user-specified sizes may not be. If there's a concern that a size is
+// not page-aligned, use the mach_vm_round_page macro to correct it.
+//
+// Example:
+//
+//   vm_address_t address = 0;
+//   vm_size_t size = 12345;  // This requested size is not page-aligned.
+//   kern_return_t kr =
+//       vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+//   if (kr != KERN_SUCCESS) {
+//     return false;
+//   }
+//   ScopedMachVM vm_owner(address, mach_vm_round_page(size));
+
+namespace base::apple {
+
+class BASE_EXPORT ScopedMachVM {
+ public:
+  explicit ScopedMachVM(vm_address_t address = 0, vm_size_t size = 0)
+      : address_(address), size_(size) {
+    DCHECK_EQ(address % PAGE_SIZE, 0u);
+    DCHECK_EQ(size % PAGE_SIZE, 0u);
+  }
+
+  ScopedMachVM(const ScopedMachVM&) = delete;
+  ScopedMachVM& operator=(const ScopedMachVM&) = delete;
+
+  ~ScopedMachVM() {
+    if (size_) {
+      vm_deallocate(mach_task_self(), address_, size_);
+    }
+  }
+
+  // Resets the scoper to manage a new memory region. Both |address| and |size|
+  // must be page-aligned. If the new region is a smaller subset of the
+  // existing region (i.e. the new and old regions overlap), the non-
+  // overlapping part of the old region is deallocated.
+  void reset(vm_address_t address = 0, vm_size_t size = 0);
+
+  // Like reset() but does not DCHECK that |address| and |size| are page-
+  // aligned.
+  void reset_unaligned(vm_address_t address, vm_size_t size);
+
+  vm_address_t address() const {
+    return address_;
+  }
+
+  vm_size_t size() const {
+    return size_;
+  }
+
+  void swap(ScopedMachVM& that) {
+    std::swap(address_, that.address_);
+    std::swap(size_, that.size_);
+  }
+
+  void release() {
+    address_ = 0;
+    size_ = 0;
+  }
+
+ private:
+  vm_address_t address_;
+  vm_size_t size_;
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_MACH_VM_H_
diff --git a/base/apple/scoped_mach_vm_unittest.cc b/base/apple/scoped_mach_vm_unittest.cc
new file mode 100644
index 0000000..43858a7
--- /dev/null
+++ b/base/apple/scoped_mach_vm_unittest.cc
@@ -0,0 +1,242 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/scoped_mach_vm.h"
+
+#include <mach/mach.h>
+
+#include "base/memory/page_size.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Note: This test CANNOT be run multiple times within the same process (e.g.
+// with --gtest_repeat). Allocating and deallocating in quick succession, even
+// with different sizes, will typically result in the kernel returning the same
+// address. If the allocation pattern is small->large->small, the second small
+// allocation will report being part of the previously-deallocated large region.
+// That will cause the GetRegionInfo() expectations to fail.
+
+namespace base::apple {
+namespace {
+
+void GetRegionInfo(vm_address_t* region_address, vm_size_t* region_size) {
+  vm_region_basic_info_64 region_info;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+  mach_port_t object;
+  kern_return_t kr = vm_region_64(
+      mach_task_self(), region_address, region_size, VM_REGION_BASIC_INFO_64,
+      reinterpret_cast<vm_region_info_t>(&region_info), &count, &object);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+}
+
+TEST(ScopedMachVMTest, Basic) {
+  vm_address_t address;
+  vm_size_t size = base::GetPageSize();
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  ScopedMachVM scoper(address, size);
+  EXPECT_EQ(address, scoper.address());
+  EXPECT_EQ(size, scoper.size());
+
+  // Test the initial region. In some cases on some platforms (macOS 13 on
+  // Intel, for example), Darwin may combine the requested allocation with
+  // an existing one. As a result, the allocated region may live in a
+  // larger region. Therefore, when we GetRegionInfo(), we want to check
+  // that our original region is a subset of (region_address, region_size)
+  // rather than being exactly equal to it.
+  vm_address_t region_address = address;
+  vm_size_t region_size;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + size, region_address + region_size);
+
+  {
+    ScopedMachVM scoper2;
+    EXPECT_EQ(0u, scoper2.address());
+    EXPECT_EQ(0u, scoper2.size());
+
+    scoper.swap(scoper2);
+
+    EXPECT_EQ(address, scoper2.address());
+    EXPECT_EQ(size, scoper2.size());
+
+    EXPECT_EQ(0u, scoper.address());
+    EXPECT_EQ(0u, scoper.size());
+  }
+
+  // After deallocation, the kernel will return the next highest address.
+  region_address = address;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_LT(address, region_address);
+}
+
+TEST(ScopedMachVMTest, Reset) {
+  vm_address_t address;
+  vm_size_t size = base::GetPageSize();
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  ScopedMachVM scoper(address, size);
+
+  // Test the initial region.
+  vm_address_t region_address = address;
+  vm_size_t region_size;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + size, region_address + region_size);
+
+  scoper.reset();
+
+  // After deallocation, the kernel will return the next highest address.
+  region_address = address;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_LT(address, region_address);
+}
+
+TEST(ScopedMachVMTest, ResetSmallerAddress) {
+  vm_address_t address;
+  vm_size_t size = 2 * base::GetPageSize();
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  ScopedMachVM scoper(address, base::GetPageSize());
+
+  // Test the initial region.
+  vm_address_t region_address = address;
+  vm_size_t region_size;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_EQ(address, region_address);
+  EXPECT_EQ(2u * base::GetPageSize(), region_size);
+
+  // This will free the first page, [address, address + base::GetPageSize()),
+  // which is currently in the scoper.
+  scoper.reset(address + base::GetPageSize(), base::GetPageSize());
+
+  // Verify that the region is now only one page.
+  region_address = address;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(address + base::GetPageSize(), region_address);
+  EXPECT_EQ(1u * base::GetPageSize(), region_size);
+}
+
+TEST(ScopedMachVMTest, ResetLargerAddressAndSize) {
+  const vm_size_t kOnePage = base::GetPageSize();
+  const vm_size_t kTwoPages = 2 * kOnePage;
+  const vm_size_t kThreePages = 3 * kOnePage;
+
+  vm_address_t address;
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, kThreePages, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  // Test the initial region.
+  vm_address_t region_address = address;
+  vm_size_t region_size;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + kThreePages, region_address + region_size);
+
+  ScopedMachVM scoper(address + kTwoPages, kOnePage);
+  // Expand the region to be larger.
+  scoper.reset(address, kThreePages);
+
+  // Verify that the region is still three pages.
+  region_address = address;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + kThreePages, region_address + region_size);
+}
+
+TEST(ScopedMachVMTest, ResetLargerAddress) {
+  const vm_size_t kThreePages = 3 * base::GetPageSize();
+  const vm_size_t kSixPages = 2 * kThreePages;
+
+  vm_address_t address;
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, kSixPages, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  // Test the initial region.
+  vm_address_t region_address = address;
+  vm_size_t region_size;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_EQ(KERN_SUCCESS, kr);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + kSixPages, region_address + region_size);
+
+  ScopedMachVM scoper(address + kThreePages, kThreePages);
+
+  // Shift the region by three pages; the last three pages should be
+  // deallocated, while keeping the first three.
+  scoper.reset(address, kThreePages);
+
+  // Verify that the region is just three pages.
+  region_address = address;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + kThreePages, region_address + region_size);
+}
+
+TEST(ScopedMachVMTest, ResetUnaligned) {
+  const vm_size_t kOnePage = base::GetPageSize();
+  const vm_size_t kTwoPages = 2 * kOnePage;
+
+  vm_address_t address;
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, kTwoPages, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  ScopedMachVM scoper;
+
+  // Test the initial region.
+  vm_address_t region_address = address;
+  vm_size_t region_size;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + kTwoPages, region_address + region_size);
+
+  // Initialize with unaligned size.
+  scoper.reset_unaligned(address + kOnePage, kOnePage - 3);
+  // Reset with another unaligned size.
+  scoper.reset_unaligned(address + kOnePage, kOnePage - 11);
+
+  // The entire unaligned page gets deallocated.
+  region_address = address;
+  GetRegionInfo(&region_address, &region_size);
+  EXPECT_GE(address, region_address);
+  EXPECT_LE(address + kOnePage, region_address + region_size);
+
+  // Reset with the remaining page.
+  scoper.reset_unaligned(address, base::GetPageSize());
+}
+
+#if DCHECK_IS_ON()
+
+TEST(ScopedMachVMTest, ResetMustBeAligned) {
+  const vm_size_t kOnePage = base::GetPageSize();
+  const vm_size_t kTwoPages = 2 * kOnePage;
+
+  vm_address_t address;
+  kern_return_t kr =
+      vm_allocate(mach_task_self(), &address, kTwoPages, VM_FLAGS_ANYWHERE);
+  ASSERT_EQ(KERN_SUCCESS, kr);
+
+  ScopedMachVM scoper;
+  EXPECT_DCHECK_DEATH(scoper.reset(address, kOnePage + 1));
+}
+
+#endif  // DCHECK_IS_ON()
+
+}  // namespace
+}  // namespace base::apple
diff --git a/base/apple/scoped_nsautorelease_pool.h b/base/apple/scoped_nsautorelease_pool.h
new file mode 100644
index 0000000..056668b
--- /dev/null
+++ b/base/apple/scoped_nsautorelease_pool.h
@@ -0,0 +1,67 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_NSAUTORELEASE_POOL_H_
+#define BASE_APPLE_SCOPED_NSAUTORELEASE_POOL_H_
+
+#include "base/base_export.h"
+#include "base/dcheck_is_on.h"
+#include "base/memory/raw_ptr_exclusion.h"
+#include "base/memory/stack_allocated.h"
+#include "base/thread_annotations.h"
+#include "base/threading/thread_checker.h"
+
+namespace base::apple {
+
+// ScopedNSAutoreleasePool creates an autorelease pool when instantiated and
+// pops it when destroyed.  This allows an autorelease pool to be maintained in
+// ordinary C++ code without bringing in any direct Objective-C dependency.
+//
+// Before using, please be aware that the semantics of autorelease pools do not
+// match the semantics of a C++ class. In particular, recycling or destructing a
+// pool lower on the stack destroys all pools higher on the stack, which does
+// not mesh well with the existence of C++ objects for each pool.
+//
+// Use this class only in C++ code; use @autoreleasepool in Obj-C(++) code.
+
+class BASE_EXPORT ScopedNSAutoreleasePool {
+  STACK_ALLOCATED();
+
+ public:
+  ScopedNSAutoreleasePool();
+
+  ScopedNSAutoreleasePool(const ScopedNSAutoreleasePool&) = delete;
+  ScopedNSAutoreleasePool& operator=(const ScopedNSAutoreleasePool&) = delete;
+  ScopedNSAutoreleasePool(ScopedNSAutoreleasePool&&) = delete;
+  ScopedNSAutoreleasePool& operator=(ScopedNSAutoreleasePool&&) = delete;
+
+  ~ScopedNSAutoreleasePool();
+
+  // Clear out the pool in case its position on the stack causes it to be alive
+  // for long periods of time (such as the entire length of the app). Only use
+  // it when you're certain the items currently in the pool are no longer
+  // needed.
+  void Recycle();
+
+ private:
+  // Pushes the autorelease pool and does all required verification.
+  void PushImpl() VALID_CONTEXT_REQUIRED(thread_checker_);
+
+  // Pops the autorelease pool and does all required verification.
+  void PopImpl() VALID_CONTEXT_REQUIRED(thread_checker_);
+
+  // This field is not a raw_ptr<> because it is a pointer to an Objective-C
+  // object.
+  RAW_PTR_EXCLUSION void* autorelease_pool_ GUARDED_BY_CONTEXT(thread_checker_);
+
+  THREAD_CHECKER(thread_checker_);
+
+#if DCHECK_IS_ON()
+  unsigned long level_ = 0;
+#endif
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_NSAUTORELEASE_POOL_H_
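
For orientation, a minimal sketch of the intended use of this class from pure C++ code (DoWorkThatMayAutorelease() is a hypothetical helper, not part of this change):

  #include "base/apple/scoped_nsautorelease_pool.h"

  void RunOneIteration() {
    // Pushed here, popped at the end of the scope; anything autoreleased by
    // the work below is released at that point.
    base::apple::ScopedNSAutoreleasePool pool;
    DoWorkThatMayAutorelease();  // hypothetical helper
  }
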
diff --git a/base/apple/scoped_nsautorelease_pool.mm b/base/apple/scoped_nsautorelease_pool.mm
new file mode 100644
index 0000000..f3a71c6
--- /dev/null
+++ b/base/apple/scoped_nsautorelease_pool.mm
@@ -0,0 +1,140 @@
+// Copyright 2010 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/apple/scoped_nsautorelease_pool.h"
+
+#include "base/dcheck_is_on.h"
+
+#if DCHECK_IS_ON()
+#import <Foundation/Foundation.h>
+
+#include "base/debug/crash_logging.h"
+#include "base/debug/stack_trace.h"
+#include "base/immediate_crash.h"
+#include "base/strings/sys_string_conversions.h"
+#endif
+
+// Note that this uses the direct runtime interface to the autorelease pool.
+// https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
+// This is done so that this code can work when compiled for ARC.
+extern "C" {
+void* objc_autoreleasePoolPush(void);
+void objc_autoreleasePoolPop(void* pool);
+}
+
+namespace base::apple {
+
+#if DCHECK_IS_ON()
+namespace {
+
+using BlockReturningStackTrace = debug::StackTrace (^)();
+
+// Because //base is not allowed to define Objective-C classes, which would be
+// the most reasonable way to wrap a C++ object like base::debug::StackTrace, do
+// it in a much more absurd, yet not completely unreasonable, way.
+//
+// This uses a default argument for the stack trace so that the creation of the
+// stack trace is attributed to the parent function.
+BlockReturningStackTrace MakeBlockReturningStackTrace(
+    debug::StackTrace stack_trace = debug::StackTrace()) {
+  // Return a block that references the stack trace. That will cause a copy of
+  // the stack trace to be made by the block, and because blocks are effectively
+  // Objective-C objects, they can be used in the NSThread thread dictionary.
+  return ^() {
+    return stack_trace;
+  };
+}
+
+// For each NSThread, maintain an array of stack traces, one for the state of
+// the stack for each invocation of an autorelease pool push. Even though one is
+// allowed to clear out an entire stack of autorelease pools by releasing one
+// near the bottom, because the stack abstraction is mapped to C++ classes, this
+// cannot be allowed.
+NSMutableArray<BlockReturningStackTrace>* GetLevelStackTraces() {
+  NSMutableArray* traces =
+      NSThread.currentThread
+          .threadDictionary[@"CrScopedNSAutoreleasePoolTraces"];
+  if (traces) {
+    return traces;
+  }
+
+  traces = [NSMutableArray array];
+  NSThread.currentThread.threadDictionary[@"CrScopedNSAutoreleasePoolTraces"] =
+      traces;
+  return traces;
+}
+
+}  // namespace
+#endif
+
+ScopedNSAutoreleasePool::ScopedNSAutoreleasePool() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  PushImpl();
+}
+
+ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  PopImpl();
+}
+
+void ScopedNSAutoreleasePool::Recycle() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  // Cycle the internal pool, allowing everything there to get cleaned up and
+  // start anew.
+  PopImpl();
+  PushImpl();
+}
+
+void ScopedNSAutoreleasePool::PushImpl() {
+#if DCHECK_IS_ON()
+  [GetLevelStackTraces() addObject:MakeBlockReturningStackTrace()];
+  level_ = GetLevelStackTraces().count;
+#endif
+  autorelease_pool_ = objc_autoreleasePoolPush();
+}
+
+void ScopedNSAutoreleasePool::PopImpl() {
+#if DCHECK_IS_ON()
+  auto level_count = GetLevelStackTraces().count;
+  if (level_ != level_count) {
+    NSLog(@"Popping autorelease pool at level %lu while pools exist through "
+          @"level %lu",
+          level_, level_count);
+    if (level_ < level_count) {
+      NSLog(@"WARNING: This abandons ScopedNSAutoreleasePool objects which now "
+            @"have no corresponding implementation.");
+    } else {
+      NSLog(@"ERROR: This is an abandoned ScopedNSAutoreleasePool that cannot "
+            @"release; expect the autorelease machinery to crash.");
+    }
+    NSLog(@"====================");
+    NSString* current_stack = SysUTF8ToNSString(debug::StackTrace().ToString());
+    NSLog(@"Pop:\n%@", current_stack);
+    [GetLevelStackTraces()
+        enumerateObjectsWithOptions:NSEnumerationReverse
+                         usingBlock:^(BlockReturningStackTrace obj,
+                                      NSUInteger idx, BOOL* stop) {
+                           NSLog(@"====================");
+                           NSLog(@"Autorelease pool level %lu was pushed:\n%@",
+                                 idx + 1, SysUTF8ToNSString(obj().ToString()));
+                         }];
+    // Assume an interactive use of Chromium where crashing immediately is
+    // desirable, and die. When investigating a failing automated test that dies
+    // here, remove these crash keys and the call to ImmediateCrash() to reveal
+    // where the abandoned ScopedNSAutoreleasePool was expected to be released.
+    SCOPED_CRASH_KEY_NUMBER("ScopedNSAutoreleasePool", "currentlevel", level_);
+    SCOPED_CRASH_KEY_NUMBER("ScopedNSAutoreleasePool", "levelcount",
+                            level_count);
+    SCOPED_CRASH_KEY_STRING1024("ScopedNSAutoreleasePool", "currentstack",
+                                SysNSStringToUTF8(current_stack));
+    SCOPED_CRASH_KEY_STRING1024("ScopedNSAutoreleasePool", "recentstack",
+                                GetLevelStackTraces().lastObject().ToString());
+    ImmediateCrash();
+  }
+  [GetLevelStackTraces() removeLastObject];
+#endif
+  objc_autoreleasePoolPop(autorelease_pool_);
+}
+
+}  // namespace base::apple
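
A hedged sketch of the Recycle() pattern the implementation above supports for long-lived pools (the loop bound and the work function are illustrative only):

  base::apple::ScopedNSAutoreleasePool pool;
  for (int i = 0; i < 1000; ++i) {
    DoWorkThatMayAutorelease();  // hypothetical helper
    pool.Recycle();  // Pop and re-push so autoreleased objects don't pile up.
  }
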
diff --git a/base/apple/scoped_nsautorelease_pool_unittest.mm b/base/apple/scoped_nsautorelease_pool_unittest.mm
new file mode 100644
index 0000000..8bd1120
--- /dev/null
+++ b/base/apple/scoped_nsautorelease_pool_unittest.mm
@@ -0,0 +1,26 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/apple/scoped_nsautorelease_pool.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace base::apple {
+
+#if DCHECK_IS_ON()
+TEST(ScopedNSAutoreleasePoolTest, DieOutOfOrder) {
+  absl::optional<ScopedNSAutoreleasePool> pool1;
+  absl::optional<ScopedNSAutoreleasePool> pool2;
+
+  // Instantiate the pools in the order 1, then 2.
+  pool1.emplace();
+  pool2.emplace();
+
+  // Destroy in the wrong order; ensure death.
+  ASSERT_DEATH(pool1.reset(), "autorelease");
+}
+#endif
+
+}  // namespace base::apple
diff --git a/base/apple/scoped_nsobject.h b/base/apple/scoped_nsobject.h
new file mode 100644
index 0000000..10d25b6
--- /dev/null
+++ b/base/apple/scoped_nsobject.h
@@ -0,0 +1,102 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_NSOBJECT_H_
+#define BASE_APPLE_SCOPED_NSOBJECT_H_
+
+// Include NSObject.h directly because Foundation.h pulls in many dependencies.
+// (Approx 100k lines of code versus 1.5k for NSObject.h). scoped_nsobject gets
+// singled out because it is most typically included from other header files.
+#import <Foundation/NSObject.h>
+
+#include <type_traits>
+
+#include "base/apple/scoped_typeref.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+#if defined(__has_feature) && __has_feature(objc_arc)
+#error "Do not use scoped_nsobject in ARC code; use __strong instead."
+#endif
+
+@class NSAutoreleasePool;
+
+namespace base::apple {
+
+// scoped_nsobject<> is patterned after std::unique_ptr<>, but maintains
+// ownership of an NSObject subclass object.  Style deviations here are solely
+// for compatibility with std::unique_ptr<>'s interface, with which everyone is
+// already familiar.
+//
+// scoped_nsobject<> takes ownership of an object (in the constructor or in
+// reset()) by taking over the caller's existing ownership claim.  The caller
+// must own the object it gives to scoped_nsobject<>, and relinquishes an
+// ownership claim to that object.  scoped_nsobject<> does not call -retain,
+// callers have to call this manually if appropriate.
+//
+// scoped_nsprotocol<> has the same behavior as scoped_nsobject, but can be used
+// with protocols.
+//
+// scoped_nsobject<> is not to be used for NSAutoreleasePools. For C++ code use
+// ScopedNSAutoreleasePool; for Objective-C(++) code use @autoreleasepool
+// instead. We check for bad uses of scoped_nsobject with NSAutoreleasePool at
+// compile time with a static_assert (see below).
+
+namespace internal {
+
+template <typename NST>
+struct ScopedNSProtocolTraits {
+  static NST InvalidValue() { return nil; }
+  static NST Retain(NST nst) { return [nst retain]; }
+  static void Release(NST nst) { [nst release]; }
+};
+
+}  // namespace internal
+
+template <typename NST>
+class scoped_nsprotocol
+    : public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
+ public:
+  using ScopedTypeRef<NST,
+                      internal::ScopedNSProtocolTraits<NST>>::ScopedTypeRef;
+
+  // Shift reference to the autorelease pool to be released later.
+  NST autorelease() { return [this->release() autorelease]; }
+};
+
+// Free functions
+template <class C>
+void swap(scoped_nsprotocol<C>& p1, scoped_nsprotocol<C>& p2) {
+  p1.swap(p2);
+}
+
+template <class C>
+bool operator==(C p1, const scoped_nsprotocol<C>& p2) {
+  return p1 == p2.get();
+}
+
+template <class C>
+bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
+  return p1 != p2.get();
+}
+
+template <typename NST>
+class scoped_nsobject : public scoped_nsprotocol<NST*> {
+ public:
+  using scoped_nsprotocol<NST*>::scoped_nsprotocol;
+
+  static_assert(std::is_same_v<NST, NSAutoreleasePool> == false,
+                "Use @autoreleasepool instead");
+};
+
+// Specialization to make scoped_nsobject<id> work.
+template <>
+class scoped_nsobject<id> : public scoped_nsprotocol<id> {
+ public:
+  using scoped_nsprotocol<id>::scoped_nsprotocol;
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_NSOBJECT_H_
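
As a rough usage sketch (non-ARC Objective-C++ only; under ARC this header refuses to compile and __strong should be used instead; assumes Foundation is imported):

  base::apple::scoped_nsobject<NSString> name(
      [[NSString alloc] initWithUTF8String:"hello"]);  // takes over the +1
  if (name) {
    NSLog(@"%@", name.get());
  }
  // -release is sent automatically when |name| goes out of scope.
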
diff --git a/base/apple/scoped_nsobject_unittest.mm b/base/apple/scoped_nsobject_unittest.mm
new file mode 100644
index 0000000..a369fbc
--- /dev/null
+++ b/base/apple/scoped_nsobject_unittest.mm
@@ -0,0 +1,123 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/apple/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(__has_feature) && __has_feature(objc_arc)
+#error "This file must not be compiled with ARC."
+#endif
+
+namespace {
+
+TEST(ScopedNSObjectTest, ScopedNSObject) {
+  base::apple::scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  ASSERT_TRUE(p1.get());
+  ASSERT_EQ(1u, [p1.get() retainCount]);
+  base::apple::scoped_nsobject<NSObject> p2(p1);
+  ASSERT_EQ(p1.get(), p2.get());
+  ASSERT_EQ(2u, [p1.get() retainCount]);
+  p2.reset();
+  ASSERT_EQ(nil, p2.get());
+  ASSERT_EQ(1u, [p1.get() retainCount]);
+  {
+    base::apple::scoped_nsobject<NSObject> p3 = p1;
+    ASSERT_EQ(p1.get(), p3.get());
+    ASSERT_EQ(2u, [p1.get() retainCount]);
+    @autoreleasepool {
+      p3 = p1;
+    }
+    ASSERT_EQ(p1.get(), p3.get());
+    ASSERT_EQ(2u, [p1.get() retainCount]);
+  }
+  ASSERT_EQ(1u, [p1.get() retainCount]);
+  base::apple::scoped_nsobject<NSObject> p4([p1.get() retain]);
+  ASSERT_EQ(2u, [p1.get() retainCount]);
+  ASSERT_TRUE(p1 == p1.get());
+  ASSERT_TRUE(p1 == p1);
+  ASSERT_FALSE(p1 != p1);
+  ASSERT_FALSE(p1 != p1.get());
+  base::apple::scoped_nsobject<NSObject> p5([[NSObject alloc] init]);
+  ASSERT_TRUE(p1 != p5);
+  ASSERT_TRUE(p1 != p5.get());
+  ASSERT_FALSE(p1 == p5);
+  ASSERT_FALSE(p1 == p5.get());
+
+  base::apple::scoped_nsobject<NSObject> p6 = p1;
+  ASSERT_EQ(3u, [p6.get() retainCount]);
+  @autoreleasepool {
+    p6.autorelease();
+    ASSERT_EQ(nil, p6.get());
+    ASSERT_EQ(3u, [p1.get() retainCount]);
+  }
+  ASSERT_EQ(2u, [p1.get() retainCount]);
+
+  base::apple::scoped_nsobject<NSObject> p7([NSObject new]);
+  base::apple::scoped_nsobject<NSObject> p8(std::move(p7));
+  ASSERT_TRUE(p8);
+  ASSERT_EQ(1u, [p8.get() retainCount]);
+  ASSERT_FALSE(p7.get());
+}
+
+// Instantiating scoped_nsobject<> with T=NSAutoreleasePool should trip a
+// static_assert.
+#if 0
+TEST(ScopedNSObjectTest, FailToCreateScopedNSObjectAutoreleasePool) {
+  base::apple::scoped_nsobject<NSAutoreleasePool> pool;
+}
+#endif
+
+TEST(ScopedNSObjectTest, ScopedNSObjectInContainer) {
+  base::apple::scoped_nsobject<id> p([[NSObject alloc] init]);
+  ASSERT_TRUE(p.get());
+  ASSERT_EQ(1u, [p.get() retainCount]);
+  {
+    std::vector<base::apple::scoped_nsobject<id>> objects;
+    objects.push_back(p);
+    ASSERT_EQ(2u, [p.get() retainCount]);
+    ASSERT_EQ(p.get(), objects[0].get());
+    objects.push_back(
+        base::apple::scoped_nsobject<id>([[NSObject alloc] init]));
+    ASSERT_TRUE(objects[1].get());
+    ASSERT_EQ(1u, [objects[1].get() retainCount]);
+  }
+  ASSERT_EQ(1u, [p.get() retainCount]);
+}
+
+TEST(ScopedNSObjectTest, ScopedNSObjectFreeFunctions) {
+  base::apple::scoped_nsobject<id> p1([[NSObject alloc] init]);
+  id o1 = p1.get();
+  ASSERT_TRUE(o1 == p1);
+  ASSERT_FALSE(o1 != p1);
+  base::apple::scoped_nsobject<id> p2([[NSObject alloc] init]);
+  ASSERT_TRUE(o1 != p2);
+  ASSERT_FALSE(o1 == p2);
+  id o2 = p2.get();
+  swap(p1, p2);
+  ASSERT_EQ(o2, p1.get());
+  ASSERT_EQ(o1, p2.get());
+}
+
+TEST(ScopedNSObjectTest, ResetWithAnotherScopedNSObject) {
+  base::apple::scoped_nsobject<id> p1([[NSObject alloc] init]);
+  id o1 = p1.get();
+
+  id o2 = nil;
+  {
+    base::apple::scoped_nsobject<id> p2([[NSObject alloc] init]);
+    o2 = p2.get();
+    p1.reset(p2);
+    EXPECT_EQ(2u, [p1.get() retainCount]);
+  }
+
+  EXPECT_NE(o1, p1.get());
+  EXPECT_EQ(o2, p1.get());
+  EXPECT_NE(p1.get(), nil);
+
+  EXPECT_EQ(1u, [p1.get() retainCount]);
+}
+
+}  // namespace
diff --git a/base/apple/scoped_objc_class_swizzler.h b/base/apple/scoped_objc_class_swizzler.h
new file mode 100644
index 0000000..279e59f
--- /dev/null
+++ b/base/apple/scoped_objc_class_swizzler.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_OBJC_CLASS_SWIZZLER_H_
+#define BASE_APPLE_SCOPED_OBJC_CLASS_SWIZZLER_H_
+
+#import <objc/runtime.h>
+
+#include "base/base_export.h"
+
+namespace base::apple {
+
+// Within a given scope, swaps method implementations of a class interface, or
+// between two class interfaces. The argument and return types must match.
+class BASE_EXPORT ScopedObjCClassSwizzler {
+ public:
+  // Given two classes that each respond to |selector|, swap the implementations
+  // of those methods.
+  ScopedObjCClassSwizzler(Class target, Class source, SEL selector);
+
+  // Given two selectors on the same class interface, |target| (e.g. via
+  // inheritance or categories), swap the implementations of methods |original|
+  // and |alternate|.
+  ScopedObjCClassSwizzler(Class target, SEL original, SEL alternate);
+
+  ScopedObjCClassSwizzler(const ScopedObjCClassSwizzler&) = delete;
+  ScopedObjCClassSwizzler& operator=(const ScopedObjCClassSwizzler&) = delete;
+
+  ~ScopedObjCClassSwizzler();
+
+  // Return a callable function pointer for the replaced method. To call this
+  // from the replacing function, the first two arguments should be |self| and
+  // |_cmd|. These are followed by the (variadic) method arguments.
+  IMP GetOriginalImplementation() const;
+
+  // Invoke the original function directly, optionally with some arguments.
+  // Prefer this to hanging onto pointers to the original implementation
+  // function or to casting the result of GetOriginalImplementation() yourself.
+  template <typename Ret, typename... Args>
+  Ret InvokeOriginal(id receiver, SEL selector, Args... args) const {
+    auto func = reinterpret_cast<Ret (*)(id, SEL, Args...)>(
+        GetOriginalImplementation());
+    return func(receiver, selector, args...);
+  }
+
+ private:
+  // Delegated constructor.
+  void Init(Class target, Class source, SEL original, SEL alternate);
+
+  Method old_selector_impl_;
+  Method new_selector_impl_;
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_OBJC_CLASS_SWIZZLER_H_
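
A minimal usage sketch, assuming two test classes RealClient and FakeClient that both implement -fetch (the class and selector names are invented for illustration):

  {
    base::apple::ScopedObjCClassSwizzler swizzler(
        [RealClient class], [FakeClient class], @selector(fetch));
    // -fetch on RealClient instances now runs FakeClient's implementation; the
    // original stays reachable via
    // swizzler.InvokeOriginal<id>(real_client, @selector(fetch)).
  }
  // Leaving the scope swaps the implementations back.
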
diff --git a/base/apple/scoped_objc_class_swizzler.mm b/base/apple/scoped_objc_class_swizzler.mm
new file mode 100644
index 0000000..d06b43a
--- /dev/null
+++ b/base/apple/scoped_objc_class_swizzler.mm
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/apple/scoped_objc_class_swizzler.h"
+
+#include <string.h>
+
+#include "base/check_op.h"
+
+namespace base::apple {
+
+ScopedObjCClassSwizzler::ScopedObjCClassSwizzler(Class target,
+                                                 Class source,
+                                                 SEL selector)
+    : old_selector_impl_(nullptr), new_selector_impl_(nullptr) {
+  Init(target, source, selector, selector);
+}
+
+ScopedObjCClassSwizzler::ScopedObjCClassSwizzler(Class target,
+                                                 SEL original,
+                                                 SEL alternate)
+    : old_selector_impl_(nullptr), new_selector_impl_(nullptr) {
+  Init(target, target, original, alternate);
+}
+
+ScopedObjCClassSwizzler::~ScopedObjCClassSwizzler() {
+  if (old_selector_impl_ && new_selector_impl_) {
+    method_exchangeImplementations(old_selector_impl_, new_selector_impl_);
+  }
+}
+
+IMP ScopedObjCClassSwizzler::GetOriginalImplementation() const {
+  // Note that while the swizzle is in effect the "new" method is actually
+  // pointing to the original implementation, since they have been swapped.
+  return method_getImplementation(new_selector_impl_);
+}
+
+void ScopedObjCClassSwizzler::Init(Class target,
+                                   Class source,
+                                   SEL original,
+                                   SEL alternate) {
+  old_selector_impl_ = class_getInstanceMethod(target, original);
+  new_selector_impl_ = class_getInstanceMethod(source, alternate);
+  if (!old_selector_impl_ && !new_selector_impl_) {
+    // Try class methods.
+    old_selector_impl_ = class_getClassMethod(target, original);
+    new_selector_impl_ = class_getClassMethod(source, alternate);
+  }
+
+  DCHECK(old_selector_impl_);
+  DCHECK(new_selector_impl_);
+  if (!old_selector_impl_ || !new_selector_impl_) {
+    return;
+  }
+
+  // The argument and return types must match exactly.
+  const char* old_types = method_getTypeEncoding(old_selector_impl_);
+  const char* new_types = method_getTypeEncoding(new_selector_impl_);
+  DCHECK(old_types);
+  DCHECK(new_types);
+  DCHECK_EQ(0, strcmp(old_types, new_types));
+  if (!old_types || !new_types || strcmp(old_types, new_types)) {
+    old_selector_impl_ = new_selector_impl_ = nullptr;
+    return;
+  }
+
+  method_exchangeImplementations(old_selector_impl_, new_selector_impl_);
+}
+
+}  // namespace base::apple
diff --git a/base/apple/scoped_objc_class_swizzler_unittest.mm b/base/apple/scoped_objc_class_swizzler_unittest.mm
new file mode 100644
index 0000000..9a5fdc9
--- /dev/null
+++ b/base/apple/scoped_objc_class_swizzler_unittest.mm
@@ -0,0 +1,154 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/apple/scoped_objc_class_swizzler.h"
+
+#import <Foundation/Foundation.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+@interface ObjCClassSwizzlerTestOne : NSObject
++ (NSInteger)function;
+- (NSInteger)method;
+- (NSInteger)modifier;
+@end
+
+@interface ObjCClassSwizzlerTestTwo : NSObject
++ (NSInteger)function;
+- (NSInteger)method;
+- (NSInteger)modifier;
+@end
+
+@implementation ObjCClassSwizzlerTestOne : NSObject
+
++ (NSInteger)function {
+  return 10;
+}
+
+- (NSInteger)method {
+  // Multiply by a modifier to ensure |self| in a swizzled implementation
+  // refers to the original object.
+  return 1 * [self modifier];
+}
+
+- (NSInteger)modifier {
+  return 3;
+}
+
+@end
+
+@implementation ObjCClassSwizzlerTestTwo : NSObject
+
++ (NSInteger)function {
+  return 20;
+}
+
+- (NSInteger)method {
+  return 2 * [self modifier];
+}
+
+- (NSInteger)modifier {
+  return 7;
+}
+
+@end
+
+@interface ObjCClassSwizzlerTestOne (AlternateCategory)
+- (NSInteger)alternate;
+@end
+
+@implementation ObjCClassSwizzlerTestOne (AlternateCategory)
+- (NSInteger)alternate {
+  return 3 * [self modifier];
+}
+@end
+
+@interface ObjCClassSwizzlerTestOneChild : ObjCClassSwizzlerTestOne
+- (NSInteger)childAlternate;
+@end
+
+@implementation ObjCClassSwizzlerTestOneChild
+- (NSInteger)childAlternate {
+  return 5 * [self modifier];
+}
+@end
+
+namespace base::apple {
+
+TEST(ObjCClassSwizzlerTest, SwizzleInstanceMethods) {
+  ObjCClassSwizzlerTestOne* object_one =
+      [[ObjCClassSwizzlerTestOne alloc] init];
+  ObjCClassSwizzlerTestTwo* object_two =
+      [[ObjCClassSwizzlerTestTwo alloc] init];
+  EXPECT_EQ(3, [object_one method]);
+  EXPECT_EQ(14, [object_two method]);
+
+  {
+    base::apple::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOne class], [ObjCClassSwizzlerTestTwo class],
+        @selector(method));
+    EXPECT_EQ(6, [object_one method]);
+    EXPECT_EQ(7, [object_two method]);
+
+    EXPECT_EQ(3, swizzler.InvokeOriginal<int>(object_one, @selector(method)));
+  }
+
+  EXPECT_EQ(3, [object_one method]);
+  EXPECT_EQ(14, [object_two method]);
+}
+
+TEST(ObjCClassSwizzlerTest, SwizzleClassMethods) {
+  EXPECT_EQ(10, [ObjCClassSwizzlerTestOne function]);
+  EXPECT_EQ(20, [ObjCClassSwizzlerTestTwo function]);
+
+  {
+    base::apple::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOne class], [ObjCClassSwizzlerTestTwo class],
+        @selector(function));
+    EXPECT_EQ(20, [ObjCClassSwizzlerTestOne function]);
+    EXPECT_EQ(10, [ObjCClassSwizzlerTestTwo function]);
+
+    EXPECT_EQ(10, swizzler.InvokeOriginal<int>([ObjCClassSwizzlerTestOne class],
+                                               @selector(function)));
+  }
+
+  EXPECT_EQ(10, [ObjCClassSwizzlerTestOne function]);
+  EXPECT_EQ(20, [ObjCClassSwizzlerTestTwo function]);
+}
+
+TEST(ObjCClassSwizzlerTest, SwizzleViaCategory) {
+  ObjCClassSwizzlerTestOne* object_one =
+      [[ObjCClassSwizzlerTestOne alloc] init];
+  EXPECT_EQ(3, [object_one method]);
+
+  {
+    base::apple::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOne class], @selector(method),
+        @selector(alternate));
+    EXPECT_EQ(9, [object_one method]);
+
+    EXPECT_EQ(3, swizzler.InvokeOriginal<int>(object_one, @selector(method)));
+  }
+
+  EXPECT_EQ(3, [object_one method]);
+}
+
+TEST(ObjCClassSwizzlerTest, SwizzleViaInheritance) {
+  ObjCClassSwizzlerTestOneChild* child =
+      [[ObjCClassSwizzlerTestOneChild alloc] init];
+  EXPECT_EQ(3, [child method]);
+
+  {
+    base::apple::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOneChild class], @selector(method),
+        @selector(childAlternate));
+    EXPECT_EQ(15, [child method]);
+
+    EXPECT_EQ(3, swizzler.InvokeOriginal<int>(child, @selector(method)));
+  }
+
+  EXPECT_EQ(3, [child method]);
+}
+
+}  // namespace base::apple
diff --git a/base/apple/scoped_typeref.h b/base/apple/scoped_typeref.h
new file mode 100644
index 0000000..7c2ad41
--- /dev/null
+++ b/base/apple/scoped_typeref.h
@@ -0,0 +1,196 @@
+// Copyright 2014 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_APPLE_SCOPED_TYPEREF_H_
+#define BASE_APPLE_SCOPED_TYPEREF_H_
+
+#include "base/check_op.h"
+#include "base/memory/scoped_policy.h"
+
+namespace base::apple {
+
+// ScopedTypeRef<> is patterned after std::shared_ptr<>, but maintains ownership
+// of a reference to any type that is maintained by Retain and Release methods.
+//
+// The Traits structure must provide the Retain and Release methods for type T.
+// A default ScopedTypeRefTraits is used but not defined, and should be defined
+// for each type to use this interface. For example, an appropriate definition
+// of ScopedTypeRefTraits for CGLContextObj would be:
+//
+//   template<>
+//   struct ScopedTypeRefTraits<CGLContextObj> {
+//     static CGLContextObj InvalidValue() { return nullptr; }
+//     static CGLContextObj Retain(CGLContextObj object) {
+//       CGLContextRetain(object);
+//       return object;
+//     }
+//     static void Release(CGLContextObj object) { CGLContextRelease(object); }
+//   };
+//
+// For the many types that have pass-by-pointer create functions, the function
+// InitializeInto() is provided to allow direct initialization and assumption
+// of ownership of the object. For example, continuing to use the above
+// CGLContextObj specialization:
+//
+//   base::apple::ScopedTypeRef<CGLContextObj> context;
+//   CGLCreateContext(pixel_format, share_group, context.InitializeInto());
+//
+// For initialization with an existing object, the caller may specify whether
+// the ScopedTypeRef<> being initialized is assuming the caller's existing
+// ownership of the object (and should not call Retain in initialization) or if
+// it should not assume this ownership and must create its own (by calling
+// Retain in initialization). This behavior is based on the `policy` parameter,
+// with `ASSUME` for the former and `RETAIN` for the latter. The default policy
+// is to `ASSUME`.
+
+template <typename T>
+struct ScopedTypeRefTraits;
+
+template <typename T, typename Traits = ScopedTypeRefTraits<T>>
+class ScopedTypeRef {
+ public:
+  using element_type = T;
+
+  // Construction from underlying type
+
+  explicit constexpr ScopedTypeRef(
+      element_type object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : object_(object) {
+    if (object_ != Traits::InvalidValue() &&
+        policy == base::scoped_policy::RETAIN) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  // The pattern in the four [copy|move] [constructors|assignment operators]
+  // below is that for each of them there is the standard version for use by
+  // scopers wrapping objects of this type, and a templated version to handle
+  // scopers wrapping objects of subtypes. One might think that one could get
+  // away with only the templated versions, as their templates should match the
+  // usage, but that doesn't work. Having a templated function that matches the
+  // types of, say, a copy constructor doesn't count as a copy constructor, so
+  // the compiler-generated copy constructor would still be used, and it is
+  // incorrect.
+
+  // Copy construction
+
+  ScopedTypeRef(const ScopedTypeRef<T, Traits>& that) : object_(that.get()) {
+    if (object_ != Traits::InvalidValue()) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that) : object_(that.get()) {
+    if (object_ != Traits::InvalidValue()) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  // Copy assignment
+
+  ScopedTypeRef& operator=(const ScopedTypeRef<T, Traits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+    return *this;
+  }
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef& operator=(const ScopedTypeRef<R, RTraits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+    return *this;
+  }
+
+  // Move construction
+
+  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.release()) {}
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef(ScopedTypeRef<R, RTraits>&& that) : object_(that.release()) {}
+
+  // Move assignment
+
+  ScopedTypeRef& operator=(ScopedTypeRef<T, Traits>&& that) {
+    reset(that.release(), base::scoped_policy::ASSUME);
+    return *this;
+  }
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef& operator=(ScopedTypeRef<R, RTraits>&& that) {
+    reset(that.release(), base::scoped_policy::ASSUME);
+    return *this;
+  }
+
+  // Resetting
+
+  template <typename R, typename RTraits>
+  void reset(const ScopedTypeRef<R, RTraits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+  }
+
+  void reset(element_type object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    if (object != Traits::InvalidValue() &&
+        policy == base::scoped_policy::RETAIN) {
+      object = Traits::Retain(object);
+    }
+    if (object_ != Traits::InvalidValue()) {
+      Traits::Release(object_);
+    }
+    object_ = object;
+  }
+
+  // Destruction
+
+  ~ScopedTypeRef() {
+    if (object_ != Traits::InvalidValue()) {
+      Traits::Release(object_);
+    }
+  }
+
+  // This is to be used only to take ownership of objects that are created by
+  // pass-by-pointer create functions. To enforce this, require that this object
+  // be empty before use.
+  [[nodiscard]] element_type* InitializeInto() {
+    CHECK_EQ(object_, Traits::InvalidValue());
+    return &object_;
+  }
+
+  bool operator==(const ScopedTypeRef& that) const {
+    return object_ == that.object_;
+  }
+
+  bool operator!=(const ScopedTypeRef& that) const {
+    return object_ != that.object_;
+  }
+
+  explicit operator bool() const { return object_ != Traits::InvalidValue(); }
+
+  // TODO(https://crbug.com/1495439): Remove.
+  operator element_type() const { return object_; }
+
+  element_type get() const { return object_; }
+
+  void swap(ScopedTypeRef& that) {
+    element_type temp = that.object_;
+    that.object_ = object_;
+    object_ = temp;
+  }
+
+  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
+  // a wrapper for Release().  To force a ScopedTypeRef<> object to call
+  // Release(), use ScopedTypeRef<>::reset().
+  [[nodiscard]] element_type release() {
+    element_type temp = object_;
+    object_ = Traits::InvalidValue();
+    return temp;
+  }
+
+ private:
+  element_type object_;
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_TYPEREF_H_
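
To illustrate the Traits contract beyond the CGLContextObj example in the header comment, here is a sketch for a hypothetical manually ref-counted C type (MyRef, MyRefRetain, MyRefRelease, and CreateMyRef are invented names):

  struct MyRef;
  MyRef* MyRefRetain(MyRef* ref);
  void MyRefRelease(MyRef* ref);

  namespace base::apple {
  template <>
  struct ScopedTypeRefTraits<MyRef*> {
    static MyRef* InvalidValue() { return nullptr; }
    static MyRef* Retain(MyRef* ref) { return MyRefRetain(ref); }
    static void Release(MyRef* ref) { MyRefRelease(ref); }
  };
  }  // namespace base::apple

  // Assumes ownership of the +1 returned by a create function:
  //   base::apple::ScopedTypeRef<MyRef*> ref(CreateMyRef());
  // Takes its own reference instead of assuming the caller's:
  //   base::apple::ScopedTypeRef<MyRef*> copy(ref.get(),
  //                                           base::scoped_policy::RETAIN);
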
diff --git a/base/atomicops_internals_portable.h b/base/atomicops_internals_portable.h
index e3ef3a0..5239062 100644
--- a/base/atomicops_internals_portable.h
+++ b/base/atomicops_internals_portable.h
@@ -29,6 +29,7 @@
 
 #include <atomic>
 
+#include "base/numerics/wrapping_math.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -66,14 +67,15 @@
 
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
-  return increment +
-         ((AtomicLocation32)ptr)
-             ->fetch_add(increment, std::memory_order_relaxed);
+  return base::WrappingAdd(
+      ((AtomicLocation32)ptr)->fetch_add(increment, std::memory_order_relaxed),
+      increment);
 }
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+  return base::WrappingAdd(((AtomicLocation32)ptr)->fetch_add(increment),
+                           increment);
 }
 
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
@@ -116,6 +118,8 @@
 
 #if defined(ARCH_CPU_64_BITS)
 
+using AtomicU64 = std::make_unsigned_t<Atomic64>;
+
 typedef volatile std::atomic<Atomic64>* AtomicLocation64;
 static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
               "incompatible 64-bit atomic layout");
@@ -139,14 +143,15 @@
 
 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
-  return increment +
-         ((AtomicLocation64)ptr)
-             ->fetch_add(increment, std::memory_order_relaxed);
+  return base::WrappingAdd(
+      ((AtomicLocation64)ptr)->fetch_add(increment, std::memory_order_relaxed),
+      increment);
 }
 
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+  return base::WrappingAdd(((AtomicLocation64)ptr)->fetch_add(increment),
+                           increment);
 }
 
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
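
The switch to base::WrappingAdd above avoids signed-overflow UB when an atomic counter wraps. As a rough sketch of the idea (not the actual wrapping_math.h implementation), two's-complement wrapping addition can be written as:

  #include <type_traits>

  template <typename T>
  constexpr T WrappingAddSketch(T a, T b) {
    using U = std::make_unsigned_t<T>;
    // Unsigned arithmetic wraps modulo 2^N; converting back to T is not UB.
    return static_cast<T>(static_cast<U>(a) + static_cast<U>(b));
  }
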
diff --git a/base/atomicops_unittest.cc b/base/atomicops_unittest.cc
index d75651d7..b494bd1 100644
--- a/base/atomicops_unittest.cc
+++ b/base/atomicops_unittest.cc
@@ -177,7 +177,7 @@
 
   AtomicType value;
 
-  if constexpr (std::is_same<AtomicType, base::subtle::Atomic32>::value) {
+  if constexpr (std::is_same_v<AtomicType, base::subtle::Atomic32>) {
     base::subtle::NoBarrier_Store(&value, kVal1);
     EXPECT_EQ(kVal1, value);
     base::subtle::NoBarrier_Store(&value, kVal2);
diff --git a/base/barrier_callback.h b/base/barrier_callback.h
index 7738782..2a1614d 100644
--- a/base/barrier_callback.h
+++ b/base/barrier_callback.h
@@ -92,15 +92,15 @@
 //
 // See also
 // https://chromium.googlesource.com/chromium/src/+/HEAD/docs/callback.md
-template <typename T,
-          typename RawArg = base::remove_cvref_t<T>,
-          typename DoneArg = std::vector<RawArg>,
-          template <typename>
-          class CallbackType,
-          typename std::enable_if<std::is_same<
-              std::vector<RawArg>,
-              base::remove_cvref_t<DoneArg>>::value>::type* = nullptr,
-          typename = base::EnableIfIsBaseCallback<CallbackType>>
+template <
+    typename T,
+    typename RawArg = base::remove_cvref_t<T>,
+    typename DoneArg = std::vector<RawArg>,
+    template <typename>
+    class CallbackType,
+    std::enable_if_t<std::is_same_v<std::vector<RawArg>,
+                                    base::remove_cvref_t<DoneArg>>>* = nullptr,
+    typename = base::EnableIfIsBaseCallback<CallbackType>>
 RepeatingCallback<void(T)> BarrierCallback(
     size_t num_callbacks,
     CallbackType<void(DoneArg)> done_callback) {
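
For reference, a small sketch of how the reformatted BarrierCallback template above is typically used (the lambda body is illustrative):

  #include "base/barrier_callback.h"
  #include "base/functional/bind.h"

  auto barrier = base::BarrierCallback<int>(
      3, base::BindOnce([](std::vector<int> values) {
        // Runs once, after all three values have been delivered.
      }));
  barrier.Run(1);
  barrier.Run(2);
  barrier.Run(3);
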
diff --git a/base/base64.cc b/base/base64.cc
index 1c6e470..4b2fd19 100644
--- a/base/base64.cc
+++ b/base/base64.cc
@@ -50,6 +50,10 @@
   *output = Base64Encode(base::as_bytes(base::make_span(input)));
 }
 
+std::string Base64Encode(StringPiece input) {
+  return Base64Encode(base::as_bytes(base::make_span(input)));
+}
+
 bool Base64Decode(StringPiece input,
                   std::string* output,
                   Base64DecodePolicy policy) {
diff --git a/base/base64.h b/base/base64.h
index 262e158..65d52e6 100644
--- a/base/base64.h
+++ b/base/base64.h
@@ -25,8 +25,13 @@
                                     std::string* output);
 
 // Encodes the input string in base64.
+// DEPRECATED, use `std::string Base64Encode(StringPiece input)` instead.
+// TODO(crbug.com/1486214): Remove this.
 BASE_EXPORT void Base64Encode(StringPiece input, std::string* output);
 
+// Encodes the input string in base64.
+BASE_EXPORT std::string Base64Encode(StringPiece input);
+
 // Decodes the base64 input string.  Returns true if successful and false
 // otherwise. The output string is only modified if successful. The decoding can
 // be done in-place.
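
A short usage note for the new value-returning overload declared above (the expected output matches the existing unit test):

  std::string encoded = base::Base64Encode("hello world");
  // encoded == "aGVsbG8gd29ybGQ=", same result as the deprecated
  // out-parameter form.
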
diff --git a/base/base64_encode_fuzzer.cc b/base/base64_encode_fuzzer.cc
index 1564d0b..bda3989 100644
--- a/base/base64_encode_fuzzer.cc
+++ b/base/base64_encode_fuzzer.cc
@@ -19,9 +19,7 @@
   CHECK_EQ(data_piece, decode_output);
 
   // Also run the StringPiece variant and check that it gives the same results.
-  std::string string_piece_encode_output;
-  base::Base64Encode(data_piece, &string_piece_encode_output);
-  CHECK_EQ(encode_output, string_piece_encode_output);
+  CHECK_EQ(encode_output, base::Base64Encode(data_piece));
 
   return 0;
 }
diff --git a/base/base64_unittest.cc b/base/base64_unittest.cc
index 8a2b08b..727780f 100644
--- a/base/base64_unittest.cc
+++ b/base/base64_unittest.cc
@@ -17,11 +17,10 @@
   const std::string kText = "hello world";
   const std::string kBase64Text = "aGVsbG8gd29ybGQ=";
 
-  std::string encoded;
   std::string decoded;
   bool ok;
 
-  Base64Encode(kText, &encoded);
+  std::string encoded = Base64Encode(kText);
   EXPECT_EQ(kBase64Text, encoded);
 
   ok = Base64Decode(encoded, &decoded);
@@ -120,9 +119,8 @@
 TEST(Base64Test, InPlace) {
   const std::string kText = "hello world";
   const std::string kBase64Text = "aGVsbG8gd29ybGQ=";
-  std::string text(kText);
 
-  Base64Encode(text, &text);
+  std::string text = Base64Encode(kText);
   EXPECT_EQ(kBase64Text, text);
 
   bool ok = Base64Decode(text, &text);
diff --git a/base/base_paths.h b/base/base_paths.h
index 6e7a86e..044a0bb 100644
--- a/base/base_paths.h
+++ b/base/base_paths.h
@@ -71,9 +71,6 @@
   DIR_SRC_TEST_DATA_ROOT,  // The root of files in the source tree that are
                            // made available to tests. Useful for tests that use
                            // resources that exist in the source tree.
-  DIR_SOURCE_ROOT = DIR_SRC_TEST_DATA_ROOT,  // Legacy name still widely used.
-                                             // TODO(crbug.com/1264897): Replace
-                                             // all instances and remove alias.
   DIR_OUT_TEST_DATA_ROOT,  // Path of build outputs available to tests. Build
                            // output files are normally placed directly in the
                            // build output directory on platforms that do not
diff --git a/base/base_paths_ios.mm b/base/base_paths_ios.mm
index 417360a..6ddcd38 100644
--- a/base/base_paths_ios.mm
+++ b/base/base_paths_ios.mm
@@ -8,11 +8,11 @@
 #import <Foundation/Foundation.h>
 
 #include "base/apple/bundle_locations.h"
+#include "base/apple/foundation_util.h"
 #include "base/base_paths.h"
 #include "base/base_paths_apple.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
-#include "base/mac/foundation_util.h"
 #include "base/path_service.h"
 
 namespace base {
@@ -25,7 +25,8 @@
 
     case base::DIR_APP_DATA: {
       base::FilePath path;
-      if (!base::mac::GetUserDirectory(NSApplicationSupportDirectory, &path)) {
+      if (!base::apple::GetUserDirectory(NSApplicationSupportDirectory,
+                                         &path)) {
         return false;
       }
 
@@ -58,7 +59,7 @@
       return true;
 
     case base::DIR_CACHE:
-      return base::mac::GetUserDirectory(NSCachesDirectory, result);
+      return base::apple::GetUserDirectory(NSCachesDirectory, result);
 
     default:
       return false;
diff --git a/base/base_paths_mac.mm b/base/base_paths_mac.mm
index 7b0372b..8f5c8bd 100644
--- a/base/base_paths_mac.mm
+++ b/base/base_paths_mac.mm
@@ -8,11 +8,11 @@
 #import <Foundation/Foundation.h>
 
 #include "base/apple/bundle_locations.h"
+#include "base/apple/foundation_util.h"
 #include "base/base_paths.h"
 #include "base/base_paths_apple.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
-#include "base/mac/foundation_util.h"
 #include "base/notreached.h"
 #include "base/path_service.h"
 
@@ -28,7 +28,7 @@
           result, reinterpret_cast<const void*>(&base::PathProviderMac));
     case base::DIR_APP_DATA: {
       bool success =
-          base::mac::GetUserDirectory(NSApplicationSupportDirectory, result);
+          base::apple::GetUserDirectory(NSApplicationSupportDirectory, result);
       return success;
     }
     case base::DIR_SRC_TEST_DATA_ROOT:
@@ -40,7 +40,7 @@
       // Start with the executable's directory.
       *result = result->DirName();
 
-      if (base::mac::AmIBundled()) {
+      if (base::apple::AmIBundled()) {
         // The bundled app executables (Chromium, TestShell, etc) live five
         // levels down, eg:
         // src/xcodebuild/{Debug|Release}/Chromium.app/Contents/MacOS/Chromium
@@ -52,16 +52,16 @@
       }
       return true;
     case base::DIR_USER_DESKTOP:
-      return base::mac::GetUserDirectory(NSDesktopDirectory, result);
+      return base::apple::GetUserDirectory(NSDesktopDirectory, result);
     case base::DIR_ASSETS:
-      if (!base::mac::AmIBundled()) {
+      if (!base::apple::AmIBundled()) {
         return PathService::Get(base::DIR_MODULE, result);
       }
       *result = base::apple::FrameworkBundlePath().Append(
           FILE_PATH_LITERAL("Resources"));
       return true;
     case base::DIR_CACHE:
-      return base::mac::GetUserDirectory(NSCachesDirectory, result);
+      return base::apple::GetUserDirectory(NSCachesDirectory, result);
     default:
       return false;
   }
diff --git a/base/base_paths_posix.cc b/base/base_paths_posix.cc
index 046b05b..96b2483 100644
--- a/base/base_paths_posix.cc
+++ b/base/base_paths_posix.cc
@@ -22,6 +22,7 @@
 #include "base/nix/xdg_util.h"
 #include "base/notreached.h"
 #include "base/path_service.h"
+#include "base/posix/sysctl.h"
 #include "base/process/process_metrics.h"
 #include "build/build_config.h"
 
@@ -48,16 +49,12 @@
       return true;
 #elif BUILDFLAG(IS_FREEBSD)
       int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
-      char bin_dir[PATH_MAX + 1];
-      size_t length = sizeof(bin_dir);
-      // Upon return, |length| is the number of bytes written to |bin_dir|
-      // including the string terminator.
-      int error = sysctl(name, 4, bin_dir, &length, NULL, 0);
-      if (error < 0 || length <= 1) {
+      absl::optional<std::string> bin_dir = StringSysctl(name, std::size(name));
+      if (!bin_dir.has_value() || bin_dir.value().length() <= 1) {
         NOTREACHED() << "Unable to resolve path.";
         return false;
       }
-      *result = FilePath(FilePath::StringType(bin_dir, length - 1));
+      *result = FilePath(bin_dir.value());
       return true;
 #elif BUILDFLAG(IS_SOLARIS)
       char bin_dir[PATH_MAX + 1];
diff --git a/base/base_switches.cc b/base/base_switches.cc
index f552729..d2ea9e6 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -160,6 +160,24 @@
 // The field trial parameters and their values when testing changes locally.
 const char kForceFieldTrialParams[] = "force-fieldtrial-params";
 
+// When we retrieve the package name within the SDK Runtime, we need to use
+// a bit of a hack, taking advantage of the fact that the SDK Runtime's uid
+// is the application's uid + 10000.
+// See:
+// https://cs.android.com/android/platform/superproject/main/+/main:frameworks/base/core/java/android/os/Process.java;l=292;drc=47fffdd53115a9af1820e3f89d8108745be4b55d
+// When the render process is created, however, it is just a regular isolated
+// process with no particular association, so we can't perform the same hack.
+// When creating minidumps, the package name is retrieved from the process,
+// meaning the render process minidumps would end up reporting a generic
+// process name not associated with the app.
+// We work around this by feeding the host package information through to the
+// render process when launching it.
+const char kHostPackageName[] = "host-package-name";
+const char kHostPackageLabel[] = "host-package-label";
+const char kHostVersionCode[] = "host-version-code";
+const char kPackageName[] = "package-name";
+const char kPackageVersionName[] = "package-version-name";
+const char kPackageVersionCode[] = "package-version-code";
 #endif
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
diff --git a/base/base_switches.h b/base/base_switches.h
index 57873ea..ebc5870 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -54,6 +54,12 @@
 extern const char kDefaultCountryCodeAtInstall[];
 extern const char kEnableIdleTracing[];
 extern const char kForceFieldTrialParams[];
+extern const char kHostPackageName[];
+extern const char kHostPackageLabel[];
+extern const char kHostVersionCode[];
+extern const char kPackageName[];
+extern const char kPackageVersionName[];
+extern const char kPackageVersionCode[];
 #endif
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
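
A hedged sketch of the forwarding described in the comment above (AppendHostPackageInfo and the Get*() value sources are invented names; this change only declares the switches, assumed to live in the usual switches:: namespace):

  void AppendHostPackageInfo(base::CommandLine* renderer_cmd) {
    renderer_cmd->AppendSwitchASCII(switches::kHostPackageName,
                                    GetHostPackageName());   // hypothetical
    renderer_cmd->AppendSwitchASCII(switches::kHostVersionCode,
                                    GetHostVersionCode());   // hypothetical
  }
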
diff --git a/base/big_endian.h b/base/big_endian.h
index 1c46d38..c7845fb 100644
--- a/base/big_endian.h
+++ b/base/big_endian.h
@@ -7,15 +7,51 @@
 
 #include <stddef.h>
 #include <stdint.h>
+#include <string.h>
 #include <type_traits>
 
 #include "base/base_export.h"
 #include "base/containers/span.h"
 #include "base/memory/raw_ptr.h"
 #include "base/strings/string_piece.h"
+#include "base/sys_byteorder.h"
+#include "build/build_config.h"
 
 namespace base {
 
+namespace internal {
+
+// ByteSwapIfLittleEndian performs ByteSwap if this platform is little-endian,
+// otherwise it is a no-op.
+
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+
+template <typename T>
+inline auto ByteSwapIfLittleEndian(T val) -> decltype(ByteSwap(val)) {
+  return ByteSwap(val);
+}
+
+#else
+
+// The use of decltype ensures this is only enabled for types for which
+// ByteSwap() is defined, so the same set of overloads will work on both
+// little-endian and big-endian platforms.
+
+template <typename T>
+inline auto ByteSwapIfLittleEndian(T val) -> decltype(ByteSwap(val)) {
+  return val;
+}
+
+#endif
+
+// We never need to byte-swap a single-byte value, but it's convenient to have
+// this overload to avoid a special case.
+inline uint8_t ByteSwapIfLittleEndian(uint8_t val) {
+  return val;
+}
+
+}  // namespace internal
+
 // Read an integer (signed or unsigned) from |buf| in Big Endian order.
 // Note: this loop is unrolled with -O1 and above.
 // NOTE(szym): glibc dns-canon.c use ntohs(*(uint16_t*)ptr) which is
@@ -23,49 +59,23 @@
 // This would cause SIGBUS on ARMv5 or earlier and ARMv6-M.
 template <typename T>
 inline void ReadBigEndian(const uint8_t buf[], T* out) {
-  static_assert(std::is_integral<T>::value, "T has to be an integral type.");
+  static_assert(std::is_integral_v<T>, "T has to be an integral type.");
   // Make an unsigned version of the output type to make shift possible
   // without UB.
-  typename std::make_unsigned<T>::type unsigned_result = buf[0];
-  for (size_t i = 1; i < sizeof(T); ++i) {
-    unsigned_result <<= 8;
-    // Must cast to uint8_t to avoid clobbering by sign extension.
-    unsigned_result |= buf[i];
-  }
-  *out = unsigned_result;
+  typename std::make_unsigned<T>::type raw;
+  memcpy(&raw, buf, sizeof(T));
+  *out = static_cast<T>(internal::ByteSwapIfLittleEndian(raw));
 }
 
 // Write an integer (signed or unsigned) |val| to |buf| in Big Endian order.
 // Note: this loop is unrolled with -O1 and above.
 template<typename T>
 inline void WriteBigEndian(char buf[], T val) {
-  static_assert(std::is_integral<T>::value, "T has to be an integral type.");
-  auto unsigned_val = static_cast<typename std::make_unsigned<T>::type>(val);
-  for (size_t i = 0; i < sizeof(T); ++i) {
-    buf[sizeof(T) - i - 1] = static_cast<char>(unsigned_val & 0xFF);
-    unsigned_val >>= 8;
-  }
-}
-
-// Specializations to make clang happy about the (dead code) shifts above.
-template <>
-inline void ReadBigEndian<uint8_t>(const uint8_t buf[], uint8_t* out) {
-  *out = buf[0];
-}
-
-template <>
-inline void WriteBigEndian<uint8_t>(char buf[], uint8_t val) {
-  buf[0] = static_cast<char>(val);
-}
-
-template <>
-inline void ReadBigEndian<int8_t>(const uint8_t buf[], int8_t* out) {
-  *out = static_cast<int8_t>(buf[0]);
-}
-
-template <>
-inline void WriteBigEndian<int8_t>(char buf[], int8_t val) {
-  buf[0] = static_cast<char>(val);
+  static_assert(std::is_integral_v<T>, "T has to be an integral type.");
+  const auto unsigned_val =
+      static_cast<typename std::make_unsigned<T>::type>(val);
+  const auto raw = internal::ByteSwapIfLittleEndian(unsigned_val);
+  memcpy(buf, &raw, sizeof(T));
 }
 
 // Allows reading integers in network order (big endian) while iterating over
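
A small usage sketch of the rewritten helpers (behavior is the same as the old loop-based versions; the buffer below is illustrative):

  uint8_t buf[4];
  base::WriteBigEndian(reinterpret_cast<char*>(buf), uint32_t{0x01020304});
  // buf now holds {0x01, 0x02, 0x03, 0x04} regardless of host endianness.
  uint32_t value = 0;
  base::ReadBigEndian(buf, &value);
  // value == 0x01020304; the memcpy keeps unaligned accesses well-defined.
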
diff --git a/base/big_endian_perftest.cc b/base/big_endian_perftest.cc
new file mode 100644
index 0000000..2666bc9
--- /dev/null
+++ b/base/big_endian_perftest.cc
@@ -0,0 +1,113 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/big_endian.h"
+
+#include <stdint.h>
+
+#include "base/check.h"
+#include "base/containers/span.h"
+#include "third_party/google_benchmark/src/include/benchmark/benchmark.h"
+
+namespace base {
+namespace {
+
+constexpr size_t kSize = 128 * 1024 * 1024;
+int64_t aligned_bytes[kSize / sizeof(int64_t)];
+struct {
+  int64_t alignment;
+  char padding_to_cause_misalignment;
+  char bytes[kSize];
+} misaligned_bytes;
+
+void DoNotOptimizeSpan(span<const char> range) {
+  // ::benchmark::DoNotOptimize() generates quite large code, so instead of
+  // calling it for every byte in the range, calculate `sum`, which depends on
+  // every byte in the range, and then call DoNotOptimize() on that.
+  int sum = 0;
+  for (char c : range) {
+    sum += c;
+  }
+  ::benchmark::DoNotOptimize(sum);
+}
+
+template <typename T>
+inline void WriteBigEndianCommon(::benchmark::State& state, char* const start) {
+  size_t offset = 0;
+  T value = 0;
+  for (auto _ : state) {
+    WriteBigEndian(start + offset, value);
+    offset += sizeof(T);
+    static_assert(kSize % sizeof(T) == 0);
+    if (offset == kSize) {
+      offset = 0;
+    }
+    ++value;
+  }
+  DoNotOptimizeSpan({start, kSize});
+}
+
+template <typename T>
+void BM_WriteBigEndianAligned(::benchmark::State& state) {
+  char* const start = reinterpret_cast<char*>(aligned_bytes);
+  CHECK(reinterpret_cast<uintptr_t>(start) % alignof(T) == 0);
+  WriteBigEndianCommon<T>(state, start);
+}
+
+template <typename T>
+void BM_WriteBigEndianMisaligned(::benchmark::State& state) {
+  char* const start = misaligned_bytes.bytes;
+  CHECK(reinterpret_cast<uintptr_t>(start) % alignof(T) != 0);
+  WriteBigEndianCommon<T>(state, start);
+}
+
+template <typename T>
+inline void ReadBigEndianCommon(::benchmark::State& state,
+                                const uint8_t* const start) {
+  size_t offset = 0;
+  for (auto _ : state) {
+    T value;
+    ReadBigEndian(start + offset, &value);
+    ::benchmark::DoNotOptimize(value);
+    offset += sizeof(T);
+    static_assert(kSize % sizeof(T) == 0);
+    if (offset == kSize) {
+      offset = 0;
+    }
+  }
+}
+
+template <typename T>
+void BM_ReadBigEndianAligned(::benchmark::State& state) {
+  const uint8_t* const start = reinterpret_cast<uint8_t*>(aligned_bytes);
+  CHECK(reinterpret_cast<uintptr_t>(start) % alignof(T) == 0);
+  ReadBigEndianCommon<T>(state, start);
+}
+
+template <typename T>
+void BM_ReadBigEndianMisaligned(::benchmark::State& state) {
+  const uint8_t* const start =
+      reinterpret_cast<uint8_t*>(misaligned_bytes.bytes);
+  CHECK(reinterpret_cast<uintptr_t>(start) % alignof(T) != 0);
+  ReadBigEndianCommon<T>(state, start);
+}
+
+#define BENCHMARK_FOR_INT_TYPES(function)            \
+  BENCHMARK(function<int16_t>)->MinWarmUpTime(1.0);  \
+  BENCHMARK(function<uint16_t>)->MinWarmUpTime(1.0); \
+  BENCHMARK(function<int32_t>)->MinWarmUpTime(1.0);  \
+  BENCHMARK(function<uint32_t>)->MinWarmUpTime(1.0); \
+  BENCHMARK(function<int64_t>)->MinWarmUpTime(1.0);  \
+  BENCHMARK(function<uint64_t>)->MinWarmUpTime(1.0); \
+  typedef int force_semicolon
+
+BENCHMARK_FOR_INT_TYPES(BM_WriteBigEndianAligned);
+BENCHMARK_FOR_INT_TYPES(BM_WriteBigEndianMisaligned);
+BENCHMARK_FOR_INT_TYPES(BM_ReadBigEndianAligned);
+BENCHMARK_FOR_INT_TYPES(BM_ReadBigEndianMisaligned);
+
+#undef BENCHMARK_FOR_INT_TYPES
+
+}  // namespace
+}  // namespace base
diff --git a/base/bits.h b/base/bits.h
index dfe6dfd..aa7ff67 100644
--- a/base/bits.h
+++ b/base/bits.h
@@ -22,7 +22,7 @@
 // Returns true iff |value| is a power of 2.
 //
 // TODO(pkasting): When C++20 is available, replace with std::has_single_bit().
-template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
+template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
 constexpr bool IsPowerOfTwo(T value) {
   // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
   //
@@ -42,7 +42,7 @@
 
 // Move |ptr| back to the previous multiple of alignment, which must be a power
 // of two. Defined for types where sizeof(T) is one byte.
-template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
+template <typename T, typename = std::enable_if_t<sizeof(T) == 1>>
 inline T* AlignDown(T* ptr, uintptr_t alignment) {
   return reinterpret_cast<T*>(
       AlignDown(reinterpret_cast<uintptr_t>(ptr), alignment));
@@ -57,7 +57,7 @@
 
 // Advance |ptr| to the next multiple of alignment, which must be a power of
 // two. Defined for types where sizeof(T) is one byte.
-template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
+template <typename T, typename = std::enable_if_t<sizeof(T) == 1>>
 inline T* AlignUp(T* ptr, uintptr_t alignment) {
   return reinterpret_cast<T*>(
       AlignUp(reinterpret_cast<uintptr_t>(ptr), alignment));
@@ -85,8 +85,7 @@
 // do better, but we'll avoid doing that unless we see proof that we need to.
 template <typename T, int bits = sizeof(T) * 8>
 ALWAYS_INLINE constexpr
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
-                            int>::type
+    typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
     CountLeadingZeroBits(T value) {
   static_assert(bits > 0, "invalid instantiation");
   return LIKELY(value)
@@ -98,8 +97,7 @@
 
 template <typename T, int bits = sizeof(T) * 8>
 ALWAYS_INLINE constexpr
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
-                            int>::type
+    typename std::enable_if<std::is_unsigned_v<T> && sizeof(T) <= 8, int>::type
     CountTrailingZeroBits(T value) {
   return LIKELY(value) ? bits == 64
                              ? __builtin_ctzll(static_cast<uint64_t>(value))
@@ -130,7 +128,7 @@
 // Can be used instead of manually shifting a 1 to the left.
 template <typename T>
 constexpr T LeftmostBit() {
-  static_assert(std::is_integral<T>::value,
+  static_assert(std::is_integral_v<T>,
                 "This function can only be used with integral types.");
   T one(1u);
   return one << (8 * sizeof(T) - 1);
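
A brief usage note for the helpers touched above (a sketch; the enable_if cleanups do not change behavior):

  static_assert(base::bits::IsPowerOfTwo(64u), "64 is a power of two");
  // Round a size up to the next 16-byte boundary (the alignment must be a
  // power of two): 30 -> 32.
  constexpr size_t kPadded = base::bits::AlignUp(size_t{30}, size_t{16});
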
diff --git a/base/build_time_unittest.cc b/base/build_time_unittest.cc
index b552a43..318c0f0 100644
--- a/base/build_time_unittest.cc
+++ b/base/build_time_unittest.cc
@@ -21,7 +21,13 @@
 #endif
 }
 
-TEST(BuildTime, InThePast) {
+// Disabled on Android due to flakes; see https://crbug.com/1474884.
+#if BUILDFLAG(IS_ANDROID)
+#define MAYBE_InThePast DISABLED_InThePast
+#else
+#define MAYBE_InThePast InThePast
+#endif
+TEST(BuildTime, MAYBE_InThePast) {
   EXPECT_LT(base::GetBuildTime(), base::Time::Now());
   EXPECT_LT(base::GetBuildTime(), base::Time::NowFromSystemTime());
 }
diff --git a/base/callback_list_nocompile.nc b/base/callback_list_nocompile.nc
new file mode 100644
index 0000000..792a0ff
--- /dev/null
+++ b/base/callback_list_nocompile.nc
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/callback_list.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/functional/bind.h"
+#include "base/functional/callback_helpers.h"
+
+namespace base {
+
+class Foo {
+ public:
+  Foo() {}
+  ~Foo() {}
+};
+
+class FooListener {
+ public:
+  FooListener() = default;
+  FooListener(const FooListener&) = delete;
+  FooListener& operator=(const FooListener&) = delete;
+
+  void GotAScopedFoo(std::unique_ptr<Foo> f) { foo_ = std::move(f); }
+
+  std::unique_ptr<Foo> foo_;
+};
+
+// Callbacks run with a move-only typed parameter.
+//
+// CallbackList does not support move-only typed parameters. Notify() is
+// designed to take zero or more parameters, and run each registered callback
+// with them. With move-only types, the parameter will be set to NULL after the
+// first callback has been run.
+void WontCompile() {
+  FooListener f;
+  RepeatingCallbackList<void(std::unique_ptr<Foo>)> c1;
+  CallbackListSubscription sub =
+      c1.Add(BindRepeating(&FooListener::GotAScopedFoo, Unretained(&f)));
+  c1.Notify(std::unique_ptr<Foo>(new Foo()));  // expected-error@*:* {{call to implicitly-deleted copy constructor of 'std::unique_ptr<base::Foo>'}}
+}
+
+}  // namespace base
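
For contrast with the no-compile case above, here is a hedged sketch of the supported pattern: pass the notification argument by const reference (or by value for copyable types) so that every registered callback can observe it. It reuses only the API already exercised in the test above; `Observer` and `Compiles` are illustrative names.

```cpp
#include "base/callback_list.h"
#include "base/functional/bind.h"

namespace {

class Observer {
 public:
  void OnValue(const int& value) { last_value_ = value; }
  int last_value_ = 0;
};

void Compiles() {
  Observer observer;
  base::RepeatingCallbackList<void(const int&)> callbacks;
  base::CallbackListSubscription sub = callbacks.Add(
      base::BindRepeating(&Observer::OnValue, base::Unretained(&observer)));
  callbacks.Notify(42);  // Every registered callback sees the same value.
}

}  // namespace
```
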
diff --git a/base/callback_list_unittest.nc b/base/callback_list_unittest.nc
deleted file mode 100644
index cc98db5..0000000
--- a/base/callback_list_unittest.nc
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/callback_list.h"
-
-#include <memory>
-#include <utility>
-
-#include "base/functional/bind.h"
-#include "base/functional/callback_helpers.h"
-
-namespace base {
-
-class Foo {
- public:
-  Foo() {}
-  ~Foo() {}
-};
-
-class FooListener {
- public:
-  FooListener() = default;
-  FooListener(const FooListener&) = delete;
-  FooListener& operator=(const FooListener&) = delete;
-
-  void GotAScopedFoo(std::unique_ptr<Foo> f) { foo_ = std::move(f); }
-
-  std::unique_ptr<Foo> foo_;
-};
-
-
-#if defined(NCTEST_MOVE_ONLY_TYPE_PARAMETER)  // [r"fatal error: call to (implicitly-)?deleted( copy)? constructor"]
-
-// Callbacks run with a move-only typed parameter.
-//
-// CallbackList does not support move-only typed parameters. Notify() is
-// designed to take zero or more parameters, and run each registered callback
-// with them. With move-only types, the parameter will be set to NULL after the
-// first callback has been run.
-void WontCompile() {
-  FooListener f;
-  RepeatingCallbackList<void(std::unique_ptr<Foo>)> c1;
-  CallbackListSubscription sub =
-      c1.Add(BindRepeating(&FooListener::GotAScopedFoo, Unretained(&f)));
-  c1.Notify(std::unique_ptr<Foo>(new Foo()));
-}
-
-#endif
-
-}  // namespace base
diff --git a/base/check_op.h b/base/check_op.h
index 1fba78d..0c3aea6 100644
--- a/base/check_op.h
+++ b/base/check_op.h
@@ -69,10 +69,10 @@
 #endif
 
 template <typename T>
-inline typename std::enable_if<
+inline std::enable_if_t<
     base::internal::SupportsOstreamOperator<const T&> &&
-        !std::is_function<typename std::remove_pointer<T>::type>::value,
-    char*>::type
+        !std::is_function_v<typename std::remove_pointer<T>::type>,
+    char*>
 CheckOpValueStr(const T& v) {
   auto f = [](std::ostream& s, const void* p) {
     s << *reinterpret_cast<const T*>(p);
@@ -95,10 +95,9 @@
 
 // Overload for types that have no operator<< but do have .ToString() defined.
 template <typename T>
-inline typename std::enable_if<
-    !base::internal::SupportsOstreamOperator<const T&> &&
-        base::internal::SupportsToString<const T&>,
-    char*>::type
+inline std::enable_if_t<!base::internal::SupportsOstreamOperator<const T&> &&
+                            base::internal::SupportsToString<const T&>,
+                        char*>
 CheckOpValueStr(const T& v) {
   // .ToString() may not return a std::string, e.g. blink::WTF::String.
   return CheckOpValueStr(v.ToString());
@@ -110,9 +109,9 @@
 // standards-conforming here and converts function pointers to regular
 // pointers, so this is a no-op for MSVC.)
 template <typename T>
-inline typename std::enable_if<
-    std::is_function<typename std::remove_pointer<T>::type>::value,
-    char*>::type
+inline std::enable_if_t<
+    std::is_function_v<typename std::remove_pointer<T>::type>,
+    char*>
 CheckOpValueStr(const T& v) {
   return CheckOpValueStr(reinterpret_cast<const void*>(v));
 }
@@ -120,10 +119,9 @@
 // We need overloads for enums that don't support operator<<.
 // (i.e. scoped enums where no operator<< overload was declared).
 template <typename T>
-inline typename std::enable_if<
-    !base::internal::SupportsOstreamOperator<const T&> &&
-        std::is_enum<T>::value,
-    char*>::type
+inline std::enable_if_t<!base::internal::SupportsOstreamOperator<const T&> &&
+                            std::is_enum_v<T>,
+                        char*>
 CheckOpValueStr(const T& v) {
   return CheckOpValueStr(
       static_cast<typename std::underlying_type<T>::type>(v));
@@ -169,27 +167,27 @@
 
 // The second overload avoids address-taking of static members for
 // fundamental types.
-#define DEFINE_CHECK_OP_IMPL(name, op)                                  \
-  template <typename T, typename U,                                     \
-            std::enable_if_t<!std::is_fundamental<T>::value ||          \
-                                 !std::is_fundamental<U>::value,        \
-                             int> = 0>                                  \
-  constexpr char* Check##name##Impl(const T& v1, const U& v2,           \
-                                    const char* expr_str) {             \
-    if (LIKELY(ANALYZER_ASSUME_TRUE(v1 op v2)))                         \
-      return nullptr;                                                   \
-    return CreateCheckOpLogMessageString(expr_str, CheckOpValueStr(v1), \
-                                         CheckOpValueStr(v2));          \
-  }                                                                     \
-  template <typename T, typename U,                                     \
-            std::enable_if_t<std::is_fundamental<T>::value &&           \
-                                 std::is_fundamental<U>::value,         \
-                             int> = 0>                                  \
-  constexpr char* Check##name##Impl(T v1, U v2, const char* expr_str) { \
-    if (LIKELY(ANALYZER_ASSUME_TRUE(v1 op v2)))                         \
-      return nullptr;                                                   \
-    return CreateCheckOpLogMessageString(expr_str, CheckOpValueStr(v1), \
-                                         CheckOpValueStr(v2));          \
+#define DEFINE_CHECK_OP_IMPL(name, op)                                         \
+  template <                                                                   \
+      typename T, typename U,                                                  \
+      std::enable_if_t<!std::is_fundamental_v<T> || !std::is_fundamental_v<U>, \
+                       int> = 0>                                               \
+  constexpr char* Check##name##Impl(const T& v1, const U& v2,                  \
+                                    const char* expr_str) {                    \
+    if (LIKELY(ANALYZER_ASSUME_TRUE(v1 op v2)))                                \
+      return nullptr;                                                          \
+    return CreateCheckOpLogMessageString(expr_str, CheckOpValueStr(v1),        \
+                                         CheckOpValueStr(v2));                 \
+  }                                                                            \
+  template <                                                                   \
+      typename T, typename U,                                                  \
+      std::enable_if_t<std::is_fundamental_v<T> && std::is_fundamental_v<U>,   \
+                       int> = 0>                                               \
+  constexpr char* Check##name##Impl(T v1, U v2, const char* expr_str) {        \
+    if (LIKELY(ANALYZER_ASSUME_TRUE(v1 op v2)))                                \
+      return nullptr;                                                          \
+    return CreateCheckOpLogMessageString(expr_str, CheckOpValueStr(v1),        \
+                                         CheckOpValueStr(v2));                 \
   }
 
 // clang-format off
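
The CheckOpValueStr overloads above choose a stringification strategy through mutually exclusive `enable_if_t` conditions: stream the value if possible, otherwise fall back to `ToString()` or to the enum's underlying type. A minimal standalone sketch of that dispatch; `ValueStr`, `SupportsOstream`, and `SupportsToString` are simplified stand-ins (returning `std::string` instead of the allocated `char*`), not the real base implementation.

```cpp
#include <ostream>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>

// Simplified detection traits.
template <typename T, typename = void>
struct SupportsOstream : std::false_type {};
template <typename T>
struct SupportsOstream<T,
                       std::void_t<decltype(std::declval<std::ostream&>()
                                            << std::declval<const T&>())>>
    : std::true_type {};

template <typename T, typename = void>
struct SupportsToString : std::false_type {};
template <typename T>
struct SupportsToString<
    T,
    std::void_t<decltype(std::declval<const T&>().ToString())>>
    : std::true_type {};

// Preferred overload: the type streams directly.
template <typename T>
std::enable_if_t<SupportsOstream<T>::value, std::string> ValueStr(const T& v) {
  std::ostringstream os;
  os << v;
  return os.str();
}

// Fallback: no operator<<, but the type provides ToString().
template <typename T>
std::enable_if_t<!SupportsOstream<T>::value && SupportsToString<T>::value,
                 std::string>
ValueStr(const T& v) {
  return v.ToString();
}

// Fallback: scoped enums without operator<< print via their underlying type.
template <typename T>
std::enable_if_t<!SupportsOstream<T>::value && std::is_enum_v<T>, std::string>
ValueStr(const T& v) {
  return ValueStr(static_cast<std::underlying_type_t<T>>(v));
}
```
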
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
index 9189266..b153953 100644
--- a/base/command_line_unittest.cc
+++ b/base/command_line_unittest.cc
@@ -34,12 +34,15 @@
 
 namespace base {
 
+#if BUILDFLAG(IS_WIN)
 // To test Windows quoting behavior, we use a string that has some backslashes
 // and quotes.
 // Consider the command-line argument: q\"bs1\bs2\\bs3q\\\"
 // Here it is with C-style escapes.
 static const CommandLine::StringType kTrickyQuoted =
     FILE_PATH_LITERAL("q\\\"bs1\\bs2\\\\bs3q\\\\\\\"");
+#endif
+
 // It should be parsed by Windows as: q"bs1\bs2\\bs3q\"
 // Here that is with C-style escapes.
 static const CommandLine::StringType kTricky =
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index 26bf04f..5caeab5 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -374,6 +374,20 @@
 #define TRIVIAL_ABI
 #endif
 
+// Detect whether a type is trivially relocatable, i.e. whether a
+// move-and-destroy sequence can be replaced with memmove(). This can be used
+// to optimize the implementation of containers. This is automatically true for
+// types that were defined with TRIVIAL_ABI, such as scoped_refptr.
+//
+// See also:
+//   https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p1144r8.html
+//   https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable
+#if defined(__clang__) && HAS_BUILTIN(__is_trivially_relocatable)
+#define IS_TRIVIALLY_RELOCATABLE(t) __is_trivially_relocatable(t)
+#else
+#define IS_TRIVIALLY_RELOCATABLE(t) false
+#endif
+
 // Marks a member function as reinitializing a moved-from variable.
 // See also
 // https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
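
IS_TRIVIALLY_RELOCATABLE is typically consumed by container code that wants to grow its backing store with a plain byte copy instead of per-element moves. A hedged, standalone sketch of that idea; `Relocate` is an illustrative helper, not a Chromium API, and only the macro from the hunk above is assumed.

```cpp
#include <cstddef>
#include <cstring>
#include <new>
#include <utility>

#include "base/compiler_specific.h"  // for IS_TRIVIALLY_RELOCATABLE()

// Moves `count` objects from `src` into uninitialized storage at `dst` and
// destroys the originals. For trivially relocatable types the per-element
// move-then-destroy loop collapses into a single memmove().
template <typename T>
void Relocate(T* dst, T* src, size_t count) {
  if constexpr (IS_TRIVIALLY_RELOCATABLE(T)) {
    std::memmove(dst, src, count * sizeof(T));
  } else {
    for (size_t i = 0; i < count; ++i) {
      new (dst + i) T(std::move(src[i]));
      src[i].~T();
    }
  }
}
```
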
diff --git a/base/containers/DEPS b/base/containers/DEPS
new file mode 100644
index 0000000..91f179a
--- /dev/null
+++ b/base/containers/DEPS
@@ -0,0 +1,6 @@
+specific_include_rules = {
+  # Needed to memory benchmark different containers against each other.
+  "containers_memory_benchmark\.cc": [
+    "+third_party/abseil-cpp/absl/container",
+  ],
+}
diff --git a/base/containers/README.md b/base/containers/README.md
index c76b5d7..54a47bf 100644
--- a/base/containers/README.md
+++ b/base/containers/README.md
@@ -140,7 +140,7 @@
 sizeof(T).
 
 `flat_set` and `flat_map` support a notion of transparent comparisons.
-Therefore you can, for example, lookup `base::StringPiece` in a set of
+Therefore you can, for example, lookup `std::string_view` in a set of
 `std::strings` without constructing a temporary `std::string`. This
 functionality is based on C++14 extensions to the `std::set`/`std::map`
 interface.
@@ -209,7 +209,7 @@
 ```cpp
 constexpr auto kSet = base::MakeFixedFlatSet<int>({1, 2, 3});
 
-constexpr auto kMap = base::MakeFixedFlatMap<base::StringPiece, int>(
+constexpr auto kMap = base::MakeFixedFlatMap<std::string_view, int>(
     {{"foo", 1}, {"bar", 2}, {"baz", 3}});
 ```
 
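
A hedged sketch of the transparent (heterogeneous) lookup described in the README hunk above: with the default `std::less<>` comparator, a `std::string_view` key can probe a `flat_set<std::string>` without materializing a temporary `std::string`. `HasName` is an illustrative function, not part of the library.

```cpp
#include <string>
#include <string_view>

#include "base/containers/flat_set.h"

bool HasName(const base::flat_set<std::string>& names, std::string_view name) {
  // find() accepts the string_view directly; no temporary std::string is built.
  return names.find(name) != names.end();
}
```
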
diff --git a/base/containers/analyze_containers_memory_benchmark.py b/base/containers/analyze_containers_memory_benchmark.py
new file mode 100755
index 0000000..6b7d622
--- /dev/null
+++ b/base/containers/analyze_containers_memory_benchmark.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python3
+# Copyright 2023 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Processes the raw output from containers_memory_benchmark into CSV files. Each CSV
+# file contains the results for all tested container types for a given key and
+# value type.
+#
+# Usage:
+# $ out/release/containers_memory_benchmark &> output.txt
+# $ python3 analyze_containers_memory_benchmark.py < output.txt -o bench-results
+
+import argparse
+from collections.abc import Sequence
+import csv
+import os.path
+import re
+import sys
+from typing import Optional
+
+
+_HEADER_RE = re.compile(r'===== (?P<name>.+) =====')
+_ITER_RE = re.compile(r'iteration (?P<iter>\d+)')
+_ALLOC_RE = re.compile(r'alloc address (?P<alloc_addr>.+) size (?P<size>\d+)')
+_FREED_RE = re.compile(r'freed address (?P<freed_addr>.+)')
+
+
+class ContainerStatsProcessor:
+
+  def __init__(self, name: str):
+    # e.g. base::flat_map
+    self._name = name
+    # current number of elements in the container
+    self._n = None
+    # map of address to size for currently active allocations. Needed because
+    # the free handler only records an address, and not a size.
+    self._addr_to_size = {}
+    # running count of the number of bytes needed at the current iteration
+    self._running_size = 0
+    # map of container size to number of bytes used to store a container of that
+    # size. Keys are expected to be contiguous from 0 to the total iteration
+    # count.
+    self._data = {}
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def data(self):
+    return self._data
+
+  def did_alloc(self, addr: str, size: int):
+    self._addr_to_size[addr] = size
+    self._running_size += size
+
+  def did_free(self, addr: str):
+    size = self._addr_to_size.pop(addr)
+    self._running_size -= size
+
+  def did_iterate(self, n: int):
+    if self._n is not None:
+      self.flush_current_iteration_if_needed()
+    self._n = n
+
+  def flush_current_iteration_if_needed(self):
+    self._data[self._n] = self._running_size
+
+
+class TestCaseProcessor:
+
+  def __init__(self, name: str):
+    # e.g. int -> std::string
+    self._name = name
+    # containers for which all allocation data has been processed and finalized.
+    self._finalized_stats: list[ContainerStatsProcessor] = []
+    # the current container being processed.
+    self._current_container_stats: Optional[ContainerStatsProcessor] = None
+
+  @property
+  def current_container_stats(self):
+    return self._current_container_stats
+
+  def did_begin_container_stats(self, container_type: str):
+    self._finalize_current_container_stats_if_needed()
+    self._current_container_stats = ContainerStatsProcessor(container_type)
+
+  def did_finish_container_stats(self, output_dir: str):
+    self._finalize_current_container_stats_if_needed()
+    with open(
+        os.path.join(output_dir, f'{self._name}.csv'), 'w', newline=''
+    ) as f:
+      writer = csv.writer(f)
+      # First the column headers...
+      writer.writerow(
+          ['size'] + [stats.name for stats in self._finalized_stats]
+      )
+      # In theory, all processed containers should have the same number of keys,
+      # but assert just to be sure.
+      keys = []
+      for stats in self._finalized_stats:
+        if not keys:
+          keys = sorted(stats.data.keys())
+        else:
+          assert keys == sorted(stats.data.keys())
+      for key in keys:
+        writer.writerow(
+            [key] + [stats.data[key] for stats in self._finalized_stats]
+        )
+
+  def _finalize_current_container_stats_if_needed(self):
+    if self._current_container_stats:
+      self._current_container_stats.flush_current_iteration_if_needed()
+      self._finalized_stats.append(self._current_container_stats)
+      self._current_container_stats = None
+
+
+def main(argv: Sequence[str]) -> None:
+  parser = argparse.ArgumentParser(
+      description='Processes raw output from containers_memory_usage into CSVs.'
+  )
+  parser.add_argument(
+      '-o', help='directory to write CSV files to', required=True
+  )
+  args = parser.parse_args()
+
+  # It would be nicer to use a ContextManager, but that complicates splitting up
+  # the input and iterating through it. This is "good enough".
+  processor: Optional[TestCaseProcessor] = None
+
+  for line in sys.stdin:
+    line = line.strip()
+    if '->' in line:
+      if processor:
+        processor.did_finish_container_stats(args.o)
+      processor = TestCaseProcessor(line)
+      continue
+
+    match = _HEADER_RE.match(line)
+    if match:
+      processor.did_begin_container_stats(match.group('name'))
+
+    match = _ITER_RE.match(line)
+    if match:
+      processor.current_container_stats.did_iterate(int(match.group('iter')))
+      continue
+
+    match = _ALLOC_RE.match(line)
+    if match:
+      processor.current_container_stats.did_alloc(
+          match.group('alloc_addr'), int(match.group('size'))
+      )
+      continue
+
+    match = _FREED_RE.match(line)
+    if match:
+      processor.current_container_stats.did_free(match.group('freed_addr'))
+      continue
+
+  if processor:
+    processor.did_finish_container_stats(args.o)
+
+
+if __name__ == '__main__':
+  main(sys.argv)
diff --git a/base/containers/buffer_iterator.h b/base/containers/buffer_iterator.h
index f8a0073..9fdcaba 100644
--- a/base/containers/buffer_iterator.h
+++ b/base/containers/buffer_iterator.h
@@ -58,8 +58,8 @@
 template <typename B>
 class BufferIterator {
  public:
-  static_assert(std::is_same<std::remove_const_t<B>, char>::value ||
-                    std::is_same<std::remove_const_t<B>, unsigned char>::value,
+  static_assert(std::is_same_v<std::remove_const_t<B>, char> ||
+                    std::is_same_v<std::remove_const_t<B>, unsigned char>,
                 "Underlying buffer type must be char-type.");
 
   BufferIterator() {}
diff --git a/base/containers/buffer_iterator_unittest.nc b/base/containers/buffer_iterator_unittest.nc
index ca1f8f3..c99c50e 100644
--- a/base/containers/buffer_iterator_unittest.nc
+++ b/base/containers/buffer_iterator_unittest.nc
@@ -21,7 +21,7 @@
   std::string string_;
 };
 
-#if defined(NCTEST_BUFFER_ITERATOR_CREATE_TYPE_UINT16)  // [r"fatal error: static_assert failed due to requirement 'std::is_same<unsigned short, char>::value || std::is_same<unsigned short, unsigned char>::value': Underlying buffer type must be char-type."]
+#if defined(NCTEST_BUFFER_ITERATOR_CREATE_TYPE_UINT16)  // [r"fatal error: static_assert failed due to requirement 'std::is_same_v<unsigned short, char> || std::is_same_v<unsigned short, unsigned char>': Underlying buffer type must be char-type."]
 
 void WontCompile() {
   constexpr size_t size = 64;
diff --git a/base/containers/checked_iterators.h b/base/containers/checked_iterators.h
index bed8ac6..0c24014 100644
--- a/base/containers/checked_iterators.h
+++ b/base/containers/checked_iterators.h
@@ -58,7 +58,7 @@
   // See https://wg21.link/n4042 for details.
   template <
       typename U,
-      std::enable_if_t<std::is_convertible<U (*)[], T (*)[]>::value>* = nullptr>
+      std::enable_if_t<std::is_convertible_v<U (*)[], T (*)[]>>* = nullptr>
   constexpr CheckedContiguousIterator(const CheckedContiguousIterator<U>& other)
       : start_(other.start_), current_(other.current_), end_(other.end_) {
     // We explicitly don't delegate to the 3-argument constructor here. Its
diff --git a/base/containers/circular_deque.h b/base/containers/circular_deque.h
index 61f2c8e..d1882d7 100644
--- a/base/containers/circular_deque.h
+++ b/base/containers/circular_deque.h
@@ -503,8 +503,7 @@
 
   // This variant should be enabled only when InputIterator is an iterator.
   template <typename InputIterator>
-  typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
-                          void>::type
+  std::enable_if_t<::base::internal::is_iterator<InputIterator>::value, void>
   assign(InputIterator first, InputIterator last) {
     // Possible future enhancement, dispatch on iterator tag type. For forward
     // iterators we can use std::distance to preallocate the space required
@@ -719,8 +718,7 @@
   // This enable_if keeps this call from getting confused with the (pos, count,
   // value) version when value is an integer.
   template <class InputIterator>
-  typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
-                          void>::type
+  std::enable_if_t<::base::internal::is_iterator<InputIterator>::value, void>
   insert(const_iterator pos, InputIterator first, InputIterator last) {
     ValidateIterator(pos);
 
diff --git a/base/containers/circular_deque_unittest.cc b/base/containers/circular_deque_unittest.cc
index 381ab59..d7a9ae3 100644
--- a/base/containers/circular_deque_unittest.cc
+++ b/base/containers/circular_deque_unittest.cc
@@ -5,7 +5,9 @@
 #include "base/containers/circular_deque.h"
 
 #include "base/memory/raw_ptr.h"
+#include "base/memory/scoped_refptr.h"
 #include "base/test/copy_only_int.h"
+#include "base/test/gtest_util.h"
 #include "base/test/move_only_int.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -45,6 +47,20 @@
   raw_ptr<int> counter_;
 };
 
+// This class implements the interface that scoped_refptr expects, but actually
+// just counts the number of reference count changes that are attempted.
+class RefCountChangeCounter {
+ public:
+  void AddRef() { ++ref_count_changes_; }
+
+  void Release() { ++ref_count_changes_; }
+
+  int ref_count_changes() const { return ref_count_changes_; }
+
+ private:
+  int ref_count_changes_ = 0;
+};
+
 }  // namespace
 
 TEST(CircularDeque, FillConstructor) {
@@ -864,9 +880,7 @@
   EXPECT_EQ(&back, &q.back());
 }
 
-/*
-This test should assert in a debug build. It tries to dereference an iterator
-after mutating the container. Uncomment to double-check that this works.
+// This test tries to dereference an iterator after mutating the container.
 TEST(CircularDeque, UseIteratorAfterMutate) {
   circular_deque<int> q;
   q.push_back(0);
@@ -875,8 +889,32 @@
   EXPECT_EQ(0, *old_begin);
 
   q.push_back(1);
-  EXPECT_EQ(0, *old_begin);  // Should DCHECK.
+
+  // This statement is not executed when DCHECKs are disabled.
+  EXPECT_DCHECK_DEATH(*old_begin);
 }
-*/
+
+// This test verifies that a scoped_refptr specifically is moved rather than
+// copied when a circular_deque is resized. It would be extremely inefficient if
+// it were copied in this case.
+TEST(CircularDeque, DoesntChurnRefCount) {
+  static constexpr size_t kCount = 10;
+  RefCountChangeCounter counters[kCount];
+  circular_deque<scoped_refptr<RefCountChangeCounter>> deque;
+  bool checked_capacity = false;
+  for (auto& counter : counters) {
+    deque.push_back(scoped_refptr<RefCountChangeCounter>(&counter));
+    if (!checked_capacity) {
+      // Verify that the deque will have to reallocate.
+      EXPECT_LT(deque.capacity(), kCount);
+      checked_capacity = true;
+    }
+  }
+  // Verify that reallocation has happened.
+  EXPECT_GE(deque.capacity(), kCount);
+  for (const auto& counter : counters) {
+    EXPECT_EQ(1, counter.ref_count_changes());
+  }
+}
 
 }  // namespace base
diff --git a/base/containers/containers_memory_benchmark.cc b/base/containers/containers_memory_benchmark.cc
new file mode 100644
index 0000000..af726fc
--- /dev/null
+++ b/base/containers/containers_memory_benchmark.cc
@@ -0,0 +1,252 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This is a framework to measure the memory overhead of different containers.
+// Under the hood, it works by logging allocations and frees using an allocator
+// hook.
+//
+// Since the free callback does not report a size, and the allocator hooks run
+// in the middle of allocation, the logger takes the simplest approach and
+// logs out the raw data, relying on analyze_containers_memory_benchmark.py to
+// turn the raw output into useful numbers.
+//
+// The output consists of m (number of different key/value combinations being
+// tested) x n (number of different map types being tested) sections:
+//
+// <key type 1> -> <value type 1>
+// ===== <map type 1> =====
+// iteration 0
+// alloc <address 1> size <size 1>
+// iteration 1
+// alloc <address 2> size <size 2>
+// free <address 1>
+// iteration 2
+// alloc <address 3> size <size 3>
+// free <address 2>
+// ...
+// ...
+// ...
+// ===== <map type n> =====
+// iteration 0
+// alloc <address 1000> size <size 1000>
+// iteration 1
+// alloc <address 1001> size <size 1001>
+// free <address 1000>
+// iteration 2
+// alloc <address 1002> size <size 1002>
+// free <address 1001>
+// ...
+// ...
+// ...
+// <key type m> -> <value type m>
+// ===== <map type 1> =====
+// ...
+// ...
+// ===== <map type n> =====
+//
+// Alternate output strategies are possible, but most of them are worse/more
+// complex, and do not eliminate the postprocessing step.
+
+#include <array>
+#include <atomic>
+#include <charconv>
+#include <limits>
+#include <map>
+#include <string>
+#include <unordered_map>
+#include <utility>
+
+#include "base/allocator/dispatcher/dispatcher.h"
+#include "base/containers/flat_map.h"
+#include "base/logging.h"
+#include "base/strings/safe_sprintf.h"
+#include "base/unguessable_token.h"
+#include "base/values.h"
+#include "third_party/abseil-cpp/absl/container/btree_map.h"
+#include "third_party/abseil-cpp/absl/container/flat_hash_map.h"
+#include "third_party/abseil-cpp/absl/container/node_hash_map.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace {
+
+std::atomic<bool> log_allocs_and_frees;
+
+struct AllocationLogger {
+ public:
+  void OnAllocation(void* address,
+                    size_t size,
+                    base::allocator::dispatcher::AllocationSubsystem sub_system,
+                    const char* type_name) {
+    if (log_allocs_and_frees.load(std::memory_order_acquire)) {
+      char buffer[128];
+      // Assume success; ignore return value.
+      base::strings::SafeSPrintf(buffer, "alloc address %p size %d\n", address,
+                                 size);
+      RAW_LOG(INFO, buffer);
+    }
+  }
+
+  void OnFree(void* address) {
+    if (log_allocs_and_frees.load(std::memory_order_acquire)) {
+      char buffer[128];
+      // Assume success; ignore return value.
+      base::strings::SafeSPrintf(buffer, "freed address %p\n", address);
+      RAW_LOG(INFO, buffer);
+    }
+  }
+
+  static void Install() {
+    static AllocationLogger logger;
+    base::allocator::dispatcher::Dispatcher::GetInstance().InitializeForTesting(
+        &logger);
+  }
+};
+
+class ScopedLogAllocAndFree {
+ public:
+  ScopedLogAllocAndFree() {
+    log_allocs_and_frees.store(true, std::memory_order_release);
+  }
+
+  ~ScopedLogAllocAndFree() {
+    log_allocs_and_frees.store(false, std::memory_order_release);
+  }
+};
+
+// Measures the memory usage for a container with type `Container` from 0 to
+// 6857 elements, using `inserter` to insert a single element at a time.
+// `inserter` should be a functor that takes a `Container& container` as its
+// first parameter and a `size_t current_index` as its second parameter.
+//
+// Note that `inserter` can't use `base::FunctionRef` since the inserter is
+// passed through several layers before actually being instantiated below in
+// this function.
+template <typename Container, typename Inserter>
+void MeasureOneContainer(const Inserter& inserter) {
+  char buffer[128];
+
+  RAW_LOG(INFO, "iteration 0");
+  // Record any initial allocations made by an empty container.
+  absl::optional<ScopedLogAllocAndFree> base_size_logger;
+  base_size_logger.emplace();
+  Container c;
+  base_size_logger.reset();
+  // As a hack, also log out sizeof(c) since the initial base size of the
+  // container should be counted too. The exact placeholder used for the address
+  // (in this case "(stack)") isn't important as long as it will not have a
+  // corresponding free line logged for it.
+  base::strings::SafeSPrintf(buffer, "alloc address (stack) size %d",
+                             sizeof(c));
+  RAW_LOG(INFO, buffer);
+
+  // Swisstables resizes the backing store around 6858 elements.
+  for (size_t i = 1; i <= 6857; ++i) {
+    base::strings::SafeSPrintf(buffer, "iteration %d", i);
+    RAW_LOG(INFO, buffer);
+    inserter(c, i);
+  }
+}
+
+// Measures the memory usage for all the container types under test. `inserter`
+// is used to insert a single element at a time into the tested container.
+template <typename K, typename V, typename Inserter>
+void Measure(const Inserter& inserter) {
+  using Hasher = std::conditional_t<std::is_same_v<base::UnguessableToken, K>,
+                                    base::UnguessableTokenHash, std::hash<K>>;
+
+  RAW_LOG(INFO, "===== base::flat_map =====");
+  MeasureOneContainer<base::flat_map<K, V>>(inserter);
+  RAW_LOG(INFO, "===== std::map =====");
+  MeasureOneContainer<std::map<K, V>>(inserter);
+  RAW_LOG(INFO, "===== std::unordered_map =====");
+  MeasureOneContainer<std::unordered_map<K, V, Hasher>>(inserter);
+  RAW_LOG(INFO, "===== absl::btree_map =====");
+  MeasureOneContainer<absl::btree_map<K, V>>(inserter);
+  RAW_LOG(INFO, "===== absl::flat_hash_map =====");
+  MeasureOneContainer<absl::flat_hash_map<K, V, Hasher>>(inserter);
+  RAW_LOG(INFO, "===== absl::node_hash_map =====");
+  MeasureOneContainer<absl::node_hash_map<K, V, Hasher>>(inserter);
+}
+
+}  // namespace
+
+int main() {
+  AllocationLogger::Install();
+
+  RAW_LOG(INFO, "int -> int");
+  Measure<int, int>([](auto& container, size_t i) {
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({i, 0});
+  });
+  RAW_LOG(INFO, "int -> void*");
+  Measure<int, void*>([](auto& container, size_t i) {
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({i, nullptr});
+  });
+  RAW_LOG(INFO, "int -> std::string");
+  Measure<int, std::string>([](auto& container, size_t i) {
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({i, ""});
+  });
+  RAW_LOG(INFO, "size_t -> int");
+  Measure<size_t, int>([](auto& container, size_t i) {
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({i, 0});
+  });
+  RAW_LOG(INFO, "size_t -> void*");
+  Measure<size_t, void*>([](auto& container, size_t i) {
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({i, nullptr});
+  });
+  RAW_LOG(INFO, "size_t -> std::string");
+  Measure<size_t, std::string>([](auto& container, size_t i) {
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({i, ""});
+  });
+  RAW_LOG(INFO, "std::string -> std::string");
+  Measure<std::string, std::string>([](auto& container, size_t i) {
+    std::string key;
+    key.resize(std::numeric_limits<size_t>::digits10 + 1);
+    auto result = std::to_chars(&key.front(), &key.back(), i);
+    key.resize(result.ptr - &key.front());
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({key, ""});
+  });
+  RAW_LOG(INFO, "base::UnguessableToken -> void*");
+  Measure<base::UnguessableToken, void*>([](auto& container, size_t i) {
+    auto token = base::UnguessableToken::Create();
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({token, nullptr});
+  });
+  RAW_LOG(INFO, "base::UnguessableToken -> base::Value");
+  Measure<base::UnguessableToken, base::Value>([](auto& container, size_t i) {
+    auto token = base::UnguessableToken::Create();
+    base::Value value;
+    ScopedLogAllocAndFree scoped_logging;
+    container.insert({token, std::move(value)});
+  });
+  RAW_LOG(INFO, "base::UnguessableToken -> std::array<std::string, 4>");
+  Measure<base::UnguessableToken, std::array<std::string, 4>>(
+      [](auto& container, size_t i) {
+        auto token = base::UnguessableToken::Create();
+        ScopedLogAllocAndFree scoped_logging;
+        container.insert({token, {}});
+      });
+  RAW_LOG(INFO, "base::UnguessableToken -> std::array<std::string, 8>");
+  Measure<base::UnguessableToken, std::array<std::string, 8>>(
+      [](auto& container, size_t i) {
+        auto token = base::UnguessableToken::Create();
+        ScopedLogAllocAndFree scoped_logging;
+        container.insert({token, {}});
+      });
+  RAW_LOG(INFO, "base::UnguessableToken -> std::array<std::string, 16>");
+  Measure<base::UnguessableToken, std::array<std::string, 16>>(
+      [](auto& container, size_t i) {
+        auto token = base::UnguessableToken::Create();
+        ScopedLogAllocAndFree scoped_logging;
+        container.insert({token, {}});
+      });
+
+  return 0;
+}
diff --git a/base/containers/contiguous_iterator.h b/base/containers/contiguous_iterator.h
index 18bd0a8..7886fbb 100644
--- a/base/containers/contiguous_iterator.h
+++ b/base/containers/contiguous_iterator.h
@@ -40,8 +40,13 @@
 // `static_assert(is_trivial_v<value_type>)` inside libc++'s std::basic_string.
 template <typename T>
 struct IsStringIter
-    : std::conjunction<std::is_trivial<iter_value_t<T>>, IsStringIterImpl<T>> {
-};
+    : std::conjunction<
+          std::disjunction<std::is_same<iter_value_t<T>, char>,
+                           std::is_same<iter_value_t<T>, wchar_t>,
+                           std::is_same<iter_value_t<T>, char8_t>,
+                           std::is_same<iter_value_t<T>, char16_t>,
+                           std::is_same<iter_value_t<T>, char32_t>>,
+          IsStringIterImpl<T>> {};
 
 // An iterator to std::array is contiguous.
 // Reference: https://wg21.link/array.overview#1
diff --git a/base/containers/contiguous_iterator_unittest.cc b/base/containers/contiguous_iterator_unittest.cc
index ec23fcd..6ced78b 100644
--- a/base/containers/contiguous_iterator_unittest.cc
+++ b/base/containers/contiguous_iterator_unittest.cc
@@ -23,11 +23,12 @@
 
 TEST(ContiguousIteratorTest, ForwardIterator) {
   using ForwardIterator = std::forward_list<int>::iterator;
-  static_assert(std::is_same<std::forward_iterator_tag,
-                             typename std::iterator_traits<
-                                 ForwardIterator>::iterator_category>::value,
-                "Error: The iterator_category of ForwardIterator is not "
-                "std::forward_iterator_tag.");
+  static_assert(
+      std::is_same_v<
+          std::forward_iterator_tag,
+          typename std::iterator_traits<ForwardIterator>::iterator_category>,
+      "Error: The iterator_category of ForwardIterator is not "
+      "std::forward_iterator_tag.");
   static_assert(
       !IsContiguousIterator<ForwardIterator>::value,
       "Error: ForwardIterator should not be considered a contiguous iterator.");
@@ -44,12 +45,11 @@
 
 TEST(ContiguousIteratorTest, BidirectionalIterator) {
   using BidirectionalIterator = std::set<int>::iterator;
-  static_assert(
-      std::is_same<std::bidirectional_iterator_tag,
-                   typename std::iterator_traits<
-                       BidirectionalIterator>::iterator_category>::value,
-      "Error: The iterator_category of BidirectionalIterator is not "
-      "std::bidirectional_iterator_tag.");
+  static_assert(std::is_same_v<std::bidirectional_iterator_tag,
+                               typename std::iterator_traits<
+                                   BidirectionalIterator>::iterator_category>,
+                "Error: The iterator_category of BidirectionalIterator is not "
+                "std::bidirectional_iterator_tag.");
   static_assert(!IsContiguousIterator<BidirectionalIterator>::value,
                 "Error: BidirectionalIterator should not be considered a "
                 "contiguous iterator.");
@@ -70,12 +70,11 @@
 
 TEST(ContiguousIteratorTest, RandomAccessIterator) {
   using RandomAccessIterator = std::deque<int>::iterator;
-  static_assert(
-      std::is_same<std::random_access_iterator_tag,
-                   typename std::iterator_traits<
-                       RandomAccessIterator>::iterator_category>::value,
-      "Error: The iterator_category of RandomAccessIterator is not "
-      "std::random_access_iterator_tag.");
+  static_assert(std::is_same_v<std::random_access_iterator_tag,
+                               typename std::iterator_traits<
+                                   RandomAccessIterator>::iterator_category>,
+                "Error: The iterator_category of RandomAccessIterator is not "
+                "std::random_access_iterator_tag.");
   static_assert(!IsContiguousIterator<RandomAccessIterator>::value,
                 "Error: RandomAccessIterator should not be considered a "
                 "contiguous iterator.");
diff --git a/base/containers/cxx20_erase_internal.h b/base/containers/cxx20_erase_internal.h
index d86ed77..5f3744b 100644
--- a/base/containers/cxx20_erase_internal.h
+++ b/base/containers/cxx20_erase_internal.h
@@ -5,6 +5,8 @@
 #ifndef BASE_CONTAINERS_CXX20_ERASE_INTERNAL_H_
 #define BASE_CONTAINERS_CXX20_ERASE_INTERNAL_H_
 
+#include <cstddef>
+
 // Internal portion of base/containers/cxx20_erase_*.h. Please include those
 // headers instead of including this directly.
 
diff --git a/base/containers/enum_set.h b/base/containers/enum_set.h
index fb63e16..41070d6 100644
--- a/base/containers/enum_set.h
+++ b/base/containers/enum_set.h
@@ -48,7 +48,7 @@
 class EnumSet {
  private:
   static_assert(
-      std::is_enum<E>::value,
+      std::is_enum_v<E>,
       "First template parameter of EnumSet must be an enumeration type");
   using enum_underlying_type = std::underlying_type_t<E>;
 
@@ -152,7 +152,7 @@
       return i;
     }
 
-    raw_ptr<const EnumBitSet, DanglingUntriaged> enums_;
+    const raw_ptr<const EnumBitSet> enums_;
     size_t i_;
   };
 
@@ -170,7 +170,9 @@
     }
   }
 
-  // Returns an EnumSet with all possible values.
+  // Returns an EnumSet with all values between kMinValue and kMaxValue, which
+  // also contains undefined enum values if the enum in question has gaps
+  // between kMinValue and kMaxValue.
   static constexpr EnumSet All() {
     if (base::is_constant_evaluated()) {
       if (kValueCount == 0) {
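
The revised comment on `All()` warns that the result spans every value between kMinValue and kMaxValue, including numeric values that no enumerator names. A hedged sketch of that behavior; it assumes the usual `EnumSet<E, kMin, kMax>` template parameters and the `Has()` accessor, and `Fruit`/`FruitSet`/`GapExample` are illustrative names.

```cpp
#include "base/containers/enum_set.h"

enum class Fruit { kApple = 0, kBanana = 1, /* 2 intentionally unused */ kCherry = 3 };

using FruitSet = base::EnumSet<Fruit, Fruit::kApple, Fruit::kCherry>;

void GapExample() {
  const FruitSet all = FruitSet::All();
  // All() fills the whole [kApple, kCherry] range, so the unnamed value 2 is
  // reported as present even though the enum defines no such constant.
  bool contains_gap_value = all.Has(static_cast<Fruit>(2));  // true
  (void)contains_gap_value;
}
```
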
diff --git a/base/containers/extend_unittest.cc b/base/containers/extend_unittest.cc
index 2e005fb..d1189dc 100644
--- a/base/containers/extend_unittest.cc
+++ b/base/containers/extend_unittest.cc
@@ -29,8 +29,8 @@
   return a.c_ == b.c_;
 }
 
-static_assert(std::is_move_constructible<NonCopyable>::value, "");
-static_assert(!std::is_copy_constructible<NonCopyable>::value, "");
+static_assert(std::is_move_constructible_v<NonCopyable>, "");
+static_assert(!std::is_copy_constructible_v<NonCopyable>, "");
 
 struct CopyableMovable {
   bool copied_;
diff --git a/base/containers/fixed_flat_map.h b/base/containers/fixed_flat_map.h
index f5e90dd..49a3e47 100644
--- a/base/containers/fixed_flat_map.h
+++ b/base/containers/fixed_flat_map.h
@@ -94,7 +94,7 @@
 // input automatically.
 //
 // Example usage:
-//   constexpr auto kMap = base::MakeFixedFlatMapSorted<base::StringPiece, int>(
+//   constexpr auto kMap = base::MakeFixedFlatMapSorted<std::string_view, int>(
 //       {{"bar", 2}, {"baz", 3}, {"foo", 1}});
 template <class Key, class Mapped, size_t N, class Compare = std::less<>>
 constexpr fixed_flat_map<Key, Mapped, N, Compare> MakeFixedFlatMapSorted(
@@ -116,7 +116,7 @@
 // is large, prefer `MakeFixedFlatMapSorted`.
 //
 // Example usage:
-//   constexpr auto kMap = base::MakeFixedFlatMap<base::StringPiece, int>(
+//   constexpr auto kMap = base::MakeFixedFlatMap<std::string_view, int>(
 //       {{"foo", 1}, {"bar", 2}, {"baz", 3}});
 template <class Key, class Mapped, size_t N, class Compare = std::less<>>
 constexpr fixed_flat_map<Key, Mapped, N, Compare> MakeFixedFlatMap(
diff --git a/base/containers/fixed_flat_set.h b/base/containers/fixed_flat_set.h
index 836f647..c286be4 100644
--- a/base/containers/fixed_flat_set.h
+++ b/base/containers/fixed_flat_set.h
@@ -80,7 +80,7 @@
 // input automatically.
 //
 // Example usage:
-//   constexpr auto kSet = base::MakeFixedFlatSetSorted<base::StringPiece>(
+//   constexpr auto kSet = base::MakeFixedFlatSetSorted<std::string_view>(
 //       {"bar", "baz", "foo", "qux"});
 template <class Key, size_t N, class Compare = std::less<>>
 constexpr fixed_flat_set<Key, N, Compare> MakeFixedFlatSetSorted(
@@ -100,7 +100,7 @@
 //   constexpr auto kIntSet = base::MakeFixedFlatSet<int>({1, 2, 3, 4});
 //
 // Data needs not to be sorted:
-//   constexpr auto kStringSet = base::MakeFixedFlatSet<base::StringPiece>(
+//   constexpr auto kStringSet = base::MakeFixedFlatSet<std::string_view>(
 //       {"foo", "bar", "baz", "qux"});
 //
 // Note: Wrapping `Key` in `std::common_type_t` below requires callers to
diff --git a/base/containers/flat_map.h b/base/containers/flat_map.h
index 618b50d..dc5ec9f 100644
--- a/base/containers/flat_map.h
+++ b/base/containers/flat_map.h
@@ -235,12 +235,12 @@
   iterator insert_or_assign(const_iterator hint, K&& key, M&& obj);
 
   template <class K, class... Args>
-  std::enable_if_t<std::is_constructible<key_type, K&&>::value,
+  std::enable_if_t<std::is_constructible_v<key_type, K&&>,
                    std::pair<iterator, bool>>
   try_emplace(K&& key, Args&&... args);
 
   template <class K, class... Args>
-  std::enable_if_t<std::is_constructible<key_type, K&&>::value, iterator>
+  std::enable_if_t<std::is_constructible_v<key_type, K&&>, iterator>
   try_emplace(const_iterator hint, K&& key, Args&&... args);
 
   // --------------------------------------------------------------------------
@@ -324,7 +324,7 @@
 template <class K, class... Args>
 auto flat_map<Key, Mapped, Compare, Container>::try_emplace(K&& key,
                                                             Args&&... args)
-    -> std::enable_if_t<std::is_constructible<key_type, K&&>::value,
+    -> std::enable_if_t<std::is_constructible_v<key_type, K&&>,
                         std::pair<iterator, bool>> {
   return tree::emplace_key_args(
       key, std::piecewise_construct,
@@ -337,7 +337,7 @@
 auto flat_map<Key, Mapped, Compare, Container>::try_emplace(const_iterator hint,
                                                             K&& key,
                                                             Args&&... args)
-    -> std::enable_if_t<std::is_constructible<key_type, K&&>::value, iterator> {
+    -> std::enable_if_t<std::is_constructible_v<key_type, K&&>, iterator> {
   return tree::emplace_hint_key_args(
              hint, key, std::piecewise_construct,
              std::forward_as_tuple(std::forward<K>(key)),
diff --git a/base/containers/flat_tree.h b/base/containers/flat_tree.h
index abb02d5..7a8e605 100644
--- a/base/containers/flat_tree.h
+++ b/base/containers/flat_tree.h
@@ -1081,7 +1081,7 @@
 template <typename K>
 auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
     const K& key) const -> const_iterator {
-  static_assert(std::is_convertible<const KeyTypeOrK<K>&, const K&>::value,
+  static_assert(std::is_convertible_v<const KeyTypeOrK<K>&, const K&>,
                 "Requested type cannot be bound to the container's key_type "
                 "which is required for a non-transparent compare.");
 
@@ -1115,7 +1115,7 @@
 template <typename K>
 auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
     const K& key) const -> const_iterator {
-  static_assert(std::is_convertible<const KeyTypeOrK<K>&, const K&>::value,
+  static_assert(std::is_convertible_v<const KeyTypeOrK<K>&, const K&>,
                 "Requested type cannot be bound to the container's key_type "
                 "which is required for a non-transparent compare.");
 
diff --git a/base/containers/flat_tree_unittest.cc b/base/containers/flat_tree_unittest.cc
index 00b6f50..a63fb74 100644
--- a/base/containers/flat_tree_unittest.cc
+++ b/base/containers/flat_tree_unittest.cc
@@ -211,14 +211,14 @@
   using MoveThrowsTree = flat_tree<MoveThrows, base::identity, std::less<>,
                                    std::array<MoveThrows, 1>>;
 
-  static_assert(std::is_nothrow_move_constructible<IntTree>::value,
+  static_assert(std::is_nothrow_move_constructible_v<IntTree>,
                 "Error: IntTree is not nothrow move constructible");
-  static_assert(std::is_nothrow_move_assignable<IntTree>::value,
+  static_assert(std::is_nothrow_move_assignable_v<IntTree>,
                 "Error: IntTree is not nothrow move assignable");
 
-  static_assert(!std::is_nothrow_move_constructible<MoveThrowsTree>::value,
+  static_assert(!std::is_nothrow_move_constructible_v<MoveThrowsTree>,
                 "Error: MoveThrowsTree is nothrow move constructible");
-  static_assert(!std::is_nothrow_move_assignable<MoveThrowsTree>::value,
+  static_assert(!std::is_nothrow_move_assignable_v<MoveThrowsTree>,
                 "Error: MoveThrowsTree is nothrow move assignable");
 }
 
@@ -291,14 +291,13 @@
 
 TEST(FlatTree, Types) {
   // These are guaranteed to be portable.
-  static_assert((std::is_same<int, IntTree::key_type>::value), "");
-  static_assert((std::is_same<int, IntTree::value_type>::value), "");
-  static_assert((std::is_same<std::less<>, IntTree::key_compare>::value), "");
-  static_assert((std::is_same<int&, IntTree::reference>::value), "");
-  static_assert((std::is_same<const int&, IntTree::const_reference>::value),
-                "");
-  static_assert((std::is_same<int*, IntTree::pointer>::value), "");
-  static_assert((std::is_same<const int*, IntTree::const_pointer>::value), "");
+  static_assert((std::is_same_v<int, IntTree::key_type>), "");
+  static_assert((std::is_same_v<int, IntTree::value_type>), "");
+  static_assert((std::is_same_v<std::less<>, IntTree::key_compare>), "");
+  static_assert((std::is_same_v<int&, IntTree::reference>), "");
+  static_assert((std::is_same_v<const int&, IntTree::const_reference>), "");
+  static_assert((std::is_same_v<int*, IntTree::pointer>), "");
+  static_assert((std::is_same_v<const int*, IntTree::const_pointer>), "");
 }
 
 // ----------------------------------------------------------------------------
diff --git a/base/containers/intrusive_heap.h b/base/containers/intrusive_heap.h
index acda39b..44087b7 100644
--- a/base/containers/intrusive_heap.h
+++ b/base/containers/intrusive_heap.h
@@ -539,7 +539,7 @@
  private:
   // Templated version of ToIndex that lets insert/erase/Replace work with all
   // integral types.
-  template <typename I, typename = std::enable_if_t<std::is_integral<I>::value>>
+  template <typename I, typename = std::enable_if_t<std::is_integral_v<I>>>
   size_type ToIndex(I pos) {
     return static_cast<size_type>(pos);
   }
diff --git a/base/containers/intrusive_heap_unittest.cc b/base/containers/intrusive_heap_unittest.cc
index f6d83ec..7a54e83 100644
--- a/base/containers/intrusive_heap_unittest.cc
+++ b/base/containers/intrusive_heap_unittest.cc
@@ -143,8 +143,8 @@
 // Used to determine whether or not the "take" operations can be used.
 template <typename T>
 struct NotMovable {
-  static constexpr bool value = !std::is_nothrow_move_constructible<T>::value &&
-                                std::is_copy_constructible<T>::value;
+  static constexpr bool value = !std::is_nothrow_move_constructible_v<T> &&
+                                std::is_copy_constructible_v<T>;
 };
 
 // Invokes "take" if the type is movable, otherwise invokes erase.
@@ -516,11 +516,11 @@
 // default-constructors, move-operations and copy-operations.
 template <typename ValueType, bool D, bool M, bool C>
 void ValidateValueType() {
-  static_assert(std::is_default_constructible<ValueType>::value == D, "oops");
-  static_assert(std::is_move_constructible<ValueType>::value == M, "oops");
-  static_assert(std::is_move_assignable<ValueType>::value == M, "oops");
-  static_assert(std::is_copy_constructible<ValueType>::value == C, "oops");
-  static_assert(std::is_copy_assignable<ValueType>::value == C, "oops");
+  static_assert(std::is_default_constructible_v<ValueType> == D, "oops");
+  static_assert(std::is_move_constructible_v<ValueType> == M, "oops");
+  static_assert(std::is_move_assignable_v<ValueType> == M, "oops");
+  static_assert(std::is_copy_constructible_v<ValueType> == C, "oops");
+  static_assert(std::is_copy_assignable_v<ValueType> == C, "oops");
 }
 
 // A small test element that provides its own HeapHandle storage and implements
diff --git a/base/containers/map_util.h b/base/containers/map_util.h
new file mode 100644
index 0000000..35ce4d7
--- /dev/null
+++ b/base/containers/map_util.h
@@ -0,0 +1,69 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_MAP_UTIL_H_
+#define BASE_CONTAINERS_MAP_UTIL_H_
+
+#include <memory>
+
+namespace base {
+
+namespace internal {
+
+template <typename Map>
+using MappedType = typename Map::mapped_type;
+
+}  // namespace internal
+
+// Returns a pointer to the const value associated with the given key if it
+// exists, or null otherwise.
+template <typename Map, typename Key>
+constexpr const internal::MappedType<Map>* FindOrNull(const Map& map,
+                                                      const Key& key) {
+  auto it = map.find(key);
+  return it != map.end() ? &it->second : nullptr;
+}
+
+// Returns a pointer to the value associated with the given key if it exists, or
+// null otherwise.
+template <typename Map, typename Key>
+constexpr internal::MappedType<Map>* FindOrNull(Map& map, const Key& key) {
+  auto it = map.find(key);
+  return it != map.end() ? &it->second : nullptr;
+}
+
+// Returns the const pointer value associated with the given key. If none is
+// found, null is returned. The function is designed to be used with a map of
+// keys to pointers or smart pointers.
+//
+// This function does not distinguish between a missing key and a key mapped
+// to a null value.
+template <typename Map,
+          typename Key,
+          typename MappedElementType =
+              std::pointer_traits<internal::MappedType<Map>>::element_type>
+constexpr const MappedElementType* FindPtrOrNull(const Map& map,
+                                                 const Key& key) {
+  auto it = map.find(key);
+  return it != map.end() ? std::to_address(it->second) : nullptr;
+}
+
+// Returns the pointer value associated with the given key. If none is found,
+// null is returned. The function is designed to be used with a map of keys to
+// pointers or smart pointers.
+//
+// This function does not distinguish between a missing key and a key mapped
+// to a null value.
+template <typename Map,
+          typename Key,
+          typename MappedElementType =
+              std::pointer_traits<internal::MappedType<Map>>::element_type>
+constexpr MappedElementType* FindPtrOrNull(Map& map, const Key& key) {
+  auto it = map.find(key);
+  return it != map.end() ? std::to_address(it->second) : nullptr;
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_MAP_UTIL_H_
diff --git a/base/containers/map_util_unittest.cc b/base/containers/map_util_unittest.cc
new file mode 100644
index 0000000..14ba76b
--- /dev/null
+++ b/base/containers/map_util_unittest.cc
@@ -0,0 +1,63 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_MAP_UTIL_UNITTEST_CC_
+#define BASE_CONTAINERS_MAP_UTIL_UNITTEST_CC_
+
+#include "base/containers/map_util.h"
+
+#include <memory>
+#include <string>
+
+#include "base/containers/flat_map.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+using testing::AllOf;
+using testing::Eq;
+using testing::Pointee;
+
+constexpr char kKey[] = "key";
+constexpr char kValue[] = "value";
+constexpr char kMissingKey[] = "missing_key";
+
+using StringToStringMap = base::flat_map<std::string, std::string>;
+using StringToStringPtrMap = base::flat_map<std::string, std::string*>;
+using StringToStringUniquePtrMap =
+    base::flat_map<std::string, std::unique_ptr<std::string>>;
+
+TEST(MapUtilTest, FindOrNull) {
+  StringToStringMap mapping({{kKey, kValue}});
+
+  EXPECT_THAT(FindOrNull(mapping, kKey), Pointee(Eq(kValue)));
+  EXPECT_EQ(FindOrNull(mapping, kMissingKey), nullptr);
+}
+
+TEST(MapUtilTest, FindPtrOrNullForPointers) {
+  auto val = std::make_unique<std::string>(kValue);
+
+  StringToStringPtrMap mapping({{kKey, val.get()}});
+
+  EXPECT_THAT(FindPtrOrNull(mapping, kKey),
+              AllOf(Eq(val.get()), Pointee(Eq(kValue))));
+  EXPECT_EQ(FindPtrOrNull(mapping, kMissingKey), nullptr);
+}
+
+TEST(MapUtilTest, FindPtrOrNullForPointerLikeValues) {
+  StringToStringUniquePtrMap mapping;
+  mapping.insert({kKey, std::make_unique<std::string>(kValue)});
+
+  EXPECT_THAT(FindPtrOrNull(mapping, kKey), Pointee(Eq(kValue)));
+  EXPECT_EQ(FindPtrOrNull(mapping, kMissingKey), nullptr);
+}
+
+}  // namespace
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_MAP_UTIL_UNITTEST_CC_
diff --git a/base/containers/span.h b/base/containers/span.h
index c9040f0..9c0fe37 100644
--- a/base/containers/span.h
+++ b/base/containers/span.h
@@ -19,15 +19,17 @@
 #include "base/containers/checked_iterators.h"
 #include "base/containers/contiguous_iterator.h"
 #include "base/cxx20_to_address.h"
-#include "base/memory/raw_ptr_exclusion.h"
-#include "base/numerics/safe_math.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/template_util.h"
 
 namespace base {
 
 // [views.constants]
 constexpr size_t dynamic_extent = std::numeric_limits<size_t>::max();
 
-template <typename T, size_t Extent = dynamic_extent>
+template <typename T,
+          size_t Extent = dynamic_extent,
+          typename InternalPtrType = T*>
 class span;
 
 namespace internal {
@@ -80,8 +82,8 @@
 
 template <typename Iter, typename T>
 using EnableIfCompatibleContiguousIterator = std::enable_if_t<
-    std::conjunction<IsContiguousIterator<Iter>,
-                     IteratorHasConvertibleReferenceType<Iter, T>>::value>;
+    std::conjunction_v<IsContiguousIterator<Iter>,
+                       IteratorHasConvertibleReferenceType<Iter, T>>>;
 
 template <typename Container, typename T>
 using ContainerHasConvertibleData = IsLegalDataConversion<
@@ -162,8 +164,8 @@
 // own the underlying memory, so care must be taken to ensure that a span does
 // not outlive the backing store.
 //
-// span is somewhat analogous to StringPiece, but with arbitrary element types,
-// allowing mutation if T is non-const.
+// span is somewhat analogous to std::string_view, but with arbitrary element
+// types, allowing mutation if T is non-const.
 //
 // span is implicitly convertible from C++ arrays, as well as most [1]
 // container-like types that provide a data() and size() method (such as
@@ -233,7 +235,7 @@
 // appropriate make_span() utility functions are provided.
 
 // [span], class template span
-template <typename T, size_t Extent>
+template <typename T, size_t Extent, typename InternalPtrType>
 class GSL_POINTER span : public internal::ExtentStorage<Extent> {
  private:
   using ExtentStorage = internal::ExtentStorage<Extent>;
@@ -248,9 +250,6 @@
   using reference = T&;
   using const_reference = const T&;
   using iterator = CheckedContiguousIterator<T>;
-  // TODO(https://crbug.com/828324): Drop the const_iterator typedef once gMock
-  // supports containers without this nested type.
-  using const_iterator = iterator;
   using reverse_iterator = std::reverse_iterator<iterator>;
   static constexpr size_t extent = Extent;
 
@@ -286,11 +285,10 @@
     CHECK(Extent == dynamic_extent || Extent == count);
   }
 
-  template <
-      typename It,
-      typename End,
-      typename = internal::EnableIfCompatibleContiguousIterator<It, T>,
-      typename = std::enable_if_t<!std::is_convertible<End, size_t>::value>>
+  template <typename It,
+            typename End,
+            typename = internal::EnableIfCompatibleContiguousIterator<It, T>,
+            typename = std::enable_if_t<!std::is_convertible_v<End, size_t>>>
   constexpr span(It begin, End end) noexcept
       // Subtracting two iterators gives a ptrdiff_t, but the result should be
       // non-negative: see CHECK below.
@@ -437,11 +435,11 @@
 
   // [span.iter], span iterator support
   constexpr iterator begin() const noexcept {
-    return iterator(data_, data_ + size());
+    return iterator(data(), data() + size());
   }
 
   constexpr iterator end() const noexcept {
-    return iterator(data_, data_ + size(), data_ + size());
+    return iterator(data(), data() + size(), data() + size());
   }
 
   constexpr reverse_iterator rbegin() const noexcept {
@@ -455,13 +453,38 @@
  private:
   // This field is not a raw_ptr<> because it was filtered by the rewriter
   // for: #constexpr-ctor-field-initializer, #global-scope, #union
-  RAW_PTR_EXCLUSION T* data_;
+  InternalPtrType data_;
 };
 
 // span<T, Extent>::extent can not be declared inline prior to C++17, hence this
 // definition is required.
-template <class T, size_t Extent>
-constexpr size_t span<T, Extent>::extent;
+template <class T, size_t Extent, typename InternalPtrType>
+constexpr size_t span<T, Extent, InternalPtrType>::extent;
+
+template <typename It,
+          typename T = std::remove_reference_t<iter_reference_t<It>>>
+span(It, StrictNumeric<size_t>) -> span<T>;
+
+template <typename It,
+          typename End,
+          typename = std::enable_if_t<!std::is_convertible_v<End, size_t>>,
+          typename T = std::remove_reference_t<iter_reference_t<It>>>
+span(It, End) -> span<T>;
+
+template <typename T, size_t N>
+span(T (&)[N]) -> span<T, N>;
+
+template <typename T, size_t N>
+span(std::array<T, N>&) -> span<T, N>;
+
+template <typename T, size_t N>
+span(const std::array<T, N>&) -> span<const T, N>;
+
+template <typename Container,
+          typename T = std::remove_pointer_t<
+              decltype(std::data(std::declval<Container>()))>,
+          size_t X = internal::Extent<Container>::value>
+span(Container&&) -> span<T, X>;
 
 // [span.objectrep], views of object representation
 template <typename T, size_t X>
@@ -472,7 +495,7 @@
 
 template <typename T,
           size_t X,
-          typename = std::enable_if_t<!std::is_const<T>::value>>
+          typename = std::enable_if_t<!std::is_const_v<T>>>
 span<uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
 as_writable_bytes(span<T, X> s) noexcept {
   return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
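
The deduction guides added above let callers construct base::span without spelling out the element type or extent. A minimal sketch of what each guide enables at a call site (illustrative only, not part of this CL):

#include <array>
#include <vector>

#include "base/containers/span.h"

void DeductionGuideDemo() {
  std::vector<int> vec = {1, 2, 3};
  int c_array[] = {4, 5, 6};
  std::array<double, 2> std_array = {1.0, 2.0};

  base::span s1(vec);                     // Container&& guide: span<int>
  base::span s2(c_array);                 // T (&)[N] guide: span<int, 3>
  base::span s3(std_array);               // std::array<T, N>& guide: span<double, 2>
  base::span s4(vec.data(), vec.size());  // (It, StrictNumeric<size_t>) guide: span<int>
  base::span s5(vec.begin(), vec.end());  // (It, End) guide: span<int>
}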
diff --git a/base/containers/span_unittest.cc b/base/containers/span_unittest.cc
index e2a02ed..bd78154 100644
--- a/base/containers/span_unittest.cc
+++ b/base/containers/span_unittest.cc
@@ -11,6 +11,7 @@
 #include <memory>
 #include <string>
 #include <type_traits>
+#include <utility>
 #include <vector>
 
 #include "base/containers/adapters.h"
@@ -26,6 +27,132 @@
 
 namespace base {
 
+namespace {
+
+// Tests for the span deduction guides. These tests use a
+// helper function to wrap the static_asserts, as most STL containers don't work
+// well in a constexpr context. std::array<T, N> does, but base::span has
+// specific overloads for std::array<T, N>, so that ends up being less helpful
+// than it would initially appear.
+//
+// Another alternative would be to use std::declval, but that would be fairly
+// verbose.
+[[maybe_unused]] void TestDeductionGuides() {
+  // Tests for span(It, StrictNumeric<size_t>) deduction guide.
+  {
+    const std::vector<int> v;
+    static_assert(
+        std::is_same_v<decltype(span(v.cbegin(), v.size())), span<const int>>);
+    static_assert(
+        std::is_same_v<decltype(span(v.begin(), v.size())), span<const int>>);
+    static_assert(
+        std::is_same_v<decltype(span(v.data(), v.size())), span<const int>>);
+  }
+
+  {
+    std::vector<int> v;
+    static_assert(
+        std::is_same_v<decltype(span(v.cbegin(), v.size())), span<const int>>);
+    static_assert(
+        std::is_same_v<decltype(span(v.begin(), v.size())), span<int>>);
+    static_assert(
+        std::is_same_v<decltype(span(v.data(), v.size())), span<int>>);
+  }
+
+  // Tests for span(It, End) deduction guide.
+  {
+    const std::vector<int> v;
+    static_assert(
+        std::is_same_v<decltype(span(v.cbegin(), v.cend())), span<const int>>);
+    static_assert(
+        std::is_same_v<decltype(span(v.begin(), v.end())), span<const int>>);
+  }
+
+  {
+    std::vector<int> v;
+    static_assert(
+        std::is_same_v<decltype(span(v.cbegin(), v.cend())), span<const int>>);
+    static_assert(
+        std::is_same_v<decltype(span(v.begin(), v.end())), span<int>>);
+  }
+
+  // Tests for span(T (&)[N]) deduction guide.
+  {
+    const int kArray[] = {1, 2, 3};
+    static_assert(std::is_same_v<decltype(span(kArray)), span<const int, 3>>);
+  }
+  {
+    int kArray[] = {1, 2, 3};
+    static_assert(std::is_same_v<decltype(span(kArray)), span<int, 3>>);
+  }
+
+  // Tests for span(std::array<T, N>&) deduction guide.
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<std::array<const bool, 3>&>())),
+                     span<const bool, 3>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<std::array<bool, 3>&>())),
+                     span<bool, 3>>);
+
+  // Tests for span(const std::array<T, N>&) deduction guide.
+  static_assert(
+      std::is_same_v<decltype(span(
+                         std::declval<const std::array<const bool, 3>&>())),
+                     span<const bool, 3>>);
+  static_assert(
+      std::is_same_v<decltype(span(
+                         std::declval<const std::array<const bool, 3>&&>())),
+                     span<const bool, 3>>);
+  static_assert(std::is_same_v<
+                decltype(span(std::declval<std::array<const bool, 3>&&>())),
+                span<const bool, 3>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<const std::array<bool, 3>&>())),
+                     span<const bool, 3>>);
+  static_assert(std::is_same_v<
+                decltype(span(std::declval<const std::array<bool, 3>&&>())),
+                span<const bool, 3>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<std::array<bool, 3>&&>())),
+                     span<const bool, 3>>);
+
+  // Tests for span(Container&&) deduction guide.
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<const std::string&>())),
+                     span<const char>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<const std::string&&>())),
+                     span<const char>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<std::string&>())), span<char>>);
+  static_assert(std::is_same_v<decltype(span(std::declval<std::string&&>())),
+                               span<const char>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<const std::u16string&>())),
+                     span<const char16_t>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<const std::u16string&&>())),
+                     span<const char16_t>>);
+  static_assert(std::is_same_v<decltype(span(std::declval<std::u16string&>())),
+                               span<char16_t>>);
+  static_assert(std::is_same_v<decltype(span(std::declval<std::u16string&&>())),
+                               span<const char16_t>>);
+  static_assert(std::is_same_v<
+                decltype(span(std::declval<const std::array<float, 9>&>())),
+                span<const float, 9>>);
+  static_assert(std::is_same_v<
+                decltype(span(std::declval<const std::array<float, 9>&&>())),
+                span<const float, 9>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<std::array<float, 9>&>())),
+                     span<float, 9>>);
+  static_assert(
+      std::is_same_v<decltype(span(std::declval<std::array<float, 9>&&>())),
+                     span<const float, 9>>);
+}
+
+}  // namespace
+
 TEST(SpanTest, DefaultConstructor) {
   span<int> dynamic_span;
   EXPECT_EQ(nullptr, dynamic_span.data());
@@ -115,47 +242,47 @@
   // In particular we are checking whether From is implicitly convertible to To,
   // which also implies that To is explicitly constructible from From.
   static_assert(
-      std::is_convertible<std::array<int, 3>&, base::span<int>>::value,
+      std::is_convertible_v<std::array<int, 3>&, base::span<int>>,
       "Error: l-value reference to std::array<int> should be convertible to "
       "base::span<int> with dynamic extent.");
   static_assert(
-      std::is_convertible<std::array<int, 3>&, base::span<int, 3>>::value,
+      std::is_convertible_v<std::array<int, 3>&, base::span<int, 3>>,
       "Error: l-value reference to std::array<int> should be convertible to "
       "base::span<int> with the same static extent.");
   static_assert(
-      std::is_convertible<std::array<int, 3>&, base::span<const int>>::value,
+      std::is_convertible_v<std::array<int, 3>&, base::span<const int>>,
       "Error: l-value reference to std::array<int> should be convertible to "
       "base::span<const int> with dynamic extent.");
   static_assert(
-      std::is_convertible<std::array<int, 3>&, base::span<const int, 3>>::value,
+      std::is_convertible_v<std::array<int, 3>&, base::span<const int, 3>>,
       "Error: l-value reference to std::array<int> should be convertible to "
       "base::span<const int> with the same static extent.");
-  static_assert(std::is_convertible<const std::array<int, 3>&,
-                                    base::span<const int>>::value,
-                "Error: const l-value reference to std::array<int> should be "
-                "convertible to base::span<const int> with dynamic extent.");
   static_assert(
-      std::is_convertible<const std::array<int, 3>&,
-                          base::span<const int, 3>>::value,
+      std::is_convertible_v<const std::array<int, 3>&, base::span<const int>>,
+      "Error: const l-value reference to std::array<int> should be "
+      "convertible to base::span<const int> with dynamic extent.");
+  static_assert(
+      std::is_convertible_v<const std::array<int, 3>&,
+                            base::span<const int, 3>>,
       "Error: const l-value reference to std::array<int> should be convertible "
       "to base::span<const int> with the same static extent.");
-  static_assert(std::is_convertible<std::array<const int, 3>&,
-                                    base::span<const int>>::value,
-                "Error: l-value reference to std::array<const int> should be "
-                "convertible to base::span<const int> with dynamic extent.");
   static_assert(
-      std::is_convertible<std::array<const int, 3>&,
-                          base::span<const int, 3>>::value,
+      std::is_convertible_v<std::array<const int, 3>&, base::span<const int>>,
+      "Error: l-value reference to std::array<const int> should be "
+      "convertible to base::span<const int> with dynamic extent.");
+  static_assert(
+      std::is_convertible_v<std::array<const int, 3>&,
+                            base::span<const int, 3>>,
       "Error: l-value reference to std::array<const int> should be convertible "
       "to base::span<const int> with the same static extent.");
   static_assert(
-      std::is_convertible<const std::array<const int, 3>&,
-                          base::span<const int>>::value,
+      std::is_convertible_v<const std::array<const int, 3>&,
+                            base::span<const int>>,
       "Error: const l-value reference to std::array<const int> should be "
       "convertible to base::span<const int> with dynamic extent.");
   static_assert(
-      std::is_convertible<const std::array<const int, 3>&,
-                          base::span<const int, 3>>::value,
+      std::is_convertible_v<const std::array<const int, 3>&,
+                            base::span<const int, 3>>,
       "Error: const l-value reference to std::array<const int> should be "
       "convertible to base::span<const int> with the same static extent.");
 }
@@ -170,33 +297,33 @@
   // Args, which also implies that T is not implicitly constructible from Args
   // as well.
   static_assert(
-      !std::is_constructible<base::span<int>, const std::array<int, 3>&>::value,
+      !std::is_constructible_v<base::span<int>, const std::array<int, 3>&>,
       "Error: base::span<int> with dynamic extent should not be constructible "
       "from const l-value reference to std::array<int>");
 
   static_assert(
-      !std::is_constructible<base::span<int>, std::array<const int, 3>&>::value,
+      !std::is_constructible_v<base::span<int>, std::array<const int, 3>&>,
       "Error: base::span<int> with dynamic extent should not be constructible "
       "from l-value reference to std::array<const int>");
 
   static_assert(
-      !std::is_constructible<base::span<int>,
-                             const std::array<const int, 3>&>::value,
+      !std::is_constructible_v<base::span<int>,
+                               const std::array<const int, 3>&>,
       "Error: base::span<int> with dynamic extent should not be constructible "
       "const from l-value reference to std::array<const int>");
 
   static_assert(
-      !std::is_constructible<base::span<int, 2>, std::array<int, 3>&>::value,
+      !std::is_constructible_v<base::span<int, 2>, std::array<int, 3>&>,
       "Error: base::span<int> with static extent should not be constructible "
       "from l-value reference to std::array<int> with different extent");
 
   static_assert(
-      !std::is_constructible<base::span<int, 4>, std::array<int, 3>&>::value,
+      !std::is_constructible_v<base::span<int, 4>, std::array<int, 3>&>,
       "Error: base::span<int> with dynamic extent should not be constructible "
       "from l-value reference to std::array<int> with different extent");
 
   static_assert(
-      !std::is_constructible<base::span<int>, std::array<bool, 3>&>::value,
+      !std::is_constructible_v<base::span<int>, std::array<bool, 3>&>,
       "Error: base::span<int> with dynamic extent should not be constructible "
       "from l-value reference to std::array<bool>");
 }
@@ -1124,9 +1251,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == dynamic_extent, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromPointerPair) {
@@ -1141,9 +1267,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == dynamic_extent, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromConstexprArray) {
@@ -1153,9 +1278,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == 5, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromStdArray) {
@@ -1165,9 +1289,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == 5, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromConstContainer) {
@@ -1177,9 +1300,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == dynamic_extent, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeStaticSpanFromConstContainer) {
@@ -1189,9 +1311,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == 5, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromContainer) {
@@ -1201,9 +1322,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == dynamic_extent, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeStaticSpanFromContainer) {
@@ -1213,9 +1333,8 @@
   EXPECT_EQ(expected_span.data(), make_span<5>(vector).data());
   EXPECT_EQ(expected_span.size(), make_span<5>(vector).size());
   static_assert(decltype(make_span<5>(vector))::extent == 5, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeStaticSpanFromConstexprContainer) {
@@ -1223,8 +1342,8 @@
   constexpr auto made_span = make_span<12>(str);
   static_assert(str.data() == made_span.data(), "Error: data() does not match");
   static_assert(str.size() == made_span.size(), "Error: size() does not match");
-  static_assert(std::is_same<decltype(str)::value_type,
-                             decltype(made_span)::value_type>::value,
+  static_assert(std::is_same_v<decltype(str)::value_type,
+                               decltype(made_span)::value_type>,
                 "Error: value_type does not match");
   static_assert(str.size() == decltype(made_span)::extent,
                 "Error: extent does not match");
@@ -1241,9 +1360,8 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == dynamic_extent, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeStaticSpanFromRValueContainer) {
@@ -1257,17 +1375,16 @@
   EXPECT_EQ(expected_span.data(), made_span.data());
   EXPECT_EQ(expected_span.size(), made_span.size());
   static_assert(decltype(made_span)::extent == 5, "");
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromDynamicSpan) {
   static constexpr int kArray[] = {1, 2, 3, 4, 5};
   constexpr span<const int> expected_span(kArray);
   constexpr auto made_span = make_span(expected_span);
-  static_assert(std::is_same<decltype(expected_span)::element_type,
-                             decltype(made_span)::element_type>::value,
+  static_assert(std::is_same_v<decltype(expected_span)::element_type,
+                               decltype(made_span)::element_type>,
                 "make_span(span) should have the same element_type as span");
 
   static_assert(expected_span.data() == made_span.data(),
@@ -1279,17 +1396,16 @@
   static_assert(decltype(made_span)::extent == decltype(expected_span)::extent,
                 "make_span(span) should have the same extent as span");
 
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, MakeSpanFromStaticSpan) {
   static constexpr int kArray[] = {1, 2, 3, 4, 5};
   constexpr span<const int, 5> expected_span(kArray);
   constexpr auto made_span = make_span(expected_span);
-  static_assert(std::is_same<decltype(expected_span)::element_type,
-                             decltype(made_span)::element_type>::value,
+  static_assert(std::is_same_v<decltype(expected_span)::element_type,
+                               decltype(made_span)::element_type>,
                 "make_span(span) should have the same element_type as span");
 
   static_assert(expected_span.data() == made_span.data(),
@@ -1301,9 +1417,8 @@
   static_assert(decltype(made_span)::extent == decltype(expected_span)::extent,
                 "make_span(span) should have the same extent as span");
 
-  static_assert(
-      std::is_same<decltype(expected_span), decltype(made_span)>::value,
-      "the type of made_span differs from expected_span!");
+  static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+                "the type of made_span differs from expected_span!");
 }
 
 TEST(SpanTest, EnsureConstexprGoodness) {
@@ -1417,31 +1532,31 @@
   // Statically checks that various conversions between spans of dynamic and
   // static extent are possible or not.
   static_assert(
-      !std::is_constructible<span<int, 0>, span<int>>::value,
+      !std::is_constructible_v<span<int, 0>, span<int>>,
       "Error: static span should not be constructible from dynamic span");
 
-  static_assert(!std::is_constructible<span<int, 2>, span<int, 1>>::value,
+  static_assert(!std::is_constructible_v<span<int, 2>, span<int, 1>>,
                 "Error: static span should not be constructible from static "
                 "span with different extent");
 
-  static_assert(std::is_convertible<span<int, 0>, span<int>>::value,
+  static_assert(std::is_convertible_v<span<int, 0>, span<int>>,
                 "Error: static span should be convertible to dynamic span");
 
-  static_assert(std::is_convertible<span<int>, span<int>>::value,
+  static_assert(std::is_convertible_v<span<int>, span<int>>,
                 "Error: dynamic span should be convertible to dynamic span");
 
-  static_assert(std::is_convertible<span<int, 2>, span<int, 2>>::value,
+  static_assert(std::is_convertible_v<span<int, 2>, span<int, 2>>,
                 "Error: static span should be convertible to static span");
 }
 
 TEST(SpanTest, IteratorConversions) {
-  static_assert(std::is_convertible<span<int>::iterator,
-                                    span<const int>::iterator>::value,
-                "Error: iterator should be convertible to const iterator");
+  static_assert(
+      std::is_convertible_v<span<int>::iterator, span<const int>::iterator>,
+      "Error: iterator should be convertible to const iterator");
 
-  static_assert(!std::is_convertible<span<const int>::iterator,
-                                     span<int>::iterator>::value,
-                "Error: const iterator should not be convertible to iterator");
+  static_assert(
+      !std::is_convertible_v<span<const int>::iterator, span<int>::iterator>,
+      "Error: const iterator should not be convertible to iterator");
 }
 
 TEST(SpanTest, ExtentMacro) {
diff --git a/base/containers/stack_container.h b/base/containers/stack_container.h
deleted file mode 100644
index ba3842a..0000000
--- a/base/containers/stack_container.h
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_CONTAINERS_STACK_CONTAINER_H_
-#define BASE_CONTAINERS_STACK_CONTAINER_H_
-
-// ================== DEPRECATION NOTICE ==================
-// These classes are deprecated and will be removed soon. Use
-// absl::InlinedVector instead. If absl::InlinedVector doesn't fit your use
-// case, please email [email protected] with details.
-
-#include <stddef.h>
-#include <memory>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/memory/raw_ptr_exclusion.h"
-#include "build/build_config.h"
-
-namespace base {
-
-// This allocator can be used with STL containers to provide a stack buffer
-// from which to allocate memory and overflows onto the heap. This stack buffer
-// would be allocated on the stack and allows us to avoid heap operations in
-// some situations.
-//
-// STL likes to make copies of allocators, so the allocator itself can't hold
-// the data. Instead, we make the creator responsible for creating a
-// StackAllocator::Source which contains the data. Copying the allocator
-// merely copies the pointer to this shared source, so all allocators created
-// based on our allocator will share the same stack buffer.
-//
-// This stack buffer implementation is very simple. The first allocation that
-// fits in the stack buffer will use the stack buffer. Any subsequent
-// allocations will not use the stack buffer, even if there is unused room.
-// This makes it appropriate for array-like containers, but the caller should
-// be sure to reserve() in the container up to the stack buffer size. Otherwise
-// the container will allocate a small array which will "use up" the stack
-// buffer.
-template <typename T, size_t stack_capacity, typename FallbackAllocator>
-class StackAllocator : public FallbackAllocator {
- public:
-  using pointer = typename std::allocator_traits<FallbackAllocator>::pointer;
-  using size_type =
-      typename std::allocator_traits<FallbackAllocator>::size_type;
-
-  // Backing store for the allocator. The container owner is responsible for
-  // maintaining this for as long as any containers using this allocator are
-  // live.
-  struct Source {
-    Source() : used_stack_buffer_(false) {
-    }
-
-    // Casts the buffer in its right type.
-    NO_SANITIZE("cfi-unrelated-cast")
-    T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
-    NO_SANITIZE("cfi-unrelated-cast")
-    const T* stack_buffer() const {
-      return reinterpret_cast<const T*>(&stack_buffer_);
-    }
-
-    // The buffer itself. It is not of type T because we don't want the
-    // constructors and destructors to be automatically called. Define a POD
-    // buffer of the right size instead.
-    alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
-#if defined(__GNUC__) && !defined(ARCH_CPU_X86_FAMILY)
-    static_assert(alignof(T) <= 16, "http://crbug.com/115612");
-#endif
-
-    // Set when the stack buffer is used for an allocation. We do not track
-    // how much of the buffer is used, only that somebody is using it.
-    bool used_stack_buffer_;
-  };
-
-  // Used by containers when they want to refer to an allocator of type U.
-  template<typename U>
-  struct rebind {
-    typedef StackAllocator<U, stack_capacity, FallbackAllocator> other;
-  };
-
-  // For the straight up copy c-tor, we can share storage.
-  StackAllocator(
-      const StackAllocator<T, stack_capacity, FallbackAllocator>& rhs)
-      : source_(rhs.source_) {}
-
-  // ISO C++ requires the following constructor to be defined,
-  // and std::vector in VC++2008SP1 Release fails with an error
-  // in the class _Container_base_aux_alloc_real (from <xutility>)
-  // if the constructor does not exist.
-  // For this constructor, we cannot share storage; there's
-  // no guarantee that the Source buffer of Ts is large enough
-  // for Us.
-  // TODO: If we were fancy pants, perhaps we could share storage
-  // iff sizeof(T) == sizeof(U).
-  template <typename U, size_t other_capacity, typename FA>
-  StackAllocator(const StackAllocator<U, other_capacity, FA>& other)
-      : source_(nullptr) {}
-
-  // This constructor must exist. It creates a default allocator that doesn't
-  // actually have a stack buffer. glibc's std::string() will compare the
-  // current allocator against the default-constructed allocator, so this
-  // should be fast.
-  StackAllocator() : source_(nullptr) {}
-
-  explicit StackAllocator(Source* source) : source_(source) {
-  }
-
-  // Actually do the allocation. Use the stack buffer if nobody has used it yet
-  // and the size requested fits. Otherwise, fall through to the standard
-  // allocator.
-  pointer allocate(size_type n) {
-    if (source_ && !source_->used_stack_buffer_ && n <= stack_capacity) {
-      source_->used_stack_buffer_ = true;
-      return source_->stack_buffer();
-    } else {
-      return std::allocator_traits<FallbackAllocator>::allocate(*this, n);
-    }
-  }
-
-  // Free: when trying to free the stack buffer, just mark it as free. For
-  // non-stack-buffer pointers, just fall through to the standard allocator.
-  void deallocate(pointer p, size_type n) {
-    if (source_ && p == source_->stack_buffer())
-      source_->used_stack_buffer_ = false;
-    else
-      std::allocator_traits<FallbackAllocator>::deallocate(*this, p, n);
-  }
-
- private:
-  // `source_` is not a raw_ptr<T> for performance reasons: on-stack pointee.
-  RAW_PTR_EXCLUSION Source* source_;
-};
-
-// A wrapper around STL containers that maintains a stack-sized buffer that the
-// initial capacity of the vector is based on. Growing the container beyond the
-// stack capacity will transparently overflow onto the heap. The container must
-// support reserve().
-//
-// This will not work with std::string since some implementations allocate
-// more bytes than requested in calls to reserve(), forcing the allocation onto
-// the heap.  http://crbug.com/709273
-//
-// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
-// type. This object is really intended to be used only internally. You'll want
-// to use the wrappers below for different types.
-template <typename TContainerType, int stack_capacity>
-class StackContainer {
- public:
-  using ContainerType = TContainerType;
-  using ContainedType = typename ContainerType::value_type;
-  using Allocator = typename ContainerType::allocator_type;
-
-  // Allocator must be constructed before the container!
-  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
-    // Make the container use the stack allocation by reserving our buffer size
-    // before doing anything else.
-    container_.reserve(stack_capacity);
-  }
-  StackContainer(const StackContainer&) = delete;
-  StackContainer& operator=(const StackContainer&) = delete;
-
-  // Getters for the actual container.
-  //
-  // Danger: any copies of this made using the copy constructor must have
-  // shorter lifetimes than the source. The copy will share the same allocator
-  // and therefore the same stack buffer as the original. Use std::copy to
-  // copy into a "real" container for longer-lived objects.
-  ContainerType& container() { return container_; }
-  const ContainerType& container() const { return container_; }
-
-  // Support operator-> to get to the container. This allows nicer syntax like:
-  //   StackContainer<...> foo;
-  //   std::sort(foo->begin(), foo->end());
-  ContainerType* operator->() { return &container_; }
-  const ContainerType* operator->() const { return &container_; }
-
-#ifdef UNIT_TEST
-  // Retrieves the stack source so that unit tests can verify that the
-  // buffer is being used properly.
-  const typename Allocator::Source& stack_data() const {
-    return stack_data_;
-  }
-#endif
-
- protected:
-  typename Allocator::Source stack_data_;
-  NO_UNIQUE_ADDRESS Allocator allocator_;
-  ContainerType container_;
-};
-
-// Range-based iteration support for StackContainer.
-template <typename TContainerType, int stack_capacity>
-auto begin(
-    const StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(begin(stack_container.container())) {
-  return begin(stack_container.container());
-}
-
-template <typename TContainerType, int stack_capacity>
-auto begin(StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(begin(stack_container.container())) {
-  return begin(stack_container.container());
-}
-
-template <typename TContainerType, int stack_capacity>
-auto end(StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(end(stack_container.container())) {
-  return end(stack_container.container());
-}
-
-template <typename TContainerType, int stack_capacity>
-auto end(const StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(end(stack_container.container())) {
-  return end(stack_container.container());
-}
-
-// StackVector -----------------------------------------------------------------
-
-// THIS CLASS IS DEPRECATED. Use absl::InlinedVector instead.
-
-// Example:
-//   StackVector<int, 16> foo;
-//   foo->push_back(22);  // we have overloaded operator->
-//   foo[0] = 10;         // as well as operator[]
-template <typename T,
-          size_t stack_capacity,
-          typename FallbackAllocator = std::allocator<T>>
-class StackVector
-    : public StackContainer<
-          std::vector<T, StackAllocator<T, stack_capacity, FallbackAllocator>>,
-          stack_capacity> {
- public:
-  StackVector()
-      : StackContainer<
-            std::vector<T,
-                        StackAllocator<T, stack_capacity, FallbackAllocator>>,
-            stack_capacity>() {}
-
-  // We need to put this in STL containers sometimes, which requires a copy
-  // constructor. We can't call the regular copy constructor because that will
-  // take the stack buffer from the original. Here, we create an empty object
-  // and make a stack buffer of its own.
-  StackVector(const StackVector<T, stack_capacity, FallbackAllocator>& other)
-      : StackContainer<
-            std::vector<T,
-                        StackAllocator<T, stack_capacity, FallbackAllocator>>,
-            stack_capacity>() {
-    this->container().assign(other->begin(), other->end());
-  }
-
-  StackVector<T, stack_capacity, FallbackAllocator>& operator=(
-      const StackVector<T, stack_capacity, FallbackAllocator>& other) {
-    this->container().assign(other->begin(), other->end());
-    return *this;
-  }
-
-  // Vectors are commonly indexed, which isn't very convenient even with
-  // operator-> (using "->at()" does exception stuff we don't want).
-  T& operator[](size_t i) { return this->container().operator[](i); }
-  const T& operator[](size_t i) const {
-    return this->container().operator[](i);
-  }
-};
-
-}  // namespace base
-
-// Opt out of libc++ container annotations for StackAllocator. It seems to slow
-// down some tests enough to cause timeouts(?) crbug.com/1444659
-#ifdef _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS
-template <typename T, size_t stack_capacity, typename FallbackAllocator>
-struct ::std::__asan_annotate_container_with_allocator<
-    base::StackAllocator<T, stack_capacity, FallbackAllocator>>
-    : ::std::false_type {};
-#endif  // _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS
-
-#endif  // BASE_CONTAINERS_STACK_CONTAINER_H_
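
With stack_container.h deleted, the deprecation notice above points at absl::InlinedVector as the replacement. A hedged migration sketch (not part of this CL; the include path is the one Chromium conventionally uses for Abseil):

#include "third_party/abseil-cpp/absl/container/inlined_vector.h"

void MigrationDemo() {
  // Before: StackVector<int, 16> foo; foo->push_back(22); foo[0] = 10;
  // After: the first 16 elements live inside the object itself; pushing a
  // 17th element transparently moves the storage to the heap.
  absl::InlinedVector<int, 16> foo;
  foo.push_back(22);
  foo[0] = 10;
}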
diff --git a/base/containers/stack_container_unittest.cc b/base/containers/stack_container_unittest.cc
deleted file mode 100644
index 68c6321..0000000
--- a/base/containers/stack_container_unittest.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/containers/stack_container.h"
-
-#include <stddef.h>
-
-#include "base/memory/aligned_memory.h"
-#include "base/memory/raw_ptr.h"
-#include "base/memory/ref_counted.h"
-#include "base/ranges/algorithm.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-namespace {
-
-class Dummy : public RefCounted<Dummy> {
- public:
-  explicit Dummy(int* alive) : alive_(alive) {
-    ++*alive_;
-  }
-
- private:
-  friend class RefCounted<Dummy>;
-
-  ~Dummy() {
-    --*alive_;
-  }
-
-  const raw_ptr<int> alive_;
-};
-
-}  // namespace
-
-TEST(StackContainer, Vector) {
-  const int stack_size = 3;
-  StackVector<int, stack_size> vect;
-  const int* stack_buffer = &vect.stack_data().stack_buffer()[0];
-
-  // The initial |stack_size| elements should appear in the stack buffer.
-  EXPECT_EQ(static_cast<size_t>(stack_size), vect.container().capacity());
-  for (int i = 0; i < stack_size; i++) {
-    vect.container().push_back(i);
-    EXPECT_EQ(stack_buffer, &vect.container()[0]);
-    EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
-  }
-
-  // Adding more elements should push the array onto the heap.
-  for (int i = 0; i < stack_size; i++) {
-    vect.container().push_back(i + stack_size);
-    EXPECT_NE(stack_buffer, &vect.container()[0]);
-    EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
-  }
-
-  // The array should still be in order.
-  for (int i = 0; i < stack_size * 2; i++)
-    EXPECT_EQ(i, vect.container()[i]);
-
-  // Resize to smaller. Our STL implementation won't reallocate in this case,
-  // otherwise it might use our stack buffer. We reserve right after the resize
-  // to guarantee it isn't using the stack buffer, even though it doesn't have
-  // much data.
-  vect.container().resize(stack_size);
-  vect.container().reserve(stack_size * 2);
-  EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
-
-  // Copying the small vector to another should use the same allocator and use
-  // the now-unused stack buffer. GENERALLY CALLERS SHOULD NOT DO THIS since
-  // they have to get the template types just right and it can cause errors.
-  std::vector<int, StackAllocator<int, stack_size, std::allocator<int>>> other(
-      vect.container());
-  EXPECT_EQ(stack_buffer, &other.front());
-  EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
-  for (int i = 0; i < stack_size; i++)
-    EXPECT_EQ(i, other[i]);
-}
-
-TEST(StackContainer, VectorDoubleDelete) {
-  // Regression testing for double-delete.
-  typedef StackVector<scoped_refptr<Dummy>, 2> Vector;
-  Vector vect;
-
-  int alive = 0;
-  scoped_refptr<Dummy> dummy(new Dummy(&alive));
-  EXPECT_EQ(alive, 1);
-
-  vect->push_back(dummy);
-  EXPECT_EQ(alive, 1);
-
-  Dummy* dummy_unref = dummy.get();
-  dummy = nullptr;
-  EXPECT_EQ(alive, 1);
-
-  auto itr = ranges::find(vect, dummy_unref);
-  EXPECT_EQ(itr->get(), dummy_unref);
-  vect->erase(itr);
-  EXPECT_EQ(alive, 0);
-
-  // Shouldn't crash at exit.
-}
-
-namespace {
-
-template <size_t alignment>
-class AlignedData {
- public:
-  AlignedData() { memset(data_, 0, alignment); }
-  ~AlignedData() = default;
-  alignas(alignment) char data_[alignment];
-};
-
-}  // namespace
-
-TEST(StackContainer, BufferAlignment) {
-  StackVector<wchar_t, 16> text;
-  text->push_back(L'A');
-  EXPECT_TRUE(IsAligned(&text[0], alignof(wchar_t)));
-
-  StackVector<double, 1> doubles;
-  doubles->push_back(0.0);
-  EXPECT_TRUE(IsAligned(&doubles[0], alignof(double)));
-
-  StackVector<AlignedData<16>, 1> aligned16;
-  aligned16->push_back(AlignedData<16>());
-  EXPECT_TRUE(IsAligned(&aligned16[0], 16));
-
-#if !defined(__GNUC__) || defined(ARCH_CPU_X86_FAMILY)
-  // It seems that non-X86 gcc doesn't respect greater than 16 byte alignment.
-  // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33721 for details.
-  // TODO(sbc): Re-enable this if GCC starts respecting higher alignments.
-  StackVector<AlignedData<256>, 1> aligned256;
-  aligned256->push_back(AlignedData<256>());
-  EXPECT_TRUE(IsAligned(&aligned256[0], 256));
-#endif
-}
-
-template class StackVector<int, 2>;
-template class StackVector<scoped_refptr<Dummy>, 2>;
-
-template <typename T, size_t size>
-void CheckStackVectorElements(const StackVector<T, size>& vec,
-                              std::initializer_list<T> expected) {
-  auto expected_it = expected.begin();
-  EXPECT_EQ(vec->size(), expected.size());
-  for (T t : vec) {
-    EXPECT_NE(expected.end(), expected_it);
-    EXPECT_EQ(*expected_it, t);
-    ++expected_it;
-  }
-  EXPECT_EQ(expected.end(), expected_it);
-}
-
-TEST(StackContainer, Iteration) {
-  StackVector<int, 3> vect;
-  vect->push_back(7);
-  vect->push_back(11);
-
-  CheckStackVectorElements(vect, {7, 11});
-  for (int& i : vect) {
-    ++i;
-  }
-  CheckStackVectorElements(vect, {8, 12});
-  vect->push_back(13);
-  CheckStackVectorElements(vect, {8, 12, 13});
-  vect->resize(5);
-  CheckStackVectorElements(vect, {8, 12, 13, 0, 0});
-  vect->resize(1);
-  CheckStackVectorElements(vect, {8});
-}
-
-namespace {
-struct Allocator : std::allocator<int> {
-  using Base = std::allocator<int>;
-
-  int* allocate(size_t n) {
-    ++allocated;
-    return Base::allocate(n);
-  }
-  void deallocate(int* p, size_t n) {
-    ++deallocated;
-    Base::deallocate(p, n);
-  }
-
-  static int allocated;
-  static int deallocated;
-};
-
-int Allocator::allocated = 0;
-int Allocator::deallocated = 0;
-}  // namespace
-
-TEST(StackContainer, CustomAllocator) {
-  StackVector<int, 2, Allocator> v;
-
-  EXPECT_EQ(0, Allocator::allocated);
-  EXPECT_EQ(0, Allocator::deallocated);
-
-  v->push_back(1);
-  v->push_back(1);
-  EXPECT_EQ(0, Allocator::allocated);
-  v->push_back(1);
-  EXPECT_EQ(1, Allocator::allocated);
-
-  EXPECT_EQ(0, Allocator::deallocated);
-  v->clear();
-  // shrink_to_fit() makes sure to destroy empty backing store.
-  v->shrink_to_fit();
-  EXPECT_EQ(1, Allocator::deallocated);
-}
-
-}  // namespace base
diff --git a/base/containers/vector_buffer.h b/base/containers/vector_buffer.h
index 324cf89..163953d 100644
--- a/base/containers/vector_buffer.h
+++ b/base/containers/vector_buffer.h
@@ -13,12 +13,12 @@
 
 #include "base/check.h"
 #include "base/check_op.h"
+#include "base/compiler_specific.h"
 #include "base/containers/util.h"
 #include "base/memory/raw_ptr_exclusion.h"
 #include "base/numerics/checked_math.h"
 
-namespace base {
-namespace internal {
+namespace base::internal {
 
 // Internal implementation detail of base/containers.
 //
@@ -97,15 +97,13 @@
 
   // Trivially destructible objects need not have their destructors called.
   template <typename T2 = T,
-            typename std::enable_if<std::is_trivially_destructible<T2>::value,
-                                    int>::type = 0>
+            std::enable_if_t<std::is_trivially_destructible_v<T2>, int> = 0>
   void DestructRange(T* begin, T* end) {}
 
   // Non-trivially destructible objects must have their destructors called
   // individually.
   template <typename T2 = T,
-            typename std::enable_if<!std::is_trivially_destructible<T2>::value,
-                                    int>::type = 0>
+            std::enable_if_t<!std::is_trivially_destructible_v<T2>, int> = 0>
   void DestructRange(T* begin, T* end) {
     CHECK_LE(begin, end);
     while (begin != end) {
@@ -123,11 +121,18 @@
   // and the address of the first element to copy to. There must be sufficient
   // room in the destination for all items in the range [begin, end).
 
-  // Trivially copyable types can use memcpy. trivially copyable implies
+  // Trivially copyable types can use memcpy. Trivially copyable implies
   // that there is a trivial destructor, so we don't have to call it.
-  template <
-      typename T2 = T,
-      typename std::enable_if<std::is_trivially_copyable_v<T2>, int>::type = 0>
+
+  // Trivially relocatable types can also use memcpy. Trivially relocatable
+  // implies that memcpy is equivalent to move + destroy.
+
+  template <typename T2>
+  static inline constexpr bool is_trivially_copyable_or_relocatable =
+      std::is_trivially_copyable_v<T2> || IS_TRIVIALLY_RELOCATABLE(T2);
+
+  template <typename T2 = T,
+            std::enable_if_t<is_trivially_copyable_or_relocatable<T2>, int> = 0>
   static void MoveRange(T* from_begin, T* from_end, T* to) {
     CHECK(!RangesOverlap(from_begin, from_end, to));
 
@@ -139,9 +144,9 @@
   // Not trivially copyable, but movable: call the move constructor and
   // destruct the original.
   template <typename T2 = T,
-            typename std::enable_if<std::is_move_constructible<T2>::value &&
-                                        !std::is_trivially_copyable_v<T2>,
-                                    int>::type = 0>
+            std::enable_if_t<std::is_move_constructible_v<T2> &&
+                                 !is_trivially_copyable_or_relocatable<T2>,
+                             int> = 0>
   static void MoveRange(T* from_begin, T* from_end, T* to) {
     CHECK(!RangesOverlap(from_begin, from_end, to));
     while (from_begin != from_end) {
@@ -155,9 +160,9 @@
   // Not movable, not trivially copyable: call the copy constructor and
   // destruct the original.
   template <typename T2 = T,
-            typename std::enable_if<!std::is_move_constructible<T2>::value &&
-                                        !std::is_trivially_copyable_v<T2>,
-                                    int>::type = 0>
+            std::enable_if_t<!std::is_move_constructible_v<T2> &&
+                                 !is_trivially_copyable_or_relocatable<T2>,
+                             int> = 0>
   static void MoveRange(T* from_begin, T* from_end, T* to) {
     CHECK(!RangesOverlap(from_begin, from_end, to));
     while (from_begin != from_end) {
@@ -187,7 +192,6 @@
   size_t capacity_ = 0;
 };
 
-}  // namespace internal
-}  // namespace base
+}  // namespace base::internal
 
 #endif  // BASE_CONTAINERS_VECTOR_BUFFER_H_
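
The MoveRange overloads above dispatch on whether T may be relocated with memcpy: trivially copyable or trivially relocatable (TRIVIAL_ABI) types take the memcpy path, everything else falls back to move-construct plus destroy. A free-standing sketch of the same dispatch, written with if constexpr purely for illustration and not taken from VectorBuffer itself:

#include <cstring>
#include <new>
#include <type_traits>
#include <utility>

#include "base/compiler_specific.h"  // TRIVIAL_ABI, IS_TRIVIALLY_RELOCATABLE

template <typename T>
void RelocateRange(T* from_begin, T* from_end, T* to) {
  if constexpr (std::is_trivially_copyable_v<T> ||
                IS_TRIVIALLY_RELOCATABLE(T)) {
    // memcpy stands in for move-construct + destroy; no per-element calls.
    memcpy(to, from_begin, (from_end - from_begin) * sizeof(T));
  } else {
    // Per-element: move into the destination, then destroy the source.
    while (from_begin != from_end) {
      new (to++) T(std::move(*from_begin));
      from_begin->~T();
      ++from_begin;
    }
  }
}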
diff --git a/base/containers/vector_buffer_unittest.cc b/base/containers/vector_buffer_unittest.cc
index c822351..05cc055 100644
--- a/base/containers/vector_buffer_unittest.cc
+++ b/base/containers/vector_buffer_unittest.cc
@@ -4,12 +4,47 @@
 
 #include "base/containers/vector_buffer.h"
 
+#include "base/compiler_specific.h"
+#include "base/memory/raw_ptr.h"
 #include "base/test/copy_only_int.h"
 #include "base/test/move_only_int.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-namespace base {
-namespace internal {
+namespace base::internal {
+
+namespace {
+
+class TRIVIAL_ABI TrivialAbiWithCountingOperations {
+ public:
+  TrivialAbiWithCountingOperations(int* destruction_counter, int* move_counter)
+      : destruction_counter_(destruction_counter),
+        move_counter_(move_counter) {}
+
+  ~TrivialAbiWithCountingOperations() { ++*destruction_counter_; }
+
+  // Copy construction and assignment should not be used.
+  TrivialAbiWithCountingOperations(const TrivialAbiWithCountingOperations&) =
+      delete;
+  TrivialAbiWithCountingOperations& operator=(
+      const TrivialAbiWithCountingOperations&) = delete;
+
+  // Count how many times the move constructor is used.
+  TrivialAbiWithCountingOperations(TrivialAbiWithCountingOperations&& rhs)
+      : destruction_counter_(rhs.destruction_counter_),
+        move_counter_(rhs.move_counter_) {
+    ++*move_counter_;
+  }
+
+  // Move assignment should not be used.
+  TrivialAbiWithCountingOperations& operator=(
+      TrivialAbiWithCountingOperations&&) = delete;
+
+ private:
+  raw_ptr<int> destruction_counter_;
+  raw_ptr<int> move_counter_;
+};
+
+}  // namespace
 
 TEST(VectorBuffer, DeletePOD) {
   constexpr int size = 10;
@@ -85,5 +120,32 @@
   }
 }
 
-}  // namespace internal
-}  // namespace base
+TEST(VectorBuffer, TrivialAbiMove) {
+  // Currently trivial relocation doesn't work on Windows for some reason, so
+  // the test needs to handle both cases.
+  constexpr bool kHaveTrivialRelocation =
+      IS_TRIVIALLY_RELOCATABLE(TrivialAbiWithCountingOperations);
+  constexpr int size = 10;
+  VectorBuffer<TrivialAbiWithCountingOperations> dest(size);
+
+  int destruction_count = 0;
+  int move_count = 0;
+  VectorBuffer<TrivialAbiWithCountingOperations> original(size);
+  for (int i = 0; i < size; i++) {
+    new (original.begin() + i)
+        TrivialAbiWithCountingOperations(&destruction_count, &move_count);
+  }
+
+  original.MoveRange(original.begin(), original.end(), dest.begin());
+
+  // We expect the move to have been performed via memcpy, without calling move
+  // constructors or destructors.
+  EXPECT_EQ(destruction_count, kHaveTrivialRelocation ? 0 : size);
+  EXPECT_EQ(move_count, kHaveTrivialRelocation ? 0 : size);
+
+  dest.DestructRange(dest.begin(), dest.end());
+  EXPECT_EQ(destruction_count, kHaveTrivialRelocation ? size : size * 2);
+  EXPECT_EQ(move_count, kHaveTrivialRelocation ? 0 : size);
+}
+
+}  // namespace base::internal
diff --git a/base/cpu_reduction_experiment.cc b/base/cpu_reduction_experiment.cc
index f1584d4..d8291f3 100644
--- a/base/cpu_reduction_experiment.cc
+++ b/base/cpu_reduction_experiment.cc
@@ -20,7 +20,7 @@
 // Whether to enable a series of optimizations that reduce total CPU
 // utilization.
 BASE_FEATURE(kReduceCpuUtilization,
-             "ReduceCpuUtilization",
+             "ReduceCpuUtilization2",
              FEATURE_ENABLED_BY_DEFAULT);
 
 class CpuReductionExperimentSubSampler {
diff --git a/base/cxx20_to_address.h b/base/cxx20_to_address.h
index b9158d2..2582c19 100644
--- a/base/cxx20_to_address.h
+++ b/base/cxx20_to_address.h
@@ -31,7 +31,7 @@
 // Reference: https://wg21.link/pointer.conversion#lib:to_address
 template <typename T>
 constexpr T* to_address(T* p) noexcept {
-  static_assert(!std::is_function<T>::value,
+  static_assert(!std::is_function_v<T>,
                 "Error: T must not be a function type.");
   return p;
 }
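
base::to_address mirrors std::to_address: the raw-pointer overload shown in this hunk returns its argument unchanged (and rejects function pointers at compile time), while a companion overload in the same header unwraps pointer-like types such as iterators. A hedged usage sketch (illustrative names; the pointer-like overload is assumed from the header rather than shown here):

#include <vector>

#include "base/cxx20_to_address.h"

int* FirstElementPointer(std::vector<int>& v) {
  // Raw pointers come back unchanged.
  int* from_data = base::to_address(v.data());
  // Iterators are unwrapped to the underlying pointer; span.h includes this
  // header to do exactly that in its iterator-pair constructor.
  int* from_iter = base::to_address(v.begin());
  return from_data == from_iter ? from_data : nullptr;
}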
diff --git a/base/debug/alias.h b/base/debug/alias.h
index 77b02f0..d28525e 100644
--- a/base/debug/alias.h
+++ b/base/debug/alias.h
@@ -85,21 +85,21 @@
 
 }  // namespace base
 
-// Convenience macro that copies the null-terminated string from |c_str| into a
-// stack-allocated char array named |var_name| that holds up to |char_count|
+// Convenience macro that copies the null-terminated string from `c_str` into a
+// stack-allocated char array named `var_name` that holds up to `array_size - 1`
 // characters and should be preserved in memory dumps.
-#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count) \
-  char var_name[char_count];                              \
-  ::base::strlcpy(var_name, (c_str), sizeof(var_name));   \
+#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, array_size)  \
+  char var_name[array_size] = {};                          \
+  ::base::strlcpy(var_name, (c_str), std::size(var_name)); \
   ::base::debug::Alias(var_name)
 
-#define DEBUG_ALIAS_FOR_U16CSTR(var_name, c_str, char_count)   \
-  char16_t var_name[char_count];                               \
+#define DEBUG_ALIAS_FOR_U16CSTR(var_name, c_str, array_size)   \
+  char16_t var_name[array_size] = {};                          \
   ::base::u16cstrlcpy(var_name, (c_str), std::size(var_name)); \
   ::base::debug::Alias(var_name)
 
-#define DEBUG_ALIAS_FOR_WCHARCSTR(var_name, c_str, char_count) \
-  wchar_t var_name[char_count];                                \
+#define DEBUG_ALIAS_FOR_WCHARCSTR(var_name, c_str, array_size) \
+  wchar_t var_name[array_size] = {};                           \
   ::base::wcslcpy(var_name, (c_str), std::size(var_name));     \
   ::base::debug::Alias(var_name)
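
The reworked macros zero-initialize the stack array, so the aliased copy is NUL-terminated whether array_size is larger or smaller than the source string. A hedged usage sketch (the function and the 128-character budget are illustrative, not from this CL):

#include <string>

#include "base/debug/alias.h"

void RecordFailedSpec(const std::string& spec) {
  // Copies at most 127 characters plus the terminating NUL onto the stack;
  // base::debug::Alias() keeps the array alive so it shows up in crash dumps.
  DEBUG_ALIAS_FOR_CSTR(spec_copy, spec.c_str(), 128);
  // ... crash reporting / dump-without-crashing would happen here ...
}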
 
diff --git a/base/debug/alias_unittest.cc b/base/debug/alias_unittest.cc
index 6aa7421..8d26b83 100644
--- a/base/debug/alias_unittest.cc
+++ b/base/debug/alias_unittest.cc
@@ -11,13 +11,16 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(DebugAlias, Test) {
-  const char kTestString[] = "string contents";
+  constexpr char kTestString[] = "string contents";
+  constexpr auto kTestStringLength = std::string_view(kTestString).size();
   std::unique_ptr<std::string> input =
-      std::make_unique<std::string>("string contents");
+      std::make_unique<std::string>(kTestString);
 
-  // Verify the contents get copied + the new local variable has the right type.
-  DEBUG_ALIAS_FOR_CSTR(copy1, input->c_str(), 100 /* > input->size() */);
-  static_assert(std::is_same_v<decltype(copy1), char[100]>);
+  // Verify the contents get copied + the new local variable has the right type
+  // when the array size given is exactly `input->size() + 1`.
+  DEBUG_ALIAS_FOR_CSTR(copy1, input->c_str(),
+                       kTestStringLength + 1 /* == input->size() + 1 */);
+  static_assert(std::is_same_v<decltype(copy1), char[kTestStringLength + 1]>);
   EXPECT_TRUE(
       std::equal(std::begin(kTestString), std::end(kTestString), copy1));
 
@@ -26,16 +29,35 @@
   DEBUG_ALIAS_FOR_CSTR(copy2, input->c_str(), 3 /* < input->size() */);
   static_assert(std::is_same_v<decltype(copy2), char[3]>);
   EXPECT_TRUE(std::equal(std::begin(copy2), std::end(copy2), "st"));
+
+  // Verify that the copy is properly null-terminated even when it is larger
+  // than the input string.
+  DEBUG_ALIAS_FOR_CSTR(copy3, input->c_str(), 100 /* > input->size() + 1 */);
+  static_assert(std::is_same_v<decltype(copy3), char[100]>);
+  EXPECT_TRUE(
+      std::equal(std::begin(kTestString), std::end(kTestString), copy3));
 }
 
 TEST(DebugAlias, U16String) {
-  const char16_t kTestString[] = u"H͟e͟l͟l͟o͟ ͟w͟o͟r͟l͟d͟!͟";
+  constexpr char16_t kTestString[] = u"H͟e͟l͟l͟o͟ ͟w͟o͟r͟l͟d͟!͟";
+  constexpr auto kTestStringLength = std::u16string_view(kTestString).size();
   std::u16string input = kTestString;
 
-  DEBUG_ALIAS_FOR_U16CSTR(aliased_copy, input.c_str(), 100);
-  static_assert(std::is_same_v<decltype(aliased_copy), char16_t[100]>);
+  // Verify the contents get copied + the new local variable has the right type
+  // when the array size given is exactly `input.size() + 1`.
+  DEBUG_ALIAS_FOR_U16CSTR(aliased_copy, input.c_str(), kTestStringLength + 1);
+  static_assert(
+      std::is_same_v<decltype(aliased_copy), char16_t[kTestStringLength + 1]>);
   EXPECT_TRUE(
       std::equal(std::begin(kTestString), std::end(kTestString), aliased_copy));
+
+  // Verify that the copy is properly null-terminated even when it is larger
+  // than the input string.
+  DEBUG_ALIAS_FOR_U16CSTR(aliased_copy2, input.c_str(),
+                          100 /* > input.size() + 1 */);
+  static_assert(std::is_same_v<decltype(aliased_copy2), char16_t[100]>);
+  EXPECT_TRUE(std::equal(std::begin(kTestString), std::end(kTestString),
+                         aliased_copy2));
 }
 
 TEST(DebugAlias, U16StringPartialCopy) {
diff --git a/base/debug/allocation_trace.h b/base/debug/allocation_trace.h
index 36f7d54..229ed9e 100644
--- a/base/debug/allocation_trace.h
+++ b/base/debug/allocation_trace.h
@@ -11,7 +11,7 @@
 #include <cstdint>
 
 #include "base/allocator/dispatcher/subsystem.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h"
 #include "base/base_export.h"
 #include "base/bits.h"
 #include "base/compiler_specific.h"
@@ -50,7 +50,13 @@
 // any of the Initialize*-functions while another thread A is currently
 // initializing, B's invocations shall immediately return |false| without
 // interfering with thread A.
-struct BASE_EXPORT OperationRecord {
+class BASE_EXPORT OperationRecord {
+ public:
+  constexpr OperationRecord() = default;
+
+  OperationRecord(const OperationRecord&) = delete;
+  OperationRecord& operator=(const OperationRecord&) = delete;
+
   // Is the record currently being taken?
   bool IsRecording() const;
 
@@ -198,7 +204,13 @@
 //
 // TODO(https://crbug.com/1419908): Evaluate the impact of the shared cache
 // lines between entries.
-struct BASE_EXPORT AllocationTraceRecorder {
+class BASE_EXPORT AllocationTraceRecorder {
+ public:
+  constexpr AllocationTraceRecorder() = default;
+
+  AllocationTraceRecorder(const AllocationTraceRecorder&) = delete;
+  AllocationTraceRecorder& operator=(const AllocationTraceRecorder&) = delete;
+
   // The allocation event observer interface. See the dispatcher for further
   // details. The functions are marked NO_INLINE. All other functions called but
   // the one taking the call stack are marked ALWAYS_INLINE. This way we ensure
diff --git a/base/debug/allocation_trace_reporting.h b/base/debug/allocation_trace_reporting.h
index 1fc3b87..16f622a 100644
--- a/base/debug/allocation_trace_reporting.h
+++ b/base/debug/allocation_trace_reporting.h
@@ -14,7 +14,7 @@
 #include "base/time/time.h"
 
 namespace base::debug::tracer {
-struct AllocationTraceRecorder;
+class AllocationTraceRecorder;
 class SequenceSpecificData;
 
 // The reporting for AllocationTraceRecorder starts a reporting task which
diff --git a/base/debug/crash_logging.h b/base/debug/crash_logging.h
index c492b27..5069651 100644
--- a/base/debug/crash_logging.h
+++ b/base/debug/crash_logging.h
@@ -172,7 +172,7 @@
                                    ::base::debug::CrashKeySize::Size1024)
 
 #define SCOPED_CRASH_KEY_BOOL(category, name, data)                       \
-  static_assert(std::is_same<std::decay_t<decltype(data)>, bool>::value,  \
+  static_assert(std::is_same_v<std::decay_t<decltype(data)>, bool>,       \
                 "SCOPED_CRASH_KEY_BOOL must be passed a boolean value."); \
   SCOPED_CRASH_KEY_STRING32(category, name, (data) ? "true" : "false")
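Usage of the macro is unchanged; only the trait spelling moves to std::is_same_v. A short, hypothetical call site:

#include "base/debug/crash_logging.h"

void HandleNavigation(bool is_same_document) {
  // Records a boolean crash key (as the string "true" or "false") for the
  // rest of the enclosing scope; passing a non-bool argument fails the
  // static_assert above.
  SCOPED_CRASH_KEY_BOOL(navigation, is_same_document, is_same_document);
  // ... work that might crash ...
}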
 
diff --git a/base/debug/crash_logging_unittest.nc b/base/debug/crash_logging_unittest.nc
index 97ad6e1..669b183 100644
--- a/base/debug/crash_logging_unittest.nc
+++ b/base/debug/crash_logging_unittest.nc
@@ -7,7 +7,7 @@
 
 #include "base/debug/crash_logging.h"
 
-#if defined(NCTEST_SCOPED_CRASH_KEY_BOOL_ON_NON_BOOL_ARG)  // [r"static assertion failed due to requirement 'std::is_same<int, bool>::value': SCOPED_CRASH_KEY_BOOL must be passed a boolean value\."]
+#if defined(NCTEST_SCOPED_CRASH_KEY_BOOL_ON_NON_BOOL_ARG)  // [r"static assertion failed due to requirement 'std::is_same_v<int, bool>': SCOPED_CRASH_KEY_BOOL must be passed a boolean value\."]
 
 void WontCompile() {
   SCOPED_CRASH_KEY_BOOL(category, name, 1);
diff --git a/base/debug/debug.gni b/base/debug/debug.gni
index f8419e4..f61c907 100644
--- a/base/debug/debug.gni
+++ b/base/debug/debug.gni
@@ -14,7 +14,8 @@
   #
   # Although it should work on other platforms as well, for the above reasons,
   # we currently enable it only for Android when compiling for Arm64.
-  build_allocation_stack_trace_recorder = current_cpu == "arm64" && is_android
+  build_allocation_stack_trace_recorder =
+      !is_official_build && current_cpu == "arm64" && is_android
 }
 
 declare_args() {
diff --git a/base/debug/dwarf_line_no.cc b/base/debug/dwarf_line_no.cc
index 2f12ea4..513b132 100644
--- a/base/debug/dwarf_line_no.cc
+++ b/base/debug/dwarf_line_no.cc
@@ -1267,7 +1267,7 @@
 
 }  // namespace
 
-bool GetDwarfSourceLineNumber(void* pc,
+bool GetDwarfSourceLineNumber(const void* pc,
                               uintptr_t cu_offset,
                               char* out,
                               size_t out_size) {
@@ -1291,7 +1291,7 @@
   return true;
 }
 
-void GetDwarfCompileUnitOffsets(void* const* trace,
+void GetDwarfCompileUnitOffsets(const void* const* trace,
                                 uint64_t* cu_offsets,
                                 size_t num_frames) {
   // Ensure `cu_offsets` always has a known state.
diff --git a/base/debug/dwarf_line_no.h b/base/debug/dwarf_line_no.h
index 2a51aa8..dde7fbe 100644
--- a/base/debug/dwarf_line_no.h
+++ b/base/debug/dwarf_line_no.h
@@ -15,7 +15,7 @@
 //
 // Expects `trace` and `cu_offsets` to be `num_frames` in size. If a frame
 // cannot be found, the corresponding value stored in `cu_offsets` is 0.
-void GetDwarfCompileUnitOffsets(void* const* trace,
+void GetDwarfCompileUnitOffsets(const void* const* trace,
                                 uint64_t* cu_offsets,
                                 size_t num_frames);
 
@@ -29,7 +29,7 @@
 //   ../../base/debug/stack_trace_unittest.cc:120,16
 //
 // This means `pc` was from line 120, column 16, of stack_trace_unittest.cc.
-bool GetDwarfSourceLineNumber(void* pc,
+bool GetDwarfSourceLineNumber(const void* pc,
                               uint64_t cu_offsets,
                               char* out,
                               size_t out_size);
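A sketch of how the now const-correct helpers compose with StackTrace::addresses() (added later in this patch). The enclosing function and buffer sizes are hypothetical, and the base::debug namespace is assumed from the header path:

#include <algorithm>
#include <cstddef>

#include "base/containers/span.h"
#include "base/debug/dwarf_line_no.h"
#include "base/debug/stack_trace.h"

void LogLineNumbers(const base::debug::StackTrace& trace) {
  base::span<const void* const> frames = trace.addresses();
  uint64_t cu_offsets[32] = {};
  const size_t num_frames = std::min(frames.size(), size_t{32});
  base::debug::GetDwarfCompileUnitOffsets(frames.data(), cu_offsets,
                                          num_frames);
  for (size_t i = 0; i < num_frames; ++i) {
    char buf[128];
    if (base::debug::GetDwarfSourceLineNumber(frames[i], cu_offsets[i], buf,
                                              sizeof(buf))) {
      // `buf` now holds something like "../../base/foo.cc:120,16".
    }
  }
}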
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 3debc8b..551c0d2 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -261,13 +261,6 @@
 #endif
 }
 
-const void *const *StackTrace::Addresses(size_t* count) const {
-  *count = count_;
-  if (count_)
-    return trace_;
-  return nullptr;
-}
-
 void StackTrace::Print() const {
   PrintWithPrefix(nullptr);
 }
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index ef208de..9f5d7c0 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -11,6 +11,7 @@
 #include <string>
 
 #include "base/base_export.h"
+#include "base/containers/span.h"
 #include "base/debug/debugging_buildflags.h"
 #include "base/memory/raw_ptr.h"
 #include "build/build_config.h"
@@ -104,7 +105,9 @@
   // number of elements in the returned array. Addresses()[0] will contain an
   // address from the leaf function, and Addresses()[count-1] will contain an
   // address from the root function (i.e.; the thread's entry point).
-  const void* const* Addresses(size_t* count) const;
+  span<const void* const> addresses() const {
+    return make_span(trace_, count_);
+  }
 
   // Prints the stack trace to stderr.
   void Print() const;
@@ -134,7 +137,7 @@
   void InitTrace(const _CONTEXT* context_record);
 #endif
 
-  void* trace_[kMaxTraces];
+  const void* trace_[kMaxTraces];
 
   // The number of valid frames in |trace_|.
   size_t count_;
@@ -145,7 +148,7 @@
 
 // Record a stack trace with up to |count| frames into |trace|. Returns the
 // number of frames read.
-BASE_EXPORT size_t CollectStackTrace(void** trace, size_t count);
+BASE_EXPORT size_t CollectStackTrace(const void** trace, size_t count);
 
 #if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
 
diff --git a/base/debug/stack_trace_android.cc b/base/debug/stack_trace_android.cc
index 7e98002..814a498 100644
--- a/base/debug/stack_trace_android.cc
+++ b/base/debug/stack_trace_android.cc
@@ -31,7 +31,7 @@
         max_depth(max_depth),
         have_skipped_self(false) {}
 
-  raw_ptr<uintptr_t> frames;
+  raw_ptr<uintptr_t, AllowPtrArithmetic> frames;
   size_t frame_count;
   size_t max_depth;
   bool have_skipped_self;
@@ -75,7 +75,7 @@
   return (sigaction(SIGPIPE, &action, NULL) == 0);
 }
 
-size_t CollectStackTrace(void** trace, size_t count) {
+size_t CollectStackTrace(const void** trace, size_t count) {
   StackCrawlState state(reinterpret_cast<uintptr_t*>(trace), count);
   _Unwind_Backtrace(&TraceStackFrame, &state);
   return state.frame_count;
diff --git a/base/debug/stack_trace_fuchsia.cc b/base/debug/stack_trace_fuchsia.cc
index 1c55e93..543e475 100644
--- a/base/debug/stack_trace_fuchsia.cc
+++ b/base/debug/stack_trace_fuchsia.cc
@@ -29,7 +29,7 @@
 namespace {
 
 struct BacktraceData {
-  void** trace_array;
+  const void** trace_array;
   size_t* count;
   size_t max;
 };
@@ -203,7 +203,7 @@
 
 // Returns true if |address| is contained by any of the memory regions
 // mapped for |module_entry|.
-bool ModuleContainsFrameAddress(void* address,
+bool ModuleContainsFrameAddress(const void* address,
                                 const SymbolMap::Module& module_entry) {
   for (size_t i = 0; i < module_entry.segment_count; ++i) {
     const SymbolMap::Segment& segment = module_entry.segments[i];
@@ -229,7 +229,7 @@
   return true;
 }
 
-size_t CollectStackTrace(void** trace, size_t count) {
+size_t CollectStackTrace(const void** trace, size_t count) {
   size_t frame_count = 0;
   BacktraceData data = {trace, &frame_count, count};
   _Unwind_Backtrace(&UnwindStore, &data);
diff --git a/base/debug/stack_trace_nacl.cc b/base/debug/stack_trace_nacl.cc
index e3fe282..44c7a49 100644
--- a/base/debug/stack_trace_nacl.cc
+++ b/base/debug/stack_trace_nacl.cc
@@ -11,10 +11,6 @@
 StackTrace::StackTrace(size_t count) : StackTrace() {}
 StackTrace::StackTrace(const void* const* trace, size_t count) : StackTrace() {}
 
-const void* const* StackTrace::Addresses(size_t* count) const {
-  return nullptr;
-}
-
 void StackTrace::Print() const {}
 
 void StackTrace::OutputToStream(std::ostream* os) const {}
diff --git a/base/debug/stack_trace_perftest.cc b/base/debug/stack_trace_perftest.cc
index 4f03e50..1fbdeba 100644
--- a/base/debug/stack_trace_perftest.cc
+++ b/base/debug/stack_trace_perftest.cc
@@ -4,6 +4,7 @@
 
 #include <vector>
 
+#include "base/containers/span.h"
 #include "base/debug/stack_trace.h"
 #include "base/logging.h"
 #include "base/strings/stringprintf.h"
@@ -32,19 +33,19 @@
 
 class StackTracer {
  public:
-  StackTracer(size_t trace_count) : trace_count(trace_count) {}
+  StackTracer(size_t trace_count) : trace_count_(trace_count) {}
   void Trace() {
-    size_t tmp;
-    base::debug::StackTrace st(trace_count);
-    const void* addresses = st.Addresses(&tmp);
+    StackTrace st(trace_count_);
+    span<const void* const> addresses = st.addresses();
     // make sure a valid array of stack frames is returned
-    EXPECT_NE(addresses, nullptr);
+    ASSERT_FALSE(addresses.empty());
+    EXPECT_TRUE(addresses[0]);
     // make sure the test generates the intended count of stack frames
-    EXPECT_EQ(trace_count, tmp);
+    EXPECT_EQ(trace_count_, addresses.size());
   }
 
  private:
-  const size_t trace_count;
+  const size_t trace_count_;
 };
 
 void MultiObjTest(size_t trace_count) {
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
index ad958c3..5fec90b 100644
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -60,6 +60,8 @@
 #endif
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#include <sys/prctl.h>
+
 #include "base/debug/proc_maps_linux.h"
 #endif
 
@@ -161,7 +163,7 @@
 };
 
 #if defined(HAVE_BACKTRACE)
-void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
+void OutputPointer(const void* pointer, BacktraceOutputHandler* handler) {
   // This should be more than enough to store a 64-bit number in hex:
   // 16 hex digits + 1 for null-terminator.
   char buf[17] = { '\0' };
@@ -189,7 +191,7 @@
 }
 #endif  // defined(USE_SYMBOLIZE)
 
-void ProcessBacktrace(void* const* trace,
+void ProcessBacktrace(const void* const* trace,
                       size_t size,
                       const char* prefix_string,
                       BacktraceOutputHandler* handler) {
@@ -216,8 +218,8 @@
 
     // Subtract by one as return address of function may be in the next
     // function when a function is annotated as noreturn.
-    void* address = static_cast<char*>(trace[i]) - 1;
-    if (google::Symbolize(address, buf, sizeof(buf))) {
+    const void* address = static_cast<const char*>(trace[i]) - 1;
+    if (google::Symbolize(const_cast<void*>(address), buf, sizeof(buf))) {
       handler->HandleOutput(buf);
 #if BUILDFLAG(ENABLE_STACK_TRACE_LINE_NUMBERS)
       // Only output the source line number if the offset was found. Otherwise,
@@ -266,8 +268,8 @@
     }
     printed = true;
 #else   // defined(HAVE_DLADDR)
-    std::unique_ptr<char*, FreeDeleter> trace_symbols(
-        backtrace_symbols(trace, static_cast<int>(size)));
+    std::unique_ptr<char*, FreeDeleter> trace_symbols(backtrace_symbols(
+        const_cast<void* const*>(trace), static_cast<int>(size)));
     if (trace_symbols.get()) {
       for (size_t i = 0; i < size; ++i) {
         std::string trace_symbol = trace_symbols.get()[i];
@@ -762,13 +764,51 @@
     return -1;
   }
 
+  // This class is copied from
+  // third_party/crashpad/crashpad/util/linux/scoped_pr_set_dumpable.h.
+  // It ensures the process is dumpable before /proc/self/mem is opened. If
+  // the process is already dumpable, this class does nothing.
+  class ScopedPrSetDumpable {
+   public:
+    // Uses `PR_SET_DUMPABLE` to make the current process dumpable.
+    //
+    // Restores the dumpable flag to its original value on destruction. If the
+    // original value couldn't be determined, the destructor attempts to
+    // restore the flag to 0 (non-dumpable).
+    explicit ScopedPrSetDumpable() {
+      int result = prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
+      was_dumpable_ = result > 0;
+
+      if (!was_dumpable_) {
+        std::ignore = prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+      }
+    }
+
+    ScopedPrSetDumpable(const ScopedPrSetDumpable&) = delete;
+    ScopedPrSetDumpable& operator=(const ScopedPrSetDumpable&) = delete;
+
+    ~ScopedPrSetDumpable() {
+      if (!was_dumpable_) {
+        std::ignore = prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
+      }
+    }
+
+   private:
+    bool was_dumpable_;
+  };
+
   // Set the base address for each memory region by reading ELF headers in
   // process memory.
   void SetBaseAddressesForMemoryRegions() {
-    base::ScopedFD mem_fd(
-        HANDLE_EINTR(open("/proc/self/mem", O_RDONLY | O_CLOEXEC)));
-    if (!mem_fd.is_valid())
-      return;
+    base::ScopedFD mem_fd;
+    {
+      ScopedPrSetDumpable s;
+      mem_fd = base::ScopedFD(
+          HANDLE_EINTR(open("/proc/self/mem", O_RDONLY | O_CLOEXEC)));
+      if (!mem_fd.is_valid()) {
+        return;
+      }
+    }
 
     auto safe_memcpy = [&mem_fd](void* dst, uintptr_t src, size_t size) {
       return HANDLE_EINTR(pread(mem_fd.get(), dst, size,
@@ -986,19 +1026,18 @@
 }
 #endif
 
-size_t CollectStackTrace(void** trace, size_t count) {
+size_t CollectStackTrace(const void** trace, size_t count) {
   // NOTE: This code MUST be async-signal safe (it's used by in-process
   // stack dumping signal handler). NO malloc or stdio is allowed here.
 
 #if defined(NO_UNWIND_TABLES) && BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
   // If we do not have unwind tables, then try tracing using frame pointers.
-  return base::debug::TraceStackFramePointers(const_cast<const void**>(trace),
-                                              count, 0);
+  return base::debug::TraceStackFramePointers(trace, count, 0);
 #elif defined(HAVE_BACKTRACE)
   // Though the backtrace API man page does not list any possible negative
   // return values, we take no chance.
   return base::saturated_cast<size_t>(
-      backtrace(trace, base::saturated_cast<int>(count)));
+      backtrace(const_cast<void**>(trace), base::saturated_cast<int>(count)));
 #else
   return 0;
 #endif
diff --git a/base/debug/stack_trace_unittest.cc b/base/debug/stack_trace_unittest.cc
index 4759f27..0662aa8 100644
--- a/base/debug/stack_trace_unittest.cc
+++ b/base/debug/stack_trace_unittest.cc
@@ -10,6 +10,7 @@
 
 #include "base/debug/debugging_buildflags.h"
 #include "base/debug/stack_trace.h"
+#include "base/immediate_crash.h"
 #include "base/logging.h"
 #include "base/process/kill.h"
 #include "base/process/process_handle.h"
@@ -20,6 +21,12 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/multiprocess_func_list.h"
 
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#endif
+
 #if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_IOS)
 #include "base/test/multiprocess_test.h"
 #endif
@@ -32,6 +39,7 @@
 #else
 typedef testing::Test StackTraceTest;
 #endif
+typedef testing::Test StackTraceDeathTest;
 
 #if !defined(__UCLIBC__) && !defined(_AIX)
 // StackTrace::OutputToStream() is not implemented under uclibc, nor AIX.
@@ -48,8 +56,7 @@
   // ToString() should produce the same output.
   EXPECT_EQ(backtrace_message, trace.ToString());
 
-  size_t frames_found = 0;
-  const void* const* addresses = trace.Addresses(&frames_found);
+  span<const void* const> addresses = trace.addresses();
 
 #if defined(OFFICIAL_BUILD) && \
     ((BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)) || BUILDFLAG(IS_FUCHSIA))
@@ -61,8 +68,8 @@
         // ((BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)) ||
         // BUILDFLAG(IS_FUCHSIA))
 
-  ASSERT_TRUE(addresses);
-  ASSERT_GT(frames_found, 5u) << "Too few frames found.";
+  ASSERT_GT(addresses.size(), 5u) << "Too few frames found.";
+  ASSERT_TRUE(addresses[0]);
 
   if (!StackTrace::WillSymbolizeToStreamForTesting())
     return;
@@ -98,13 +105,10 @@
 TEST_F(StackTraceTest, TruncatedTrace) {
   StackTrace trace;
 
-  size_t count = 0;
-  trace.Addresses(&count);
-  ASSERT_LT(2u, count);
+  ASSERT_LT(2u, trace.addresses().size());
 
   StackTrace truncated(2);
-  truncated.Addresses(&count);
-  EXPECT_EQ(2u, count);
+  EXPECT_EQ(2u, truncated.addresses().size());
 }
 #endif  // !defined(OFFICIAL_BUILD) && !defined(NO_UNWIND_TABLES)
 
@@ -157,31 +161,91 @@
 #endif  // !defined(__UCLIBC__) && !defined(_AIX)
 
 #if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
-#if !BUILDFLAG(IS_IOS)
-static char* newArray() {
-  // Clang warns about the mismatched new[]/delete if they occur in the same
-  // function.
-  return new char[10];
+// Since Mac's base::debug::StackTrace().Print() is not malloc-free, skip
+// StackDumpSignalHandlerIsMallocFree if BUILDFLAG(IS_MAC).
+#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !BUILDFLAG(IS_MAC)
+
+namespace {
+
+// ImmediateCrash() if a signal handler incorrectly uses malloc().
+// With a real allocator this could cause infinite recursion into the
+// signal handler or other problems, because malloc() is not guaranteed to
+// be async-signal safe.
+void* BadMalloc(const allocator_shim::AllocatorDispatch*, size_t, void*) {
+  base::ImmediateCrash();
 }
 
-MULTIPROCESS_TEST_MAIN(MismatchedMallocChildProcess) {
-  char* pointer = newArray();
-  delete pointer;
-  return 2;
+void* BadCalloc(const allocator_shim::AllocatorDispatch*,
+                size_t,
+                size_t,
+                void* context) {
+  base::ImmediateCrash();
 }
 
-// Regression test for StackDumpingSignalHandler async-signal unsafety.
-// Combined with tcmalloc's debugallocation, that signal handler
-// and e.g. mismatched new[]/delete would cause a hang because
-// of re-entering malloc.
-TEST_F(StackTraceTest, AsyncSignalUnsafeSignalHandlerHang) {
-  Process child = SpawnChild("MismatchedMallocChildProcess");
-  ASSERT_TRUE(child.IsValid());
-  int exit_code;
-  ASSERT_TRUE(
-      child.WaitForExitWithTimeout(TestTimeouts::action_timeout(), &exit_code));
+void* BadAlignedAlloc(const allocator_shim::AllocatorDispatch*,
+                      size_t,
+                      size_t,
+                      void*) {
+  base::ImmediateCrash();
 }
-#endif  // !BUILDFLAG(IS_IOS)
+
+void* BadAlignedRealloc(const allocator_shim::AllocatorDispatch*,
+                        void*,
+                        size_t,
+                        size_t,
+                        void*) {
+  base::ImmediateCrash();
+}
+
+void* BadRealloc(const allocator_shim::AllocatorDispatch*,
+                 void*,
+                 size_t,
+                 void*) {
+  base::ImmediateCrash();
+}
+
+void BadFree(const allocator_shim::AllocatorDispatch*, void*, void*) {
+  base::ImmediateCrash();
+}
+
+allocator_shim::AllocatorDispatch g_bad_malloc_dispatch = {
+    &BadMalloc,         /* alloc_function */
+    &BadMalloc,         /* alloc_unchecked_function */
+    &BadCalloc,         /* alloc_zero_initialized_function */
+    &BadAlignedAlloc,   /* alloc_aligned_function */
+    &BadRealloc,        /* realloc_function */
+    &BadFree,           /* free_function */
+    nullptr,            /* get_size_estimate_function */
+    nullptr,            /* claimed_address_function */
+    nullptr,            /* batch_malloc_function */
+    nullptr,            /* batch_free_function */
+    nullptr,            /* free_definite_size_function */
+    nullptr,            /* try_free_default_function */
+    &BadAlignedAlloc,   /* aligned_malloc_function */
+    &BadAlignedRealloc, /* aligned_realloc_function */
+    &BadFree,           /* aligned_free_function */
+    nullptr,            /* next */
+};
+
+}  // namespace
+
+// Regression test for StackDumpSignalHandler async-signal unsafety.
+// Since malloc() is not guaranteed to be async-signal safe, it must not be
+// used inside StackDumpSignalHandler().
+TEST_F(StackTraceDeathTest, StackDumpSignalHandlerIsMallocFree) {
+  EXPECT_DEATH_IF_SUPPORTED(
+      [] {
+        // On Android, base::debug::EnableInProcessStackDumping() does not
+        // install StackDumpSignalHandler as the handler for any signal, so
+        // this test does not work on Android.
+        EnableInProcessStackDumping();
+        allocator_shim::InsertAllocatorDispatch(&g_bad_malloc_dispatch);
+        // Raise SIGSEGV to invoke StackDumpSignalHandler().
+        kill(getpid(), SIGSEGV);
+      }(),
+      "\\[end of stack trace\\]\n");
+}
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM) && !BUILDFLAG(IS_MAC)
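The death test above installs g_bad_malloc_dispatch inside the crashing lambda, so it never has to undo the hook. A test that keeps running afterwards would pair the insertion with a removal; a sketch assuming the shim's RemoveAllocatorDispatchForTesting() counterpart and the same file-local dispatch table:

// Hypothetical RAII helper, for use in the same anonymous namespace as
// g_bad_malloc_dispatch above.
class ScopedBadMallocDispatch {
 public:
  ScopedBadMallocDispatch() {
    allocator_shim::InsertAllocatorDispatch(&g_bad_malloc_dispatch);
  }
  ~ScopedBadMallocDispatch() {
    allocator_shim::RemoveAllocatorDispatchForTesting(&g_bad_malloc_dispatch);
  }
};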
 
 namespace {
 
diff --git a/base/debug/stack_trace_win.cc b/base/debug/stack_trace_win.cc
index e7328d6..415f1cf 100644
--- a/base/debug/stack_trace_win.cc
+++ b/base/debug/stack_trace_win.cc
@@ -17,8 +17,8 @@
 #include "base/files/file_path.h"
 #include "base/logging.h"
 #include "base/memory/singleton.h"
+#include "base/strings/strcat_win.h"
 #include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
 #include "build/build_config.h"
 
@@ -201,8 +201,8 @@
     return false;
   }
 
-  std::wstring new_path = StringPrintf(L"%ls;%ls", symbols_path,
-                                       GetExePath().DirName().value().c_str());
+  std::wstring new_path =
+      StrCat({symbols_path, L";", GetExePath().DirName().value()});
   if (!SymSetSearchPathW(GetCurrentProcess(), new_path.c_str())) {
     g_init_error = GetLastError();
     DLOG(WARNING) << "SymSetSearchPath failed." << g_init_error;
@@ -324,9 +324,9 @@
   return InitializeSymbols();
 }
 
-NOINLINE size_t CollectStackTrace(void** trace, size_t count) {
+NOINLINE size_t CollectStackTrace(const void** trace, size_t count) {
   // When walking our own stack, use CaptureStackBackTrace().
-  return CaptureStackBackTrace(0, count, trace, NULL);
+  return CaptureStackBackTrace(0, count, const_cast<void**>(trace), NULL);
 }
 
 StackTrace::StackTrace(EXCEPTION_POINTERS* exception_pointers) {
diff --git a/base/debug/task_trace.cc b/base/debug/task_trace.cc
index 8a9f8a8..14775df 100644
--- a/base/debug/task_trace.cc
+++ b/base/debug/task_trace.cc
@@ -4,22 +4,20 @@
 
 #include "base/debug/task_trace.h"
 
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include "base/pending_task.h"
 #include "base/ranges/algorithm.h"
+#include "base/task/common/task_annotator.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_ANDROID)
 #include <android/log.h>
-#endif  // BUILDFLAG(IS_ANDROID)
 
-#include <iostream>
-#include <sstream>
-
-#if BUILDFLAG(IS_ANDROID)
 #include "base/no_destructor.h"
-#endif
-
-#include "base/pending_task.h"
-#include "base/task/common/task_annotator.h"
+#endif  // BUILDFLAG(IS_ANDROID)
 
 namespace base {
 namespace debug {
@@ -98,11 +96,11 @@
   if (empty()) {
     return count;
   }
-  const void* const* current_addresses = stack_trace_->Addresses(&count);
-  for (size_t i = 0; i < count && i < addresses.size(); ++i) {
-    addresses[i] = current_addresses[i];
-  }
-  return count;
+  span<const void* const> current_addresses = stack_trace_->addresses();
+  ranges::copy_n(current_addresses.begin(),
+                 std::min(current_addresses.size(), addresses.size()),
+                 addresses.begin());
+  return current_addresses.size();
 }
 
 std::ostream& operator<<(std::ostream& os, const TaskTrace& task_trace) {
diff --git a/base/debug/test_elf_image_builder.cc b/base/debug/test_elf_image_builder.cc
index 6218e4c..309dd07 100644
--- a/base/debug/test_elf_image_builder.cc
+++ b/base/debug/test_elf_image_builder.cc
@@ -69,11 +69,11 @@
           bits::AlignUp(desc.size(), size_t{4}),
       '\0');
   uint8_t* loc = &buffer.front();
-  Nhdr* nhdr = reinterpret_cast<Nhdr*>(loc);
-  nhdr->n_namesz = name_with_null_size;
-  nhdr->n_descsz = desc.size();
-  nhdr->n_type = type;
-  loc += sizeof(Nhdr);
+  Nhdr nhdr;
+  nhdr.n_namesz = name_with_null_size;
+  nhdr.n_descsz = desc.size();
+  nhdr.n_type = type;
+  loc = AppendHdr(nhdr, loc);
 
   memcpy(loc, name.data(), name.size());
   *(loc + name.size()) = '\0';
@@ -248,26 +248,26 @@
   // Add the soname state.
   if (soname_) {
     // Add a DYNAMIC section for the soname.
-    Dyn* soname_dyn = reinterpret_cast<Dyn*>(loc);
-    soname_dyn->d_tag = DT_SONAME;
-    soname_dyn->d_un.d_val = 1;  // One char into the string table.
-    loc += sizeof(Dyn);
+    Dyn soname_dyn;
+    soname_dyn.d_tag = DT_SONAME;
+    soname_dyn.d_un.d_val = 1;  // One char into the string table.
+    loc = AppendHdr(soname_dyn, loc);
   }
 
-  Dyn* strtab_dyn = reinterpret_cast<Dyn*>(loc);
-  strtab_dyn->d_tag = DT_STRTAB;
+  Dyn strtab_dyn;
+  strtab_dyn.d_tag = DT_STRTAB;
 #if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_ANDROID)
   // Fuchsia and Android do not alter the symtab pointer on ELF load -- it's
   // expected to remain a 'virtual address'.
-  strtab_dyn->d_un.d_ptr =
+  strtab_dyn.d_un.d_ptr =
       GetVirtualAddressForOffset(measures.strtab_start, elf_start);
 #else
   // Linux relocates this value on ELF load, so produce the pointer value after
   // relocation. That value will always be equal to the actual memory address.
-  strtab_dyn->d_un.d_ptr =
+  strtab_dyn.d_un.d_ptr =
       reinterpret_cast<uintptr_t>(elf_start + measures.strtab_start);
 #endif
-  loc += sizeof(Dyn);
+  loc = AppendHdr(strtab_dyn, loc);
 
   // Add a string table with one entry for the soname, if necessary.
   *loc++ = '\0';  // The first byte holds a null character.
@@ -287,8 +287,7 @@
 // static
 template <typename T>
 uint8_t* TestElfImageBuilder::AppendHdr(const T& hdr, uint8_t* loc) {
-  static_assert(std::is_trivially_copyable<T>::value,
-                "T should be a plain struct");
+  static_assert(std::is_trivially_copyable_v<T>, "T should be a plain struct");
   memcpy(loc, &hdr, sizeof(T));
   return loc + sizeof(T);
 }
diff --git a/base/enterprise_util_mac.mm b/base/enterprise_util_mac.mm
index fd14573..1977180 100644
--- a/base/enterprise_util_mac.mm
+++ b/base/enterprise_util_mac.mm
@@ -9,8 +9,8 @@
 #include <string>
 #include <vector>
 
+#include "base/apple/foundation_util.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
 #include "base/process/launch.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
@@ -181,12 +181,13 @@
       }
 
       for (id element in results) {
-        ODRecord* record = mac::ObjCCastStrict<ODRecord>(element);
+        ODRecord* record = base::apple::ObjCCastStrict<ODRecord>(element);
         NSArray* attributes =
             [record valuesForAttribute:kODAttributeTypeMetaRecordName
                                  error:nil];
         for (id attribute in attributes) {
-          NSString* attribute_value = mac::ObjCCastStrict<NSString>(attribute);
+          NSString* attribute_value =
+              base::apple::ObjCCastStrict<NSString>(attribute);
           // Example: "uid=johnsmith,ou=People,dc=chromium,dc=org
           NSRange domain_controller =
               [attribute_value rangeOfString:@"(^|,)\\s*dc="
@@ -201,7 +202,8 @@
             [record valuesForAttribute:kODAttributeTypeAltSecurityIdentities
                                  error:nil];
         for (id attribute in attributes) {
-          NSString* attribute_value = mac::ObjCCastStrict<NSString>(attribute);
+          NSString* attribute_value =
+              base::apple::ObjCCastStrict<NSString>(attribute);
           NSRange icloud =
               [attribute_value rangeOfString:@"CN=com.apple.idms.appleid.prd"
                                      options:NSCaseInsensitiveSearch];
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 9698a29..6575c52 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -87,13 +87,8 @@
     SCOPED_CRASH_KEY_STRING256("FeatureList", "feature-accessed-too-early",
                                feature->name);
 #endif  // !BUILDFLAG(IS_NACL)
-    // Fail if DCHECKs are enabled.
-    DCHECK(!feature) << "Accessed feature " << feature->name
-                     << " before FeatureList registration.";
-    // TODO(crbug.com/1383852): When we believe that all early accesses have
-    // been fixed, remove this base::debug::DumpWithoutCrashing() and change the
-    // above DCHECK to a CHECK.
-    base::debug::DumpWithoutCrashing();
+    CHECK(!feature) << "Accessed feature " << feature->name
+                    << " before FeatureList registration.";
 #endif  // !BUILDFLAG(IS_IOS) && !BUILDFLAG(IS_ANDROID) &&
         // !BUILDFLAG(IS_CHROMEOS)
   }
diff --git a/base/features.cc b/base/features.cc
index 784865e..50cb762 100644
--- a/base/features.cc
+++ b/base/features.cc
@@ -28,15 +28,23 @@
 // Optimizes parsing and loading of data: URLs.
 BASE_FEATURE(kOptimizeDataUrls, "OptimizeDataUrls", FEATURE_ENABLED_BY_DEFAULT);
 
-BASE_FEATURE(kSupportsUserDataFlatHashMap,
-             "SupportsUserDataFlatHashMap",
+BASE_FEATURE(kUseRustJsonParser,
+             "UseRustJsonParser",
              FEATURE_DISABLED_BY_DEFAULT);
 
-#if BUILDFLAG(IS_ANDROID)
-// Force to enable LowEndDeviceMode partially on Android mid-range devices.
-// Such devices aren't considered low-end, but we'd like experiment with
-// a subset of low-end features to see if we get a good memory vs. performance
-// tradeoff.
+BASE_FEATURE(kJsonNegativeZero, "JsonNegativeZero", FEATURE_ENABLED_BY_DEFAULT);
+
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+// Force-enables LowEndDeviceMode partially on Android 3GB devices.
+// (see PartialLowEndModeOnMidRangeDevices below)
+BASE_FEATURE(kPartialLowEndModeOn3GbDevices,
+             "PartialLowEndModeOn3GbDevices",
+             base::FEATURE_DISABLED_BY_DEFAULT);
+
+// Used to enable LowEndDeviceMode partially on Android and ChromeOS mid-range
+// devices. Such devices aren't considered low-end, but we'd like to
+// experiment with a subset of low-end features to see if we get a good
+// memory vs. performance tradeoff.
 //
 // TODO(crbug.com/1434873): |#if| out 32-bit before launching or going to
 // high Stable %, because we will enable the feature only for <8GB 64-bit
@@ -44,15 +52,19 @@
 // population to collect data.
 BASE_FEATURE(kPartialLowEndModeOnMidRangeDevices,
              "PartialLowEndModeOnMidRangeDevices",
+#if BUILDFLAG(IS_ANDROID)
+             base::FEATURE_ENABLED_BY_DEFAULT);
+#elif BUILDFLAG(IS_CHROMEOS)
              base::FEATURE_DISABLED_BY_DEFAULT);
+#endif
 
-// A parameter to exclude or not exclude LowEndBackgroundCleanup from
-// PartialLowModeOnMidRangeDevices. This is used to see how
-// LowEndBackGroundCleanup affects total count of memory.gpu.privatefootprints.
-const FeatureParam<bool> kPartialLowEndModeExcludeLowEndBackgroundCleanup{
-    &kPartialLowEndModeOnMidRangeDevices, "exculde-low-end-background-cleanup",
-    false};
+#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
 
+#if BUILDFLAG(IS_ANDROID)
+// Whether to report frame metrics to the Android.FrameTimeline.* histograms.
+BASE_FEATURE(kCollectAndroidFrameTimelineMetrics,
+             "CollectAndroidFrameTimelineMetrics",
+             base::FEATURE_DISABLED_BY_DEFAULT);
 #endif  // BUILDFLAG(IS_ANDROID)
 
 }  // namespace base::features
diff --git a/base/features.h b/base/features.h
index 90a69b4..a80ac17 100644
--- a/base/features.h
+++ b/base/features.h
@@ -21,12 +21,17 @@
 
 BASE_EXPORT BASE_DECLARE_FEATURE(kOptimizeDataUrls);
 
-BASE_EXPORT BASE_DECLARE_FEATURE(kSupportsUserDataFlatHashMap);
+BASE_EXPORT BASE_DECLARE_FEATURE(kUseRustJsonParser);
+
+BASE_EXPORT BASE_DECLARE_FEATURE(kJsonNegativeZero);
+
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartialLowEndModeOn3GbDevices);
+BASE_EXPORT BASE_DECLARE_FEATURE(kPartialLowEndModeOnMidRangeDevices);
+#endif
 
 #if BUILDFLAG(IS_ANDROID)
-BASE_EXPORT BASE_DECLARE_FEATURE(kPartialLowEndModeOnMidRangeDevices);
-extern const BASE_EXPORT FeatureParam<bool>
-    kPartialLowEndModeExcludeLowEndBackgroundCleanup;
+BASE_EXPORT BASE_DECLARE_FEATURE(kCollectAndroidFrameTimelineMetrics);
 #endif
 
 }  // namespace base::features
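A sketch of how these flags are typically queried at runtime; the enclosing helper is hypothetical, base::FeatureList::IsEnabled() is the standard accessor:

#include "base/feature_list.h"
#include "base/features.h"
#include "build/build_config.h"

bool ShouldUsePartialLowEndMode() {
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
  return base::FeatureList::IsEnabled(
             base::features::kPartialLowEndModeOn3GbDevices) ||
         base::FeatureList::IsEnabled(
             base::features::kPartialLowEndModeOnMidRangeDevices);
#else
  return false;
#endif
}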
diff --git a/base/file_version_info_apple.h b/base/file_version_info_apple.h
new file mode 100644
index 0000000..7b3041a
--- /dev/null
+++ b/base/file_version_info_apple.h
@@ -0,0 +1,44 @@
+// Copyright 2011 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_VERSION_INFO_APPLE_H_
+#define BASE_FILE_VERSION_INFO_APPLE_H_
+
+#include "base/file_version_info.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <string>
+
+@class NSBundle;
+
+class FileVersionInfoApple : public FileVersionInfo {
+ public:
+  explicit FileVersionInfoApple(NSBundle* bundle);
+  FileVersionInfoApple(const FileVersionInfoApple&) = delete;
+  FileVersionInfoApple& operator=(const FileVersionInfoApple&) = delete;
+  ~FileVersionInfoApple() override;
+
+  // Accessors to the different version properties.
+  // Returns an empty string if the property is not found.
+  std::u16string company_name() override;
+  std::u16string company_short_name() override;
+  std::u16string product_name() override;
+  std::u16string product_short_name() override;
+  std::u16string internal_name() override;
+  std::u16string product_version() override;
+  std::u16string special_build() override;
+  std::u16string original_filename() override;
+  std::u16string file_description() override;
+  std::u16string file_version() override;
+
+ private:
+  // Returns a std::u16string value for a property name.
+  // Returns the empty string if the property does not exist.
+  std::u16string GetString16Value(CFStringRef name);
+
+  NSBundle* __strong bundle_;
+};
+
+#endif  // BASE_FILE_VERSION_INFO_APPLE_H_
diff --git a/base/file_version_info_apple.mm b/base/file_version_info_apple.mm
new file mode 100644
index 0000000..ef5a299
--- /dev/null
+++ b/base/file_version_info_apple.mm
@@ -0,0 +1,97 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_version_info_apple.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/apple/bridging.h"
+#include "base/apple/bundle_locations.h"
+#include "base/apple/foundation_util.h"
+#include "base/files/file_path.h"
+#include "base/strings/sys_string_conversions.h"
+#include "build/build_config.h"
+
+FileVersionInfoApple::FileVersionInfoApple(NSBundle* bundle)
+    : bundle_(bundle) {}
+
+FileVersionInfoApple::~FileVersionInfoApple() = default;
+
+// static
+std::unique_ptr<FileVersionInfo>
+FileVersionInfo::CreateFileVersionInfoForCurrentModule() {
+  return CreateFileVersionInfo(base::apple::FrameworkBundlePath());
+}
+
+// static
+std::unique_ptr<FileVersionInfo> FileVersionInfo::CreateFileVersionInfo(
+    const base::FilePath& file_path) {
+  NSString* path = base::SysUTF8ToNSString(file_path.value());
+  NSBundle* bundle = [NSBundle bundleWithPath:path];
+  return std::make_unique<FileVersionInfoApple>(bundle);
+}
+
+std::u16string FileVersionInfoApple::company_name() {
+  return std::u16string();
+}
+
+std::u16string FileVersionInfoApple::company_short_name() {
+  return std::u16string();
+}
+
+std::u16string FileVersionInfoApple::internal_name() {
+  return std::u16string();
+}
+
+std::u16string FileVersionInfoApple::product_name() {
+  return GetString16Value(kCFBundleNameKey);
+}
+
+std::u16string FileVersionInfoApple::product_short_name() {
+  return GetString16Value(kCFBundleNameKey);
+}
+
+std::u16string FileVersionInfoApple::product_version() {
+  // On macOS, CFBundleVersion is used by LaunchServices, and must follow
+  // specific formatting rules, so the four-part Chrome version is in
+  // CFBundleShortVersionString. On iOS, both have a policy-enforced limit
+  // of three version components, so the full version is stored in a custom
+  // key (CrBundleVersion) falling back to CFBundleVersion if not present.
+#if BUILDFLAG(IS_IOS)
+  std::u16string version(GetString16Value(CFSTR("CrBundleVersion")));
+  if (version.length() > 0) {
+    return version;
+  }
+  return GetString16Value(CFSTR("CFBundleVersion"));
+#else
+  return GetString16Value(CFSTR("CFBundleShortVersionString"));
+#endif  // BUILDFLAG(IS_IOS)
+}
+
+std::u16string FileVersionInfoApple::file_description() {
+  return std::u16string();
+}
+
+std::u16string FileVersionInfoApple::file_version() {
+  return product_version();
+}
+
+std::u16string FileVersionInfoApple::original_filename() {
+  return GetString16Value(kCFBundleNameKey);
+}
+
+std::u16string FileVersionInfoApple::special_build() {
+  return std::u16string();
+}
+
+std::u16string FileVersionInfoApple::GetString16Value(CFStringRef name) {
+  if (bundle_) {
+    NSString* ns_name = base::apple::CFToNSPtrCast(name);
+    NSString* value = [bundle_ objectForInfoDictionaryKey:ns_name];
+    if (value) {
+      return base::SysNSStringToUTF16(value);
+    }
+  }
+  return std::u16string();
+}
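The public factory functions are unchanged by the Mac-to-Apple rename; a small, hypothetical call site:

#include <memory>

#include "base/file_version_info.h"
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"

void LogProductVersion() {
  std::unique_ptr<FileVersionInfo> info =
      FileVersionInfo::CreateFileVersionInfoForCurrentModule();
  if (info) {
    // On Apple platforms this reads CFBundleShortVersionString (or, on iOS,
    // CrBundleVersion falling back to CFBundleVersion) from the bundle.
    LOG(INFO) << base::UTF16ToUTF8(info->product_version());
  }
}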
diff --git a/base/file_version_info_mac.h b/base/file_version_info_mac.h
deleted file mode 100644
index bc73885..0000000
--- a/base/file_version_info_mac.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_FILE_VERSION_INFO_MAC_H_
-#define BASE_FILE_VERSION_INFO_MAC_H_
-
-#include <CoreFoundation/CoreFoundation.h>
-
-#include <string>
-
-#include "base/file_version_info.h"
-
-@class NSBundle;
-
-class FileVersionInfoMac : public FileVersionInfo {
- public:
-  explicit FileVersionInfoMac(NSBundle *bundle);
-  FileVersionInfoMac(const FileVersionInfoMac&) = delete;
-  FileVersionInfoMac& operator=(const FileVersionInfoMac&) = delete;
-  ~FileVersionInfoMac() override;
-
-  // Accessors to the different version properties.
-  // Returns an empty string if the property is not found.
-  std::u16string company_name() override;
-  std::u16string company_short_name() override;
-  std::u16string product_name() override;
-  std::u16string product_short_name() override;
-  std::u16string internal_name() override;
-  std::u16string product_version() override;
-  std::u16string special_build() override;
-  std::u16string original_filename() override;
-  std::u16string file_description() override;
-  std::u16string file_version() override;
-
- private:
-  // Returns a std::u16string value for a property name.
-  // Returns the empty string if the property does not exist.
-  std::u16string GetString16Value(CFStringRef name);
-
-  NSBundle* __strong bundle_;
-};
-
-#endif  // BASE_FILE_VERSION_INFO_MAC_H_
diff --git a/base/file_version_info_mac.mm b/base/file_version_info_mac.mm
deleted file mode 100644
index 595bf74..0000000
--- a/base/file_version_info_mac.mm
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/file_version_info_mac.h"
-
-#import <Foundation/Foundation.h>
-
-#include "base/apple/bridging.h"
-#include "base/apple/bundle_locations.h"
-#include "base/files/file_path.h"
-#include "base/mac/foundation_util.h"
-#include "base/strings/sys_string_conversions.h"
-#include "build/build_config.h"
-
-FileVersionInfoMac::FileVersionInfoMac(NSBundle* bundle) : bundle_(bundle) {}
-
-FileVersionInfoMac::~FileVersionInfoMac() = default;
-
-// static
-std::unique_ptr<FileVersionInfo>
-FileVersionInfo::CreateFileVersionInfoForCurrentModule() {
-  return CreateFileVersionInfo(base::apple::FrameworkBundlePath());
-}
-
-// static
-std::unique_ptr<FileVersionInfo> FileVersionInfo::CreateFileVersionInfo(
-    const base::FilePath& file_path) {
-  NSString* path = base::SysUTF8ToNSString(file_path.value());
-  NSBundle* bundle = [NSBundle bundleWithPath:path];
-  return std::make_unique<FileVersionInfoMac>(bundle);
-}
-
-std::u16string FileVersionInfoMac::company_name() {
-  return std::u16string();
-}
-
-std::u16string FileVersionInfoMac::company_short_name() {
-  return std::u16string();
-}
-
-std::u16string FileVersionInfoMac::internal_name() {
-  return std::u16string();
-}
-
-std::u16string FileVersionInfoMac::product_name() {
-  return GetString16Value(kCFBundleNameKey);
-}
-
-std::u16string FileVersionInfoMac::product_short_name() {
-  return GetString16Value(kCFBundleNameKey);
-}
-
-std::u16string FileVersionInfoMac::product_version() {
-  // On macOS, CFBundleVersion is used by LaunchServices, and must follow
-  // specific formatting rules, so the four-part Chrome version is in
-  // CFBundleShortVersionString. On iOS, both have a policy-enforced limit
-  // of three version components, so the full version is stored in a custom
-  // key (CrBundleVersion) falling back to CFBundleVersion if not present.
-#if BUILDFLAG(IS_IOS)
-  std::u16string version(GetString16Value(CFSTR("CrBundleVersion")));
-  if (version.length() > 0)
-    return version;
-  return GetString16Value(CFSTR("CFBundleVersion"));
-#else
-  return GetString16Value(CFSTR("CFBundleShortVersionString"));
-#endif  // BUILDFLAG(IS_IOS)
-}
-
-std::u16string FileVersionInfoMac::file_description() {
-  return std::u16string();
-}
-
-std::u16string FileVersionInfoMac::file_version() {
-  return product_version();
-}
-
-std::u16string FileVersionInfoMac::original_filename() {
-  return GetString16Value(kCFBundleNameKey);
-}
-
-std::u16string FileVersionInfoMac::special_build() {
-  return std::u16string();
-}
-
-std::u16string FileVersionInfoMac::GetString16Value(CFStringRef name) {
-  if (bundle_) {
-    NSString* ns_name = base::apple::CFToNSPtrCast(name);
-    NSString* value = [bundle_ objectForInfoDictionaryKey:ns_name];
-    if (value) {
-      return base::SysNSStringToUTF16(value);
-    }
-  }
-  return std::u16string();
-}
diff --git a/base/file_version_info_win_unittest.cc b/base/file_version_info_win_unittest.cc
index 4015420..8a6c824 100644
--- a/base/file_version_info_win_unittest.cc
+++ b/base/file_version_info_win_unittest.cc
@@ -23,7 +23,7 @@
 
 FilePath GetTestDataPath() {
   FilePath path;
-  base::PathService::Get(base::DIR_SOURCE_ROOT, &path);
+  base::PathService::Get(base::DIR_SRC_TEST_DATA_ROOT, &path);
   path = path.AppendASCII("base");
   path = path.AppendASCII("test");
   path = path.AppendASCII("data");
diff --git a/base/files/block_tests_writing_to_special_dirs.cc b/base/files/block_tests_writing_to_special_dirs.cc
new file mode 100644
index 0000000..8dd595d
--- /dev/null
+++ b/base/files/block_tests_writing_to_special_dirs.cc
@@ -0,0 +1,66 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/block_tests_writing_to_special_dirs.h"
+
+#include <utility>
+
+#include "base/files/file_path.h"
+#include "base/no_destructor.h"
+#include "base/path_service.h"
+
+namespace base {
+
+// static
+absl::optional<BlockTestsWritingToSpecialDirs>&
+BlockTestsWritingToSpecialDirs::Get() {
+  static NoDestructor<absl::optional<BlockTestsWritingToSpecialDirs>>
+      block_tests_writing_to_special_dirs;
+  return *block_tests_writing_to_special_dirs;
+}
+
+// static
+bool BlockTestsWritingToSpecialDirs::CanWriteToPath(const FilePath& path) {
+  auto& dir_blocker = Get();
+  if (!dir_blocker.has_value()) {
+    return true;
+  }
+  if (!dir_blocker->blocked_dirs_.empty()) {
+    // `blocked_paths_` needs to be initialized lazily because PathService::Get
+    // can't be called from the test harness code before the individual tests
+    // run. On Windows, calling PathService::Get in the test harness startup
+    // code causes user32.dll to get loaded, which breaks delayload_unittests.
+    // On the Mac, it triggers a change in `AmIBundled`.
+    for (const int dir_key : dir_blocker->blocked_dirs_) {
+      // If test infrastructure has overridden `dir_key` already, there is no
+      // need to block writes to it. Android tests apparently do this.
+      if (PathService::IsOverriddenForTesting(dir_key)) {
+        continue;
+      }
+      FilePath path_to_block;
+      // Sandbox can make PathService::Get fail.
+      if (PathService::Get(dir_key, &path_to_block)) {
+        dir_blocker->blocked_paths_.insert(std::move(path_to_block));
+      }
+    }
+    dir_blocker->blocked_dirs_.clear();
+  }
+  for (const auto& path_to_block : dir_blocker->blocked_paths_) {
+    if (path_to_block.IsParent(path)) {
+      (*dir_blocker->failure_callback_)(path);
+      return false;
+    }
+  }
+  return true;
+}
+
+BlockTestsWritingToSpecialDirs::BlockTestsWritingToSpecialDirs(
+    std::vector<int> blocked_dirs,
+    FileWriteBlockedForTestingFunctionPtr failure_callback)
+    : blocked_dirs_(std::move(blocked_dirs)),
+      failure_callback_(failure_callback) {}
+
+BlockTestsWritingToSpecialDirs::~BlockTestsWritingToSpecialDirs() = default;
+
+}  // namespace base
diff --git a/base/files/block_tests_writing_to_special_dirs.h b/base/files/block_tests_writing_to_special_dirs.h
new file mode 100644
index 0000000..2567052
--- /dev/null
+++ b/base/files/block_tests_writing_to_special_dirs.h
@@ -0,0 +1,56 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
+#define BASE_FILES_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
+
+#include <set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace base {
+
+class FilePath;
+
+using FileWriteBlockedForTestingFunctionPtr = void (*)(const FilePath&);
+
+// Utility class for production code to check if writing to special directories
+// is blocked for tests.
+class BASE_EXPORT BlockTestsWritingToSpecialDirs {
+ public:
+  static bool CanWriteToPath(const FilePath& path);
+
+  BlockTestsWritingToSpecialDirs(
+      std::vector<int> blocked_dirs,
+      FileWriteBlockedForTestingFunctionPtr failure_callback);
+  BlockTestsWritingToSpecialDirs(
+      const BlockTestsWritingToSpecialDirs& blocker) = delete;
+  BlockTestsWritingToSpecialDirs& operator=(
+      const BlockTestsWritingToSpecialDirs&) = delete;
+
+  ~BlockTestsWritingToSpecialDirs();
+
+ private:
+  friend class BlockTestsWritingToSpecialDirsTest;
+  friend class ScopedBlockTestsWritingToSpecialDirs;
+
+  // This private method is used by `ScopedBlockTestsWritingToSpecialDirs` to
+  // create an object of this class stored in a function static object.
+  // `CanWriteToPath` above checks the paths stored in that object, if it is
+  // set. Thus, only ScopedBlockTestsWritingToSpecialDirs should be able to
+  // block tests writing to special dirs.
+  static absl::optional<BlockTestsWritingToSpecialDirs>& Get();
+
+  // `blocked_paths_` will be initialized lazily, from `blocked_dirs_`.
+  std::set<FilePath> blocked_paths_;
+  std::vector<int> blocked_dirs_;
+  FileWriteBlockedForTestingFunctionPtr failure_callback_ = nullptr;
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
diff --git a/base/files/block_tests_writing_to_special_dirs_unittest.cc b/base/files/block_tests_writing_to_special_dirs_unittest.cc
new file mode 100644
index 0000000..a43567d
--- /dev/null
+++ b/base/files/block_tests_writing_to_special_dirs_unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/block_tests_writing_to_special_dirs.h"
+
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class BlockTestsWritingToSpecialDirsTest : public testing::Test {
+ public:
+  BlockTestsWritingToSpecialDirsTest() {
+    auto& prev_block_tests = Get();
+    if (prev_block_tests.has_value()) {
+      save_block_tests_.emplace(std::move(prev_block_tests->blocked_dirs_),
+                                prev_block_tests->failure_callback_);
+      prev_block_tests.reset();
+    }
+  }
+
+  ~BlockTestsWritingToSpecialDirsTest() override {
+    if (save_block_tests_.has_value()) {
+      Get().emplace(std::move(save_block_tests_->blocked_dirs_),
+                    save_block_tests_->failure_callback_);
+    }
+  }
+
+ protected:
+  absl::optional<BlockTestsWritingToSpecialDirs>& Get() {
+    return BlockTestsWritingToSpecialDirs::Get();
+  }
+  absl::optional<BlockTestsWritingToSpecialDirs> save_block_tests_;
+};
+
+// Test that with no special dirs blocked,
+// BlockTestsWritingToSpecialDirs::CanWriteToPath returns true.
+TEST_F(BlockTestsWritingToSpecialDirsTest, NoSpecialDirWriteBlocker) {
+  EXPECT_TRUE(BlockTestsWritingToSpecialDirs::CanWriteToPath(
+      PathService::CheckedGet(DIR_SRC_TEST_DATA_ROOT).AppendASCII("file")));
+}
+
+TEST_F(BlockTestsWritingToSpecialDirsTest, SpecialDirWriteBlocker) {
+  std::vector<int> dirs_to_block = {DIR_SRC_TEST_DATA_ROOT};
+  if (PathService::IsOverriddenForTesting(dirs_to_block[0])) {
+    GTEST_SKIP() << "DIR_SRC_TEST_DATA_ROOT is already overridden";
+  }
+  Get().emplace(std::move(dirs_to_block), ([](const FilePath& path) {}));
+
+  EXPECT_FALSE(BlockTestsWritingToSpecialDirs::CanWriteToPath(
+      PathService::CheckedGet(DIR_SRC_TEST_DATA_ROOT).AppendASCII("file")));
+}
+
+}  // namespace base
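A sketch of how a production write helper would consult the blocker before touching disk. The helper itself is hypothetical; CanWriteToPath() returns true unless a blocker is installed and the path falls under a blocked directory:

#include <cstdint>

#include "base/containers/span.h"
#include "base/files/block_tests_writing_to_special_dirs.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"

bool WriteDataIfAllowed(const base::FilePath& path,
                        base::span<const uint8_t> data) {
  // When a ScopedBlockTestsWritingToSpecialDirs is active and `path` is under
  // a blocked directory, the registered failure callback has already run.
  if (!base::BlockTestsWritingToSpecialDirs::CanWriteToPath(path)) {
    return false;
  }
  return base::WriteFile(path, data);
}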
diff --git a/base/files/dir_reader_linux.h b/base/files/dir_reader_linux.h
index be1f972..4675e22 100644
--- a/base/files/dir_reader_linux.h
+++ b/base/files/dir_reader_linux.h
@@ -65,7 +65,9 @@
     if (r == 0)
       return false;
     if (r < 0) {
-      DPLOG(FATAL) << "getdents64 failed";
+      if (errno != ENOENT) {
+        DPLOG(FATAL) << "getdents64 failed";
+      }
       return false;
     }
     size_ = static_cast<size_t>(r);
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 71ca266..2b2f81a 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -17,11 +17,12 @@
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
 #include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_ostream_operators.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/trace_event/base_tracing.h"
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/mac/scoped_cftyperef.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/third_party/icu/icu_utf.h"
 #endif
 
@@ -1267,7 +1268,7 @@
 
 StringType FilePath::GetHFSDecomposedForm(StringPieceType string) {
   StringType result;
-  ScopedCFTypeRef<CFStringRef> cfstring(CFStringCreateWithBytesNoCopy(
+  apple::ScopedCFTypeRef<CFStringRef> cfstring(CFStringCreateWithBytesNoCopy(
       NULL, reinterpret_cast<const UInt8*>(string.data()),
       checked_cast<CFIndex>(string.length()), kCFStringEncodingUTF8, false,
       kCFAllocatorNull));
@@ -1275,16 +1276,16 @@
     // Query the maximum length needed to store the result. In most cases this
     // will overestimate the required space. The return value also already
     // includes the space needed for a terminating 0.
-    CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
+    CFIndex length =
+        CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring.get());
     DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
     // Reserve enough space for CFStringGetFileSystemRepresentation to write
     // into. Also set the length to the maximum so that we can shrink it later.
     // (Increasing rather than decreasing it would clobber the string contents!)
     result.reserve(static_cast<size_t>(length));
     result.resize(static_cast<size_t>(length) - 1);
-    Boolean success = CFStringGetFileSystemRepresentation(cfstring,
-                                                          &result[0],
-                                                          length);
+    Boolean success =
+        CFStringGetFileSystemRepresentation(cfstring.get(), &result[0], length);
     if (success) {
       // Reduce result.length() to actual string length.
       result.resize(strlen(result.c_str()));
@@ -1310,11 +1311,11 @@
 
   // GetHFSDecomposedForm() returns an empty string in an error case.
   if (hfs1.empty() || hfs2.empty()) {
-    ScopedCFTypeRef<CFStringRef> cfstring1(CFStringCreateWithBytesNoCopy(
+    apple::ScopedCFTypeRef<CFStringRef> cfstring1(CFStringCreateWithBytesNoCopy(
         NULL, reinterpret_cast<const UInt8*>(string1.data()),
         checked_cast<CFIndex>(string1.length()), kCFStringEncodingUTF8, false,
         kCFAllocatorNull));
-    ScopedCFTypeRef<CFStringRef> cfstring2(CFStringCreateWithBytesNoCopy(
+    apple::ScopedCFTypeRef<CFStringRef> cfstring2(CFStringCreateWithBytesNoCopy(
         NULL, reinterpret_cast<const UInt8*>(string2.data()),
         checked_cast<CFIndex>(string2.length()), kCFStringEncodingUTF8, false,
         kCFAllocatorNull));
@@ -1331,8 +1332,8 @@
       return 0;
     }
 
-    return static_cast<int>(
-        CFStringCompare(cfstring1, cfstring2, kCFCompareCaseInsensitive));
+    return static_cast<int>(CFStringCompare(cfstring1.get(), cfstring2.get(),
+                                            kCFCompareCaseInsensitive));
   }
 
   return HFSFastUnicodeCompare(hfs1, hfs2);
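The mechanical change in this file: base::ScopedCFTypeRef becomes base::apple::ScopedCFTypeRef and call sites now pass .get() explicitly rather than relying on conversion to the raw CF type. A minimal sketch of the new spelling (the helper is hypothetical):

#include <CoreFoundation/CoreFoundation.h>

#include "base/apple/scoped_cftyperef.h"

CFIndex StringLength(const base::apple::ScopedCFTypeRef<CFStringRef>& str) {
  // Before: CFStringGetLength(str);
  // After: pass the underlying CFStringRef explicitly.
  return CFStringGetLength(str.get());
}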
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index 9673a48..6555218 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -9,6 +9,7 @@
 #include <sstream>
 
 #include "base/files/safe_base_name.h"
+#include "base/strings/utf_ostream_operators.h"
 #include "base/strings/utf_string_conversions.h"
 #include "build/build_config.h"
 #include "build/buildflag.h"
diff --git a/base/files/file_path_watcher.cc b/base/files/file_path_watcher.cc
index daf7683..44ce2f0 100644
--- a/base/files/file_path_watcher.cc
+++ b/base/files/file_path_watcher.cc
@@ -7,6 +7,9 @@
 
 #include "base/files/file_path_watcher.h"
 
+#include <memory>
+#include <utility>
+
 #include "base/check.h"
 #include "base/files/file_path.h"
 #include "build/build_config.h"
@@ -52,6 +55,15 @@
   return impl_->WatchWithOptions(path, options, callback);
 }
 
+bool FilePathWatcher::WatchWithChangeInfo(
+    const FilePath& path,
+    const WatchOptions& options,
+    const CallbackWithChangeInfo& callback) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  DCHECK(path.IsAbsolute());
+  return impl_->WatchWithChangeInfo(path, options, callback);
+}
+
 bool FilePathWatcher::PlatformDelegate::WatchWithOptions(
     const FilePath& path,
     const WatchOptions& options,
@@ -59,4 +71,16 @@
   return Watch(path, options.type, callback);
 }
 
+bool FilePathWatcher::PlatformDelegate::WatchWithChangeInfo(
+    const FilePath& path,
+    const WatchOptions& options,
+    const CallbackWithChangeInfo& callback) {
+  return Watch(path, options.type, base::BindRepeating(callback, ChangeInfo()));
+}
+
+FilePathWatcher::FilePathWatcher(std::unique_ptr<PlatformDelegate> delegate) {
+  DETACH_FROM_SEQUENCE(sequence_checker_);
+  impl_ = std::move(delegate);
+}
+
 }  // namespace base
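A hypothetical call site for the new change-info API declared in the header changes below; WatchOptions is default-constructed (assuming its members have defaults) and the lambda only inspects the extra fields:

#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "base/functional/bind.h"

bool WatchWithInfo(base::FilePathWatcher& watcher,
                   const base::FilePath& path) {
  return watcher.WatchWithChangeInfo(
      path, base::FilePathWatcher::WatchOptions(),
      base::BindRepeating([](const base::FilePathWatcher::ChangeInfo& info,
                             const base::FilePath& changed_path, bool error) {
        // `info.change_type` and `info.file_path_type` are best-effort hints;
        // implementations that cannot classify a change report kUnsupported /
        // kUnknown, and the default delegate falls back to plain Watch().
      }));
}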
diff --git a/base/files/file_path_watcher.h b/base/files/file_path_watcher.h
index b54b557..100fefd 100644
--- a/base/files/file_path_watcher.h
+++ b/base/files/file_path_watcher.h
@@ -8,19 +8,21 @@
 #define BASE_FILES_FILE_PATH_WATCHER_H_
 
 #include <memory>
+#include <string>
 #include <utility>
 
 #include "base/base_export.h"
+#include "base/containers/enum_set.h"
+#include "base/files/file_path.h"
 #include "base/functional/callback_forward.h"
 #include "base/memory/scoped_refptr.h"
 #include "base/sequence_checker.h"
 #include "base/task/sequenced_task_runner.h"
 #include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
 
-class FilePath;
-
 // This class lets you register interest in changes on a FilePath.
 // The callback will get called whenever the file or directory referenced by the
 // FilePath is changed, including created or deleted. Due to limitations in the
@@ -34,6 +36,50 @@
 // Must be destroyed on the sequence that invokes Watch().
 class BASE_EXPORT FilePathWatcher {
  public:
+  // Type of change which occurred on the affected path. Note that this may
+  // differ from the watched path, e.g. in the case of recursive watches.
+  enum class ChangeType {
+    kUnsupported,  // The implementation does not support change types.
+    kCreated,
+    kDeleted,
+    kModified,  // Includes modifications to either file contents or attributes.
+    kMoved
+  };
+
+  // Path type of the affected path. Note that this may differ from the watched
+  // path, e.g. in the case of recursive watches.
+  enum class FilePathType {
+    kUnknown,  // The implementation could not determine the path type or does
+               // not support path types.
+    kDirectory,
+    kFile,
+  };
+
+  // Information about the file system change. This information should be as
+  // specific as the underlying platform allows. For example, when watching
+  // directory foo/, creating file foo/bar.txt should be reported as a change
+  // with a `kCreated` change type and a `kFile` path type rather than as a
+  // modification to directory foo/. Due to limitations on some platforms, this
+  // is not always possible; callers should treat this information as a strong
+  // hint, but still be capable of handling events where it is not known.
+  struct ChangeInfo {
+    FilePathType file_path_type = FilePathType::kUnknown;
+    ChangeType change_type = ChangeType::kUnsupported;
+    // Can be used to associate related events. For example, renaming a file may
+    // trigger separate "moved from" and "moved to" events with the same
+    // `cookie` value.
+    //
+    // TODO(https://crbug.com/1425601): This is currently only used to associate
+    // `kMoved` events, and requires all consumers to implement the same logic
+    // to coalesce these events. Consider upstreaming this event coalescing logic
+    // to the platform-specific implementations and then replacing `cookie` with
+    // the file path that the file was moved from, if this is known.
+    absl::optional<uint32_t> cookie;
+  };
+
+  // TODO(https://crbug.com/1425601): Rename this now that this class declares
+  // other kinds of types.
   enum class Type {
     // Indicates that the watcher should watch the given path and its
     // ancestors for changes. If the path does not exist, its ancestors will
@@ -74,6 +120,9 @@
   // that case, the callback won't be invoked again.
   using Callback =
       base::RepeatingCallback<void(const FilePath& path, bool error)>;
+  // Same as above, but includes more information about the change, if known.
+  using CallbackWithChangeInfo = RepeatingCallback<
+      void(const ChangeInfo&, const FilePath& path, bool error)>;
 
   // Used internally to encapsulate different members on different platforms.
   class PlatformDelegate {
@@ -91,11 +140,19 @@
                                      Type type,
                                      const Callback& callback) = 0;
 
-    // A new, more general API. It can deal with multiple options.
+    // A more general API which can deal with multiple options.
     [[nodiscard]] virtual bool WatchWithOptions(const FilePath& path,
                                                 const WatchOptions& options,
                                                 const Callback& callback);
 
+    // Watches the specified `path` according to the given `options`.
+    // `callback` is invoked for each subsequent modification, with a
+    // `ChangeInfo` populated with the fields supported by the implementation.
+    [[nodiscard]] virtual bool WatchWithChangeInfo(
+        const FilePath& path,
+        const WatchOptions& options,
+        const CallbackWithChangeInfo& callback);
+
     // Stop watching. This is called from FilePathWatcher's dtor in order to
     // allow to shut down properly while the object is still alive.
     virtual void Cancel() = 0;
@@ -144,12 +201,21 @@
   // FileDescriptorWatcher.
   bool Watch(const FilePath& path, Type type, const Callback& callback);
 
-  // A new, more general API. It can deal with multiple options.
+  // A more general API which can deal with multiple options.
   bool WatchWithOptions(const FilePath& path,
                         const WatchOptions& options,
                         const Callback& callback);
 
+  // Same as above, but `callback` includes more information about the change,
+  // if known. On platforms for which change information is not supported,
+  // `callback` is called with a default-constructed `ChangeInfo`.
+  bool WatchWithChangeInfo(const FilePath& path,
+                           const WatchOptions& options,
+                           const CallbackWithChangeInfo& callback);
+
  private:
+  explicit FilePathWatcher(std::unique_ptr<PlatformDelegate> delegate);
+
   std::unique_ptr<PlatformDelegate> impl_;
 
   SEQUENCE_CHECKER(sequence_checker_);
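As a usage illustration for the API added above, here is a minimal, hypothetical caller of `WatchWithChangeInfo()`. The helper name and the logging choices are assumptions for the sketch, not part of this change; only `FilePathWatcher`, `WatchOptions`, `ChangeInfo`, and `ChangeType` come from the header, and the callback runs wherever the implementation invokes it unless the caller posts it back to its own sequence.

#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "base/functional/bind.h"
#include "base/logging.h"

// Hypothetical helper: starts a non-recursive watch and logs created paths.
bool WatchForDetailedChanges(base::FilePathWatcher& watcher,
                             const base::FilePath& path) {
  return watcher.WatchWithChangeInfo(
      path,
      base::FilePathWatcher::WatchOptions{
          .type = base::FilePathWatcher::Type::kNonRecursive,
          .report_modified_path = true},
      base::BindRepeating([](const base::FilePathWatcher::ChangeInfo& info,
                             const base::FilePath& changed_path, bool error) {
        if (error) {
          LOG(ERROR) << "Watch error for " << changed_path;
          return;
        }
        // `change_type` is kUnsupported on platforms without detailed info.
        if (info.change_type == base::FilePathWatcher::ChangeType::kCreated) {
          VLOG(1) << "Created: " << changed_path;
        }
      }));
}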
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
index 6c9461e..0b30e82 100644
--- a/base/files/file_path_watcher_fsevents.cc
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -9,11 +9,11 @@
 #include <algorithm>
 #include <list>
 
+#include "base/apple/scoped_cftyperef.h"
 #include "base/check.h"
 #include "base/files/file_util.h"
 #include "base/functional/bind.h"
 #include "base/lazy_instance.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/strings/stringprintf.h"
 #include "base/task/sequenced_task_runner.h"
 #include "base/threading/scoped_blocking_call.h"
@@ -99,8 +99,8 @@
   // captured by the block's scope.
   const FilePath path_copy(path);
 
-  dispatch_async(queue_, ^{
-      StartEventStream(start_event, path_copy);
+  dispatch_async(queue_.get(), ^{
+    StartEventStream(start_event, path_copy);
   });
   return true;
 }
@@ -113,7 +113,7 @@
   // Switch to the dispatch queue to tear down the event stream. As the queue is
   // owned by |this|, and this method is called from the destructor, execute the
   // block synchronously.
-  dispatch_sync(queue_, ^{
+  dispatch_sync(queue_.get(), ^{
     if (fsevent_stream_) {
       DestroyEventStream();
       target_.clear();
@@ -166,7 +166,7 @@
                          if (!weak_watcher)
                            return;
                          FilePathWatcherFSEvents* watcher = weak_watcher.get();
-                         dispatch_async(watcher->queue_, ^{
+                         dispatch_async(watcher->queue_.get(), ^{
                            watcher->UpdateEventStream(root_change_at);
                          });
                        },
@@ -213,13 +213,13 @@
   if (fsevent_stream_)
     DestroyEventStream();
 
-  ScopedCFTypeRef<CFStringRef> cf_path(CFStringCreateWithCString(
+  apple::ScopedCFTypeRef<CFStringRef> cf_path(CFStringCreateWithCString(
       NULL, resolved_target_.value().c_str(), kCFStringEncodingMacHFS));
-  ScopedCFTypeRef<CFStringRef> cf_dir_path(CFStringCreateWithCString(
+  apple::ScopedCFTypeRef<CFStringRef> cf_dir_path(CFStringCreateWithCString(
       NULL, resolved_target_.DirName().value().c_str(),
       kCFStringEncodingMacHFS));
   CFStringRef paths_array[] = { cf_path.get(), cf_dir_path.get() };
-  ScopedCFTypeRef<CFArrayRef> watched_paths(
+  apple::ScopedCFTypeRef<CFArrayRef> watched_paths(
       CFArrayCreate(NULL, reinterpret_cast<const void**>(paths_array),
                     std::size(paths_array), &kCFTypeArrayCallBacks));
 
@@ -230,12 +230,10 @@
   context.release = NULL;
   context.copyDescription = NULL;
 
-  fsevent_stream_ = FSEventStreamCreate(NULL, &FSEventsCallback, &context,
-                                        watched_paths,
-                                        start_event,
-                                        kEventLatencySeconds,
-                                        kFSEventStreamCreateFlagWatchRoot);
-  FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
+  fsevent_stream_ = FSEventStreamCreate(
+      NULL, &FSEventsCallback, &context, watched_paths.get(), start_event,
+      kEventLatencySeconds, kFSEventStreamCreateFlagWatchRoot);
+  FSEventStreamSetDispatchQueue(fsevent_stream_, queue_.get());
 
   if (!FSEventStreamStart(fsevent_stream_)) {
     task_runner()->PostTask(FROM_HERE,
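The mechanical change in this file is the move from `base::mac::ScopedCFTypeRef` to `base::apple::ScopedCFTypeRef` (and the matching dispatch-queue wrapper), which in this form no longer convert implicitly to the raw handle. A tiny illustrative sketch of the resulting call pattern, not taken from this change:

#include <CoreFoundation/CoreFoundation.h>

#include "base/apple/scoped_cftyperef.h"

CFIndex ExampleStringLength() {
  base::apple::ScopedCFTypeRef<CFStringRef> name(CFStringCreateWithCString(
      nullptr, "example", kCFStringEncodingUTF8));
  // The scoped wrapper owns the CFStringRef; raw handles are now obtained
  // explicitly with .get() at CoreFoundation call sites.
  return CFStringGetLength(name.get());
}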
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h
index 989aed2..8377ddb 100644
--- a/base/files/file_path_watcher_fsevents.h
+++ b/base/files/file_path_watcher_fsevents.h
@@ -10,9 +10,9 @@
 
 #include <vector>
 
+#include "base/apple/scoped_dispatch_object.h"
 #include "base/files/file_path.h"
 #include "base/files/file_path_watcher.h"
-#include "base/mac/scoped_dispatch_object.h"
 #include "base/memory/weak_ptr.h"
 
 namespace base {
@@ -76,7 +76,7 @@
   FilePathWatcher::Callback callback_;
 
   // The dispatch queue on which the event stream is scheduled.
-  ScopedDispatchObject<dispatch_queue_t> queue_;
+  apple::ScopedDispatchObject<dispatch_queue_t> queue_;
 
   // Target path to watch (passed to callback).
   // (Only accessed from the libdispatch queue.)
diff --git a/base/files/file_path_watcher_inotify.cc b/base/files/file_path_watcher_inotify.cc
index 5e29c0d..e37eaa7 100644
--- a/base/files/file_path_watcher_inotify.cc
+++ b/base/files/file_path_watcher_inotify.cc
@@ -29,6 +29,7 @@
 #include "base/files/file_path_watcher_inotify.h"
 #include "base/files/file_util.h"
 #include "base/functional/bind.h"
+#include "base/functional/callback_helpers.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/logging.h"
@@ -43,6 +44,7 @@
 #include "base/threading/scoped_blocking_call.h"
 #include "base/trace_event/base_tracing.h"
 #include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
 
@@ -71,6 +73,21 @@
 // Used by test to override inotify watcher limit.
 size_t g_override_max_inotify_watches = 0u;
 
+FilePathWatcher::ChangeType ToChangeType(const inotify_event* const event) {
+  // Greedily select the most specific change type. It's possible that multiple
+  // types may apply, so this is ordered by specificity (e.g. "created" may also
+  // imply "modified", but the former is more useful).
+  if (event->mask & (IN_MOVED_FROM | IN_MOVED_TO)) {
+    return FilePathWatcher::ChangeType::kMoved;
+  } else if (event->mask & IN_CREATE) {
+    return FilePathWatcher::ChangeType::kCreated;
+  } else if (event->mask & IN_DELETE) {
+    return FilePathWatcher::ChangeType::kDeleted;
+  } else {
+    return FilePathWatcher::ChangeType::kModified;
+  }
+}
+
 class InotifyReaderThreadDelegate final : public PlatformThread::Delegate {
  public:
   explicit InotifyReaderThreadDelegate(int inotify_fd)
@@ -163,14 +180,14 @@
   // |fired_watch| identifies the watch that fired, |child| indicates what has
   // changed, and is relative to the currently watched path for |fired_watch|.
   //
+  // |change_info| includes information about the change.
   // |created| is true if the object appears.
   // |deleted| is true if the object disappears.
-  // |is_dir| is true if the object is a directory.
   void OnFilePathChanged(InotifyReader::Watch fired_watch,
                          const FilePath::StringType& child,
+                         FilePathWatcher::ChangeInfo change_info,
                          bool created,
-                         bool deleted,
-                         bool is_dir);
+                         bool deleted);
 
   // Returns whether the number of inotify watches of this FilePathWatcherImpl
   // would exceed the limit if adding one more.
@@ -191,6 +208,11 @@
                         const WatchOptions& flags,
                         const FilePathWatcher::Callback& callback) override;
 
+  bool WatchWithChangeInfo(
+      const FilePath& path,
+      const WatchOptions& options,
+      const FilePathWatcher::CallbackWithChangeInfo& callback) override;
+
   // Cancel the watch. This unregisters the instance with InotifyReader.
   void Cancel() override;
 
@@ -246,7 +268,7 @@
   bool HasValidWatchVector() const;
 
   // Callback to notify upon changes.
-  FilePathWatcher::Callback callback_;
+  FilePathWatcher::CallbackWithChangeInfo callback_;
 
   // The file or directory we're supposed to watch.
   FilePath target_;
@@ -390,13 +412,22 @@
   auto& watcher_map = watchers_it->second;
   for (const auto& entry : watcher_map) {
     auto& watcher_entry = entry.second;
+
+    FilePathWatcher::ChangeInfo change_info{
+        .file_path_type = event->mask & IN_ISDIR
+                              ? FilePathWatcher::FilePathType::kDirectory
+                              : FilePathWatcher::FilePathType::kFile,
+        .change_type = ToChangeType(event),
+        .cookie =
+            event->cookie ? absl::make_optional(event->cookie) : absl::nullopt,
+    };
+    bool created = event->mask & (IN_CREATE | IN_MOVED_TO);
+    bool deleted = event->mask & (IN_DELETE | IN_MOVED_FROM);
     watcher_entry.task_runner->PostTask(
         FROM_HERE,
         BindOnce(&FilePathWatcherImpl::OnFilePathChanged, watcher_entry.watcher,
-                 static_cast<Watch>(event->wd), child,
-                 event->mask & (IN_CREATE | IN_MOVED_TO),
-                 event->mask & (IN_DELETE | IN_MOVED_FROM),
-                 event->mask & IN_ISDIR));
+                 static_cast<Watch>(event->wd), child, std::move(change_info),
+                 created, deleted));
   }
 }
 
@@ -412,11 +443,12 @@
   DCHECK(!task_runner() || task_runner()->RunsTasksInCurrentSequence());
 }
 
-void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
-                                            const FilePath::StringType& child,
-                                            bool created,
-                                            bool deleted,
-                                            bool is_dir) {
+void FilePathWatcherImpl::OnFilePathChanged(
+    InotifyReader::Watch fired_watch,
+    const FilePath::StringType& child,
+    FilePathWatcher::ChangeInfo change_info,
+    bool created,
+    bool deleted) {
   DCHECK(task_runner()->RunsTasksInCurrentSequence());
   DCHECK(!watches_.empty());
   DCHECK(HasValidWatchVector());
@@ -488,34 +520,38 @@
     if (target_changed || (change_on_target_path && deleted) ||
         (change_on_target_path && created && PathExists(target_))) {
       if (!did_update) {
-        if (!UpdateRecursiveWatches(fired_watch, is_dir)) {
+        if (!UpdateRecursiveWatches(
+                fired_watch, change_info.file_path_type ==
+                                 FilePathWatcher::FilePathType::kDirectory)) {
           exceeded_limit = true;
           break;
         }
         did_update = true;
       }
-      if (report_modified_path_ && !change_on_target_path) {
-        callback_.Run(target_.Append(child),
-                      /*error=*/false);  // `this` may be deleted.
-      } else {
-        callback_.Run(target_, /*error=*/false);  // `this` may be deleted.
-      }
+      FilePath modified_path = report_modified_path_ && !change_on_target_path
+                                   ? target_.Append(child)
+                                   : target_;
+      callback_.Run(std::move(change_info), modified_path,
+                    /*error=*/false);  // `this` may be deleted.
       return;
     }
   }
 
   if (!exceeded_limit && Contains(recursive_paths_by_watch_, fired_watch)) {
     if (!did_update) {
-      if (!UpdateRecursiveWatches(fired_watch, is_dir))
+      if (!UpdateRecursiveWatches(
+              fired_watch, change_info.file_path_type ==
+                               FilePathWatcher::FilePathType::kDirectory)) {
         exceeded_limit = true;
+      }
     }
     if (!exceeded_limit) {
-      if (report_modified_path_) {
-        callback_.Run(recursive_paths_by_watch_[fired_watch].Append(child),
-                      /*error=*/false);  // `this` may be deleted.
-      } else {
-        callback_.Run(target_, /*error=*/false);  // `this` may be deleted.
-      }
+      FilePath modified_path =
+          report_modified_path_
+              ? recursive_paths_by_watch_[fired_watch].Append(child)
+              : target_;
+      callback_.Run(std::move(change_info), modified_path,
+                    /*error=*/false);  // `this` may be deleted.
       return;
     }
   }
@@ -528,8 +564,8 @@
     auto callback = callback_;
     Cancel();
 
-    // Fires the "error=true" callback.
-    callback.Run(target_, /*error=*/true);  // `this` may be deleted.
+    // Fires the error callback. `this` may be deleted as a result of this call.
+    callback.Run(FilePathWatcher::ChangeInfo(), target_, /*error=*/true);
   }
 }
 
@@ -556,17 +592,39 @@
 bool FilePathWatcherImpl::Watch(const FilePath& path,
                                 Type type,
                                 const FilePathWatcher::Callback& callback) {
+  return WatchWithChangeInfo(
+      path, WatchOptions{.type = type},
+      base::IgnoreArgs<const FilePathWatcher::ChangeInfo&>(
+          base::BindRepeating(std::move(callback))));
+}
+
+bool FilePathWatcherImpl::WatchWithOptions(
+    const FilePath& path,
+    const WatchOptions& options,
+    const FilePathWatcher::Callback& callback) {
+  return WatchWithChangeInfo(
+      path, options,
+      base::IgnoreArgs<const FilePathWatcher::ChangeInfo&>(
+          base::BindRepeating(std::move(callback))));
+}
+
+bool FilePathWatcherImpl::WatchWithChangeInfo(
+    const FilePath& path,
+    const WatchOptions& options,
+    const FilePathWatcher::CallbackWithChangeInfo& callback) {
   DCHECK(target_.empty());
 
   set_task_runner(SequencedTaskRunner::GetCurrentDefault());
   callback_ = callback;
   target_ = path;
-  type_ = type;
+  type_ = options.type;
+  report_modified_path_ = options.report_modified_path;
 
   std::vector<FilePath::StringType> comps = target_.GetComponents();
   DCHECK(!comps.empty());
-  for (size_t i = 1; i < comps.size(); ++i)
+  for (size_t i = 1; i < comps.size(); ++i) {
     watches_.emplace_back(comps[i]);
+  }
   watches_.emplace_back(FilePath::StringType());
 
   if (!UpdateWatches()) {
@@ -578,14 +636,6 @@
   return true;
 }
 
-bool FilePathWatcherImpl::WatchWithOptions(
-    const FilePath& path,
-    const WatchOptions& options,
-    const FilePathWatcher::Callback& callback) {
-  report_modified_path_ = options.report_modified_path;
-  return Watch(path, options.type, callback);
-}
-
 void FilePathWatcherImpl::Cancel() {
   if (!callback_) {
     // Watch() was never called.
@@ -840,10 +890,8 @@
   g_override_max_inotify_watches = 0u;
 }
 
-FilePathWatcher::FilePathWatcher() {
-  DETACH_FROM_SEQUENCE(sequence_checker_);
-  impl_ = std::make_unique<FilePathWatcherImpl>();
-}
+FilePathWatcher::FilePathWatcher()
+    : FilePathWatcher(std::make_unique<FilePathWatcherImpl>()) {}
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
 // Put inside "BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)" because Android
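The TODO on `ChangeInfo::cookie` notes that consumers currently have to pair `kMoved` events themselves. Below is a hypothetical, consumer-side sketch of that pairing logic, assuming both halves of a rename arrive with the same cookie (as the inotify implementation above provides); none of these names exist in the tree.

#include <stdint.h>

#include <map>

#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

// Hypothetical helper that pairs the two kMoved events of a rename by cookie.
class MoveCoalescer {
 public:
  // Returns the "moved from" path once both halves of a rename have been
  // seen; otherwise remembers this half and returns absl::nullopt.
  absl::optional<base::FilePath> OnMoved(
      const base::FilePathWatcher::ChangeInfo& info,
      const base::FilePath& path) {
    if (info.change_type != base::FilePathWatcher::ChangeType::kMoved ||
        !info.cookie.has_value()) {
      return absl::nullopt;
    }
    auto it = pending_.find(*info.cookie);
    if (it == pending_.end()) {
      // First half of the rename (typically the "moved from" event).
      pending_[*info.cookie] = path;
      return absl::nullopt;
    }
    base::FilePath moved_from = it->second;
    pending_.erase(it);
    return moved_from;
  }

 private:
  std::map<uint32_t, base::FilePath> pending_;
};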
diff --git a/base/files/file_path_watcher_mac.cc b/base/files/file_path_watcher_mac.cc
index 877ad02..fff1a4d 100644
--- a/base/files/file_path_watcher_mac.cc
+++ b/base/files/file_path_watcher_mac.cc
@@ -54,9 +54,7 @@
 
 }  // namespace
 
-FilePathWatcher::FilePathWatcher() {
-  DETACH_FROM_SEQUENCE(sequence_checker_);
-  impl_ = std::make_unique<FilePathWatcherImpl>();
-}
+FilePathWatcher::FilePathWatcher()
+    : FilePathWatcher(std::make_unique<FilePathWatcherImpl>()) {}
 
 }  // namespace base
diff --git a/base/files/file_path_watcher_stub.cc b/base/files/file_path_watcher_stub.cc
index 04ed36c..29b2f6d 100644
--- a/base/files/file_path_watcher_stub.cc
+++ b/base/files/file_path_watcher_stub.cc
@@ -38,9 +38,7 @@
 
 }  // namespace
 
-FilePathWatcher::FilePathWatcher() {
-  DETACH_FROM_SEQUENCE(sequence_checker_);
-  impl_ = std::make_unique<FilePathWatcherImpl>();
-}
+FilePathWatcher::FilePathWatcher()
+    : FilePathWatcher(std::make_unique<FilePathWatcherImpl>()) {}
 
 }  // namespace base
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
index efaad49..6727eb0 100644
--- a/base/files/file_path_watcher_unittest.cc
+++ b/base/files/file_path_watcher_unittest.cc
@@ -70,21 +70,70 @@
 enum class ExpectedEventsSinceLastWait { kNone, kSome };
 
 struct Event {
-  FilePath path;
   bool error;
-  // TODO(https://crbug.com/1425601): Support change types.
+  FilePath path;
+  FilePathWatcher::ChangeInfo change_info;
 
   bool operator==(const Event& other) const {
-    return error == other.error && path == other.path;
+    return error == other.error && path == other.path &&
+           change_info.file_path_type == other.change_info.file_path_type &&
+           change_info.change_type == other.change_info.change_type &&
+           // Don't compare the values of the cookies.
+           change_info.cookie.has_value() ==
+               other.change_info.cookie.has_value();
   }
 };
+using EventListMatcher = testing::Matcher<std::list<Event>>;
 
-std::ostream& operator<<(std::ostream& os, const Event& event) {
-  return os << "Event{ error: " << (event.error ? "true" : "false")
-            << ", path: " << event.path << " }";
+Event ToEvent(const FilePathWatcher::ChangeInfo& change_info,
+              const FilePath& path,
+              bool error) {
+  return Event{.error = error, .path = path, .change_info = change_info};
 }
 
-using EventListMatcher = testing::Matcher<std::list<Event>>;
+std::ostream& operator<<(std::ostream& os,
+                         const FilePathWatcher::ChangeType& change_type) {
+  switch (change_type) {
+    case FilePathWatcher::ChangeType::kUnsupported:
+      return os << "unsupported";
+    case FilePathWatcher::ChangeType::kCreated:
+      return os << "created";
+    case FilePathWatcher::ChangeType::kDeleted:
+      return os << "deleted";
+    case FilePathWatcher::ChangeType::kModified:
+      return os << "modified";
+    case FilePathWatcher::ChangeType::kMoved:
+      return os << "moved";
+  }
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const FilePathWatcher::FilePathType& file_path_type) {
+  switch (file_path_type) {
+    case FilePathWatcher::FilePathType::kUnknown:
+      return os << "Unknown";
+    case FilePathWatcher::FilePathType::kFile:
+      return os << "File";
+    case FilePathWatcher::FilePathType::kDirectory:
+      return os << "Directory";
+  }
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const FilePathWatcher::ChangeInfo& change_info) {
+  return os << "ChangeInfo{ file_path_type: " << change_info.file_path_type
+            << ", change_type: " << change_info.change_type
+            << ", cookie: " << change_info.cookie.has_value() << " }";
+}
+
+std::ostream& operator<<(std::ostream& os, const Event& event) {
+  if (event.error) {
+    return os << "Event{ ERROR }";
+  }
+
+  return os << "Event{ path: " << event.path
+            << ", change_info: " << event.change_info << " }";
+}
 
 void SpinEventLoopForABit() {
   base::RunLoop loop;
@@ -101,6 +150,47 @@
   return listener.str();
 }
 
+inline constexpr auto HasPath = [](const FilePath& path) {
+  return testing::Field(&Event::path, path);
+};
+inline constexpr auto HasErrored = []() {
+  return testing::Field(&Event::error, testing::IsTrue());
+};
+inline constexpr auto HasCookie = []() {
+  return testing::Field(
+      &Event::change_info,
+      testing::Field(&FilePathWatcher::ChangeInfo::cookie, testing::IsTrue()));
+};
+inline constexpr auto IsType =
+    [](const FilePathWatcher::ChangeType& change_type) {
+      return testing::Field(
+          &Event::change_info,
+          testing::Field(&FilePathWatcher::ChangeInfo::change_type,
+                         change_type));
+    };
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+inline constexpr auto IsFile = []() {
+  return testing::Field(
+      &Event::change_info,
+      testing::Field(&FilePathWatcher::ChangeInfo::file_path_type,
+                     FilePathWatcher::FilePathType::kFile));
+};
+inline constexpr auto IsDirectory = []() {
+  return testing::Field(
+      &Event::change_info,
+      testing::Field(&FilePathWatcher::ChangeInfo::file_path_type,
+                     FilePathWatcher::FilePathType::kDirectory));
+};
+#else
+inline constexpr auto IsUnknownPathType = []() {
+  return testing::Field(
+      &Event::change_info,
+      testing::Field(&FilePathWatcher::ChangeInfo::file_path_type,
+                     FilePathWatcher::FilePathType::kUnknown));
+};
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
+        // BUILDFLAG(IS_ANDROID)
+
 // Enables an accumulative, add-as-you-go pattern for expecting events:
 //   - Do something that should fire `event1` on `delegate`
 //   - Add `event1` to an `accumulated_event_expecter`
@@ -123,7 +213,8 @@
 //   - Wait until `delegate` matches { `event1`, `event2` }
 //
 // The potential for false-positives is much less if event types are known. We
-// should consider moving towards the latter pattern once that is supported.
+// should consider moving towards the latter pattern
+// (see `FilePathWatcherWithChangeInfoTest`) once that is supported.
 class AccumulatingEventExpecter {
  public:
   EventListMatcher GetMatcher() {
@@ -136,10 +227,8 @@
     return temp;
   }
 
-  // TODO(https://crbug.com/1425601): Add a version of this method which
-  // includes the type of change.
   void AddExpectedEventForPath(const FilePath& path, bool error = false) {
-    expected_events_.emplace_back(path, error);
+    expected_events_.emplace_back(ToEvent({}, path, error));
     expected_events_since_last_wait_ = ExpectedEventsSinceLastWait::kSome;
   }
 
@@ -157,6 +246,10 @@
   virtual ~TestDelegateBase() = default;
 
   virtual void OnFileChanged(const FilePath& path, bool error) = 0;
+  virtual void OnFileChangedWithInfo(
+      const FilePathWatcher::ChangeInfo& change_info,
+      const FilePath& path,
+      bool error) = 0;
 };
 
 // Receives and accumulates notifications from a specific `FilePathWatcher`.
@@ -172,7 +265,15 @@
   // TestDelegateBase:
   void OnFileChanged(const FilePath& path, bool error) override {
     DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-    received_events_.emplace_back(path, error);
+    Event event = ToEvent({}, path, error);
+    received_events_.emplace_back(std::move(event));
+  }
+  void OnFileChangedWithInfo(const FilePathWatcher::ChangeInfo& change_info,
+                             const FilePath& path,
+                             bool error) override {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    Event event = ToEvent(change_info, path, error);
+    received_events_.emplace_back(std::move(event));
   }
 
   // Gives all in-flight events a chance to arrive, then forgets all events that
@@ -206,6 +307,13 @@
         << Explain(matcher, received_events_);
   }
   // Convenience method for above.
+  void RunUntilEventsMatch(const EventListMatcher& matcher,
+                           const Location& location = FROM_HERE) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    return RunUntilEventsMatch(matcher, ExpectedEventsSinceLastWait::kSome,
+                               location);
+  }
+  // Convenience method for above.
   void RunUntilEventsMatch(AccumulatingEventExpecter& event_expecter,
                            const Location& location = FROM_HERE) {
     DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -214,6 +322,18 @@
         event_expecter.GetMatcher(),
         event_expecter.GetAndResetExpectedEventsSinceLastWait(), location);
   }
+  // Convenience method for above when no events are expected.
+  void SpinAndExpectNoEvents(const Location& location = FROM_HERE) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+    return RunUntilEventsMatch(testing::IsEmpty(),
+                               ExpectedEventsSinceLastWait::kNone, location);
+  }
+
+  const std::list<Event>& events() const {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    return received_events_;
+  }
 
  private:
   SEQUENCE_CHECKER(sequence_checker_);
@@ -280,6 +400,11 @@
                              TestDelegateBase* delegate,
                              FilePathWatcher::WatchOptions watch_options);
 
+  bool SetupWatchWithChangeInfo(const FilePath& target,
+                                FilePathWatcher* watcher,
+                                TestDelegateBase* delegate,
+                                FilePathWatcher::WatchOptions watch_options);
+
   test::TaskEnvironment task_environment_;
 
   ScopedTempDir temp_dir_;
@@ -291,8 +416,7 @@
                                      FilePathWatcher::Type watch_type) {
   return watcher->Watch(
       target, watch_type,
-      BindPostTaskToCurrentDefault(BindRepeating(
-          &TestDelegateBase::OnFileChanged, delegate->AsWeakPtr())));
+      BindRepeating(&TestDelegateBase::OnFileChanged, delegate->AsWeakPtr()));
 }
 
 bool FilePathWatcherTest::SetupWatchWithOptions(
@@ -302,8 +426,18 @@
     FilePathWatcher::WatchOptions watch_options) {
   return watcher->WatchWithOptions(
       target, watch_options,
+      BindRepeating(&TestDelegateBase::OnFileChanged, delegate->AsWeakPtr()));
+}
+
+bool FilePathWatcherTest::SetupWatchWithChangeInfo(
+    const FilePath& target,
+    FilePathWatcher* watcher,
+    TestDelegateBase* delegate,
+    FilePathWatcher::WatchOptions watch_options) {
+  return watcher->WatchWithChangeInfo(
+      target, watch_options,
       BindPostTaskToCurrentDefault(BindRepeating(
-          &TestDelegateBase::OnFileChanged, delegate->AsWeakPtr())));
+          &TestDelegateBase::OnFileChangedWithInfo, delegate->AsWeakPtr())));
 }
 
 // Basic test: Create the file and verify that we notice.
@@ -321,6 +455,36 @@
   delegate.RunUntilEventsMatch(event_expecter);
 }
 
+// Basic test: Create the directory and verify that we notice.
+TEST_F(FilePathWatcherTest, NewDirectory) {
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  AccumulatingEventExpecter event_expecter;
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, &delegate,
+                         FilePathWatcher::Type::kNonRecursive));
+
+  ASSERT_TRUE(CreateDirectory(test_file()));
+  event_expecter.AddExpectedEventForPath(test_file());
+  delegate.RunUntilEventsMatch(event_expecter);
+}
+
+// Basic test: Create the directory under a recursive watch and verify that we
+// notice.
+TEST_F(FilePathWatcherTest, NewDirectoryRecursiveWatch) {
+  if (!FilePathWatcher::RecursiveWatchAvailable()) {
+    return;
+  }
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  AccumulatingEventExpecter event_expecter;
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, &delegate,
+                         FilePathWatcher::Type::kRecursive));
+
+  ASSERT_TRUE(CreateDirectory(test_file()));
+  event_expecter.AddExpectedEventForPath(test_file());
+  delegate.RunUntilEventsMatch(event_expecter);
+}
+
 // Verify that modifying the file is caught.
 TEST_F(FilePathWatcherTest, ModifiedFile) {
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -337,8 +501,73 @@
   delegate.RunUntilEventsMatch(event_expecter);
 }
 
-// Verify that moving the file into place is caught.
-TEST_F(FilePathWatcherTest, MovedFile) {
+// Verify that creating the parent directory of the watched file is not caught.
+TEST_F(FilePathWatcherTest, CreateParentDirectory) {
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+
+  ASSERT_TRUE(SetupWatch(child, &watcher, &delegate,
+                         FilePathWatcher::Type::kNonRecursive));
+
+  // Now make sure we do not get notified when the parent is created.
+  ASSERT_TRUE(CreateDirectory(parent));
+  delegate.SpinAndExpectNoEvents();
+}
+
+// Verify that changes to the sibling of the watched file are not caught.
+TEST_F(FilePathWatcherTest, CreateSiblingFile) {
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, &delegate,
+                         FilePathWatcher::Type::kNonRecursive));
+
+  // Now make sure we do not get notified if a sibling of the watched file is
+  // created or modified.
+  ASSERT_TRUE(WriteFile(test_file().AddExtensionASCII(".swap"), "content"));
+  ASSERT_TRUE(WriteFile(test_file().AddExtensionASCII(".swap"), "new content"));
+  delegate.SpinAndExpectNoEvents();
+}
+
+// Verify that changes to the sibling of the parent of the watched file are not
+// caught.
+TEST_F(FilePathWatcherTest, CreateParentSiblingFile) {
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath parent_sibling(temp_dir_.GetPath().AppendASCII("parent_sibling"));
+  FilePath child(parent.AppendASCII("child"));
+  ASSERT_TRUE(SetupWatch(child, &watcher, &delegate,
+                         FilePathWatcher::Type::kNonRecursive));
+
+  // Don't notice changes to a sibling directory of `parent` while `parent` does
+  // not exist.
+  ASSERT_TRUE(CreateDirectory(parent_sibling));
+  ASSERT_TRUE(DeletePathRecursively(parent_sibling));
+
+  // Don't notice changes to a sibling file of `parent` while `parent` does
+  // not exist.
+  ASSERT_TRUE(WriteFile(parent_sibling, "do not notice this"));
+  ASSERT_TRUE(DeleteFile(parent_sibling));
+
+  // Don't notice the creation of `parent`.
+  ASSERT_TRUE(CreateDirectory(parent));
+
+  // Don't notice changes to a sibling directory of `parent` while `parent`
+  // exists.
+  ASSERT_TRUE(CreateDirectory(parent_sibling));
+  ASSERT_TRUE(DeletePathRecursively(parent_sibling));
+
+  // Don't notice changes to a sibling file of `parent` while `parent` exists.
+  ASSERT_TRUE(WriteFile(parent_sibling, "do not notice this"));
+  ASSERT_TRUE(DeleteFile(parent_sibling));
+
+  delegate.SpinAndExpectNoEvents();
+}
+
+// Verify that moving an unwatched file to a watched path is caught.
+TEST_F(FilePathWatcherTest, MovedToFile) {
   FilePath source_file(temp_dir_.GetPath().AppendASCII("source"));
   ASSERT_TRUE(WriteFile(source_file, "content"));
 
@@ -354,6 +583,22 @@
   delegate.RunUntilEventsMatch(event_expecter);
 }
 
+// Verify that moving the watched file to an unwatched path is caught.
+TEST_F(FilePathWatcherTest, MovedFromFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  AccumulatingEventExpecter event_expecter;
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, &delegate,
+                         FilePathWatcher::Type::kNonRecursive));
+
+  // Now make sure we get notified if the file is moved away.
+  ASSERT_TRUE(Move(test_file(), temp_dir_.GetPath().AppendASCII("dest")));
+  event_expecter.AddExpectedEventForPath(test_file());
+  delegate.RunUntilEventsMatch(event_expecter);
+}
+
 TEST_F(FilePathWatcherTest, DeletedFile) {
   ASSERT_TRUE(WriteFile(test_file(), "content"));
 
@@ -382,7 +627,13 @@
   Deleter& operator=(const Deleter&) = delete;
   ~Deleter() override = default;
 
-  void OnFileChanged(const FilePath&, bool) override {
+  void OnFileChanged(const FilePath& /*path*/, bool /*error*/) override {
+    watcher_.reset();
+    std::move(done_closure_).Run();
+  }
+  void OnFileChangedWithInfo(const FilePathWatcher::ChangeInfo& /*change_info*/,
+                             const FilePath& /*path*/,
+                             bool /*error*/) override {
     watcher_.reset();
     std::move(done_closure_).Run();
   }
@@ -494,9 +745,8 @@
                          FilePathWatcher::Type::kNonRecursive));
 
   FilePath sub_path(temp_dir_.GetPath());
-  for (std::vector<std::string>::const_iterator d(dir_names.begin());
-       d != dir_names.end(); ++d) {
-    sub_path = sub_path.AppendASCII(*d);
+  for (const auto& dir_name : dir_names) {
+    sub_path = sub_path.AppendASCII(dir_name);
     ASSERT_TRUE(CreateDirectory(sub_path));
     // TODO(https://crbug.com/1432064): Expect that no events are fired.
   }
@@ -624,9 +874,6 @@
   ASSERT_TRUE(SetupWatch(subdir, &subdir_watcher, &subdir_delegate,
                          FilePathWatcher::Type::kNonRecursive));
 
-  // TODO(https://crbug.com/1432064): Add a test asserting that creation of the
-  // parent directory does not trigger an event.
-
   // Setup a directory hierarchy.
   // We should only get notified on `subdir_delegate` of its creation.
   ASSERT_TRUE(CreateDirectory(subdir));
@@ -899,7 +1146,7 @@
   FilePathWatcher watcher;
   TestDelegate delegate;
   AccumulatingEventExpecter event_expecter;
-  // Note that we are watching the symlink
+  // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, &delegate,
                          FilePathWatcher::Type::kNonRecursive));
 
@@ -1033,7 +1280,7 @@
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
   TestDelegate delegate;
-  // AccumulatingEventExpecter event_expecter;
+
   // Now create the link from dir.lnk pointing to dir but
   // neither dir nor dir/file exist yet.
   ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
@@ -1679,4 +1926,1008 @@
 
 #endif  // BUILDFLAG(IS_APPLE)
 
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+// TODO(https://crbug.com/1432064): Ideally, most of the tests above would be
+// parameterized in this way.
+// TODO(https://crbug.com/1425601): ChangeInfo is currently only supported by
+// the inotify-based implementation.
+class FilePathWatcherWithChangeInfoTest
+    : public FilePathWatcherTest,
+      public testing::WithParamInterface<
+          std::tuple<FilePathWatcher::Type, bool>> {
+ public:
+  void SetUp() override { FilePathWatcherTest::SetUp(); }
+
+ protected:
+  FilePathWatcher::Type type() const { return std::get<0>(GetParam()); }
+  bool report_modified_path() const { return std::get<1>(GetParam()); }
+
+  FilePathWatcher::WatchOptions GetWatchOptions() const {
+    return FilePathWatcher::WatchOptions{
+        .type = type(), .report_modified_path = report_modified_path()};
+  }
+};
+
+TEST_P(FilePathWatcherWithChangeInfoTest, NewFile) {
+  // Each change should have these attributes.
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  // Match the expected change types, in this order.
+  // TODO(https://crbug.com/1425601): Update this when change types are
+  // supported on more platforms.
+  static_assert(kExpectedEventsForNewFileWrite == 2);
+  const auto sequence_matcher =
+      testing::ElementsAre(IsType(FilePathWatcher::ChangeType::kCreated),
+                           IsType(FilePathWatcher::ChangeType::kModified));
+  // Put it all together.
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, NewDirectory) {
+  const auto matcher = testing::ElementsAre(testing::AllOf(
+      HasPath(test_file()), testing::Not(HasErrored()), IsDirectory(),
+      IsType(FilePathWatcher::ChangeType::kCreated),
+      testing::Not(HasCookie())));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(CreateDirectory(test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, ModifiedFile) {
+  // TODO(https://crbug.com/1425601): Some platforms will not support
+  // `ChangeType::kContentsModified`. Update this matcher once support for those
+  // platforms is added.
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kModified),
+                     testing::Not(HasCookie())));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(test_file(), "new content"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, MovedFile) {
+  // TODO(https://crbug.com/1425601): Some platforms will not provide separate
+  // events for "moved from" and "moved to". Update this matcher once support
+  // for those platforms is added.
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kMoved), HasCookie()));
+
+  FilePath source_file(temp_dir_.GetPath().AppendASCII("source"));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(Move(source_file, test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, MatchCookies) {
+  FilePath source_file(test_file().AppendASCII("source"));
+  FilePath dest_file(test_file().AppendASCII("dest"));
+
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kMoved), HasCookie()));
+  // TODO(https://crbug.com/1425601): Some platforms will not provide separate
+  // events for "moved from" and "moved to". Update this matcher once support
+  // for those platforms is added.
+  const auto sequence_matcher = testing::UnorderedElementsAre(
+      testing::AllOf(
+          HasPath(report_modified_path() ? source_file : test_file()),
+          IsType(FilePathWatcher::ChangeType::kMoved)),
+      testing::AllOf(HasPath(report_modified_path() ? dest_file : test_file()),
+                     IsType(FilePathWatcher::ChangeType::kMoved)));
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(test_file()));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(Move(source_file, dest_file));
+  delegate.RunUntilEventsMatch(matcher);
+
+  const auto& events = delegate.events();
+  ASSERT_THAT(events, testing::SizeIs(2));
+
+  EXPECT_TRUE(events.front().change_info.cookie.has_value());
+  EXPECT_EQ(events.front().change_info.cookie,
+            events.back().change_info.cookie);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DeletedFile) {
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kDeleted),
+                     testing::Not(HasCookie())));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(DeleteFile(test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DeletedDirectory) {
+  const auto matcher = testing::ElementsAre(testing::AllOf(
+      HasPath(test_file()), testing::Not(HasErrored()), IsDirectory(),
+      IsType(FilePathWatcher::ChangeType::kDeleted),
+      testing::Not(HasCookie())));
+
+  ASSERT_TRUE(CreateDirectory(test_file()));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(DeletePathRecursively(test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, MultipleWatchersSingleFile) {
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  // TODO(https://crbug.com/1425601): Update this when change types are
+  // supported on more platforms.
+  static_assert(kExpectedEventsForNewFileWrite == 2);
+  const auto sequence_matcher =
+      testing::ElementsAre(IsType(FilePathWatcher::ChangeType::kCreated),
+                           IsType(FilePathWatcher::ChangeType::kModified));
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  FilePathWatcher watcher1, watcher2;
+  TestDelegate delegate1, delegate2;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher1, &delegate1,
+                                       GetWatchOptions()));
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher2, &delegate2,
+                                       GetWatchOptions()));
+
+  // Expect each delegate to get notified of all changes.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  delegate1.RunUntilEventsMatch(matcher);
+  delegate2.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, NonExistentDirectory) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath file(dir.AppendASCII("file"));
+  const auto each_event_matcher =
+      testing::Each(testing::AllOf(HasPath(file), testing::Not(HasErrored()),
+                                   IsFile(), testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::IsSupersetOf({IsType(FilePathWatcher::ChangeType::kCreated),
+                             IsType(FilePathWatcher::ChangeType::kModified),
+                             IsType(FilePathWatcher::ChangeType::kDeleted)});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(file, &watcher, &delegate, GetWatchOptions()));
+
+  // The delegate is only watching the file. Parent directory creation should
+  // not trigger an event.
+  ASSERT_TRUE(CreateDirectory(dir));
+  // It may take some time for `watcher` to reconstruct its watch list, so spin
+  // for a bit while we ensure that creating the parent directory does not
+  // trigger an event.
+  delegate.RunUntilEventsMatch(testing::IsEmpty(),
+                               ExpectedEventsSinceLastWait::kNone);
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  ASSERT_TRUE(DeleteFile(file));
+
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DirectoryChain) {
+  FilePath path(temp_dir_.GetPath());
+  std::vector<std::string> dir_names;
+  for (int i = 0; i < 20; i++) {
+    std::string dir(StringPrintf("d%d", i));
+    dir_names.push_back(dir);
+    path = path.AppendASCII(dir);
+  }
+  FilePath file(path.AppendASCII("file"));
+
+  const auto each_event_matcher =
+      testing::Each(testing::AllOf(HasPath(file), testing::Not(HasErrored()),
+                                   IsFile(), testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::IsSupersetOf({IsType(FilePathWatcher::ChangeType::kCreated),
+                             IsType(FilePathWatcher::ChangeType::kModified)});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(file, &watcher, &delegate, GetWatchOptions()));
+
+  FilePath sub_path(temp_dir_.GetPath());
+  for (const auto& dir_name : dir_names) {
+    sub_path = sub_path.AppendASCII(dir_name);
+    ASSERT_TRUE(CreateDirectory(sub_path));
+  }
+  // Allow the watcher to reconstruct its watch list.
+  SpinEventLoopForABit();
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DisappearingDirectory) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath file(dir.AppendASCII("file"));
+
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(file), testing::Not(HasErrored()),
+                     IsType(FilePathWatcher::ChangeType::kDeleted),
+                     testing::Not(HasCookie())));
+  // TODO(https://crbug.com/1432044): inotify incorrectly reports an additional
+  // deletion event for the parent directory (while confusingly reporting the
+  // path as `file`). Once fixed, update this matcher to assert that only
+  // one event is received.
+  const auto sequence_matcher = testing::Contains(IsFile());
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(file, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(DeletePathRecursively(dir));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DeleteAndRecreate) {
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  // TODO(https://crbug.com/1425601): Update this when change types are
+  // supported on more platforms.
+  static_assert(kExpectedEventsForNewFileWrite == 2);
+  const auto sequence_matcher =
+      testing::ElementsAre(IsType(FilePathWatcher::ChangeType::kDeleted),
+                           IsType(FilePathWatcher::ChangeType::kCreated),
+                           IsType(FilePathWatcher::ChangeType::kModified));
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(DeleteFile(test_file()));
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, WatchDirectory) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath file1(dir.AppendASCII("file1"));
+  FilePath file2(dir.AppendASCII("file2"));
+
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(testing::Not(HasErrored()), testing::Not(HasCookie())));
+  const auto sequence_matcher = testing::IsSupersetOf(
+      {testing::AllOf(HasPath(report_modified_path() ? file1 : dir), IsFile(),
+                      IsType(FilePathWatcher::ChangeType::kCreated)),
+       testing::AllOf(HasPath(report_modified_path() ? file1 : dir), IsFile(),
+                      IsType(FilePathWatcher::ChangeType::kModified)),
+       testing::AllOf(HasPath(report_modified_path() ? file1 : dir), IsFile(),
+                      IsType(FilePathWatcher::ChangeType::kDeleted)),
+       testing::AllOf(HasPath(report_modified_path() ? file2 : dir), IsFile(),
+                      IsType(FilePathWatcher::ChangeType::kCreated))});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(dir));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(dir, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(file1, "content"));
+  ASSERT_TRUE(WriteFile(file1, "content v2"));
+  ASSERT_TRUE(DeleteFile(file1));
+  ASSERT_TRUE(WriteFile(file2, "content"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, MoveParent) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath dest(temp_dir_.GetPath().AppendASCII("dest"));
+  FilePath subdir(dir.AppendASCII("subdir"));
+  FilePath file(subdir.AppendASCII("file"));
+
+  const auto each_event_matcher = testing::Each(testing::Not(HasErrored()));
+  // TODO(https://crbug.com/1432044): inotify sometimes incorrectly reports the
+  // first event as a directory creation... why?
+  const auto file_delegate_sequence_matcher = testing::IsSupersetOf(
+      {testing::AllOf(HasPath(file), IsFile(),
+                      IsType(FilePathWatcher::ChangeType::kCreated)),
+       testing::AllOf(HasPath(file), IsDirectory(),
+                      IsType(FilePathWatcher::ChangeType::kMoved))});
+  const auto subdir_delegate_sequence_matcher = testing::IsSupersetOf(
+      {testing::AllOf(HasPath(subdir), IsDirectory(),
+                      IsType(FilePathWatcher::ChangeType::kCreated)),
+       testing::AllOf(HasPath(report_modified_path() ? file : subdir), IsFile(),
+                      IsType(FilePathWatcher::ChangeType::kCreated)),
+       testing::AllOf(HasPath(subdir), IsDirectory(),
+                      IsType(FilePathWatcher::ChangeType::kMoved))});
+  const auto file_delegate_matcher =
+      testing::AllOf(each_event_matcher, file_delegate_sequence_matcher);
+  const auto subdir_delegate_matcher =
+      testing::AllOf(each_event_matcher, subdir_delegate_sequence_matcher);
+
+  FilePathWatcher file_watcher, subdir_watcher;
+  TestDelegate file_delegate, subdir_delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(file, &file_watcher, &file_delegate,
+                                       GetWatchOptions()));
+  ASSERT_TRUE(SetupWatchWithChangeInfo(subdir, &subdir_watcher,
+                                       &subdir_delegate, GetWatchOptions()));
+
+  // Set up a directory hierarchy.
+  // We should only get notified on `subdir_delegate` of its creation.
+  ASSERT_TRUE(CreateDirectory(subdir));
+  // Allow the watchers to reconstruct their watch lists.
+  SpinEventLoopForABit();
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+  // Allow the file watcher to reconstruct its watch list.
+  SpinEventLoopForABit();
+
+  Move(dir, dest);
+  file_delegate.RunUntilEventsMatch(file_delegate_matcher);
+  subdir_delegate.RunUntilEventsMatch(subdir_delegate_matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, MoveChild) {
+  FilePath source_dir(temp_dir_.GetPath().AppendASCII("source"));
+  FilePath source_subdir(source_dir.AppendASCII("subdir"));
+  FilePath source_file(source_subdir.AppendASCII("file"));
+  FilePath dest_dir(temp_dir_.GetPath().AppendASCII("dest"));
+  FilePath dest_subdir(dest_dir.AppendASCII("subdir"));
+  FilePath dest_file(dest_subdir.AppendASCII("file"));
+
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(testing::Not(HasErrored()), IsDirectory(),
+                     IsType(FilePathWatcher::ChangeType::kMoved), HasCookie()));
+  const auto file_delegate_sequence_matcher =
+      testing::ElementsAre(HasPath(dest_file));
+  const auto subdir_delegate_sequence_matcher =
+      testing::ElementsAre(HasPath(dest_subdir));
+  const auto file_delegate_matcher =
+      testing::AllOf(each_event_matcher, file_delegate_sequence_matcher);
+  const auto subdir_delegate_matcher =
+      testing::AllOf(each_event_matcher, subdir_delegate_sequence_matcher);
+
+  // Set up a directory hierarchy.
+  ASSERT_TRUE(CreateDirectory(source_subdir));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+
+  FilePathWatcher file_watcher, subdir_watcher;
+  TestDelegate file_delegate, subdir_delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(dest_file, &file_watcher, &file_delegate,
+                                       GetWatchOptions()));
+  ASSERT_TRUE(SetupWatchWithChangeInfo(dest_subdir, &subdir_watcher,
+                                       &subdir_delegate, GetWatchOptions()));
+
+  // Move the directory into place so that the watched file appears.
+  ASSERT_TRUE(Move(source_dir, dest_dir));
+  file_delegate.RunUntilEventsMatch(file_delegate_matcher);
+  subdir_delegate.RunUntilEventsMatch(subdir_delegate_matcher);
+}
+
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+#if !BUILDFLAG(IS_ANDROID)
+TEST_P(FilePathWatcherWithChangeInfoTest, FileAttributesChanged) {
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_file()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kModified),
+                     testing::Not(HasCookie())));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  // Now make sure we get notified when the file's attributes are changed.
+  ASSERT_TRUE(MakeFileUnreadable(test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, CreateLink) {
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_link()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kCreated),
+                     testing::Not(HasCookie())));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  AccumulatingEventExpecter event_expecter;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_link(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  // Now make sure we get notified if the link is created.
+  // Note that test_file() doesn't have to exist.
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+// Unfortunately this test case only works if the link target exists.
+// TODO(craig) fix this as part of crbug.com/91561.
+TEST_P(FilePathWatcherWithChangeInfoTest, DeleteLink) {
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_link()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kDeleted),
+                     testing::Not(HasCookie())));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_link(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  // Now make sure we get notified if the link is deleted.
+  ASSERT_TRUE(DeleteFile(test_link()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, ModifiedLinkedFile) {
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_link()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kModified),
+                     testing::Not(HasCookie())));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_link(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  // Now make sure we get notified if the file is modified.
+  ASSERT_TRUE(WriteFile(test_file(), "new content"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, CreateTargetLinkedFile) {
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(test_link()), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  // TODO(https://crbug.com/1425601): Update this when change types are
+  // supported on more platforms.
+  static_assert(kExpectedEventsForNewFileWrite == 2);
+  const auto sequence_matcher =
+      testing::ElementsAre(IsType(FilePathWatcher::ChangeType::kCreated),
+                           IsType(FilePathWatcher::ChangeType::kModified));
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_link(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  // Now make sure we get notified if the target file is created.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DeleteTargetLinkedFile) {
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(test_link()), testing::Not(HasErrored()), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kDeleted),
+                     testing::Not(HasCookie())));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(SetupWatchWithChangeInfo(test_link(), &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  // Now make sure we get notified if the target file is deleted.
+  ASSERT_TRUE(DeleteFile(test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, LinkedDirectoryPart1) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(linkfile), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::IsSupersetOf({IsType(FilePathWatcher::ChangeType::kCreated),
+                             IsType(FilePathWatcher::ChangeType::kModified),
+                             IsType(FilePathWatcher::ChangeType::kDeleted)});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  // dir/file should exist.
+  ASSERT_TRUE(CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  // Note that we are watching dir.lnk/file which doesn't exist yet.
+  ASSERT_TRUE(SetupWatchWithChangeInfo(linkfile, &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  // Allow the watcher to reconstruct its watch list.
+  SpinEventLoopForABit();
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  ASSERT_TRUE(DeleteFile(file));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, LinkedDirectoryPart2) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(linkfile), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::IsSupersetOf({IsType(FilePathWatcher::ChangeType::kCreated),
+                             IsType(FilePathWatcher::ChangeType::kModified),
+                             IsType(FilePathWatcher::ChangeType::kDeleted)});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  // Now create the link from dir.lnk pointing to dir but
+  // neither dir nor dir/file exist yet.
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  // Note that we are watching dir.lnk/file.
+  ASSERT_TRUE(SetupWatchWithChangeInfo(linkfile, &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(CreateDirectory(dir));
+  // Allow the watcher to reconstruct its watch list.
+  SpinEventLoopForABit();
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  ASSERT_TRUE(DeleteFile(file));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, LinkedDirectoryPart3) {
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+
+  // TODO(https://crbug.com/1425601): Check for symlink-ness on platforms which
+  // support it.
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(HasPath(linkfile), testing::Not(HasErrored()), IsFile(),
+                     testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::IsSupersetOf({IsType(FilePathWatcher::ChangeType::kCreated),
+                             IsType(FilePathWatcher::ChangeType::kModified),
+                             IsType(FilePathWatcher::ChangeType::kDeleted)});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(dir));
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  // Note that we are watching dir.lnk/file but the file doesn't exist yet.
+  ASSERT_TRUE(SetupWatchWithChangeInfo(linkfile, &watcher, &delegate,
+                                       GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  ASSERT_TRUE(DeleteFile(file));
+  delegate.RunUntilEventsMatch(matcher);
+}
+#endif  // !BUILDFLAG(IS_ANDROID)
+
+TEST_P(FilePathWatcherWithChangeInfoTest, CreatedFileInDirectory) {
+  // Expect the change to be reported as a file creation, not as a
+  // directory modification.
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+
+  const auto matcher = testing::IsSupersetOf(
+      {testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                      IsFile(), IsType(FilePathWatcher::ChangeType::kCreated),
+                      testing::Not(HasErrored()), testing::Not(HasCookie()))});
+
+  ASSERT_TRUE(CreateDirectory(parent));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(child, "contents"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, ModifiedFileInDirectory) {
+  // Expect the change to be reported as a file modification, not as a
+  // directory modification.
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(report_modified_path() ? child : parent), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kModified),
+                     testing::Not(HasErrored()), testing::Not(HasCookie())));
+
+  ASSERT_TRUE(CreateDirectory(parent));
+  ASSERT_TRUE(WriteFile(child, "contents"));
+#if BUILDFLAG(IS_ANDROID)
+  // TODO(https://crbug.com/1496350): There appears to be a race condition
+  // between setting up the inotify watch and the processing of the file system
+  // notifications created while setting up the file system for this test. Spin
+  // the event loop to ensure that the events have been processed by the time
+  // the inotify watch has been set up.
+  SpinEventLoopForABit();
+#endif  // BUILDFLAG(IS_ANDROID)
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(child, "contents v2"));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DeletedFileInDirectory) {
+  // Expect the change to be reported as a file deletion, not as a
+  // directory modification.
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+
+  const auto matcher = testing::ElementsAre(
+      testing::AllOf(HasPath(report_modified_path() ? child : parent), IsFile(),
+                     IsType(FilePathWatcher::ChangeType::kDeleted),
+                     testing::Not(HasErrored()), testing::Not(HasCookie())));
+
+  ASSERT_TRUE(CreateDirectory(parent));
+  ASSERT_TRUE(WriteFile(child, "contents"));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(DeleteFile(child));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, FileInDirectory) {
+  // Expect the changes to be reported as events on the file, not as
+  // modifications to the directory.
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+
+  const auto each_event_matcher = testing::Each(testing::AllOf(
+      HasPath(report_modified_path() ? child : parent),
+      testing::Not(HasErrored()), IsFile(), testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::IsSupersetOf({IsType(FilePathWatcher::ChangeType::kCreated),
+                             IsType(FilePathWatcher::ChangeType::kModified),
+                             IsType(FilePathWatcher::ChangeType::kDeleted)});
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(parent));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(WriteFile(child, "contents"));
+  ASSERT_TRUE(WriteFile(child, "contents v2"));
+  ASSERT_TRUE(DeleteFile(child));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DirectoryInDirectory) {
+  // Expect the changes to be reported as events on the child directory, not as
+  // modifications to the parent directory.
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+
+  const auto each_event_matcher = testing::Each(testing::AllOf(
+      HasPath(report_modified_path() ? child : parent),
+      testing::Not(HasErrored()), IsDirectory(), testing::Not(HasCookie())));
+  const auto sequence_matcher =
+      testing::ElementsAre(IsType(FilePathWatcher::ChangeType::kCreated),
+                           IsType(FilePathWatcher::ChangeType::kDeleted));
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(parent));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(CreateDirectory(child));
+  ASSERT_TRUE(DeletePathRecursively(child));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, NestedDirectoryInDirectory) {
+  FilePath parent(temp_dir_.GetPath().AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+  FilePath grandchild(child.AppendASCII("grandchild"));
+
+  const auto each_event_matcher = testing::Each(
+      testing::AllOf(testing::Not(HasErrored()), testing::Not(HasCookie())));
+
+  EventListMatcher sequence_matcher;
+  if (type() == FilePathWatcher::Type::kRecursive) {
+    sequence_matcher = testing::IsSupersetOf(
+        {testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                        IsDirectory(),
+                        IsType(FilePathWatcher::ChangeType::kCreated)),
+         testing::AllOf(HasPath(report_modified_path() ? grandchild : parent),
+                        IsFile(),
+                        IsType(FilePathWatcher::ChangeType::kCreated)),
+         testing::AllOf(HasPath(report_modified_path() ? grandchild : parent),
+                        IsFile(),
+                        IsType(FilePathWatcher::ChangeType::kModified)),
+         testing::AllOf(HasPath(report_modified_path() ? grandchild : parent),
+                        IsFile(),
+                        IsType(FilePathWatcher::ChangeType::kDeleted)),
+         testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                        IsDirectory(),
+                        IsType(FilePathWatcher::ChangeType::kDeleted))});
+  } else {
+    // Do not expect changes to `grandchild` when watching `parent`
+    // non-recursively.
+    sequence_matcher = testing::ElementsAre(
+        testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                       IsDirectory(),
+                       IsType(FilePathWatcher::ChangeType::kCreated)),
+        testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                       IsDirectory(),
+                       IsType(FilePathWatcher::ChangeType::kDeleted)));
+  }
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(parent));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(CreateDirectory(child));
+  // Allow the watcher to reconstruct its watch list.
+  SpinEventLoopForABit();
+
+  ASSERT_TRUE(WriteFile(grandchild, "contents"));
+  ASSERT_TRUE(WriteFile(grandchild, "contents v2"));
+  ASSERT_TRUE(DeleteFile(grandchild));
+  ASSERT_TRUE(DeletePathRecursively(child));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+TEST_P(FilePathWatcherWithChangeInfoTest, DeleteDirectoryRecursively) {
+  FilePath grandparent(temp_dir_.GetPath());
+  FilePath parent(grandparent.AppendASCII("parent"));
+  FilePath child(parent.AppendASCII("child"));
+  FilePath grandchild(child.AppendASCII("grandchild"));
+
+  const auto each_event_matcher = testing::Each(testing::AllOf(
+      testing::Not(HasErrored()), IsType(FilePathWatcher::ChangeType::kDeleted),
+      testing::Not(HasCookie())));
+
+  // TODO(https://crbug.com/1432044): inotify incorrectly reports an additional
+  // deletion event. Once fixed, update this matcher to assert that only one
+  // event per removed file/dir is received.
+  EventListMatcher sequence_matcher;
+  if (type() == FilePathWatcher::Type::kRecursive) {
+    sequence_matcher = testing::IsSupersetOf(
+        {testing::AllOf(HasPath(parent), IsDirectory()),
+         testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                        IsDirectory()),
+         // TODO(https://crbug.com/1432044): inotify incorrectly reports this
+         // deletion on the path of just "grandchild" rather than on
+         // "/absolute/path/blah/blah/parent/child/grantchild".
+         testing::AllOf(
+             HasPath(report_modified_path() ? grandchild.BaseName() : parent),
+             IsFile())});
+  } else {
+    // Do not expect changes to `grandchild` when watching `parent`
+    // non-recursively.
+    sequence_matcher = testing::IsSupersetOf(
+        {testing::AllOf(HasPath(parent), IsDirectory()),
+         testing::AllOf(HasPath(report_modified_path() ? child : parent),
+                        IsDirectory())});
+  }
+  const auto matcher = testing::AllOf(each_event_matcher, sequence_matcher);
+
+  ASSERT_TRUE(CreateDirectory(parent));
+  ASSERT_TRUE(CreateDirectory(child));
+  ASSERT_TRUE(WriteFile(grandchild, "contents"));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(parent, &watcher, &delegate, GetWatchOptions()));
+
+  ASSERT_TRUE(DeletePathRecursively(grandparent));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    /* no prefix */,
+    FilePathWatcherWithChangeInfoTest,
+    ::testing::Combine(::testing::Values(FilePathWatcher::Type::kNonRecursive,
+                                         FilePathWatcher::Type::kRecursive),
+                       // Is WatchOptions.report_modified_path enabled?
+                       ::testing::Bool()));
+
+#else
+
+TEST_F(FilePathWatcherTest, UseDummyChangeInfoIfNotSupported) {
+  const auto matcher = testing::ElementsAre(testing::AllOf(
+      HasPath(test_file()), testing::Not(HasErrored()), IsUnknownPathType(),
+      IsType(FilePathWatcher::ChangeType::kUnsupported),
+      testing::Not(HasCookie())));
+
+  FilePathWatcher watcher;
+  TestDelegate delegate;
+  ASSERT_TRUE(
+      SetupWatchWithChangeInfo(test_file(), &watcher, &delegate,
+                               {.type = FilePathWatcher::Type::kNonRecursive}));
+
+  ASSERT_TRUE(CreateDirectory(test_file()));
+  delegate.RunUntilEventsMatch(matcher);
+}
+
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
+        // BUILDFLAG(IS_ANDROID)
+
 }  // namespace base
diff --git a/base/files/file_path_watcher_win.cc b/base/files/file_path_watcher_win.cc
index 9b3f13c..cfd0327 100644
--- a/base/files/file_path_watcher_win.cc
+++ b/base/files/file_path_watcher_win.cc
@@ -293,9 +293,7 @@
 
 }  // namespace
 
-FilePathWatcher::FilePathWatcher() {
-  DETACH_FROM_SEQUENCE(sequence_checker_);
-  impl_ = std::make_unique<FilePathWatcherImpl>();
-}
+FilePathWatcher::FilePathWatcher()
+    : FilePathWatcher(std::make_unique<FilePathWatcherImpl>()) {}
 
 }  // namespace base
diff --git a/base/files/file_proxy.cc b/base/files/file_proxy.cc
index c3b8083..92bd952 100644
--- a/base/files/file_proxy.cc
+++ b/base/files/file_proxy.cc
@@ -25,10 +25,10 @@
 
 class FileHelper {
  public:
-  FileHelper(FileProxy* proxy, File file)
+  FileHelper(base::WeakPtr<FileProxy> proxy, File file)
       : file_(std::move(file)),
         task_runner_(proxy->task_runner()),
-        proxy_(AsWeakPtr(proxy)) {}
+        proxy_(proxy) {}
   FileHelper(const FileHelper&) = delete;
   FileHelper& operator=(const FileHelper&) = delete;
 
@@ -53,9 +53,8 @@
 
 class GenericFileHelper : public FileHelper {
  public:
-  GenericFileHelper(FileProxy* proxy, File file)
-      : FileHelper(proxy, std::move(file)) {
-  }
+  GenericFileHelper(base::WeakPtr<FileProxy> proxy, File file)
+      : FileHelper(std::move(proxy), std::move(file)) {}
   GenericFileHelper(const GenericFileHelper&) = delete;
   GenericFileHelper& operator=(const GenericFileHelper&) = delete;
 
@@ -88,9 +87,8 @@
 
 class CreateOrOpenHelper : public FileHelper {
  public:
-  CreateOrOpenHelper(FileProxy* proxy, File file)
-      : FileHelper(proxy, std::move(file)) {
-  }
+  CreateOrOpenHelper(base::WeakPtr<FileProxy> proxy, File file)
+      : FileHelper(std::move(proxy), std::move(file)) {}
   CreateOrOpenHelper(const CreateOrOpenHelper&) = delete;
   CreateOrOpenHelper& operator=(const CreateOrOpenHelper&) = delete;
 
@@ -108,9 +106,8 @@
 
 class CreateTemporaryHelper : public FileHelper {
  public:
-  CreateTemporaryHelper(FileProxy* proxy, File file)
-      : FileHelper(proxy, std::move(file)) {
-  }
+  CreateTemporaryHelper(base::WeakPtr<FileProxy> proxy, File file)
+      : FileHelper(std::move(proxy), std::move(file)) {}
   CreateTemporaryHelper(const CreateTemporaryHelper&) = delete;
   CreateTemporaryHelper& operator=(const CreateTemporaryHelper&) = delete;
 
@@ -149,9 +146,8 @@
 
 class GetInfoHelper : public FileHelper {
  public:
-  GetInfoHelper(FileProxy* proxy, File file)
-      : FileHelper(proxy, std::move(file)) {
-  }
+  GetInfoHelper(base::WeakPtr<FileProxy> proxy, File file)
+      : FileHelper(std::move(proxy), std::move(file)) {}
   GetInfoHelper(const GetInfoHelper&) = delete;
   GetInfoHelper& operator=(const GetInfoHelper&) = delete;
 
@@ -172,8 +168,8 @@
 
 class ReadHelper : public FileHelper {
  public:
-  ReadHelper(FileProxy* proxy, File file, int bytes_to_read)
-      : FileHelper(proxy, std::move(file)),
+  ReadHelper(base::WeakPtr<FileProxy> proxy, File file, int bytes_to_read)
+      : FileHelper(std::move(proxy), std::move(file)),
         buffer_(new char[static_cast<size_t>(bytes_to_read)]),
         bytes_to_read_(bytes_to_read) {}
   ReadHelper(const ReadHelper&) = delete;
@@ -198,11 +194,11 @@
 
 class WriteHelper : public FileHelper {
  public:
-  WriteHelper(FileProxy* proxy,
+  WriteHelper(base::WeakPtr<FileProxy> proxy,
               File file,
               const char* buffer,
               int bytes_to_write)
-      : FileHelper(proxy, std::move(file)),
+      : FileHelper(std::move(proxy), std::move(file)),
         buffer_(new char[static_cast<size_t>(bytes_to_write)]),
         bytes_to_write_(bytes_to_write) {
     memcpy(buffer_.get(), buffer, static_cast<size_t>(bytes_to_write));
@@ -241,7 +237,8 @@
                              uint32_t file_flags,
                              StatusCallback callback) {
   DCHECK(!file_.IsValid());
-  CreateOrOpenHelper* helper = new CreateOrOpenHelper(this, File());
+  CreateOrOpenHelper* helper =
+      new CreateOrOpenHelper(weak_ptr_factory_.GetWeakPtr(), File());
   return task_runner_->PostTaskAndReply(
       FROM_HERE,
       BindOnce(&CreateOrOpenHelper::RunWork, Unretained(helper), file_path,
@@ -252,7 +249,8 @@
 bool FileProxy::CreateTemporary(uint32_t additional_file_flags,
                                 CreateTemporaryCallback callback) {
   DCHECK(!file_.IsValid());
-  CreateTemporaryHelper* helper = new CreateTemporaryHelper(this, File());
+  CreateTemporaryHelper* helper =
+      new CreateTemporaryHelper(weak_ptr_factory_.GetWeakPtr(), File());
   return task_runner_->PostTaskAndReply(
       FROM_HERE,
       BindOnce(&CreateTemporaryHelper::RunWork, Unretained(helper),
@@ -284,7 +282,8 @@
 
 bool FileProxy::Close(StatusCallback callback) {
   DCHECK(file_.IsValid());
-  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  GenericFileHelper* helper =
+      new GenericFileHelper(weak_ptr_factory_.GetWeakPtr(), std::move(file_));
   return task_runner_->PostTaskAndReply(
       FROM_HERE, BindOnce(&GenericFileHelper::Close, Unretained(helper)),
       BindOnce(&GenericFileHelper::Reply, Owned(helper), std::move(callback)));
@@ -292,7 +291,8 @@
 
 bool FileProxy::GetInfo(GetFileInfoCallback callback) {
   DCHECK(file_.IsValid());
-  GetInfoHelper* helper = new GetInfoHelper(this, std::move(file_));
+  GetInfoHelper* helper =
+      new GetInfoHelper(weak_ptr_factory_.GetWeakPtr(), std::move(file_));
   return task_runner_->PostTaskAndReply(
       FROM_HERE, BindOnce(&GetInfoHelper::RunWork, Unretained(helper)),
       BindOnce(&GetInfoHelper::Reply, Owned(helper), std::move(callback)));
@@ -303,7 +303,8 @@
   if (bytes_to_read < 0)
     return false;
 
-  ReadHelper* helper = new ReadHelper(this, std::move(file_), bytes_to_read);
+  ReadHelper* helper = new ReadHelper(weak_ptr_factory_.GetWeakPtr(),
+                                      std::move(file_), bytes_to_read);
   return task_runner_->PostTaskAndReply(
       FROM_HERE, BindOnce(&ReadHelper::RunWork, Unretained(helper), offset),
       BindOnce(&ReadHelper::Reply, Owned(helper), std::move(callback)));
@@ -317,8 +318,8 @@
   if (bytes_to_write <= 0 || buffer == nullptr)
     return false;
 
-  WriteHelper* helper =
-      new WriteHelper(this, std::move(file_), buffer, bytes_to_write);
+  WriteHelper* helper = new WriteHelper(
+      weak_ptr_factory_.GetWeakPtr(), std::move(file_), buffer, bytes_to_write);
   return task_runner_->PostTaskAndReply(
       FROM_HERE, BindOnce(&WriteHelper::RunWork, Unretained(helper), offset),
       BindOnce(&WriteHelper::Reply, Owned(helper), std::move(callback)));
@@ -328,7 +329,8 @@
                          Time last_modified_time,
                          StatusCallback callback) {
   DCHECK(file_.IsValid());
-  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  GenericFileHelper* helper =
+      new GenericFileHelper(weak_ptr_factory_.GetWeakPtr(), std::move(file_));
   return task_runner_->PostTaskAndReply(
       FROM_HERE,
       BindOnce(&GenericFileHelper::SetTimes, Unretained(helper),
@@ -338,7 +340,8 @@
 
 bool FileProxy::SetLength(int64_t length, StatusCallback callback) {
   DCHECK(file_.IsValid());
-  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  GenericFileHelper* helper =
+      new GenericFileHelper(weak_ptr_factory_.GetWeakPtr(), std::move(file_));
   return task_runner_->PostTaskAndReply(
       FROM_HERE,
       BindOnce(&GenericFileHelper::SetLength, Unretained(helper), length),
@@ -347,7 +350,8 @@
 
 bool FileProxy::Flush(StatusCallback callback) {
   DCHECK(file_.IsValid());
-  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  GenericFileHelper* helper =
+      new GenericFileHelper(weak_ptr_factory_.GetWeakPtr(), std::move(file_));
   return task_runner_->PostTaskAndReply(
       FROM_HERE, BindOnce(&GenericFileHelper::Flush, Unretained(helper)),
       BindOnce(&GenericFileHelper::Reply, Owned(helper), std::move(callback)));
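The file_proxy.cc changes above hand each helper a base::WeakPtr<FileProxy> obtained from an explicit WeakPtrFactory member (declared in the file_proxy.h hunk that follows) instead of relying on the SupportsWeakPtr base class. A minimal caller-side sketch, illustrative only and not part of this change (it assumes a ThreadPool task runner and a running task environment; the function name is made up), of how the helper behaves when the proxy is destroyed before the reply runs:

#include <memory>

#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/file_proxy.h"
#include "base/functional/bind.h"
#include "base/task/thread_pool.h"

void OpenThenDropProxy(const base::FilePath& path) {
  scoped_refptr<base::TaskRunner> task_runner =
      base::ThreadPool::CreateTaskRunner({base::MayBlock()});
  auto proxy = std::make_unique<base::FileProxy>(task_runner.get());
  proxy->CreateOrOpen(path, base::File::FLAG_OPEN | base::File::FLAG_READ,
                      base::BindOnce([](base::File::Error error) {
                        // Runs on the calling sequence once the open finishes.
                      }));
  // Destroying the proxy invalidates the WeakPtrs issued by its factory, so
  // the helper's reply finds no proxy to hand the File back to and instead
  // closes it on the task runner.
  proxy.reset();
}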
diff --git a/base/files/file_proxy.h b/base/files/file_proxy.h
index 680a511..1dae4a6 100644
--- a/base/files/file_proxy.h
+++ b/base/files/file_proxy.h
@@ -33,7 +33,7 @@
 //   proxy.Write(...);
 //
 // means the second Write will always fail.
-class BASE_EXPORT FileProxy : public SupportsWeakPtr<FileProxy> {
+class BASE_EXPORT FileProxy final {
  public:
   // This callback is used by methods that report only an error code. It is
   // valid to pass a null callback to some functions that takes a
@@ -133,6 +133,8 @@
 
   scoped_refptr<TaskRunner> task_runner_;
   File file_;
+
+  base::WeakPtrFactory<FileProxy> weak_ptr_factory_{this};
 };
 
 }  // namespace base
diff --git a/base/files/file_proxy_unittest.cc b/base/files/file_proxy_unittest.cc
index a8bcc58..23f1b82 100644
--- a/base/files/file_proxy_unittest.cc
+++ b/base/files/file_proxy_unittest.cc
@@ -378,13 +378,13 @@
 
   // The returned values may only have the seconds precision, so we cast
   // the double values to int here.
-  EXPECT_EQ(static_cast<int>(last_modified_time.ToDoubleT()),
-            static_cast<int>(info.last_modified.ToDoubleT()));
+  EXPECT_EQ(static_cast<int>(last_modified_time.InSecondsFSinceUnixEpoch()),
+            static_cast<int>(info.last_modified.InSecondsFSinceUnixEpoch()));
 
 #if !BUILDFLAG(IS_FUCHSIA)
   // On Fuchsia, /tmp is noatime
-  EXPECT_EQ(static_cast<int>(last_accessed_time.ToDoubleT()),
-            static_cast<int>(info.last_accessed.ToDoubleT()));
+  EXPECT_EQ(static_cast<int>(last_accessed_time.InSecondsFSinceUnixEpoch()),
+            static_cast<int>(info.last_accessed.InSecondsFSinceUnixEpoch()));
 #endif  // BUILDFLAG(IS_FUCHSIA)
 }
 
diff --git a/base/files/file_util.h b/base/files/file_util.h
index 0a9a277..0318528 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -420,6 +420,12 @@
 // Both paths are only accessible to admin and system processes, and are
 // therefore secure.
 BASE_EXPORT bool GetSecureSystemTemp(FilePath* temp);
+
+// Sets whether the use of %systemroot%\SystemTemp or %programfiles% is
+// permitted for testing. Tests that run as admin should keep using %TMP% so
+// that their files are correctly cleaned up by the test launcher.
+BASE_EXPORT void SetDisableSecureSystemTempForTesting(bool disabled);
 #endif  // BUILDFLAG(IS_WIN)
 
 // Do NOT USE in new code. Use ScopedTempDir instead.
diff --git a/base/files/file_util_apple.mm b/base/files/file_util_apple.mm
new file mode 100644
index 0000000..39f8d2c
--- /dev/null
+++ b/base/files/file_util_apple.mm
@@ -0,0 +1,71 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#import <Foundation/Foundation.h>
+#include <copyfile.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/apple/foundation_util.h"
+#include "base/check_op.h"
+#include "base/files/file_path.h"
+#include "base/strings/string_util.h"
+#include "base/threading/scoped_blocking_call.h"
+
+namespace base {
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+  ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
+  if (from_path.ReferencesParent() || to_path.ReferencesParent()) {
+    return false;
+  }
+  return (copyfile(from_path.value().c_str(), to_path.value().c_str(),
+                   /*state=*/nullptr, COPYFILE_DATA) == 0);
+}
+
+bool GetTempDir(base::FilePath* path) {
+  // In order to facilitate hermetic runs on macOS, first check
+  // MAC_CHROMIUM_TMPDIR. This is used instead of TMPDIR for historical reasons.
+  // This was originally done for https://crbug.com/698759 (TMPDIR too long for
+  // process singleton socket path), but is hopefully obsolete as of
+  // https://crbug.com/1266817 (allows a longer process singleton socket path).
+  // Continue tracking MAC_CHROMIUM_TMPDIR as that's what build infrastructure
+  // sets on macOS.
+  const char* env_tmpdir = getenv("MAC_CHROMIUM_TMPDIR");
+  if (env_tmpdir) {
+    *path = base::FilePath(env_tmpdir);
+    return true;
+  }
+
+  // If we didn't find it, fall back to the native function.
+  NSString* tmp = NSTemporaryDirectory();
+  if (tmp == nil) {
+    return false;
+  }
+  *path = base::apple::NSStringToFilePath(tmp);
+  return true;
+}
+
+FilePath GetHomeDir() {
+  NSString* tmp = NSHomeDirectory();
+  if (tmp != nil) {
+    FilePath mac_home_dir = base::apple::NSStringToFilePath(tmp);
+    if (!mac_home_dir.empty()) {
+      return mac_home_dir;
+    }
+  }
+
+  // Fall back on temp dir if no home directory is defined.
+  FilePath rv;
+  if (GetTempDir(&rv)) {
+    return rv;
+  }
+
+  // Last resort.
+  return FilePath("/tmp");
+}
+
+}  // namespace base
diff --git a/base/files/file_util_mac.mm b/base/files/file_util_mac.mm
deleted file mode 100644
index c75cf10..0000000
--- a/base/files/file_util_mac.mm
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/files/file_util.h"
-
-#import <Foundation/Foundation.h>
-#include <copyfile.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "base/check_op.h"
-#include "base/files/file_path.h"
-#include "base/mac/foundation_util.h"
-#include "base/strings/string_util.h"
-#include "base/threading/scoped_blocking_call.h"
-
-namespace base {
-
-bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
-  ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
-  if (from_path.ReferencesParent() || to_path.ReferencesParent())
-    return false;
-  return (copyfile(from_path.value().c_str(), to_path.value().c_str(),
-                   /*state=*/nullptr, COPYFILE_DATA) == 0);
-}
-
-bool GetTempDir(base::FilePath* path) {
-  // In order to facilitate hermetic runs on macOS, first check
-  // MAC_CHROMIUM_TMPDIR. This is used instead of TMPDIR for historical reasons.
-  // This was originally done for https://crbug.com/698759 (TMPDIR too long for
-  // process singleton socket path), but is hopefully obsolete as of
-  // https://crbug.com/1266817 (allows a longer process singleton socket path).
-  // Continue tracking MAC_CHROMIUM_TMPDIR as that's what build infrastructure
-  // sets on macOS.
-  const char* env_tmpdir = getenv("MAC_CHROMIUM_TMPDIR");
-  if (env_tmpdir) {
-    *path = base::FilePath(env_tmpdir);
-    return true;
-  }
-
-  // If we didn't find it, fall back to the native function.
-  NSString* tmp = NSTemporaryDirectory();
-  if (tmp == nil)
-    return false;
-  *path = base::mac::NSStringToFilePath(tmp);
-  return true;
-}
-
-FilePath GetHomeDir() {
-  NSString* tmp = NSHomeDirectory();
-  if (tmp != nil) {
-    FilePath mac_home_dir = base::mac::NSStringToFilePath(tmp);
-    if (!mac_home_dir.empty())
-      return mac_home_dir;
-  }
-
-  // Fall back on temp dir if no home directory is defined.
-  FilePath rv;
-  if (GetTempDir(&rv))
-    return rv;
-
-  // Last resort.
-  return FilePath("/tmp");
-}
-
-}  // namespace base
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index da04b25..81136bc 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -51,7 +51,7 @@
 
 #if BUILDFLAG(IS_APPLE)
 #include <AvailabilityMacros.h>
-#include "base/mac/foundation_util.h"
+#include "base/apple/foundation_util.h"
 #endif
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
@@ -630,7 +630,7 @@
 #endif  // !BUILDFLAG(IS_FUCHSIA)
 
 #if !BUILDFLAG(IS_APPLE)
-// This is implemented in file_util_mac.mm for Mac.
+// This is implemented in file_util_apple.mm for Mac.
 bool GetTempDir(FilePath* path) {
   const char* tmp = getenv("TMPDIR");
   if (tmp) {
@@ -647,7 +647,7 @@
 }
 #endif  // !BUILDFLAG(IS_APPLE)
 
-#if !BUILDFLAG(IS_APPLE)  // Mac implementation is in file_util_mac.mm.
+#if !BUILDFLAG(IS_APPLE)  // Mac implementation is in file_util_apple.mm.
 FilePath GetHomeDir() {
 #if BUILDFLAG(IS_CHROMEOS)
   if (SysInfo::IsRunningOnChromeOS()) {
@@ -690,7 +690,7 @@
 
 FilePath FormatTemporaryFileName(FilePath::StringPieceType identifier) {
 #if BUILDFLAG(IS_APPLE)
-  StringPiece prefix = base::mac::BaseBundleID();
+  StringPiece prefix = base::apple::BaseBundleID();
 #elif BUILDFLAG(GOOGLE_CHROME_BRANDING)
   StringPiece prefix = "com.google.Chrome";
 #else
@@ -781,6 +781,7 @@
     if (!DirectoryExists(subpath)) {
       if (error)
         *error = File::OSErrorToFileError(saved_errno);
+      errno = saved_errno;
       return false;
     }
   }
diff --git a/base/files/file_util_unittest.cc b/base/files/file_util_unittest.cc
index c59c226..ea3c767 100644
--- a/base/files/file_util_unittest.cc
+++ b/base/files/file_util_unittest.cc
@@ -56,11 +56,9 @@
 #include <tchar.h>
 #include <windows.h>
 #include <winioctl.h>
-#include "base/features.h"
 #include "base/scoped_native_library.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/test/gtest_util.h"
-#include "base/test/scoped_feature_list.h"
 #include "base/win/scoped_handle.h"
 #include "base/win/win_util.h"
 #endif
@@ -3014,7 +3012,7 @@
   FilePath secure_system_temp;
   ASSERT_EQ(GetSecureSystemTemp(&secure_system_temp), !!::IsUserAnAdmin());
   if (!::IsUserAnAdmin()) {
-    return;
+    GTEST_SKIP() << "This test must be run by an admin user";
   }
 
   FilePath dir_windows;
@@ -3031,17 +3029,26 @@
   FilePath temp_dir;
   ASSERT_TRUE(CreateNewTempDirectory(FilePath::StringType(), &temp_dir));
   EXPECT_TRUE(PathExists(temp_dir));
+  EXPECT_TRUE(DeleteFile(temp_dir));
+}
 
 #if BUILDFLAG(IS_WIN)
+TEST_F(FileUtilTest, TempDirectoryParentTest) {
+  if (!::IsUserAnAdmin()) {
+    GTEST_SKIP() << "This test must be run by an admin user";
+  }
+  FilePath temp_dir;
+  ASSERT_TRUE(CreateNewTempDirectory(FilePath::StringType(), &temp_dir));
+  EXPECT_TRUE(PathExists(temp_dir));
+
   FilePath expected_parent_dir;
   if (!GetSecureSystemTemp(&expected_parent_dir)) {
     EXPECT_TRUE(PathService::Get(DIR_TEMP, &expected_parent_dir));
   }
   EXPECT_TRUE(expected_parent_dir.IsParent(temp_dir));
-#endif  // BUILDFLAG(IS_WIN)
-
   EXPECT_TRUE(DeleteFile(temp_dir));
 }
+#endif  // BUILDFLAG(IS_WIN)
 
 TEST_F(FileUtilTest, CreateNewTemporaryDirInDirTest) {
   FilePath new_dir;
diff --git a/base/files/file_util_win.cc b/base/files/file_util_win.cc
index d7b5fb2..986db3b 100644
--- a/base/files/file_util_win.cc
+++ b/base/files/file_util_win.cc
@@ -62,6 +62,8 @@
 
 int g_extra_allowed_path_for_no_execute = 0;
 
+bool g_disable_secure_system_temp_for_testing = false;
+
 const DWORD kFileShareAll =
     FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
 const wchar_t kDefaultTempDirPrefix[] = L"ChromiumTemp";
@@ -688,6 +690,10 @@
 }
 
 bool GetSecureSystemTemp(FilePath* temp) {
+  if (g_disable_secure_system_temp_for_testing) {
+    return false;
+  }
+
   ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
 
   CHECK(temp);
@@ -711,6 +717,10 @@
   return false;
 }
 
+void SetDisableSecureSystemTempForTesting(bool disabled) {
+  g_disable_secure_system_temp_for_testing = disabled;
+}
+
 // The directory is created under `GetSecureSystemTemp` for security reasons if
 // the caller is admin to avoid attacks from lower privilege processes.
 //
@@ -719,7 +729,8 @@
 // `GetSecureSystemTemp` could be because `%systemroot%\SystemTemp` does not
 // exist, or unable to resolve `DIR_WINDOWS` or `DIR_PROGRAM_FILES`, say due to
 // registry redirection, or unable to create a directory due to
-// `GetSecureSystemTemp` being read-only or having atypical ACLs.
+// `GetSecureSystemTemp` being read-only or having atypical ACLs. Tests can also
+// disable this behavior, resulting in false being returned.
 bool CreateNewTempDirectory(const FilePath::StringType& prefix,
                             FilePath* new_temp_path) {
   ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
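The SetDisableSecureSystemTempForTesting() hook added above is intended to be flipped by tests that run elevated, so CreateNewTempDirectory() falls back to %TMP% and the test launcher can clean up after the run. A hypothetical, Windows-only sketch of the intended usage; the fixture helper and test names are illustrative and not taken from this change:

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "testing/gtest/include/gtest/gtest.h"

class ScopedDisableSecureSystemTemp {
 public:
  ScopedDisableSecureSystemTemp() {
    base::SetDisableSecureSystemTempForTesting(true);
  }
  ~ScopedDisableSecureSystemTemp() {
    base::SetDisableSecureSystemTempForTesting(false);
  }
};

TEST(AdminTempSketch, FallsBackToTmp) {
  ScopedDisableSecureSystemTemp disable;
  // With the secure system temp disabled, the new directory is created under
  // %TMP% even when running as admin.
  base::FilePath temp_dir;
  ASSERT_TRUE(base::CreateNewTempDirectory(base::FilePath::StringType(),
                                           &temp_dir));
  EXPECT_TRUE(base::DeleteFile(temp_dir));
}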
diff --git a/base/files/memory_mapped_file.cc b/base/files/memory_mapped_file.cc
index 132c3a9..985c3ae 100644
--- a/base/files/memory_mapped_file.cc
+++ b/base/files/memory_mapped_file.cc
@@ -41,6 +41,14 @@
     case READ_ONLY:
       flags = File::FLAG_OPEN | File::FLAG_READ;
       break;
+    case READ_WRITE_COPY:
+      flags = File::FLAG_OPEN | File::FLAG_READ;
+#if BUILDFLAG(IS_FUCHSIA)
+      // Fuchsia's mmap() implementation does not allow us to create a
+      // copy-on-write mapping of a file opened as read-only.
+      flags |= File::FLAG_WRITE;
+#endif
+      break;
     case READ_WRITE:
       flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
       break;
@@ -92,6 +100,7 @@
       [[fallthrough]];
     case READ_ONLY:
     case READ_WRITE:
+    case READ_WRITE_COPY:
       // Ensure that the region values are valid.
       if (region.offset < 0) {
         DLOG(ERROR) << "Region bounds are not valid.";
diff --git a/base/files/memory_mapped_file.h b/base/files/memory_mapped_file.h
index d099ce4..24f76fe 100644
--- a/base/files/memory_mapped_file.h
+++ b/base/files/memory_mapped_file.h
@@ -11,8 +11,9 @@
 #include <utility>
 
 #include "base/base_export.h"
+#include "base/containers/span.h"
 #include "base/files/file.h"
-#include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ptr_exclusion.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_WIN)
@@ -39,6 +40,11 @@
     // be as much as 1s on some systems.
     READ_WRITE,
 
+    // This provides read/write access to the mapped file contents as above, but
+    // applies a copy-on-write policy such that no writes are carried through to
+    // the underlying file.
+    READ_WRITE_COPY,
+
     // This provides read/write access but with the ability to write beyond
     // the end of the existing file up to a maximum size specified as the
     // "region". Depending on the OS, the file may or may not be immediately
@@ -109,6 +115,10 @@
   uint8_t* data() { return data_; }
   size_t length() const { return length_; }
 
+  span<const uint8_t> bytes() const { return make_span(data_, length_); }
+
+  span<uint8_t> mutable_bytes() const { return make_span(data_, length_); }
+
   // Is file_ a valid file handle that points to an open, memory mapped file?
   bool IsValid() const;
 
@@ -140,7 +150,9 @@
 
   File file_;
 
-  raw_ptr<uint8_t, DanglingUntriaged | AllowPtrArithmetic> data_ = nullptr;
+  // `data_` is never allocated by PartitionAlloc, so there is no benefit to
+  // using a raw_ptr.
+  RAW_PTR_EXCLUSION uint8_t* data_ = nullptr;
   size_t length_ = 0;
 
 #if BUILDFLAG(IS_WIN)
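The header change above adds the READ_WRITE_COPY access mode and the bytes()/mutable_bytes() span accessors. A small sketch of how the copy-on-write mode could be used; this is illustrative only, and the function name is made up for the example:

#include "base/containers/span.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"

// Maps |path| copy-on-write and patches the first byte. The write is visible
// through this mapping but is never carried through to the file on disk.
bool PatchFirstByteInMemory(const base::FilePath& path) {
  base::MemoryMappedFile map;
  if (!map.Initialize(path, base::MemoryMappedFile::READ_WRITE_COPY)) {
    return false;
  }
  base::span<uint8_t> bytes = map.mutable_bytes();
  if (bytes.empty()) {
    return false;
  }
  bytes[0] = 0xFF;  // Only this process's private mapping observes the change.
  return true;
}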
diff --git a/base/files/memory_mapped_file_posix.cc b/base/files/memory_mapped_file_posix.cc
index be5084b..1435190 100644
--- a/base/files/memory_mapped_file_posix.cc
+++ b/base/files/memory_mapped_file_posix.cc
@@ -66,18 +66,24 @@
     length_ = region.size;
   }
 
-  int flags = 0;
+  int prot = 0;
+  int flags = MAP_SHARED;
   switch (access) {
     case READ_ONLY:
-      flags |= PROT_READ;
+      prot |= PROT_READ;
       break;
 
     case READ_WRITE:
-      flags |= PROT_READ | PROT_WRITE;
+      prot |= PROT_READ | PROT_WRITE;
+      break;
+
+    case READ_WRITE_COPY:
+      prot |= PROT_READ | PROT_WRITE;
+      flags = MAP_PRIVATE;
       break;
 
     case READ_WRITE_EXTEND:
-      flags |= PROT_READ | PROT_WRITE;
+      prot |= PROT_READ | PROT_WRITE;
 
       if (!AllocateFileRegion(&file_, region.offset, region.size))
         return false;
@@ -85,8 +91,8 @@
       break;
   }
 
-  data_ = static_cast<uint8_t*>(mmap(nullptr, map_size, flags, MAP_SHARED,
-                                     file_.GetPlatformFile(), map_start));
+  data_ = static_cast<uint8_t*>(
+      mmap(nullptr, map_size, prot, flags, file_.GetPlatformFile(), map_start));
   if (data_ == MAP_FAILED) {
     DPLOG(ERROR) << "mmap " << file_.GetPlatformFile();
     return false;
@@ -101,7 +107,7 @@
   ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
 
   if (data_ != nullptr) {
-    munmap(data_.ExtractAsDangling(), length_);
+    munmap(data_, length_);
   }
   file_.Close();
   length_ = 0;
diff --git a/base/files/memory_mapped_file_unittest.cc b/base/files/memory_mapped_file_unittest.cc
index dc9b9b6..93969ba 100644
--- a/base/files/memory_mapped_file_unittest.cc
+++ b/base/files/memory_mapped_file_unittest.cc
@@ -27,9 +27,9 @@
 }
 
 // Check that the watermark sequence is consistent with the |offset| provided.
-bool CheckBufferContents(const uint8_t* data, size_t size, size_t offset) {
-  std::unique_ptr<uint8_t[]> test_data(CreateTestBuffer(size, offset));
-  return memcmp(test_data.get(), data, size) == 0;
+bool CheckBufferContents(span<const uint8_t> bytes, size_t offset) {
+  std::unique_ptr<uint8_t[]> test_data(CreateTestBuffer(bytes.size(), offset));
+  return memcmp(test_data.get(), bytes.data(), bytes.size()) == 0;
 }
 
 class MemoryMappedFileTest : public PlatformTest {
@@ -67,7 +67,7 @@
   ASSERT_EQ(kFileSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+  ASSERT_TRUE(CheckBufferContents(map.bytes(), 0));
 }
 
 TEST_F(MemoryMappedFileTest, MapWholeFileByFD) {
@@ -79,7 +79,7 @@
   ASSERT_EQ(kFileSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+  ASSERT_TRUE(CheckBufferContents(map.bytes(), 0));
 }
 
 TEST_F(MemoryMappedFileTest, MapSmallFile) {
@@ -90,7 +90,7 @@
   ASSERT_EQ(kFileSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+  ASSERT_TRUE(CheckBufferContents(map.bytes(), 0));
 }
 
 TEST_F(MemoryMappedFileTest, MapWholeFileUsingRegion) {
@@ -104,7 +104,7 @@
   ASSERT_EQ(kFileSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+  ASSERT_TRUE(CheckBufferContents(map.bytes(), 0));
 }
 
 TEST_F(MemoryMappedFileTest, MapPartialRegionAtBeginning) {
@@ -119,7 +119,7 @@
   ASSERT_EQ(kPartialSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, 0));
+  ASSERT_TRUE(CheckBufferContents(map.bytes().first(kPartialSize), 0));
 }
 
 TEST_F(MemoryMappedFileTest, MapPartialRegionAtEnd) {
@@ -135,7 +135,7 @@
   ASSERT_EQ(kPartialSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, kOffset));
+  ASSERT_TRUE(CheckBufferContents(map.bytes().first(kPartialSize), kOffset));
 }
 
 TEST_F(MemoryMappedFileTest, MapSmallPartialRegionInTheMiddle) {
@@ -152,7 +152,7 @@
   ASSERT_EQ(kPartialSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, kOffset));
+  ASSERT_TRUE(CheckBufferContents(map.bytes().first(kPartialSize), kOffset));
 }
 
 TEST_F(MemoryMappedFileTest, MapLargePartialRegionInTheMiddle) {
@@ -169,7 +169,7 @@
   ASSERT_EQ(kPartialSize, map.length());
   ASSERT_TRUE(map.data() != nullptr);
   EXPECT_TRUE(map.IsValid());
-  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, kOffset));
+  ASSERT_TRUE(CheckBufferContents(map.bytes().first(kPartialSize), kOffset));
 }
 
 TEST_F(MemoryMappedFileTest, WriteableFile) {
@@ -182,15 +182,16 @@
     ASSERT_EQ(kFileSize, map.length());
     ASSERT_TRUE(map.data() != nullptr);
     EXPECT_TRUE(map.IsValid());
-    ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+    ASSERT_TRUE(CheckBufferContents(map.bytes(), 0));
 
-    uint8_t* bytes = map.data();
+    span<uint8_t> bytes = map.mutable_bytes();
     bytes[0] = 'B';
     bytes[1] = 'a';
     bytes[2] = 'r';
     bytes[kFileSize - 1] = '!';
-    EXPECT_FALSE(CheckBufferContents(map.data(), kFileSize, 0));
-    EXPECT_TRUE(CheckBufferContents(map.data() + 3, kFileSize - 4, 3));
+    EXPECT_FALSE(CheckBufferContents(map.bytes(), 0));
+    EXPECT_TRUE(
+        CheckBufferContents(map.bytes().first(kFileSize - 1).subspan(3), 3));
   }
 
   int64_t file_size;
@@ -203,6 +204,39 @@
   EXPECT_EQ("!", contents.substr(kFileSize - 1, 1));
 }
 
+TEST_F(MemoryMappedFileTest, CopyOnWrite) {
+  const size_t kFileSize = 127;
+  CreateTemporaryTestFile(kFileSize);
+
+  {
+    MemoryMappedFile map;
+    ASSERT_TRUE(
+        map.Initialize(temp_file_path(), MemoryMappedFile::READ_WRITE_COPY));
+    ASSERT_EQ(kFileSize, map.length());
+    ASSERT_TRUE(map.data() != nullptr);
+    EXPECT_TRUE(map.IsValid());
+    ASSERT_TRUE(CheckBufferContents(map.bytes(), 0));
+
+    span<uint8_t> bytes = map.mutable_bytes();
+    bytes[0] = 'B';
+    bytes[1] = 'a';
+    bytes[2] = 'r';
+    bytes[kFileSize - 1] = '!';
+    EXPECT_FALSE(CheckBufferContents(map.bytes(), 0));
+    EXPECT_TRUE(
+        CheckBufferContents(map.bytes().first(kFileSize - 1).subspan(3), 3));
+  }
+
+  int64_t file_size;
+  ASSERT_TRUE(GetFileSize(temp_file_path(), &file_size));
+  EXPECT_EQ(static_cast<int64_t>(kFileSize), file_size);
+
+  // Although the buffer has been modified in memory, the file is unchanged.
+  std::string contents;
+  ASSERT_TRUE(ReadFileToString(temp_file_path(), &contents));
+  EXPECT_TRUE(CheckBufferContents(as_bytes(span(contents)), 0));
+}
+
 TEST_F(MemoryMappedFileTest, ExtendableFile) {
   const size_t kFileSize = 127;
   const size_t kFileExtend = 100;
@@ -218,16 +252,16 @@
     EXPECT_EQ(kFileSize + kFileExtend, map.length());
     ASSERT_TRUE(map.data() != nullptr);
     EXPECT_TRUE(map.IsValid());
-    ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+    ASSERT_TRUE(CheckBufferContents(map.bytes().first(kFileSize), 0));
 
-    uint8_t* bytes = map.data();
+    span<uint8_t> bytes = map.mutable_bytes();
     EXPECT_EQ(0, bytes[kFileSize + 0]);
     EXPECT_EQ(0, bytes[kFileSize + 1]);
     EXPECT_EQ(0, bytes[kFileSize + 2]);
     bytes[kFileSize + 0] = 'B';
     bytes[kFileSize + 1] = 'A';
     bytes[kFileSize + 2] = 'Z';
-    EXPECT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+    EXPECT_TRUE(CheckBufferContents(map.bytes().first(kFileSize), 0));
   }
 
   int64_t file_size;
diff --git a/base/files/memory_mapped_file_win.cc b/base/files/memory_mapped_file_win.cc
index 7169fba..57147fe 100644
--- a/base/files/memory_mapped_file_win.cc
+++ b/base/files/memory_mapped_file_win.cc
@@ -62,17 +62,25 @@
   if (!file_.IsValid())
     return false;
 
+  DWORD view_access;
   DWORD flags = 0;
   ULARGE_INTEGER size = {};
   switch (access) {
     case READ_ONLY:
       flags |= PAGE_READONLY;
+      view_access = FILE_MAP_READ;
       break;
     case READ_WRITE:
       flags |= PAGE_READWRITE;
+      view_access = FILE_MAP_WRITE;
+      break;
+    case READ_WRITE_COPY:
+      flags |= PAGE_WRITECOPY;
+      view_access = FILE_MAP_COPY;
       break;
     case READ_WRITE_EXTEND:
       flags |= PAGE_READWRITE;
+      view_access = FILE_MAP_WRITE;
       size.QuadPart = region.size;
       break;
     case READ_CODE_IMAGE:
@@ -119,10 +127,9 @@
     length_ = region.size;
   }
 
-  data_ = static_cast<uint8_t*>(
-      ::MapViewOfFile(file_mapping_.get(),
-                      (flags & PAGE_READONLY) ? FILE_MAP_READ : FILE_MAP_WRITE,
-                      map_start.HighPart, map_start.LowPart, map_size));
+  data_ = static_cast<uint8_t*>(::MapViewOfFile(file_mapping_.get(),
+                                                view_access, map_start.HighPart,
+                                                map_start.LowPart, map_size));
   if (data_ == nullptr)
     return false;
   data_ += data_offset;
diff --git a/base/files/scoped_temp_file.cc b/base/files/scoped_temp_file.cc
new file mode 100644
index 0000000..b00f0bc
--- /dev/null
+++ b/base/files/scoped_temp_file.cc
@@ -0,0 +1,60 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_file.h"
+
+#include <utility>
+
+#include "base/check.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+
+namespace base {
+
+ScopedTempFile::ScopedTempFile() = default;
+
+ScopedTempFile::ScopedTempFile(ScopedTempFile&& other) noexcept
+    : path_(std::move(other.path_)) {}
+
+ScopedTempFile& ScopedTempFile::operator=(ScopedTempFile&& other) noexcept {
+  if (!path_.empty()) {
+    CHECK_NE(path_, other.path_);
+  }
+  if (!Delete()) {
+    DLOG(WARNING) << "Could not delete temp dir in operator=().";
+  }
+  path_ = std::move(other.path_);
+  return *this;
+}
+
+ScopedTempFile::~ScopedTempFile() {
+  if (!Delete()) {
+    DLOG(WARNING) << "Could not delete temp dir in destructor.";
+  }
+}
+
+bool ScopedTempFile::Create() {
+  CHECK(path_.empty());
+  return base::CreateTemporaryFile(&path_);
+}
+
+bool ScopedTempFile::Delete() {
+  if (path_.empty()) {
+    return true;
+  }
+  if (DeleteFile(path_)) {
+    path_.clear();
+    return true;
+  }
+  return false;
+}
+
+void ScopedTempFile::Reset() {
+  if (!Delete()) {
+    DLOG(WARNING) << "Could not delete temp dir in Reset().";
+  }
+  path_.clear();
+}
+
+}  // namespace base
diff --git a/base/files/scoped_temp_file.h b/base/files/scoped_temp_file.h
new file mode 100644
index 0000000..1ca8b52
--- /dev/null
+++ b/base/files/scoped_temp_file.h
@@ -0,0 +1,47 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_SCOPED_TEMP_FILE_H_
+#define BASE_FILES_SCOPED_TEMP_FILE_H_
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+
+namespace base {
+
+// An owned FilePath that's deleted when this object goes out of scope.
+// Deletion is attempted on destruction, but is not guaranteed.
+class BASE_EXPORT ScopedTempFile {
+ public:
+  // No file is owned/created initially.
+  ScopedTempFile();
+
+  ScopedTempFile(ScopedTempFile&&) noexcept;
+  ScopedTempFile& operator=(ScopedTempFile&&) noexcept;
+
+  ~ScopedTempFile();
+
+  // The owned path must be empty before calling Create().
+  // Returns true on success.
+  [[nodiscard]] bool Create();
+
+  // Returns true on success or if the file was never created.
+  [[nodiscard]] bool Delete();
+
+  // Attempts to delete the file.  The managed path is reset regardless of
+  // whether the deletion was successful.
+  void Reset();
+
+  [[nodiscard]] const base::FilePath& path() const { return path_; }
+
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  operator bool() const { return !path_.empty(); }
+
+ private:
+  FilePath path_;
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_SCOPED_TEMP_FILE_H_
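A brief usage sketch for the new ScopedTempFile; the unittest added below exercises the same API, and the WriteFile call here is illustrative only:

  base::ScopedTempFile temp;
  if (!temp.Create()) {
    return;  // Could not create a temporary file.
  }
  base::WriteFile(temp.path(), "scratch data");
  // The file is deleted automatically when `temp` goes out of scope
  // (or explicitly via Delete()/Reset()).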
diff --git a/base/files/scoped_temp_file_unittest.cc b/base/files/scoped_temp_file_unittest.cc
new file mode 100644
index 0000000..3686a93
--- /dev/null
+++ b/base/files/scoped_temp_file_unittest.cc
@@ -0,0 +1,89 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_file.h"
+
+#include <utility>
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ScopedTempFile, Basic) {
+  ScopedTempFile temp_file;
+  EXPECT_TRUE(temp_file.path().empty());
+
+  EXPECT_TRUE(temp_file.Create());
+  EXPECT_TRUE(PathExists(temp_file.path()));
+
+  EXPECT_TRUE(temp_file.Delete());
+  EXPECT_FALSE(PathExists(temp_file.path()));
+}
+
+TEST(ScopedTempFile, MoveConstruct) {
+  ScopedTempFile temp1;
+  EXPECT_TRUE(temp1.Create());
+  FilePath file1 = temp1.path();
+  EXPECT_TRUE(PathExists(file1));
+
+  ScopedTempFile temp2(std::move(temp1));
+  EXPECT_TRUE(temp1.path().empty());
+  EXPECT_EQ(file1, temp2.path());
+  EXPECT_TRUE(PathExists(file1));
+}
+
+TEST(ScopedTempFile, MoveAssign) {
+  ScopedTempFile temp1;
+  EXPECT_TRUE(temp1.Create());
+  FilePath file1 = temp1.path();
+  EXPECT_TRUE(PathExists(file1));
+
+  ScopedTempFile temp2;
+  EXPECT_TRUE(temp2.Create());
+  FilePath file2 = temp2.path();
+  EXPECT_TRUE(PathExists(file2));
+
+  temp2 = std::move(temp1);
+  EXPECT_TRUE(temp1.path().empty());
+  EXPECT_EQ(temp2.path(), file1);
+  EXPECT_TRUE(PathExists(file1));
+  EXPECT_FALSE(PathExists(file2));
+}
+
+TEST(ScopedTempFile, Destruct) {
+  FilePath file;
+  {
+    ScopedTempFile temp;
+    EXPECT_TRUE(temp.Create());
+    file = temp.path();
+    EXPECT_TRUE(PathExists(file));
+  }
+
+  EXPECT_FALSE(PathExists(file));
+}
+
+TEST(ScopedTempFile, Reset) {
+  ScopedTempFile temp_file;
+  EXPECT_TRUE(temp_file.path().empty());
+
+  EXPECT_TRUE(temp_file.Create());
+  EXPECT_TRUE(PathExists(temp_file.path()));
+
+  temp_file.Reset();
+  EXPECT_FALSE(PathExists(temp_file.path()));
+}
+
+TEST(ScopedTempFile, OperatorBool) {
+  ScopedTempFile temp_file;
+  EXPECT_FALSE(temp_file);
+
+  EXPECT_TRUE(temp_file.Create());
+  EXPECT_TRUE(temp_file);
+
+  EXPECT_TRUE(temp_file.Delete());
+  EXPECT_FALSE(temp_file);
+}
+
+}  // namespace base
diff --git a/base/format_macros.h b/base/format_macros.h
index b8c3b45..4b89b21 100644
--- a/base/format_macros.h
+++ b/base/format_macros.h
@@ -39,34 +39,10 @@
 
 #include <inttypes.h>
 
-#if BUILDFLAG(IS_WIN)
-
-#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
-#error "inttypes.h provided by win toolchain should define these."
-#endif
-
-#define WidePRId64 L"I64d"
-#define WidePRIu64 L"I64u"
-#define WidePRIx64 L"I64x"
-
-#if !defined(PRIuS)
-#define PRIuS "Iu"
-#endif
-
-#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
-
-// GCC will concatenate wide and narrow strings correctly, so nothing needs to
-// be done here.
-#define WidePRId64 PRId64
-#define WidePRIu64 PRIu64
-#define WidePRIx64 PRIx64
-
 #if !defined(PRIuS)
 #define PRIuS "zu"
 #endif
 
-#endif  // BUILDFLAG(IS_WIN)
-
 // The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
 // architectures and Apple does not provide standard format macros and
 // recommends casting. This has many drawbacks, so instead define macros
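With the Windows-specific branch removed, PRIuS now expands to "zu" on every supported platform. A tiny illustration of how the macro is used (not part of this change):

  size_t n = strlen("hello");
  printf("copied %" PRIuS " bytes\n", n);  // "%zu" everywhere now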
diff --git a/base/fuchsia/file_utils.cc b/base/fuchsia/file_utils.cc
index a504b5b..9e86e86 100644
--- a/base/fuchsia/file_utils.cc
+++ b/base/fuchsia/file_utils.cc
@@ -31,9 +31,7 @@
 
   zx::channel channel;
   zx_status_t status =
-      fdio_fd_transfer(fd.get(), channel.reset_and_get_address());
-  if (status != ZX_ERR_UNAVAILABLE)
-    std::ignore = fd.release();
+      fdio_fd_transfer(fd.release(), channel.reset_and_get_address());
   if (status != ZX_OK) {
     ZX_DLOG(ERROR, status) << "fdio_fd_transfer";
     return fidl::InterfaceHandle<::fuchsia::io::Directory>();
diff --git a/base/fuchsia/test.test-fidl b/base/fuchsia/test.test-fidl
index 69f0e9a..8d2df64 100644
--- a/base/fuchsia/test.test-fidl
+++ b/base/fuchsia/test.test-fidl
@@ -5,8 +5,8 @@
 library base.testfidl;
 
 @discoverable
-protocol TestInterface {
-    Add(struct {
+closed protocol TestInterface {
+    strict Add(struct {
         a int32;
         b int32;
     }) -> (struct {
diff --git a/base/fuchsia/time_zone_data_unittest.cc b/base/fuchsia/time_zone_data_unittest.cc
index 45664ed..a1076b9 100644
--- a/base/fuchsia/time_zone_data_unittest.cc
+++ b/base/fuchsia/time_zone_data_unittest.cc
@@ -7,6 +7,7 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/strings/string_util.h"
+#include "base/test/icu_test_util.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "third_party/icu/source/common/unicode/uclean.h"
@@ -28,9 +29,13 @@
 
 class TimeZoneDataTest : public testing::Test {
  protected:
-  void SetUp() override { ResetIcu(); }
+  void TearDown() override {
+    ResetIcu();
 
-  void TearDown() override { ResetIcu(); }
+    // ICU must be set back up in case e.g. a log statement that formats times
+    // uses it.
+    test::InitializeICUForTesting();
+  }
 
   // Needed to enable loading of ICU config files that are different from what
   // is available in Chromium.  Both icu_util and ICU library keep internal
@@ -59,15 +64,17 @@
 // not be present, in which case we skip running this test.
 TEST_F(TimeZoneDataTest, CompareSystemRevisionWithExpected) {
   if (!base::PathExists(base::FilePath(kRevisionFilePath))) {
-    LOG(INFO) << "Skipped test because tzdata config is not present";
-    return;
+    GTEST_SKIP() << "Skipped test because tzdata config is not present";
   }
 
   // ResetIcu() ensures that time zone data is loaded from the default location.
+  // This is done after the GTEST_SKIP() call above, since that may output a
+  // timestamp that requires ICU to be set up.
+  ResetIcu();
 
   ASSERT_TRUE(InitializeICU());
   std::string expected;
-  ASSERT_TRUE(
+  EXPECT_TRUE(
       base::ReadFileToString(base::FilePath(kRevisionFilePath), &expected));
   std::string actual;
   GetActualRevision(&actual);
@@ -82,9 +89,10 @@
 // ICU library versions.
 TEST_F(TimeZoneDataTest, TestLoadingTimeZoneDataFromKnownConfigs) {
   ASSERT_TRUE(base::DirectoryExists(base::FilePath(kTzDataDirPath)));
+  ResetIcu();
   SetIcuTimeZoneDataDirForTesting(kTzDataDirPath);
 
-  EXPECT_TRUE(InitializeICU());
+  ASSERT_TRUE(InitializeICU());
   std::string actual;
   GetActualRevision(&actual);
   EXPECT_EQ("2019a", actual) << "If ICU no longer supports this tzdata "
@@ -92,9 +100,10 @@
 }
 
 TEST_F(TimeZoneDataTest, DoesNotCrashWithInvalidPath) {
+  ResetIcu();
   SetIcuTimeZoneDataDirForTesting("/some/nonexistent/path");
 
-  EXPECT_TRUE(InitializeICU());
+  ASSERT_TRUE(InitializeICU());
   std::string actual;
   GetActualRevision(&actual);
   EXPECT_TRUE(
diff --git a/base/functional/bind.h b/base/functional/bind.h
index 999c6d5..3d9a9b5 100644
--- a/base/functional/bind.h
+++ b/base/functional/bind.h
@@ -64,8 +64,8 @@
                 "BindOnce requires non-const rvalue for OnceCallback binding."
                 " I.e.: base::BindOnce(std::move(callback)).");
   static_assert(
-      std::conjunction<
-          internal::AssertBindArgIsNotBasePassed<std::decay_t<Args>>...>::value,
+      std::conjunction_v<
+          internal::AssertBindArgIsNotBasePassed<std::decay_t<Args>>...>,
       "Use std::move() instead of base::Passed() with base::BindOnce()");
 
   return internal::BindImpl<OnceCallback>(std::forward<Functor>(functor),
diff --git a/base/functional/bind_internal.h b/base/functional/bind_internal.h
index 8fea91e..4eb827f 100644
--- a/base/functional/bind_internal.h
+++ b/base/functional/bind_internal.h
@@ -13,9 +13,9 @@
 #include <type_traits>
 #include <utility>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
 #include "base/functional/callback_internal.h"
@@ -23,7 +23,6 @@
 #include "base/functional/unretained_traits.h"
 #include "base/memory/raw_ptr.h"
 #include "base/memory/raw_ptr_asan_bound_arg_tracker.h"
-#include "base/memory/raw_ptr_asan_service.h"
 #include "base/memory/raw_ref.h"
 #include "base/memory/raw_scoped_refptr_mismatch_checker.h"
 #include "base/memory/weak_ptr.h"
@@ -276,8 +275,8 @@
  public:
   // NOLINTNEXTLINE(google-explicit-constructor)
   UnretainedRefWrapperReceiver(
-      UnretainedRefWrapper<T, UnretainedTrait, PtrTraits>&& o)
-      : obj_(std::move(o)) {}
+      UnretainedRefWrapper<T, UnretainedTrait, PtrTraits>&& obj)
+      : obj_(std::move(obj)) {}
   // NOLINTNEXTLINE(google-explicit-constructor)
   T& operator*() const { return obj_.get(); }
 
@@ -551,7 +550,7 @@
 //   IsCallableObject<void(Foo::*)()>::value is false.
 //
 //   int i = 0;
-//   auto f = [i]() {};
+//   auto f = [i] {};
 //   IsCallableObject<decltype(f)>::value is false.
 template <typename Functor, typename SFINAE = void>
 struct IsCallableObject : std::false_type {};
@@ -591,11 +590,11 @@
 // Example:
 //
 //   // Captureless lambdas are allowed.
-//   []() {return 42;};
+//   [] { return 42; };
 //
 //   // Capturing lambdas are *not* allowed.
 //   int x;
-//   [x]() {return x;};
+//   [x] { return x; };
 //
 //   // Any empty class with operator() is allowed.
 //   struct Foo {
@@ -736,37 +735,13 @@
 
 // For __stdcall methods.
 template <typename R, typename Receiver, typename... Args>
-struct FunctorTraits<R (__stdcall Receiver::*)(Args...)> {
-  using RunType = R(Receiver*, Args...);
-  static constexpr bool is_method = true;
-  static constexpr bool is_nullable = true;
-  static constexpr bool is_callback = false;
-  static constexpr bool is_stateless = true;
-
-  template <typename Method, typename ReceiverPtr, typename... RunArgs>
-  static R Invoke(Method method,
-                  ReceiverPtr&& receiver_ptr,
-                  RunArgs&&... args) {
-    return ((*receiver_ptr).*method)(std::forward<RunArgs>(args)...);
-  }
-};
+struct FunctorTraits<R (__stdcall Receiver::*)(Args...)>
+    : public FunctorTraits<R (Receiver::*)(Args...)> {};
 
 // For __stdcall const methods.
 template <typename R, typename Receiver, typename... Args>
-struct FunctorTraits<R (__stdcall Receiver::*)(Args...) const> {
-  using RunType = R(const Receiver*, Args...);
-  static constexpr bool is_method = true;
-  static constexpr bool is_nullable = true;
-  static constexpr bool is_callback = false;
-  static constexpr bool is_stateless = true;
-
-  template <typename Method, typename ReceiverPtr, typename... RunArgs>
-  static R Invoke(Method method,
-                  ReceiverPtr&& receiver_ptr,
-                  RunArgs&&... args) {
-    return ((*receiver_ptr).*method)(std::forward<RunArgs>(args)...);
-  }
-};
+struct FunctorTraits<R (__stdcall Receiver::*)(Args...) const>
+    : public FunctorTraits<R (Receiver::*)(Args...) const> {};
 
 #endif  // BUILDFLAG(IS_WIN) && !defined(ARCH_CPU_64_BITS)
 
@@ -1116,7 +1091,6 @@
   //   Foo::Foo() {}
   //
   //   scoped_refptr<Foo> oo = Foo::Create();
-  //
   DCHECK(receiver->HasAtLeastOneRef());
 }
 
@@ -1355,7 +1329,7 @@
       static constexpr bool kNotARawPtr = !IsRawPtrV<FunctorParamType>;
 
       static constexpr bool kCanBeForwardedToBoundFunctor =
-          std::is_constructible_v<FunctorParamType, ForwardingType>;
+          std::is_convertible_v<ForwardingType, FunctorParamType>;
 
       // If the bound type can't be forwarded then test if `FunctorParamType` is
       // a non-const lvalue reference and a reference to the unwrapped type
@@ -1372,8 +1346,8 @@
       // forwarded if `Passed()` had been used.
       static constexpr bool kMoveOnlyTypeMustUseBasePassed =
           kCanBeForwardedToBoundFunctor ||
-          !std::is_constructible_v<FunctorParamType,
-                                   std::decay_t<ForwardingType>&&>;
+          !std::is_convertible_v<std::decay_t<ForwardingType>&&,
+                                 FunctorParamType>;
     };
   };
 
@@ -1447,6 +1421,35 @@
           typename Param>
 struct AssertConstructible {
  private:
+  // We forbid callbacks to use raw_ptr as a parameter. However, we allow
+  // MayBeDangling<T> iff the callback argument was created using
+  // `base::UnsafeDangling`.
+  static_assert(
+      BindArgument<i>::template ForwardedAs<
+          Unwrapped>::template ToParamWithType<Param>::kNotARawPtr ||
+          BindArgument<i>::template ToParamWithType<Param>::template StoredAs<
+              Storage>::kMayBeDanglingMustBeUsed,
+      "base::Bind() target functor has a parameter of type raw_ptr<T>. "
+      "raw_ptr<T> should not be used for function parameters, please use T* or "
+      "T& instead.");
+
+  // A bound functor must take a dangling pointer argument (e.g. bound using the
+  // UnsafeDangling helper) as a MayBeDangling<T>, to make it clear that the
+  // pointee's lifetime must be externally validated before using it. For
+  // methods, exempt a bound receiver (i.e. the this pointer) as it is not
+  // passed as a regular function argument.
+  static_assert(
+      BindArgument<i>::template ToParamWithType<Param>::template StoredAs<
+          Storage>::template kMayBeDanglingPtrPassedCorrectly<is_method>,
+      "base::UnsafeDangling() pointers must be received by functors with "
+      "MayBeDangling<T> as parameter.");
+
+  static_assert(
+      BindArgument<i>::template ToParamWithType<Param>::template StoredAs<
+          Storage>::kUnsafeDanglingAndMayBeDanglingHaveMatchingTraits,
+      "MayBeDangling<T> parameter must receive the same RawPtrTraits as the "
+      "one passed to the corresponding base::UnsafeDangling() call.");
+
   // With `BindRepeating`, there are two decision points for how to handle a
   // move-only type:
   //
@@ -1499,35 +1502,6 @@
       BindArgument<i>::template BoundAs<Arg>::template StoredAs<
           Storage>::kBindArgumentCanBeCaptured,
       "Cannot capture argument: is the argument copyable or movable?");
-
-  // We forbid callbacks to use raw_ptr as a parameter. However, we allow
-  // MayBeDangling<T> iff the callback argument was created using
-  // `base::UnsafeDangling`.
-  static_assert(
-      BindArgument<i>::template ForwardedAs<
-          Unwrapped>::template ToParamWithType<Param>::kNotARawPtr ||
-          BindArgument<i>::template ToParamWithType<Param>::template StoredAs<
-              Storage>::kMayBeDanglingMustBeUsed,
-      "base::Bind() target functor has a parameter of type raw_ptr<T>. "
-      "raw_ptr<T> should not be used for function parameters, please use T* or "
-      "T& instead.");
-
-  // A bound functor must take a dangling pointer argument (e.g. bound using the
-  // UnsafeDangling helper) as a MayBeDangling<T>, to make it clear that the
-  // pointee's lifetime must be externally validated before using it. For
-  // methods, exempt a bound receiver (i.e. the this pointer) as it is not
-  // passed as a regular function argument.
-  static_assert(
-      BindArgument<i>::template ToParamWithType<Param>::template StoredAs<
-          Storage>::template kMayBeDanglingPtrPassedCorrectly<is_method>,
-      "base::UnsafeDangling() pointers must be received by functors with "
-      "MayBeDangling<T> as parameter.");
-
-  static_assert(
-      BindArgument<i>::template ToParamWithType<Param>::template StoredAs<
-          Storage>::kUnsafeDanglingAndMayBeDanglingHaveMatchingTraits,
-      "MayBeDangling<T> parameter must receive the same RawPtrTraits as the "
-      "one passed to the corresponding base::UnsafeDangling() call.");
 };
 
 // Takes three same-length TypeLists, and applies AssertConstructible for each
diff --git a/base/functional/bind_unittest.cc b/base/functional/bind_unittest.cc
index 8fecb55..a8788b3 100644
--- a/base/functional/bind_unittest.cc
+++ b/base/functional/bind_unittest.cc
@@ -6,15 +6,16 @@
 
 #include <functional>
 #include <memory>
+#include <string>
 #include <utility>
 #include <vector>
 
 #include "base/allocator/partition_alloc_features.h"
 #include "base/allocator/partition_alloc_support.h"
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"
-#include "base/allocator/partition_allocator/partition_root.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_for_testing.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_root.h"
 #include "base/functional/callback.h"
 #include "base/functional/disallow_unretained.h"
 #include "base/memory/ptr_util.h"
@@ -1490,17 +1491,16 @@
   EXPECT_FALSE(internal::IsCallableObject<void (*)()>::value);
   EXPECT_FALSE(internal::IsCallableObject<void (NoRef::*)()>::value);
 
-  auto f = []() {};
+  auto f = [] {};
   EXPECT_TRUE(internal::IsCallableObject<decltype(f)>::value);
 
   int i = 0;
-  auto g = [i]() { (void)i; };
+  auto g = [i] { (void)i; };
   EXPECT_TRUE(internal::IsCallableObject<decltype(g)>::value);
 
   auto h = [](int, double) { return 'k'; };
-  EXPECT_TRUE(
-      (std::is_same<char(int, double),
-                    internal::ExtractCallableRunType<decltype(h)>>::value));
+  EXPECT_TRUE((std::is_same_v<char(int, double),
+                              internal::ExtractCallableRunType<decltype(h)>>));
 
   EXPECT_EQ(42, BindRepeating([] { return 42; }).Run());
   EXPECT_EQ(42, BindRepeating([](int i) { return i * 7; }, 6).Run());
@@ -1597,56 +1597,51 @@
   // Check if Callback variants have declarations of conversions as expected.
   // Copy constructor and assignment of RepeatingCallback.
   static_assert(
-      std::is_constructible<RepeatingClosure, const RepeatingClosure&>::value,
+      std::is_constructible_v<RepeatingClosure, const RepeatingClosure&>,
       "RepeatingClosure should be copyable.");
-  static_assert(
-      std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
-      "RepeatingClosure should be copy-assignable.");
+  static_assert(std::is_assignable_v<RepeatingClosure, const RepeatingClosure&>,
+                "RepeatingClosure should be copy-assignable.");
 
   // Move constructor and assignment of RepeatingCallback.
-  static_assert(
-      std::is_constructible<RepeatingClosure, RepeatingClosure&&>::value,
-      "RepeatingClosure should be movable.");
-  static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
+  static_assert(std::is_constructible_v<RepeatingClosure, RepeatingClosure&&>,
+                "RepeatingClosure should be movable.");
+  static_assert(std::is_assignable_v<RepeatingClosure, RepeatingClosure&&>,
                 "RepeatingClosure should be move-assignable");
 
   // Conversions from OnceCallback to RepeatingCallback.
-  static_assert(
-      !std::is_constructible<RepeatingClosure, const OnceClosure&>::value,
-      "OnceClosure should not be convertible to RepeatingClosure.");
-  static_assert(
-      !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
-      "OnceClosure should not be convertible to RepeatingClosure.");
+  static_assert(!std::is_constructible_v<RepeatingClosure, const OnceClosure&>,
+                "OnceClosure should not be convertible to RepeatingClosure.");
+  static_assert(!std::is_assignable_v<RepeatingClosure, const OnceClosure&>,
+                "OnceClosure should not be convertible to RepeatingClosure.");
 
   // Destructive conversions from OnceCallback to RepeatingCallback.
-  static_assert(!std::is_constructible<RepeatingClosure, OnceClosure&&>::value,
+  static_assert(!std::is_constructible_v<RepeatingClosure, OnceClosure&&>,
                 "OnceClosure should not be convertible to RepeatingClosure.");
-  static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
+  static_assert(!std::is_assignable_v<RepeatingClosure, OnceClosure&&>,
                 "OnceClosure should not be convertible to RepeatingClosure.");
 
   // Copy constructor and assignment of OnceCallback.
-  static_assert(!std::is_constructible<OnceClosure, const OnceClosure&>::value,
+  static_assert(!std::is_constructible_v<OnceClosure, const OnceClosure&>,
                 "OnceClosure should not be copyable.");
-  static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
+  static_assert(!std::is_assignable_v<OnceClosure, const OnceClosure&>,
                 "OnceClosure should not be copy-assignable");
 
   // Move constructor and assignment of OnceCallback.
-  static_assert(std::is_constructible<OnceClosure, OnceClosure&&>::value,
+  static_assert(std::is_constructible_v<OnceClosure, OnceClosure&&>,
                 "OnceClosure should be movable.");
-  static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
+  static_assert(std::is_assignable_v<OnceClosure, OnceClosure&&>,
                 "OnceClosure should be move-assignable.");
 
   // Conversions from RepeatingCallback to OnceCallback.
-  static_assert(
-      std::is_constructible<OnceClosure, const RepeatingClosure&>::value,
-      "RepeatingClosure should be convertible to OnceClosure.");
-  static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
+  static_assert(std::is_constructible_v<OnceClosure, const RepeatingClosure&>,
+                "RepeatingClosure should be convertible to OnceClosure.");
+  static_assert(std::is_assignable_v<OnceClosure, const RepeatingClosure&>,
                 "RepeatingClosure should be convertible to OnceClosure.");
 
   // Destructive conversions from RepeatingCallback to OnceCallback.
-  static_assert(std::is_constructible<OnceClosure, RepeatingClosure&&>::value,
+  static_assert(std::is_constructible_v<OnceClosure, RepeatingClosure&&>,
                 "RepeatingClosure should be convertible to OnceClosure.");
-  static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
+  static_assert(std::is_assignable_v<OnceClosure, RepeatingClosure&&>,
                 "RepeatingClosure should be covretible to OnceClosure.");
 
   OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
@@ -1779,6 +1774,28 @@
   EXPECT_EQ(123, res);
 }
 
+TEST_F(BindTest, ConvertibleArgs) {
+  // Create two types S and T, such that you can convert a T to an S, but you
+  // cannot construct an S from a T.
+  struct T;
+  class S {
+    friend struct T;
+    explicit S(const T&) {}
+  };
+  struct T {
+    // NOLINTNEXTLINE(google-explicit-constructor)
+    operator S() const { return S(*this); }
+  };
+  static_assert(!std::is_constructible_v<S, T>);
+  static_assert(std::is_convertible_v<T, S>);
+
+  // Ensure it's possible to pass a T to a function expecting an S.
+  void (*foo)(S) = +[](S) {};
+  const T t;
+  auto callback = base::BindOnce(foo, t);
+  std::move(callback).Run();
+}
+
 }  // namespace
 
 // This simulates a race weak pointer that, unlike our `base::WeakPtr<>`,
@@ -1860,8 +1877,7 @@
 // testing purpose.
 static constexpr partition_alloc::PartitionOptions
     kOnlyEnableBackupRefPtrOptions = {
-        .backup_ref_ptr =
-            partition_alloc::PartitionOptions::BackupRefPtr::kEnabled,
+        .backup_ref_ptr = partition_alloc::PartitionOptions::kEnabled,
 };
 
 class BindUnretainedDanglingInternalFixture : public BindTest {
diff --git a/base/functional/callback.h b/base/functional/callback.h
index 773662e..84a21f9 100644
--- a/base/functional/callback.h
+++ b/base/functional/callback.h
@@ -142,6 +142,8 @@
   //
   // May not be called on a null callback.
   R Run(Args... args) && {
+    CHECK(!holder_.is_null());
+
     // Move the callback instance into a local variable before the invocation,
     // that ensures the internal state is cleared after the invocation.
     // It's not safe to touch |this| after the invocation, since running the
@@ -324,6 +326,8 @@
   //
   // May not be called on a null callback.
   R Run(Args... args) const& {
+    CHECK(!holder_.is_null());
+
     // Keep `bind_state` alive at least until after the invocation to ensure all
     // bound `Unretained` arguments remain protected by MiraclePtr.
     scoped_refptr<internal::BindStateBase> bind_state = holder_.bind_state();
@@ -338,6 +342,8 @@
   //
   // May not be called on a null callback.
   R Run(Args... args) && {
+    CHECK(!holder_.is_null());
+
     // Move the callback instance into a local variable before the invocation,
     // that ensures the internal state is cleared after the invocation.
     // It's not safe to touch |this| after the invocation, since running the
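With the new CHECK at the top of each Run() overload, invoking a null callback now fails fast with a crash rather than invoking undefined behavior. A minimal illustration of the new behavior (the death test added to callback_unittest.cc below covers the same thing):

  base::OnceClosure closure;      // null: no functor bound
  // std::move(closure).Run();    // Now hits CHECK(!holder_.is_null()) and crashes.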
diff --git a/base/functional/callback_helpers.h b/base/functional/callback_helpers.h
index a04ff24..592e15f 100644
--- a/base/functional/callback_helpers.h
+++ b/base/functional/callback_helpers.h
@@ -96,8 +96,33 @@
   const bool ignore_extra_runs_;
 };
 
+template <typename... Args>
+void ForwardRepeatingCallbacksImpl(
+    std::vector<RepeatingCallback<void(Args...)>> cbs,
+    Args... args) {
+  for (auto& cb : cbs) {
+    if (cb) {
+      cb.Run(std::forward<Args>(args)...);
+    }
+  }
+}
+
 }  // namespace internal
 
+// Wraps the given RepeatingCallbacks and returns one RepeatingCallback with an
+// identical signature. On invocation of this callback, all the given
+// RepeatingCallbacks will be called with the same arguments. Unbound arguments
+// must be copyable.
+template <typename... Args>
+RepeatingCallback<void(Args...)> ForwardRepeatingCallbacks(
+    std::initializer_list<RepeatingCallback<void(Args...)>>&& cbs) {
+  std::vector<RepeatingCallback<void(Args...)>> v(
+      std::forward<std::initializer_list<RepeatingCallback<void(Args...)>>>(
+          cbs));
+  return BindRepeating(&internal::ForwardRepeatingCallbacksImpl<Args...>,
+                       std::move(v));
+}
+
 // Wraps the given OnceCallback and returns two OnceCallbacks with an identical
 // signature. On first invocation of either returned callback, the original
 // callback is invoked. Invoking the remaining callback results in a crash.
@@ -116,22 +141,45 @@
   return std::make_pair(wrapped_once, wrapped_once);
 }
 
-// Convenience helper to allow a `closure` to be used in a context which is
-// expecting a callback with arguments. Returns a null callback if `closure` is
-// null.
-template <typename... Args>
-RepeatingCallback<void(Args...)> IgnoreArgs(RepeatingClosure closure) {
-  return closure ? BindRepeating([](Args...) {}).Then(std::move(closure))
-                 : RepeatingCallback<void(Args...)>();
+// Adapts `callback` for use in a context which is expecting a callback with
+// additional parameters. Returns a null callback if `callback` is null.
+//
+// Usage:
+//   void LogError(char* error_message) {
+//     if (error_message) {
+//       cout << "Log: " << error_message << endl;
+//     }
+//   }
+//   base::RepeatingCallback<void(int, char*)> cb =
+//      base::IgnoreArgs<int>(base::BindRepeating(&LogError));
+//   cb.Run(42, nullptr);
+//
+// Note in the example above that the type(s) passed to `IgnoreArgs`
+// represent the additional prepended parameters (those which will be
+// "ignored").
+template <typename... Preargs, typename... Args>
+RepeatingCallback<void(Preargs..., Args...)> IgnoreArgs(
+    RepeatingCallback<void(Args...)> callback) {
+  return callback ? BindRepeating(
+                        [](RepeatingCallback<void(Args...)> callback,
+                           Preargs..., Args... args) {
+                          std::move(callback).Run(std::forward<Args>(args)...);
+                        },
+                        std::move(callback))
+                  : RepeatingCallback<void(Preargs..., Args...)>();
 }
 
-// Convenience helper to allow a `closure` to be used in a context which is
-// expecting a callback with arguments. Returns a null callback if `closure` is
-// null.
-template <typename... Args>
-OnceCallback<void(Args...)> IgnoreArgs(OnceClosure closure) {
-  return closure ? BindOnce([](Args...) {}).Then(std::move(closure))
-                 : OnceCallback<void(Args...)>();
+// As above, but for OnceCallback.
+template <typename... Preargs, typename... Args>
+OnceCallback<void(Preargs..., Args...)> IgnoreArgs(
+    OnceCallback<void(Args...)> callback) {
+  return callback ? BindOnce(
+                        [](OnceCallback<void(Args...)> callback, Preargs...,
+                           Args... args) {
+                          std::move(callback).Run(std::forward<Args>(args)...);
+                        },
+                        std::move(callback))
+                  : OnceCallback<void(Preargs..., Args...)>();
 }
 
 // ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
@@ -212,6 +260,20 @@
       std::forward<Args>(args)...);
 }
 
+// Creates a callback that returns `value` when invoked. This helper is useful
+// for implementing factories that return a constant value.
+// Example:
+//
+// void F(base::OnceCallback<Widget()> factory);
+//
+// Widget widget = ...;
+// F(base::ReturnValueOnce(std::move(widget)));
+template <typename T>
+constexpr OnceCallback<T(void)> ReturnValueOnce(T value) {
+  static_assert(!std::is_reference_v<T>);
+  return base::BindOnce([](T value) { return value; }, std::move(value));
+}
+
 // Useful for creating a Closure that will delete a pointer when invoked. Only
 // use this when necessary. In most cases MessageLoop::DeleteSoon() is a better
 // fit.
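Unlike IgnoreArgs and ReturnValueOnce above, ForwardRepeatingCallbacks has no usage example in its comment; a hedged sketch of the intended use, mirroring the new unittest below (the handler names are hypothetical):

  void RecordMetric(int code);     // hypothetical handlers
  void NotifyObservers(int code);

  base::RepeatingCallback<void(int)> fan_out = base::ForwardRepeatingCallbacks(
      {base::BindRepeating(&RecordMetric),
       base::BindRepeating(&NotifyObservers)});
  fan_out.Run(404);  // Runs RecordMetric(404) and then NotifyObservers(404).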
diff --git a/base/functional/callback_helpers_unittest.cc b/base/functional/callback_helpers_unittest.cc
index e16d072..194475f 100644
--- a/base/functional/callback_helpers_unittest.cc
+++ b/base/functional/callback_helpers_unittest.cc
@@ -85,6 +85,10 @@
   (*value)++;
 }
 
+void IncrementWithRef(int& value) {
+  value++;
+}
+
 TEST(CallbackHelpersTest, ScopedClosureRunnerHasClosure) {
   base::ScopedClosureRunner runner1;
   EXPECT_FALSE(runner1);
@@ -182,9 +186,9 @@
 
   auto split = base::SplitOnceCallback(std::move(cb));
 
-  static_assert(std::is_same<decltype(split),
-                             std::pair<base::OnceCallback<void(int*)>,
-                                       base::OnceCallback<void(int*)>>>::value,
+  static_assert(std::is_same_v<decltype(split),
+                               std::pair<base::OnceCallback<void(int*)>,
+                                         base::OnceCallback<void(int*)>>>,
                 "");
   EXPECT_FALSE(split.first);
   EXPECT_FALSE(split.second);
@@ -197,9 +201,9 @@
 
   auto split = base::SplitOnceCallback(std::move(cb));
 
-  static_assert(std::is_same<decltype(split),
-                             std::pair<base::OnceCallback<void(int*)>,
-                                       base::OnceCallback<void(int*)>>>::value,
+  static_assert(std::is_same_v<decltype(split),
+                               std::pair<base::OnceCallback<void(int*)>,
+                                         base::OnceCallback<void(int*)>>>,
                 "");
 
   EXPECT_EQ(0, count);
@@ -218,9 +222,9 @@
 
   auto split = base::SplitOnceCallback(std::move(cb));
 
-  static_assert(std::is_same<decltype(split),
-                             std::pair<base::OnceCallback<void(int*)>,
-                                       base::OnceCallback<void(int*)>>>::value,
+  static_assert(std::is_same_v<decltype(split),
+                               std::pair<base::OnceCallback<void(int*)>,
+                                         base::OnceCallback<void(int*)>>>,
                 "");
 
   EXPECT_EQ(0, count);
@@ -285,6 +289,19 @@
   EXPECT_EQ(2, count);
   std::move(once_int_cb).Run(42);
   EXPECT_EQ(3, count);
+
+  // Ignore only some (one) argument and forward the rest.
+  auto repeating_callback = base::BindRepeating(&Increment);
+  auto repeating_cb_with_extra_arg = base::IgnoreArgs<bool>(repeating_callback);
+  repeating_cb_with_extra_arg.Run(false, &count);
+  EXPECT_EQ(4, count);
+
+  // Ignore two arguments and forward the rest.
+  auto once_callback = base::BindOnce(&Increment);
+  auto once_cb_with_extra_arg =
+      base::IgnoreArgs<char, bool>(std::move(once_callback));
+  std::move(once_cb_with_extra_arg).Run('d', false, &count);
+  EXPECT_EQ(5, count);
 }
 
 TEST(CallbackHelpersTest, IgnoreArgs_EmptyCallback) {
@@ -297,4 +314,31 @@
   EXPECT_FALSE(once_int_cb);
 }
 
+TEST(CallbackHelpersTest, ForwardRepeatingCallbacks) {
+  int count = 0;
+  auto tie_cb =
+      base::ForwardRepeatingCallbacks({base::BindRepeating(&IncrementWithRef),
+                                       base::BindRepeating(&IncrementWithRef)});
+
+  tie_cb.Run(count);
+  EXPECT_EQ(count, 2);
+
+  tie_cb.Run(count);
+  EXPECT_EQ(count, 4);
+}
+
+TEST(CallbackHelpersTest, ReturnValueOnce) {
+  // Check that copyable types are supported.
+  auto string_factory = base::ReturnValueOnce(std::string("test"));
+  static_assert(std::is_same_v<decltype(string_factory),
+                               base::OnceCallback<std::string(void)>>);
+  EXPECT_EQ(std::move(string_factory).Run(), "test");
+
+  // Check that move-only types are supported.
+  auto unique_ptr_factory = base::ReturnValueOnce(std::make_unique<int>(42));
+  static_assert(std::is_same_v<decltype(unique_ptr_factory),
+                               base::OnceCallback<std::unique_ptr<int>(void)>>);
+  EXPECT_EQ(*std::move(unique_ptr_factory).Run(), 42);
+}
+
 }  // namespace
diff --git a/base/functional/callback_unittest.cc b/base/functional/callback_unittest.cc
index 99ae111..f56b2aa 100644
--- a/base/functional/callback_unittest.cc
+++ b/base/functional/callback_unittest.cc
@@ -14,6 +14,7 @@
 #include "base/notreached.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/test/bind.h"
+#include "base/test/gtest_util.h"
 #include "base/test/test_timeouts.h"
 #include "base/threading/thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -62,21 +63,20 @@
 };
 
 TEST_F(CallbackTest, Types) {
-  static_assert(std::is_same<void, OnceClosure::ResultType>::value, "");
-  static_assert(std::is_same<void(), OnceClosure::RunType>::value, "");
+  static_assert(std::is_same_v<void, OnceClosure::ResultType>, "");
+  static_assert(std::is_same_v<void(), OnceClosure::RunType>, "");
 
   using OnceCallbackT = OnceCallback<double(int, char)>;
-  static_assert(std::is_same<double, OnceCallbackT::ResultType>::value, "");
-  static_assert(std::is_same<double(int, char), OnceCallbackT::RunType>::value,
-                "");
+  static_assert(std::is_same_v<double, OnceCallbackT::ResultType>, "");
+  static_assert(std::is_same_v<double(int, char), OnceCallbackT::RunType>, "");
 
-  static_assert(std::is_same<void, RepeatingClosure::ResultType>::value, "");
-  static_assert(std::is_same<void(), RepeatingClosure::RunType>::value, "");
+  static_assert(std::is_same_v<void, RepeatingClosure::ResultType>, "");
+  static_assert(std::is_same_v<void(), RepeatingClosure::RunType>, "");
 
   using RepeatingCallbackT = RepeatingCallback<bool(float, short)>;
-  static_assert(std::is_same<bool, RepeatingCallbackT::ResultType>::value, "");
-  static_assert(
-      std::is_same<bool(float, short), RepeatingCallbackT::RunType>::value, "");
+  static_assert(std::is_same_v<bool, RepeatingCallbackT::ResultType>, "");
+  static_assert(std::is_same_v<bool(float, short), RepeatingCallbackT::RunType>,
+                "");
 }
 
 // Ensure we can create unbound callbacks. We need this to be able to store
@@ -323,8 +323,7 @@
     return BindRepeating(function, std::forward<FArgs>(args)...);
   }
 
-  template <typename R2 = R,
-            std::enable_if_t<!std::is_void<R2>::value, int> = 0>
+  template <typename R2 = R, std::enable_if_t<!std::is_void_v<R2>, int> = 0>
   static int Outer(std::string* s,
                    std::unique_ptr<int> a,
                    std::unique_ptr<int> b) {
@@ -332,34 +331,32 @@
     *s += base::NumberToString(*a) + base::NumberToString(*b);
     return *a + *b;
   }
-  template <typename R2 = R,
-            std::enable_if_t<!std::is_void<R2>::value, int> = 0>
+  template <typename R2 = R, std::enable_if_t<!std::is_void_v<R2>, int> = 0>
   static int Outer(std::string* s, int a, int b) {
     *s += "Outer";
     *s += base::NumberToString(a) + base::NumberToString(b);
     return a + b;
   }
-  template <typename R2 = R,
-            std::enable_if_t<!std::is_void<R2>::value, int> = 0>
+  template <typename R2 = R, std::enable_if_t<!std::is_void_v<R2>, int> = 0>
   static int Outer(std::string* s) {
     *s += "Outer";
     *s += "None";
     return 99;
   }
 
-  template <typename R2 = R, std::enable_if_t<std::is_void<R2>::value, int> = 0>
+  template <typename R2 = R, std::enable_if_t<std::is_void_v<R2>, int> = 0>
   static void Outer(std::string* s,
                     std::unique_ptr<int> a,
                     std::unique_ptr<int> b) {
     *s += "Outer";
     *s += base::NumberToString(*a) + base::NumberToString(*b);
   }
-  template <typename R2 = R, std::enable_if_t<std::is_void<R2>::value, int> = 0>
+  template <typename R2 = R, std::enable_if_t<std::is_void_v<R2>, int> = 0>
   static void Outer(std::string* s, int a, int b) {
     *s += "Outer";
     *s += base::NumberToString(a) + base::NumberToString(b);
   }
-  template <typename R2 = R, std::enable_if_t<std::is_void<R2>::value, int> = 0>
+  template <typename R2 = R, std::enable_if_t<std::is_void_v<R2>, int> = 0>
   static void Outer(std::string* s) {
     *s += "Outer";
     *s += "None";
@@ -367,20 +364,20 @@
 
   template <typename OuterR,
             typename InnerR,
-            std::enable_if_t<!std::is_void<OuterR>::value, int> = 0,
-            std::enable_if_t<!std::is_void<InnerR>::value, int> = 0>
+            std::enable_if_t<!std::is_void_v<OuterR>, int> = 0,
+            std::enable_if_t<!std::is_void_v<InnerR>, int> = 0>
   static int Inner(std::string* s, OuterR a) {
-    static_assert(std::is_same<InnerR, int>::value, "Use int return type");
+    static_assert(std::is_same_v<InnerR, int>, "Use int return type");
     *s += "Inner";
     *s += base::NumberToString(a);
     return a;
   }
   template <typename OuterR,
             typename InnerR,
-            std::enable_if_t<std::is_void<OuterR>::value, int> = 0,
-            std::enable_if_t<!std::is_void<InnerR>::value, int> = 0>
+            std::enable_if_t<std::is_void_v<OuterR>, int> = 0,
+            std::enable_if_t<!std::is_void_v<InnerR>, int> = 0>
   static int Inner(std::string* s) {
-    static_assert(std::is_same<InnerR, int>::value, "Use int return type");
+    static_assert(std::is_same_v<InnerR, int>, "Use int return type");
     *s += "Inner";
     *s += "None";
     return 99;
@@ -388,16 +385,16 @@
 
   template <typename OuterR,
             typename InnerR,
-            std::enable_if_t<!std::is_void<OuterR>::value, int> = 0,
-            std::enable_if_t<std::is_void<InnerR>::value, int> = 0>
+            std::enable_if_t<!std::is_void_v<OuterR>, int> = 0,
+            std::enable_if_t<std::is_void_v<InnerR>, int> = 0>
   static void Inner(std::string* s, OuterR a) {
     *s += "Inner";
     *s += base::NumberToString(a);
   }
   template <typename OuterR,
             typename InnerR,
-            std::enable_if_t<std::is_void<OuterR>::value, int> = 0,
-            std::enable_if_t<std::is_void<InnerR>::value, int> = 0>
+            std::enable_if_t<std::is_void_v<OuterR>, int> = 0,
+            std::enable_if_t<std::is_void_v<InnerR>, int> = 0>
   static void Inner(std::string* s) {
     *s += "Inner";
     *s += "None";
@@ -777,5 +774,26 @@
   ASSERT_TRUE(deleted);
 }
 
+// According to legends, it is good practice to put death tests into their own
+// test suite, so they are grouped separately from regular tests, since death
+// tests are somewhat slow and have quirks that can slow down test running if
+// intermixed.
+TEST(CallbackDeathTest, RunNullCallbackChecks) {
+  {
+    base::OnceClosure closure;
+    EXPECT_CHECK_DEATH(std::move(closure).Run());
+  }
+
+  {
+    base::RepeatingClosure closure;
+    EXPECT_CHECK_DEATH(std::move(closure).Run());
+  }
+
+  {
+    base::RepeatingClosure closure;
+    EXPECT_CHECK_DEATH(closure.Run());
+  }
+}
+
 }  // namespace
 }  // namespace base
diff --git a/base/functional/invoke.h b/base/functional/invoke.h
index 0f47311..aa3f58f 100644
--- a/base/functional/invoke.h
+++ b/base/functional/invoke.h
@@ -34,17 +34,15 @@
 
 // Small helpers used below in internal::invoke to make the SFINAE more concise.
 template <typename F>
-const bool& IsMemFunPtr =
-    std::is_member_function_pointer<std::decay_t<F>>::value;
+const bool& IsMemFunPtr = std::is_member_function_pointer_v<std::decay_t<F>>;
 
 template <typename F>
-const bool& IsMemObjPtr = std::is_member_object_pointer<std::decay_t<F>>::value;
+const bool& IsMemObjPtr = std::is_member_object_pointer_v<std::decay_t<F>>;
 
 template <typename F,
           typename T,
           typename MemPtrClass = member_pointer_class_t<std::decay_t<F>>>
-const bool& IsMemPtrToBaseOf =
-    std::is_base_of<MemPtrClass, std::decay_t<T>>::value;
+const bool& IsMemPtrToBaseOf = std::is_base_of_v<MemPtrClass, std::decay_t<T>>;
 
 template <typename T>
 const bool& IsRefWrapper = is_reference_wrapper<std::decay_t<T>>::value;
diff --git a/base/hash/hash.cc b/base/hash/hash.cc
index 389d50a..7dfc42c 100644
--- a/base/hash/hash.cc
+++ b/base/hash/hash.cc
@@ -4,6 +4,8 @@
 
 #include "base/hash/hash.h"
 
+#include <string_view>
+
 #include "base/check_op.h"
 #include "base/notreached.h"
 #include "base/rand_util.h"
@@ -130,10 +132,6 @@
   return PersistentHash(as_bytes(make_span(str)));
 }
 
-uint32_t Hash(const std::u16string& str) {
-  return PersistentHash(as_bytes(make_span(str)));
-}
-
 uint32_t PersistentHash(span<const uint8_t> data) {
   // This hash function must not change, since it is designed to be persistable
   // to disk.
@@ -149,8 +147,8 @@
   return PersistentHash(make_span(static_cast<const uint8_t*>(data), length));
 }
 
-uint32_t PersistentHash(const std::string& str) {
-  return PersistentHash(str.data(), str.size());
+uint32_t PersistentHash(std::string_view str) {
+  return PersistentHash(as_bytes(make_span(str)));
 }
 
 size_t HashInts32(uint32_t value1, uint32_t value2) {
diff --git a/base/hash/hash.h b/base/hash/hash.h
index b5b782f..e99be98 100644
--- a/base/hash/hash.h
+++ b/base/hash/hash.h
@@ -10,6 +10,7 @@
 
 #include <limits>
 #include <string>
+#include <string_view>
 #include <utility>
 
 #include "base/base_export.h"
@@ -27,7 +28,6 @@
 // TODO(https://crbug.com/1025358): Migrate client code to new hash function.
 BASE_EXPORT uint32_t Hash(const void* data, size_t length);
 BASE_EXPORT uint32_t Hash(const std::string& str);
-BASE_EXPORT uint32_t Hash(const std::u16string& str);
 
 // Really *fast* and high quality hash.
 // Recommended hash function for general use, we pick the best performant
@@ -48,7 +48,7 @@
 // WARNING: This hash function should not be used for any cryptographic purpose.
 BASE_EXPORT uint32_t PersistentHash(base::span<const uint8_t> data);
 BASE_EXPORT uint32_t PersistentHash(const void* data, size_t length);
-BASE_EXPORT uint32_t PersistentHash(const std::string& str);
+BASE_EXPORT uint32_t PersistentHash(std::string_view str);
 
 // Hash pairs of 32-bit or 64-bit numbers.
 BASE_EXPORT size_t HashInts32(uint32_t value1, uint32_t value2);
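With PersistentHash now taking a std::string_view, callers can hash string literals or substrings without materializing a std::string; a brief sketch:

  constexpr std::string_view kKey = "settings.profile.name";
  const uint32_t bucket = base::PersistentHash(kKey) % 64;  // stable across runs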
diff --git a/base/hash/md5_nacl.cc b/base/hash/md5_nacl.cc
index 7de4b43..c49e20c 100644
--- a/base/hash/md5_nacl.cc
+++ b/base/hash/md5_nacl.cc
@@ -24,6 +24,7 @@
 #include <stddef.h>
 
 #include "base/hash/md5.h"
+#include "base/strings/string_number_conversions.h"
 
 namespace {
 
@@ -262,15 +263,10 @@
 }
 
 std::string MD5DigestToBase16(const MD5Digest& digest) {
-  static char const zEncode[] = "0123456789abcdef";
-
   std::string ret;
-  ret.resize(32);
-
-  for (size_t i = 0, j = 0; i < 16; i++, j += 2) {
-    uint8_t a = digest.a[i];
-    ret[j] = zEncode[(a >> 4) & 0xf];
-    ret[j + 1] = zEncode[a & 0xf];
+  ret.reserve(32);
+  for (uint8_t byte : digest.a) {
+    base::AppendHexEncodedByte(byte, ret, false);
   }
   return ret;
 }
diff --git a/base/i18n/icu_util.cc b/base/i18n/icu_util.cc
index 045fbb8..b62e461 100644
--- a/base/i18n/icu_util.cc
+++ b/base/i18n/icu_util.cc
@@ -40,7 +40,7 @@
 #endif
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/mac/foundation_util.h"
+#include "base/apple/foundation_util.h"
 #endif
 
 #if BUILDFLAG(IS_FUCHSIA)
@@ -167,7 +167,7 @@
 
 #else  // !BUILDFLAG(IS_APPLE)
   // Assume it is in the framework bundle's Resources directory.
-  FilePath data_path = mac::PathForFrameworkBundleResource(kIcuDataFileName);
+  FilePath data_path = apple::PathForFrameworkBundleResource(kIcuDataFileName);
 #if BUILDFLAG(IS_IOS)
   FilePath override_data_path = ios::FilePathOfEmbeddedICU();
   if (!override_data_path.empty()) {
diff --git a/base/i18n/icu_util_unittest.cc b/base/i18n/icu_util_unittest.cc
index fc0f3ca..6d451e9 100644
--- a/base/i18n/icu_util_unittest.cc
+++ b/base/i18n/icu_util_unittest.cc
@@ -7,8 +7,8 @@
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-#if !BUILDFLAG(IS_NACL)
-#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+#if !BUILDFLAG(IS_NACL) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE) && \
+    (BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_ANDROID))
 
 namespace base::i18n {
 
@@ -17,25 +17,12 @@
   void SetUp() override { ResetGlobalsForTesting(); }
 };
 
-#if BUILDFLAG(IS_CHROMEOS_LACROS)
 TEST_F(IcuUtilTest, InitializeIcuSucceeds) {
   bool success = InitializeICU();
 
   ASSERT_TRUE(success);
 }
-#endif  // BUILDFLAG(IS_CHROMEOS_LACROS)
-
-#if BUILDFLAG(IS_ANDROID)
-
-TEST_F(IcuUtilTest, InitializeIcuSucceeds) {
-  bool success = InitializeICU();
-
-  ASSERT_TRUE(success);
-}
-
-#endif  // BUILDFLAG(IS_ANDROID)
 
 }  // namespace base::i18n
 
-#endif  // ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
-#endif  // !BUILDFLAG(IS_NACL)
+#endif
diff --git a/base/i18n/message_formatter.cc b/base/i18n/message_formatter.cc
index 709bd3c..4abe9fa 100644
--- a/base/i18n/message_formatter.cc
+++ b/base/i18n/message_formatter.cc
@@ -47,7 +47,8 @@
 MessageArg::MessageArg(double d) : formattable(new icu::Formattable(d)) {}
 
 MessageArg::MessageArg(const Time& t)
-    : formattable(new icu::Formattable(static_cast<UDate>(t.ToJsTime()))) {}
+    : formattable(new icu::Formattable(
+          static_cast<UDate>(t.InMillisecondsFSinceUnixEpoch()))) {}
 
 MessageArg::~MessageArg() = default;
 
diff --git a/base/i18n/message_formatter_unittest.cc b/base/i18n/message_formatter_unittest.cc
index 2660d06..ffd3df3 100644
--- a/base/i18n/message_formatter_unittest.cc
+++ b/base/i18n/message_formatter_unittest.cc
@@ -42,8 +42,8 @@
                              const Time& now,
                              std::u16string* result) {
   icu::UnicodeString formatted;
-  result->append(UnicodeStringToString16(
-      df->format(static_cast<UDate>(now.ToJsTime()), formatted)));
+  result->append(UnicodeStringToString16(df->format(
+      static_cast<UDate>(now.InMillisecondsFSinceUnixEpoch()), formatted)));
 }
 
 }  // namespace
diff --git a/base/i18n/streaming_utf8_validator_perftest.cc b/base/i18n/streaming_utf8_validator_perftest.cc
index ac6a649..150e360 100644
--- a/base/i18n/streaming_utf8_validator_perftest.cc
+++ b/base/i18n/streaming_utf8_validator_perftest.cc
@@ -158,12 +158,10 @@
     const int real_length = static_cast<int>(test_string.length());
     const int times = (1 << 24) / real_length;
     for (size_t test_index = 0; test_index < test_count; ++test_index) {
-      EXPECT_TRUE(RunTest(StringPrintf(format,
-                                       test_functions[test_index].function_name,
-                                       real_length,
-                                       times),
-                          test_functions[test_index].function,
-                          test_string,
+      EXPECT_TRUE(RunTest(StringPrintfNonConstexpr(
+                              format, test_functions[test_index].function_name,
+                              real_length, times),
+                          test_functions[test_index].function, test_string,
                           times));
     }
   }
diff --git a/base/i18n/time_formatting.cc b/base/i18n/time_formatting.cc
index cfba1e0..bbb5e292c 100644
--- a/base/i18n/time_formatting.cc
+++ b/base/i18n/time_formatting.cc
@@ -7,14 +7,17 @@
 #include <stddef.h>
 
 #include <memory>
+#include <string>
 
 #include "base/i18n/unicodestring.h"
 #include "base/logging.h"
 #include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
+#include "base/strings/string_piece.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/time/time.h"
 #include "build/chromeos_buildflags.h"
+#include "third_party/icu/source/common/unicode/locid.h"
 #include "third_party/icu/source/common/unicode/utypes.h"
 #include "third_party/icu/source/i18n/unicode/datefmt.h"
 #include "third_party/icu/source/i18n/unicode/dtitvfmt.h"
@@ -22,15 +25,22 @@
 #include "third_party/icu/source/i18n/unicode/fmtable.h"
 #include "third_party/icu/source/i18n/unicode/measfmt.h"
 #include "third_party/icu/source/i18n/unicode/smpdtfmt.h"
+#include "third_party/icu/source/i18n/unicode/timezone.h"
 
 namespace base {
 namespace {
 
-std::u16string TimeFormat(const icu::DateFormat* formatter, const Time& time) {
-  DCHECK(formatter);
+UDate ToUDate(const Time& time) {
+  // TODO(crbug.com/1392437): Consider using the `...IgnoringNull` variant and
+  // adding a `CHECK(!time.is_null())`; trying to format a null Time as a string
+  // is almost certainly an indication that the caller has made a mistake.
+  return time.InMillisecondsFSinceUnixEpoch();
+}
+
+std::u16string TimeFormat(const icu::DateFormat& formatter, const Time& time) {
   icu::UnicodeString date_string;
 
-  formatter->format(static_cast<UDate>(time.ToDoubleT() * 1000), date_string);
+  formatter.format(ToUDate(time), date_string);
   return i18n::UnicodeStringToString16(date_string);
 }
 
@@ -40,8 +50,7 @@
   icu::UnicodeString time_string;
 
   icu::FieldPosition ampm_field(icu::DateFormat::kAmPmField);
-  formatter->format(
-      static_cast<UDate>(time.ToDoubleT() * 1000), time_string, ampm_field);
+  formatter->format(ToUDate(time), time_string, ampm_field);
   int ampm_length = ampm_field.getEndIndex() - ampm_field.getBeginIndex();
   if (ampm_length) {
     int begin = ampm_field.getBeginIndex();
@@ -53,21 +62,27 @@
   return i18n::UnicodeStringToString16(time_string);
 }
 
-icu::SimpleDateFormat CreateSimpleDateFormatter(const char* pattern) {
-  // Generate a locale-dependent format pattern. The generator will take
-  // care of locale-dependent formatting issues like which separator to
-  // use (some locales use '.' instead of ':'), and where to put the am/pm
-  // marker.
+icu::SimpleDateFormat CreateSimpleDateFormatter(
+    StringPiece pattern,
+    bool generate_pattern = true,
+    const icu::Locale& locale = icu::Locale::getDefault()) {
   UErrorCode status = U_ZERO_ERROR;
-  std::unique_ptr<icu::DateTimePatternGenerator> generator(
-      icu::DateTimePatternGenerator::createInstance(status));
-  DCHECK(U_SUCCESS(status));
-  icu::UnicodeString generated_pattern =
-      generator->getBestPattern(icu::UnicodeString(pattern), status);
-  DCHECK(U_SUCCESS(status));
+  icu::UnicodeString generated_pattern(pattern.data(), pattern.length());
 
-  // Then, format the time using the generated pattern.
-  icu::SimpleDateFormat formatter(generated_pattern, status);
+  if (generate_pattern) {
+    // Generate a locale-dependent format pattern. The generator will take
+    // care of locale-dependent formatting issues like which separator to
+    // use (some locales use '.' instead of ':'), and where to put the am/pm
+    // marker.
+    std::unique_ptr<icu::DateTimePatternGenerator> generator(
+        icu::DateTimePatternGenerator::createInstance(status));
+    DCHECK(U_SUCCESS(status));
+    generated_pattern = generator->getBestPattern(generated_pattern, status);
+    DCHECK(U_SUCCESS(status));
+  }
+
+  // Then, format the time using the desired pattern.
+  icu::SimpleDateFormat formatter(generated_pattern, locale, status);
   DCHECK(U_SUCCESS(status));
 
   return formatter;
@@ -102,7 +117,7 @@
   // Chrome's application locale.
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createTimeInstance(icu::DateFormat::kShort));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
 std::u16string TimeFormatTimeOfDayWithMilliseconds(const Time& time) {
@@ -123,35 +138,33 @@
   const char* base_pattern = (type == k12HourClock ? "ahm" : "Hm");
   icu::SimpleDateFormat formatter = CreateSimpleDateFormatter(base_pattern);
 
-  if (ampm == kKeepAmPm) {
-    return TimeFormat(&formatter, time);
-  }
-  return TimeFormatWithoutAmPm(&formatter, time);
+  return (ampm == kKeepAmPm) ? TimeFormat(formatter, time)
+                             : TimeFormatWithoutAmPm(&formatter, time);
 }
 
 std::u16string TimeFormatShortDate(const Time& time) {
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createDateInstance(icu::DateFormat::kMedium));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
 std::u16string TimeFormatShortDateNumeric(const Time& time) {
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createDateInstance(icu::DateFormat::kShort));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
 std::u16string TimeFormatShortDateAndTime(const Time& time) {
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createDateTimeInstance(icu::DateFormat::kShort));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
 std::u16string TimeFormatShortDateAndTimeWithTimeZone(const Time& time) {
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createDateTimeInstance(icu::DateFormat::kShort,
                                               icu::DateFormat::kLong));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
 #if BUILDFLAG(IS_CHROMEOS_ASH)
@@ -161,31 +174,106 @@
   icu::SimpleDateFormat formatter =
       CreateSimpleDateFormatter(DateFormatToString(DATE_FORMAT_YEAR_MONTH));
   formatter.setTimeZone(*time_zone);
-  return TimeFormat(&formatter, time);
+  return TimeFormat(formatter, time);
 }
 #endif
 
 std::u16string TimeFormatMonthAndYear(const Time& time) {
-  icu::SimpleDateFormat formatter =
-      CreateSimpleDateFormatter(DateFormatToString(DATE_FORMAT_YEAR_MONTH));
-  return TimeFormat(&formatter, time);
+  return TimeFormat(
+      CreateSimpleDateFormatter(DateFormatToString(DATE_FORMAT_YEAR_MONTH)),
+      time);
 }
 
 std::u16string TimeFormatFriendlyDateAndTime(const Time& time) {
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createDateTimeInstance(icu::DateFormat::kFull));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
 std::u16string TimeFormatFriendlyDate(const Time& time) {
   std::unique_ptr<icu::DateFormat> formatter(
       icu::DateFormat::createDateInstance(icu::DateFormat::kFull));
-  return TimeFormat(formatter.get(), time);
+  return TimeFormat(*formatter, time);
 }
 
-std::u16string TimeFormatWithPattern(const Time& time, const char* pattern) {
-  icu::SimpleDateFormat formatter = CreateSimpleDateFormatter(pattern);
-  return TimeFormat(&formatter, time);
+std::u16string LocalizedTimeFormatWithPattern(const Time& time,
+                                              StringPiece pattern) {
+  return TimeFormat(CreateSimpleDateFormatter(std::move(pattern)), time);
+}
+
+std::string UnlocalizedTimeFormatWithPattern(const Time& time,
+                                             StringPiece pattern,
+                                             const icu::TimeZone* time_zone) {
+  icu::SimpleDateFormat formatter =
+      CreateSimpleDateFormatter({}, false, icu::Locale("en_US"));
+  if (time_zone) {
+    formatter.setTimeZone(*time_zone);
+  }
+
+  // Formats `time` according to `pattern`.
+  const auto format_time = [&formatter](const Time& time, StringPiece pattern) {
+    formatter.applyPattern(
+        icu::UnicodeString(pattern.data(), pattern.length()));
+    return base::UTF16ToUTF8(TimeFormat(formatter, time));
+  };
+
+  // If `time` has nonzero microseconds, check if the caller requested
+  // microsecond-precision output; this must be handled internally since
+  // `SimpleDateFormat` won't do it.
+  std::string output;
+  if (const int64_t microseconds =
+          time.ToDeltaSinceWindowsEpoch().InMicroseconds() %
+          Time::kMicrosecondsPerMillisecond) {
+    // Adds digits to `output` for each 'S' at the start of `pattern`.
+    const auto format_microseconds = [&output](int64_t mutable_micros,
+                                               StringPiece pattern) {
+      size_t i = 0;
+      for (; i < pattern.length() && pattern[i] == 'S'; ++i) {
+        output += static_cast<char>('0' + mutable_micros / 100);
+        mutable_micros = (mutable_micros % 100) * 10;
+      }
+      return i;
+    };
+
+    // Look for fractional seconds patterns with greater-than-millisecond
+    // precision.
+    bool in_quotes = false;
+    for (size_t i = 0; i < pattern.length();) {
+      if (pattern[i] == '\'') {
+        in_quotes = !in_quotes;
+      } else if (!in_quotes && !pattern.compare(i, 4, "SSSS")) {
+        // Let ICU format everything up through milliseconds.
+        const size_t fourth_s = i + 3;
+        if (i != 0) {
+          output += format_time(time, pattern.substr(0, fourth_s));
+        }
+
+        // Add microseconds digits, then truncate to the remaining pattern.
+        pattern = pattern.substr(
+            fourth_s +
+            format_microseconds(microseconds, pattern.substr(fourth_s)));
+        i = 0;
+        continue;
+      }
+      ++i;
+    }
+  }
+
+  // Format any remaining pattern.
+  if (!pattern.empty()) {
+    output += format_time(time, std::move(pattern));
+  }
+  return output;
+}
+
+std::string TimeFormatAsIso8601(const Time& time) {
+  return UnlocalizedTimeFormatWithPattern(time, "yyyy-MM-dd'T'HH:mm:ss.SSSX",
+                                          icu::TimeZone::getGMT());
+}
+
+std::string TimeFormatHTTP(const Time& time) {
+  return UnlocalizedTimeFormatWithPattern(time, "E, dd MMM yyyy HH:mm:ss O",
+                                          icu::TimeZone::getGMT());
 }
 
 bool TimeDurationFormat(TimeDelta time,
@@ -264,8 +352,8 @@
                                               status));
 
   icu::FieldPosition pos = 0;
-  UDate start_date = static_cast<UDate>(begin_time.ToDoubleT() * 1000);
-  UDate end_date = static_cast<UDate>(end_time.ToDoubleT() * 1000);
+  UDate start_date = ToUDate(begin_time);
+  UDate end_date = ToUDate(end_time);
   icu::DateInterval interval(start_date, end_date);
   icu::UnicodeString formatted;
   formatter->format(&interval, formatted, pos, status);
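
The microsecond expansion in `UnlocalizedTimeFormatWithPattern()` above emits one digit per extra 'S' by repeatedly peeling the leading digit off the sub-millisecond remainder. A minimal standalone sketch of that loop (illustrative only, not part of the imported patch; the function name is hypothetical):

#include <cstdint>
#include <string>

// Mirrors format_microseconds(): `micros` is the sub-millisecond remainder in
// [0, 999]; `num_s` is the count of 'S' characters after the third one.
std::string ExpandSubMillisecondDigits(int64_t micros, size_t num_s) {
  std::string digits;
  for (size_t i = 0; i < num_s; ++i) {
    digits += static_cast<char>('0' + micros / 100);  // emit the leading digit
    micros = (micros % 100) * 10;                     // shift the remainder left
  }
  return digits;
}

// Example: ExpandSubMillisecondDigits(987, 4) == "9870", which is how
// "ss.SSSSSSS" yields "07.0009870" after ICU has produced "07.000" for the
// first three 'S's.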
diff --git a/base/i18n/time_formatting.h b/base/i18n/time_formatting.h
index 281b70d..bed134a 100644
--- a/base/i18n/time_formatting.h
+++ b/base/i18n/time_formatting.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Basic time formatting methods.  These methods use the current locale
-// formatting for displaying the time.
+// Basic time formatting methods.  Most methods format based on the current
+// locale. *TimeFormatWithPattern() are special; see comments there.
 
 #ifndef BASE_I18N_TIME_FORMATTING_H_
 #define BASE_I18N_TIME_FORMATTING_H_
@@ -11,12 +11,14 @@
 #include <string>
 
 #include "base/i18n/base_i18n_export.h"
+#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 #include "build/chromeos_buildflags.h"
+#include "third_party/icu/source/common/unicode/uversion.h"
 
-#if BUILDFLAG(IS_CHROMEOS_ASH)
-#include "third_party/icu/source/i18n/unicode/timezone.h"
-#endif  // BUILDFLAG(IS_CHROMEOS_ASH)
+U_NAMESPACE_BEGIN
+class TimeZone;
+U_NAMESPACE_END
 
 namespace base {
 
@@ -102,12 +104,52 @@
 // "Monday, March 6, 2008".
 BASE_I18N_EXPORT std::u16string TimeFormatFriendlyDate(const Time& time);
 
-// Formats a time using a skeleton to produce a format for different locales
-// when an unusual time format is needed, e.g. "Feb. 2, 18:00".
+// Formats a time using a pattern to produce output for different locales when
+// an unusual time format is needed, e.g. "Feb. 2, 18:00". See
+// https://unicode-org.github.io/icu/userguide/format_parse/datetime/#datetime-format-syntax
+// for pattern details.
 //
-// See http://userguide.icu-project.org/formatparse/datetime for details.
-BASE_I18N_EXPORT std::u16string TimeFormatWithPattern(const Time& time,
-                                                      const char* pattern);
+// Use this version when you want to display the resulting string to the user.
+//
+// This localizes more than you might expect: it does not simply translate days
+// of the week, etc., and then feed them into the provided pattern. The pattern
+// will also be run through a pattern localizer that will add spacing,
+// delimiters, etc. appropriate for the current locale. If you don't want this,
+// look at `UnlocalizedTimeFormatWithPattern()` below. If you want translation
+// but don't want to adjust the pattern as well, talk to base/ OWNERS about your
+// use case.
+BASE_I18N_EXPORT std::u16string LocalizedTimeFormatWithPattern(
+    const Time& time,
+    StringPiece pattern);
+
+// Formats a time using a pattern to produce en-US-like output, e.g. "Feb. 2,
+// 18:00". See
+// https://unicode-org.github.io/icu/userguide/format_parse/datetime/#datetime-format-syntax
+// for pattern details. NOTE: While ICU only supports millisecond precision
+// (fractional second patterns "SSS..." will be filled with zeroes after the
+// third 'S'), this supports microsecond precision (up to six 'S's may become
+// non-zero values), since some callers need that.
+//
+// `time_zone` can be set to a desired time zone (e.g.
+// icu::TimeZone::getGMT()); if left as null, the local time zone will be used.
+//
+// Use this version when you want to control the output format precisely, e.g.
+// for logging or to format a string for consumption by some server.
+//
+// This always outputs in US English and does not change the provided pattern at
+// all before formatting. It returns a `std::string` instead of a
+// `std::u16string` under the assumption that it will not be used in UI.
+BASE_I18N_EXPORT std::string UnlocalizedTimeFormatWithPattern(
+    const Time& time,
+    StringPiece pattern,
+    const icu::TimeZone* time_zone = nullptr);
+
+// Formats a time compliant to ISO 8601 in UTC, e.g. "2020-12-31T23:59:59.999Z".
+BASE_I18N_EXPORT std::string TimeFormatAsIso8601(const Time& time);
+
+// Formats a time in the IMF-fixdate format defined by RFC 7231 (satisfying its
+// HTTP-date format), e.g. "Sun, 06 Nov 1994 08:49:37 GMT".
+BASE_I18N_EXPORT std::string TimeFormatHTTP(const Time& time);
 
 // Formats a time duration of hours and minutes into various formats, e.g.,
 // "3:07" or "3 hours, 7 minutes", and returns true on success. See
diff --git a/base/i18n/time_formatting_unittest.cc b/base/i18n/time_formatting_unittest.cc
index 6967df8..f4e72fc 100644
--- a/base/i18n/time_formatting_unittest.cc
+++ b/base/i18n/time_formatting_unittest.cc
@@ -22,10 +22,13 @@
 namespace base {
 namespace {
 
-const Time::Exploded kTestDateTimeExploded = {
-    2011, 4,  6, 30,  // Sat, Apr 30, 2011
-    22,   42, 7, 0    // 22:42:07.000 in UTC = 15:42:07 in US PDT.
-};
+constexpr Time::Exploded kTestDateTimeExploded = {.year = 2011,
+                                                  .month = 4,
+                                                  .day_of_week = 6,
+                                                  .day_of_month = 30,
+                                                  .hour = 22,
+                                                  .minute = 42,
+                                                  .second = 7};
 
 // Returns difference between the local time and GMT formatted as string.
 // This function gets |time| because the difference depends on time,
@@ -37,9 +40,10 @@
       icu::TimeZoneFormat::createInstance(icu::Locale::getDefault(), status));
   EXPECT_TRUE(U_SUCCESS(status));
   icu::UnicodeString name;
-  zone_formatter->format(UTZFMT_STYLE_SPECIFIC_SHORT, *zone,
-                         static_cast<UDate>(time.ToDoubleT() * 1000),
-                         name, nullptr);
+  zone_formatter->format(
+      UTZFMT_STYLE_SPECIFIC_SHORT, *zone,
+      static_cast<UDate>(time.InSecondsFSinceUnixEpoch() * 1000), name,
+      nullptr);
   return i18n::UnicodeStringToString16(name);
 }
 
@@ -285,17 +289,79 @@
   EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
 
   i18n::SetICUDefaultLocale("en_US");
-  EXPECT_EQ(u"Apr 30, 2011", TimeFormatWithPattern(time, "yMMMd"));
+  EXPECT_EQ(u"Apr 30, 2011", LocalizedTimeFormatWithPattern(time, "yMMMd"));
   EXPECT_EQ(u"April 30 at 3:42:07\u202fPM",
-            TimeFormatWithPattern(time, "MMMMdjmmss"));
+            LocalizedTimeFormatWithPattern(time, "MMMMdjmmss"));
+  EXPECT_EQ(
+      "Sat! 30 Apr 2011 at 15.42+07",
+      UnlocalizedTimeFormatWithPattern(time, "E! dd MMM y 'at' HH.mm+ss"));
+  EXPECT_EQ("Sat! 30 Apr 2011 at 22.42+07",
+            UnlocalizedTimeFormatWithPattern(time, "E! dd MMM y 'at' HH.mm+ss",
+                                             icu::TimeZone::getGMT()));
 
   i18n::SetICUDefaultLocale("en_GB");
-  EXPECT_EQ(u"30 Apr 2011", TimeFormatWithPattern(time, "yMMMd"));
-  EXPECT_EQ(u"30 April at 15:42:07", TimeFormatWithPattern(time, "MMMMdjmmss"));
+  EXPECT_EQ(u"30 Apr 2011", LocalizedTimeFormatWithPattern(time, "yMMMd"));
+  EXPECT_EQ(u"30 April at 15:42:07",
+            LocalizedTimeFormatWithPattern(time, "MMMMdjmmss"));
+  EXPECT_EQ(
+      "Sat! 30 Apr 2011 at 15.42+07",
+      UnlocalizedTimeFormatWithPattern(time, "E! dd MMM y 'at' HH.mm+ss"));
 
   i18n::SetICUDefaultLocale("ja_JP");
-  EXPECT_EQ(u"2011年4月30日", TimeFormatWithPattern(time, "yMMMd"));
-  EXPECT_EQ(u"4月30日 15:42:07", TimeFormatWithPattern(time, "MMMMdjmmss"));
+  EXPECT_EQ(u"2011年4月30日", LocalizedTimeFormatWithPattern(time, "yMMMd"));
+  EXPECT_EQ(u"4月30日 15:42:07",
+            LocalizedTimeFormatWithPattern(time, "MMMMdjmmss"));
+  EXPECT_EQ(
+      "Sat! 30 Apr 2011 at 15.42+07",
+      UnlocalizedTimeFormatWithPattern(time, "E! dd MMM y 'at' HH.mm+ss"));
+}
+
+TEST(TimeFormattingTest, UnlocalizedTimeFormatWithPatternMicroseconds) {
+  Time no_micros;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &no_micros));
+  const Time micros = no_micros + Microseconds(987);
+
+  // Should support >3 'S' characters, truncating rather than rounding.
+  EXPECT_EQ("07.0009", UnlocalizedTimeFormatWithPattern(micros, "ss.SSSS"));
+  EXPECT_EQ("07.00098", UnlocalizedTimeFormatWithPattern(micros, "ss.SSSSS"));
+  EXPECT_EQ("07.000987", UnlocalizedTimeFormatWithPattern(micros, "ss.SSSSSS"));
+
+  // >6 'S' characters is also valid, and should be zero-filled.
+  EXPECT_EQ("07.0009870",
+            UnlocalizedTimeFormatWithPattern(micros, "ss.SSSSSSS"));
+
+  // Quoted 'S's should be ignored.
+  EXPECT_EQ("07.SSSSSS",
+            UnlocalizedTimeFormatWithPattern(micros, "ss.'SSSSSS'"));
+
+  // Multiple substitutions are possible.
+  EXPECT_EQ("07.000987'000987.07",
+            UnlocalizedTimeFormatWithPattern(micros, "ss.SSSSSS''SSSSSS.ss"));
+
+  // All the above should still work when the number of microseconds is zero.
+  EXPECT_EQ("07.0000", UnlocalizedTimeFormatWithPattern(no_micros, "ss.SSSS"));
+  EXPECT_EQ("07.00000",
+            UnlocalizedTimeFormatWithPattern(no_micros, "ss.SSSSS"));
+  EXPECT_EQ("07.000000",
+            UnlocalizedTimeFormatWithPattern(no_micros, "ss.SSSSSS"));
+  EXPECT_EQ("07.0000000",
+            UnlocalizedTimeFormatWithPattern(no_micros, "ss.SSSSSSS"));
+  EXPECT_EQ("07.SSSSSS",
+            UnlocalizedTimeFormatWithPattern(no_micros, "ss.'SSSSSS'"));
+  EXPECT_EQ("07.000000'000000.07", UnlocalizedTimeFormatWithPattern(
+                                       no_micros, "ss.SSSSSS''SSSSSS.ss"));
+}
+
+TEST(TimeFormattingTest, TimeFormatAsIso8601) {
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+  EXPECT_EQ("2011-04-30T22:42:07.000Z", TimeFormatAsIso8601(time));
+}
+
+TEST(TimeFormattingTest, TimeFormatHTTP) {
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+  EXPECT_EQ("Sat, 30 Apr 2011 22:42:07 GMT", TimeFormatHTTP(time));
 }
 
 TEST(TimeFormattingTest, TimeDurationFormat) {
diff --git a/base/ios/device_util.mm b/base/ios/device_util.mm
index 448a2e0..b4333ae 100644
--- a/base/ios/device_util.mm
+++ b/base/ios/device_util.mm
@@ -15,10 +15,10 @@
 
 #include <memory>
 
+#include "base/apple/scoped_cftyperef.h"
 #include "base/check.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/numerics/safe_conversions.h"
-#include "base/strings/string_util.h"
+#include "base/posix/sysctl.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/sys_string_conversions.h"
 
@@ -60,11 +60,7 @@
 #if TARGET_OS_SIMULATOR
   return getenv("SIMULATOR_MODEL_IDENTIFIER");
 #elif TARGET_OS_IPHONE
-  std::string platform;
-  size_t size = 0;
-  sysctlbyname("hw.machine", NULL, &size, NULL, 0);
-  sysctlbyname("hw.machine", base::WriteInto(&platform, size), &size, NULL, 0);
-  return platform;
+  return base::StringSysctl({CTL_HW, HW_MACHINE}).value();
 #endif
 }
 
@@ -129,9 +125,9 @@
 }
 
 std::string GetRandomId() {
-  base::ScopedCFTypeRef<CFUUIDRef> uuid_object(
+  base::apple::ScopedCFTypeRef<CFUUIDRef> uuid_object(
       CFUUIDCreate(kCFAllocatorDefault));
-  base::ScopedCFTypeRef<CFStringRef> uuid_string(
+  base::apple::ScopedCFTypeRef<CFStringRef> uuid_string(
       CFUUIDCreateString(kCFAllocatorDefault, uuid_object));
   return base::SysCFStringRefToUTF8(uuid_string);
 }
@@ -177,9 +173,9 @@
             hash);
   CFUUIDBytes* uuid_bytes = reinterpret_cast<CFUUIDBytes*>(hash);
 
-  base::ScopedCFTypeRef<CFUUIDRef> uuid_object(
+  base::apple::ScopedCFTypeRef<CFUUIDRef> uuid_object(
       CFUUIDCreateFromUUIDBytes(kCFAllocatorDefault, *uuid_bytes));
-  base::ScopedCFTypeRef<CFStringRef> device_id(
+  base::apple::ScopedCFTypeRef<CFStringRef> device_id(
       CFUUIDCreateString(kCFAllocatorDefault, uuid_object));
   return base::SysCFStringRefToUTF8(device_id);
 }
diff --git a/base/ios/ios_util.mm b/base/ios/ios_util.mm
index fdb715c..6ada730 100644
--- a/base/ios/ios_util.mm
+++ b/base/ios/ios_util.mm
@@ -8,8 +8,8 @@
 #import <UIKit/UIKit.h>
 #include <stddef.h>
 
+#include "base/apple/foundation_util.h"
 #import "base/ios/device_util.h"
-#include "base/mac/foundation_util.h"
 #include "base/system/sys_info.h"
 
 namespace {
diff --git a/base/ios/sim_header_shims.h b/base/ios/sim_header_shims.h
index 3f9f5e0..fbf643f 100644
--- a/base/ios/sim_header_shims.h
+++ b/base/ios/sim_header_shims.h
@@ -13,6 +13,7 @@
 
 #include <mach/kern_return.h>
 #include <mach/message.h>
+#include <stdint.h>
 #include <sys/param.h>
 
 // This file includes the necessary headers that are not part of the
@@ -46,13 +47,28 @@
 #define SHARED_REGION_BASE_ARM64 0x180000000ULL
 #define SHARED_REGION_SIZE_ARM64 0x100000000ULL
 
+int proc_pidinfo(int pid,
+                 int flavor,
+                 uint64_t arg,
+                 void* buffer,
+                 int buffersize);
 int proc_pidpath(int pid, void* buffer, uint32_t buffersize);
 int proc_regionfilename(int pid,
                         uint64_t address,
                         void* buffer,
                         uint32_t buffersize);
+
 #define PROC_PIDPATHINFO_MAXSIZE (4 * MAXPATHLEN)
 
+// These values are copied from xnu/xnu-4570.1.46/bsd/sys/proc_info.h.
+// https://opensource.apple.com/source/xnu/xnu-4570.1.46/bsd/sys/proc_info.h#L697-L710
+struct proc_fdinfo {
+  int32_t proc_fd;
+  uint32_t proc_fdtype;
+};
+#define PROC_PIDLISTFDS 1
+#define PROC_PIDLISTFD_SIZE (sizeof(struct proc_fdinfo))
+
 __END_DECLS
 
 #endif  // BASE_IOS_SIM_HEADER_SHIMS_H_
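
The `proc_pidinfo()` declaration and `proc_fdinfo` shim added above follow the usual libproc two-call pattern: query the required size, then fetch the fd list. A hedged usage sketch (illustrative only, not part of the imported patch; `CountOpenFds` is hypothetical):

#include <sys/types.h>

#include <vector>

#include "base/ios/sim_header_shims.h"  // proc_pidinfo(), proc_fdinfo

// Returns the number of open file descriptors for `pid`, or -1 on failure.
int CountOpenFds(pid_t pid) {
  int bytes = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, nullptr, 0);
  if (bytes <= 0)
    return -1;
  std::vector<proc_fdinfo> fds(static_cast<size_t>(bytes) / PROC_PIDLISTFD_SIZE);
  bytes = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds.data(),
                       static_cast<int>(fds.size() * PROC_PIDLISTFD_SIZE));
  return bytes <= 0 ? -1 : bytes / static_cast<int>(PROC_PIDLISTFD_SIZE);
}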
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
index fbb226e..71381a4 100644
--- a/base/json/json_parser.cc
+++ b/base/json/json_parser.cc
@@ -10,6 +10,8 @@
 #include <vector>
 
 #include "base/check_op.h"
+#include "base/feature_list.h"
+#include "base/features.h"
 #include "base/json/json_reader.h"
 #include "base/metrics/histogram_functions.h"
 #include "base/notreached.h"
@@ -801,8 +803,16 @@
   StringPiece num_string(num_start, end_index - start_index);
 
   int num_int;
-  if (StringToInt(num_string, &num_int))
+  if (StringToInt(num_string, &num_int)) {
+    // StringToInt will treat `-0` as zero, losing the significance of the
+    // negation.
+    if (num_int == 0 && num_string.starts_with('-')) {
+      if (base::FeatureList::IsEnabled(features::kJsonNegativeZero)) {
+        return Value(-0.0);
+      }
+    }
     return Value(num_int);
+  }
 
   double num_double;
   if (StringToDouble(num_string, &num_double) && std::isfinite(num_double)) {
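
The feature-gated branch above exists because `StringToInt("-0")` returns the integer 0 and drops the sign. A minimal sketch of the observable difference (illustrative only, not part of the imported patch; `NegativeZeroExample` is hypothetical):

#include <cmath>

#include "base/check.h"
#include "base/check_op.h"
#include "base/json/json_reader.h"

void NegativeZeroExample() {
  absl::optional<base::Value> v = base::JSONReader::Read("-0");
  CHECK(v.has_value());
  if (v->is_double()) {
    // kJsonNegativeZero enabled: the sign survives as a double -0.0.
    CHECK(std::signbit(v->GetDouble()));
  } else {
    // Feature disabled: "-0" collapses to the integer 0.
    CHECK_EQ(v->GetInt(), 0);
  }
}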
diff --git a/base/json/json_parser_unittest.cc b/base/json/json_parser_unittest.cc
index e8de7d5..a185e47 100644
--- a/base/json/json_parser_unittest.cc
+++ b/base/json/json_parser_unittest.cc
@@ -156,6 +156,18 @@
   ASSERT_TRUE(value->is_int());
   EXPECT_EQ(-1234, value->GetInt());
 
+  // Negative zero integer.
+  input = "-0,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  ASSERT_TRUE(value->is_double());
+  EXPECT_EQ(-0.0, value->GetDouble());
+
   // Double.
   input = "12.34,|";
   parser.reset(NewTestParser(input));
@@ -168,6 +180,18 @@
   ASSERT_TRUE(value->is_double());
   EXPECT_EQ(12.34, value->GetDouble());
 
+  // Negative zero double.
+  input = "-0.0,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  ASSERT_TRUE(value->is_double());
+  EXPECT_EQ(-0.0, value->GetDouble());
+
   // Scientific.
   input = "42e3,|";
   parser.reset(NewTestParser(input));
diff --git a/base/json/json_reader.cc b/base/json/json_reader.cc
index 591a4f9..32de397 100644
--- a/base/json/json_reader.cc
+++ b/base/json/json_reader.cc
@@ -6,7 +6,9 @@
 
 #include <utility>
 
+#include "base/features.h"
 #include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
 #include "base/rust_buildflags.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
@@ -14,9 +16,8 @@
 #include "base/strings/string_piece_rust.h"
 #include "third_party/rust/serde_json_lenient/v0_1/wrapper/functions.h"
 #include "third_party/rust/serde_json_lenient/v0_1/wrapper/lib.rs.h"
-#else
+#endif  // BUILDFLAG(BUILD_RUST_JSON_READER)
 #include "base/json/json_parser.h"
-#endif
 
 namespace base {
 
@@ -25,6 +26,8 @@
 namespace {
 using serde_json_lenient::ContextPointer;
 
+const char kSecurityJsonParsingTime[] = "Security.JSONParser.ParsingTime";
+
 ContextPointer& ListAppendList(ContextPointer& ctx, size_t reserve) {
   auto& value = reinterpret_cast<base::Value&>(ctx);
   value.GetList().reserve(reserve);
@@ -135,11 +138,17 @@
                                        int options,
                                        size_t max_depth) {
 #if BUILDFLAG(BUILD_RUST_JSON_READER)
-  JSONReader::Result result = DecodeJSONInRust(json, options, max_depth);
-  if (!result.has_value()) {
-    return absl::nullopt;
+  SCOPED_UMA_HISTOGRAM_TIMER_MICROS(kSecurityJsonParsingTime);
+  if (UsingRust()) {
+    JSONReader::Result result = DecodeJSONInRust(json, options, max_depth);
+    if (!result.has_value()) {
+      return absl::nullopt;
+    }
+    return std::move(*result);
+  } else {
+    internal::JSONParser parser(options, max_depth);
+    return parser.Parse(json);
   }
-  return std::move(*result);
 #else   // BUILDFLAG(BUILD_RUST_JSON_READER)
   internal::JSONParser parser(options, max_depth);
   return parser.Parse(json);
@@ -161,7 +170,22 @@
 JSONReader::Result JSONReader::ReadAndReturnValueWithError(StringPiece json,
                                                            int options) {
 #if BUILDFLAG(BUILD_RUST_JSON_READER)
-  return DecodeJSONInRust(json, options, internal::kAbsoluteMaxDepth);
+  SCOPED_UMA_HISTOGRAM_TIMER_MICROS(kSecurityJsonParsingTime);
+  if (UsingRust()) {
+    return DecodeJSONInRust(json, options, internal::kAbsoluteMaxDepth);
+  } else {
+    internal::JSONParser parser(options);
+    auto value = parser.Parse(json);
+    if (!value) {
+      Error error;
+      error.message = parser.GetErrorMessage();
+      error.line = parser.error_line();
+      error.column = parser.error_column();
+      return base::unexpected(std::move(error));
+    }
+
+    return std::move(*value);
+  }
 #else   // BUILDFLAG(BUILD_RUST_JSON_READER)
   internal::JSONParser parser(options);
   auto value = parser.Parse(json);
@@ -177,4 +201,19 @@
 #endif  // BUILDFLAG(BUILD_RUST_JSON_READER)
 }
 
+// static
+bool JSONReader::UsingRust() {
+  // If features have not yet been enabled, we cannot check the feature, so fall
+  // back to the C++ parser. In practice, this seems to apply to
+  // `ReadPrefsFromDisk()`, which is parsing trusted JSON.
+  if (!base::FeatureList::GetInstance()) {
+    return false;
+  }
+#if BUILDFLAG(BUILD_RUST_JSON_READER)
+  return base::FeatureList::IsEnabled(base::features::kUseRustJsonParser);
+#else   // BUILDFLAG(BUILD_RUST_JSON_READER)
+  return false;
+#endif  // BUILDFLAG(BUILD_RUST_JSON_READER)
+}
+
 }  // namespace base
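
Parser selection above is keyed off `base::features::kUseRustJsonParser`, so tests can pin either backend with a scoped feature override. A hedged test-side sketch (illustrative only, not part of the imported patch; `ForceCppParserForTest` is hypothetical):

#include "base/check.h"
#include "base/features.h"
#include "base/json/json_reader.h"
#include "base/test/scoped_feature_list.h"

void ForceCppParserForTest() {
  base::test::ScopedFeatureList features;
  features.InitAndDisableFeature(base::features::kUseRustJsonParser);
  // With the feature disabled (or BUILD_RUST_JSON_READER off), Read() and
  // ReadAndReturnValueWithError() take the C++ JSONParser path.
  CHECK(!base::JSONReader::UsingRust());
}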
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
index 3f83f4e..fa3b5bb 100644
--- a/base/json/json_reader.h
+++ b/base/json/json_reader.h
@@ -126,6 +126,9 @@
   static Result ReadAndReturnValueWithError(
       StringPiece json,
       int options = JSON_PARSE_CHROMIUM_EXTENSIONS);
+
+  // Determines whether the Rust parser is in use.
+  static bool UsingRust();
 };
 
 }  // namespace base
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
index 835b1a8..10a9ba2 100644
--- a/base/json/json_reader_unittest.cc
+++ b/base/json/json_reader_unittest.cc
@@ -6,16 +6,21 @@
 
 #include <stddef.h>
 
+#include <cmath>
 #include <utility>
 
 #include "base/base_paths.h"
+#include "base/features.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/path_service.h"
+#include "base/rust_buildflags.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/test/gmock_expected_support.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "base/test/scoped_feature_list.h"
 #include "base/values.h"
 #include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -39,26 +44,44 @@
 
 namespace base {
 
-TEST(JSONReaderTest, Whitespace) {
+class JSONReaderTest : public testing::TestWithParam<bool> {
+ public:
+  void SetUp() override {
+    feature_list_.InitWithFeatureState(base::features::kUseRustJsonParser,
+                                       using_rust_);
+  }
+
+ protected:
+  bool using_rust_ = GetParam();
+  base::test::ScopedFeatureList feature_list_;
+};
+
+TEST_P(JSONReaderTest, Whitespace) {
   absl::optional<Value> root = JSONReader::Read("   null   ");
   ASSERT_TRUE(root);
   EXPECT_TRUE(root->is_none());
 }
 
-TEST(JSONReaderTest, InvalidString) {
+TEST_P(JSONReaderTest, InvalidString) {
   // These are invalid because they do not represent a JSON value,
   // see https://tools.ietf.org/rfc/rfc8259.txt
   EXPECT_FALSE(JSONReader::Read(""));
   EXPECT_FALSE(JSONReader::Read("nu"));
 }
 
-TEST(JSONReaderTest, SimpleBool) {
+TEST_P(JSONReaderTest, SimpleBool) {
+#if BUILDFLAG(BUILD_RUST_JSON_READER)
+  base::HistogramTester histograms;
+#endif  // BUILDFLAG(BUILD_RUST_JSON_READER)
   absl::optional<Value> root = JSONReader::Read("true  ");
   ASSERT_TRUE(root);
   EXPECT_TRUE(root->is_bool());
+#if BUILDFLAG(BUILD_RUST_JSON_READER)
+  histograms.ExpectTotalCount("Security.JSONParser.ParsingTime", 1);
+#endif  // BUILDFLAG(BUILD_RUST_JSON_READER)
 }
 
-TEST(JSONReaderTest, EmbeddedComments) {
+TEST_P(JSONReaderTest, EmbeddedComments) {
   absl::optional<Value> root = JSONReader::Read("/* comment */null");
   ASSERT_TRUE(root);
   EXPECT_TRUE(root->is_none());
@@ -114,21 +137,21 @@
   EXPECT_FALSE(JSONReader::Read("/33"));
 }
 
-TEST(JSONReaderTest, Ints) {
+TEST_P(JSONReaderTest, Ints) {
   absl::optional<Value> root = JSONReader::Read("43");
   ASSERT_TRUE(root);
   ASSERT_TRUE(root->is_int());
   EXPECT_EQ(43, root->GetInt());
 }
 
-TEST(JSONReaderTest, NonDecimalNumbers) {
+TEST_P(JSONReaderTest, NonDecimalNumbers) {
   // According to RFC 8259, oct, hex, and leading zeros are invalid JSON.
   EXPECT_FALSE(JSONReader::Read("043"));
   EXPECT_FALSE(JSONReader::Read("0x43"));
   EXPECT_FALSE(JSONReader::Read("00"));
 }
 
-TEST(JSONReaderTest, NumberZero) {
+TEST_P(JSONReaderTest, NumberZero) {
   // Test 0 (which needs to be special cased because of the leading zero
   // clause).
   absl::optional<Value> root = JSONReader::Read("0");
@@ -137,7 +160,7 @@
   EXPECT_EQ(0, root->GetInt());
 }
 
-TEST(JSONReaderTest, LargeIntPromotion) {
+TEST_P(JSONReaderTest, LargeIntPromotion) {
   // Numbers that overflow ints should succeed, being internally promoted to
   // storage as doubles
   absl::optional<Value> root = JSONReader::Read("2147483648");
@@ -150,7 +173,7 @@
   EXPECT_DOUBLE_EQ(-2147483649.0, root->GetDouble());
 }
 
-TEST(JSONReaderTest, LargerIntIsLossy) {
+TEST_P(JSONReaderTest, LargerIntIsLossy) {
   // Parse LONG_MAX as a JSON number (not a JSON string). The result of the
   // parse is a base::Value, either a (32-bit) int or a (64-bit) double.
   // LONG_MAX would overflow an int and can only be approximated by a double.
@@ -167,7 +190,7 @@
   EXPECT_EQ(std::string(etc808), StringPrintf("%f", root->GetDouble()));
 }
 
-TEST(JSONReaderTest, Doubles) {
+TEST_P(JSONReaderTest, Doubles) {
   absl::optional<Value> root = JSONReader::Read("43.1");
   ASSERT_TRUE(root);
   EXPECT_TRUE(root->is_double());
@@ -211,14 +234,14 @@
   ASSERT_FALSE(value.has_value());
 }
 
-TEST(JSONReaderTest, FractionalNumbers) {
+TEST_P(JSONReaderTest, FractionalNumbers) {
   // Fractional parts must have a digit before and after the decimal point.
   EXPECT_FALSE(JSONReader::Read("1."));
   EXPECT_FALSE(JSONReader::Read(".1"));
   EXPECT_FALSE(JSONReader::Read("1.e10"));
 }
 
-TEST(JSONReaderTest, ExponentialNumbers) {
+TEST_P(JSONReaderTest, ExponentialNumbers) {
   // Exponent must have a digit following the 'e'.
   EXPECT_FALSE(JSONReader::Read("1e"));
   EXPECT_FALSE(JSONReader::Read("1E"));
@@ -226,7 +249,7 @@
   EXPECT_FALSE(JSONReader::Read("1e1.0"));
 }
 
-TEST(JSONReaderTest, InvalidInfNAN) {
+TEST_P(JSONReaderTest, InvalidInfNAN) {
   // The largest finite double is roughly 1.8e308.
   EXPECT_FALSE(JSONReader::Read("1e1000"));
   EXPECT_FALSE(JSONReader::Read("-1e1000"));
@@ -235,7 +258,7 @@
   EXPECT_FALSE(JSONReader::Read("inf"));
 }
 
-TEST(JSONReaderTest, InvalidNumbers) {
+TEST_P(JSONReaderTest, InvalidNumbers) {
   EXPECT_TRUE(JSONReader::Read("4.3"));
   EXPECT_FALSE(JSONReader::Read("4."));
   EXPECT_FALSE(JSONReader::Read("4.3.1"));
@@ -244,21 +267,46 @@
   EXPECT_FALSE(JSONReader::Read("42a"));
 }
 
-TEST(JSONReaderTest, SimpleString) {
+TEST_P(JSONReaderTest, Zeroes) {
+  absl::optional<Value> root = JSONReader::Read("0");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_int());
+  EXPECT_DOUBLE_EQ(0, root->GetInt());
+
+  root = JSONReader::Read("0.0");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  EXPECT_DOUBLE_EQ(0.0, root->GetDouble());
+  EXPECT_FALSE(std::signbit(root->GetDouble()));
+
+  root = JSONReader::Read("-0");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  EXPECT_DOUBLE_EQ(0.0, root->GetDouble());
+  EXPECT_TRUE(std::signbit(root->GetDouble()));
+
+  root = JSONReader::Read("-0.0");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  EXPECT_DOUBLE_EQ(-0.0, root->GetDouble());
+  EXPECT_TRUE(std::signbit(root->GetDouble()));
+}
+
+TEST_P(JSONReaderTest, SimpleString) {
   absl::optional<Value> root = JSONReader::Read("\"hello world\"");
   ASSERT_TRUE(root);
   ASSERT_TRUE(root->is_string());
   EXPECT_EQ("hello world", root->GetString());
 }
 
-TEST(JSONReaderTest, EmptyString) {
+TEST_P(JSONReaderTest, EmptyString) {
   absl::optional<Value> root = JSONReader::Read("\"\"");
   ASSERT_TRUE(root);
   ASSERT_TRUE(root->is_string());
   EXPECT_EQ("", root->GetString());
 }
 
-TEST(JSONReaderTest, BasicStringEscapes) {
+TEST_P(JSONReaderTest, BasicStringEscapes) {
   absl::optional<Value> root =
       JSONReader::Read("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
   ASSERT_TRUE(root);
@@ -266,7 +314,7 @@
   EXPECT_EQ(" \"\\/\b\f\n\r\t\v", root->GetString());
 }
 
-TEST(JSONReaderTest, UnicodeEscapes) {
+TEST_P(JSONReaderTest, UnicodeEscapes) {
   // Test hex and unicode escapes including the null character.
   absl::optional<Value> root =
       JSONReader::Read("\"\\x41\\xFF\\x00\\u1234\\u0000\"");
@@ -283,7 +331,7 @@
   EXPECT_TRUE(JSONReader::Read("\"\\uD834\\uDD1E\""));  // U+1D11E
 }
 
-TEST(JSONReaderTest, InvalidStrings) {
+TEST_P(JSONReaderTest, InvalidStrings) {
   EXPECT_FALSE(JSONReader::Read("\"no closing quote"));
   EXPECT_FALSE(JSONReader::Read("\"\\z invalid escape char\""));
   EXPECT_FALSE(JSONReader::Read("\"\\xAQ invalid hex code\""));
@@ -292,7 +340,7 @@
   EXPECT_FALSE(JSONReader::Read("\"extra backslash at end of input\\\""));
 }
 
-TEST(JSONReaderTest, BasicArray) {
+TEST_P(JSONReaderTest, BasicArray) {
   absl::optional<Value> root = JSONReader::Read("[true, false, null]");
   ASSERT_TRUE(root);
   Value::List* list = root->GetIfList();
@@ -306,7 +354,7 @@
   EXPECT_EQ(*list, *root2);
 }
 
-TEST(JSONReaderTest, EmptyArray) {
+TEST_P(JSONReaderTest, EmptyArray) {
   absl::optional<Value> value = JSONReader::Read("[]");
   ASSERT_TRUE(value);
   Value::List* list = value->GetIfList();
@@ -314,7 +362,7 @@
   EXPECT_TRUE(list->empty());
 }
 
-TEST(JSONReaderTest, CompleteArray) {
+TEST_P(JSONReaderTest, CompleteArray) {
   absl::optional<Value> value = JSONReader::Read("[\"a\", 3, 4.56, null]");
   ASSERT_TRUE(value);
   Value::List* list = value->GetIfList();
@@ -322,7 +370,7 @@
   EXPECT_EQ(4U, list->size());
 }
 
-TEST(JSONReaderTest, NestedArrays) {
+TEST_P(JSONReaderTest, NestedArrays) {
   absl::optional<Value> value = JSONReader::Read(
       "[[true], [], {\"smell\": \"nice\",\"taste\": \"yummy\" }, [false, [], "
       "[null]], null]");
@@ -340,7 +388,7 @@
   EXPECT_EQ(*list, *root2);
 }
 
-TEST(JSONReaderTest, InvalidArrays) {
+TEST_P(JSONReaderTest, InvalidArrays) {
   // Missing close brace.
   EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
 
@@ -355,7 +403,7 @@
   EXPECT_FALSE(JSONReader::Read("[true,]"));
 }
 
-TEST(JSONReaderTest, ArrayTrailingComma) {
+TEST_P(JSONReaderTest, ArrayTrailingComma) {
   // Valid if we set |allow_trailing_comma| to true.
   absl::optional<Value> value =
       JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS);
@@ -368,7 +416,7 @@
   EXPECT_TRUE(value1.GetBool());
 }
 
-TEST(JSONReaderTest, ArrayTrailingCommaNoEmptyElements) {
+TEST_P(JSONReaderTest, ArrayTrailingCommaNoEmptyElements) {
   // Don't allow empty elements, even if |allow_trailing_comma| is
   // true.
   EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
@@ -377,13 +425,13 @@
   EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
 }
 
-TEST(JSONReaderTest, EmptyDictionary) {
+TEST_P(JSONReaderTest, EmptyDictionary) {
   absl::optional<Value> dict_val = JSONReader::Read("{}");
   ASSERT_TRUE(dict_val);
   ASSERT_TRUE(dict_val->is_dict());
 }
 
-TEST(JSONReaderTest, CompleteDictionary) {
+TEST_P(JSONReaderTest, CompleteDictionary) {
   absl::optional<Value> root1 = JSONReader::Read(
       "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", \"bool\": "
       "false, \"more\": {} }");
@@ -442,7 +490,7 @@
   EXPECT_EQ(*root1_dict, *root2_dict);
 }
 
-TEST(JSONReaderTest, NestedDictionaries) {
+TEST_P(JSONReaderTest, NestedDictionaries) {
   absl::optional<Value> root1 = JSONReader::Read(
       "{\"inner\":{\"array\":[true, 3, 4.56, null]},\"false\":false,\"d\":{}}");
   ASSERT_TRUE(root1);
@@ -467,7 +515,7 @@
   EXPECT_EQ(*root1_dict, *root2);
 }
 
-TEST(JSONReaderTest, DictionaryKeysWithPeriods) {
+TEST_P(JSONReaderTest, DictionaryKeysWithPeriods) {
   absl::optional<Value> root =
       JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}");
   ASSERT_TRUE(root);
@@ -499,7 +547,7 @@
   EXPECT_EQ(1, *integer_value);
 }
 
-TEST(JSONReaderTest, DuplicateKeys) {
+TEST_P(JSONReaderTest, DuplicateKeys) {
   absl::optional<Value> root = JSONReader::Read("{\"x\":1,\"x\":2,\"y\":3}");
   ASSERT_TRUE(root);
   const Value::Dict* root_dict = root->GetIfDict();
@@ -510,7 +558,7 @@
   EXPECT_EQ(2, *integer_value);
 }
 
-TEST(JSONReaderTest, InvalidDictionaries) {
+TEST_P(JSONReaderTest, InvalidDictionaries) {
   // No closing brace.
   EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
 
@@ -540,7 +588,7 @@
                                 JSON_ALLOW_TRAILING_COMMAS));
 }
 
-TEST(JSONReaderTest, StackOverflow) {
+TEST_P(JSONReaderTest, StackOverflow) {
   std::string evil(1000000, '[');
   evil.append(std::string(1000000, ']'));
   EXPECT_FALSE(JSONReader::Read(evil));
@@ -558,7 +606,7 @@
   EXPECT_EQ(5001U, list->size());
 }
 
-TEST(JSONReaderTest, UTF8Input) {
+TEST_P(JSONReaderTest, UTF8Input) {
   absl::optional<Value> root = JSONReader::Read("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
   ASSERT_TRUE(root);
   ASSERT_TRUE(root->is_string());
@@ -622,13 +670,13 @@
   }
 }
 
-TEST(JSONReaderTest, InvalidUTF8Input) {
+TEST_P(JSONReaderTest, InvalidUTF8Input) {
   EXPECT_FALSE(JSONReader::Read("\"345\xb0\xa1\xb0\xa2\""));
   EXPECT_FALSE(JSONReader::Read("\"123\xc0\x81\""));
   EXPECT_FALSE(JSONReader::Read("\"abc\xc0\xae\""));
 }
 
-TEST(JSONReaderTest, UTF16Escapes) {
+TEST_P(JSONReaderTest, UTF16Escapes) {
   absl::optional<Value> root = JSONReader::Read("\"\\u20ac3,14\"");
   ASSERT_TRUE(root);
   ASSERT_TRUE(root->is_string());
@@ -643,7 +691,7 @@
   EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", root->GetString());
 }
 
-TEST(JSONReaderTest, InvalidUTF16Escapes) {
+TEST_P(JSONReaderTest, InvalidUTF16Escapes) {
   const char* const cases[] = {
       "\"\\u123\"",          // Invalid scalar.
       "\"\\ud83d\"",         // Invalid scalar.
@@ -665,7 +713,7 @@
   }
 }
 
-TEST(JSONReaderTest, LiteralRoots) {
+TEST_P(JSONReaderTest, LiteralRoots) {
   absl::optional<Value> root = JSONReader::Read("null");
   ASSERT_TRUE(root);
   EXPECT_TRUE(root->is_none());
@@ -686,7 +734,7 @@
   EXPECT_EQ("root", root->GetString());
 }
 
-TEST(JSONReaderTest, ReadFromFile) {
+TEST_P(JSONReaderTest, ReadFromFile) {
   FilePath path;
   ASSERT_TRUE(PathService::Get(base::DIR_TEST_DATA, &path));
   path = path.AppendASCII("json");
@@ -702,7 +750,7 @@
 
 // Tests that the root of a JSON object can be deleted safely while its
 // children outlive it.
-TEST(JSONReaderTest, StringOptimizations) {
+TEST_P(JSONReaderTest, StringOptimizations) {
   Value dict_literal_0;
   Value dict_literal_1;
   Value dict_string_0;
@@ -778,7 +826,7 @@
 // A smattering of invalid JSON designed to test specific portions of the
 // parser implementation against buffer overflow. Best run with DCHECKs so
 // that the one in NextChar fires.
-TEST(JSONReaderTest, InvalidSanity) {
+TEST_P(JSONReaderTest, InvalidSanity) {
   const char* const kInvalidJson[] = {
       "/* test *", "{\"foo\"", "{\"foo\":", "  [", "\"\\u123g\"", "{\n\"eh:\n}",
   };
@@ -791,7 +839,7 @@
   }
 }
 
-TEST(JSONReaderTest, IllegalTrailingNull) {
+TEST_P(JSONReaderTest, IllegalTrailingNull) {
   const char json[] = {'"', 'n', 'u', 'l', 'l', '"', '\0'};
   std::string json_string(json, sizeof(json));
   auto root = JSONReader::ReadAndReturnValueWithError(json_string);
@@ -799,7 +847,7 @@
   EXPECT_NE("", root.error().message);
 }
 
-TEST(JSONReaderTest, ASCIIControlCodes) {
+TEST_P(JSONReaderTest, ASCIIControlCodes) {
   // A literal NUL byte or a literal new line, in a JSON string, should be
   // rejected. RFC 8259 section 7 says "the characters that MUST be escaped
   // [include]... the control characters (U+0000 through U+001F)".
@@ -813,13 +861,13 @@
   EXPECT_EQ(5u, root->GetString().length());
 }
 
-TEST(JSONReaderTest, MaxNesting) {
+TEST_P(JSONReaderTest, MaxNesting) {
   std::string json(R"({"outer": { "inner": {"foo": true}}})");
   EXPECT_FALSE(JSONReader::Read(json, JSON_PARSE_RFC, 3));
   EXPECT_TRUE(JSONReader::Read(json, JSON_PARSE_RFC, 4));
 }
 
-TEST(JSONReaderTest, Decode4ByteUtf8Char) {
+TEST_P(JSONReaderTest, Decode4ByteUtf8Char) {
   // kUtf8Data contains a 4 byte unicode character (a smiley!) that JSONReader
   // should be able to handle. The UTF-8 encoding of U+1F607 SMILING FACE WITH
   // HALO is "\xF0\x9F\x98\x87".
@@ -833,7 +881,7 @@
   EXPECT_EQ("\xF0\x9F\x98\x87", (*list)[0].GetString());
 }
 
-TEST(JSONReaderTest, DecodeUnicodeNonCharacter) {
+TEST_P(JSONReaderTest, DecodeUnicodeNonCharacter) {
   // Tests Unicode code points (encoded as escaped UTF-16) that are not valid
   // characters.
   EXPECT_TRUE(JSONReader::Read("[\"\\uFDD0\"]"));         // U+FDD0
@@ -875,13 +923,13 @@
   EXPECT_TRUE(JSONReader::Read("[\"\\uDBFF\\uDFFF\"]"));  // U+10FFFF
 }
 
-TEST(JSONReaderTest, DecodeNegativeEscapeSequence) {
+TEST_P(JSONReaderTest, DecodeNegativeEscapeSequence) {
   EXPECT_FALSE(JSONReader::Read("[\"\\x-A\"]"));
   EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]"));
 }
 
 // Verifies invalid code points are replaced.
-TEST(JSONReaderTest, ReplaceInvalidCharacters) {
+TEST_P(JSONReaderTest, ReplaceInvalidCharacters) {
   // U+D800 is a lone high surrogate.
   const std::string invalid_high = "\"\xED\xA0\x80\"";
   absl::optional<Value> value =
@@ -900,7 +948,7 @@
   EXPECT_EQ("\xEF\xBF\xBD\xEF\xBF\xBD\xEF\xBF\xBD", value->GetString());
 }
 
-TEST(JSONReaderTest, ReplaceInvalidUTF16EscapeSequence) {
+TEST_P(JSONReaderTest, ReplaceInvalidUTF16EscapeSequence) {
   // U+D800 is a lone high surrogate.
   const std::string invalid_high = "\"_\\uD800_\"";
   absl::optional<Value> value =
@@ -917,7 +965,7 @@
   EXPECT_EQ("_\xEF\xBF\xBD_", value->GetString());
 }
 
-TEST(JSONReaderTest, ParseNumberErrors) {
+TEST_P(JSONReaderTest, ParseNumberErrors) {
   const struct {
     const char* input;
     bool parse_success;
@@ -955,7 +1003,7 @@
   }
 }
 
-TEST(JSONReaderTest, UnterminatedInputs) {
+TEST_P(JSONReaderTest, UnterminatedInputs) {
   const char* const kCases[] = {
       // clang-format off
       "/",
@@ -988,7 +1036,7 @@
   }
 }
 
-TEST(JSONReaderTest, LineColumnCounting) {
+TEST_P(JSONReaderTest, LineColumnCounting) {
   const struct {
     const char* input;
     int error_line;
@@ -1061,7 +1109,7 @@
   }
 }
 
-TEST(JSONReaderTest, ChromiumExtensions) {
+TEST_P(JSONReaderTest, ChromiumExtensions) {
   // All of these cases should parse with JSON_PARSE_CHROMIUM_EXTENSIONS but
   // fail with JSON_PARSE_RFC.
   const struct {
@@ -1104,4 +1152,19 @@
   }
 }
 
+TEST_P(JSONReaderTest, UsingRust) {
+  ASSERT_EQ(JSONReader::UsingRust(), using_rust_);
+}
+
+INSTANTIATE_TEST_SUITE_P(All,
+                         JSONReaderTest,
+#if BUILDFLAG(BUILD_RUST_JSON_READER)
+                         testing::Bool(),
+#else   // BUILDFLAG(BUILD_RUST_JSON_READER)
+                         testing::Values(false),
+#endif  // BUILDFLAG(BUILD_RUST_JSON_READER)
+                         [](const testing::TestParamInfo<bool>& info) {
+                           return info.param ? "Rust" : "Cpp";
+                         });
+
 }  // namespace base
diff --git a/base/libcpp_hardening_test.cc b/base/libcpp_hardening_test.cc
index fef2062..0b616b9 100644
--- a/base/libcpp_hardening_test.cc
+++ b/base/libcpp_hardening_test.cc
@@ -10,9 +10,11 @@
 
 namespace {
 
-#if !_LIBCPP_ENABLE_ASSERTIONS
+// TODO(thakis): Remove _LIBCPP_ENABLE_ASSERTIONS here once
+// pnacl-saigo's libc++ is new enough.
+#if !_LIBCPP_ENABLE_ASSERTIONS && !_LIBCPP_ENABLE_SAFE_MODE
 #error \
-    "Define _LIBCPP_ENABLE_ASSERTIONS to 1 in \
+    "Define _LIBCPP_ENABLE_SAFE_MODE to 1 in \
 buildtools/third_party/libc++/__config_site"
 
 #endif
diff --git a/base/linux_util.cc b/base/linux_util.cc
index dea7ca4..51fb324 100644
--- a/base/linux_util.cc
+++ b/base/linux_util.cc
@@ -88,6 +88,24 @@
 };
 #endif  // !BUILDFLAG(IS_CHROMEOS_ASH)
 
+bool GetThreadsFromProcessDir(const char* dir_path, std::vector<pid_t>* tids) {
+  DirReaderPosix dir_reader(dir_path);
+
+  if (!dir_reader.IsValid()) {
+    DLOG(WARNING) << "Cannot open " << dir_path;
+    return false;
+  }
+
+  while (dir_reader.Next()) {
+    pid_t tid;
+    if (StringToInt(dir_reader.name(), &tid)) {
+      tids->push_back(tid);
+    }
+  }
+
+  return true;
+}
+
 // Account for the terminating null character.
 constexpr int kDistroSize = 128 + 1;
 
@@ -138,20 +156,11 @@
   // 25 > strlen("/proc//task") + strlen(std::to_string(INT_MAX)) + 1 = 22
   char buf[25];
   strings::SafeSPrintf(buf, "/proc/%d/task", pid);
-  DirReaderPosix dir_reader(buf);
+  return GetThreadsFromProcessDir(buf, tids);
+}
 
-  if (!dir_reader.IsValid()) {
-    DLOG(WARNING) << "Cannot open " << buf;
-    return false;
-  }
-
-  while (dir_reader.Next()) {
-    pid_t tid;
-    if (StringToInt(dir_reader.name(), &tid))
-      tids->push_back(tid);
-  }
-
-  return true;
+bool GetThreadsForCurrentProcess(std::vector<pid_t>* tids) {
+  return GetThreadsFromProcessDir("/proc/self/task", tids);
 }
 
 pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
diff --git a/base/linux_util.h b/base/linux_util.h
index b377283..9f913e8 100644
--- a/base/linux_util.h
+++ b/base/linux_util.h
@@ -37,6 +37,14 @@
 // true and appends the list of threads to |tids|. Otherwise, returns false.
 BASE_EXPORT bool GetThreadsForProcess(pid_t pid, std::vector<pid_t>* tids);
 
+// Get a list of all threads for the current process. On success, returns true
+// and appends the list of threads to |tids|. Otherwise, returns false.
+// Unlike the function above, this function reads /proc/self/task, not
+// /proc/<pid>/task. On Android, the former should always be accessible to
+// GPU and Browser processes, while the latter may or may not be accessible
+// depending on the system and the app configuration.
+BASE_EXPORT bool GetThreadsForCurrentProcess(std::vector<pid_t>* tids);
+
 // For a given process |pid|, look through all its threads and find the first
 // thread with /proc/[pid]/task/[thread_id]/syscall whose first N bytes matches
 // |expected_data|, where N is the length of |expected_data|.
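
A short usage sketch of the new helper declared above (illustrative only, not part of the imported patch; `CountCurrentProcessThreads` is hypothetical):

#include <sys/types.h>

#include <vector>

#include "base/linux_util.h"

size_t CountCurrentProcessThreads() {
  std::vector<pid_t> tids;
  if (!base::GetThreadsForCurrentProcess(&tids))
    return 0;  // /proc/self/task was not readable.
  return tids.size();
}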
diff --git a/base/logging.cc b/base/logging.cc
index 869cb05..8b13aa8 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -105,6 +105,7 @@
 
 #if BUILDFLAG(IS_ANDROID)
 #include <android/log.h>
+#include "base/android/jni_android.h"
 #endif
 
 #if BUILDFLAG(IS_CHROMEOS_ASH)
@@ -725,6 +726,13 @@
     base::debug::StackTrace stack_trace;
     stream_ << std::endl;  // Newline to separate from log message.
     stack_trace.OutputToStream(&stream_);
+#if BUILDFLAG(IS_ANDROID)
+    std::string java_stack = base::android::GetJavaStackTraceIfPresent();
+    if (!java_stack.empty()) {
+      stream_ << "Java stack (may interleave with native stack):\n";
+      stream_ << java_stack << '\n';
+    }
+#endif
     base::debug::TaskTrace task_trace;
     if (!task_trace.empty())
       task_trace.OutputToStream(&stream_);
diff --git a/base/mac/BUILD.gn b/base/mac/BUILD.gn
new file mode 100644
index 0000000..1f919da
--- /dev/null
+++ b/base/mac/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2023 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/mac/rules.gni")
+
+executable("launch_application_test_helper") {
+  configs += [ "//build/config/compiler:wexit_time_destructors" ]
+  sources = [ "test/launch_application_test_helper_main.m" ]
+
+  # This executable gets moved around at runtime; linking against default deps
+  # could result in libraries not being found in component builds. Fortunately
+  # this executable doesn't actually need anything from default deps.
+  no_default_deps = true
+
+  frameworks = [
+    "AppKit.framework",
+    "CoreFoundation.framework",
+    "Foundation.framework",
+  ]
+}
diff --git a/base/mac/authorization_util.mm b/base/mac/authorization_util.mm
index 7bc7262..e276960 100644
--- a/base/mac/authorization_util.mm
+++ b/base/mac/authorization_util.mm
@@ -11,9 +11,9 @@
 #include <string>
 
 #include "base/apple/bundle_locations.h"
+#include "base/apple/foundation_util.h"
+#include "base/apple/osstatus_logging.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mac_logging.h"
 #include "base/mac/scoped_authorizationref.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_number_conversions.h"
diff --git a/base/mac/bind_objc_block_unittest_arc.mm b/base/mac/bind_objc_block_unittest_arc.mm
deleted file mode 100644
index 3a93974..0000000
--- a/base/mac/bind_objc_block_unittest_arc.mm
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/functional/bind.h"
-#include "base/functional/callback.h"
-#include "base/functional/callback_helpers.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/gtest_mac.h"
-
-namespace {
-
-TEST(BindObjcBlockTestARC, TestScopedClosureRunnerExitScope) {
-  int run_count = 0;
-  int* ptr = &run_count;
-  {
-    base::ScopedClosureRunner runner(base::BindOnce(^{
-      (*ptr)++;
-    }));
-    EXPECT_EQ(0, run_count);
-  }
-  EXPECT_EQ(1, run_count);
-}
-
-TEST(BindObjcBlockTestARC, TestScopedClosureRunnerRelease) {
-  int run_count = 0;
-  int* ptr = &run_count;
-  base::OnceClosure c;
-  {
-    base::ScopedClosureRunner runner(base::BindOnce(^{
-      (*ptr)++;
-    }));
-    c = runner.Release();
-    EXPECT_EQ(0, run_count);
-  }
-  EXPECT_EQ(0, run_count);
-  std::move(c).Run();
-  EXPECT_EQ(1, run_count);
-}
-
-TEST(BindObjcBlockTestARC, TestReturnValue) {
-  const int kReturnValue = 42;
-  base::OnceCallback<int(void)> c = base::BindOnce(^{
-    return kReturnValue;
-  });
-  EXPECT_EQ(kReturnValue, std::move(c).Run());
-}
-
-TEST(BindObjcBlockTestARC, TestArgument) {
-  const int kArgument = 42;
-  base::OnceCallback<int(int)> c = base::BindOnce(^(int a) {
-    return a + 1;
-  });
-  EXPECT_EQ(kArgument + 1, std::move(c).Run(kArgument));
-}
-
-TEST(BindObjcBlockTestARC, TestTwoArguments) {
-  std::string result;
-  std::string* ptr = &result;
-  base::OnceCallback<void(const std::string&, const std::string&)> c =
-      base::BindOnce(^(const std::string& a, const std::string& b) {
-        *ptr = a + b;
-      });
-  std::move(c).Run("forty", "two");
-  EXPECT_EQ(result, "fortytwo");
-}
-
-TEST(BindObjcBlockTestARC, TestThreeArguments) {
-  std::string result;
-  std::string* ptr = &result;
-  base::OnceCallback<void(const std::string&, const std::string&,
-                          const std::string&)>
-      cb = base::BindOnce(
-          ^(const std::string& a, const std::string& b, const std::string& c) {
-            *ptr = a + b + c;
-          });
-  std::move(cb).Run("six", "times", "nine");
-  EXPECT_EQ(result, "sixtimesnine");
-}
-
-TEST(BindObjcBlockTestARC, TestSixArguments) {
-  std::string result1;
-  std::string* ptr = &result1;
-  int result2;
-  int* ptr2 = &result2;
-  base::OnceCallback<void(int, int, const std::string&, const std::string&, int,
-                          const std::string&)>
-      cb = base::BindOnce(^(int a, int b, const std::string& c,
-                            const std::string& d, int e, const std::string& f) {
-        *ptr = c + d + f;
-        *ptr2 = a + b + e;
-      });
-  std::move(cb).Run(1, 2, "infinite", "improbability", 3, "drive");
-  EXPECT_EQ(result1, "infiniteimprobabilitydrive");
-  EXPECT_EQ(result2, 6);
-}
-
-TEST(BindObjcBlockTestARC, TestBlockMoveable) {
-  base::OnceClosure c;
-  __block BOOL invoked_block = NO;
-  @autoreleasepool {
-    c = base::BindOnce(
-        ^(std::unique_ptr<BOOL> v) {
-          invoked_block = *v;
-        },
-        std::make_unique<BOOL>(YES));
-  }
-  std::move(c).Run();
-  EXPECT_TRUE(invoked_block);
-}
-
-// Tests that the bound block is retained until the end of its execution,
-// even if the callback itself is destroyed during the invocation. It was
-// found that some code depends on this behaviour (see crbug.com/845687).
-TEST(BindObjcBlockTestARC, TestBlockDeallocation) {
-  base::RepeatingClosure closure;
-  __block BOOL invoked_block = NO;
-  closure = base::BindRepeating(
-      ^(base::RepeatingClosure* this_closure) {
-        *this_closure = base::RepeatingClosure();
-        invoked_block = YES;
-      },
-      &closure);
-  closure.Run();
-  EXPECT_TRUE(invoked_block);
-}
-
-#if BUILDFLAG(IS_IOS)
-
-TEST(BindObjcBlockTestARC, TestBlockReleased) {
-  __weak NSObject* weak_nsobject;
-  @autoreleasepool {
-    NSObject* nsobject = [[NSObject alloc] init];
-    weak_nsobject = nsobject;
-
-    auto callback = base::BindOnce(^{
-      [nsobject description];
-    });
-  }
-  EXPECT_NSEQ(nil, weak_nsobject);
-}
-
-#endif
-
-}  // namespace
diff --git a/base/mac/call_with_eh_frame.cc b/base/mac/call_with_eh_frame.cc
deleted file mode 100644
index 483bbee..0000000
--- a/base/mac/call_with_eh_frame.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2015 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/call_with_eh_frame.h"
-
-#include <stdint.h>
-#include <unwind.h>
-
-#include "build/build_config.h"
-
-namespace base::mac {
-
-#if defined(__x86_64__) || defined(__aarch64__)
-extern "C" _Unwind_Reason_Code __gxx_personality_v0(int,
-                                                    _Unwind_Action,
-                                                    uint64_t,
-                                                    struct _Unwind_Exception*,
-                                                    struct _Unwind_Context*);
-
-_Unwind_Reason_Code CxxPersonalityRoutine(
-    int version,
-    _Unwind_Action actions,
-    uint64_t exception_class,
-    struct _Unwind_Exception* exception_object,
-    struct _Unwind_Context* context) {
-  // Unwinding is a two-phase process: phase one searches for an exception
-  // handler, and phase two performs cleanup. For phase one, this custom
-  // personality will terminate the search. For phase two, this should delegate
-  // back to the standard personality routine.
-
-  if ((actions & _UA_SEARCH_PHASE) != 0) {
-    // Tell libunwind that this is the end of the stack. When it encounters the
-    // CallWithEHFrame, it will stop searching for an exception handler. The
-    // result is that no exception handler has been found higher on the stack,
-    // and any that are lower on the stack (e.g. in CFRunLoopRunSpecific), will
-    // now be skipped. Since this is reporting the end of the stack, and no
-    // exception handler will have been found, std::terminate() will be called.
-    return _URC_END_OF_STACK;
-  }
-
-  return __gxx_personality_v0(version, actions, exception_class,
-                              exception_object, context);
-}
-#else  // !defined(__x86_64__) && !defined(__aarch64__)
-// No implementation exists, so just call the block directly.
-void CallWithEHFrame(void (^block)(void)) {
-  block();
-}
-#endif  // defined(__x86_64__) || defined(__aarch64__)
-}  // namespace base::mac
diff --git a/base/mac/call_with_eh_frame.h b/base/mac/call_with_eh_frame.h
deleted file mode 100644
index bab60dc..0000000
--- a/base/mac/call_with_eh_frame.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_CALL_WITH_EH_FRAME_H_
-#define BASE_MAC_CALL_WITH_EH_FRAME_H_
-
-#include "base/base_export.h"
-
-namespace base::mac {
-
-// Invokes the specified block in a stack frame with a special exception
-// handler. This function creates an exception handling stack frame that
-// specifies a custom C++ exception personality routine, which terminates the
-// search for an exception handler at this frame.
-//
-// The purpose of this function is to prevent a try/catch statement in system
-// libraries, acting as a global exception handler, from handling exceptions
-// in such a way that disrupts the generation of useful stack traces.
-void BASE_EXPORT CallWithEHFrame(void (^block)(void));
-
-}  // namespace base::mac
-
-#endif  // BASE_MAC_CALL_WITH_EH_FRAME_H_
diff --git a/base/mac/call_with_eh_frame_asm.S b/base/mac/call_with_eh_frame_asm.S
deleted file mode 100644
index 841a953..0000000
--- a/base/mac/call_with_eh_frame_asm.S
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(__x86_64__) || defined(__aarch64__)
-
-// base::mac::CallWithEHFrame(void () block_pointer)
-#define CALL_WITH_EH_FRAME __ZN4base3mac15CallWithEHFrameEU13block_pointerFvvE
-
-  .section __TEXT,__text,regular,pure_instructions
-#if !defined(COMPONENT_BUILD)
-  .private_extern CALL_WITH_EH_FRAME
-#endif
-  .globl CALL_WITH_EH_FRAME
-  .p2align 2
-CALL_WITH_EH_FRAME:
-
-  .cfi_startproc
-
-  // Configure the C++ exception handler personality routine. Normally the
-  // compiler would emit ___gxx_personality_v0 here. The purpose of this
-  // function is to use a custom personality routine.
-  .cfi_personality 155, __ZN4base3mac21CxxPersonalityRoutineEi14_Unwind_ActionyP17_Unwind_ExceptionP15_Unwind_Context
-  .cfi_lsda 16, CallWithEHFrame_exception_table
-
-#if defined(__x86_64__)
-Lfunction_start:
-  pushq %rbp
-  .cfi_def_cfa_offset 16
-  .cfi_offset %rbp, -16
-  movq %rsp, %rbp
-  .cfi_def_cfa_register %rbp
-
-  // Load the function pointer from the block descriptor.
-  movq 16(%rdi), %rax
-
-  // Execute the block in the context of a C++ try{}.
-Ltry_start:
-  callq *%rax
-Ltry_end:
-  popq %rbp
-  ret
-
-  // Landing pad for the exception handler. This should never be called, since
-  // the personality routine will stop the search for an exception handler,
-  // which will cause the runtime to invoke the default terminate handler.
-Lcatch:
-  movq %rax, %rdi
-  callq ___cxa_begin_catch  // The ABI requires a call to the catch handler.
-  ud2  // In the event this is called, make it fatal.
-
-#elif defined(__aarch64__)
-Lfunction_start:
-  stp x29, x30, [sp, #-16]!
-  mov x29, sp
-  .cfi_def_cfa w29, 16
-  .cfi_offset w30, -8
-  .cfi_offset w29, -16
-
-  // Load the function pointer from the block descriptor.
-  ldr x8, [x0, #16]
-
-  // Execute the block in the context of a C++ try{}.
-Ltry_start:
-  blr x8
-Ltry_end:
-  ldp x29, x30, [sp], #16
-  ret
-
-  // Landing pad for the exception handler. This should never be called, since
-  // the personality routine will stop the search for an exception handler,
-  // which will cause the runtime to invoke the default terminate handler.
-Lcatch:
-  bl ___cxa_begin_catch  // The ABI requires a call to the catch handler.
-  brk #0x1  // In the event this is called, make it fatal.
-#endif
-
-Lfunction_end:
-  .cfi_endproc
-
-  // The C++ exception table that is used to identify this frame as an
-  // exception handler. See https://llvm.org/docs/ExceptionHandling.html,
-  // https://itanium-cxx-abi.github.io/cxx-abi/exceptions.pdf and
-  // https://www.airs.com/blog/archives/464.
-  .section __TEXT,__gcc_except_tab
-  .p2align 2
-CallWithEHFrame_exception_table:
-  .byte 255  // DW_EH_PE_omit
-  .byte 155  // DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4
-  // The number of bytes in this table
-  .uleb128 Ltypes_table_base - Ltypes_table_ref_base
-
-Ltypes_table_ref_base:
-  .byte 1  // DW_EH_PE_uleb128
-  // Callsite table length.
-  .uleb128 Lcall_site_table_end - Lcall_site_table_start
-
-Lcall_site_table_start:
-// First callsite.
-CS1_begin = Ltry_start - Lfunction_start
-  .uleb128 CS1_begin
-CS1_end = Ltry_end - Ltry_start
-  .uleb128 CS1_end
-
-// First landing pad.
-LP1 = Lcatch - Lfunction_start
-  .uleb128 LP1
-  .uleb128 1  // Action record.
-
-// Second callsite.
-CS2_begin = Ltry_end - Lfunction_start
-  .uleb128 CS2_begin
-CS2_end = Lfunction_end - Ltry_end
-  .uleb128 CS2_end
-
-  // Second landing pad (none).
-  .uleb128 0
-  .uleb128 0  // No action.
-
-Lcall_site_table_end:
-  // Action table.
-  // Action record 1.
-  .uleb128 1  // Type filter -1.
-  .uleb128 0  // No further action to take.
-
-  // Types table.
-  .p2align 2
-  .long 0  // Type filter -1: no type filter for this catch(){} clause.
-
-Ltypes_table_base:
-  .p2align 2
-
-#endif  // defined(__x86_64__) || defined(__aarch64__)
diff --git a/base/mac/call_with_eh_frame_unittest.mm b/base/mac/call_with_eh_frame_unittest.mm
deleted file mode 100644
index 4c4f43e..0000000
--- a/base/mac/call_with_eh_frame_unittest.mm
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2015 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/call_with_eh_frame.h"
-
-#import <Foundation/Foundation.h>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base::mac {
-namespace {
-
-class CallWithEHFrameTest : public testing::Test {
- protected:
-  void ThrowException() {
-    @throw [NSException exceptionWithName:@"TestException"
-                                   reason:@"Testing exceptions"
-                                 userInfo:nil];
-  }
-};
-
-// Catching from within the EHFrame is allowed.
-TEST_F(CallWithEHFrameTest, CatchExceptionHigher) {
-  bool __block saw_exception = false;
-  base::mac::CallWithEHFrame(^{
-    @try {
-      ThrowException();
-    } @catch (NSException* exception) {
-      saw_exception = true;
-    }
-  });
-  EXPECT_TRUE(saw_exception);
-}
-
-// Trying to catch an exception outside the EHFrame is blocked.
-TEST_F(CallWithEHFrameTest, CatchExceptionLower) {
-  auto catch_exception_lower = ^{
-    bool saw_exception = false;
-    @try {
-      base::mac::CallWithEHFrame(^{
-        ThrowException();
-      });
-    } @catch (NSException* exception) {
-      saw_exception = true;
-    }
-    ASSERT_FALSE(saw_exception);
-  };
-  EXPECT_DEATH(catch_exception_lower(), "");
-}
-
-}  // namespace
-}  // namespace base::mac
diff --git a/base/mac/dispatch_source_mach.cc b/base/mac/dispatch_source_mach.cc
deleted file mode 100644
index 12c1321..0000000
--- a/base/mac/dispatch_source_mach.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/dispatch_source_mach.h"
-
-#include "base/mac/scoped_dispatch_object.h"
-
-namespace base {
-
-struct DispatchSourceMach::Storage {
-  // The dispatch queue used to service the source_.
-  ScopedDispatchObject<dispatch_queue_t> queue;
-
-  // A MACH_RECV dispatch source.
-  ScopedDispatchObject<dispatch_source_t> source;
-
-  // Semaphore used to wait on the |source_|'s cancellation in the destructor.
-  ScopedDispatchObject<dispatch_semaphore_t> source_canceled;
-};
-
-DispatchSourceMach::DispatchSourceMach(const char* name,
-                                       mach_port_t port,
-                                       void (^event_handler)())
-    : DispatchSourceMach(dispatch_queue_create(name, DISPATCH_QUEUE_SERIAL),
-                         port,
-                         event_handler) {
-  // Since the queue was created above in the delegated constructor, and it was
-  // subsequently retained, release it here.
-  dispatch_release(storage_->queue);
-}
-
-DispatchSourceMach::DispatchSourceMach(dispatch_queue_t queue,
-                                       mach_port_t port,
-                                       void (^event_handler)())
-    : storage_(std::make_unique<Storage>()) {
-  storage_->queue.reset(queue, base::scoped_policy::RETAIN);
-  storage_->source.reset(dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV,
-                                                port, 0, storage_->queue));
-  storage_->source_canceled.reset(dispatch_semaphore_create(0));
-
-  dispatch_source_set_event_handler(storage_->source, event_handler);
-  dispatch_source_set_cancel_handler(storage_->source, ^{
-    dispatch_semaphore_signal(storage_->source_canceled);
-  });
-}
-
-DispatchSourceMach::~DispatchSourceMach() {
-  // Cancel the source and wait for the semaphore to be signaled. This will
-  // ensure the source managed by this class is not used after it is freed.
-  dispatch_source_cancel(storage_->source);
-  storage_->source.reset();
-
-  dispatch_semaphore_wait(storage_->source_canceled, DISPATCH_TIME_FOREVER);
-}
-
-void DispatchSourceMach::Resume() {
-  dispatch_resume(storage_->source);
-}
-
-dispatch_queue_t DispatchSourceMach::Queue() const {
-  return storage_->queue.get();
-}
-
-}  // namespace base
diff --git a/base/mac/dispatch_source_mach.h b/base/mac/dispatch_source_mach.h
deleted file mode 100644
index aafd635..0000000
--- a/base/mac/dispatch_source_mach.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_DISPATCH_SOURCE_MACH_H_
-#define BASE_MAC_DISPATCH_SOURCE_MACH_H_
-
-#include <dispatch/dispatch.h>
-
-#include <memory>
-
-#include "base/base_export.h"
-
-namespace base {
-
-// This class encapsulates a MACH_RECV dispatch source. When this object is
-// destroyed, the source will be cancelled and it will wait for the source
-// to stop executing work. The source can run on either a user-supplied queue,
-// or it can create its own for the source.
-class BASE_EXPORT DispatchSourceMach {
- public:
-  // Creates a new dispatch source for the |port| and schedules it on a new
-  // queue that will be created with |name|. When a Mach message is received,
-  // the |event_handler| will be called.
-  DispatchSourceMach(const char* name,
-                     mach_port_t port,
-                     void (^event_handler)());
-
-  // Creates a new dispatch source with the same semantics as above, but rather
-  // than creating a new queue, it schedules the source on |queue|.
-  DispatchSourceMach(dispatch_queue_t queue,
-                     mach_port_t port,
-                     void (^event_handler)());
-
-  DispatchSourceMach(const DispatchSourceMach&) = delete;
-  DispatchSourceMach& operator=(const DispatchSourceMach&) = delete;
-
-  // Cancels the source and waits for it to become fully cancelled before
-  // releasing the source.
-  ~DispatchSourceMach();
-
-  // Resumes the source. This must be called before any Mach messages will
-  // be received.
-  void Resume();
-
-  dispatch_queue_t Queue() const;
-
- private:
-  struct Storage;
-  std::unique_ptr<Storage> storage_;
-};
-
-}  // namespace base
-
-#endif  // BASE_MAC_DISPATCH_SOURCE_MACH_H_
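For reference, a minimal sketch of driving the class declared above, using only the members shown; the queue name and handler body are placeholders:

#include <mach/mach.h>

#include "base/mac/dispatch_source_mach.h"

void WatchReceiveRight(mach_port_t port) {
  base::DispatchSourceMach source("org.example.mach-watcher", port, ^{
    // Drain one message from |port| here; the handler runs on the queue the
    // source created for itself.
  });
  // No messages are delivered until the source is resumed.
  source.Resume();
  // Leaving this scope cancels the source and waits for the cancellation to
  // complete before |source| is destroyed.
}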
diff --git a/base/mac/dispatch_source_mach_unittest.cc b/base/mac/dispatch_source_mach_unittest.cc
deleted file mode 100644
index d6f9771..0000000
--- a/base/mac/dispatch_source_mach_unittest.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/dispatch_source_mach.h"
-
-#include <mach/mach.h>
-
-#include <memory>
-
-#include "base/logging.h"
-#include "base/mac/scoped_mach_port.h"
-#include "base/test/test_timeouts.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-class DispatchSourceMachTest : public testing::Test {
- public:
-  void SetUp() override {
-    mach_port_t port = MACH_PORT_NULL;
-    ASSERT_EQ(KERN_SUCCESS, mach_port_allocate(mach_task_self(),
-        MACH_PORT_RIGHT_RECEIVE, &port));
-    receive_right_.reset(port);
-
-    ASSERT_EQ(KERN_SUCCESS, mach_port_insert_right(mach_task_self(), port,
-        port, MACH_MSG_TYPE_MAKE_SEND));
-    send_right_.reset(port);
-  }
-
-  mach_port_t GetPort() { return receive_right_.get(); }
-
-  void WaitForSemaphore(dispatch_semaphore_t semaphore) {
-    dispatch_semaphore_wait(semaphore, dispatch_time(
-        DISPATCH_TIME_NOW,
-        TestTimeouts::action_timeout().InSeconds() * NSEC_PER_SEC));
-  }
-
- private:
-  base::mac::ScopedMachReceiveRight receive_right_;
-  base::mac::ScopedMachSendRight send_right_;
-};
-
-TEST_F(DispatchSourceMachTest, ReceiveAfterResume) {
-  dispatch_semaphore_t signal = dispatch_semaphore_create(0);
-  mach_port_t port = GetPort();
-
-  bool __block did_receive = false;
-  DispatchSourceMach source("org.chromium.base.test.ReceiveAfterResume",
-      port, ^{
-          mach_msg_empty_rcv_t msg = {{0}};
-          msg.header.msgh_size = sizeof(msg);
-          msg.header.msgh_local_port = port;
-          mach_msg_receive(&msg.header);
-          did_receive = true;
-
-          dispatch_semaphore_signal(signal);
-      });
-
-  mach_msg_empty_send_t msg = {{0}};
-  msg.header.msgh_size = sizeof(msg);
-  msg.header.msgh_remote_port = port;
-  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
-  ASSERT_EQ(KERN_SUCCESS, mach_msg_send(&msg.header));
-
-  EXPECT_FALSE(did_receive);
-
-  source.Resume();
-
-  WaitForSemaphore(signal);
-  dispatch_release(signal);
-
-  EXPECT_TRUE(did_receive);
-}
-
-TEST_F(DispatchSourceMachTest, NoMessagesAfterDestruction) {
-  mach_port_t port = GetPort();
-
-  std::unique_ptr<int> count(new int(0));
-  int* __block count_ptr = count.get();
-
-  std::unique_ptr<DispatchSourceMach> source(new DispatchSourceMach(
-      "org.chromium.base.test.NoMessagesAfterDestruction", port, ^{
-        mach_msg_empty_rcv_t msg = {{0}};
-        msg.header.msgh_size = sizeof(msg);
-        msg.header.msgh_local_port = port;
-        mach_msg_receive(&msg.header);
-        LOG(INFO) << "Receieve " << *count_ptr;
-        ++(*count_ptr);
-      }));
-  source->Resume();
-
-  dispatch_queue_t queue =
-      dispatch_queue_create("org.chromium.base.test.MessageSend", NULL);
-  dispatch_semaphore_t signal = dispatch_semaphore_create(0);
-  for (int i = 0; i < 30; ++i) {
-    dispatch_async(queue, ^{
-        mach_msg_empty_send_t msg = {{0}};
-        msg.header.msgh_size = sizeof(msg);
-        msg.header.msgh_remote_port = port;
-        msg.header.msgh_bits =
-            MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
-        mach_msg_send(&msg.header);
-    });
-
-    // After sending five messages, shut down the source and taint the
-    // pointer the handler dereferences. The test will crash if |count_ptr|
-    // is being used after "free".
-    if (i == 5) {
-      std::unique_ptr<DispatchSourceMach>* source_ptr = &source;
-      dispatch_async(queue, ^{
-          source_ptr->reset();
-          count_ptr = reinterpret_cast<int*>(0xdeaddead);
-          dispatch_semaphore_signal(signal);
-      });
-    }
-  }
-
-  WaitForSemaphore(signal);
-  dispatch_release(signal);
-
-  dispatch_release(queue);
-}
-
-}  // namespace base
diff --git a/base/mac/foundation_util.h b/base/mac/foundation_util.h
deleted file mode 100644
index 79b34a0..0000000
--- a/base/mac/foundation_util.h
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_FOUNDATION_UTIL_H_
-#define BASE_MAC_FOUNDATION_UTIL_H_
-
-#include <AvailabilityMacros.h>
-#include <CoreFoundation/CoreFoundation.h>
-#include <CoreText/CoreText.h>
-#include <Security/Security.h>
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/logging.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "build/build_config.h"
-
-#if defined(__OBJC__)
-#import <Foundation/Foundation.h>
-@class NSFont;
-@class UIFont;
-#endif  // __OBJC__
-
-namespace base {
-class FilePath;
-}
-
-namespace base::mac {
-
-// Returns true if the application is running from a bundle
-BASE_EXPORT bool AmIBundled();
-BASE_EXPORT void SetOverrideAmIBundled(bool value);
-
-#if defined(UNIT_TEST)
-// This is required because instantiating some tests requires checking the
-// directory structure, which sets the AmIBundled cache state. Individual tests
-// may or may not be bundled, and this would trip them up if the cache weren't
-// cleared. This should not be called from individual tests, just from test
-// instantiation code that gets a path from PathService.
-BASE_EXPORT void ClearAmIBundledCache();
-#endif
-
-// Returns true if this process is marked as a "Background only process".
-BASE_EXPORT bool IsBackgroundOnlyProcess();
-
-// Returns the path to a resource within the framework bundle.
-BASE_EXPORT FilePath PathForFrameworkBundleResource(const char* resource_name);
-
-// Returns the creator code associated with the CFBundleRef at bundle.
-OSType CreatorCodeForCFBundleRef(CFBundleRef bundle);
-
-// Returns the creator code associated with this application, by calling
-// CreatorCodeForCFBundleRef for the application's main bundle.  If this
-// information cannot be determined, returns kUnknownType ('????').  This
-// does not respect the override app bundle because it's based on CFBundle
-// instead of NSBundle, and because callers probably don't want the override
-// app bundle's creator code anyway.
-BASE_EXPORT OSType CreatorCodeForApplication();
-
-#if defined(__OBJC__)
-
-// Searches for directories for the given key in only the given |domain_mask|.
-// If found, fills result (which must always be non-NULL) with the
-// first found directory and returns true.  Otherwise, returns false.
-BASE_EXPORT bool GetSearchPathDirectory(NSSearchPathDirectory directory,
-                                        NSSearchPathDomainMask domain_mask,
-                                        FilePath* result);
-
-// Searches for directories for the given key in only the local domain.
-// If found, fills result (which must always be non-NULL) with the
-// first found directory and returns true.  Otherwise, returns false.
-BASE_EXPORT bool GetLocalDirectory(NSSearchPathDirectory directory,
-                                   FilePath* result);
-
-// Searches for directories for the given key in only the user domain.
-// If found, fills result (which must always be non-NULL) with the
-// first found directory and returns true.  Otherwise, returns false.
-BASE_EXPORT bool GetUserDirectory(NSSearchPathDirectory directory,
-                                  FilePath* result);
-
-#endif  // __OBJC__
-
-// Returns the ~/Library directory.
-BASE_EXPORT FilePath GetUserLibraryPath();
-
-// Returns the ~/Documents directory.
-BASE_EXPORT FilePath GetUserDocumentPath();
-
-// Takes a path to an (executable) binary and tries to provide the path to an
-// application bundle containing it. It takes the outermost bundle that it can
-// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
-//   |exec_name| - path to the binary
-//   returns - path to the application bundle, or empty on error
-BASE_EXPORT FilePath GetAppBundlePath(const FilePath& exec_name);
-
-// Takes a path to an (executable) binary and tries to provide the path to an
-// application bundle containing it. It takes the innermost bundle that it can
-// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces
-// "/Foo/Bar.app/.../Baz.app").
-//   |exec_name| - path to the binary
-//   returns - path to the application bundle, or empty on error
-BASE_EXPORT FilePath GetInnermostAppBundlePath(const FilePath& exec_name);
-
-#define TYPE_NAME_FOR_CF_TYPE_DECL(TypeCF) \
-  BASE_EXPORT std::string TypeNameForCFType(TypeCF##Ref)
-
-TYPE_NAME_FOR_CF_TYPE_DECL(CFArray);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFBag);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFBoolean);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFData);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFDate);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFDictionary);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFNull);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFNumber);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFSet);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFString);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFURL);
-TYPE_NAME_FOR_CF_TYPE_DECL(CFUUID);
-
-TYPE_NAME_FOR_CF_TYPE_DECL(CGColor);
-
-TYPE_NAME_FOR_CF_TYPE_DECL(CTFont);
-TYPE_NAME_FOR_CF_TYPE_DECL(CTRun);
-
-TYPE_NAME_FOR_CF_TYPE_DECL(SecAccessControl);
-TYPE_NAME_FOR_CF_TYPE_DECL(SecCertificate);
-TYPE_NAME_FOR_CF_TYPE_DECL(SecKey);
-TYPE_NAME_FOR_CF_TYPE_DECL(SecPolicy);
-
-#undef TYPE_NAME_FOR_CF_TYPE_DECL
-
-// Returns the base bundle ID, which can be set by SetBaseBundleID but
-// defaults to a reasonable string. This never returns NULL. BaseBundleID
-// returns a pointer to static storage that must not be freed.
-BASE_EXPORT const char* BaseBundleID();
-
-// Sets the base bundle ID to override the default. The implementation will
-// make its own copy of new_base_bundle_id.
-BASE_EXPORT void SetBaseBundleID(const char* new_base_bundle_id);
-
-// CFCast<>() and CFCastStrict<>() cast a basic CFTypeRef to a more
-// specific CoreFoundation type. The compatibility of the passed
-// object is found by comparing its opaque type against the
-// requested type identifier. If the supplied object is not
-// compatible with the requested return type, CFCast<>() returns
-// NULL and CFCastStrict<>() will DCHECK. Providing a NULL pointer
-// to either variant results in NULL being returned without
-// triggering any DCHECK.
-//
-// Example usage:
-// CFNumberRef some_number = base::mac::CFCast<CFNumberRef>(
-//     CFArrayGetValueAtIndex(array, index));
-//
-// CFTypeRef hello = CFSTR("hello world");
-// CFStringRef some_string = base::mac::CFCastStrict<CFStringRef>(hello);
-
-template<typename T>
-T CFCast(const CFTypeRef& cf_val);
-
-template<typename T>
-T CFCastStrict(const CFTypeRef& cf_val);
-
-#define CF_CAST_DECL(TypeCF)                                            \
-  template <>                                                           \
-  BASE_EXPORT TypeCF##Ref CFCast<TypeCF##Ref>(const CFTypeRef& cf_val); \
-                                                                        \
-  template <>                                                           \
-  BASE_EXPORT TypeCF##Ref CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val)
-
-CF_CAST_DECL(CFArray);
-CF_CAST_DECL(CFBag);
-CF_CAST_DECL(CFBoolean);
-CF_CAST_DECL(CFData);
-CF_CAST_DECL(CFDate);
-CF_CAST_DECL(CFDictionary);
-CF_CAST_DECL(CFNull);
-CF_CAST_DECL(CFNumber);
-CF_CAST_DECL(CFSet);
-CF_CAST_DECL(CFString);
-CF_CAST_DECL(CFURL);
-CF_CAST_DECL(CFUUID);
-
-CF_CAST_DECL(CGColor);
-
-CF_CAST_DECL(CTFont);
-CF_CAST_DECL(CTFontDescriptor);
-CF_CAST_DECL(CTRun);
-
-CF_CAST_DECL(SecAccessControl);
-CF_CAST_DECL(SecCertificate);
-CF_CAST_DECL(SecKey);
-CF_CAST_DECL(SecPolicy);
-
-#undef CF_CAST_DECL
-
-#if defined(__OBJC__)
-
-// ObjCCast<>() and ObjCCastStrict<>() cast a basic id to a more
-// specific (NSObject-derived) type. The compatibility of the passed
-// object is found by checking if it's a kind of the requested type
-// identifier. If the supplied object is not compatible with the
-// requested return type, ObjCCast<>() returns nil and
-// ObjCCastStrict<>() will DCHECK. Providing a nil pointer to either
-// variant results in nil being returned without triggering any DCHECK.
-//
-// The strict variant is useful when retrieving a value from a
-// collection which only has values of a specific type, e.g. an
-// NSArray of NSStrings. The non-strict variant is useful when
-// retrieving values from data that you can't fully control. For
-// example, a plist read from disk may be beyond your exclusive
-// control, so you'd only want to check that the values you retrieve
-// from it are of the expected types, but not crash if they're not.
-//
-// Example usage:
-// NSString* version = base::mac::ObjCCast<NSString>(
-//     [bundle objectForInfoDictionaryKey:@"CFBundleShortVersionString"]);
-//
-// NSString* str = base::mac::ObjCCastStrict<NSString>(
-//     [ns_arr_of_ns_strs objectAtIndex:0]);
-template<typename T>
-T* ObjCCast(id objc_val) {
-  if ([objc_val isKindOfClass:[T class]]) {
-    return reinterpret_cast<T*>(objc_val);
-  }
-  return nil;
-}
-
-template<typename T>
-T* ObjCCastStrict(id objc_val) {
-  T* rv = ObjCCast<T>(objc_val);
-  DCHECK(objc_val == nil || rv);
-  return rv;
-}
-
-#endif  // defined(__OBJC__)
-
-// Helper function for GetValueFromDictionary to create the error message
-// that appears when a type mismatch is encountered.
-BASE_EXPORT std::string GetValueFromDictionaryErrorMessage(
-    CFStringRef key, const std::string& expected_type, CFTypeRef value);
-
-// Utility function to pull out a value from a dictionary, check its type, and
-// return it. Returns NULL if the key is not present or of the wrong type.
-template<typename T>
-T GetValueFromDictionary(CFDictionaryRef dict, CFStringRef key) {
-  CFTypeRef value = CFDictionaryGetValue(dict, key);
-  T value_specific = CFCast<T>(value);
-
-  if (value && !value_specific) {
-    std::string expected_type = TypeNameForCFType(value_specific);
-    DLOG(WARNING) << GetValueFromDictionaryErrorMessage(key,
-                                                        expected_type,
-                                                        value);
-  }
-
-  return value_specific;
-}
-
-#if defined(__OBJC__)
-
-// Converts |path| to an autoreleased NSURL. Returns nil if |path| is empty.
-BASE_EXPORT NSURL* FilePathToNSURL(const FilePath& path);
-
-// Converts |path| to an autoreleased NSString. Returns nil if |path| is empty.
-BASE_EXPORT NSString* FilePathToNSString(const FilePath& path);
-
-// Converts |str| to a FilePath. Returns an empty path if |str| is nil.
-BASE_EXPORT FilePath NSStringToFilePath(NSString* str);
-
-// Converts |url| to a FilePath. Returns an empty path if |url| is nil or if
-// |url| is not of scheme "file".
-BASE_EXPORT FilePath NSURLToFilePath(NSURL* url);
-
-#endif  // __OBJC__
-
-// Converts a non-null |path| to a CFURLRef. |path| must not be empty.
-//
-// This function only uses manually-owned resources, so it does not depend on an
-// NSAutoreleasePool being set up on the current thread.
-BASE_EXPORT ScopedCFTypeRef<CFURLRef> FilePathToCFURL(const FilePath& path);
-
-#if defined(__OBJC__)
-// Converts |range| to an NSRange, returning the new range in |range_out|.
-// Returns true if conversion was successful, false if the values of |range|
-// could not be converted to NSUIntegers.
-[[nodiscard]] BASE_EXPORT bool CFRangeToNSRange(CFRange range,
-                                                NSRange* range_out);
-#endif  // defined(__OBJC__)
-
-}  // namespace base::mac
-
-// Stream operations for CFTypes. They can be used with Objective-C types as
-// well by using the casting methods in base/apple/bridging.h.
-//
-// For example: LOG(INFO) << base::apple::NSToCFPtrCast(@"foo");
-//
-// operator<<() can not be overloaded for Objective-C types as the compiler
-// cannot distinguish between overloads for id with overloads for void*.
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
-                                            const CFErrorRef err);
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
-                                            const CFStringRef str);
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, CFRange);
-
-#if defined(__OBJC__)
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, id);
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSRange);
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, SEL);
-
-#if BUILDFLAG(IS_MAC)
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSPoint);
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSRect);
-BASE_EXPORT extern std::ostream& operator<<(std::ostream& o, NSSize);
-#endif  // IS_MAC
-
-#endif  // __OBJC__
-
-#endif  // BASE_MAC_FOUNDATION_UTIL_H_
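For reference, a small sketch combining the cast helpers and GetValueFromDictionary() declared above; the dictionary, key, and id value are placeholders:

#include <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>

#include "base/mac/foundation_util.h"

void CastSketch(CFDictionaryRef dict, id maybe_string) {
  // Null if the key is missing or maps to a non-CFNumber value; a type
  // mismatch is logged via GetValueFromDictionaryErrorMessage().
  CFNumberRef count =
      base::mac::GetValueFromDictionary<CFNumberRef>(dict, CFSTR("count"));

  // nil rather than a mistyped object on failure; the Strict variant DCHECKs.
  NSString* str = base::mac::ObjCCast<NSString>(maybe_string);

  (void)count;
  (void)str;
}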
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
deleted file mode 100644
index 149cdea..0000000
--- a/base/mac/foundation_util.mm
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/foundation_util.h"
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <vector>
-
-#include "base/apple/bundle_locations.h"
-#include "base/containers/adapters.h"
-#include "base/files/file_path.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "base/notreached.h"
-#include "base/numerics/checked_math.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/ranges/algorithm.h"
-#include "base/strings/string_util.h"
-#include "base/strings/sys_string_conversions.h"
-#include "build/branding_buildflags.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(IS_IOS)
-#import <AppKit/AppKit.h>
-#endif
-
-extern "C" {
-CFTypeID SecKeyGetTypeID();
-}  // extern "C"
-
-namespace base::mac {
-
-namespace {
-
-bool g_cached_am_i_bundled_called = false;
-bool g_cached_am_i_bundled_value = false;
-bool g_override_am_i_bundled = false;
-bool g_override_am_i_bundled_value = false;
-
-bool UncachedAmIBundled() {
-#if BUILDFLAG(IS_IOS)
-  // All apps are bundled on iOS.
-  return true;
-#else
-  if (g_override_am_i_bundled)
-    return g_override_am_i_bundled_value;
-
-  // Yes, this is cheap.
-  return [apple::OuterBundle().bundlePath hasSuffix:@".app"];
-#endif
-}
-
-}  // namespace
-
-bool AmIBundled() {
-  // If the return value is not cached, this function will return different
-  // values depending on when it's called. This confuses some client code, see
-  // http://crbug.com/63183 .
-  if (!g_cached_am_i_bundled_called) {
-    g_cached_am_i_bundled_called = true;
-    g_cached_am_i_bundled_value = UncachedAmIBundled();
-  }
-  DCHECK_EQ(g_cached_am_i_bundled_value, UncachedAmIBundled())
-      << "The return value of AmIBundled() changed. This will confuse tests. "
-      << "Call SetAmIBundled() override manually if your test binary "
-      << "delay-loads the framework.";
-  return g_cached_am_i_bundled_value;
-}
-
-void SetOverrideAmIBundled(bool value) {
-#if BUILDFLAG(IS_IOS)
-  // It doesn't make sense not to be bundled on iOS.
-  if (!value)
-    NOTREACHED();
-#endif
-  g_override_am_i_bundled = true;
-  g_override_am_i_bundled_value = value;
-}
-
-BASE_EXPORT void ClearAmIBundledCache() {
-  g_cached_am_i_bundled_called = false;
-}
-
-bool IsBackgroundOnlyProcess() {
-  // This function really does want to examine NSBundle's idea of the main
-  // bundle dictionary.  It needs to look at the actual running .app's
-  // Info.plist to access its LSUIElement property.
-  @autoreleasepool {
-    NSDictionary* info_dictionary = [apple::MainBundle() infoDictionary];
-    return [info_dictionary[@"LSUIElement"] boolValue] != NO;
-  }
-}
-
-FilePath PathForFrameworkBundleResource(const char* resource_name) {
-  NSBundle* bundle = apple::FrameworkBundle();
-  NSURL* resource_url = [bundle URLForResource:@(resource_name)
-                                 withExtension:nil];
-  return NSURLToFilePath(resource_url);
-}
-
-OSType CreatorCodeForCFBundleRef(CFBundleRef bundle) {
-  OSType creator = kUnknownType;
-  CFBundleGetPackageInfo(bundle, /*packageType=*/nullptr, &creator);
-  return creator;
-}
-
-OSType CreatorCodeForApplication() {
-  CFBundleRef bundle = CFBundleGetMainBundle();
-  if (!bundle)
-    return kUnknownType;
-
-  return CreatorCodeForCFBundleRef(bundle);
-}
-
-bool GetSearchPathDirectory(NSSearchPathDirectory directory,
-                            NSSearchPathDomainMask domain_mask,
-                            FilePath* result) {
-  DCHECK(result);
-  NSArray<NSString*>* dirs =
-      NSSearchPathForDirectoriesInDomains(directory, domain_mask, YES);
-  if (dirs.count < 1) {
-    return false;
-  }
-  *result = NSStringToFilePath(dirs[0]);
-  return true;
-}
-
-bool GetLocalDirectory(NSSearchPathDirectory directory, FilePath* result) {
-  return GetSearchPathDirectory(directory, NSLocalDomainMask, result);
-}
-
-bool GetUserDirectory(NSSearchPathDirectory directory, FilePath* result) {
-  return GetSearchPathDirectory(directory, NSUserDomainMask, result);
-}
-
-FilePath GetUserLibraryPath() {
-  FilePath user_library_path;
-  if (!GetUserDirectory(NSLibraryDirectory, &user_library_path)) {
-    DLOG(WARNING) << "Could not get user library path";
-  }
-  return user_library_path;
-}
-
-FilePath GetUserDocumentPath() {
-  FilePath user_document_path;
-  if (!GetUserDirectory(NSDocumentDirectory, &user_document_path)) {
-    DLOG(WARNING) << "Could not get user document path";
-  }
-  return user_document_path;
-}
-
-// Takes a path to an (executable) binary and tries to provide the path to an
-// application bundle containing it. It takes the outermost bundle that it can
-// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
-//   |exec_name| - path to the binary
-//   returns - path to the application bundle, or empty on error
-FilePath GetAppBundlePath(const FilePath& exec_name) {
-  const char kExt[] = ".app";
-  const size_t kExtLength = std::size(kExt) - 1;
-
-  // Split the path into components.
-  std::vector<std::string> components = exec_name.GetComponents();
-
-  // It's an error if we don't get any components.
-  if (components.empty())
-    return FilePath();
-
-  // Don't prepend '/' to the first component.
-  std::vector<std::string>::const_iterator it = components.begin();
-  std::string bundle_name = *it;
-  DCHECK_GT(it->length(), 0U);
-  // If the first component ends in ".app", we're already done.
-  if (it->length() > kExtLength &&
-      !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength))
-    return FilePath(bundle_name);
-
-  // The first component may be "/" or "//", etc. Only append '/' if it doesn't
-  // already end in '/'.
-  if (bundle_name.back() != '/')
-    bundle_name += '/';
-
-  // Go through the remaining components.
-  for (++it; it != components.end(); ++it) {
-    DCHECK_GT(it->length(), 0U);
-
-    bundle_name += *it;
-
-    // If the current component ends in ".app", we're done.
-    if (it->length() > kExtLength &&
-        !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength))
-      return FilePath(bundle_name);
-
-    // Separate this component from the next one.
-    bundle_name += '/';
-  }
-
-  return FilePath();
-}
-
-// Takes a path to an (executable) binary and tries to provide the path to an
-// application bundle containing it. It takes the innermost bundle that it can
-// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces
-// "/Foo/Bar.app/.../Baz.app").
-//   |exec_name| - path to the binary
-//   returns - path to the application bundle, or empty on error
-FilePath GetInnermostAppBundlePath(const FilePath& exec_name) {
-  static constexpr char kExt[] = ".app";
-  static constexpr size_t kExtLength = std::size(kExt) - 1;
-
-  // Split the path into components.
-  std::vector<std::string> components = exec_name.GetComponents();
-
-  // It's an error if we don't get any components.
-  if (components.empty()) {
-    return FilePath();
-  }
-
-  auto app = ranges::find_if(
-      Reversed(components), [](const std::string& component) -> bool {
-        return component.size() > kExtLength && EndsWith(component, kExt);
-      });
-
-  if (app == components.rend()) {
-    return FilePath();
-  }
-
-  // Remove all path components after the final ".app" extension.
-  components.erase(app.base(), components.end());
-
-  std::string bundle_path;
-  for (const std::string& component : components) {
-    // Don't prepend a slash if this is the first component or if the
-    // previous component ended with a slash, which can happen when dealing
-    // with an absolute path.
-    if (!bundle_path.empty() && bundle_path.back() != '/') {
-      bundle_path += '/';
-    }
-
-    bundle_path += component;
-  }
-
-  return FilePath(bundle_path);
-}
-
-#define TYPE_NAME_FOR_CF_TYPE_DEFN(TypeCF) \
-std::string TypeNameForCFType(TypeCF##Ref) { \
-  return #TypeCF; \
-}
-
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFArray)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFBag)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFBoolean)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFData)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFDate)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFDictionary)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFNull)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFNumber)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFSet)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFString)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFURL)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CFUUID)
-
-TYPE_NAME_FOR_CF_TYPE_DEFN(CGColor)
-
-TYPE_NAME_FOR_CF_TYPE_DEFN(CTFont)
-TYPE_NAME_FOR_CF_TYPE_DEFN(CTRun)
-
-#if !BUILDFLAG(IS_IOS)
-TYPE_NAME_FOR_CF_TYPE_DEFN(SecAccessControl)
-TYPE_NAME_FOR_CF_TYPE_DEFN(SecCertificate)
-TYPE_NAME_FOR_CF_TYPE_DEFN(SecKey)
-TYPE_NAME_FOR_CF_TYPE_DEFN(SecPolicy)
-#endif
-
-#undef TYPE_NAME_FOR_CF_TYPE_DEFN
-
-static const char* base_bundle_id;
-
-const char* BaseBundleID() {
-  if (base_bundle_id) {
-    return base_bundle_id;
-  }
-
-#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
-  return "com.google.Chrome";
-#else
-  return "org.chromium.Chromium";
-#endif
-}
-
-void SetBaseBundleID(const char* new_base_bundle_id) {
-  if (new_base_bundle_id != base_bundle_id) {
-    free((void*)base_bundle_id);
-    base_bundle_id = new_base_bundle_id ? strdup(new_base_bundle_id) : nullptr;
-  }
-}
-
-#define CF_CAST_DEFN(TypeCF) \
-template<> TypeCF##Ref \
-CFCast<TypeCF##Ref>(const CFTypeRef& cf_val) { \
-  if (cf_val == NULL) { \
-    return NULL; \
-  } \
-  if (CFGetTypeID(cf_val) == TypeCF##GetTypeID()) { \
-    return (TypeCF##Ref)(cf_val); \
-  } \
-  return NULL; \
-} \
-\
-template<> TypeCF##Ref \
-CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val) { \
-  TypeCF##Ref rv = CFCast<TypeCF##Ref>(cf_val); \
-  DCHECK(cf_val == NULL || rv); \
-  return rv; \
-}
-
-CF_CAST_DEFN(CFArray)
-CF_CAST_DEFN(CFBag)
-CF_CAST_DEFN(CFBoolean)
-CF_CAST_DEFN(CFData)
-CF_CAST_DEFN(CFDate)
-CF_CAST_DEFN(CFDictionary)
-CF_CAST_DEFN(CFNull)
-CF_CAST_DEFN(CFNumber)
-CF_CAST_DEFN(CFSet)
-CF_CAST_DEFN(CFString)
-CF_CAST_DEFN(CFURL)
-CF_CAST_DEFN(CFUUID)
-
-CF_CAST_DEFN(CGColor)
-
-CF_CAST_DEFN(CTFont)
-CF_CAST_DEFN(CTFontDescriptor)
-CF_CAST_DEFN(CTRun)
-
-CF_CAST_DEFN(SecCertificate)
-
-#if !BUILDFLAG(IS_IOS)
-CF_CAST_DEFN(SecAccessControl)
-CF_CAST_DEFN(SecKey)
-CF_CAST_DEFN(SecPolicy)
-#endif
-
-#undef CF_CAST_DEFN
-
-std::string GetValueFromDictionaryErrorMessage(
-    CFStringRef key, const std::string& expected_type, CFTypeRef value) {
-  ScopedCFTypeRef<CFStringRef> actual_type_ref(
-      CFCopyTypeIDDescription(CFGetTypeID(value)));
-  return "Expected value for key " + SysCFStringRefToUTF8(key) + " to be " +
-         expected_type + " but it was " +
-         SysCFStringRefToUTF8(actual_type_ref) + " instead";
-}
-
-NSURL* FilePathToNSURL(const FilePath& path) {
-  if (NSString* path_string = FilePathToNSString(path))
-    return [NSURL fileURLWithPath:path_string];
-  return nil;
-}
-
-NSString* FilePathToNSString(const FilePath& path) {
-  if (path.empty())
-    return nil;
-  return @(path.value().c_str());  // @() does UTF8 conversion.
-}
-
-FilePath NSStringToFilePath(NSString* str) {
-  if (!str.length) {
-    return FilePath();
-  }
-  return FilePath(str.fileSystemRepresentation);
-}
-
-FilePath NSURLToFilePath(NSURL* url) {
-  if (!url.fileURL) {
-    return FilePath();
-  }
-  return NSStringToFilePath(url.path);
-}
-
-ScopedCFTypeRef<CFURLRef> FilePathToCFURL(const FilePath& path) {
-  DCHECK(!path.empty());
-
-  // The function's docs promise that it does not require an NSAutoreleasePool.
-  // A straightforward way to accomplish this is to use *Create* functions,
-  // combined with ScopedCFTypeRef.
-  const std::string& path_string = path.value();
-  ScopedCFTypeRef<CFStringRef> path_cfstring(CFStringCreateWithBytes(
-      kCFAllocatorDefault, reinterpret_cast<const UInt8*>(path_string.data()),
-      checked_cast<CFIndex>(path_string.length()), kCFStringEncodingUTF8,
-      /*isExternalRepresentation=*/FALSE));
-  if (!path_cfstring)
-    return ScopedCFTypeRef<CFURLRef>();
-
-  return ScopedCFTypeRef<CFURLRef>(CFURLCreateWithFileSystemPath(
-      kCFAllocatorDefault, path_cfstring, kCFURLPOSIXPathStyle,
-      /*isDirectory=*/FALSE));
-}
-
-bool CFRangeToNSRange(CFRange range, NSRange* range_out) {
-  NSUInteger end;
-  if (IsValueInRangeForNumericType<NSUInteger>(range.location) &&
-      IsValueInRangeForNumericType<NSUInteger>(range.length) &&
-      CheckAdd(range.location, range.length).AssignIfValid(&end) &&
-      IsValueInRangeForNumericType<NSUInteger>(end)) {
-    *range_out = NSMakeRange(static_cast<NSUInteger>(range.location),
-                             static_cast<NSUInteger>(range.length));
-    return true;
-  }
-  return false;
-}
-
-}  // namespace base::mac
-
-std::ostream& operator<<(std::ostream& o, const CFStringRef string) {
-  return o << base::SysCFStringRefToUTF8(string);
-}
-
-std::ostream& operator<<(std::ostream& o, const CFErrorRef err) {
-  base::ScopedCFTypeRef<CFStringRef> desc(CFErrorCopyDescription(err));
-  base::ScopedCFTypeRef<CFDictionaryRef> user_info(CFErrorCopyUserInfo(err));
-  CFStringRef errorDesc = nullptr;
-  if (user_info.get()) {
-    errorDesc = reinterpret_cast<CFStringRef>(
-        CFDictionaryGetValue(user_info.get(), kCFErrorDescriptionKey));
-  }
-  o << "Code: " << CFErrorGetCode(err)
-    << " Domain: " << CFErrorGetDomain(err)
-    << " Desc: " << desc.get();
-  if (errorDesc) {
-    o << "(" << errorDesc << ")";
-  }
-  return o;
-}
-
-std::ostream& operator<<(std::ostream& o, CFRange range) {
-  return o << NSStringFromRange(
-             NSMakeRange(static_cast<NSUInteger>(range.location),
-                         static_cast<NSUInteger>(range.length)));
-}
-
-std::ostream& operator<<(std::ostream& o, id obj) {
-  return obj ? o << [obj description].UTF8String : o << "(nil)";
-}
-
-std::ostream& operator<<(std::ostream& o, NSRange range) {
-  return o << NSStringFromRange(range);
-}
-
-std::ostream& operator<<(std::ostream& o, SEL selector) {
-  return o << NSStringFromSelector(selector);
-}
-
-#if !BUILDFLAG(IS_IOS)
-std::ostream& operator<<(std::ostream& o, NSPoint point) {
-  return o << NSStringFromPoint(point);
-}
-std::ostream& operator<<(std::ostream& o, NSRect rect) {
-  return o << NSStringFromRect(rect);
-}
-std::ostream& operator<<(std::ostream& o, NSSize size) {
-  return o << NSStringFromSize(size);
-}
-#endif
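For reference, the behavioral difference between the two bundle-path helpers defined above, restated as a sketch; the concrete path is illustrative:

#include "base/files/file_path.h"
#include "base/mac/foundation_util.h"

void BundlePathSketch() {
  base::FilePath exec("/Foo/Bar.app/Contents/Baz.app/Contents/MacOS/baz");
  // Outermost enclosing bundle: "/Foo/Bar.app".
  base::FilePath outer = base::mac::GetAppBundlePath(exec);
  // Innermost enclosing bundle: "/Foo/Bar.app/Contents/Baz.app".
  base::FilePath inner = base::mac::GetInnermostAppBundlePath(exec);
  (void)outer;
  (void)inner;
}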
diff --git a/base/mac/foundation_util_unittest.mm b/base/mac/foundation_util_unittest.mm
deleted file mode 100644
index 9bc63e8..0000000
--- a/base/mac/foundation_util_unittest.mm
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/foundation_util.h"
-
-#include <CoreFoundation/CoreFoundation.h>
-#include <Foundation/Foundation.h>
-#include <limits.h>
-#include <stddef.h>
-
-#include "base/files/file_path.h"
-#include "base/format_macros.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/strings/stringprintf.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#import "testing/gtest_mac.h"
-
-namespace base::mac {
-
-TEST(FoundationUtilTest, CFCast) {
-  // Build out the CF types to be tested as empty containers.
-  ScopedCFTypeRef<CFTypeRef> test_array(
-      CFArrayCreate(nullptr, nullptr, 0, &kCFTypeArrayCallBacks));
-  ScopedCFTypeRef<CFTypeRef> test_array_mutable(
-      CFArrayCreateMutable(nullptr, 0, &kCFTypeArrayCallBacks));
-  ScopedCFTypeRef<CFTypeRef> test_bag(
-      CFBagCreate(nullptr, nullptr, 0, &kCFTypeBagCallBacks));
-  ScopedCFTypeRef<CFTypeRef> test_bag_mutable(
-      CFBagCreateMutable(nullptr, 0, &kCFTypeBagCallBacks));
-  CFTypeRef test_bool = kCFBooleanTrue;
-  ScopedCFTypeRef<CFTypeRef> test_data(CFDataCreate(nullptr, nullptr, 0));
-  ScopedCFTypeRef<CFTypeRef> test_data_mutable(CFDataCreateMutable(nullptr, 0));
-  ScopedCFTypeRef<CFTypeRef> test_date(CFDateCreate(nullptr, 0));
-  ScopedCFTypeRef<CFTypeRef> test_dict(CFDictionaryCreate(
-      nullptr, nullptr, nullptr, 0, &kCFCopyStringDictionaryKeyCallBacks,
-      &kCFTypeDictionaryValueCallBacks));
-  ScopedCFTypeRef<CFTypeRef> test_dict_mutable(CFDictionaryCreateMutable(
-      nullptr, 0, &kCFCopyStringDictionaryKeyCallBacks,
-      &kCFTypeDictionaryValueCallBacks));
-  int int_val = 256;
-  ScopedCFTypeRef<CFTypeRef> test_number(
-      CFNumberCreate(nullptr, kCFNumberIntType, &int_val));
-  CFTypeRef test_null = kCFNull;
-  ScopedCFTypeRef<CFTypeRef> test_set(
-      CFSetCreate(nullptr, nullptr, 0, &kCFTypeSetCallBacks));
-  ScopedCFTypeRef<CFTypeRef> test_set_mutable(
-      CFSetCreateMutable(nullptr, 0, &kCFTypeSetCallBacks));
-  ScopedCFTypeRef<CFTypeRef> test_str(CFStringCreateWithBytes(
-      nullptr, nullptr, 0, kCFStringEncodingASCII, false));
-  CFTypeRef test_str_const = CFSTR("hello");
-  ScopedCFTypeRef<CFTypeRef> test_str_mutable(
-      CFStringCreateMutable(nullptr, 0));
-
-  // Make sure the allocations of CF types are good.
-  EXPECT_TRUE(test_array);
-  EXPECT_TRUE(test_array_mutable);
-  EXPECT_TRUE(test_bag);
-  EXPECT_TRUE(test_bag_mutable);
-  EXPECT_TRUE(test_bool);
-  EXPECT_TRUE(test_data);
-  EXPECT_TRUE(test_data_mutable);
-  EXPECT_TRUE(test_date);
-  EXPECT_TRUE(test_dict);
-  EXPECT_TRUE(test_dict_mutable);
-  EXPECT_TRUE(test_number);
-  EXPECT_TRUE(test_null);
-  EXPECT_TRUE(test_set);
-  EXPECT_TRUE(test_set_mutable);
-  EXPECT_TRUE(test_str);
-  EXPECT_TRUE(test_str_const);
-  EXPECT_TRUE(test_str_mutable);
-
-  // Casting the CFTypeRef objects correctly provides the same pointer.
-  EXPECT_EQ(test_array, CFCast<CFArrayRef>(test_array));
-  EXPECT_EQ(test_array_mutable, CFCast<CFArrayRef>(test_array_mutable));
-  EXPECT_EQ(test_bag, CFCast<CFBagRef>(test_bag));
-  EXPECT_EQ(test_bag_mutable, CFCast<CFBagRef>(test_bag_mutable));
-  EXPECT_EQ(test_bool, CFCast<CFBooleanRef>(test_bool));
-  EXPECT_EQ(test_data, CFCast<CFDataRef>(test_data));
-  EXPECT_EQ(test_data_mutable, CFCast<CFDataRef>(test_data_mutable));
-  EXPECT_EQ(test_date, CFCast<CFDateRef>(test_date));
-  EXPECT_EQ(test_dict, CFCast<CFDictionaryRef>(test_dict));
-  EXPECT_EQ(test_dict_mutable, CFCast<CFDictionaryRef>(test_dict_mutable));
-  EXPECT_EQ(test_number, CFCast<CFNumberRef>(test_number));
-  EXPECT_EQ(test_null, CFCast<CFNullRef>(test_null));
-  EXPECT_EQ(test_set, CFCast<CFSetRef>(test_set));
-  EXPECT_EQ(test_set_mutable, CFCast<CFSetRef>(test_set_mutable));
-  EXPECT_EQ(test_str, CFCast<CFStringRef>(test_str));
-  EXPECT_EQ(test_str_const, CFCast<CFStringRef>(test_str_const));
-  EXPECT_EQ(test_str_mutable, CFCast<CFStringRef>(test_str_mutable));
-
-  // When given an incorrect CF cast, provide nullptr.
-  EXPECT_FALSE(CFCast<CFStringRef>(test_array));
-  EXPECT_FALSE(CFCast<CFStringRef>(test_array_mutable));
-  EXPECT_FALSE(CFCast<CFStringRef>(test_bag));
-  EXPECT_FALSE(CFCast<CFSetRef>(test_bag_mutable));
-  EXPECT_FALSE(CFCast<CFSetRef>(test_bool));
-  EXPECT_FALSE(CFCast<CFNullRef>(test_data));
-  EXPECT_FALSE(CFCast<CFDictionaryRef>(test_data_mutable));
-  EXPECT_FALSE(CFCast<CFDictionaryRef>(test_date));
-  EXPECT_FALSE(CFCast<CFNumberRef>(test_dict));
-  EXPECT_FALSE(CFCast<CFDateRef>(test_dict_mutable));
-  EXPECT_FALSE(CFCast<CFDataRef>(test_number));
-  EXPECT_FALSE(CFCast<CFDataRef>(test_null));
-  EXPECT_FALSE(CFCast<CFBooleanRef>(test_set));
-  EXPECT_FALSE(CFCast<CFBagRef>(test_set_mutable));
-  EXPECT_FALSE(CFCast<CFBagRef>(test_str));
-  EXPECT_FALSE(CFCast<CFArrayRef>(test_str_const));
-  EXPECT_FALSE(CFCast<CFArrayRef>(test_str_mutable));
-
-  // Giving a nullptr provides a nullptr.
-  EXPECT_FALSE(CFCast<CFArrayRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFBagRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFBooleanRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFDataRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFDateRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFDictionaryRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFNullRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFNumberRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFSetRef>(nullptr));
-  EXPECT_FALSE(CFCast<CFStringRef>(nullptr));
-
-  // CFCastStrict: correct cast results in correct pointer being returned.
-  EXPECT_EQ(test_array, CFCastStrict<CFArrayRef>(test_array));
-  EXPECT_EQ(test_array_mutable, CFCastStrict<CFArrayRef>(test_array_mutable));
-  EXPECT_EQ(test_bag, CFCastStrict<CFBagRef>(test_bag));
-  EXPECT_EQ(test_bag_mutable, CFCastStrict<CFBagRef>(test_bag_mutable));
-  EXPECT_EQ(test_bool, CFCastStrict<CFBooleanRef>(test_bool));
-  EXPECT_EQ(test_data, CFCastStrict<CFDataRef>(test_data));
-  EXPECT_EQ(test_data_mutable, CFCastStrict<CFDataRef>(test_data_mutable));
-  EXPECT_EQ(test_date, CFCastStrict<CFDateRef>(test_date));
-  EXPECT_EQ(test_dict, CFCastStrict<CFDictionaryRef>(test_dict));
-  EXPECT_EQ(test_dict_mutable,
-            CFCastStrict<CFDictionaryRef>(test_dict_mutable));
-  EXPECT_EQ(test_number, CFCastStrict<CFNumberRef>(test_number));
-  EXPECT_EQ(test_null, CFCastStrict<CFNullRef>(test_null));
-  EXPECT_EQ(test_set, CFCastStrict<CFSetRef>(test_set));
-  EXPECT_EQ(test_set_mutable, CFCastStrict<CFSetRef>(test_set_mutable));
-  EXPECT_EQ(test_str, CFCastStrict<CFStringRef>(test_str));
-  EXPECT_EQ(test_str_const, CFCastStrict<CFStringRef>(test_str_const));
-  EXPECT_EQ(test_str_mutable, CFCastStrict<CFStringRef>(test_str_mutable));
-
-  // CFCastStrict: Giving a nullptr provides a nullptr.
-  EXPECT_FALSE(CFCastStrict<CFArrayRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFBagRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFBooleanRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFDataRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFDateRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFDictionaryRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFNullRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFNumberRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFSetRef>(nullptr));
-  EXPECT_FALSE(CFCastStrict<CFStringRef>(nullptr));
-}
-
-TEST(FoundationUtilTest, ObjCCast) {
-  @autoreleasepool {
-    id test_array = @[];
-    id test_array_mutable = [NSMutableArray array];
-    id test_data = [NSData data];
-    id test_data_mutable = [NSMutableData dataWithCapacity:10];
-    id test_date = [NSDate date];
-    id test_dict = @{@"meaning" : @42};
-    id test_dict_mutable = [NSMutableDictionary dictionaryWithCapacity:10];
-    id test_number = @42;
-    id test_null = [NSNull null];
-    id test_set = [NSSet setWithObject:@"string object"];
-    id test_set_mutable = [NSMutableSet setWithCapacity:10];
-    id test_str = [NSString string];
-    id test_str_const = @"bonjour";
-    id test_str_mutable = [NSMutableString stringWithCapacity:10];
-
-    // Make sure the allocations of NS types are good.
-    EXPECT_TRUE(test_array);
-    EXPECT_TRUE(test_array_mutable);
-    EXPECT_TRUE(test_data);
-    EXPECT_TRUE(test_data_mutable);
-    EXPECT_TRUE(test_date);
-    EXPECT_TRUE(test_dict);
-    EXPECT_TRUE(test_dict_mutable);
-    EXPECT_TRUE(test_number);
-    EXPECT_TRUE(test_null);
-    EXPECT_TRUE(test_set);
-    EXPECT_TRUE(test_set_mutable);
-    EXPECT_TRUE(test_str);
-    EXPECT_TRUE(test_str_const);
-    EXPECT_TRUE(test_str_mutable);
-
-    // Casting the id correctly provides the same pointer.
-    EXPECT_EQ(test_array, ObjCCast<NSArray>(test_array));
-    EXPECT_EQ(test_array_mutable, ObjCCast<NSArray>(test_array_mutable));
-    EXPECT_EQ(test_data, ObjCCast<NSData>(test_data));
-    EXPECT_EQ(test_data_mutable, ObjCCast<NSData>(test_data_mutable));
-    EXPECT_EQ(test_date, ObjCCast<NSDate>(test_date));
-    EXPECT_EQ(test_dict, ObjCCast<NSDictionary>(test_dict));
-    EXPECT_EQ(test_dict_mutable, ObjCCast<NSDictionary>(test_dict_mutable));
-    EXPECT_EQ(test_number, ObjCCast<NSNumber>(test_number));
-    EXPECT_EQ(test_null, ObjCCast<NSNull>(test_null));
-    EXPECT_EQ(test_set, ObjCCast<NSSet>(test_set));
-    EXPECT_EQ(test_set_mutable, ObjCCast<NSSet>(test_set_mutable));
-    EXPECT_EQ(test_str, ObjCCast<NSString>(test_str));
-    EXPECT_EQ(test_str_const, ObjCCast<NSString>(test_str_const));
-    EXPECT_EQ(test_str_mutable, ObjCCast<NSString>(test_str_mutable));
-
-    // When given an incorrect ObjC cast, provide nil.
-    EXPECT_FALSE(ObjCCast<NSString>(test_array));
-    EXPECT_FALSE(ObjCCast<NSString>(test_array_mutable));
-    EXPECT_FALSE(ObjCCast<NSString>(test_data));
-    EXPECT_FALSE(ObjCCast<NSString>(test_data_mutable));
-    EXPECT_FALSE(ObjCCast<NSSet>(test_date));
-    EXPECT_FALSE(ObjCCast<NSSet>(test_dict));
-    EXPECT_FALSE(ObjCCast<NSNumber>(test_dict_mutable));
-    EXPECT_FALSE(ObjCCast<NSNull>(test_number));
-    EXPECT_FALSE(ObjCCast<NSDictionary>(test_null));
-    EXPECT_FALSE(ObjCCast<NSDictionary>(test_set));
-    EXPECT_FALSE(ObjCCast<NSDate>(test_set_mutable));
-    EXPECT_FALSE(ObjCCast<NSData>(test_str));
-    EXPECT_FALSE(ObjCCast<NSData>(test_str_const));
-    EXPECT_FALSE(ObjCCast<NSArray>(test_str_mutable));
-
-    // Giving a nil provides a nil.
-    EXPECT_FALSE(ObjCCast<NSArray>(nil));
-    EXPECT_FALSE(ObjCCast<NSData>(nil));
-    EXPECT_FALSE(ObjCCast<NSDate>(nil));
-    EXPECT_FALSE(ObjCCast<NSDictionary>(nil));
-    EXPECT_FALSE(ObjCCast<NSNull>(nil));
-    EXPECT_FALSE(ObjCCast<NSNumber>(nil));
-    EXPECT_FALSE(ObjCCast<NSSet>(nil));
-    EXPECT_FALSE(ObjCCast<NSString>(nil));
-
-    // ObjCCastStrict: correct cast results in correct pointer being returned.
-    EXPECT_EQ(test_array, ObjCCastStrict<NSArray>(test_array));
-    EXPECT_EQ(test_array_mutable, ObjCCastStrict<NSArray>(test_array_mutable));
-    EXPECT_EQ(test_data, ObjCCastStrict<NSData>(test_data));
-    EXPECT_EQ(test_data_mutable, ObjCCastStrict<NSData>(test_data_mutable));
-    EXPECT_EQ(test_date, ObjCCastStrict<NSDate>(test_date));
-    EXPECT_EQ(test_dict, ObjCCastStrict<NSDictionary>(test_dict));
-    EXPECT_EQ(test_dict_mutable,
-              ObjCCastStrict<NSDictionary>(test_dict_mutable));
-    EXPECT_EQ(test_number, ObjCCastStrict<NSNumber>(test_number));
-    EXPECT_EQ(test_null, ObjCCastStrict<NSNull>(test_null));
-    EXPECT_EQ(test_set, ObjCCastStrict<NSSet>(test_set));
-    EXPECT_EQ(test_set_mutable, ObjCCastStrict<NSSet>(test_set_mutable));
-    EXPECT_EQ(test_str, ObjCCastStrict<NSString>(test_str));
-    EXPECT_EQ(test_str_const, ObjCCastStrict<NSString>(test_str_const));
-    EXPECT_EQ(test_str_mutable, ObjCCastStrict<NSString>(test_str_mutable));
-
-    // ObjCCastStrict: Giving a nil provides a nil.
-    EXPECT_FALSE(ObjCCastStrict<NSArray>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSData>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSDate>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSDictionary>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSNull>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSNumber>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSSet>(nil));
-    EXPECT_FALSE(ObjCCastStrict<NSString>(nil));
-  }
-}
-
-TEST(FoundationUtilTest, GetValueFromDictionary) {
-  int one = 1, two = 2, three = 3;
-
-  ScopedCFTypeRef<CFNumberRef> cf_one(
-      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &one));
-  ScopedCFTypeRef<CFNumberRef> cf_two(
-      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &two));
-  ScopedCFTypeRef<CFNumberRef> cf_three(
-      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &three));
-
-  CFStringRef keys[] = { CFSTR("one"), CFSTR("two"), CFSTR("three") };
-  CFNumberRef values[] = { cf_one, cf_two, cf_three };
-
-  static_assert(std::size(keys) == std::size(values),
-                "keys and values arrays must have the same size");
-
-  ScopedCFTypeRef<CFDictionaryRef> test_dict(CFDictionaryCreate(
-      kCFAllocatorDefault, reinterpret_cast<const void**>(keys),
-      reinterpret_cast<const void**>(values), std::size(values),
-      &kCFCopyStringDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks));
-
-  // GetValueFromDictionary<>(_, _) should produce the correct
-  // expected output.
-  EXPECT_EQ(values[0],
-            GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("one")));
-  EXPECT_EQ(values[1],
-            GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("two")));
-  EXPECT_EQ(values[2],
-            GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("three")));
-
-  // Bad input should produce bad output.
-  EXPECT_FALSE(GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("four")));
-  EXPECT_FALSE(GetValueFromDictionary<CFStringRef>(test_dict, CFSTR("one")));
-}
-
-TEST(FoundationUtilTest, FilePathToNSURL) {
-  EXPECT_NSEQ(nil, FilePathToNSURL(FilePath()));
-  EXPECT_NSEQ([NSURL fileURLWithPath:@"/a/b"],
-              FilePathToNSURL(FilePath("/a/b")));
-}
-
-TEST(FoundationUtilTest, FilePathToNSString) {
-  EXPECT_NSEQ(nil, FilePathToNSString(FilePath()));
-  EXPECT_NSEQ(@"/a/b", FilePathToNSString(FilePath("/a/b")));
-}
-
-TEST(FoundationUtilTest, NSStringToFilePath) {
-  EXPECT_EQ(FilePath(), NSStringToFilePath(nil));
-  EXPECT_EQ(FilePath(), NSStringToFilePath(@""));
-  EXPECT_EQ(FilePath("/a/b"), NSStringToFilePath(@"/a/b"));
-}
-
-TEST(FoundationUtilTest, FilePathToCFURL) {
-  ScopedCFTypeRef<CFURLRef> url(CFURLCreateWithFileSystemPath(
-      nullptr, CFSTR("/a/b"), kCFURLPOSIXPathStyle, false));
-  EXPECT_TRUE(CFEqual(url.get(), FilePathToCFURL(FilePath("/a/b"))));
-}
-
-TEST(FoundationUtilTest, CFRangeToNSRange) {
-  NSRange range_out;
-  EXPECT_TRUE(CFRangeToNSRange(CFRangeMake(10, 5), &range_out));
-  EXPECT_EQ(10UL, range_out.location);
-  EXPECT_EQ(5UL, range_out.length);
-  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, 5), &range_out));
-  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(5, -1), &range_out));
-  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, -1), &range_out));
-  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MAX, LONG_MAX), &range_out));
-  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MIN, LONG_MAX), &range_out));
-}
-
-TEST(StringNumberConversionsTest, FormatNSInteger) {
-  // The PRI[dxu]NS macro assumes that NSInteger is a typedef to "int" on
-  // 32-bit architecture and a typedef to "long" on 64-bit architecture
-  // (respectively "unsigned int" and "unsigned long" for NSUInteger). Use
-  // pointer incompatibility to validate this at compilation.
-#if defined(ARCH_CPU_64_BITS)
-  typedef long FormatNSIntegerAsType;
-  typedef unsigned long FormatNSUIntegerAsType;
-#else
-  typedef int FormatNSIntegerAsType;
-  typedef unsigned int FormatNSUIntegerAsType;
-#endif  // defined(ARCH_CPU_64_BITS)
-
-  NSInteger some_nsinteger;
-  [[maybe_unused]] FormatNSIntegerAsType* pointer_to_some_nsinteger =
-      &some_nsinteger;
-
-  NSUInteger some_nsuinteger;
-  [[maybe_unused]] FormatNSUIntegerAsType* pointer_to_some_nsuinteger =
-      &some_nsuinteger;
-
-  // Check that format specifier works correctly for NSInteger.
-  const struct {
-    NSInteger value;
-    const char* expected;
-    const char* expected_hex;
-  } nsinteger_cases[] = {
-#if !defined(ARCH_CPU_64_BITS)
-    {12345678, "12345678", "bc614e"},
-    {-12345678, "-12345678", "ff439eb2"},
-#else
-    {12345678, "12345678", "bc614e"},
-    {-12345678, "-12345678", "ffffffffff439eb2"},
-    {137451299150l, "137451299150", "2000bc614e"},
-    {-137451299150l, "-137451299150", "ffffffdfff439eb2"},
-#endif  // !defined(ARCH_CPU_64_BITS)
-  };
-
-  for (const auto& nsinteger_case : nsinteger_cases) {
-    EXPECT_EQ(nsinteger_case.expected,
-              StringPrintf("%" PRIdNS, nsinteger_case.value));
-    EXPECT_EQ(nsinteger_case.expected_hex,
-              StringPrintf("%" PRIxNS, nsinteger_case.value));
-  }
-
-  // Check that format specifier works correctly for NSUInteger.
-  const struct {
-    NSUInteger value;
-    const char* expected;
-    const char* expected_hex;
-  } nsuinteger_cases[] = {
-#if !defined(ARCH_CPU_64_BITS)
-    {12345678u, "12345678", "bc614e"},
-    {4282621618u, "4282621618", "ff439eb2"},
-#else
-    {12345678u, "12345678", "bc614e"},
-    {4282621618u, "4282621618", "ff439eb2"},
-    {137451299150ul, "137451299150", "2000bc614e"},
-    {18446743936258252466ul, "18446743936258252466", "ffffffdfff439eb2"},
-#endif  // !defined(ARCH_CPU_64_BITS)
-  };
-
-  for (const auto& nsuinteger_case : nsuinteger_cases) {
-    EXPECT_EQ(nsuinteger_case.expected,
-              StringPrintf("%" PRIuNS, nsuinteger_case.value));
-    EXPECT_EQ(nsuinteger_case.expected_hex,
-              StringPrintf("%" PRIxNS, nsuinteger_case.value));
-  }
-}
-
-#define EXPECT_LOG_EQ(expected, val) \
-  EXPECT_EQ(expected, (std::ostringstream() << (val)).str())
-
-TEST(FoundationLoggingTest, ObjCObject) {
-  EXPECT_LOG_EQ("Hello, world!", @"Hello, world!");
-}
-
-TEST(FoundationLoggingTest, ObjCNil) {
-  EXPECT_LOG_EQ("(nil)", static_cast<id>(nil));
-}
-
-TEST(FoundationLoggingTest, CFRange) {
-  EXPECT_LOG_EQ("{0, 100}", CFRangeMake(0, 100));
-}
-
-TEST(FoundationLoggingTest, NSRange) {
-  EXPECT_LOG_EQ("{0, 100}", NSMakeRange(0, 100));
-}
-
-}  // namespace base::mac
diff --git a/base/mac/launch_application.h b/base/mac/launch_application.h
index 94dbea7..783a318 100644
--- a/base/mac/launch_application.h
+++ b/base/mac/launch_application.h
@@ -30,6 +30,13 @@
   bool activate = true;
   bool create_new_instance = false;
   bool prompt_user_if_needed = false;
+
+  // When this option is set to true, a private SPI is used to launch the app
+  // "invisibly". Apps launched this way do not show up to the user as running
+  // (their activation policy is NSApplicationActivationPolicyProhibited).
+  // Note that opening URLs in an already running hidden-in-background app
+  // appears to always cause the app to transition to the foreground, even if
+  // we've requested a background launch.
+  bool hidden_in_background = false;
 };
 
 using LaunchApplicationCallback =
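As a quick illustration of the new option, a hypothetical sketch (not part of the imported change; the bundle path, flag, and callback body are made up) of how a caller in an Objective-C++ file might request a background launch:

#include <vector>

#include "base/files/file_path.h"
#include "base/functional/bind.h"
#include "base/mac/launch_application.h"

void LaunchExampleHelperHidden() {
  // Launch the app "invisibly" and do not activate it. The callback receives
  // either a running application (on success) or an error (on failure).
  base::mac::LaunchApplication(
      base::FilePath("/Applications/Example.app"),  // hypothetical bundle
      std::vector<std::string>{"--example-flag"},   // command line arguments
      /*url_specs=*/{},
      {.activate = false, .hidden_in_background = true},
      base::BindOnce([](NSRunningApplication* app, NSError* error) {
        // On success `app` is non-nil; otherwise `error` describes the cause.
      }));
}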
diff --git a/base/mac/launch_application.mm b/base/mac/launch_application.mm
index 73460ee..4e8cd0b 100644
--- a/base/mac/launch_application.mm
+++ b/base/mac/launch_application.mm
@@ -4,10 +4,14 @@
 
 #import "base/mac/launch_application.h"
 
+#include "base/apple/bridging.h"
+#include "base/apple/foundation_util.h"
 #include "base/command_line.h"
 #include "base/functional/callback.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
+#include "base/mac/launch_services_spi.h"
+#include "base/mac/mac_util.h"
+#include "base/metrics/histogram_functions.h"
 #include "base/strings/sys_string_conversions.h"
 #include "base/types/expected.h"
 
@@ -15,6 +19,19 @@
 
 namespace {
 
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class LaunchResult {
+  kSuccess = 0,
+  kSuccessDespiteError = 1,
+  kFailure = 2,
+  kMaxValue = kFailure,
+};
+
+void LogLaunchResult(LaunchResult result) {
+  UmaHistogramEnumeration("Mac.LaunchApplicationResult", result);
+}
+
 NSArray* CommandLineArgsToArgsArray(const CommandLineArgs& command_line_args) {
   if (const CommandLine* command_line =
           absl::get_if<CommandLine>(&command_line_args)) {
@@ -60,6 +77,81 @@
   return config;
 }
 
+NSDictionary* GetOpenOptions(LaunchApplicationOptions options,
+                             const CommandLineArgs& command_line_args) {
+  NSDictionary* dict = @{
+    base::apple::CFToNSPtrCast(_kLSOpenOptionArgumentsKey) :
+        CommandLineArgsToArgsArray(command_line_args),
+    base::apple::CFToNSPtrCast(_kLSOpenOptionHideKey) :
+        @(options.hidden_in_background),
+    base::apple::CFToNSPtrCast(_kLSOpenOptionBackgroundLaunchKey) :
+        @(options.hidden_in_background),
+    base::apple::CFToNSPtrCast(_kLSOpenOptionAddToRecentsKey) :
+        @(!options.hidden_in_background),
+    base::apple::CFToNSPtrCast(_kLSOpenOptionActivateKey) : @(options.activate),
+    base::apple::CFToNSPtrCast(_kLSOpenOptionPreferRunningInstanceKey) :
+        @(!options.create_new_instance),
+  };
+  return dict;
+}
+
+// On macOS 11 and 12, launching sometimes reports an error even though the
+// launch actually succeeded. This helper returns true for the error codes we
+// have observed for which scanning the list of running applications appears
+// to be a usable workaround.
+bool ShouldScanRunningAppsForError(NSError* error) {
+  if (!error) {
+    return false;
+  }
+  if (error.domain == NSCocoaErrorDomain &&
+      error.code == NSFileReadUnknownError) {
+    return true;
+  }
+  if (error.domain == NSOSStatusErrorDomain && error.code == procNotFound) {
+    return true;
+  }
+  return false;
+}
+
+void LogResultAndInvokeCallback(const base::FilePath& app_bundle_path,
+                                bool create_new_instance,
+                                LaunchApplicationCallback callback,
+                                NSRunningApplication* app,
+                                NSError* error) {
+  // On macOS 11 and 12, launching sometimes reports an error even though the
+  // launch actually succeeded. To work around such cases, check if we can
+  // find a running application matching the app we were trying to launch.
+  // Only do this if `options.create_new_instance` is false though, as
+  // otherwise we wouldn't know which instance to return.
+  if ((MacOSMajorVersion() == 11 || MacOSMajorVersion() == 12) &&
+      !create_new_instance && !app && ShouldScanRunningAppsForError(error)) {
+    NSArray<NSRunningApplication*>* all_apps =
+        NSWorkspace.sharedWorkspace.runningApplications;
+    for (NSRunningApplication* running_app in all_apps) {
+      if (apple::NSURLToFilePath(running_app.bundleURL) == app_bundle_path) {
+        LOG(ERROR) << "Launch succeeded despite error: "
+                   << base::SysNSStringToUTF8(error.localizedDescription);
+        app = running_app;
+        break;
+      }
+    }
+    if (app) {
+      error = nil;
+    }
+    LogLaunchResult(app ? LaunchResult::kSuccessDespiteError
+                        : LaunchResult::kFailure);
+  } else {
+    LogLaunchResult(app ? LaunchResult::kSuccess : LaunchResult::kFailure);
+  }
+
+  if (error) {
+    LOG(ERROR) << base::SysNSStringToUTF8(error.localizedDescription);
+    std::move(callback).Run(nil, error);
+  } else {
+    std::move(callback).Run(app, nil);
+  }
+}
+
 }  // namespace
 
 void LaunchApplication(const base::FilePath& app_bundle_path,
@@ -67,9 +159,11 @@
                        const std::vector<std::string>& url_specs,
                        LaunchApplicationOptions options,
                        LaunchApplicationCallback callback) {
-  __block LaunchApplicationCallback callback_block_access = std::move(callback);
+  __block LaunchApplicationCallback callback_block_access =
+      base::BindOnce(&LogResultAndInvokeCallback, app_bundle_path,
+                     options.create_new_instance, std::move(callback));
 
-  NSURL* bundle_url = FilePathToNSURL(app_bundle_path);
+  NSURL* bundle_url = apple::FilePathToNSURL(app_bundle_path);
   if (!bundle_url) {
     dispatch_async(dispatch_get_main_queue(), ^{
       std::move(callback_block_access)
@@ -89,15 +183,32 @@
     }
   }
 
+  if (options.hidden_in_background) {
+    _LSOpenCompletionHandler action_block =
+        ^void(LSASNRef asn, Boolean success, CFErrorRef cf_error) {
+          NSRunningApplication* app = nil;
+          if (asn) {
+            app = [[NSRunningApplication alloc]
+                initWithApplicationSerialNumber:asn];
+          }
+          NSError* error = base::apple::CFToNSPtrCast(cf_error);
+          dispatch_async(dispatch_get_main_queue(), ^{
+            std::move(callback_block_access).Run(app, error);
+          });
+        };
+
+    _LSOpenURLsWithCompletionHandler(
+        base::apple::NSToCFPtrCast(ns_urls ? ns_urls : @[]),
+        apple::FilePathToCFURL(app_bundle_path).get(),
+        base::apple::NSToCFPtrCast(GetOpenOptions(options, command_line_args)),
+        action_block);
+    return;
+  }
+
   void (^action_block)(NSRunningApplication*, NSError*) =
       ^void(NSRunningApplication* app, NSError* error) {
         dispatch_async(dispatch_get_main_queue(), ^{
-          if (error) {
-            LOG(ERROR) << base::SysNSStringToUTF8(error.localizedDescription);
-            std::move(callback_block_access).Run(nil, error);
-          } else {
-            std::move(callback_block_access).Run(app, nil);
-          }
+          std::move(callback_block_access).Run(app, error);
         });
       };
 
diff --git a/base/mac/launch_application_unittest.mm b/base/mac/launch_application_unittest.mm
new file mode 100644
index 0000000..cd5d7c1
--- /dev/null
+++ b/base/mac/launch_application_unittest.mm
@@ -0,0 +1,583 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/launch_application.h"
+
+#include <sys/select.h>
+
+#include "base/apple/bridging.h"
+#include "base/apple/foundation_util.h"
+#include "base/base_paths.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/functional/callback_helpers.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/path_service.h"
+#include "base/process/launch.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/task/bind_post_task.h"
+#include "base/task/thread_pool.h"
+#include "base/test/bind.h"
+#include "base/test/task_environment.h"
+#include "base/test/test_future.h"
+#include "base/threading/platform_thread.h"
+#include "base/types/expected.h"
+#include "base/uuid.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#import "testing/gtest_mac.h"
+
+namespace base::mac {
+namespace {
+
+// Reads XML-encoded property lists from `fifo_path`, calling `callback` for
+// each successfully parsed dictionary. Loops until the string
+// "<!FINISHED>" is read from `fifo_path`.
+void ReadLaunchEventsFromFifo(
+    const FilePath& fifo_path,
+    RepeatingCallback<void(NSDictionary* event)> callback) {
+  File f(fifo_path, File::FLAG_OPEN | File::FLAG_READ);
+  std::string data;
+  while (true) {
+    char buf[4096];
+    int read_count = f.ReadAtCurrentPosNoBestEffort(buf, sizeof buf);
+    if (read_count) {
+      data += std::string(buf, read_count);
+      // Assume that at any point the beginning of the data buffer is the start
+      // of a plist. Search for the first end, and parse that substring.
+      size_t end_of_plist;
+      while ((end_of_plist = data.find("</plist>")) != std::string::npos) {
+        std::string plist = data.substr(0, end_of_plist + 8);
+        data = data.substr(plist.length());
+        NSDictionary* event = apple::ObjCCastStrict<NSDictionary>(
+            SysUTF8ToNSString(TrimWhitespaceASCII(plist, TRIM_ALL))
+                .propertyList);
+        callback.Run(event);
+      }
+      // No more plists found; check if the termination marker was sent.
+      if (data.find("<!FINISHED>") != std::string::npos) {
+        break;
+      }
+    } else {
+      // No data was read, wait for the file descriptor to become readable
+      // again.
+      fd_set fds;
+      FD_ZERO(&fds);
+      FD_SET(f.GetPlatformFile(), &fds);
+      select(FD_SETSIZE, &fds, nullptr, nullptr, nullptr);
+    }
+  }
+}
+
+// This test harness creates an app bundle with a random bundle identifier to
+// avoid conflicts with other concurrently running tests. The binary in this
+// app bundle writes various events to a named pipe, allowing the tests here to
+// verify that the app received the correct events.
+class LaunchApplicationTest : public testing::Test {
+ public:
+  void SetUp() override {
+    helper_bundle_id_ =
+        SysUTF8ToNSString("org.chromium.LaunchApplicationTestHelper." +
+                          Uuid::GenerateRandomV4().AsLowercaseString());
+
+    FilePath data_root;
+    ASSERT_TRUE(PathService::Get(DIR_OUT_TEST_DATA_ROOT, &data_root));
+    const FilePath helper_app_executable =
+        data_root.AppendASCII("launch_application_test_helper");
+
+    // Put the helper app inside the home dir, as the default temp location
+    // gets special treatment by Launch Services, affecting the behavior of
+    // some of these tests.
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDirUnderPath(base::GetHomeDir()));
+
+    helper_app_bundle_path_ =
+        temp_dir_.GetPath().AppendASCII("launch_application_test_helper.app");
+
+    const base::FilePath destination_contents_path =
+        helper_app_bundle_path_.AppendASCII("Contents");
+    const base::FilePath destination_executable_path =
+        destination_contents_path.AppendASCII("MacOS");
+
+    // First create the .app bundle directory structure.
+    // Use NSFileManager so that the permissions can be set appropriately. The
+    // base::CreateDirectory() routine forces mode 0700.
+    NSError* error = nil;
+    ASSERT_TRUE([NSFileManager.defaultManager
+               createDirectoryAtURL:base::apple::FilePathToNSURL(
+                                        destination_executable_path)
+        withIntermediateDirectories:YES
+                         attributes:@{
+                           NSFilePosixPermissions : @(0755)
+                         }
+                              error:&error])
+        << SysNSStringToUTF8(error.description);
+
+    // Copy the executable file.
+    helper_app_executable_path_ =
+        destination_executable_path.Append(helper_app_executable.BaseName());
+    ASSERT_TRUE(
+        base::CopyFile(helper_app_executable, helper_app_executable_path_));
+
+    // Write the PkgInfo file.
+    constexpr char kPkgInfoData[] = "APPL????";
+    ASSERT_TRUE(base::WriteFile(
+        destination_contents_path.AppendASCII("PkgInfo"), kPkgInfoData));
+
+#if defined(ADDRESS_SANITIZER)
+    const base::FilePath asan_library_path =
+        data_root.AppendASCII("libclang_rt.asan_osx_dynamic.dylib");
+    ASSERT_TRUE(base::CopyFile(
+        asan_library_path,
+        destination_executable_path.Append(asan_library_path.BaseName())));
+#endif
+
+    // Generate the Plist file
+    NSDictionary* plist = @{
+      @"CFBundleExecutable" :
+          apple::FilePathToNSString(helper_app_executable.BaseName()),
+      @"CFBundleIdentifier" : helper_bundle_id_,
+    };
+    ASSERT_TRUE([plist
+        writeToURL:apple::FilePathToNSURL(
+                       destination_contents_path.AppendASCII("Info.plist"))
+             error:nil]);
+
+    // Register the app with LaunchServices.
+    LSRegisterURL(base::apple::FilePathToCFURL(helper_app_bundle_path_).get(),
+                  true);
+
+    // Ensure app was registered with LaunchServices. Sometimes it takes a
+    // little bit of time for this to happen, and some tests might fail if the
+    // app wasn't registered yet.
+    while (true) {
+      NSArray<NSURL*>* apps = nil;
+      if (@available(macOS 12.0, *)) {
+        apps = [[NSWorkspace sharedWorkspace]
+            URLsForApplicationsWithBundleIdentifier:helper_bundle_id_];
+      } else {
+        apps =
+            apple::CFToNSOwnershipCast(LSCopyApplicationURLsForBundleIdentifier(
+                apple::NSToCFPtrCast(helper_bundle_id_), /*outError=*/nullptr));
+      }
+      if (apps.count > 0) {
+        break;
+      }
+      PlatformThread::Sleep(Milliseconds(50));
+    }
+
+    // Set up a fifo to receive logs from the helper app.
+    helper_app_fifo_path_ =
+        temp_dir_.GetPath().AppendASCII("launch_application_test_helper.fifo");
+    ASSERT_EQ(0, mkfifo(helper_app_fifo_path_.value().c_str(),
+                        S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP));
+
+    // Create array to store received events in, and start listening for events.
+    launch_events_ = [[NSMutableArray alloc] init];
+    base::ThreadPool::PostTask(
+        FROM_HERE, {MayBlock()},
+        base::BindOnce(
+            &ReadLaunchEventsFromFifo, helper_app_fifo_path_,
+            BindPostTaskToCurrentDefault(BindRepeating(
+                &LaunchApplicationTest::OnLaunchEvent, Unretained(this)))));
+  }
+
+  void TearDown() override {
+    if (temp_dir_.IsValid()) {
+      // Make sure fifo reading task stops reading/waiting.
+      WriteFile(helper_app_fifo_path_, "<!FINISHED>");
+
+      // Make sure all apps that were launched by this test are terminated.
+      NSArray<NSRunningApplication*>* apps =
+          NSWorkspace.sharedWorkspace.runningApplications;
+      for (NSRunningApplication* app in apps) {
+        if (temp_dir_.GetPath().IsParent(
+                apple::NSURLToFilePath(app.bundleURL)) ||
+            [app.bundleIdentifier isEqualToString:helper_bundle_id_]) {
+          [app forceTerminate];
+        }
+      }
+
+      // And make sure the temp dir was successfully deleted.
+      EXPECT_TRUE(temp_dir_.Delete());
+    }
+  }
+
+  // Calls `LaunchApplication` with the given parameters, expecting the launch
+  // to succeed. Returns the `NSRunningApplication*` that was passed to the
+  // callback given to `LaunchApplication`.
+  NSRunningApplication* LaunchApplicationSyncExpectSuccess(
+      const FilePath& app_bundle_path,
+      const CommandLineArgs& command_line_args,
+      const std::vector<std::string>& url_specs,
+      LaunchApplicationOptions options) {
+    test::TestFuture<NSRunningApplication*, NSError*> result;
+    LaunchApplication(app_bundle_path, command_line_args, url_specs, options,
+                      result.GetCallback());
+    EXPECT_FALSE(result.Get<1>());
+    EXPECT_TRUE(result.Get<0>());
+    return result.Get<0>();
+  }
+
+  // Similar to the above method, except that this version expects the launch to
+  // fail, returning the error.
+  NSError* LaunchApplicationSyncExpectError(
+      const FilePath& app_bundle_path,
+      const CommandLineArgs& command_line_args,
+      const std::vector<std::string>& url_specs,
+      LaunchApplicationOptions options) {
+    test::TestFuture<NSRunningApplication*, NSError*> result;
+    LaunchApplication(app_bundle_path, command_line_args, url_specs, options,
+                      result.GetCallback());
+    EXPECT_FALSE(result.Get<0>());
+    EXPECT_TRUE(result.Get<1>());
+    return result.Get<1>();
+  }
+
+  // Waits for the total number of received launch events to reach at least
+  // `expected_count`.
+  void WaitForLaunchEvents(unsigned expected_count) {
+    if (LaunchEventCount() >= expected_count) {
+      return;
+    }
+    base::RunLoop loop;
+    launch_event_callback_ = BindLambdaForTesting([&]() {
+      if (LaunchEventCount() >= expected_count) {
+        launch_event_callback_ = NullCallback();
+        loop.Quit();
+      }
+    });
+    loop.Run();
+  }
+
+  unsigned LaunchEventCount() { return launch_events_.count; }
+  NSString* LaunchEventName(unsigned i) {
+    if (i >= launch_events_.count) {
+      return nil;
+    }
+    return apple::ObjCCastStrict<NSString>(launch_events_[i][@"name"]);
+  }
+  NSDictionary* LaunchEventData(unsigned i) {
+    if (i >= launch_events_.count) {
+      return nil;
+    }
+    return apple::ObjCCastStrict<NSDictionary>(launch_events_[i][@"data"]);
+  }
+
+ protected:
+  ScopedTempDir temp_dir_;
+
+  NSString* helper_bundle_id_;
+  FilePath helper_app_bundle_path_;
+  FilePath helper_app_executable_path_;
+  FilePath helper_app_fifo_path_;
+
+  NSMutableArray<NSDictionary*>* launch_events_;
+  RepeatingClosure launch_event_callback_;
+
+  test::TaskEnvironment task_environment_{
+      test::TaskEnvironment::MainThreadType::UI};
+
+ private:
+  void OnLaunchEvent(NSDictionary* event) {
+    NSLog(@"Event: %@", event);
+    [launch_events_ addObject:event];
+    if (launch_event_callback_) {
+      launch_event_callback_.Run();
+    }
+  }
+};
+
+TEST_F(LaunchApplicationTest, Basic) {
+  std::vector<std::string> command_line_args;
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {}, {});
+  ASSERT_TRUE(app);
+  EXPECT_NSEQ(app.bundleIdentifier, helper_bundle_id_);
+  EXPECT_EQ(apple::NSURLToFilePath(app.bundleURL), helper_app_bundle_path_);
+
+  WaitForLaunchEvents(1);
+  EXPECT_NSEQ(LaunchEventName(0), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(0)[@"activationPolicy"],
+              @(NSApplicationActivationPolicyRegular));
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyRegular);
+  EXPECT_NSEQ(LaunchEventData(0)[@"commandLine"],
+              (@[ apple::FilePathToNSString(helper_app_executable_path_) ]));
+  EXPECT_NSEQ(LaunchEventData(0)[@"processIdentifier"],
+              @(app.processIdentifier));
+}
+
+TEST_F(LaunchApplicationTest, BundleDoesntExist) {
+  std::vector<std::string> command_line_args;
+  NSError* err = LaunchApplicationSyncExpectError(
+      temp_dir_.GetPath().AppendASCII("notexists.app"), command_line_args, {},
+      {});
+  ASSERT_TRUE(err);
+  err = LaunchApplicationSyncExpectError(
+      temp_dir_.GetPath().AppendASCII("notexists.app"), command_line_args, {},
+      {.hidden_in_background = true});
+  ASSERT_TRUE(err);
+}
+
+TEST_F(LaunchApplicationTest, BundleCorrupt) {
+  base::DeleteFile(helper_app_executable_path_);
+  std::vector<std::string> command_line_args;
+  NSError* err = LaunchApplicationSyncExpectError(helper_app_bundle_path_,
+                                                  command_line_args, {}, {});
+  ASSERT_TRUE(err);
+  err = LaunchApplicationSyncExpectError(helper_app_bundle_path_,
+                                         command_line_args, {},
+                                         {.hidden_in_background = true});
+  ASSERT_TRUE(err);
+}
+
+TEST_F(LaunchApplicationTest, CommandLineArgs_StringVector) {
+  std::vector<std::string> command_line_args = {"--foo", "bar", "-v"};
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {}, {});
+  ASSERT_TRUE(app);
+
+  WaitForLaunchEvents(1);
+  EXPECT_NSEQ(LaunchEventName(0), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(0)[@"commandLine"], (@[
+                apple::FilePathToNSString(helper_app_executable_path_),
+                @"--foo", @"bar", @"-v"
+              ]));
+}
+
+TEST_F(LaunchApplicationTest, CommandLineArgs_BaseCommandLine) {
+  CommandLine command_line(CommandLine::NO_PROGRAM);
+  command_line.AppendSwitchASCII("foo", "bar");
+  command_line.AppendSwitch("v");
+  command_line.AppendSwitchPath("path", FilePath("/tmp"));
+
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line, {}, {});
+  ASSERT_TRUE(app);
+
+  WaitForLaunchEvents(1);
+  EXPECT_NSEQ(LaunchEventName(0), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(0)[@"commandLine"], (@[
+                apple::FilePathToNSString(helper_app_executable_path_),
+                @"--foo=bar", @"--v", @"--path=/tmp"
+              ]));
+}
+
+TEST_F(LaunchApplicationTest, UrlSpecs) {
+  std::vector<std::string> command_line_args;
+  std::vector<std::string> urls = {"https://example.com",
+                                   "x-chrome-launch://1"};
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, urls, {});
+  ASSERT_TRUE(app);
+  WaitForLaunchEvents(3);
+
+  EXPECT_NSEQ(LaunchEventName(0), @"openURLs");
+  EXPECT_NSEQ(LaunchEventName(1), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventName(2), @"openURLs");
+
+  if (MacOSMajorVersion() == 11) {
+    // macOS 11 (and only macOS 11) appears to sometimes trigger the openURLs
+    // calls in reverse order.
+    std::vector<std::string> received_urls;
+    for (NSString* url in apple::ObjCCastStrict<NSArray>(
+             LaunchEventData(0)[@"urls"])) {
+      received_urls.push_back(SysNSStringToUTF8(url));
+    }
+    EXPECT_EQ(received_urls.size(), 1u);
+    for (NSString* url in apple::ObjCCastStrict<NSArray>(
+             LaunchEventData(2)[@"urls"])) {
+      received_urls.push_back(SysNSStringToUTF8(url));
+    }
+    EXPECT_THAT(received_urls, testing::UnorderedElementsAreArray(urls));
+  } else {
+    EXPECT_NSEQ(LaunchEventData(0)[@"urls"], @[ @"https://example.com" ]);
+    EXPECT_NSEQ(LaunchEventData(2)[@"urls"], @[ @"x-chrome-launch://1" ]);
+  }
+}
+
+TEST_F(LaunchApplicationTest, CreateNewInstance) {
+  std::vector<std::string> command_line_args;
+  NSRunningApplication* app1 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {},
+      {.create_new_instance = false});
+  WaitForLaunchEvents(1);
+  EXPECT_NSEQ(LaunchEventName(0), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(0)[@"processIdentifier"],
+              @(app1.processIdentifier));
+
+  NSRunningApplication* app2 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"x-chrome-launch://0"},
+      {.create_new_instance = false});
+  EXPECT_NSEQ(app1, app2);
+  EXPECT_EQ(app1.processIdentifier, app2.processIdentifier);
+  WaitForLaunchEvents(2);
+  EXPECT_NSEQ(LaunchEventName(1), @"openURLs");
+  EXPECT_NSEQ(LaunchEventData(1)[@"processIdentifier"],
+              @(app2.processIdentifier));
+
+  NSRunningApplication* app3 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"x-chrome-launch://1"},
+      {.create_new_instance = true});
+  EXPECT_NSNE(app1, app3);
+  EXPECT_NE(app1.processIdentifier, app3.processIdentifier);
+  WaitForLaunchEvents(4);
+  EXPECT_NSEQ(LaunchEventName(2), @"openURLs");
+  EXPECT_NSEQ(LaunchEventName(3), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(3)[@"processIdentifier"],
+              @(app3.processIdentifier));
+}
+
+TEST_F(LaunchApplicationTest, HiddenInBackground) {
+  std::vector<std::string> command_line_args = {"--test", "--foo"};
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {},
+      {.hidden_in_background = true});
+  ASSERT_TRUE(app);
+  EXPECT_NSEQ(app.bundleIdentifier, helper_bundle_id_);
+  EXPECT_EQ(helper_app_bundle_path_, apple::NSURLToFilePath(app.bundleURL));
+
+  WaitForLaunchEvents(1);
+  EXPECT_NSEQ(LaunchEventName(0), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(0)[@"activationPolicy"],
+              @(NSApplicationActivationPolicyProhibited));
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyProhibited);
+  EXPECT_NSEQ(LaunchEventData(0)[@"commandLine"], (@[
+                apple::FilePathToNSString(helper_app_executable_path_),
+                @"--test", @"--foo"
+              ]));
+  EXPECT_NSEQ(LaunchEventData(0)[@"processIdentifier"],
+              @(app.processIdentifier));
+
+  NSRunningApplication* app2 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {},
+      {.create_new_instance = false, .hidden_in_background = true});
+  EXPECT_NSEQ(app, app2);
+  EXPECT_EQ(app.processIdentifier, app2.processIdentifier);
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyProhibited);
+  EXPECT_EQ(app2.activationPolicy, NSApplicationActivationPolicyProhibited);
+  // Launching without opening anything should not trigger any launch events.
+
+  // Opening a URL in a new instance should leave both instances in the
+  // background.
+  NSRunningApplication* app3 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"x-chrome-launch://2"},
+      {.create_new_instance = true, .hidden_in_background = true});
+  EXPECT_NSNE(app, app3);
+  EXPECT_NE(app.processIdentifier, app3.processIdentifier);
+  WaitForLaunchEvents(3);
+  EXPECT_NSEQ(LaunchEventName(1), @"openURLs");
+  EXPECT_NSEQ(LaunchEventName(2), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(2)[@"processIdentifier"],
+              @(app3.processIdentifier));
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyProhibited);
+  EXPECT_EQ(app2.activationPolicy, NSApplicationActivationPolicyProhibited);
+  EXPECT_EQ(app3.activationPolicy, NSApplicationActivationPolicyProhibited);
+}
+
+TEST_F(LaunchApplicationTest,
+       HiddenInBackground_OpenUrlChangesActivationPolicy) {
+  std::vector<std::string> command_line_args = {"--test", "--foo"};
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {},
+      {.hidden_in_background = true});
+  ASSERT_TRUE(app);
+  EXPECT_NSEQ(app.bundleIdentifier, helper_bundle_id_);
+  EXPECT_EQ(helper_app_bundle_path_, apple::NSURLToFilePath(app.bundleURL));
+
+  WaitForLaunchEvents(1);
+  EXPECT_NSEQ(LaunchEventName(0), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(0)[@"activationPolicy"],
+              @(NSApplicationActivationPolicyProhibited));
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyProhibited);
+  EXPECT_NSEQ(LaunchEventData(0)[@"commandLine"], (@[
+                apple::FilePathToNSString(helper_app_executable_path_),
+                @"--test", @"--foo"
+              ]));
+  EXPECT_NSEQ(LaunchEventData(0)[@"processIdentifier"],
+              @(app.processIdentifier));
+
+  NSRunningApplication* app2 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"chrome://app-launch/0"},
+      {.create_new_instance = false, .hidden_in_background = true});
+  EXPECT_NSEQ(app, app2);
+  EXPECT_EQ(app.processIdentifier, app2.processIdentifier);
+  // Unexpectedly, opening a URL seems to always change the activation
+  // policy.
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyRegular);
+  EXPECT_EQ(app2.activationPolicy, NSApplicationActivationPolicyRegular);
+  WaitForLaunchEvents(3);
+  EXPECT_THAT(
+      std::vector<std::string>({SysNSStringToUTF8(LaunchEventName(1)),
+                                SysNSStringToUTF8(LaunchEventName(2))}),
+      testing::UnorderedElementsAre("activationPolicyChanged", "openURLs"));
+}
+
+TEST_F(LaunchApplicationTest, HiddenInBackground_TransitionToForeground) {
+  std::vector<std::string> command_line_args;
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"x-chrome-launch://1"},
+      {.hidden_in_background = true});
+  ASSERT_TRUE(app);
+
+  WaitForLaunchEvents(2);
+  EXPECT_NSEQ(LaunchEventName(0), @"openURLs");
+  EXPECT_NSEQ(LaunchEventName(1), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(1)[@"activationPolicy"],
+              @(NSApplicationActivationPolicyProhibited));
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyProhibited);
+  EXPECT_NSEQ(LaunchEventData(1)[@"processIdentifier"],
+              @(app.processIdentifier));
+
+  // Second launch with hidden_in_background set to false should cause the first
+  // app to switch activation policy.
+  NSRunningApplication* app2 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {},
+      {.hidden_in_background = false});
+  EXPECT_NSEQ(app, app2);
+  WaitForLaunchEvents(3);
+  EXPECT_NSEQ(LaunchEventName(2), @"activationPolicyChanged");
+  EXPECT_NSEQ(LaunchEventData(2)[@"activationPolicy"],
+              @(NSApplicationActivationPolicyRegular));
+  EXPECT_EQ(app2.activationPolicy, NSApplicationActivationPolicyRegular);
+}
+
+TEST_F(LaunchApplicationTest, HiddenInBackground_AlreadyInForeground) {
+  std::vector<std::string> command_line_args;
+  NSRunningApplication* app = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"x-chrome-launch://1"},
+      {.hidden_in_background = false});
+  ASSERT_TRUE(app);
+
+  WaitForLaunchEvents(2);
+  EXPECT_NSEQ(LaunchEventName(0), @"openURLs");
+  EXPECT_NSEQ(LaunchEventName(1), @"applicationDidFinishLaunching");
+  EXPECT_NSEQ(LaunchEventData(1)[@"activationPolicy"],
+              @(NSApplicationActivationPolicyRegular));
+  EXPECT_EQ(app.activationPolicy, NSApplicationActivationPolicyRegular);
+  EXPECT_NSEQ(LaunchEventData(1)[@"processIdentifier"],
+              @(app.processIdentifier));
+
+  // Second (and third) launch with hidden_in_background set to true should
+  // reuse the existing app and keep it visible.
+  NSRunningApplication* app2 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {},
+      {.hidden_in_background = true});
+  EXPECT_NSEQ(app, app2);
+  EXPECT_EQ(app2.activationPolicy, NSApplicationActivationPolicyRegular);
+  NSRunningApplication* app3 = LaunchApplicationSyncExpectSuccess(
+      helper_app_bundle_path_, command_line_args, {"x-chrome-launch://23"},
+      {.hidden_in_background = true});
+  EXPECT_NSEQ(app, app3);
+  WaitForLaunchEvents(3);
+  EXPECT_NSEQ(LaunchEventName(2), @"openURLs");
+  EXPECT_NSEQ(LaunchEventData(2)[@"processIdentifier"],
+              @(app.processIdentifier));
+  EXPECT_EQ(app3.activationPolicy, NSApplicationActivationPolicyRegular);
+}
+
+}  // namespace
+}  // namespace base::mac
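For context, the launch events consumed above are plists with a "name" and a "data" entry. Based on the expectations in these tests, a parsed event is assumed to look roughly like the following Objective-C literal; the concrete values are placeholders, and the helper binary that produces the events is not part of this change:

NSDictionary* example_event = @{
  @"name" : @"applicationDidFinishLaunching",
  @"data" : @{
    @"activationPolicy" : @(NSApplicationActivationPolicyRegular),
    @"commandLine" : @[ @"/path/to/helper", @"--foo" ],  // placeholder values
    @"processIdentifier" : @(12345),                     // placeholder pid
  },
};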
diff --git a/base/mac/launch_services_spi.h b/base/mac/launch_services_spi.h
new file mode 100644
index 0000000..7ec4410
--- /dev/null
+++ b/base/mac/launch_services_spi.h
@@ -0,0 +1,38 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_LAUNCH_SERVICES_SPI_H_
+#define BASE_MAC_LAUNCH_SERVICES_SPI_H_
+
+#import <AppKit/AppKit.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+// Private SPIs exposed by LaunchServices. Largely derived from their usage in
+// open-source WebKit code and some inspection of the LaunchServices binary.
+
+extern "C" {
+
+using LSASNRef = const struct CF_BRIDGED_TYPE(id) __LSASN*;
+
+extern const CFStringRef _kLSOpenOptionActivateKey;
+extern const CFStringRef _kLSOpenOptionAddToRecentsKey;
+extern const CFStringRef _kLSOpenOptionArgumentsKey;
+extern const CFStringRef _kLSOpenOptionBackgroundLaunchKey;
+extern const CFStringRef _kLSOpenOptionHideKey;
+extern const CFStringRef _kLSOpenOptionPreferRunningInstanceKey;
+
+using _LSOpenCompletionHandler = void (^)(LSASNRef, Boolean, CFErrorRef);
+void _LSOpenURLsWithCompletionHandler(
+    CFArrayRef urls,
+    CFURLRef application_url,
+    CFDictionaryRef options,
+    _LSOpenCompletionHandler completion_handler);
+
+@interface NSRunningApplication ()
+- (id)initWithApplicationSerialNumber:(LSASNRef)asn;
+@end
+
+}  // extern "C"
+
+#endif  // BASE_MAC_LAUNCH_SERVICES_SPI_H_
diff --git a/base/mac/mac_logging.h b/base/mac/mac_logging.h
deleted file mode 100644
index 26c9300..0000000
--- a/base/mac/mac_logging.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_MAC_LOGGING_H_
-#define BASE_MAC_MAC_LOGGING_H_
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/logging.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_IOS)
-#include <MacTypes.h>
-#else
-#include <libkern/OSTypes.h>
-#endif
-
-// Use the OSSTATUS_LOG family to log messages related to errors in Mac OS X
-// system routines that report status via an OSStatus or OSErr value. It is
-// similar to the PLOG family which operates on errno, but because there is no
-// global (or thread-local) OSStatus or OSErr value, the specific error must
-// be supplied as an argument to the OSSTATUS_LOG macro. The message logged
-// will contain the symbolic constant name corresponding to the status value,
-// along with the value itself.
-//
-// OSErr is just an older 16-bit form of the newer 32-bit OSStatus. Despite
-// the name, OSSTATUS_LOG can be used equally well for OSStatus and OSErr.
-
-namespace logging {
-
-// Returns a UTF8 description from an OS X Status error.
-BASE_EXPORT std::string DescriptionFromOSStatus(OSStatus err);
-
-class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage {
- public:
-  OSStatusLogMessage(const char* file_path,
-                     int line,
-                     LogSeverity severity,
-                     OSStatus status);
-
-  OSStatusLogMessage(const OSStatusLogMessage&) = delete;
-  OSStatusLogMessage& operator=(const OSStatusLogMessage&) = delete;
-
-  ~OSStatusLogMessage() override;
-
- private:
-  OSStatus status_;
-};
-
-}  // namespace logging
-
-#if DCHECK_IS_ON()
-#define MAC_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
-#else
-#define MAC_DVLOG_IS_ON(verbose_level) 0
-#endif
-
-#define OSSTATUS_LOG_STREAM(severity, status) \
-    COMPACT_GOOGLE_LOG_EX_ ## severity(OSStatusLogMessage, status).stream()
-#define OSSTATUS_VLOG_STREAM(verbose_level, status) \
-    logging::OSStatusLogMessage(__FILE__, __LINE__, \
-                                -verbose_level, status).stream()
-
-#define OSSTATUS_LOG(severity, status) \
-    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), LOG_IS_ON(severity))
-#define OSSTATUS_LOG_IF(severity, condition, status) \
-    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
-                LOG_IS_ON(severity) && (condition))
-
-#define OSSTATUS_VLOG(verbose_level, status) \
-    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
-                VLOG_IS_ON(verbose_level))
-#define OSSTATUS_VLOG_IF(verbose_level, condition, status) \
-    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
-                VLOG_IS_ON(verbose_level) && (condition))
-
-#define OSSTATUS_CHECK(condition, status) \
-    LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), !(condition)) \
-    << "Check failed: " # condition << ". "
-
-#define OSSTATUS_DLOG(severity, status) \
-    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), DLOG_IS_ON(severity))
-#define OSSTATUS_DLOG_IF(severity, condition, status) \
-    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
-                DLOG_IS_ON(severity) && (condition))
-
-#define OSSTATUS_DVLOG(verbose_level, status) \
-    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
-                MAC_DVLOG_IS_ON(verbose_level))
-#define OSSTATUS_DVLOG_IF(verbose_level, condition, status) \
-    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
-                MAC_DVLOG_IS_ON(verbose_level) && (condition))
-
-#define OSSTATUS_DCHECK(condition, status)        \
-  LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), \
-              DCHECK_IS_ON() && !(condition))     \
-      << "Check failed: " #condition << ". "
-
-#endif  // BASE_MAC_MAC_LOGGING_H_
diff --git a/base/mac/mac_logging.mm b/base/mac/mac_logging.mm
deleted file mode 100644
index 1950fa7..0000000
--- a/base/mac/mac_logging.mm
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/mac_logging.h"
-
-#import <Foundation/Foundation.h>
-
-#include <iomanip>
-
-#include "build/build_config.h"
-
-#if !BUILDFLAG(IS_IOS)
-#include <CoreServices/CoreServices.h>
-#endif
-
-namespace logging {
-
-std::string DescriptionFromOSStatus(OSStatus err) {
-  NSError* error =
-      [NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil];
-  return error.description.UTF8String;
-}
-
-OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
-                                       int line,
-                                       LogSeverity severity,
-                                       OSStatus status)
-    : LogMessage(file_path, line, severity),
-      status_(status) {
-}
-
-OSStatusLogMessage::~OSStatusLogMessage() {
-#if BUILDFLAG(IS_IOS)
-  // TODO(crbug.com/546375): Consider using NSError with NSOSStatusErrorDomain
-  // to try to get a description of the failure.
-  stream() << ": " << status_;
-#else
-  stream() << ": "
-           << DescriptionFromOSStatus(status_)
-           << " ("
-           << status_
-           << ")";
-#endif
-}
-
-}  // namespace logging
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
index b0bf5a6..e6cf9e3 100644
--- a/base/mac/mac_util.h
+++ b/base/mac/mac_util.h
@@ -10,6 +10,7 @@
 #include <stdint.h>
 
 #include <string>
+#include <string_view>
 
 #include "base/base_export.h"
 
@@ -61,90 +62,17 @@
 // an error, or true otherwise.
 BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
 
-namespace internal {
-
-// Returns the system's macOS major and minor version numbers combined into an
-// integer value. For example, for macOS Sierra this returns 1012, and for macOS
-// Big Sur it returns 1100. Note that the accuracy returned by this function is
-// as granular as the major version number of Darwin.
-BASE_EXPORT int MacOSVersion();
-
-}  // namespace internal
-
-// Run-time OS version checks. Prefer @available in Objective-C files. If that
-// is not possible, use these functions instead of
-// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "AtLeast" and
-// "AtMost" variants to those that check for a specific version, unless you know
-// for sure that you need to check for a specific version.
-
-#define DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED(V, DEPLOYMENT_TARGET_TEST) \
-  inline bool IsOS10_##V() {                                              \
-    DEPLOYMENT_TARGET_TEST(>, V, false)                                   \
-    return internal::MacOSVersion() == 1000 + V;                          \
-  }
-
-#define DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(V, DEPLOYMENT_TARGET_TEST) \
-  inline bool IsOS##V() {                                             \
-    DEPLOYMENT_TARGET_TEST(>, V, false)                               \
-    return internal::MacOSVersion() == V * 100;                       \
-  }
-
-#define DEFINE_IS_OS_FUNCS(V, DEPLOYMENT_TARGET_TEST)           \
-  DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(V, DEPLOYMENT_TARGET_TEST) \
-  inline bool IsAtLeastOS##V() {                                \
-    DEPLOYMENT_TARGET_TEST(>=, V, true)                         \
-    return internal::MacOSVersion() >= V * 100;                 \
-  }                                                             \
-  inline bool IsAtMostOS##V() {                                 \
-    DEPLOYMENT_TARGET_TEST(>, V, false)                         \
-    return internal::MacOSVersion() <= V * 100;                 \
-  }
-
-#define OLD_TEST_DEPLOYMENT_TARGET(OP, V, RET)                  \
-  if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_X_VERSION_10_##V) \
-    return RET;
-#define TEST_DEPLOYMENT_TARGET(OP, V, RET)                     \
-  if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_VERSION_##V##_0) \
-    return RET;
-#define IGNORE_DEPLOYMENT_TARGET(OP, V, RET)
-
-// Notes:
-// - When bumping the minimum version of the macOS required by Chromium, remove
-//   lines from below corresponding to versions of the macOS no longer
-//   supported. Ensure that the minimum supported version uses the
-//   DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED macro. When macOS 11.0 is the
-//   minimum required version, remove all the OLD versions of the macros.
-// - When bumping the minimum version of the macOS SDK required to build
-//   Chromium, remove the #ifdef that switches between
-//   TEST_DEPLOYMENT_TARGET and IGNORE_DEPLOYMENT_TARGET.
-
-// Versions of macOS supported at runtime but whose SDK is not supported for
-// building.
-DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED(15, OLD_TEST_DEPLOYMENT_TARGET)
-DEFINE_IS_OS_FUNCS(11, TEST_DEPLOYMENT_TARGET)
-DEFINE_IS_OS_FUNCS(12, TEST_DEPLOYMENT_TARGET)
-
-// Versions of macOS supported at runtime and whose SDK is supported for
-// building.
-#ifdef MAC_OS_VERSION_13_0
-DEFINE_IS_OS_FUNCS(13, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(13, IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#ifdef MAC_OS_VERSION_14_0
-DEFINE_IS_OS_FUNCS(14, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(14, IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#undef DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED
-#undef DEFINE_OLD_IS_OS_FUNCS
-#undef DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED
-#undef DEFINE_IS_OS_FUNCS
-#undef OLD_TEST_DEPLOYMENT_TARGET
-#undef TEST_DEPLOYMENT_TARGET
-#undef IGNORE_DEPLOYMENT_TARGET
+// The following two functions return the version of macOS that is currently
+// running. MacOSVersion() returns the full trio of version numbers, packed
+// into one int (e.g. macOS 12.6.5 returns 12'06'05), and MacOSMajorVersion()
+// returns only the major version number (e.g. macOS 12.6.5 returns 12). Use
+// these for runtime OS version checking. Prefer @available in Objective-C
+// files. Note that the returned value does not include any Rapid Security
+// Response (RSR) suffix (the "(a)" at the end of version numbers).
+BASE_EXPORT __attribute__((const)) int MacOSVersion();
+inline __attribute__((const)) int MacOSMajorVersion() {
+  return MacOSVersion() / 1'00'00;
+}
 
 enum class CPUType {
   kIntel,
@@ -155,18 +83,6 @@
 // Returns the type of CPU this is being executed on.
 BASE_EXPORT CPUType GetCPUType();
 
-// Retrieve the system's model identifier string from the IOKit registry:
-// for example, "MacPro4,1", "MacBookPro6,1". Returns empty string upon
-// failure.
-BASE_EXPORT std::string GetModelIdentifier();
-
-// Parse a model identifier string; for example, into ("MacBookPro", 6, 1).
-// If any error occurs, none of the input pointers are touched.
-BASE_EXPORT bool ParseModelIdentifier(const std::string& ident,
-                                      std::string* type,
-                                      int32_t* major,
-                                      int32_t* minor);
-
 // Returns an OS name + version string. e.g.:
 //
 //   "macOS Version 10.14.3 (Build 18D109)"
@@ -215,6 +131,9 @@
 
   // Privacy & Security > Screen Recording
   kPrivacySecurity_ScreenRecording,
+
+  // Trackpad
+  kTrackpad,
 };
 
 // Opens the specified System Settings pane. If the specified subpane does not
@@ -222,6 +141,12 @@
 // instead.
 BASE_EXPORT void OpenSystemSettingsPane(SystemSettingsPane pane);
 
+// ------- For testing --------
+
+// An implementation detail of `MacOSVersion()` above, exposed for testing.
+BASE_EXPORT int ParseOSProductVersionForTesting(
+    const std::string_view& version);
+
 }  // namespace base::mac
 
 #endif  // BASE_MAC_MAC_UTIL_H_
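To make the packed encoding concrete, a short sketch (not part of the imported change, but consistent with the comment above and with ParseOSProductVersion() in mac_util.mm below); the call site is hypothetical:

// Each of the three version components occupies two decimal digits:
//   "13.4.1"     -> 13'04'01 (i.e. 130401)
//   "14.0"       -> 14'00'00 (a missing bugfix component is padded with zeros)
//   "13.4.1 (a)" -> 13'04'01 (the RSR suffix is ignored)
if (base::mac::MacOSVersion() >= 13'00'00) {
  // Code path that requires macOS 13.0 or later.
}
int major = base::mac::MacOSMajorVersion();  // 13 when running macOS 13.4.1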
diff --git a/base/mac/mac_util.mm b/base/mac/mac_util.mm
index f7f443f..76723ed 100644
--- a/base/mac/mac_util.mm
+++ b/base/mac/mac_util.mm
@@ -15,17 +15,24 @@
 #include <sys/utsname.h>
 #include <sys/xattr.h>
 
+#include <string>
+#include <string_view>
+#include <vector>
+
 #include "base/apple/bridging.h"
 #include "base/apple/bundle_locations.h"
+#include "base/apple/foundation_util.h"
+#include "base/apple/osstatus_logging.h"
+#include "base/apple/scoped_cftyperef.h"
+#include "base/check.h"
 #include "base/files/file_path.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mac_logging.h"
 #include "base/mac/scoped_aedesc.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/mac/scoped_ioobject.h"
+#include "base/posix/sysctl.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
 #include "base/strings/sys_string_conversions.h"
 #include "base/threading/scoped_blocking_call.h"
@@ -43,7 +50,7 @@
   ~LoginItemsFileList() = default;
 
   [[nodiscard]] bool Initialize() {
-    DCHECK(!login_items_.get()) << __func__ << " called more than once.";
+    DCHECK(!login_items_) << __func__ << " called more than once.";
     // The LSSharedFileList suite of functions has been deprecated. Instead,
     // a LoginItems helper should be registered with SMLoginItemSetEnabled()
     // https://crbug.com/1154377.
@@ -57,26 +64,28 @@
   }
 
   LSSharedFileListRef GetLoginFileList() {
-    DCHECK(login_items_.get()) << "Initialize() failed or not called.";
-    return login_items_;
+    DCHECK(login_items_) << "Initialize() failed or not called.";
+    return login_items_.get();
   }
 
   // Looks into Shared File Lists corresponding to Login Items for the item
   // representing the specified bundle.  If such an item is found, returns a
   // retained reference to it. Caller is responsible for releasing the
   // reference.
-  ScopedCFTypeRef<LSSharedFileListItemRef> GetLoginItemForApp(NSURL* url) {
-    DCHECK(login_items_.get()) << "Initialize() failed or not called.";
+  apple::ScopedCFTypeRef<LSSharedFileListItemRef> GetLoginItemForApp(
+      NSURL* url) {
+    DCHECK(login_items_) << "Initialize() failed or not called.";
 
 #pragma clang diagnostic push  // https://crbug.com/1154377
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-    ScopedCFTypeRef<CFArrayRef> login_items_array(
-        LSSharedFileListCopySnapshot(login_items_, /*inList=*/nullptr));
+    apple::ScopedCFTypeRef<CFArrayRef> login_items_array(
+        LSSharedFileListCopySnapshot(login_items_.get(), /*inList=*/nullptr));
 #pragma clang diagnostic pop
 
-    for (CFIndex i = 0; i < CFArrayGetCount(login_items_array); ++i) {
+    for (CFIndex i = 0; i < CFArrayGetCount(login_items_array.get()); ++i) {
       LSSharedFileListItemRef item =
-          (LSSharedFileListItemRef)CFArrayGetValueAtIndex(login_items_array, i);
+          (LSSharedFileListItemRef)CFArrayGetValueAtIndex(
+              login_items_array.get(), i);
 #pragma clang diagnostic push  // https://crbug.com/1154377
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
       // kLSSharedFileListDoNotMountVolumes is used so that we don't trigger
@@ -88,32 +97,32 @@
 #pragma clang diagnostic pop
 
       if (item_url && [item_url isEqual:url]) {
-        return ScopedCFTypeRef<LSSharedFileListItemRef>(
+        return apple::ScopedCFTypeRef<LSSharedFileListItemRef>(
             item, base::scoped_policy::RETAIN);
       }
     }
 
-    return ScopedCFTypeRef<LSSharedFileListItemRef>();
+    return apple::ScopedCFTypeRef<LSSharedFileListItemRef>();
   }
 
-  ScopedCFTypeRef<LSSharedFileListItemRef> GetLoginItemForMainApp() {
+  apple::ScopedCFTypeRef<LSSharedFileListItemRef> GetLoginItemForMainApp() {
     NSURL* url = [NSURL fileURLWithPath:base::apple::MainBundle().bundlePath];
     return GetLoginItemForApp(url);
   }
 
  private:
-  ScopedCFTypeRef<LSSharedFileListRef> login_items_;
+  apple::ScopedCFTypeRef<LSSharedFileListRef> login_items_;
 };
 
 bool IsHiddenLoginItem(LSSharedFileListItemRef item) {
 #pragma clang diagnostic push  // https://crbug.com/1154377
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-  ScopedCFTypeRef<CFBooleanRef> hidden(
+  apple::ScopedCFTypeRef<CFBooleanRef> hidden(
       reinterpret_cast<CFBooleanRef>(LSSharedFileListItemCopyProperty(
           item, kLSSharedFileListLoginItemHidden)));
 #pragma clang diagnostic pop
 
-  return hidden && hidden == kCFBooleanTrue;
+  return hidden && hidden.get() == kCFBooleanTrue;
 }
 
 }  // namespace
@@ -163,11 +172,11 @@
     return;
   }
 
-  NSURL* app_bundle_url = base::mac::FilePathToNSURL(app_bundle_file_path);
-  base::ScopedCFTypeRef<LSSharedFileListItemRef> item =
+  NSURL* app_bundle_url = base::apple::FilePathToNSURL(app_bundle_file_path);
+  apple::ScopedCFTypeRef<LSSharedFileListItemRef> item =
       login_items.GetLoginItemForApp(app_bundle_url);
 
-  if (item.get() && (IsHiddenLoginItem(item) == hide_on_startup)) {
+  if (item.get() && (IsHiddenLoginItem(item.get()) == hide_on_startup)) {
     return;  // There already is a login item with required hide flag.
   }
 
@@ -175,7 +184,7 @@
   if (item.get()) {
 #pragma clang diagnostic push  // https://crbug.com/1154377
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-    LSSharedFileListItemRemove(login_items.GetLoginFileList(), item);
+    LSSharedFileListItemRemove(login_items.GetLoginFileList(), item.get());
 #pragma clang diagnostic pop
   }
 
@@ -185,7 +194,7 @@
   NSDictionary* properties =
       @{apple::CFToNSPtrCast(kLSSharedFileListLoginItemHidden) : @(hide)};
 
-  ScopedCFTypeRef<LSSharedFileListItemRef> new_item(
+  apple::ScopedCFTypeRef<LSSharedFileListItemRef> new_item(
       LSSharedFileListInsertItemURL(
           login_items.GetLoginFileList(), kLSSharedFileListItemLast,
           /*inDisplayName=*/nullptr,
@@ -204,8 +213,8 @@
     return;
   }
 
-  NSURL* app_bundle_url = base::mac::FilePathToNSURL(app_bundle_file_path);
-  base::ScopedCFTypeRef<LSSharedFileListItemRef> item =
+  NSURL* app_bundle_url = base::apple::FilePathToNSURL(app_bundle_file_path);
+  apple::ScopedCFTypeRef<LSSharedFileListItemRef> item =
       login_items.GetLoginItemForApp(app_bundle_url);
   if (!item.get()) {
     return;
@@ -213,7 +222,7 @@
 
 #pragma clang diagnostic push  // https://crbug.com/1154377
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-  LSSharedFileListItemRemove(login_items.GetLoginFileList(), item);
+  LSSharedFileListItemRemove(login_items.GetLoginFileList(), item.get());
 #pragma clang diagnostic pop
 }
 
@@ -250,7 +259,7 @@
 
   CFStringRef app = CFSTR("com.apple.loginwindow");
   CFStringRef save_state = CFSTR("TALLogoutSavesState");
-  ScopedCFTypeRef<CFPropertyListRef> plist(
+  apple::ScopedCFTypeRef<CFPropertyListRef> plist(
       CFPreferencesCopyAppValue(save_state, app));
   // According to documentation, com.apple.loginwindow.plist does not exist on a
   // fresh installation until the user changes a login window setting.  The
@@ -261,7 +270,8 @@
     return true;
   }
 
-  if (CFBooleanRef restore_state = base::mac::CFCast<CFBooleanRef>(plist)) {
+  if (CFBooleanRef restore_state =
+          base::apple::CFCast<CFBooleanRef>(plist.get())) {
     return CFBooleanGetValue(restore_state);
   }
 
@@ -278,13 +288,13 @@
     return false;
   }
 
-  base::ScopedCFTypeRef<LSSharedFileListItemRef> item(
+  apple::ScopedCFTypeRef<LSSharedFileListItemRef> item(
       login_items.GetLoginItemForMainApp());
   if (!item.get()) {
     // The OS itself can launch items, usually for the resume feature.
     return false;
   }
-  return IsHiddenLoginItem(item);
+  return IsHiddenLoginItem(item.get());
 }
 
 bool RemoveQuarantineAttribute(const FilePath& file_path) {
@@ -295,86 +305,68 @@
 
 namespace {
 
-// Returns the running system's Darwin major version. Don't call this, it's an
-// implementation detail and its result is meant to be cached by
-// MacOSVersionInternal().
-int DarwinMajorVersionInternal() {
-  // base::OperatingSystemVersionNumbers() at one time called Gestalt(), which
-  // was observed to be able to spawn threads (see https://crbug.com/53200).
-  // Nowadays that function calls -[NSProcessInfo operatingSystemVersion], whose
-  // current implementation does things like hit the file system, which is
-  // possibly a blocking operation. Either way, it's overkill for what needs to
-  // be done here.
-  //
-  // uname, on the other hand, is implemented as a simple series of sysctl
-  // system calls to obtain the relevant data from the kernel. The data is
-  // compiled right into the kernel, so no threads or blocking or other
-  // funny business is necessary.
-  //
-  // TODO: Switch to the kern.osproductversion sysctl? It's compiled in and
-  // should require less Darwin offset guessing and parsing.
+int ParseOSProductVersion(const std::string_view& version) {
+  int macos_version = 0;
 
-  struct utsname uname_info;
-  if (uname(&uname_info) != 0) {
-    DPLOG(ERROR) << "uname";
-    return 0;
-  }
+  // The number of version components (major/minor/bugfix) that still need to
+  // be folded into the return value.
+  int parts = 3;
 
-  if (strcmp(uname_info.sysname, "Darwin") != 0) {
-    DLOG(ERROR) << "unexpected uname sysname " << uname_info.sysname;
-    return 0;
-  }
+  // When a Rapid Security Response is applied to a system, the UI will display
+  // an additional letter (e.g. "13.4.1 (a)"). That extra letter should not be
+  // present in `version`; in fact, the version string should not contain
+  // any spaces. However, take the first space-delimited "word" for parsing.
+  std::vector<std::string_view> words = base::SplitStringPiece(
+      version, " ", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  CHECK_GE(words.size(), 1u);
 
-  int darwin_major_version = 0;
-  char* dot = strchr(uname_info.release, '.');
-  if (dot) {
-    if (!base::StringToInt(
-            base::StringPiece(uname_info.release,
-                              static_cast<size_t>(dot - uname_info.release)),
-            &darwin_major_version)) {
-      dot = nullptr;
+  // There are expected to be either two or three numbers separated by a dot.
+  // Walk through them, folding each one into the version number.
+  for (const auto& value_str : base::SplitStringPiece(
+           words[0], ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL)) {
+    int value;
+    bool success = base::StringToInt(value_str, &value);
+    CHECK(success);
+    macos_version *= 100;
+    macos_version += value;
+    if (--parts == 0) {
+      break;
     }
   }
 
-  if (!dot) {
-    DLOG(ERROR) << "could not parse uname release " << uname_info.release;
-    return 0;
+  // While historically the string has comprised exactly two or three numbers
+  // separated by a dot, it's not inconceivable that it might one day be only
+  // one number. Therefore, only check to see that at least one number was found
+  // and processed.
+  CHECK_LE(parts, 2);
+
+  // Tack on as many '00 digits as needed to be sure that exactly three version
+  // numbers are returned.
+  for (int i = 0; i < parts; ++i) {
+    macos_version *= 100;
   }
 
-  return darwin_major_version;
-}
+  // Checks that the value is within expected bounds corresponding to released
+  // OS version numbers. The most important bit is making sure that the "10.16"
+  // compatibility mode isn't engaged.
+  CHECK(macos_version >= 10'00'00);
+  CHECK(macos_version < 10'16'00 || macos_version >= 11'00'00);
 
-// The implementation of MacOSVersion() as defined in the header. Don't call
-// this, it's an implementation detail and the result is meant to be cached by
-// MacOSVersion().
-int MacOSVersionInternal() {
-  int darwin_major_version = DarwinMajorVersionInternal();
-
-  // Darwin major versions 6 through 19 corresponded to macOS versions 10.2
-  // through 10.15.
-  CHECK(darwin_major_version >= 6);
-  if (darwin_major_version <= 19) {
-    return 1000 + darwin_major_version - 4;
-  }
-
-  // Darwin major version 20 corresponds to macOS version 11.0. Assume a
-  // correspondence between Darwin's major version numbers and macOS major
-  // version numbers.
-  int macos_major_version = darwin_major_version - 9;
-
-  return macos_major_version * 100;
+  return macos_version;
 }
 
 }  // namespace
 
-namespace internal {
-
-int MacOSVersion() {
-  static int macos_version = MacOSVersionInternal();
-  return macos_version;
+int ParseOSProductVersionForTesting(const std::string_view& version) {
+  return ParseOSProductVersion(version);
 }
 
-}  // namespace internal
+int MacOSVersion() {
+  static int macos_version = ParseOSProductVersion(
+      StringSysctlByName("kern.osproductversion").value());
+
+  return macos_version;
+}
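
For reference, a minimal sketch of the encoding ParseOSProductVersion() produces: each of major/minor/bugfix occupies two decimal digits of a single int, which is what the bounds CHECKs above and the unittest below rely on (illustration only; not part of the patch):

    int PackVersion(int major, int minor, int bugfix) {
      // e.g. 13.5.1 -> 13'05'01 == 130501
      return major * 1'00'00 + minor * 1'00 + bugfix;
    }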
 
 namespace {
 
@@ -402,51 +394,6 @@
 #endif  // ARCH_CPU_*
 }
 
-std::string GetModelIdentifier() {
-  std::string return_string;
-  ScopedIOObject<io_service_t> platform_expert(IOServiceGetMatchingService(
-      kIOMasterPortDefault, IOServiceMatching("IOPlatformExpertDevice")));
-  if (platform_expert) {
-    ScopedCFTypeRef<CFDataRef> model_data(
-        static_cast<CFDataRef>(IORegistryEntryCreateCFProperty(
-            platform_expert, CFSTR("model"), kCFAllocatorDefault, 0)));
-    if (model_data) {
-      return_string =
-          reinterpret_cast<const char*>(CFDataGetBytePtr(model_data));
-    }
-  }
-  return return_string;
-}
-
-bool ParseModelIdentifier(const std::string& ident,
-                          std::string* type,
-                          int32_t* major,
-                          int32_t* minor) {
-  size_t number_loc = ident.find_first_of("0123456789");
-  if (number_loc == std::string::npos) {
-    return false;
-  }
-  size_t comma_loc = ident.find(',', number_loc);
-  if (comma_loc == std::string::npos) {
-    return false;
-  }
-  int32_t major_tmp, minor_tmp;
-  std::string::const_iterator begin = ident.begin();
-  if (!StringToInt(MakeStringPiece(begin + static_cast<ptrdiff_t>(number_loc),
-                                   begin + static_cast<ptrdiff_t>(comma_loc)),
-                   &major_tmp) ||
-      !StringToInt(
-          MakeStringPiece(begin + static_cast<ptrdiff_t>(comma_loc) + 1,
-                          ident.end()),
-          &minor_tmp)) {
-    return false;
-  }
-  *type = ident.substr(0, number_loc);
-  *major = major_tmp;
-  *minor = minor_tmp;
-  return true;
-}
-
 std::string GetOSDisplayName() {
   std::string version_string = base::SysNSStringToUTF8(
       NSProcessInfo.processInfo.operatingSystemVersionString);
@@ -462,12 +409,12 @@
     return std::string();
   }
 
-  base::ScopedCFTypeRef<CFTypeRef> serial_number(
-      IORegistryEntryCreateCFProperty(expert_device,
+  apple::ScopedCFTypeRef<CFTypeRef> serial_number(
+      IORegistryEntryCreateCFProperty(expert_device.get(),
                                       CFSTR(kIOPlatformSerialNumberKey),
                                       kCFAllocatorDefault, 0));
   CFStringRef serial_number_cfstring =
-      base::mac::CFCast<CFStringRef>(serial_number);
+      base::apple::CFCast<CFStringRef>(serial_number.get());
   if (!serial_number_cfstring) {
     DLOG(ERROR) << "Error retrieving the machine serial number.";
     return std::string();
@@ -492,7 +439,7 @@
   // guessing. Clarity was requested from Apple in FB11753405.
   switch (pane) {
     case SystemSettingsPane::kAccessibility_Captions:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.Accessibility-Settings."
               @"extension?Captioning";
       } else {
@@ -501,7 +448,7 @@
       }
       break;
     case SystemSettingsPane::kDateTime:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url =
             @"x-apple.systempreferences:com.apple.Date-Time-Settings.extension";
       } else {
@@ -509,7 +456,7 @@
       }
       break;
     case SystemSettingsPane::kNetwork_Proxies:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.Network-Settings.extension?"
               @"Proxies";
       } else {
@@ -518,7 +465,7 @@
       }
       break;
     case SystemSettingsPane::kPrintersScanners:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.Print-Scan-Settings."
               @"extension";
       } else {
@@ -526,7 +473,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_Accessibility:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.settings.PrivacySecurity."
               @"extension?Privacy_Accessibility";
       } else {
@@ -535,7 +482,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_Bluetooth:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.settings.PrivacySecurity."
               @"extension?Privacy_Bluetooth";
       } else {
@@ -544,7 +491,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_Camera:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.settings.PrivacySecurity."
               @"extension?Privacy_Camera";
       } else {
@@ -553,7 +500,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_Extensions_Sharing:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         // See ShareKit, -[SHKSharingServicePicker openAppExtensionsPrefpane].
         url = @"x-apple.systempreferences:com.apple.ExtensionsPreferences?"
               @"Sharing";
@@ -573,7 +520,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_LocationServices:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.settings.PrivacySecurity."
               @"extension?Privacy_LocationServices";
       } else {
@@ -582,7 +529,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_Microphone:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.settings.PrivacySecurity."
               @"extension?Privacy_Microphone";
       } else {
@@ -591,7 +538,7 @@
       }
       break;
     case SystemSettingsPane::kPrivacySecurity_ScreenRecording:
-      if (IsAtLeastOS13()) {
+      if (MacOSMajorVersion() >= 13) {
         url = @"x-apple.systempreferences:com.apple.settings.PrivacySecurity."
               @"extension?Privacy_ScreenCapture";
       } else {
@@ -599,6 +546,14 @@
               @"Privacy_ScreenCapture";
       }
       break;
+    case SystemSettingsPane::kTrackpad:
+      if (MacOSMajorVersion() >= 13) {
+        url = @"x-apple.systempreferences:com.apple.Trackpad-Settings."
+              @"extension";
+      } else {
+        pane_file = @"/System/Library/PreferencePanes/Trackpad.prefPane";
+      }
+      break;
   }
 
   DCHECK(url != nil ^ pane_file != nil);
diff --git a/base/mac/mac_util_unittest.mm b/base/mac/mac_util_unittest.mm
index 3420f0b..5ed5e4e 100644
--- a/base/mac/mac_util_unittest.mm
+++ b/base/mac/mac_util_unittest.mm
@@ -10,11 +10,11 @@
 #include <stdint.h>
 #include <sys/xattr.h>
 
+#include "base/apple/foundation_util.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/system/sys_info.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
@@ -28,21 +28,21 @@
 TEST_F(MacUtilTest, GetUserDirectoryTest) {
   // Try a few keys, make sure they come back with non-empty paths.
   FilePath caches_dir;
-  EXPECT_TRUE(GetUserDirectory(NSCachesDirectory, &caches_dir));
+  EXPECT_TRUE(apple::GetUserDirectory(NSCachesDirectory, &caches_dir));
   EXPECT_FALSE(caches_dir.empty());
 
   FilePath application_support_dir;
-  EXPECT_TRUE(GetUserDirectory(NSApplicationSupportDirectory,
-                               &application_support_dir));
+  EXPECT_TRUE(apple::GetUserDirectory(NSApplicationSupportDirectory,
+                                      &application_support_dir));
   EXPECT_FALSE(application_support_dir.empty());
 
   FilePath library_dir;
-  EXPECT_TRUE(GetUserDirectory(NSLibraryDirectory, &library_dir));
+  EXPECT_TRUE(apple::GetUserDirectory(NSLibraryDirectory, &library_dir));
   EXPECT_FALSE(library_dir.empty());
 }
 
 TEST_F(MacUtilTest, TestLibraryPath) {
-  FilePath library_dir = GetUserLibraryPath();
+  FilePath library_dir = apple::GetUserLibraryPath();
   // Make sure the string isn't empty.
   EXPECT_FALSE(library_dir.value().empty());
 }
@@ -51,7 +51,7 @@
   FilePath out;
 
   // Make sure it doesn't crash.
-  out = GetAppBundlePath(FilePath());
+  out = apple::GetAppBundlePath(FilePath());
   EXPECT_TRUE(out.empty());
 
   // Some more invalid inputs.
@@ -60,7 +60,7 @@
     "foo/bar./bazquux", "foo/.app", "//foo",
   };
   for (size_t i = 0; i < std::size(invalid_inputs); i++) {
-    out = GetAppBundlePath(FilePath(invalid_inputs[i]));
+    out = apple::GetAppBundlePath(FilePath(invalid_inputs[i]));
     EXPECT_TRUE(out.empty()) << "loop: " << i;
   }
 
@@ -84,7 +84,7 @@
         "/Applications/Google Foo.app" },
   };
   for (size_t i = 0; i < std::size(valid_inputs); i++) {
-    out = GetAppBundlePath(FilePath(valid_inputs[i].in));
+    out = apple::GetAppBundlePath(FilePath(valid_inputs[i].in));
     EXPECT_FALSE(out.empty()) << "loop: " << i;
     EXPECT_STREQ(valid_inputs[i].expected_out,
         out.value().c_str()) << "loop: " << i;
@@ -95,7 +95,7 @@
   FilePath out;
 
   // Make sure it doesn't crash.
-  out = GetInnermostAppBundlePath(FilePath());
+  out = apple::GetInnermostAppBundlePath(FilePath());
   EXPECT_TRUE(out.empty());
 
   // Some more invalid inputs.
@@ -113,7 +113,7 @@
   for (size_t i = 0; i < std::size(invalid_inputs); i++) {
     SCOPED_TRACE(testing::Message()
                  << "case #" << i << ", input: " << invalid_inputs[i]);
-    out = GetInnermostAppBundlePath(FilePath(invalid_inputs[i]));
+    out = apple::GetInnermostAppBundlePath(FilePath(invalid_inputs[i]));
     EXPECT_TRUE(out.empty());
   }
 
@@ -139,111 +139,46 @@
   for (size_t i = 0; i < std::size(valid_inputs); i++) {
     SCOPED_TRACE(testing::Message()
                  << "case #" << i << ", input " << valid_inputs[i].in);
-    out = GetInnermostAppBundlePath(FilePath(valid_inputs[i].in));
+    out = apple::GetInnermostAppBundlePath(FilePath(valid_inputs[i].in));
     EXPECT_FALSE(out.empty());
     EXPECT_STREQ(valid_inputs[i].expected_out, out.value().c_str());
   }
 }
 
-TEST_F(MacUtilTest, IsOSEllipsis) {
+TEST_F(MacUtilTest, MacOSVersion) {
   int32_t major, minor, bugfix;
   base::SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix);
 
-  // The patterns here are:
-  // - FALSE/FALSE/TRUE (it is not the earlier version, it is not "at most" the
-  //   earlier version, it is "at least" the earlier version)
-  // - TRUE/TRUE/TRUE (it is the same version, it is "at most" the same version,
-  //   it is "at least" the same version)
-  // - FALSE/TRUE/FALSE (it is not the later version, it is "at most" the later
-  //   version, it is not "at least" the later version)
-
-#define TEST_FOR_PAST_OS(V)      \
-  EXPECT_FALSE(IsOS##V());       \
-  EXPECT_FALSE(IsAtMostOS##V()); \
-  EXPECT_TRUE(IsAtLeastOS##V());
-
-#define TEST_FOR_SAME_OS(V)     \
-  EXPECT_TRUE(IsOS##V());       \
-  EXPECT_TRUE(IsAtMostOS##V()); \
-  EXPECT_TRUE(IsAtLeastOS##V());
-
-#define TEST_FOR_FUTURE_OS(V)   \
-  EXPECT_FALSE(IsOS##V());      \
-  EXPECT_TRUE(IsAtMostOS##V()); \
-  EXPECT_FALSE(IsAtLeastOS##V());
-
-  if (major == 10) {
-    if (minor == 15) {
-      EXPECT_TRUE(IsOS10_15());
-
-      TEST_FOR_FUTURE_OS(11);
-      TEST_FOR_FUTURE_OS(12);
-      TEST_FOR_FUTURE_OS(13);
-      TEST_FOR_FUTURE_OS(14);
-    } else {
-      // macOS 10.15 was the end of the line.
-      FAIL() << "Unexpected 10.x macOS.";
-    }
-  } else if (major == 11) {
-    EXPECT_FALSE(IsOS10_15());
-
-    TEST_FOR_SAME_OS(11);
-    TEST_FOR_FUTURE_OS(12);
-    TEST_FOR_FUTURE_OS(13);
-    TEST_FOR_FUTURE_OS(14);
-  } else if (major == 12) {
-    EXPECT_FALSE(IsOS10_15());
-
-    TEST_FOR_PAST_OS(11);
-    TEST_FOR_SAME_OS(12);
-    TEST_FOR_FUTURE_OS(13);
-    TEST_FOR_FUTURE_OS(14);
-  } else if (major == 13) {
-    EXPECT_FALSE(IsOS10_15());
-
-    TEST_FOR_PAST_OS(11);
-    TEST_FOR_PAST_OS(12);
-    TEST_FOR_SAME_OS(13);
-    TEST_FOR_FUTURE_OS(14);
-  } else if (major == 14) {
-    EXPECT_FALSE(IsOS10_15());
-
-    TEST_FOR_PAST_OS(11);
-    TEST_FOR_PAST_OS(12);
-    TEST_FOR_PAST_OS(13);
-    TEST_FOR_SAME_OS(14);
-  } else {
-    // The spooky future.
-    FAIL() << "Time to update the OS macros!";
-  }
+  EXPECT_EQ(major * 1'00'00 + minor * 1'00 + bugfix, MacOSVersion());
+  EXPECT_EQ(major, MacOSMajorVersion());
 }
 
-#undef TEST_FOR_PAST_10_OS
-#undef TEST_FOR_PAST_OS
-#undef TEST_FOR_SAME_10_OS
-#undef TEST_FOR_SAME_OS
-#undef TEST_FOR_FUTURE_10_OS
-#undef TEST_FOR_FUTURE_OS
+TEST_F(MacUtilTest, ParseOSProductVersion) {
+  // Various strings of the shapes expected to be returned from the API, which
+  // need to be parsed.
+  EXPECT_EQ(10'06'02, ParseOSProductVersionForTesting("10.6.2"));
+  EXPECT_EQ(10'15'00, ParseOSProductVersionForTesting("10.15"));
+  EXPECT_EQ(13'05'01, ParseOSProductVersionForTesting("13.5.1"));
+  EXPECT_EQ(14'00'00, ParseOSProductVersionForTesting("14.0"));
 
-TEST_F(MacUtilTest, ParseModelIdentifier) {
-  std::string model;
-  int32_t major = 1, minor = 2;
+  // Various strings in shapes that would not be expected, but that should parse
+  // without CHECKing.
+  EXPECT_EQ(13'04'01, ParseOSProductVersionForTesting("13.4.1 (c)"));
+  EXPECT_EQ(14'00'00, ParseOSProductVersionForTesting("14.0.0"));
+  EXPECT_EQ(18'00'00, ParseOSProductVersionForTesting("18"));
+  EXPECT_EQ(18'03'04, ParseOSProductVersionForTesting("18.3.4.3.2.5"));
 
-  EXPECT_FALSE(ParseModelIdentifier("", &model, &major, &minor));
-  EXPECT_EQ(0U, model.length());
-  EXPECT_EQ(1, major);
-  EXPECT_EQ(2, minor);
-  EXPECT_FALSE(ParseModelIdentifier("FooBar", &model, &major, &minor));
-
-  EXPECT_TRUE(ParseModelIdentifier("MacPro4,1", &model, &major, &minor));
-  EXPECT_EQ(model, "MacPro");
-  EXPECT_EQ(4, major);
-  EXPECT_EQ(1, minor);
-
-  EXPECT_TRUE(ParseModelIdentifier("MacBookPro6,2", &model, &major, &minor));
-  EXPECT_EQ(model, "MacBookPro");
-  EXPECT_EQ(6, major);
-  EXPECT_EQ(2, minor);
+  // Various strings in shapes that are so unexpected that they should not
+  // parse.
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("Mac OS X 10.0"),
+                            "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting(""), "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("  "), "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("."), "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("10.a.5"), "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("१०.१५.७"), "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("7.6.1"), "");
+  EXPECT_DEATH_IF_SUPPORTED(ParseOSProductVersionForTesting("10.16"), "");
 }
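
The expected values above use C++14 digit separators, so they are ordinary integer literals in the major*10000 + minor*100 + bugfix packing checked by the MacOSVersion test:

    static_assert(13'05'01 == 130501);
    static_assert(13'05'01 == 13 * 1'00'00 + 5 * 1'00 + 1);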
 
 TEST_F(MacUtilTest, TestRemoveQuarantineAttribute) {
diff --git a/base/mac/mach_logging.cc b/base/mac/mach_logging.cc
deleted file mode 100644
index d792370..0000000
--- a/base/mac/mach_logging.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/mach_logging.h"
-
-#include <iomanip>
-#include <string>
-
-#include "base/strings/stringprintf.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(USE_BLINK)
-#if BUILDFLAG(IS_IOS)
-#include "base/ios/sim_header_shims.h"
-#else
-#include <servers/bootstrap.h>
-#endif  // BUILDFLAG(IS_IOS)
-#endif  // BUILDFLAG(USE_BLINK)
-
-namespace {
-
-std::string FormatMachErrorNumber(mach_error_t mach_err) {
-  // For the os/kern subsystem, give the error number in decimal as in
-  // <mach/kern_return.h>. Otherwise, give it in hexadecimal to make it easier
-  // to visualize the various bits. See <mach/error.h>.
-  if (mach_err >= 0 && mach_err < KERN_RETURN_MAX) {
-    return base::StringPrintf(" (%d)", mach_err);
-  }
-  return base::StringPrintf(" (0x%08x)", mach_err);
-}
-
-}  // namespace
-
-namespace logging {
-
-MachLogMessage::MachLogMessage(const char* file_path,
-                               int line,
-                               LogSeverity severity,
-                               mach_error_t mach_err)
-    : LogMessage(file_path, line, severity),
-      mach_err_(mach_err) {
-}
-
-MachLogMessage::~MachLogMessage() {
-  stream() << ": "
-           << mach_error_string(mach_err_)
-           << FormatMachErrorNumber(mach_err_);
-}
-
-#if BUILDFLAG(USE_BLINK)
-
-BootstrapLogMessage::BootstrapLogMessage(const char* file_path,
-                                         int line,
-                                         LogSeverity severity,
-                                         kern_return_t bootstrap_err)
-    : LogMessage(file_path, line, severity),
-      bootstrap_err_(bootstrap_err) {
-}
-
-BootstrapLogMessage::~BootstrapLogMessage() {
-  stream() << ": "
-           << bootstrap_strerror(bootstrap_err_);
-
-  switch (bootstrap_err_) {
-    case BOOTSTRAP_SUCCESS:
-    case BOOTSTRAP_NOT_PRIVILEGED:
-    case BOOTSTRAP_NAME_IN_USE:
-    case BOOTSTRAP_UNKNOWN_SERVICE:
-    case BOOTSTRAP_SERVICE_ACTIVE:
-    case BOOTSTRAP_BAD_COUNT:
-    case BOOTSTRAP_NO_MEMORY:
-    case BOOTSTRAP_NO_CHILDREN: {
-      // Show known bootstrap errors in decimal because that's how they're
-      // defined in <servers/bootstrap.h>.
-      stream() << " (" << bootstrap_err_ << ")";
-      break;
-    }
-
-    default: {
-      // bootstrap_strerror passes unknown errors to mach_error_string, so
-      // format them as they would be if they were handled by
-      // MachErrorMessage.
-      stream() << FormatMachErrorNumber(bootstrap_err_);
-      break;
-    }
-  }
-}
-
-#endif  // BUILDFLAG(USE_BLINK)
-
-}  // namespace logging
diff --git a/base/mac/mach_logging.h b/base/mac/mach_logging.h
deleted file mode 100644
index e29be96..0000000
--- a/base/mac/mach_logging.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_MACH_LOGGING_H_
-#define BASE_MAC_MACH_LOGGING_H_
-
-#include <mach/mach.h>
-
-#include "base/base_export.h"
-#include "base/logging.h"
-#include "build/blink_buildflags.h"
-#include "build/build_config.h"
-
-// Use the MACH_LOG family of macros along with a mach_error_t (kern_return_t)
-// containing a Mach error. The error value will be decoded so that logged
-// messages explain the error.
-//
-// Use the BOOTSTRAP_LOG family of macros specifically for errors that occur
-// while interoperating with the bootstrap subsystem. These errors will first
-// be looked up as bootstrap error messages. If no match is found, they will
-// be treated as generic Mach errors, as in MACH_LOG.
-//
-// Examples:
-//
-//   kern_return_t kr = mach_timebase_info(&info);
-//   if (kr != KERN_SUCCESS) {
-//     MACH_LOG(ERROR, kr) << "mach_timebase_info";
-//   }
-//
-//   kr = vm_deallocate(task, address, size);
-//   MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
-
-namespace logging {
-
-class BASE_EXPORT MachLogMessage : public logging::LogMessage {
- public:
-  MachLogMessage(const char* file_path,
-                 int line,
-                 LogSeverity severity,
-                 mach_error_t mach_err);
-
-  MachLogMessage(const MachLogMessage&) = delete;
-  MachLogMessage& operator=(const MachLogMessage&) = delete;
-
-  ~MachLogMessage() override;
-
- private:
-  mach_error_t mach_err_;
-};
-
-}  // namespace logging
-
-#if DCHECK_IS_ON()
-#define MACH_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
-#else
-#define MACH_DVLOG_IS_ON(verbose_level) 0
-#endif
-
-#define MACH_LOG_STREAM(severity, mach_err) \
-    COMPACT_GOOGLE_LOG_EX_ ## severity(MachLogMessage, mach_err).stream()
-#define MACH_VLOG_STREAM(verbose_level, mach_err) \
-    logging::MachLogMessage(__FILE__, __LINE__, \
-                            -verbose_level, mach_err).stream()
-
-#define MACH_LOG(severity, mach_err) \
-    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), LOG_IS_ON(severity))
-#define MACH_LOG_IF(severity, condition, mach_err) \
-    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
-                LOG_IS_ON(severity) && (condition))
-
-#define MACH_VLOG(verbose_level, mach_err) \
-    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
-                VLOG_IS_ON(verbose_level))
-#define MACH_VLOG_IF(verbose_level, condition, mach_err) \
-    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
-                VLOG_IS_ON(verbose_level) && (condition))
-
-#define MACH_CHECK(condition, mach_err) \
-    LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), !(condition)) \
-    << "Check failed: " # condition << ". "
-
-#define MACH_DLOG(severity, mach_err) \
-    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), DLOG_IS_ON(severity))
-#define MACH_DLOG_IF(severity, condition, mach_err) \
-    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
-                DLOG_IS_ON(severity) && (condition))
-
-#define MACH_DVLOG(verbose_level, mach_err) \
-    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
-                MACH_DVLOG_IS_ON(verbose_level))
-#define MACH_DVLOG_IF(verbose_level, condition, mach_err) \
-    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
-                MACH_DVLOG_IS_ON(verbose_level) && (condition))
-
-#define MACH_DCHECK(condition, mach_err)        \
-  LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), \
-              DCHECK_IS_ON() && !(condition))   \
-      << "Check failed: " #condition << ". "
-
-#if BUILDFLAG(USE_BLINK)
-
-namespace logging {
-
-class BASE_EXPORT BootstrapLogMessage : public logging::LogMessage {
- public:
-  BootstrapLogMessage(const char* file_path,
-                      int line,
-                      LogSeverity severity,
-                      kern_return_t bootstrap_err);
-
-  BootstrapLogMessage(const BootstrapLogMessage&) = delete;
-  BootstrapLogMessage& operator=(const BootstrapLogMessage&) = delete;
-
-  ~BootstrapLogMessage() override;
-
- private:
-  kern_return_t bootstrap_err_;
-};
-
-}  // namespace logging
-
-#define BOOTSTRAP_DVLOG_IS_ON MACH_DVLOG_IS_ON
-
-#define BOOTSTRAP_LOG_STREAM(severity, bootstrap_err) \
-    COMPACT_GOOGLE_LOG_EX_ ## severity(BootstrapLogMessage, \
-                                       bootstrap_err).stream()
-#define BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err) \
-    logging::BootstrapLogMessage(__FILE__, __LINE__, \
-                                 -verbose_level, bootstrap_err).stream()
-
-#define BOOTSTRAP_LOG(severity, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, \
-                                     bootstrap_err), LOG_IS_ON(severity))
-#define BOOTSTRAP_LOG_IF(severity, condition, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
-                LOG_IS_ON(severity) && (condition))
-
-#define BOOTSTRAP_VLOG(verbose_level, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
-                VLOG_IS_ON(verbose_level))
-#define BOOTSTRAP_VLOG_IF(verbose_level, condition, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
-                VLOG_IS_ON(verbose_level) && (condition))
-
-#define BOOTSTRAP_CHECK(condition, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), !(condition)) \
-    << "Check failed: " # condition << ". "
-
-#define BOOTSTRAP_DLOG(severity, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
-                DLOG_IS_ON(severity))
-#define BOOTSTRAP_DLOG_IF(severity, condition, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
-                DLOG_IS_ON(severity) && (condition))
-
-#define BOOTSTRAP_DVLOG(verbose_level, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
-                BOOTSTRAP_DVLOG_IS_ON(verbose_level))
-#define BOOTSTRAP_DVLOG_IF(verbose_level, condition, bootstrap_err) \
-    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
-                BOOTSTRAP_DVLOG_IS_ON(verbose_level) && (condition))
-
-#define BOOTSTRAP_DCHECK(condition, bootstrap_err)        \
-  LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), \
-              DCHECK_IS_ON() && !(condition))             \
-      << "Check failed: " #condition << ". "
-
-#endif  //  BUILDFLAG(USE_BLINK)
-
-#endif  // BASE_MAC_MACH_LOGGING_H_
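
The deleted header is replaced by base/apple/mach_logging.h, which the rewritten #includes elsewhere in this patch reference. Assuming the macros keep the same names there, a typical call site looks like this sketch:

    #include <mach/mach.h>
    #include "base/apple/mach_logging.h"

    bool AllocateReceiveRight(mach_port_t* port) {
      kern_return_t kr =
          mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, port);
      if (kr != KERN_SUCCESS) {
        // MACH_LOG appends the decoded mach_error_string() and error number.
        MACH_LOG(ERROR, kr) << "mach_port_allocate";
        return false;
      }
      return true;
    }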
diff --git a/base/mac/mach_port_rendezvous.cc b/base/mac/mach_port_rendezvous.cc
index 6efa77f..7b3b4c2 100644
--- a/base/mac/mach_port_rendezvous.cc
+++ b/base/mac/mach_port_rendezvous.cc
@@ -9,10 +9,10 @@
 
 #include <utility>
 
+#include "base/apple/foundation_util.h"
+#include "base/apple/mach_logging.h"
 #include "base/containers/buffer_iterator.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mach_logging.h"
 #include "base/mac/scoped_mach_msg_destroy.h"
 #include "base/notreached.h"
 #include "base/strings/stringprintf.h"
@@ -59,11 +59,11 @@
          disposition == MACH_MSG_TYPE_MAKE_SEND_ONCE);
 }
 
-MachRendezvousPort::MachRendezvousPort(mac::ScopedMachSendRight send_right)
+MachRendezvousPort::MachRendezvousPort(apple::ScopedMachSendRight send_right)
     : name_(send_right.release()), disposition_(MACH_MSG_TYPE_MOVE_SEND) {}
 
 MachRendezvousPort::MachRendezvousPort(
-    mac::ScopedMachReceiveRight receive_right)
+    apple::ScopedMachReceiveRight receive_right)
     : name_(receive_right.release()),
       disposition_(MACH_MSG_TYPE_MOVE_RECEIVE) {}
 
@@ -116,13 +116,14 @@
   DCHECK_LT(ports.size(), kMaximumRendezvousPorts);
   DCHECK(!ports.empty());
 
-  ScopedDispatchObject<dispatch_source_t> exit_watcher(dispatch_source_create(
-      DISPATCH_SOURCE_TYPE_PROC, static_cast<uintptr_t>(pid),
-      DISPATCH_PROC_EXIT, dispatch_source_->Queue()));
-  dispatch_source_set_event_handler(exit_watcher, ^{
+  apple::ScopedDispatchObject<dispatch_source_t> exit_watcher(
+      dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC,
+                             static_cast<uintptr_t>(pid), DISPATCH_PROC_EXIT,
+                             dispatch_source_->Queue()));
+  dispatch_source_set_event_handler(exit_watcher.get(), ^{
     OnClientExited(pid);
   });
-  dispatch_resume(exit_watcher);
+  dispatch_resume(exit_watcher.get());
 
   auto it =
       client_data_.emplace(pid, ClientData{std::move(exit_watcher), ports});
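
The exit watcher above is the standard libdispatch process-exit pattern; a standalone sketch with raw dispatch calls (illustration only; the real code wraps the source in apple::ScopedDispatchObject and runs the handler on the server's DispatchSourceMach queue):

    #include <dispatch/dispatch.h>

    #include <cstdint>
    #include <sys/types.h>

    dispatch_source_t WatchForExit(pid_t pid, dispatch_queue_t queue,
                                   dispatch_block_t on_exit) {
      dispatch_source_t source = dispatch_source_create(
          DISPATCH_SOURCE_TYPE_PROC, static_cast<uintptr_t>(pid),
          DISPATCH_PROC_EXIT, queue);
      dispatch_source_set_event_handler(source, on_exit);
      dispatch_resume(source);  // dispatch sources start out suspended
      return source;            // caller releases with dispatch_release()
    }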
@@ -130,7 +131,7 @@
 }
 
 MachPortRendezvousServer::ClientData::ClientData(
-    ScopedDispatchObject<dispatch_source_t> exit_watcher,
+    apple::ScopedDispatchObject<dispatch_source_t> exit_watcher,
     MachPortsForRendezvous ports)
     : exit_watcher(exit_watcher), ports(ports) {}
 
@@ -140,14 +141,14 @@
 
 MachPortRendezvousServer::MachPortRendezvousServer() {
   std::string bootstrap_name =
-      StringPrintf(kBootstrapNameFormat, mac::BaseBundleID(), getpid());
+      StringPrintf(kBootstrapNameFormat, apple::BaseBundleID(), getpid());
   kern_return_t kr = bootstrap_check_in(
       bootstrap_port, bootstrap_name.c_str(),
-      mac::ScopedMachReceiveRight::Receiver(server_port_).get());
+      apple::ScopedMachReceiveRight::Receiver(server_port_).get());
   BOOTSTRAP_CHECK(kr == KERN_SUCCESS, kr)
       << "bootstrap_check_in " << bootstrap_name;
 
-  dispatch_source_ = std::make_unique<DispatchSourceMach>(
+  dispatch_source_ = std::make_unique<apple::DispatchSourceMach>(
       bootstrap_name.c_str(), server_port_.get(), ^{
         HandleRequest();
       });
@@ -277,21 +278,21 @@
   return client;
 }
 
-mac::ScopedMachSendRight MachPortRendezvousClient::TakeSendRight(
+apple::ScopedMachSendRight MachPortRendezvousClient::TakeSendRight(
     MachPortsForRendezvous::key_type key) {
   MachRendezvousPort port = PortForKey(key);
   DCHECK(port.disposition() == 0 ||
          port.disposition() == MACH_MSG_TYPE_PORT_SEND ||
          port.disposition() == MACH_MSG_TYPE_PORT_SEND_ONCE);
-  return mac::ScopedMachSendRight(port.name());
+  return apple::ScopedMachSendRight(port.name());
 }
 
-mac::ScopedMachReceiveRight MachPortRendezvousClient::TakeReceiveRight(
+apple::ScopedMachReceiveRight MachPortRendezvousClient::TakeReceiveRight(
     MachPortsForRendezvous::key_type key) {
   MachRendezvousPort port = PortForKey(key);
   DCHECK(port.disposition() == 0 ||
          port.disposition() == MACH_MSG_TYPE_PORT_RECEIVE);
-  return mac::ScopedMachReceiveRight(port.name());
+  return apple::ScopedMachReceiveRight(port.name());
 }
 
 size_t MachPortRendezvousClient::GetPortCount() {
@@ -301,17 +302,17 @@
 
 // static
 std::string MachPortRendezvousClient::GetBootstrapName() {
-  return StringPrintf(kBootstrapNameFormat, mac::BaseBundleID(), getppid());
+  return StringPrintf(kBootstrapNameFormat, apple::BaseBundleID(), getppid());
 }
 
 bool MachPortRendezvousClient::AcquirePorts() {
   AutoLock lock(lock_);
 
-  mac::ScopedMachSendRight server_port;
+  apple::ScopedMachSendRight server_port;
   std::string bootstrap_name = GetBootstrapName();
   kern_return_t kr = bootstrap_look_up(
       bootstrap_port, const_cast<char*>(bootstrap_name.c_str()),
-      mac::ScopedMachSendRight::Receiver(server_port).get());
+      apple::ScopedMachSendRight::Receiver(server_port).get());
   if (kr != KERN_SUCCESS) {
     BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_look_up " << bootstrap_name;
     return false;
@@ -321,7 +322,7 @@
 }
 
 bool MachPortRendezvousClient::SendRequest(
-    mac::ScopedMachSendRight server_port) {
+    apple::ScopedMachSendRight server_port) {
   const size_t buffer_size = CalculateResponseSize(kMaximumRendezvousPorts) +
                              sizeof(mach_msg_trailer_t);
   auto buffer = std::make_unique<uint8_t[]>(buffer_size);
diff --git a/base/mac/mach_port_rendezvous.h b/base/mac/mach_port_rendezvous.h
index 7d460a3..5c7ed0d 100644
--- a/base/mac/mach_port_rendezvous.h
+++ b/base/mac/mach_port_rendezvous.h
@@ -14,10 +14,10 @@
 #include <memory>
 #include <string>
 
+#include "base/apple/dispatch_source_mach.h"
+#include "base/apple/scoped_dispatch_object.h"
+#include "base/apple/scoped_mach_port.h"
 #include "base/base_export.h"
-#include "base/mac/dispatch_source_mach.h"
-#include "base/mac/scoped_dispatch_object.h"
-#include "base/mac/scoped_mach_port.h"
 #include "base/synchronization/lock.h"
 #include "base/thread_annotations.h"
 
@@ -48,9 +48,9 @@
   // Creates a rendezvous port that allows specifying the specific disposition.
   MachRendezvousPort(mach_port_t name, mach_msg_type_name_t disposition);
   // Creates a rendezvous port for MACH_MSG_TYPE_MOVE_SEND.
-  explicit MachRendezvousPort(mac::ScopedMachSendRight send_right);
+  explicit MachRendezvousPort(apple::ScopedMachSendRight send_right);
   // Creates a rendezvous port for MACH_MSG_TYPE_MOVE_RECEIVE.
-  explicit MachRendezvousPort(mac::ScopedMachReceiveRight receive_right);
+  explicit MachRendezvousPort(apple::ScopedMachReceiveRight receive_right);
 
   // Note that the destructor does not call Destroy() explicitly.
   // To avoid leaking ports, either use dispositions that create rights during
@@ -109,14 +109,14 @@
   friend struct MachPortRendezvousFuzzer;
 
   struct ClientData {
-    ClientData(ScopedDispatchObject<dispatch_source_t> exit_watcher,
+    ClientData(apple::ScopedDispatchObject<dispatch_source_t> exit_watcher,
                MachPortsForRendezvous ports);
     ClientData(ClientData&&);
     ~ClientData();
 
     // A DISPATCH_SOURCE_TYPE_PROC / DISPATCH_PROC_EXIT dispatch source. When
     // the source is triggered, it calls OnClientExited().
-    ScopedDispatchObject<dispatch_source_t> exit_watcher;
+    apple::ScopedDispatchObject<dispatch_source_t> exit_watcher;
 
     MachPortsForRendezvous ports;
   };
@@ -147,10 +147,10 @@
 
   // The Mach receive right for the server. A send right to this port is
   // registered in the bootstrap server.
-  mac::ScopedMachReceiveRight server_port_;
+  apple::ScopedMachReceiveRight server_port_;
 
   // Mach message dispatch source for |server_port_|.
-  std::unique_ptr<DispatchSourceMach> dispatch_source_;
+  std::unique_ptr<apple::DispatchSourceMach> dispatch_source_;
 
   Lock lock_;
   // Association of pid-to-ports.
@@ -175,13 +175,14 @@
   // right exists, or it was already taken, returns an invalid right. Safe to
   // call from any thread. DCHECKs if the right referenced by |key| is not a
   // send or send-once right.
-  mac::ScopedMachSendRight TakeSendRight(MachPortsForRendezvous::key_type key);
+  apple::ScopedMachSendRight TakeSendRight(
+      MachPortsForRendezvous::key_type key);
 
   // Returns the Mach receive right that was registered with |key|. If no such
   // right exists, or it was already taken, returns an invalid right. Safe to
   // call from any thread. DCHECKs if the right referenced by |key| is not a
   // receive right.
-  mac::ScopedMachReceiveRight TakeReceiveRight(
+  apple::ScopedMachReceiveRight TakeReceiveRight(
       MachPortsForRendezvous::key_type key);
 
   // Returns the number of ports in the client. After PerformRendezvous(), this
@@ -201,7 +202,7 @@
   bool AcquirePorts();
 
   // Sends the actual IPC message to |server_port| and parses the reply.
-  bool SendRequest(mac::ScopedMachSendRight server_port)
+  bool SendRequest(apple::ScopedMachSendRight server_port)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   // Returns a MachRendezvousPort for a given key and removes it from the
diff --git a/base/mac/mach_port_rendezvous_fuzzer.cc b/base/mac/mach_port_rendezvous_fuzzer.cc
index c56c1cc..e49cecc 100644
--- a/base/mac/mach_port_rendezvous_fuzzer.cc
+++ b/base/mac/mach_port_rendezvous_fuzzer.cc
@@ -4,8 +4,8 @@
 
 #include "base/mac/mach_port_rendezvous.h"
 
+#include "base/apple/mach_logging.h"
 #include "base/logging.h"
-#include "base/mac/mach_logging.h"
 #include "base/synchronization/lock.h"
 #include "testing/libfuzzer/fuzzers/mach/mach_message_converter.h"
 #include "testing/libfuzzer/proto/lpm_interface.h"
@@ -30,7 +30,7 @@
     base::MachPortRendezvousServer::GetInstance()->client_data_.clear();
   }
 
-  base::mac::ScopedMachSendRight server_send_right;
+  base::apple::ScopedMachSendRight server_send_right;
 };
 
 }  // namespace base
diff --git a/base/mac/mach_port_rendezvous_unittest.cc b/base/mac/mach_port_rendezvous_unittest.cc
index 89c7fbc..ddfb06f 100644
--- a/base/mac/mach_port_rendezvous_unittest.cc
+++ b/base/mac/mach_port_rendezvous_unittest.cc
@@ -8,9 +8,9 @@
 
 #include <utility>
 
+#include "base/apple/foundation_util.h"
+#include "base/apple/mach_logging.h"
 #include "base/at_exit.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mach_logging.h"
 #include "base/strings/stringprintf.h"
 #include "base/test/multiprocess_test.h"
 #include "base/test/test_timeouts.h"
@@ -44,7 +44,7 @@
 
   CHECK_EQ(1u, rendezvous_client->GetPortCount());
 
-  mac::ScopedMachSendRight port =
+  apple::ScopedMachSendRight port =
       rendezvous_client->TakeSendRight(kTestPortKey);
   CHECK(port.is_valid());
 
@@ -66,10 +66,10 @@
   auto* server = MachPortRendezvousServer::GetInstance();
   ASSERT_TRUE(server);
 
-  mac::ScopedMachReceiveRight port;
+  apple::ScopedMachReceiveRight port;
   kern_return_t kr =
       mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
-                         mac::ScopedMachReceiveRight::Receiver(port).get());
+                         apple::ScopedMachReceiveRight::Receiver(port).get());
   ASSERT_EQ(kr, KERN_SUCCESS);
 
   MachRendezvousPort rendezvous_port(port.get(), MACH_MSG_TYPE_MAKE_SEND);
@@ -127,10 +127,10 @@
   auto* server = MachPortRendezvousServer::GetInstance();
   ASSERT_TRUE(server);
 
-  mac::ScopedMachReceiveRight port;
+  apple::ScopedMachReceiveRight port;
   kern_return_t kr =
       mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
-                         mac::ScopedMachReceiveRight::Receiver(port).get());
+                         apple::ScopedMachReceiveRight::Receiver(port).get());
   ASSERT_EQ(kr, KERN_SUCCESS);
 
   MachRendezvousPort rendezvous_port(port.get(), MACH_MSG_TYPE_MAKE_SEND);
@@ -224,7 +224,7 @@
 MULTIPROCESS_TEST_MAIN(FailToRendezvous) {
   // The rendezvous system uses the BaseBundleID to construct the bootstrap
   // server name, so changing it will result in a failure to look it up.
-  base::mac::SetBaseBundleID("org.chromium.totallyfake");
+  base::apple::SetBaseBundleID("org.chromium.totallyfake");
   CHECK_EQ(nullptr, base::MachPortRendezvousClient::GetInstance());
   return 0;
 }
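
Putting the client half of the rendezvous together, a sketch mirroring the tests above (the key constant is hypothetical and error handling is elided):

    #include <cstdint>

    #include "base/mac/mach_port_rendezvous.h"

    constexpr uint32_t kExamplePortKey = 10;  // hypothetical key

    base::apple::ScopedMachSendRight AcquireBrokeredSendRight() {
      auto* client = base::MachPortRendezvousClient::GetInstance();
      if (!client) {
        // bootstrap_look_up of the parent's rendezvous server failed.
        return base::apple::ScopedMachSendRight();
      }
      // Returns an invalid right if nothing was registered under this key.
      return client->TakeSendRight(kExamplePortKey);
    }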
diff --git a/base/mac/scoped_cffiledescriptorref.h b/base/mac/scoped_cffiledescriptorref.h
deleted file mode 100644
index 86955c4..0000000
--- a/base/mac/scoped_cffiledescriptorref.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
-#define BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
-
-#include <CoreFoundation/CoreFoundation.h>
-
-#include "base/scoped_generic.h"
-
-namespace base::mac {
-
-namespace internal {
-
-struct ScopedCFFileDescriptorRefTraits {
-  static CFFileDescriptorRef InvalidValue() { return nullptr; }
-  static void Free(CFFileDescriptorRef ref) {
-    CFFileDescriptorInvalidate(ref);
-    CFRelease(ref);
-  }
-};
-
-}  // namespace internal
-
-// ScopedCFFileDescriptorRef is designed after ScopedCFTypeRef<>. On
-// destruction, it will invalidate the file descriptor.
-// ScopedCFFileDescriptorRef (unlike ScopedCFTypeRef<>) does not support RETAIN
-// semantics, copying, or assignment, as doing so would increase the chances
-// that a file descriptor is invalidated while still in use.
-using ScopedCFFileDescriptorRef =
-    ScopedGeneric<CFFileDescriptorRef,
-                  internal::ScopedCFFileDescriptorRefTraits>;
-
-}  // namespace base::mac
-
-#endif  // BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
diff --git a/base/mac/scoped_cftyperef.h b/base/mac/scoped_cftyperef.h
deleted file mode 100644
index c661ddd..0000000
--- a/base/mac/scoped_cftyperef.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_CFTYPEREF_H_
-#define BASE_MAC_SCOPED_CFTYPEREF_H_
-
-#include <CoreFoundation/CoreFoundation.h>
-
-#include "base/mac/scoped_typeref.h"
-
-namespace base {
-
-// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
-// ownership of a CoreFoundation object: any object that can be represented
-// as a CFTypeRef.  Style deviations here are solely for compatibility with
-// std::unique_ptr<>'s interface, with which everyone is already familiar.
-//
-// By default, ScopedCFTypeRef<> takes ownership of an object (in the
-// constructor or in reset()) by taking over the caller's existing ownership
-// claim.  The caller must own the object it gives to ScopedCFTypeRef<>, and
-// relinquishes an ownership claim to that object.  ScopedCFTypeRef<> does not
-// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
-// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
-// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
-// ownership is not changed.
-
-namespace internal {
-
-template<typename CFT>
-struct ScopedCFTypeRefTraits {
-  static CFT InvalidValue() { return nullptr; }
-  static CFT Retain(CFT object) {
-    CFRetain(object);
-    return object;
-  }
-  static void Release(CFT object) {
-    CFRelease(object);
-  }
-};
-
-}  // namespace internal
-
-template<typename CFT>
-using ScopedCFTypeRef =
-    ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
-
-}  // namespace base
-
-#endif  // BASE_MAC_SCOPED_CFTYPEREF_H_
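
Call sites throughout this patch are rewritten to the base/apple spelling of this scoper. Assuming the moved header keeps the interface described above, the default take-ownership semantics look like this sketch:

    #include <CoreFoundation/CoreFoundation.h>

    #include "base/apple/scoped_cftyperef.h"

    void Example() {
      // A Create-rule object arrives with a +1 retain count; the scoper takes
      // over that ownership and CFRelease()s it when it goes out of scope.
      base::apple::ScopedCFTypeRef<CFStringRef> str(CFStringCreateWithCString(
          kCFAllocatorDefault, "hello", kCFStringEncodingUTF8));

      // Pass the raw reference to C APIs with .get(), as the rewritten call
      // sites in this patch now do.
      CFIndex length = CFStringGetLength(str.get());
      (void)length;
    }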
diff --git a/base/mac/scoped_dispatch_object.h b/base/mac/scoped_dispatch_object.h
deleted file mode 100644
index 206b48e..0000000
--- a/base/mac/scoped_dispatch_object.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2016 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
-#define BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
-
-#include <dispatch/dispatch.h>
-
-#include "base/mac/scoped_typeref.h"
-
-#if __OBJC__
-// In Objective-C ARC, dispatch types are Objective-C types, and must be managed
-// as such with __strong, etc. This header file must not be included in
-// Objective-C code, nor may it be allowed to be recursively included. Use the
-// pimpl pattern to isolate its use in a pure C++ file if needed.
-#error Do not use this file, or allow it to be included, in Objective-C code.
-#endif
-
-namespace base {
-
-namespace internal {
-
-template <typename T>
-struct ScopedDispatchObjectTraits {
-  static constexpr T InvalidValue() { return nullptr; }
-  static T Retain(T object) {
-    dispatch_retain(object);
-    return object;
-  }
-  static void Release(T object) {
-    dispatch_release(object);
-  }
-};
-
-}  // namespace internal
-
-template <typename T>
-using ScopedDispatchObject =
-    ScopedTypeRef<T, internal::ScopedDispatchObjectTraits<T>>;
-
-}  // namespace base
-
-#endif  // BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
diff --git a/base/mac/scoped_ioobject.h b/base/mac/scoped_ioobject.h
index d8db22b..67d5370 100644
--- a/base/mac/scoped_ioobject.h
+++ b/base/mac/scoped_ioobject.h
@@ -7,7 +7,7 @@
 
 #include <IOKit/IOKitLib.h>
 
-#include "base/mac/scoped_typeref.h"
+#include "base/apple/scoped_typeref.h"
 
 namespace base::mac {
 
@@ -27,7 +27,8 @@
 
 // Just like ScopedCFTypeRef but for io_object_t and subclasses.
 template <typename IOT>
-using ScopedIOObject = ScopedTypeRef<IOT, internal::ScopedIOObjectTraits<IOT>>;
+using ScopedIOObject =
+    apple::ScopedTypeRef<IOT, internal::ScopedIOObjectTraits<IOT>>;
 
 }  // namespace base::mac
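
ScopedIOObject keeps its base::mac name but is now built on apple::ScopedTypeRef. The usage pattern is unchanged, as in this sketch (the same shape as the GetModelIdentifier() code removed from mac_util.mm above):

    #include <IOKit/IOKitLib.h>

    #include "base/mac/scoped_ioobject.h"

    bool HavePlatformExpert() {
      base::mac::ScopedIOObject<io_service_t> platform_expert(
          IOServiceGetMatchingService(
              kIOMasterPortDefault,
              IOServiceMatching("IOPlatformExpertDevice")));
      // IOObjectRelease() runs automatically when the scoper is destroyed.
      return platform_expert.get() != IO_OBJECT_NULL;
    }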
 
diff --git a/base/mac/scoped_ioplugininterface.h b/base/mac/scoped_ioplugininterface.h
index dadb39d..4770d88 100644
--- a/base/mac/scoped_ioplugininterface.h
+++ b/base/mac/scoped_ioplugininterface.h
@@ -7,7 +7,7 @@
 
 #include <IOKit/IOKitLib.h>
 
-#include "base/mac/scoped_typeref.h"
+#include "base/apple/scoped_typeref.h"
 
 namespace base::mac {
 
@@ -29,7 +29,7 @@
 // (IOUSBInterfaceStruct and IOUSBDeviceStruct320 in particular).
 template <typename T>
 using ScopedIOPluginInterface =
-    ScopedTypeRef<T**, internal::ScopedIOPluginInterfaceTraits<T**>>;
+    apple::ScopedTypeRef<T**, internal::ScopedIOPluginInterfaceTraits<T**>>;
 
 }  // namespace base::mac
 
diff --git a/base/mac/scoped_mach_port.cc b/base/mac/scoped_mach_port.cc
deleted file mode 100644
index 6dea806..0000000
--- a/base/mac/scoped_mach_port.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/scoped_mach_port.h"
-
-#include "base/mac/mach_logging.h"
-
-namespace base::mac {
-namespace internal {
-
-// static
-void SendRightTraits::Free(mach_port_t port) {
-  kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
-  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
-      << "ScopedMachSendRight mach_port_deallocate";
-}
-
-// static
-void ReceiveRightTraits::Free(mach_port_t port) {
-  kern_return_t kr =
-      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
-  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
-      << "ScopedMachReceiveRight mach_port_mod_refs";
-}
-
-// static
-void PortSetTraits::Free(mach_port_t port) {
-  kern_return_t kr =
-      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_PORT_SET, -1);
-  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
-      << "ScopedMachPortSet mach_port_mod_refs";
-}
-
-}  // namespace internal
-
-bool CreateMachPort(ScopedMachReceiveRight* receive,
-                    ScopedMachSendRight* send,
-                    absl::optional<mach_port_msgcount_t> queue_limit) {
-  mach_port_options_t options{};
-  options.flags = (send != nullptr ? MPO_INSERT_SEND_RIGHT : 0);
-
-  if (queue_limit.has_value()) {
-    options.flags |= MPO_QLIMIT;
-    options.mpl.mpl_qlimit = *queue_limit;
-  }
-
-  kern_return_t kr =
-      mach_port_construct(mach_task_self(), &options, 0,
-                          ScopedMachReceiveRight::Receiver(*receive).get());
-  if (kr != KERN_SUCCESS) {
-    MACH_LOG(ERROR, kr) << "mach_port_construct";
-    return false;
-  }
-
-  // Multiple rights are coalesced to the same name in a task, so assign the
-  // send rights to the same name.
-  if (send) {
-    send->reset(receive->get());
-  }
-
-  return true;
-}
-
-ScopedMachSendRight RetainMachSendRight(mach_port_t port) {
-  kern_return_t kr =
-      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1);
-  if (kr == KERN_SUCCESS)
-    return ScopedMachSendRight(port);
-  MACH_DLOG(ERROR, kr) << "mach_port_mod_refs +1";
-  return {};
-}
-
-}  // namespace base::mac
diff --git a/base/mac/scoped_mach_port.h b/base/mac/scoped_mach_port.h
deleted file mode 100644
index f56929b..0000000
--- a/base/mac/scoped_mach_port.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_MACH_PORT_H_
-#define BASE_MAC_SCOPED_MACH_PORT_H_
-
-#include <mach/mach.h>
-
-#include "base/base_export.h"
-#include "base/scoped_generic.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-
-namespace base::mac {
-
-namespace internal {
-
-struct BASE_EXPORT SendRightTraits {
-  static mach_port_t InvalidValue() {
-    return MACH_PORT_NULL;
-  }
-
-  BASE_EXPORT static void Free(mach_port_t port);
-};
-
-struct BASE_EXPORT ReceiveRightTraits {
-  static mach_port_t InvalidValue() {
-    return MACH_PORT_NULL;
-  }
-
-  BASE_EXPORT static void Free(mach_port_t port);
-};
-
-struct PortSetTraits {
-  static mach_port_t InvalidValue() {
-    return MACH_PORT_NULL;
-  }
-
-  BASE_EXPORT static void Free(mach_port_t port);
-};
-
-}  // namespace internal
-
-// A scoper for handling a Mach port that names a send right. Send rights are
-// reference counted, and this takes ownership of the right on construction
-// and then removes a reference to the right on destruction. If the reference
-// is the last one on the right, the right is deallocated.
-using ScopedMachSendRight =
-    ScopedGeneric<mach_port_t, internal::SendRightTraits>;
-
-// A scoper for handling a Mach port's receive right. There is only one
-// receive right per port. This takes ownership of the receive right on
-// construction and then destroys the right on destruction, turning all
-// outstanding send rights into dead names.
-using ScopedMachReceiveRight =
-    ScopedGeneric<mach_port_t, internal::ReceiveRightTraits>;
-
-// A scoper for handling a Mach port set. A port set can have only one
-// reference. This takes ownership of that single reference on construction and
-// destroys the port set on destruction. Destroying a port set does not destroy
-// the receive rights that are members of the port set.
-using ScopedMachPortSet = ScopedGeneric<mach_port_t, internal::PortSetTraits>;
-
-// Constructs a Mach port receive right and places the result in |receive|.
-// If |send| is non-null, a send right will be created as well and stored
-// there. If |queue_limit| is specified, the receive right will be constructed
-// with the specified queue limit. Returns true on success and false on failure.
-BASE_EXPORT bool CreateMachPort(
-    ScopedMachReceiveRight* receive,
-    ScopedMachSendRight* send,
-    absl::optional<mach_port_msgcount_t> queue_limit = absl::nullopt);
-
-// Increases the user reference count for MACH_PORT_RIGHT_SEND by 1 and returns
-// a new scoper to manage the additional right.
-BASE_EXPORT ScopedMachSendRight RetainMachSendRight(mach_port_t port);
-
-}  // namespace base::mac
-
-#endif  // BASE_MAC_SCOPED_MACH_PORT_H_
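
The scoper types above are used throughout the rendezvous code in this patch via base/apple/scoped_mach_port.h. Assuming CreateMachPort() keeps the signature documented here after the move to base::apple, creating a connected pair of rights is a one-liner:

    #include "base/apple/scoped_mach_port.h"

    bool MakePortPair(base::apple::ScopedMachReceiveRight* receive,
                      base::apple::ScopedMachSendRight* send) {
      // A single mach_port_construct() creates the receive right and, because
      // a send right is requested, a coalesced send right under the same name.
      return base::apple::CreateMachPort(receive, send);
    }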
diff --git a/base/mac/scoped_mach_vm.cc b/base/mac/scoped_mach_vm.cc
deleted file mode 100644
index e05a5f1..0000000
--- a/base/mac/scoped_mach_vm.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/scoped_mach_vm.h"
-
-#include "base/mac/mach_logging.h"
-
-namespace base::mac {
-
-void ScopedMachVM::reset(vm_address_t address, vm_size_t size) {
-  DCHECK_EQ(address % PAGE_SIZE, 0u);
-  DCHECK_EQ(size % PAGE_SIZE, 0u);
-  reset_unaligned(address, size);
-}
-
-void ScopedMachVM::reset_unaligned(vm_address_t address, vm_size_t size) {
-  if (size_) {
-    if (address_ < address) {
-      kern_return_t kr = vm_deallocate(mach_task_self(), address_,
-                                       std::min(size_, address - address_));
-      MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
-    }
-    if (address_ + size_ > address + size) {
-      vm_address_t deallocate_start = std::max(address_, address + size);
-      kern_return_t kr = vm_deallocate(mach_task_self(), deallocate_start,
-                                       address_ + size_ - deallocate_start);
-      MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
-    }
-  }
-
-  address_ = address;
-  size_ = size;
-}
-
-}  // namespace base::mac
diff --git a/base/mac/scoped_mach_vm.h b/base/mac/scoped_mach_vm.h
deleted file mode 100644
index e3359d9..0000000
--- a/base/mac/scoped_mach_vm.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_MACH_VM_H_
-#define BASE_MAC_SCOPED_MACH_VM_H_
-
-#include <mach/mach.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <utility>
-
-#include "base/base_export.h"
-#include "base/check_op.h"
-
-// Use ScopedMachVM to supervise ownership of pages in the current process
-// through the Mach VM subsystem. Pages allocated with vm_allocate can be
-// released when exiting a scope with ScopedMachVM.
-//
-// The Mach VM subsystem operates on a page-by-page basis, and a single VM
-// allocation managed by a ScopedMachVM object may span multiple pages. As far
-// as Mach is concerned, allocated pages may be deallocated individually. This
-// is in contrast to higher-level allocators such as malloc, where the base
-// address of an allocation implies the size of an allocated block.
-// Consequently, it is not sufficient to just pass the base address of an
-// allocation to ScopedMachVM, it also needs to know the size of the
-// allocation. To avoid any confusion, both the base address and size must
-// be page-aligned.
-//
-// When dealing with Mach VM, base addresses will naturally be page-aligned,
-// but user-specified sizes may not be. If there's a concern that a size is
-// not page-aligned, use the mach_vm_round_page macro to correct it.
-//
-// Example:
-//
-//   vm_address_t address = 0;
-//   vm_size_t size = 12345;  // This requested size is not page-aligned.
-//   kern_return_t kr =
-//       vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-//   if (kr != KERN_SUCCESS) {
-//     return false;
-//   }
-//   ScopedMachVM vm_owner(address, mach_vm_round_page(size));
-
-namespace base::mac {
-
-class BASE_EXPORT ScopedMachVM {
- public:
-  explicit ScopedMachVM(vm_address_t address = 0, vm_size_t size = 0)
-      : address_(address), size_(size) {
-    DCHECK_EQ(address % PAGE_SIZE, 0u);
-    DCHECK_EQ(size % PAGE_SIZE, 0u);
-  }
-
-  ScopedMachVM(const ScopedMachVM&) = delete;
-  ScopedMachVM& operator=(const ScopedMachVM&) = delete;
-
-  ~ScopedMachVM() {
-    if (size_) {
-      vm_deallocate(mach_task_self(), address_, size_);
-    }
-  }
-
-  // Resets the scoper to manage a new memory region. Both |address| and |size|
-  // must be page-aligned. If the new region is a smaller subset of the
-  // existing region (i.e. the new and old regions overlap), the non-
-  // overlapping part of the old region is deallocated.
-  void reset(vm_address_t address = 0, vm_size_t size = 0);
-
-  // Like reset() but does not DCHECK that |address| and |size| are page-
-  // aligned.
-  void reset_unaligned(vm_address_t address, vm_size_t size);
-
-  vm_address_t address() const {
-    return address_;
-  }
-
-  vm_size_t size() const {
-    return size_;
-  }
-
-  void swap(ScopedMachVM& that) {
-    std::swap(address_, that.address_);
-    std::swap(size_, that.size_);
-  }
-
-  void release() {
-    address_ = 0;
-    size_ = 0;
-  }
-
- private:
-  vm_address_t address_;
-  vm_size_t size_;
-};
-
-}  // namespace base::mac
-
-#endif  // BASE_MAC_SCOPED_MACH_VM_H_
diff --git a/base/mac/scoped_mach_vm_unittest.cc b/base/mac/scoped_mach_vm_unittest.cc
deleted file mode 100644
index 7611c98..0000000
--- a/base/mac/scoped_mach_vm_unittest.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2019 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/scoped_mach_vm.h"
-
-#include <mach/mach.h>
-
-#include "base/memory/page_size.h"
-#include "base/test/gtest_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// Note: This test CANNOT be run multiple times within the same process (e.g.
-// with --gtest_repeat). Allocating and deallocating in quick succession, even
-// with different sizes, will typically result in the kernel returning the same
-// address. If the allocation pattern is small->large->small, the second small
-// allocation will report being part of the previously-deallocated large region.
-// That will cause the GetRegionInfo() expectations to fail.
-
-namespace base::mac {
-namespace {
-
-void GetRegionInfo(vm_address_t* region_address, vm_size_t* region_size) {
-  vm_region_basic_info_64 region_info;
-  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
-  mach_port_t object;
-  kern_return_t kr = vm_region_64(
-      mach_task_self(), region_address, region_size, VM_REGION_BASIC_INFO_64,
-      reinterpret_cast<vm_region_info_t>(&region_info), &count, &object);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-}
-
-TEST(ScopedMachVMTest, Basic) {
-  vm_address_t address;
-  vm_size_t size = base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  ScopedMachVM scoper(address, size);
-  EXPECT_EQ(address, scoper.address());
-  EXPECT_EQ(size, scoper.size());
-
-  // Test the initial region.
-  vm_address_t region_address = address;
-  vm_size_t region_size;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(1u * base::GetPageSize(), region_size);
-
-  {
-    ScopedMachVM scoper2;
-    EXPECT_EQ(0u, scoper2.address());
-    EXPECT_EQ(0u, scoper2.size());
-
-    scoper.swap(scoper2);
-
-    EXPECT_EQ(address, scoper2.address());
-    EXPECT_EQ(size, scoper2.size());
-
-    EXPECT_EQ(0u, scoper.address());
-    EXPECT_EQ(0u, scoper.size());
-  }
-
-  // After deallocation, the kernel will return the next highest address.
-  region_address = address;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_LT(address, region_address);
-}
-
-TEST(ScopedMachVMTest, Reset) {
-  vm_address_t address;
-  vm_size_t size = base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  ScopedMachVM scoper(address, size);
-
-  // Test the initial region.
-  vm_address_t region_address = address;
-  vm_size_t region_size;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(1u * base::GetPageSize(), region_size);
-
-  scoper.reset();
-
-  // After deallocation, the kernel will return the next highest address.
-  region_address = address;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_LT(address, region_address);
-}
-
-TEST(ScopedMachVMTest, ResetSmallerAddress) {
-  vm_address_t address;
-  vm_size_t size = 2 * base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  ScopedMachVM scoper(address, base::GetPageSize());
-
-  // Test the initial region.
-  vm_address_t region_address = address;
-  vm_size_t region_size;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(2u * base::GetPageSize(), region_size);
-
-  // This frees the first page (address..address + base::GetPageSize()), which
-  // is currently held by the scoper.
-  scoper.reset(address + base::GetPageSize(), base::GetPageSize());
-
-  // Verify that the region is now only one page.
-  region_address = address;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(address + base::GetPageSize(), region_address);
-  EXPECT_EQ(1u * base::GetPageSize(), region_size);
-}
-
-TEST(ScopedMachVMTest, ResetLargerAddressAndSize) {
-  vm_address_t address;
-  vm_size_t size = 3 * base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  // Test the initial region.
-  vm_address_t region_address = address;
-  vm_size_t region_size;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(3u * base::GetPageSize(), region_size);
-
-  ScopedMachVM scoper(address + 2 * base::GetPageSize(), base::GetPageSize());
-  // Expand the region to be larger.
-  scoper.reset(address, size);
-
-  // Verify that the region is still three pages.
-  region_address = address;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(3u * base::GetPageSize(), region_size);
-}
-
-TEST(ScopedMachVMTest, ResetLargerAddress) {
-  vm_address_t address;
-  vm_size_t size = 6 * base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  // Test the initial region.
-  vm_address_t region_address = address;
-  vm_size_t region_size;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(KERN_SUCCESS, kr);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(6u * base::GetPageSize(), region_size);
-
-  ScopedMachVM scoper(address + 3 * base::GetPageSize(),
-                      3 * base::GetPageSize());
-
-  // Shift the region by three pages; the last three pages should be
-  // deallocated, while keeping the first three.
-  scoper.reset(address, 3 * base::GetPageSize());
-
-  // Verify that the region is just three pages.
-  region_address = address;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(3u * base::GetPageSize(), region_size);
-}
-
-TEST(ScopedMachVMTest, ResetUnaligned) {
-  vm_address_t address;
-  vm_size_t size = 2 * base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  ScopedMachVM scoper;
-
-  // Test the initial region.
-  vm_address_t region_address = address;
-  vm_size_t region_size;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(2u * base::GetPageSize(), region_size);
-
-  // Initialize with unaligned size.
-  scoper.reset_unaligned(address + base::GetPageSize(),
-                         base::GetPageSize() - 3);
-  // Reset with another unaligned size.
-  scoper.reset_unaligned(address + base::GetPageSize(),
-                         base::GetPageSize() - 11);
-
-  // The entire unaligned page gets deallocated.
-  region_address = address;
-  GetRegionInfo(&region_address, &region_size);
-  EXPECT_EQ(address, region_address);
-  EXPECT_EQ(1u * base::GetPageSize(), region_size);
-
-  // Reset with the remaining page.
-  scoper.reset_unaligned(address, base::GetPageSize());
-}
-
-#if DCHECK_IS_ON()
-
-TEST(ScopedMachVMTest, ResetMustBeAligned) {
-  vm_address_t address;
-  vm_size_t size = 2 * base::GetPageSize();
-  kern_return_t kr =
-      vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
-  ASSERT_EQ(KERN_SUCCESS, kr);
-
-  ScopedMachVM scoper;
-  EXPECT_DCHECK_DEATH(scoper.reset(address, base::GetPageSize() + 1));
-}
-
-#endif  // DCHECK_IS_ON()
-
-}  // namespace
-}  // namespace base::mac
diff --git a/base/mac/scoped_nsautorelease_pool.cc b/base/mac/scoped_nsautorelease_pool.cc
deleted file mode 100644
index b9b28b7..0000000
--- a/base/mac/scoped_nsautorelease_pool.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2010 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/scoped_nsautorelease_pool.h"
-
-// Note that this uses the direct runtime interface to the autorelease pool.
-// https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
-// This is so this can work when compiled for ARC.
-
-extern "C" {
-void* objc_autoreleasePoolPush(void);
-void objc_autoreleasePoolPop(void* pool);
-}
-
-namespace base::mac {
-
-ScopedNSAutoreleasePool::ScopedNSAutoreleasePool()
-    : autorelease_pool_(objc_autoreleasePoolPush()) {}
-
-ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
-  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-  objc_autoreleasePoolPop(autorelease_pool_);
-}
-
-// Cycle the internal pool, allowing everything there to get cleaned up and
-// start anew.
-void ScopedNSAutoreleasePool::Recycle() {
-  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-  objc_autoreleasePoolPop(autorelease_pool_);
-  autorelease_pool_ = objc_autoreleasePoolPush();
-}
-
-}  // namespace base::mac
diff --git a/base/mac/scoped_nsautorelease_pool.h b/base/mac/scoped_nsautorelease_pool.h
deleted file mode 100644
index d556cd5f..0000000
--- a/base/mac/scoped_nsautorelease_pool.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2011 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
-#define BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
-
-#include "base/base_export.h"
-#include "base/memory/raw_ptr_exclusion.h"
-#include "base/threading/thread_checker.h"
-
-namespace base::mac {
-
-// ScopedNSAutoreleasePool creates an autorelease pool when instantiated and
-// pops it when destroyed.  This allows an autorelease pool to be maintained in
-// ordinary C++ code without bringing in any direct Objective-C dependency.
-//
-// Before using, please be aware that the semantics of autorelease pools do not
-// match the semantics of a C++ class. In particular, recycling or destructing a
-// pool lower on the stack destroys all pools higher on the stack, which does
-// not mesh well with the existence of C++ objects for each pool.
-//
-// TODO(https://crbug.com/1424190): Enforce stack-only use via the
-// STACK_ALLOCATED annotation.
-//
-// Use this class only in C++ code; use @autoreleasepool in Obj-C(++) code.
-
-class BASE_EXPORT ScopedNSAutoreleasePool {
- public:
-  ScopedNSAutoreleasePool();
-
-  ScopedNSAutoreleasePool(const ScopedNSAutoreleasePool&) = delete;
-  ScopedNSAutoreleasePool& operator=(const ScopedNSAutoreleasePool&) = delete;
-  ScopedNSAutoreleasePool(ScopedNSAutoreleasePool&&) = delete;
-  ScopedNSAutoreleasePool& operator=(ScopedNSAutoreleasePool&&) = delete;
-
-  ~ScopedNSAutoreleasePool();
-
-  // Clear out the pool in case its position on the stack causes it to be alive
-  // for long periods of time (such as the entire length of the app). Only use
-  // then when you're certain the items currently in the pool are no longer
-  // needed.
-  void Recycle();
-
- private:
-  // This field is not a raw_ptr<> because it is a pointer to an Objective-C
-  // object.
-  RAW_PTR_EXCLUSION void* autorelease_pool_ GUARDED_BY_CONTEXT(thread_checker_);
-
-  THREAD_CHECKER(thread_checker_);
-};
-
-}  // namespace base::mac
-
-#endif  // BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
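
For reference, a minimal sketch of the Recycle() pattern described in the
deleted header above, for a pool that would otherwise stay alive for a long
time in pure C++ code (RunWorkLoop() and DoOneUnitOfWork() are hypothetical):

    #include "base/mac/scoped_nsautorelease_pool.h"

    void DoOneUnitOfWork();  // Hypothetical; may autorelease ObjC objects.

    void RunWorkLoop() {
      base::mac::ScopedNSAutoreleasePool pool;
      for (int i = 0; i < 1000; ++i) {
        DoOneUnitOfWork();
        // Drain everything autoreleased so far instead of letting it
        // accumulate for the lifetime of the loop.
        pool.Recycle();
      }
    }  // The destructor pops the pool.
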
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
deleted file mode 100644
index 4acf210..0000000
--- a/base/mac/scoped_nsobject.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_NSOBJECT_H_
-#define BASE_MAC_SCOPED_NSOBJECT_H_
-
-// Include NSObject.h directly because Foundation.h pulls in many dependencies.
-// (Approx 100k lines of code versus 1.5k for NSObject.h). scoped_nsobject gets
-// singled out because it is most typically included from other header files.
-#import <Foundation/NSObject.h>
-
-#include <type_traits>
-
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/mac/scoped_typeref.h"
-
-#if defined(__has_feature) && __has_feature(objc_arc)
-#error "Do not use scoped_nsobject in ARC code; use __strong instead."
-#endif
-
-@class NSAutoreleasePool;
-
-namespace base {
-
-// scoped_nsobject<> is patterned after std::unique_ptr<>, but maintains
-// ownership of an NSObject subclass object.  Style deviations here are solely
-// for compatibility with std::unique_ptr<>'s interface, with which everyone is
-// already familiar.
-//
-// scoped_nsobject<> takes ownership of an object (in the constructor or in
-// reset()) by taking over the caller's existing ownership claim.  The caller
-// must own the object it gives to scoped_nsobject<>, and relinquishes an
-// ownership claim to that object.  scoped_nsobject<> does not call -retain,
-// callers have to call this manually if appropriate.
-//
-// scoped_nsprotocol<> has the same behavior as scoped_nsobject, but can be used
-// with protocols.
-//
-// scoped_nsobject<> is not to be used for NSAutoreleasePools. For C++ code use
-// ScopedNSAutoreleasePool; for Objective-C(++) code use @autoreleasepool
-// instead. We check for bad uses of scoped_nsobject and NSAutoreleasePool at
-// compile time with a template specialization (see below).
-
-namespace internal {
-
-template <typename NST>
-struct ScopedNSProtocolTraits {
-  static NST InvalidValue() { return nil; }
-  static NST Retain(NST nst) { return [nst retain]; }
-  static void Release(NST nst) { [nst release]; }
-};
-
-}  // namespace internal
-
-template <typename NST>
-class scoped_nsprotocol
-    : public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
- public:
-  using ScopedTypeRef<NST,
-                      internal::ScopedNSProtocolTraits<NST>>::ScopedTypeRef;
-
-  // Shift reference to the autorelease pool to be released later.
-  NST autorelease() { return [this->release() autorelease]; }
-};
-
-// Free functions
-template <class C>
-void swap(scoped_nsprotocol<C>& p1, scoped_nsprotocol<C>& p2) {
-  p1.swap(p2);
-}
-
-template <class C>
-bool operator==(C p1, const scoped_nsprotocol<C>& p2) {
-  return p1 == p2.get();
-}
-
-template <class C>
-bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
-  return p1 != p2.get();
-}
-
-template <typename NST>
-class scoped_nsobject : public scoped_nsprotocol<NST*> {
- public:
-  using scoped_nsprotocol<NST*>::scoped_nsprotocol;
-
-  static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
-                "Use @autoreleasepool instead");
-};
-
-// Specialization to make scoped_nsobject<id> work.
-template<>
-class scoped_nsobject<id> : public scoped_nsprotocol<id> {
- public:
-  using scoped_nsprotocol<id>::scoped_nsprotocol;
-};
-
-}  // namespace base
-
-#endif  // BASE_MAC_SCOPED_NSOBJECT_H_
diff --git a/base/mac/scoped_nsobject_unittest.mm b/base/mac/scoped_nsobject_unittest.mm
deleted file mode 100644
index c6b1bde..0000000
--- a/base/mac/scoped_nsobject_unittest.mm
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/mac/scoped_nsobject.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(__has_feature) && __has_feature(objc_arc)
-#error "This file must not be compiled with ARC."
-#endif
-
-namespace {
-
-TEST(ScopedNSObjectTest, ScopedNSObject) {
-  base::scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
-  ASSERT_TRUE(p1.get());
-  ASSERT_EQ(1u, [p1 retainCount]);
-  base::scoped_nsobject<NSObject> p2(p1);
-  ASSERT_EQ(p1.get(), p2.get());
-  ASSERT_EQ(2u, [p1 retainCount]);
-  p2.reset();
-  ASSERT_EQ(nil, p2.get());
-  ASSERT_EQ(1u, [p1 retainCount]);
-  {
-    base::scoped_nsobject<NSObject> p3 = p1;
-    ASSERT_EQ(p1.get(), p3.get());
-    ASSERT_EQ(2u, [p1 retainCount]);
-    @autoreleasepool {
-      p3 = p1;
-    }
-    ASSERT_EQ(p1.get(), p3.get());
-    ASSERT_EQ(2u, [p1 retainCount]);
-  }
-  ASSERT_EQ(1u, [p1 retainCount]);
-  base::scoped_nsobject<NSObject> p4([p1.get() retain]);
-  ASSERT_EQ(2u, [p1 retainCount]);
-  ASSERT_TRUE(p1 == p1.get());
-  ASSERT_TRUE(p1 == p1);
-  ASSERT_FALSE(p1 != p1);
-  ASSERT_FALSE(p1 != p1.get());
-  base::scoped_nsobject<NSObject> p5([[NSObject alloc] init]);
-  ASSERT_TRUE(p1 != p5);
-  ASSERT_TRUE(p1 != p5.get());
-  ASSERT_FALSE(p1 == p5);
-  ASSERT_FALSE(p1 == p5.get());
-
-  base::scoped_nsobject<NSObject> p6 = p1;
-  ASSERT_EQ(3u, [p6 retainCount]);
-  @autoreleasepool {
-    p6.autorelease();
-    ASSERT_EQ(nil, p6.get());
-    ASSERT_EQ(3u, [p1 retainCount]);
-  }
-  ASSERT_EQ(2u, [p1 retainCount]);
-
-  base::scoped_nsobject<NSObject> p7([NSObject new]);
-  base::scoped_nsobject<NSObject> p8(std::move(p7));
-  ASSERT_TRUE(p8);
-  ASSERT_EQ(1u, [p8 retainCount]);
-  ASSERT_FALSE(p7.get());
-}
-
-// Instantiating scoped_nsobject<> with T=NSAutoreleasePool should trip a
-// static_assert.
-#if 0
-TEST(ScopedNSObjectTest, FailToCreateScopedNSObjectAutoreleasePool) {
-  base::scoped_nsobject<NSAutoreleasePool> pool;
-}
-#endif
-
-TEST(ScopedNSObjectTest, ScopedNSObjectInContainer) {
-  base::scoped_nsobject<id> p([[NSObject alloc] init]);
-  ASSERT_TRUE(p.get());
-  ASSERT_EQ(1u, [p retainCount]);
-  {
-    std::vector<base::scoped_nsobject<id>> objects;
-    objects.push_back(p);
-    ASSERT_EQ(2u, [p retainCount]);
-    ASSERT_EQ(p.get(), objects[0].get());
-    objects.push_back(base::scoped_nsobject<id>([[NSObject alloc] init]));
-    ASSERT_TRUE(objects[1].get());
-    ASSERT_EQ(1u, [objects[1] retainCount]);
-  }
-  ASSERT_EQ(1u, [p retainCount]);
-}
-
-TEST(ScopedNSObjectTest, ScopedNSObjectFreeFunctions) {
-  base::scoped_nsobject<id> p1([[NSObject alloc] init]);
-  id o1 = p1.get();
-  ASSERT_TRUE(o1 == p1);
-  ASSERT_FALSE(o1 != p1);
-  base::scoped_nsobject<id> p2([[NSObject alloc] init]);
-  ASSERT_TRUE(o1 != p2);
-  ASSERT_FALSE(o1 == p2);
-  id o2 = p2.get();
-  swap(p1, p2);
-  ASSERT_EQ(o2, p1.get());
-  ASSERT_EQ(o1, p2.get());
-}
-
-TEST(ScopedNSObjectTest, ResetWithAnotherScopedNSObject) {
-  base::scoped_nsobject<id> p1([[NSObject alloc] init]);
-  id o1 = p1.get();
-
-  id o2 = nil;
-  {
-    base::scoped_nsobject<id> p2([[NSObject alloc] init]);
-    o2 = p2.get();
-    p1.reset(p2);
-    EXPECT_EQ(2u, [p1 retainCount]);
-  }
-
-  EXPECT_NE(o1, p1.get());
-  EXPECT_EQ(o2, p1.get());
-  EXPECT_NE(p1.get(), nil);
-
-  EXPECT_EQ(1u, [p1 retainCount]);
-}
-
-}  // namespace
diff --git a/base/mac/scoped_objc_class_swizzler.h b/base/mac/scoped_objc_class_swizzler.h
deleted file mode 100644
index 5676741..0000000
--- a/base/mac/scoped_objc_class_swizzler.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_OBJC_CLASS_SWIZZLER_H_
-#define BASE_MAC_SCOPED_OBJC_CLASS_SWIZZLER_H_
-
-#import <objc/runtime.h>
-
-#include "base/base_export.h"
-
-namespace base::mac {
-
-// Within a given scope, swaps method implementations of a class interface, or
-// between two class interfaces. The argument and return types must match.
-class BASE_EXPORT ScopedObjCClassSwizzler {
- public:
-  // Given two classes that each respond to |selector|, swap the implementations
-  // of those methods.
-  ScopedObjCClassSwizzler(Class target, Class source, SEL selector);
-
-  // Given two selectors on the same class interface, |target| (e.g. via
-  // inheritance or categories), swap the implementations of methods |original|
-  // and |alternate|.
-  ScopedObjCClassSwizzler(Class target, SEL original, SEL alternate);
-
-  ScopedObjCClassSwizzler(const ScopedObjCClassSwizzler&) = delete;
-  ScopedObjCClassSwizzler& operator=(const ScopedObjCClassSwizzler&) = delete;
-
-  ~ScopedObjCClassSwizzler();
-
-  // Return a callable function pointer for the replaced method. To call this
-  // from the replacing function, the first two arguments should be |self| and
-  // |_cmd|. These are followed by the (variadic) method arguments.
-  IMP GetOriginalImplementation() const;
-
-  // Invoke the original function directly, optionally with some arguments.
-  // Prefer this to hanging onto pointers to the original implementation
-  // function or to casting the result of GetOriginalImplementation() yourself.
-  template <typename Ret, typename... Args>
-  Ret InvokeOriginal(id receiver, SEL selector, Args... args) const {
-    auto func = reinterpret_cast<Ret (*)(id, SEL, Args...)>(
-        GetOriginalImplementation());
-    return func(receiver, selector, args...);
-  }
-
- private:
-  // Delegated constructor.
-  void Init(Class target, Class source, SEL original, SEL alternate);
-
-  Method old_selector_impl_;
-  Method new_selector_impl_;
-};
-
-}  // namespace base::mac
-
-#endif  // BASE_MAC_SCOPED_OBJC_CLASS_SWIZZLER_H_
diff --git a/base/mac/scoped_objc_class_swizzler.mm b/base/mac/scoped_objc_class_swizzler.mm
deleted file mode 100644
index 0a085f5..0000000
--- a/base/mac/scoped_objc_class_swizzler.mm
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import "base/mac/scoped_objc_class_swizzler.h"
-
-#include <string.h>
-
-#include "base/check_op.h"
-
-namespace base::mac {
-
-ScopedObjCClassSwizzler::ScopedObjCClassSwizzler(Class target,
-                                                 Class source,
-                                                 SEL selector)
-    : old_selector_impl_(nullptr), new_selector_impl_(nullptr) {
-  Init(target, source, selector, selector);
-}
-
-ScopedObjCClassSwizzler::ScopedObjCClassSwizzler(Class target,
-                                                 SEL original,
-                                                 SEL alternate)
-    : old_selector_impl_(nullptr), new_selector_impl_(nullptr) {
-  Init(target, target, original, alternate);
-}
-
-ScopedObjCClassSwizzler::~ScopedObjCClassSwizzler() {
-  if (old_selector_impl_ && new_selector_impl_)
-    method_exchangeImplementations(old_selector_impl_, new_selector_impl_);
-}
-
-IMP ScopedObjCClassSwizzler::GetOriginalImplementation() const {
-  // Note that while the swizzle is in effect the "new" method is actually
-  // pointing to the original implementation, since they have been swapped.
-  return method_getImplementation(new_selector_impl_);
-}
-
-void ScopedObjCClassSwizzler::Init(Class target,
-                                   Class source,
-                                   SEL original,
-                                   SEL alternate) {
-  old_selector_impl_ = class_getInstanceMethod(target, original);
-  new_selector_impl_ = class_getInstanceMethod(source, alternate);
-  if (!old_selector_impl_ && !new_selector_impl_) {
-    // Try class methods.
-    old_selector_impl_ = class_getClassMethod(target, original);
-    new_selector_impl_ = class_getClassMethod(source, alternate);
-  }
-
-  DCHECK(old_selector_impl_);
-  DCHECK(new_selector_impl_);
-  if (!old_selector_impl_ || !new_selector_impl_)
-    return;
-
-  // The argument and return types must match exactly.
-  const char* old_types = method_getTypeEncoding(old_selector_impl_);
-  const char* new_types = method_getTypeEncoding(new_selector_impl_);
-  DCHECK(old_types);
-  DCHECK(new_types);
-  DCHECK_EQ(0, strcmp(old_types, new_types));
-  if (!old_types || !new_types || strcmp(old_types, new_types)) {
-    old_selector_impl_ = new_selector_impl_ = nullptr;
-    return;
-  }
-
-  method_exchangeImplementations(old_selector_impl_, new_selector_impl_);
-}
-
-}  // namespace base::mac
diff --git a/base/mac/scoped_objc_class_swizzler_unittest.mm b/base/mac/scoped_objc_class_swizzler_unittest.mm
deleted file mode 100644
index 7138e71..0000000
--- a/base/mac/scoped_objc_class_swizzler_unittest.mm
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import "base/mac/scoped_objc_class_swizzler.h"
-
-#import <Foundation/Foundation.h>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-@interface ObjCClassSwizzlerTestOne : NSObject
-+ (NSInteger)function;
-- (NSInteger)method;
-- (NSInteger)modifier;
-@end
-
-@interface ObjCClassSwizzlerTestTwo : NSObject
-+ (NSInteger)function;
-- (NSInteger)method;
-- (NSInteger)modifier;
-@end
-
-@implementation ObjCClassSwizzlerTestOne : NSObject
-
-+ (NSInteger)function {
-  return 10;
-}
-
-- (NSInteger)method {
-  // Multiply by a modifier to ensure |self| in a swizzled implementation
-  // refers to the original object.
-  return 1 * [self modifier];
-}
-
-- (NSInteger)modifier {
-  return 3;
-}
-
-@end
-
-@implementation ObjCClassSwizzlerTestTwo : NSObject
-
-+ (NSInteger)function {
-  return 20;
-}
-
-- (NSInteger)method {
-  return 2 * [self modifier];
-}
-
-- (NSInteger)modifier {
-  return 7;
-}
-
-@end
-
-@interface ObjCClassSwizzlerTestOne (AlternateCategory)
-- (NSInteger)alternate;
-@end
-
-@implementation ObjCClassSwizzlerTestOne (AlternateCategory)
-- (NSInteger)alternate {
-  return 3 * [self modifier];
-}
-@end
-
-@interface ObjCClassSwizzlerTestOneChild : ObjCClassSwizzlerTestOne
-- (NSInteger)childAlternate;
-@end
-
-@implementation ObjCClassSwizzlerTestOneChild
-- (NSInteger)childAlternate {
-  return 5 * [self modifier];
-}
-@end
-
-namespace base::mac {
-
-TEST(ObjCClassSwizzlerTest, SwizzleInstanceMethods) {
-  ObjCClassSwizzlerTestOne* object_one =
-      [[ObjCClassSwizzlerTestOne alloc] init];
-  ObjCClassSwizzlerTestTwo* object_two =
-      [[ObjCClassSwizzlerTestTwo alloc] init];
-  EXPECT_EQ(3, [object_one method]);
-  EXPECT_EQ(14, [object_two method]);
-
-  {
-    base::mac::ScopedObjCClassSwizzler swizzler(
-        [ObjCClassSwizzlerTestOne class],
-        [ObjCClassSwizzlerTestTwo class],
-        @selector(method));
-    EXPECT_EQ(6, [object_one method]);
-    EXPECT_EQ(7, [object_two method]);
-
-    EXPECT_EQ(3, swizzler.InvokeOriginal<int>(object_one, @selector(method)));
-  }
-
-  EXPECT_EQ(3, [object_one method]);
-  EXPECT_EQ(14, [object_two method]);
-}
-
-TEST(ObjCClassSwizzlerTest, SwizzleClassMethods) {
-  EXPECT_EQ(10, [ObjCClassSwizzlerTestOne function]);
-  EXPECT_EQ(20, [ObjCClassSwizzlerTestTwo function]);
-
-  {
-    base::mac::ScopedObjCClassSwizzler swizzler(
-        [ObjCClassSwizzlerTestOne class],
-        [ObjCClassSwizzlerTestTwo class],
-        @selector(function));
-    EXPECT_EQ(20, [ObjCClassSwizzlerTestOne function]);
-    EXPECT_EQ(10, [ObjCClassSwizzlerTestTwo function]);
-
-    EXPECT_EQ(10, swizzler.InvokeOriginal<int>([ObjCClassSwizzlerTestOne class],
-                                               @selector(function)));
-  }
-
-  EXPECT_EQ(10, [ObjCClassSwizzlerTestOne function]);
-  EXPECT_EQ(20, [ObjCClassSwizzlerTestTwo function]);
-}
-
-TEST(ObjCClassSwizzlerTest, SwizzleViaCategory) {
-  ObjCClassSwizzlerTestOne* object_one =
-      [[ObjCClassSwizzlerTestOne alloc] init];
-  EXPECT_EQ(3, [object_one method]);
-
-  {
-    base::mac::ScopedObjCClassSwizzler swizzler(
-        [ObjCClassSwizzlerTestOne class],
-        @selector(method),
-        @selector(alternate));
-    EXPECT_EQ(9, [object_one method]);
-
-    EXPECT_EQ(3, swizzler.InvokeOriginal<int>(object_one, @selector(method)));
-  }
-
-  EXPECT_EQ(3, [object_one method]);
-}
-
-TEST(ObjCClassSwizzlerTest, SwizzleViaInheritance) {
-  ObjCClassSwizzlerTestOneChild* child =
-      [[ObjCClassSwizzlerTestOneChild alloc] init];
-  EXPECT_EQ(3, [child method]);
-
-  {
-    base::mac::ScopedObjCClassSwizzler swizzler(
-        [ObjCClassSwizzlerTestOneChild class],
-        @selector(method),
-        @selector(childAlternate));
-    EXPECT_EQ(15, [child method]);
-
-    EXPECT_EQ(3, swizzler.InvokeOriginal<int>(child, @selector(method)));
-  }
-
-  EXPECT_EQ(3, [child method]);
-}
-
-}  // namespace base::mac
diff --git a/base/mac/scoped_sending_event.h b/base/mac/scoped_sending_event.h
index d45cbd0..34e87d7 100644
--- a/base/mac/scoped_sending_event.h
+++ b/base/mac/scoped_sending_event.h
@@ -6,7 +6,7 @@
 #define BASE_MAC_SCOPED_SENDING_EVENT_H_
 
 #include "base/base_export.h"
-#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/message_pump_apple.h"
 
 // Nested event loops can pump IPC messages, including
 // script-initiated tab closes, which could release objects that the
diff --git a/base/mac/scoped_typeref.h b/base/mac/scoped_typeref.h
deleted file mode 100644
index ac72cfd..0000000
--- a/base/mac/scoped_typeref.h
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_TYPEREF_H_
-#define BASE_MAC_SCOPED_TYPEREF_H_
-
-#include "base/check.h"
-#include "base/memory/scoped_policy.h"
-
-namespace base {
-
-// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
-// of a reference to any type that is maintained by Retain and Release methods.
-//
-// The Traits structure must provide the Retain and Release methods for type T.
-// A default ScopedTypeRefTraits is used but not defined, and should be defined
-// for each type to use this interface. For example, an appropriate definition
-// of ScopedTypeRefTraits for CGLContextObj would be:
-//
-//   template<>
-//   struct ScopedTypeRefTraits<CGLContextObj> {
-//     static CGLContextObj InvalidValue() { return nullptr; }
-//     static CGLContextObj Retain(CGLContextObj object) {
-//       CGLContextRetain(object);
-//       return object;
-//     }
-//     static void Release(CGLContextObj object) { CGLContextRelease(object); }
-//   };
-//
-// For the many types that have pass-by-pointer create functions, the function
-// InitializeInto() is provided to allow direct initialization and assumption
-// of ownership of the object. For example, continuing to use the above
-// CGLContextObj specialization:
-//
-//   base::ScopedTypeRef<CGLContextObj> context;
-//   CGLCreateContext(pixel_format, share_group, context.InitializeInto());
-//
-// For initialization with an existing object, the caller may specify whether
-// the ScopedTypeRef<> being initialized is assuming the caller's existing
-// ownership of the object (and should not call Retain in initialization) or if
-// it should not assume this ownership and must create its own (by calling
-// Retain in initialization). This behavior is based on the |policy| parameter,
-// with |ASSUME| for the former and |RETAIN| for the latter. The default policy
-// is to |ASSUME|.
-
-template<typename T>
-struct ScopedTypeRefTraits;
-
-template<typename T, typename Traits = ScopedTypeRefTraits<T>>
-class ScopedTypeRef {
- public:
-  using element_type = T;
-
-  explicit constexpr ScopedTypeRef(
-      element_type object = Traits::InvalidValue(),
-      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
-      : object_(object) {
-    if (object_ && policy == base::scoped_policy::RETAIN)
-      object_ = Traits::Retain(object_);
-  }
-
-  ScopedTypeRef(const ScopedTypeRef<T, Traits>& that)
-      : object_(that.object_) {
-    if (object_)
-      object_ = Traits::Retain(object_);
-  }
-
-  // This allows passing an object to a function that takes its superclass.
-  template <typename R, typename RTraits>
-  explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
-      : object_(that_as_subclass.get()) {
-    if (object_)
-      object_ = Traits::Retain(object_);
-  }
-
-  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
-    that.object_ = Traits::InvalidValue();
-  }
-
-  ~ScopedTypeRef() {
-    if (object_)
-      Traits::Release(object_);
-  }
-
-  ScopedTypeRef& operator=(const ScopedTypeRef<T, Traits>& that) {
-    reset(that.get(), base::scoped_policy::RETAIN);
-    return *this;
-  }
-
-  // This is to be used only to take ownership of objects that are created
-  // by pass-by-pointer create functions. To enforce this, the object must be
-  // reset to NULL before this may be used.
-  [[nodiscard]] element_type* InitializeInto() {
-    DCHECK(!object_);
-    return &object_;
-  }
-
-  void reset(const ScopedTypeRef<T, Traits>& that) {
-    reset(that.get(), base::scoped_policy::RETAIN);
-  }
-
-  void reset(element_type object = Traits::InvalidValue(),
-             base::scoped_policy::OwnershipPolicy policy =
-                 base::scoped_policy::ASSUME) {
-    if (object && policy == base::scoped_policy::RETAIN)
-      object = Traits::Retain(object);
-    if (object_)
-      Traits::Release(object_);
-    object_ = object;
-  }
-
-  bool operator==(const ScopedTypeRef& that) const {
-    return object_ == that.object_;
-  }
-
-  bool operator!=(const ScopedTypeRef& that) const {
-    return object_ != that.object_;
-  }
-
-  operator element_type() const { return object_; }
-
-  element_type get() const { return object_; }
-
-  void swap(ScopedTypeRef& that) {
-    element_type temp = that.object_;
-    that.object_ = object_;
-    object_ = temp;
-  }
-
-  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
-  // a wrapper for Release().  To force a ScopedTypeRef<> object to call
-  // Release(), use ScopedTypeRef<>::reset().
-  [[nodiscard]] element_type release() {
-    element_type temp = object_;
-    object_ = Traits::InvalidValue();
-    return temp;
-  }
-
- private:
-  element_type object_;
-};
-
-}  // namespace base
-
-#endif  // BASE_MAC_SCOPED_TYPEREF_H_
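
For reference, a minimal sketch of the ASSUME/RETAIN ownership policies
described in the deleted header above, using a toy reference-counted type
(FakeRef, its traits, and Example() are hypothetical, for illustration only):

    #include "base/mac/scoped_typeref.h"
    #include "base/memory/scoped_policy.h"

    struct FakeRef {
      int ref_count = 1;  // A freshly created FakeRef is owned by its creator.
    };

    namespace base {
    template <>
    struct ScopedTypeRefTraits<FakeRef*> {
      static FakeRef* InvalidValue() { return nullptr; }
      static FakeRef* Retain(FakeRef* ref) {
        ++ref->ref_count;
        return ref;
      }
      static void Release(FakeRef* ref) {
        if (--ref->ref_count == 0) {
          delete ref;
        }
      }
    };
    }  // namespace base

    void Example() {
      FakeRef* raw = new FakeRef;  // ref_count == 1.

      // ASSUME (the default) takes over the caller's existing ownership claim;
      // Retain is not called, so ref_count stays at 1.
      base::ScopedTypeRef<FakeRef*> assumed(raw);

      // RETAIN creates its own ownership claim by calling Retain; ref_count
      // becomes 2. Each ScopedTypeRef calls Release once on destruction, so
      // the object is deleted exactly when the last one goes away.
      base::ScopedTypeRef<FakeRef*> retained(raw, base::scoped_policy::RETAIN);
    }
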
diff --git a/base/mac/test/launch_application_test_helper_main.m b/base/mac/test/launch_application_test_helper_main.m
new file mode 100644
index 0000000..a85020f
--- /dev/null
+++ b/base/mac/test/launch_application_test_helper_main.m
@@ -0,0 +1,120 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a helper application for launch_application_unittest.mm. This
+// application records several events by writing them to a named pipe;
+// the unit tests then use this information to verify that this helper was
+// launched in the correct manner.
+// The named pipe this writes to is equal to the name of the app bundle,
+// with .app replaced by .fifo.
+
+#import <Cocoa/Cocoa.h>
+
+@interface AppDelegate : NSObject <NSApplicationDelegate>
+@end
+
+@implementation AppDelegate {
+  NSArray* _command_line;
+  NSURL* _fifo_url;
+  NSRunningApplication* _running_app;
+}
+
+- (instancetype)initWithCommandLine:(NSArray*)command_line {
+  self = [super init];
+  if (self) {
+    _command_line = command_line;
+    NSURL* bundle_url = NSBundle.mainBundle.bundleURL;
+    _fifo_url = [bundle_url.URLByDeletingLastPathComponent
+        URLByAppendingPathComponent:
+            [bundle_url.lastPathComponent
+                stringByReplacingOccurrencesOfString:@".app"
+                                          withString:@".fifo"]];
+    _running_app = NSRunningApplication.currentApplication;
+    [_running_app addObserver:self
+                   forKeyPath:@"activationPolicy"
+                      options:NSKeyValueObservingOptionNew
+                      context:nil];
+  }
+  return self;
+}
+
+- (void)dealloc {
+  [_running_app removeObserver:self forKeyPath:@"activationPolicy" context:nil];
+}
+
+- (void)observeValueForKeyPath:(NSString*)keyPath
+                      ofObject:(id)object
+                        change:(NSDictionary*)change
+                       context:(void*)context {
+  [self
+      addLaunchEvent:@"activationPolicyChanged"
+            withData:@{
+              @"activationPolicy" : change[@"new"],
+              @"processIdentifier" :
+                  @(NSRunningApplication.currentApplication.processIdentifier),
+            }];
+}
+
+- (void)applicationDidFinishLaunching:(NSNotification*)notification {
+  [self
+      addLaunchEvent:@"applicationDidFinishLaunching"
+            withData:@{
+              @"activationPolicy" : @(NSApp.activationPolicy),
+              @"commandLine" : _command_line,
+              @"processIdentifier" :
+                  @(NSRunningApplication.currentApplication.processIdentifier),
+            }];
+}
+
+- (void)application:(NSApplication*)app openURLs:(NSArray<NSURL*>*)urls {
+  [app replyToOpenOrPrint:NSApplicationDelegateReplySuccess];
+
+  NSMutableArray* url_specs =
+      [[NSMutableArray alloc] initWithCapacity:urls.count];
+  for (NSURL* url in urls) {
+    [url_specs addObject:url.absoluteString];
+  }
+  [self
+      addLaunchEvent:@"openURLs"
+            withData:@{
+              @"activationPolicy" : @(NSApp.activationPolicy),
+              @"processIdentifier" :
+                  @(NSRunningApplication.currentApplication.processIdentifier),
+              @"urls" : url_specs,
+            }];
+}
+
+- (void)addLaunchEvent:(NSString*)event {
+  [self addLaunchEvent:event withData:nil];
+}
+
+- (void)addLaunchEvent:(NSString*)event withData:(NSDictionary*)data {
+  NSLog(@"Logging %@ with data %@", event, data);
+  NSDictionary* event_dict = @{
+    @"name" : event,
+    @"data" : data,
+  };
+  // It is important to write this dictionary to the named pipe non-atomically,
+  // as otherwise the write would replace the named pipe with a regular file
+  // rather than writing to the pipe.
+  [event_dict writeToURL:_fifo_url atomically:NO];
+}
+
+@end
+
+__attribute__((visibility("default"))) int main(int argc, char** argv) {
+  [NSApplication sharedApplication];
+
+  NSMutableArray* command_line = [[NSMutableArray alloc] initWithCapacity:argc];
+  for (int i = 0; i < argc; ++i) {
+    [command_line addObject:[NSString stringWithUTF8String:argv[i]]];
+  }
+
+  AppDelegate* delegate =
+      [[AppDelegate alloc] initWithCommandLine:command_line];
+  NSApp.delegate = delegate;
+
+  [NSApp run];
+  return 0;
+}
diff --git a/base/mac/wrap_cg_display.h b/base/mac/wrap_cg_display.h
deleted file mode 100644
index a579ef1..0000000
--- a/base/mac/wrap_cg_display.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2023 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_WRAP_CG_DISPLAY_H_
-#define BASE_MAC_WRAP_CG_DISPLAY_H_
-
-// All these symbols have incorrect availability annotations in the 13.3 SDK.
-// These have the correct annotation. See https://crbug.com/1431897.
-// TODO(thakis): Remove this once FB12109479 is fixed and we have updated to
-// an SDK with the fix.
-
-#include <CoreGraphics/CoreGraphics.h>
-
-inline CGDisplayStreamRef __nullable wrapCGDisplayStreamCreate(
-    CGDirectDisplayID display,
-    size_t outputWidth,
-    size_t outputHeight,
-    int32_t pixelFormat,
-    CFDictionaryRef __nullable properties,
-    CGDisplayStreamFrameAvailableHandler __nullable handler)
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's "
-        "initWithFilter:configuration:delegate: instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return CGDisplayStreamCreate(display, outputWidth, outputHeight, pixelFormat,
-                               properties, handler);
-#pragma clang diagnostic pop
-}
-
-inline CFRunLoopSourceRef __nullable wrapCGDisplayStreamGetRunLoopSource(
-    CGDisplayStreamRef cg_nullable displayStream)
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "There is no direct replacement for this function. Please use "
-        "ScreenCaptureKit API's SCStream to replace CGDisplayStream") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return CGDisplayStreamGetRunLoopSource(displayStream);
-#pragma clang diagnostic pop
-}
-
-inline CGError wrapCGDisplayStreamStart(
-    CGDisplayStreamRef cg_nullable displayStream)
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's "
-        "startCaptureWithCompletionHandler: to start a stream instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return CGDisplayStreamStart(displayStream);
-#pragma clang diagnostic pop
-}
-
-inline CGError wrapCGDisplayStreamStop(
-    CGDisplayStreamRef cg_nullable displayStream)
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's "
-        "stopCaptureWithCompletionHandler: to stop a stream instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return CGDisplayStreamStop(displayStream);
-#pragma clang diagnostic pop
-}
-
-inline _Null_unspecified CFStringRef wrapkCGDisplayStreamColorSpace()
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's SCStreamConfiguration "
-        "colorSpaceName property instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return kCGDisplayStreamColorSpace;
-#pragma clang diagnostic pop
-}
-
-inline _Null_unspecified CFStringRef wrapkCGDisplayStreamDestinationRect()
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's SCStreamConfiguration "
-        "destinationRect property instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return kCGDisplayStreamDestinationRect;
-#pragma clang diagnostic pop
-}
-
-inline _Null_unspecified CFStringRef wrapkCGDisplayStreamMinimumFrameTime()
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's SCStreamConfiguration "
-        "minimumFrameInterval property instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return kCGDisplayStreamMinimumFrameTime;
-#pragma clang diagnostic pop
-}
-
-inline _Null_unspecified CFStringRef wrapkCGDisplayStreamPreserveAspectRatio()
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's SCStreamConfiguration "
-        "preserveAspectRatio property instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return kCGDisplayStreamPreserveAspectRatio;
-#pragma clang diagnostic pop
-}
-
-inline _Null_unspecified CFStringRef wrapkCGDisplayStreamShowCursor()
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's SCStreamConfiguration showsCursor "
-        "property instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return kCGDisplayStreamShowCursor;
-#pragma clang diagnostic pop
-}
-
-inline const CGRect* __nullable
-wrapCGDisplayStreamUpdateGetRects(CGDisplayStreamUpdateRef __nullable updateRef,
-                                  CGDisplayStreamUpdateRectType rectType,
-                                  size_t* _Null_unspecified rectCount)
-    CG_AVAILABLE_BUT_DEPRECATED(
-        10.8,
-        14.0,
-        "Please use ScreenCaptureKit API's SCStreamFrameInfo with "
-        "SCStreamFrameInfoContentRect instead") {
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-  return CGDisplayStreamUpdateGetRects(updateRef, rectType, rectCount);
-#pragma clang diagnostic pop
-}
-
-#endif  // BASE_MAC_WRAP_CG_DISPLAY_H_
diff --git a/base/memory/aligned_memory.h b/base/memory/aligned_memory.h
index cc4b3d9..f2fcabb 100644
--- a/base/memory/aligned_memory.h
+++ b/base/memory/aligned_memory.h
@@ -24,6 +24,8 @@
 // A runtime sized aligned allocation can be created:
 //
 //   float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//   CHECK(reinterpret_cast<uintptr_t>(my_array) % alignment == 0);
+//   memset(my_array, 0, size);  // fills entire object.
 //
 //   // ... later, to release the memory:
 //   AlignedFree(my_array);
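
Putting the comment above together, a minimal sketch of the runtime-sized
allocation pattern (the helper function and its parameters are arbitrary
illustration, not part of this change):

    #include <cstddef>
    #include <cstring>

    #include "base/check.h"
    #include "base/memory/aligned_memory.h"

    float* AllocateZeroedFloats(size_t count, size_t alignment) {
      const size_t size = count * sizeof(float);
      float* buffer = static_cast<float*>(base::AlignedAlloc(size, alignment));
      CHECK(base::IsAligned(buffer, alignment));
      std::memset(buffer, 0, size);  // The whole allocation is writable.
      return buffer;  // The caller must release it with base::AlignedFree().
    }
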
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
index 912de77..16ee0f0 100644
--- a/base/memory/aligned_memory_unittest.cc
+++ b/base/memory/aligned_memory_unittest.cc
@@ -4,6 +4,8 @@
 
 #include "base/memory/aligned_memory.h"
 
+#include <string.h>
+
 #include <memory>
 
 #include "build/build_config.h"
@@ -13,23 +15,27 @@
 
 TEST(AlignedMemoryTest, DynamicAllocation) {
   void* p = AlignedAlloc(8, 8);
-  EXPECT_TRUE(p);
+  ASSERT_TRUE(p);
   EXPECT_TRUE(IsAligned(p, 8));
+  memset(p, 0, 8);  // Fill to check allocated size under ASAN.
   AlignedFree(p);
 
   p = AlignedAlloc(8, 16);
-  EXPECT_TRUE(p);
+  ASSERT_TRUE(p);
   EXPECT_TRUE(IsAligned(p, 16));
+  memset(p, 0, 8);  // Fill to check allocated size under ASAN.
   AlignedFree(p);
 
   p = AlignedAlloc(8, 256);
-  EXPECT_TRUE(p);
+  ASSERT_TRUE(p);
   EXPECT_TRUE(IsAligned(p, 256));
+  memset(p, 0, 8);  // Fill to check allocated size under ASAN.
   AlignedFree(p);
 
   p = AlignedAlloc(8, 4096);
-  EXPECT_TRUE(p);
+  ASSERT_TRUE(p);
   EXPECT_TRUE(IsAligned(p, 4096));
+  memset(p, 0, 8);  // Fill to check allocated size under ASAN.
   AlignedFree(p);
 }
 
diff --git a/base/memory/discardable_shared_memory.cc b/base/memory/discardable_shared_memory.cc
index 23c95ba..ecba260 100644
--- a/base/memory/discardable_shared_memory.cc
+++ b/base/memory/discardable_shared_memory.cc
@@ -8,7 +8,7 @@
 
 #include <algorithm>
 
-#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
 #include "base/atomicops.h"
 #include "base/bits.h"
 #include "base/feature_list.h"
diff --git a/base/memory/discardable_shared_memory_unittest.cc b/base/memory/discardable_shared_memory_unittest.cc
index 6ed2e1b..9c7d495 100644
--- a/base/memory/discardable_shared_memory_unittest.cc
+++ b/base/memory/discardable_shared_memory_unittest.cc
@@ -70,14 +70,14 @@
   ASSERT_TRUE(rv);
 
   // Memory is initially locked. Unlock it.
-  memory1.SetNow(Time::FromDoubleT(1));
+  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory1.Unlock(0, 0);
   EXPECT_FALSE(memory1.IsMemoryLocked());
 
   // Lock and unlock memory.
   DiscardableSharedMemory::LockResult lock_rv = memory1.Lock(0, 0);
   EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
-  memory1.SetNow(Time::FromDoubleT(2));
+  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(2));
   memory1.Unlock(0, 0);
 
   // Lock again before duplicating and passing ownership to new instance.
@@ -93,7 +93,7 @@
   ASSERT_TRUE(rv);
 
   // Unlock second instance.
-  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
   memory2.Unlock(0, 0);
 
   // Both memory instances should be unlocked now.
@@ -110,7 +110,7 @@
   EXPECT_TRUE(memory1.IsMemoryLocked());
 
   // Unlock first instance.
-  memory1.SetNow(Time::FromDoubleT(4));
+  memory1.SetNow(Time::FromSecondsSinceUnixEpoch(4));
   memory1.Unlock(0, 0);
 }
 
@@ -129,22 +129,22 @@
   ASSERT_TRUE(rv);
 
   // This should fail as memory is locked.
-  rv = memory1.Purge(Time::FromDoubleT(1));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(1));
   EXPECT_FALSE(rv);
 
-  memory2.SetNow(Time::FromDoubleT(2));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(2));
   memory2.Unlock(0, 0);
 
   ASSERT_TRUE(memory2.IsMemoryResident());
 
   // Memory is unlocked, but our usage timestamp is incorrect.
-  rv = memory1.Purge(Time::FromDoubleT(3));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(3));
   EXPECT_FALSE(rv);
 
   ASSERT_TRUE(memory2.IsMemoryResident());
 
   // Memory is unlocked and our usage timestamp should be correct.
-  rv = memory1.Purge(Time::FromDoubleT(4));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(4));
   EXPECT_TRUE(rv);
 
   // Lock should fail as memory has been purged.
@@ -162,12 +162,12 @@
   ASSERT_TRUE(rv);
 
   // Unlock things so we can Purge().
-  memory.SetNow(Time::FromDoubleT(2));
+  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
   memory.Unlock(0, 0);
 
   // It should be safe to Purge() |memory| after Close()ing the handle.
   memory.Close();
-  rv = memory.Purge(Time::FromDoubleT(4));
+  rv = memory.Purge(Time::FromSecondsSinceUnixEpoch(4));
   EXPECT_TRUE(rv);
 }
 
@@ -185,47 +185,47 @@
   rv = memory2.Map(kDataSize);
   ASSERT_TRUE(rv);
 
-  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory2.Unlock(0, 0);
 
-  EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(1));
+  EXPECT_EQ(memory2.last_known_usage(), Time::FromSecondsSinceUnixEpoch(1));
 
   DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
   EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
 
   // This should fail as memory is locked.
-  rv = memory1.Purge(Time::FromDoubleT(2));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
   ASSERT_FALSE(rv);
 
   // Last usage should have been updated to timestamp passed to Purge above.
-  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(2));
 
-  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
   memory2.Unlock(0, 0);
 
   // Usage time should be correct for |memory2| instance.
-  EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(3));
+  EXPECT_EQ(memory2.last_known_usage(), Time::FromSecondsSinceUnixEpoch(3));
 
   // However, usage time has not changed as far as |memory1| instance knows.
-  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(2));
 
   // Memory is unlocked, but our usage timestamp is incorrect.
-  rv = memory1.Purge(Time::FromDoubleT(4));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(4));
   EXPECT_FALSE(rv);
 
   // The failed purge attempt should have updated usage time to the correct
   // value.
-  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(3));
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromSecondsSinceUnixEpoch(3));
 
   // Purge memory through |memory2| instance. The last usage time should be
   // set to 0 as a result of this.
-  rv = memory2.Purge(Time::FromDoubleT(5));
+  rv = memory2.Purge(Time::FromSecondsSinceUnixEpoch(5));
   EXPECT_TRUE(rv);
   EXPECT_TRUE(memory2.last_known_usage().is_null());
 
   // This should fail as memory has already been purged and |memory1|'s usage
   // time is incorrect as a result.
-  rv = memory1.Purge(Time::FromDoubleT(6));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(6));
   EXPECT_FALSE(rv);
 
   // The failed purge attempt should have updated usage time to the correct
@@ -233,7 +233,7 @@
   EXPECT_TRUE(memory1.last_known_usage().is_null());
 
   // Purge should succeed now that usage time is correct.
-  rv = memory1.Purge(Time::FromDoubleT(7));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(7));
   EXPECT_TRUE(rv);
 }
 
@@ -251,10 +251,10 @@
   rv = memory2.Map(kDataSize);
   ASSERT_TRUE(rv);
 
-  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory2.Unlock(0, 0);
 
-  rv = memory2.Purge(Time::FromDoubleT(2));
+  rv = memory2.Purge(Time::FromSecondsSinceUnixEpoch(2));
   EXPECT_TRUE(rv);
 
   // Lock should fail as memory has been purged.
@@ -317,46 +317,46 @@
   ASSERT_TRUE(rv);
 
   // Unlock first page.
-  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory2.Unlock(0, base::GetPageSize());
 
-  rv = memory1.Purge(Time::FromDoubleT(2));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
   EXPECT_FALSE(rv);
 
   // Lock first page again.
-  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(3));
   DiscardableSharedMemory::LockResult lock_rv =
       memory2.Lock(0, base::GetPageSize());
   EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
 
   // Unlock first page.
-  memory2.SetNow(Time::FromDoubleT(4));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(4));
   memory2.Unlock(0, base::GetPageSize());
 
-  rv = memory1.Purge(Time::FromDoubleT(5));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(5));
   EXPECT_FALSE(rv);
 
   // Unlock second page.
-  memory2.SetNow(Time::FromDoubleT(6));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(6));
   memory2.Unlock(base::GetPageSize(), base::GetPageSize());
 
-  rv = memory1.Purge(Time::FromDoubleT(7));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(7));
   EXPECT_FALSE(rv);
 
   // Unlock anything onwards.
-  memory2.SetNow(Time::FromDoubleT(8));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(8));
   memory2.Unlock(2 * base::GetPageSize(), 0);
 
   // Memory is unlocked, but our usage timestamp is incorrect.
-  rv = memory1.Purge(Time::FromDoubleT(9));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(9));
   EXPECT_FALSE(rv);
 
   // The failed purge attempt should have updated usage time to the correct
   // value.
-  EXPECT_EQ(Time::FromDoubleT(8), memory1.last_known_usage());
+  EXPECT_EQ(Time::FromSecondsSinceUnixEpoch(8), memory1.last_known_usage());
 
   // Purge should now succeed.
-  rv = memory1.Purge(Time::FromDoubleT(10));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(10));
   EXPECT_TRUE(rv);
 }
 
@@ -387,13 +387,13 @@
   EXPECT_LE(kDataSize, memory.mapped_size());
 
   // Memory is initially locked. Unlock it.
-  memory.SetNow(Time::FromDoubleT(1));
+  memory.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory.Unlock(0, 0);
 
   // Lock and unlock memory.
   DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
   EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
-  memory.SetNow(Time::FromDoubleT(2));
+  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
   memory.Unlock(0, 0);
 }
 
@@ -405,13 +405,13 @@
   EXPECT_LE(0u, memory.mapped_size());
 
   // Memory is initially locked. Unlock it.
-  memory.SetNow(Time::FromDoubleT(1));
+  memory.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory.Unlock(0, 0);
 
   // Lock and unlock memory.
   DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
   EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
-  memory.SetNow(Time::FromDoubleT(2));
+  memory.SetNow(Time::FromSecondsSinceUnixEpoch(2));
   memory.Unlock(0, 0);
 }
 
@@ -437,14 +437,14 @@
   memset(memory2.memory(), 0xaa, kDataSize);
 
   // Unlock memory.
-  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.SetNow(Time::FromSecondsSinceUnixEpoch(1));
   memory2.Unlock(0, 0);
   EXPECT_FALSE(memory1.IsMemoryLocked());
 
   // Memory is unlocked, but our usage timestamp is incorrect.
-  rv = memory1.Purge(Time::FromDoubleT(2));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(2));
   EXPECT_FALSE(rv);
-  rv = memory1.Purge(Time::FromDoubleT(3));
+  rv = memory1.Purge(Time::FromSecondsSinceUnixEpoch(3));
   EXPECT_TRUE(rv);
 
   // Check that reading memory after it has been purged is returning
@@ -462,7 +462,7 @@
   ASSERT_TRUE(rv);
 
   base::trace_event::MemoryDumpArgs args = {
-      base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
+      base::trace_event::MemoryDumpLevelOfDetail::kDetailed};
   trace_event::ProcessMemoryDump pmd(args);
   trace_event::MemoryAllocatorDump* client_dump =
       pmd.CreateAllocatorDump("discardable_manager/map1");
diff --git a/base/memory/madv_free_discardable_memory_allocator_posix.cc b/base/memory/madv_free_discardable_memory_allocator_posix.cc
index d40576f..75505f1 100644
--- a/base/memory/madv_free_discardable_memory_allocator_posix.cc
+++ b/base/memory/madv_free_discardable_memory_allocator_posix.cc
@@ -54,7 +54,7 @@
     trace_event::ProcessMemoryDump* pmd) {
 #if BUILDFLAG(ENABLE_BASE_TRACING)
   if (args.level_of_detail !=
-      base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
+      base::trace_event::MemoryDumpLevelOfDetail::kBackground) {
     return true;
   }
 
diff --git a/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc b/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
index 6985988..d22068b 100644
--- a/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
+++ b/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
@@ -37,7 +37,7 @@
   MadvFreeDiscardableMemoryAllocatorPosixTest() {
 #if BUILDFLAG(ENABLE_BASE_TRACING)
     base::trace_event::MemoryDumpArgs dump_args = {
-        base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
+        base::trace_event::MemoryDumpLevelOfDetail::kDetailed};
     pmd_ = std::make_unique<base::trace_event::ProcessMemoryDump>(dump_args);
 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
   }
diff --git a/base/memory/nonscannable_memory.cc b/base/memory/nonscannable_memory.cc
index 99714bd..c78df46 100644
--- a/base/memory/nonscannable_memory.cc
+++ b/base/memory/nonscannable_memory.cc
@@ -4,10 +4,10 @@
 
 #include "base/memory/nonscannable_memory.h"
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/nonscannable_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h"
 #else
 #include <stdlib.h>
 #endif
diff --git a/base/memory/platform_shared_memory_handle.h b/base/memory/platform_shared_memory_handle.h
index 9487788..42dc4ef 100644
--- a/base/memory/platform_shared_memory_handle.h
+++ b/base/memory/platform_shared_memory_handle.h
@@ -9,7 +9,7 @@
 
 #if BUILDFLAG(IS_APPLE)
 #include <mach/mach.h>
-#include "base/mac/scoped_mach_port.h"
+#include "base/apple/scoped_mach_port.h"
 #elif BUILDFLAG(IS_FUCHSIA)
 #include <lib/zx/vmo.h>
 #elif BUILDFLAG(IS_WIN)
@@ -51,7 +51,7 @@
 // Platform-specific shared memory type used by the shared memory system.
 #if BUILDFLAG(IS_APPLE)
 using PlatformSharedMemoryHandle = mach_port_t;
-using ScopedPlatformSharedMemoryHandle = mac::ScopedMachSendRight;
+using ScopedPlatformSharedMemoryHandle = apple::ScopedMachSendRight;
 #elif BUILDFLAG(IS_FUCHSIA)
 using PlatformSharedMemoryHandle = zx::unowned_vmo;
 using ScopedPlatformSharedMemoryHandle = zx::vmo;
diff --git a/base/memory/platform_shared_memory_mapper_apple.cc b/base/memory/platform_shared_memory_mapper_apple.cc
new file mode 100644
index 0000000..1f44c1e
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_apple.cc
@@ -0,0 +1,45 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/logging.h"
+
+#include <mach/vm_map.h>
+#include "base/apple/mach_logging.h"
+
+namespace base {
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+    subtle::PlatformSharedMemoryHandle handle,
+    bool write_allowed,
+    uint64_t offset,
+    size_t size) {
+  vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
+  vm_address_t address = 0;
+  kern_return_t kr = vm_map(mach_task_self(),
+                            &address,  // Output parameter
+                            size,
+                            0,  // Alignment mask
+                            VM_FLAGS_ANYWHERE, handle, offset,
+                            FALSE,                         // Copy
+                            VM_PROT_READ | vm_prot_write,  // Current protection
+                            VM_PROT_READ | vm_prot_write,  // Maximum protection
+                            VM_INHERIT_NONE);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "vm_map";
+    return absl::nullopt;
+  }
+
+  return make_span(reinterpret_cast<uint8_t*>(address), size);
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+  kern_return_t kr = vm_deallocate(
+      mach_task_self(), reinterpret_cast<vm_address_t>(mapping.data()),
+      mapping.size());
+  MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "vm_deallocate";
+}
+
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_mapper_mac.cc b/base/memory/platform_shared_memory_mapper_mac.cc
deleted file mode 100644
index b3dd55c..0000000
--- a/base/memory/platform_shared_memory_mapper_mac.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/platform_shared_memory_mapper.h"
-
-#include "base/logging.h"
-
-#include <mach/vm_map.h>
-#include "base/mac/mach_logging.h"
-
-namespace base {
-
-absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
-    subtle::PlatformSharedMemoryHandle handle,
-    bool write_allowed,
-    uint64_t offset,
-    size_t size) {
-  vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
-  vm_address_t address = 0;
-  kern_return_t kr = vm_map(mach_task_self(),
-                            &address,  // Output parameter
-                            size,
-                            0,  // Alignment mask
-                            VM_FLAGS_ANYWHERE, handle, offset,
-                            FALSE,                         // Copy
-                            VM_PROT_READ | vm_prot_write,  // Current protection
-                            VM_PROT_READ | vm_prot_write,  // Maximum protection
-                            VM_INHERIT_NONE);
-  if (kr != KERN_SUCCESS) {
-    MACH_DLOG(ERROR, kr) << "vm_map";
-    return absl::nullopt;
-  }
-
-  return make_span(reinterpret_cast<uint8_t*>(address), size);
-}
-
-void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
-  kern_return_t kr = vm_deallocate(
-      mach_task_self(), reinterpret_cast<vm_address_t>(mapping.data()),
-      mapping.size());
-  MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "vm_deallocate";
-}
-
-}  // namespace base
diff --git a/base/memory/platform_shared_memory_mapper_win.cc b/base/memory/platform_shared_memory_mapper_win.cc
index 8ba40d8..12c75b3 100644
--- a/base/memory/platform_shared_memory_mapper_win.cc
+++ b/base/memory/platform_shared_memory_mapper_win.cc
@@ -4,7 +4,7 @@
 
 #include "base/memory/platform_shared_memory_mapper.h"
 
-#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
 #include "base/logging.h"
 
 #include <aclapi.h>
diff --git a/base/memory/platform_shared_memory_region_apple.cc b/base/memory/platform_shared_memory_region_apple.cc
new file mode 100644
index 0000000..1699d1e
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_apple.cc
@@ -0,0 +1,199 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <mach/vm_map.h>
+
+#include "base/apple/mach_logging.h"
+#include "base/apple/scoped_mach_vm.h"
+
+namespace base::subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    apple::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid()) {
+    return {};
+  }
+
+  if (size == 0) {
+    return {};
+  }
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    return {};
+  }
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid()) {
+    return {};
+  }
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
+                                        MACH_PORT_RIGHT_SEND, 1);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(apple::ScopedMachSendRight(handle_.get()),
+                                    mode_, size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  return ConvertToReadOnly(nullptr);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
+  if (!IsValid()) {
+    return false;
+  }
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  apple::ScopedMachSendRight handle_copy(handle_.release());
+
+  void* temp_addr = mapped_addr;
+  apple::ScopedMachVM scoped_memory;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr =
+        vm_map(mach_task_self(), reinterpret_cast<vm_address_t*>(&temp_addr),
+               size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE,
+               VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS) {
+      MACH_DLOG(ERROR, kr) << "vm_map";
+      return false;
+    }
+    scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                        mach_vm_round_page(size_));
+  }
+
+  // Make new memory object.
+  memory_object_size_t allocation_size = size_;
+  apple::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &allocation_size,
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      apple::ScopedMachSendRight::Receiver(named_right).get(), MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
+    return false;
+  }
+  DCHECK_GE(allocation_size, size_);
+
+  handle_ = std::move(named_right);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+  if (!IsValid()) {
+    return false;
+  }
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to unsafe";
+
+  mode_ = Mode::kUnsafe;
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0) {
+    return {};
+  }
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    return {};
+  }
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  memory_object_size_t vm_size = size;
+  apple::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &vm_size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      apple::ScopedMachSendRight::Receiver(named_right).get(),
+      MACH_PORT_NULL);  // Parent handle.
+  // Crash as soon as shm allocation fails to debug the issue
+  // https://crbug.com/872237.
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_make_memory_entry_64";
+  DCHECK_GE(vm_size, size);
+
+  return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformSharedMemoryHandle handle,
+    Mode mode,
+    size_t size) {
+  vm_address_t temp_addr = 0;
+  kern_return_t kr =
+      vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE, handle,
+             0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
+             VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+  if (kr == KERN_SUCCESS) {
+    kern_return_t kr_deallocate =
+        vm_deallocate(mach_task_self(), temp_addr, size);
+    // TODO(https://crbug.com/838365): convert to DLOG when bug fixed.
+    MACH_LOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
+        << "vm_deallocate";
+  } else if (kr != KERN_INVALID_RIGHT) {
+    MACH_LOG(ERROR, kr) << "vm_map";
+    return false;
+  }
+
+  bool is_read_only = kr == KERN_INVALID_RIGHT;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    // TODO(https://crbug.com/838365): convert to DLOG when bug fixed.
+    LOG(ERROR) << "VM region has a wrong protection mask: it is"
+               << (is_read_only ? " " : " not ") << "read-only but it should"
+               << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    apple::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace base::subtle
diff --git a/base/memory/platform_shared_memory_region_mac.cc b/base/memory/platform_shared_memory_region_mac.cc
deleted file mode 100644
index 798f69a..0000000
--- a/base/memory/platform_shared_memory_region_mac.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/platform_shared_memory_region.h"
-
-#include <mach/vm_map.h>
-
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_mach_vm.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace subtle {
-
-namespace {
-
-}  // namespace
-
-// static
-PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
-    mac::ScopedMachSendRight handle,
-    Mode mode,
-    size_t size,
-    const UnguessableToken& guid) {
-  if (!handle.is_valid())
-    return {};
-
-  if (size == 0)
-    return {};
-
-  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
-    return {};
-
-  CHECK(
-      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
-
-  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
-}
-
-mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
-  return handle_.get();
-}
-
-bool PlatformSharedMemoryRegion::IsValid() const {
-  return handle_.is_valid();
-}
-
-PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
-  if (!IsValid())
-    return {};
-
-  CHECK_NE(mode_, Mode::kWritable)
-      << "Duplicating a writable shared memory region is prohibited";
-
-  // Increment the ref count.
-  kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
-                                        MACH_PORT_RIGHT_SEND, 1);
-  if (kr != KERN_SUCCESS) {
-    MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
-    return {};
-  }
-
-  return PlatformSharedMemoryRegion(mac::ScopedMachSendRight(handle_.get()),
-                                    mode_, size_, guid_);
-}
-
-bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
-  return ConvertToReadOnly(nullptr);
-}
-
-bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
-  if (!IsValid())
-    return false;
-
-  CHECK_EQ(mode_, Mode::kWritable)
-      << "Only writable shared memory region can be converted to read-only";
-
-  mac::ScopedMachSendRight handle_copy(handle_.release());
-
-  void* temp_addr = mapped_addr;
-  mac::ScopedMachVM scoped_memory;
-  if (!temp_addr) {
-    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
-    kern_return_t kr =
-        vm_map(mach_task_self(), reinterpret_cast<vm_address_t*>(&temp_addr),
-               size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE,
-               VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
-    if (kr != KERN_SUCCESS) {
-      MACH_DLOG(ERROR, kr) << "vm_map";
-      return false;
-    }
-    scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
-                        mach_vm_round_page(size_));
-  }
-
-  // Make new memory object.
-  memory_object_size_t allocation_size = size_;
-  mac::ScopedMachSendRight named_right;
-  kern_return_t kr = mach_make_memory_entry_64(
-      mach_task_self(), &allocation_size,
-      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
-      mac::ScopedMachSendRight::Receiver(named_right).get(), MACH_PORT_NULL);
-  if (kr != KERN_SUCCESS) {
-    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
-    return false;
-  }
-  DCHECK_GE(allocation_size, size_);
-
-  handle_ = std::move(named_right);
-  mode_ = Mode::kReadOnly;
-  return true;
-}
-
-bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
-  if (!IsValid())
-    return false;
-
-  CHECK_EQ(mode_, Mode::kWritable)
-      << "Only writable shared memory region can be converted to unsafe";
-
-  mode_ = Mode::kUnsafe;
-  return true;
-}
-
-// static
-PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
-                                                              size_t size) {
-  if (size == 0) {
-    return {};
-  }
-
-  if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
-    return {};
-  }
-
-  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
-                                     "lead to this region being non-modifiable";
-
-  memory_object_size_t vm_size = size;
-  mac::ScopedMachSendRight named_right;
-  kern_return_t kr = mach_make_memory_entry_64(
-      mach_task_self(), &vm_size,
-      0,  // Address.
-      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
-      mac::ScopedMachSendRight::Receiver(named_right).get(),
-      MACH_PORT_NULL);  // Parent handle.
-  // Crash as soon as shm allocation fails to debug the issue
-  // https://crbug.com/872237.
-  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_make_memory_entry_64";
-  DCHECK_GE(vm_size, size);
-
-  return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
-                                    UnguessableToken::Create());
-}
-
-// static
-bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
-    PlatformSharedMemoryHandle handle,
-    Mode mode,
-    size_t size) {
-  vm_address_t temp_addr = 0;
-  kern_return_t kr =
-      vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE, handle,
-             0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
-             VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
-  if (kr == KERN_SUCCESS) {
-    kern_return_t kr_deallocate =
-        vm_deallocate(mach_task_self(), temp_addr, size);
-    // TODO(crbug.com/838365): convert to DLOG when bug fixed.
-    MACH_LOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
-        << "vm_deallocate";
-  } else if (kr != KERN_INVALID_RIGHT) {
-    MACH_LOG(ERROR, kr) << "vm_map";
-    return false;
-  }
-
-  bool is_read_only = kr == KERN_INVALID_RIGHT;
-  bool expected_read_only = mode == Mode::kReadOnly;
-
-  if (is_read_only != expected_read_only) {
-    // TODO(crbug.com/838365): convert to DLOG when bug fixed.
-    LOG(ERROR) << "VM region has a wrong protection mask: it is"
-               << (is_read_only ? " " : " not ") << "read-only but it should"
-               << (expected_read_only ? " " : " not ") << "be";
-    return false;
-  }
-
-  return true;
-}
-
-PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
-    mac::ScopedMachSendRight handle,
-    Mode mode,
-    size_t size,
-    const UnguessableToken& guid)
-    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
-
-}  // namespace subtle
-}  // namespace base
diff --git a/base/memory/platform_shared_memory_region_win.cc b/base/memory/platform_shared_memory_region_win.cc
index e387015..9698660 100644
--- a/base/memory/platform_shared_memory_region_win.cc
+++ b/base/memory/platform_shared_memory_region_win.cc
@@ -8,7 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
 #include "base/bits.h"
 #include "base/logging.h"
 #include "base/metrics/histogram_functions.h"
diff --git a/base/memory/raw_ptr.h b/base/memory/raw_ptr.h
index 03acb73..aca6d4a 100644
--- a/base/memory/raw_ptr.h
+++ b/base/memory/raw_ptr.h
@@ -8,6 +8,6 @@
 // Although `raw_ptr` is part of the standalone PA distribution, it is
 // easier to use the shorter path in `//base/memory`. We retain this
 // facade header for ease of typing.
-#include "base/allocator/partition_allocator/pointers/raw_ptr.h"  // IWYU pragma: export
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr.h"  // IWYU pragma: export
 
 #endif  // BASE_MEMORY_RAW_PTR_H_
diff --git a/base/memory/raw_ptr.md b/base/memory/raw_ptr.md
index 701d49b..ef77f28 100644
--- a/base/memory/raw_ptr.md
+++ b/base/memory/raw_ptr.md
@@ -1,43 +1,65 @@
-# raw_ptr&lt;T&gt; (aka MiraclePtr, aka BackupRefPtr)
+# raw_ptr&lt;T&gt; (aka. MiraclePtr, aka. BackupRefPtr, aka. BRP)
+
+Before telling you what `raw_ptr<T>` is, we'd like you to follow one simple
+rule: think of it as a raw C++ pointer. In particular:
+- Initialize it yourself, don't assume the constructor default-initializes it
+  (it may or may not). (Always use the `raw_ptr<T> member_ = nullptr;` form of
+  initialization rather than the so-called uniform initialization form
+  (empty braces) `raw_ptr<T> member_{};` whose meaning varies with the
+  implementation.)
+- Don't assume that moving clears the pointer (it may or may not).
+- The owner of the memory must free it when the time is right, don't assume
+  `raw_ptr<T>` will free it for you (it won't). Unlike `std::unique_ptr<T>`,
+  `base::scoped_refptr<T>`, etc., it does not manage ownership or lifetime of
+  an allocated object.
+  - If this is the owner of the memory, consider using an alternative smart
+    pointer.
+- Don't assume `raw_ptr<T>` will protect you from freeing memory too early (it
+  likely will, but there are gotchas; one of them is that dereferencing will
+  result in another type of undefined behavior).
+
+(There are other, much more subtle rules that you should follow. They're also
+harder to violate. More about them in
+[the "Extra pointer rules" section](#Extra-pointer-rules).)
 
 `raw_ptr<T>` is a non-owning smart pointer that has improved memory-safety over
 raw pointers.  It behaves just like a raw pointer on platforms where
-USE_BACKUP_REF_PTR is off, and almost like one when it's on. The main
-difference is that when USE_BACKUP_REF_PTR is enabled, `raw_ptr<T>` is
-beneficial for security, because it can prevent a significant percentage of
+ENABLE_BACKUP_REF_PTR_SUPPORT is off, and almost like one when it's on. The main
+difference is that when ENABLE_BACKUP_REF_PTR_SUPPORT is enabled, `raw_ptr<T>`
+is beneficial for security, because it can prevent a significant percentage of
 Use-after-Free (UaF) bugs from being exploitable. It does this by poisoning
 the freed memory and quarantining it as long as a dangling `raw_ptr<T>`
 exists.
 
-When USE_BACKUP_REF_PTR is on, `raw_ptr<T>` it is zero-initialized and cleared
-on destruction and move. However, when USE_BACKUP_REF_PTR is off, this is not
-the case. You must continue to explicitly initialize raw_ptr members to ensure
-consistent behavior across all cases. Because the underlying implementation
-may vary, always use the `raw_ptr<T> member_ = nullptr;` form of
-initialization rather than the so-called uniform initialization form
-(empty braces) `raw_ptr<T> member_{};` whose meaning varies with the
-implementation.
-
-Unlike `std::unique_ptr<T>`, `base::scoped_refptr<T>`, etc., `raw_ptr<T>`
-does not manage ownership or lifetime of an allocated object - you are still
-responsible for freeing the object when no longer used, just as you would
-with a raw C++ pointer.
-
 `raw_ptr<T>` has limited impact on stability - dereferencing
 a dangling pointer remains Undefined Behavior (although poisoning may
 lead to earlier, easier to debug crashes).
-Note that the security protection is not yet enabled by default.
 
 `raw_ptr<T>` is a part of
 [the MiraclePtr project](https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/edit?usp=sharing)
 and currently implements
 [the BackupRefPtr algorithm](https://docs.google.com/document/d/1m0c63vXXLyGtIGBi9v6YFANum7-IRC3-dmiYBCWqkMk/edit?usp=sharing).
 If needed, please reach out to
-[[email protected]](https://groups.google.com/u/1/a/chromium.org/g/memory-safety-dev)
+[[email protected]](https://groups.google.com/a/chromium.org/g/memory-safety-dev)
 or (Google-internal)
 [[email protected]](https://groups.google.com/a/google.com/g/chrome-memory-safety)
 with questions or concerns.
 
+As of M116, it is enabled by default in all non-Renderer processes, on:
+- Android (incl. AndroidWebView, Android WebEngine)
+- Windows
+- ChromeOS (Ash), except for the `ExperimentalAsh` pointers
+- macOS
+- Linux
+
+In particular, it isn't enabled by default on these platforms:
+- iOS
+- ChromeOS (Lacros)
+- ChromeCast
+- Fuchsia
+- AIX
+- z/OS
+
 [TOC]
 
 ## When to use |raw_ptr&lt;T&gt;|
@@ -47,21 +69,13 @@
 a raw C++ pointer `T*` whenever possible, except in Renderer-only code.
 This guide offers more details.
 
-The usage guidelines are *not* enforced currently (the MiraclePtr team will turn
-on enforcement via Chromium Clang Plugin after confirming performance results
-via Stable channel experiments).  Afterwards we plan to allow
+The usage guidelines are currently enforced via the Chromium Clang Plugin. We allow
 exclusions via:
-- [manual-paths-to-ignore.txt](../../tools/clang/rewrite_raw_ptr_fields/manual-paths-to-ignore.txt)
-  to exclude at a directory level.  Examples:
-    - Renderer-only code (i.e. code in paths that contain `/renderer/` or
-      `third_party/blink/public/web/`)
-    - Code that cannot depend on `//base`
-    - Code in `//ppapi`
 - `RAW_PTR_EXCLUSION` C++ attribute to exclude individual fields.  Examples:
     - Cases where `raw_ptr<T>` won't compile (e.g. cases covered in
       [the "Unsupported cases leading to compile errors" section](#Unsupported-cases-leading-to-compile-errors)).
       Make sure to also look at
-      [the "Recoverable compile-time problems" section](#Recoverable-compile_time-problems).
+      [the "Recoverable compile-time problems" section](#Recoverable-compile-time-problems).
     - Cases where the pointer always points outside of PartitionAlloc
       (e.g.  literals, stack allocated memory, shared memory, mmap'ed memory,
       V8/Oilpan/Java heaps, TLS, etc.).
@@ -70,7 +84,14 @@
       Make sure to look at
       [the "Extra pointer rules" section](#Extra-pointer-rules)
       before resorting to this exclusion.
-- No explicit exclusions will be needed for:
+- [RawPtrManualPathsToIgnore.h](../../tools/clang/plugins/RawPtrManualPathsToIgnore.h)
+  to exclude at a directory level (NOTE, use it as a last resort, and be aware
+  it'll require a Clang plugin roll).  Examples:
+    - Renderer-only code (i.e. code in paths that contain `/renderer/` or
+      `third_party/blink/public/web/`)
+    - Code that cannot depend on `//base`
+    - Code in `//ppapi`
+- No explicit exclusions are needed for:
     - `const char*`, `const wchar_t*`, etc.
     - Function pointers
     - ObjC pointers
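As a minimal sketch of the field-level exclusion mentioned above (hypothetical types, not from the document):

```cpp
#include "base/memory/raw_ptr_exclusion.h"

struct MmapBacked {};  // Hypothetical type that never lives in PartitionAlloc.

struct Observer {
  // Field-level opt-out: keep a plain T* because the pointee is outside
  // PartitionAlloc, so raw_ptr<T> would add cost without benefit.
  RAW_PTR_EXCLUSION MmapBacked* target = nullptr;
};
```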
@@ -108,17 +129,17 @@
 `operator->`, `operator*` and other operators
 that one expects from a raw pointer.
 Cases where other code needs to be modified are described in
-[the "Recoverable compile-time problems" section](#Recoverable-compile_time-problems)
+[the "Recoverable compile-time problems" section](#Recoverable-compile-time-problems)
 below.
 
 ## Performance
 
 ### Performance impact of using |raw_ptr&lt;T&gt;| instead of |T\*|
 
-Compared to a raw C++ pointer, on platforms where USE_BACKUP_REF_PTR is on,
-`raw_ptr<T>` incurs additional runtime
+Compared to a raw C++ pointer, on platforms where ENABLE_BACKUP_REF_PTR_SUPPORT
+is on, `raw_ptr<T>` incurs additional runtime
 overhead for initialization, destruction, and assignment (including
-`ptr++` and `ptr += ...`).
+`ptr++`, `ptr += ...`, etc.).
 There is no overhead when dereferencing or extracting a pointer (including
 `*ptr`, `ptr->foobar`, `ptr.get()`, or implicit conversions to a raw C++
 pointer).
@@ -131,14 +152,16 @@
 constructor, destructor, and assignment operators.
 If the pointed memory is unprotected,
 then `raw_ptr<T>` behaves just like a `T*`
-and the runtime overhead is limited to the extra check.
+and the runtime overhead is limited to that extra check.
 (The security protection incurs additional overhead
 described in
-[the "Performance impact of enabling Use-after-Free protection" section](#Performance-impact-of-enabling-Use_after_Free-protection)
+[the "Performance impact of enabling Use-after-Free protection" section](#Performance-impact-of-enabling-Use-after-Free-protection)
 below.)
 
 Some additional overhead comes from setting `raw_ptr<T>` to `nullptr`
-when default-constructed, destructed, or moved.
+when default-constructed, destructed, or moved. (Yes, we said above to not rely
+on it, but to be precise this will always happen when
+ENABLE_BACKUP_REF_PTR_SUPPORT is on; no guarantees otherwise.)
 
 During
 [the "Big Rewrite"](https://groups.google.com/a/chromium.org/g/chromium-dev/c/vAEeVifyf78/m/SkBUc6PhBAAJ)
@@ -151,30 +174,32 @@
 (for more detailed results see
 [the document here](https://docs.google.com/document/d/1MfDT-JQh_UIpSQw3KQttjbQ_drA7zw1gQDwU3cbB6_c/edit?usp=sharing)).
 
-### Performance impact of enabling Use-after-Free protection
+### Performance impact of enabling Use-after-Free protection {#Performance-impact-of-enabling-Use-after-Free-protection}
 
 When the Use-after-Free protection is enabled, then `raw_ptr<T>` has some
-additional performance overhead.  This protection is currently disabled
-by default.  We will enable the protection incrementally, starting with
-more non-Renderer processes first.
+additional performance overhead.
 
 The protection can increase memory usage:
 - For each memory allocation Chromium's allocator (PartitionAlloc)
-  allocates extra 16 bytes (4 bytes to store the BackupRefPtr's
-  ref-count associated with the allocation, the rest to maintain
-  alignment requirements).
+  carves out an extra 4 bytes. (That doesn't necessarily mean that each allocation
+  grows by 4B. Allocation sizes come from predefined buckets, so it's possible
+  for an allocation to stay within the same bucket and incur no additional
+  overhead, or hop over to the next bucket and incur much higher overhead.)
 - Freed memory is quarantined and not available for reuse as long
-  as dangling `raw_ptr<T>` pointers exist.
-- Enabling protection requires additional partitions in PartitionAlloc,
-  which increases memory fragmentation.
+  as dangling `raw_ptr<T>` pointers exist. (In practice this overhead has been
+  observed to be low, but on a couple of occasions it led to significant memory
+  leaks, fortunately caught early.)
 
-The protection can increase runtime costs - `raw_ptr<T>`'s constructor,
-destructor, and assignment operators (including `ptr++` and `ptr += ...`) need
-to maintain BackupRefPtr's ref-count.
+The protection increases runtime costs - `raw_ptr<T>`'s constructor,
+destructor, and assignment operators need to maintain BackupRefPtr's ref-count
+(atomic increment/decrement). `ptr++`, `ptr += ...`, etc. don't need to do that,
+but instead have to incur the cost
+of verifying that the resulting pointer stays within the same allocation (important
+for BRP integrity).
 
 ## When it is okay to continue using raw C++ pointers
 
-### Unsupported cases leading to compile errors
+### Unsupported cases leading to compile errors {#Unsupported-cases-leading-to-compile-errors}
 
 Continue to use raw C++ pointers in the following cases, which may otherwise
 result in compile errors:
@@ -184,10 +209,6 @@
   `thread_local` variables (see more details in the
   [Rewrite exclusion statistics](https://docs.google.com/document/d/1uAsWnwy8HfIJhDPSh1efohnqfGsv2LJmYTRBj0JzZh8/edit#heading=h.dg4eebu87wg9)
   )
-- Pointer fields that require non-null, constexpr initialization
-  (see more details in the
-  [Rewrite exclusion statistics](https://docs.google.com/document/d/1uAsWnwy8HfIJhDPSh1efohnqfGsv2LJmYTRBj0JzZh8/edit#heading=h.dg4eebu87wg9)
-  )
 - Pointer fields in classes/structs that have to be trivially constructible or
   destructible
 - Code that doesn’t depend on `//base` (including non-Chromium repositories and
@@ -206,11 +227,13 @@
   TLS, etc.
 - `const char*` (and `const wchar_t*`) pointer fields, unless you’re convinced
   they can point to a heap-allocated object, not just a string literal
-- Pointer fields that can only point to aligned allocations (requested via
-  PartitionAlloc’s `AlignedAlloc` or `memalign` family of functions, with
-  alignment higher than `base::kAlignment`)
-- Pointer fields in Renderer-only code.  (This might change in the future
-  as we explore expanding `raw_ptr<T>` usage in https://crbug.com/1273204.)
+- Pointer fields in certain renderer code. Specifically, we disallow usage in
+
+``` none
+third_party/blink/renderer/core/
+third_party/blink/renderer/platform/heap/
+third_party/blink/renderer/platform/wtf/
+```
 
 ### Other perf optimizations
 
@@ -223,9 +246,9 @@
 Use raw C++ pointers instead of `raw_ptr<T>` in the following scenarios:
 - Pointers in local variables and function parameters and return values. This
   includes pointer fields in classes/structs that are used only on the stack.
-  (We plan to enforce this in the Chromium Clang Plugin.  Using `raw_ptr<T>`
-  here would cumulatively lead to performance regression and the security
-  benefit of UaF protection is lower for such short-lived pointers.)
+  (Using `raw_ptr<T>` here would cumulatively lead to performance regression and
+  the security benefit of UaF protection is lower for such short-lived
+  pointers.)
 - Pointer fields in unions. However, note that a much better, modern alternative
   is `absl::variant` + `raw_ptr<T>`. If use of C++ union is absolutely
   unavoidable, prefer a regular C++ pointer: incorrect management of a
@@ -233,7 +256,9 @@
 - Pointers whose addresses are used only as identifiers and which are
   never dereferenced (e.g. keys in a map). There is a performance gain
   by not using `raw_ptr` in this case; prefer to use `uintptr_t` to
-  emphasize that the entity can dangle and must not be dereferenced.
+  emphasize that the entity can dangle and must not be dereferenced. (NOTE,
+  this is a dangerous practice irrespective of raw_ptr usage, as there is a risk
+  of memory being freed and another pointer allocated with the same address!)
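For the identifier-only case above, a minimal sketch (hypothetical type and map, for illustration only):

```cpp
#include <cstdint>
#include <map>

struct Widget {};  // Hypothetical type; keys only identify it, never deref it.

// uintptr_t keys make it explicit that the stored addresses may dangle and
// must never be dereferenced.
std::map<uintptr_t, int> g_use_counts;

void RecordUse(const Widget* widget) {
  g_use_counts[reinterpret_cast<uintptr_t>(widget)]++;
}
```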
 
 You don’t have to, but may use `raw_ptr<T>`, in the following scenarios:
 - Pointers that are used as an element type of collections/wrappers. E.g.
@@ -242,13 +267,13 @@
   optimizations above might still apply and argue for using a raw C++ pointer).
 
 
-## Extra pointer rules
+## Extra pointer rules {#Extra-pointer-rules}
 
 `raw_ptr<T>` requires following some extra rules compared to a raw C++ pointer:
 - Don’t assign invalid, non-null addresses (this includes previously valid and
   now freed memory,
   [Win32 handles](https://crbug.com/1262017), and more). You can only assign an
-  address of memory that is allocated at the time of assignment. Exceptions:
+  address of memory that is valid at the time of assignment. Exceptions:
     - a pointer to the end of a valid allocation (but not even 1 byte further)
     - a pointer to the last page of the address space, e.g. for sentinels like
       `reinterpret_cast<void*>(-1)`
@@ -257,11 +282,11 @@
   `memcpy(reinterpret_cast<void*>(&obj_with_raw_ptr), buffer)`.
 - Don’t assign to a `raw_ptr<T>` concurrently, even if the same value.
 - Don’t rely on moved-from pointers to keep their old value. Unlike raw
-  pointers, `raw_ptr<T>` is cleared upon moving.
+  pointers, `raw_ptr<T>` may be cleared upon moving.
 - Don't use the pointer after it is destructed. Unlike raw pointers,
-  `raw_ptr<T>` is cleared upon destruction. This may happen e.g. when fields are
-  ordered such that the pointer field is destructed before the class field whose
-  destructor uses that pointer field (e.g. see
+  `raw_ptr<T>` may be cleared upon destruction. This may happen e.g. when fields
+  are ordered such that the pointer field is destructed before the class field
+  whose destructor uses that pointer field (e.g. see
   [Esoteric Issues](https://docs.google.com/document/d/14Ol_adOdNpy4Ge-XReI7CXNKMzs_LL5vucDQIERDQyg/edit#heading=h.yoba1l8bnfmv)).
 - Don’t assign to a `raw_ptr<T>` until its constructor has run. This may happen
   when a base class’s constructor uses a not-yet-initialized field of a derived
@@ -277,7 +302,27 @@
 `RawPtrBackupRefImpl::ReleaseInternal()`, but you may also experience memory
 corruption or a silent drop of UaF protection.
 
-## Recoverable compile-time problems
+## Pointer Annotations
+
+### The AllowPtrArithmetic trait
+
+In an ideal world, a raw_ptr would point to a single object, rather than to
+a C-style array of objects accessed via pointer arithmetic, since the latter
+is best handled via a C++ construct such as base::span<> or std::vector<>.
+raw_ptrs upon which such operations are performed and for which conversion is
+desirable have been tagged with the AllowPtrArithmetic trait. That all such
+pointers are tagged can be enforced by setting the GN build arg
+enable_pointer_arithmetic_trait_check=true.
+
+### The AllowUninitialized trait
+
+When building Chromium, raw_ptrs are always nullptr initialized, either as
+the result of a specific implementation that requires it (e.g. BackupRefPtr),
+or as the result of build flags (to enforce consistency). However, we provide
+an opt-out to allow third-party code to skip this step (where possible). Use
+this trait sparingly.
+
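A minimal sketch of how these traits are spelled at a use site, assuming the unqualified trait names shown in this document (hypothetical fields, for illustration only):

```cpp
#include <cstddef>
#include <cstdint>

#include "base/memory/raw_ptr.h"

struct Buffer {
  // Indexed like a C-style array, so pointer arithmetic is explicitly allowed.
  raw_ptr<uint8_t, AllowPtrArithmetic> bytes = nullptr;
  size_t size = 0;
};

struct ThirdPartyShim {
  // Opts out of the default nullptr initialization; use sparingly.
  raw_ptr<int, AllowUninitialized> unmanaged;
};
```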
+## Recoverable compile-time problems {#Recoverable-compile-time-problems}
 
 ### Explicit |raw_ptr.get()| might be needed
 
@@ -290,8 +335,8 @@
       copy the pointer, which incurs a performance overhead.
 - `return condition ? raw_ptr : wrapped_ptr_.get();` (ternary operator needs
   identical types in both branches)
-- `base::WrapUniquePtr(wrapped_ptr_.get());` (implicit cast doesn't kick in for
-  arguments in templates)
+- `TemplatedFunction(wrapped_ptr_.get());` (implicit cast doesn't kick in for
+  `T*` arguments in templates)
 - `printf("%p", wrapped_ptr_.get());` (can't pass class type arguments to
   variadic functions)
 - `reinterpret_cast<SomeClass*>(wrapped_ptr_.get())` (`const_cast` and
@@ -677,7 +722,7 @@
 ASAN_OPTIONS=quarantine_size_mb=1024 path/to/chrome
 ```
 
-## Appendix: Is raw\_ptr Live?
+## Appendix: Is raw_ptr Live?
 
 ![Diagram showing how both code support and feature flag must be present
   for raw_ptr to be BRP.](./raw_ptr_liveness.png)
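For the `.get()` cases listed under "Recoverable compile-time problems" above, a minimal sketch (hypothetical types, not from the document):

```cpp
#include "base/memory/raw_ptr.h"

struct SomeClass {};  // Hypothetical.

template <typename T>
void TemplatedFunction(T* /*ptr*/) {}

struct Holder {
  SomeClass* Choose(bool condition, SomeClass* raw) {
    // The ternary operator needs identical types in both branches.
    return condition ? raw : wrapped_ptr_.get();
  }

  void Pass() {
    // The implicit cast doesn't kick in for T* arguments in templates.
    TemplatedFunction(wrapped_ptr_.get());
  }

  raw_ptr<SomeClass> wrapped_ptr_ = nullptr;
};
```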
diff --git a/base/memory/raw_ptr_asan_bound_arg_tracker.cc b/base/memory/raw_ptr_asan_bound_arg_tracker.cc
index a8cd5e2..c9ca564 100644
--- a/base/memory/raw_ptr_asan_bound_arg_tracker.cc
+++ b/base/memory/raw_ptr_asan_bound_arg_tracker.cc
@@ -1,10 +1,10 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/memory/raw_ptr_asan_bound_arg_tracker.h"
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
 
diff --git a/base/memory/raw_ptr_asan_bound_arg_tracker.h b/base/memory/raw_ptr_asan_bound_arg_tracker.h
index ab8d1cb..9157fc8 100644
--- a/base/memory/raw_ptr_asan_bound_arg_tracker.h
+++ b/base/memory/raw_ptr_asan_bound_arg_tracker.h
@@ -1,11 +1,11 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_MEMORY_RAW_PTR_ASAN_BOUND_ARG_TRACKER_H_
 #define BASE_MEMORY_RAW_PTR_ASAN_BOUND_ARG_TRACKER_H_
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
 #include <cstddef>
diff --git a/base/memory/raw_ptr_asan_hooks.h b/base/memory/raw_ptr_asan_hooks.h
index 42185c5..43a7f1a 100644
--- a/base/memory/raw_ptr_asan_hooks.h
+++ b/base/memory/raw_ptr_asan_hooks.h
@@ -5,7 +5,7 @@
 #ifndef BASE_MEMORY_RAW_PTR_ASAN_HOOKS_H_
 #define BASE_MEMORY_RAW_PTR_ASAN_HOOKS_H_
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
 
diff --git a/base/memory/raw_ptr_asan_service.h b/base/memory/raw_ptr_asan_service.h
index bff9336..c8baffa 100644
--- a/base/memory/raw_ptr_asan_service.h
+++ b/base/memory/raw_ptr_asan_service.h
@@ -5,7 +5,7 @@
 #ifndef BASE_MEMORY_RAW_PTR_ASAN_SERVICE_H_
 #define BASE_MEMORY_RAW_PTR_ASAN_SERVICE_H_
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
 #include <cstddef>
diff --git a/base/memory/raw_ptr_asan_unittest.cc b/base/memory/raw_ptr_asan_unittest.cc
index 3a42db7..48b8231 100644
--- a/base/memory/raw_ptr_asan_unittest.cc
+++ b/base/memory/raw_ptr_asan_unittest.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
 
diff --git a/base/memory/raw_ptr_cast.h b/base/memory/raw_ptr_cast.h
index 06bc61a..789c324 100644
--- a/base/memory/raw_ptr_cast.h
+++ b/base/memory/raw_ptr_cast.h
@@ -8,6 +8,6 @@
 // Although `raw_ptr` is part of the standalone PA distribution, it is
 // easier to use the shorter path in `//base/memory`. We retain this
 // facade header for ease of typing.
-#include "base/allocator/partition_allocator/pointers/raw_ptr_cast.h"  // IWYU pragma: export
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_cast.h"  // IWYU pragma: export
 
 #endif  // BASE_MEMORY_RAW_PTR_CAST_H_
diff --git a/base/memory/raw_ptr_chromium_unittest.cc b/base/memory/raw_ptr_chromium_unittest.cc
new file mode 100644
index 0000000..02a6746
--- /dev/null
+++ b/base/memory/raw_ptr_chromium_unittest.cc
@@ -0,0 +1,12 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/raw_ptr.h"
+
+// This file contains tests related to raw_ptr that exercise Chromium-specific
+// configuration.
+
+// Chromium expects these to be always enabled.
+static_assert(raw_ptr<int>::kZeroOnConstruct);
+static_assert(raw_ptr<int>::kZeroOnMove);
diff --git a/base/memory/raw_ptr_exclusion.h b/base/memory/raw_ptr_exclusion.h
index 3ce1d60..e4d355d 100644
--- a/base/memory/raw_ptr_exclusion.h
+++ b/base/memory/raw_ptr_exclusion.h
@@ -8,6 +8,6 @@
 // Although `raw_ptr` is part of the standalone PA distribution, it is
 // easier to use the shorter path in `//base/memory`. We retain this
 // facade header for ease of typing.
-#include "base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h"  // IWYU pragma: export
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h"  // IWYU pragma: export
 
 #endif  // BASE_MEMORY_RAW_PTR_EXCLUSION_H_
diff --git a/base/memory/raw_ptr_liveness.dot b/base/memory/raw_ptr_liveness.dot
index 0876f0f..c8162cb 100644
--- a/base/memory/raw_ptr_liveness.dot
+++ b/base/memory/raw_ptr_liveness.dot
@@ -1,5 +1,5 @@
 digraph {
-  graph[bgcolor=transparent]
+  graph[bgcolor=white]
   node[shape=box]
 
   component[label="Is it a\ncomponent\nbuild?"]
diff --git a/base/memory/raw_ptr_liveness.png b/base/memory/raw_ptr_liveness.png
index 6778b67..8f69f6b 100644
--- a/base/memory/raw_ptr_liveness.png
+++ b/base/memory/raw_ptr_liveness.png
Binary files differ
diff --git a/base/memory/raw_ref.h b/base/memory/raw_ref.h
index 6a599d9..6cbca47 100644
--- a/base/memory/raw_ref.h
+++ b/base/memory/raw_ref.h
@@ -8,6 +8,6 @@
 // Although `raw_ref` is part of the standalone PA distribution, it is
 // easier to use the shorter path in `//base/memory`. We retain this
 // facade header for ease of typing.
-#include "base/allocator/partition_allocator/pointers/raw_ref.h"  // IWYU pragma: export
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ref.h"  // IWYU pragma: export
 
 #endif  // BASE_MEMORY_RAW_REF_H_
diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h
index 0e08a84..06df106 100644
--- a/base/memory/raw_scoped_refptr_mismatch_checker.h
+++ b/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -44,7 +44,7 @@
                            IsRefCountedType<base::RemoveRawRefT<T>>>,
           std::conjunction<base::IsPointer<T>,
                            IsRefCountedType<base::RemovePointerT<T>>>> {
-  static_assert(!std::is_reference<T>::value,
+  static_assert(!std::is_reference_v<T>,
                 "NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
 };
 
diff --git a/base/memory/raw_span.h b/base/memory/raw_span.h
new file mode 100644
index 0000000..9dd043d
--- /dev/null
+++ b/base/memory/raw_span.h
@@ -0,0 +1,33 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_SPAN_H_
+#define BASE_MEMORY_RAW_SPAN_H_
+
+#include "base/containers/span.h"
+#include "base/memory/raw_ptr.h"
+
+namespace base {
+
+// raw_span<T> is a type that provides the spatial safety of span<T> along
+// with the temporal safety of raw_ptr<T>. This is intended to be a safer
+// replacement for classes that store pointer + size fields. As is the case
+// with raw_ptr<>, raw_span<> should be used for class members only, with
+// ordinary span<> used for function arguments and the like. Note that
+// raw_span<> will implicitly convert to span<> for ease of use in these
+// cases.
+
+template <typename T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
+using raw_span =
+    span<T, dynamic_extent, raw_ptr<T, Traits | AllowPtrArithmetic>>;
+
+template <typename T>
+span<T> ExtractAsDanglingSpan(raw_span<T>& arg) {
+  span<T> result = std::exchange(arg, raw_span<T>());
+  return result;
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_RAW_SPAN_H_
diff --git a/base/memory/raw_span_unittest.cc b/base/memory/raw_span_unittest.cc
new file mode 100644
index 0000000..53fc8e2
--- /dev/null
+++ b/base/memory/raw_span_unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/raw_span.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// This file contains tests related to raw_span, showing that it converts
+// to span.
+
+TEST(RawSpan, ConvertToSpan) {
+  int arr[3] = {100, 101, 102};
+  base::raw_span<int> span1(arr);
+  base::span<int> span2(span1);
+  base::span<int> span3;
+  span3 = span1;
+
+  EXPECT_THAT(span1, ::testing::ElementsAre(100, 101, 102));
+  EXPECT_THAT(span2, ::testing::ElementsAre(100, 101, 102));
+  EXPECT_THAT(span3, ::testing::ElementsAre(100, 101, 102));
+}
+
+TEST(RawSpan, ConvertFromSpan) {
+  int arr[3] = {100, 101, 102};
+  base::span<int> span1(arr);
+  base::raw_span<int> span2(span1);
+  base::raw_span<int> span3;
+  span3 = span1;
+
+  EXPECT_THAT(span1, ::testing::ElementsAre(100, 101, 102));
+  EXPECT_THAT(span2, ::testing::ElementsAre(100, 101, 102));
+  EXPECT_THAT(span3, ::testing::ElementsAre(100, 101, 102));
+}
+
+TEST(RawSpan, UnderstandsDanglingAttribute) {
+  // Test passes if it doesn't trip Dangling Ptr Detectors.
+  int* arr = new int[3];
+  base::raw_span<int, DisableDanglingPtrDetection> span(arr, 3u);
+  delete[] span.data();
+}
+
+TEST(RawSpan, ExtractAsDangling) {
+  // Test passes if it doesn't trip Dangling Ptr Detectors.
+  int* arr = new int[3];
+  base::raw_span<int> span(arr, 3u);
+  delete[] base::ExtractAsDanglingSpan(span).data();
+}
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index 6658dba..a7b12e7 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -135,7 +135,7 @@
 #endif
 
   mutable uint32_t ref_count_ = 0;
-  static_assert(std::is_unsigned<decltype(ref_count_)>::value,
+  static_assert(std::is_unsigned_v<decltype(ref_count_)>,
                 "ref_count_ must be an unsigned type.");
 
 #if DCHECK_IS_ON()
diff --git a/base/memory/ref_counted_unittest.nc b/base/memory/ref_counted_unittest.nc
index d5aa4fa..1ed0228 100644
--- a/base/memory/ref_counted_unittest.nc
+++ b/base/memory/ref_counted_unittest.nc
@@ -15,7 +15,7 @@
   ~InitialRefCountIsZero() {}
 };
 
-#if defined(NCTEST_ADOPT_REF_TO_ZERO_START)  // [r"fatal error: static assertion failed due to requirement 'std::is_same<base::subtle::StartRefCountFromOneTag, base::subtle::StartRefCountFromZeroTag>::value': Use AdoptRef only if the reference count starts from one\."]
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START)  // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<base::subtle::StartRefCountFromOneTag, base::subtle::StartRefCountFromZeroTag>': Use AdoptRef only if the reference count starts from one\."]
 
 void WontCompile() {
   AdoptRef(new InitialRefCountIsZero());
diff --git a/base/memory/safety_checks.h b/base/memory/safety_checks.h
new file mode 100644
index 0000000..ea7cf61
--- /dev/null
+++ b/base/memory/safety_checks.h
@@ -0,0 +1,244 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SAFETY_CHECKS_H_
+#define BASE_MEMORY_SAFETY_CHECKS_H_
+
+#include <new>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
+#include "base/compiler_specific.h"
+#include "base/dcheck_is_on.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+// This header defines the following macros:
+// - ADVANCED_MEMORY_SAFETY_CHECKS()
+// They can be used to specify a class/struct that is targeted to perform
+// additional CHECKS across a variety of memory safety mechanisms such as
+// PartitionAllocator.
+//
+//   class Foo {
+//     ADVANCED_MEMORY_SAFETY_CHECKS();
+//   }
+//
+// Checks here are disabled by default because of their performance cost.
+// Currently, the macro is managed by the memory safety team internally and
+// you should not add / remove it manually.
+
+// We cannot hide things behind an anonymous namespace because they are
+// referenced via a macro, which can be defined anywhere.
+// To avoid tainting the ::base namespace, define things inside this namespace.
+namespace base::internal {
+
+enum class MemorySafetyCheck : uint32_t {
+  kForcePartitionAlloc = (1u << 0),
+  // Enables |FreeFlags::kSchedulerLoopQuarantine|.
+  // Requires PA-E.
+  kSchedulerLoopQuarantine = (1u << 1),
+
+  // TODO(crbug.com/1462223): Implement more advanced checks and add flags here.
+  // kBRPDereferenceAfterFree = (1u << 2),
+  // kZapOnFree = (1u << 3), etc.
+};
+
+constexpr MemorySafetyCheck operator|(MemorySafetyCheck a,
+                                      MemorySafetyCheck b) {
+  return static_cast<MemorySafetyCheck>(static_cast<uint32_t>(a) |
+                                        static_cast<uint32_t>(b));
+}
+
+constexpr MemorySafetyCheck operator&(MemorySafetyCheck a,
+                                      MemorySafetyCheck b) {
+  return static_cast<MemorySafetyCheck>(static_cast<uint32_t>(a) &
+                                        static_cast<uint32_t>(b));
+}
+
+// Set of checks for ADVANCED_MEMORY_SAFETY_CHECKS() annotated objects.
+constexpr auto kAdvancedMemorySafetyChecks =
+    MemorySafetyCheck::kForcePartitionAlloc |
+    MemorySafetyCheck::kSchedulerLoopQuarantine;
+
+// Define type traits to determine type |T|'s memory safety check status.
+namespace {
+
+// Primary template: defaults to |kValue| = 0 (no checks).
+template <typename T, typename AlwaysVoid = void>
+struct GetChecksInternal {
+  static constexpr MemorySafetyCheck kValue = static_cast<MemorySafetyCheck>(0);
+};
+
+// Specialization: used when |T::kMemorySafetyChecks| is present.
+template <typename T>
+struct GetChecksInternal<T, std::void_t<decltype(T::kMemorySafetyChecks)>> {
+  static constexpr MemorySafetyCheck kValue = T::kMemorySafetyChecks;
+};
+
+// Allocator type traits.
+constexpr bool ShouldUsePartitionAlloc(MemorySafetyCheck checks) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  return static_cast<bool>(checks &
+                           (MemorySafetyCheck::kForcePartitionAlloc |
+                            MemorySafetyCheck::kSchedulerLoopQuarantine));
+#else
+  return false;
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+}
+
+// Returns |partition_alloc::AllocFlags| corresponding to |checks|.
+constexpr partition_alloc::AllocFlags GetAllocFlags(MemorySafetyCheck checks) {
+  return partition_alloc::AllocFlags::kReturnNull |
+         partition_alloc::AllocFlags::kNoHooks;
+}
+
+// Returns |partition_alloc::FreeFlags| corresponding to |checks|.
+constexpr partition_alloc::FreeFlags GetFreeFlags(MemorySafetyCheck checks) {
+  auto flags = partition_alloc::FreeFlags::kNone;
+  if (static_cast<bool>(checks & MemorySafetyCheck::kSchedulerLoopQuarantine)) {
+    flags |= partition_alloc::FreeFlags::kSchedulerLoopQuarantine;
+  }
+  return flags;
+}
+
+}  // namespace
+
+// Public utility type traits.
+template <typename T>
+constexpr MemorySafetyCheck get_memory_safety_checks =
+    GetChecksInternal<T>::kValue;
+
+template <typename T, MemorySafetyCheck c>
+constexpr bool is_memory_safety_checked =
+    (get_memory_safety_checks<T> & c) == c;
+
+// Allocator functions.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ALWAYS_INLINE partition_alloc::PartitionRoot*
+GetPartitionRootForMemorySafetyCheckedAllocation() {
+  return allocator_shim::internal::PartitionAllocMalloc::Allocator();
+}
+
+ALWAYS_INLINE partition_alloc::PartitionRoot*
+GetAlignedPartitionRootForMemorySafetyCheckedAllocation() {
+  return allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
+}
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+template <MemorySafetyCheck checks>
+NOINLINE void* HandleMemorySafetyCheckedOperatorNew(std::size_t count) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  if constexpr (ShouldUsePartitionAlloc(checks)) {
+    return GetPartitionRootForMemorySafetyCheckedAllocation()
+        ->AllocInline<GetAllocFlags(checks)>(count);
+  } else
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  {
+    return ::operator new(count);
+  }
+}
+
+template <MemorySafetyCheck checks>
+NOINLINE void* HandleMemorySafetyCheckedOperatorNew(
+    std::size_t count,
+    std::align_val_t alignment) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  if constexpr (ShouldUsePartitionAlloc(checks)) {
+    return GetAlignedPartitionRootForMemorySafetyCheckedAllocation()
+        ->AlignedAlloc<GetAllocFlags(checks)>(static_cast<size_t>(alignment),
+                                              count);
+  } else
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  {
+    return ::operator new(count, alignment);
+  }
+}
+
+template <MemorySafetyCheck checks>
+NOINLINE void HandleMemorySafetyCheckedOperatorDelete(void* ptr) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  if constexpr (ShouldUsePartitionAlloc(checks)) {
+    GetPartitionRootForMemorySafetyCheckedAllocation()
+        ->Free<GetFreeFlags(checks)>(ptr);
+  } else
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  {
+    ::operator delete(ptr);
+  }
+}
+
+template <MemorySafetyCheck checks>
+NOINLINE void HandleMemorySafetyCheckedOperatorDelete(
+    void* ptr,
+    std::align_val_t alignment) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  if constexpr (ShouldUsePartitionAlloc(checks)) {
+    GetAlignedPartitionRootForMemorySafetyCheckedAllocation()
+        ->Free<GetFreeFlags(checks)>(ptr);
+  } else
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  {
+    ::operator delete(ptr, alignment);
+  }
+}
+
+}  // namespace base::internal
+
+// Macros to annotate class/struct's default memory safety check.
+// ADVANCED_MEMORY_SAFETY_CHECKS(): enables |kAdvancedMemorySafetyChecks| for
+// this object.
+//
+// Note that if you use this macro at the top of a struct declaration, the
+// access level will be left as |private|. Please switch it back to
+// |public| manually if needed.
+//
+//   struct ObjectWithAdvancedChecks {
+//     ADVANCED_MEMORY_SAFETY_CHECKS();
+//   public:
+//     int public_field;
+//   };
+#define ADVANCED_MEMORY_SAFETY_CHECKS_INTERNAL(SPECIFIER)                      \
+ public:                                                                       \
+  static constexpr auto kMemorySafetyChecks =                                  \
+      base::internal::kAdvancedMemorySafetyChecks;                             \
+  SPECIFIER static void* operator new(std::size_t count) {                     \
+    return base::internal::HandleMemorySafetyCheckedOperatorNew<               \
+        kMemorySafetyChecks>(count);                                           \
+  }                                                                            \
+  SPECIFIER static void* operator new(std::size_t count,                       \
+                                      std::align_val_t alignment) {            \
+    return base::internal::HandleMemorySafetyCheckedOperatorNew<               \
+        kMemorySafetyChecks>(count, alignment);                                \
+  }                                                                            \
+  /* Though we do not hook placement new, we need to define this */            \
+  /* explicitly to allow it. */                                                \
+  ALWAYS_INLINE static void* operator new(std::size_t, void* ptr) {            \
+    return ptr;                                                                \
+  }                                                                            \
+  SPECIFIER static void operator delete(void* ptr) noexcept {                  \
+    base::internal::HandleMemorySafetyCheckedOperatorDelete<                   \
+        kMemorySafetyChecks>(ptr);                                             \
+  }                                                                            \
+  SPECIFIER static void operator delete(void* ptr,                             \
+                                        std::align_val_t alignment) noexcept { \
+    base::internal::HandleMemorySafetyCheckedOperatorDelete<                   \
+        kMemorySafetyChecks>(ptr, alignment);                                  \
+  }                                                                            \
+                                                                               \
+ private:                                                                      \
+  static_assert(true) /* semicolon here */
+
+#if DCHECK_IS_ON()
+// Specify NOINLINE to display the operator on a stack trace.
+#define ADVANCED_MEMORY_SAFETY_CHECKS() \
+  ADVANCED_MEMORY_SAFETY_CHECKS_INTERNAL(NOINLINE NOT_TAIL_CALLED)
+#else
+#define ADVANCED_MEMORY_SAFETY_CHECKS() \
+  ADVANCED_MEMORY_SAFETY_CHECKS_INTERNAL(ALWAYS_INLINE)
+#endif  // DCHECK_IS_ON()
+
+#endif  // BASE_MEMORY_SAFETY_CHECKS_H_
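For context, a minimal sketch of how a class opts into the advanced checks; `Document` is a hypothetical class, and the routing described in the comments assumes USE_PARTITION_ALLOC_AS_MALLOC is enabled, mirroring the unit test added below.

#include "base/memory/safety_checks.h"

// Hypothetical class annotated with the macro; it gains class-scoped
// operator new/delete that call HandleMemorySafetyCheckedOperatorNew/Delete.
class Document {
  ADVANCED_MEMORY_SAFETY_CHECKS();

 public:
  int id = 0;
};

void Example() {
  // With PartitionAlloc-as-malloc, this allocation is served by the root
  // returned by GetPartitionRootForMemorySafetyCheckedAllocation().
  auto* doc = new Document();
  // Freed with kSchedulerLoopQuarantine when that check is part of
  // kAdvancedMemorySafetyChecks; otherwise a plain deallocation.
  delete doc;
}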
diff --git a/base/memory/safety_checks_unitttest.cc b/base/memory/safety_checks_unitttest.cc
new file mode 100644
index 0000000..f0949d5
--- /dev/null
+++ b/base/memory/safety_checks_unitttest.cc
@@ -0,0 +1,129 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <new>
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_address_space.h"
+#include "base/memory/safety_checks.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+using base::internal::is_memory_safety_checked;
+using base::internal::MemorySafetyCheck;
+
+// Normal object: should be targeted by no additional |MemorySafetyCheck|.
+struct DefaultChecks {};
+
+// Annotated object: should have |base::internal::kAdvancedMemorySafetyChecks|.
+struct AdvancedChecks {
+  ADVANCED_MEMORY_SAFETY_CHECKS();
+};
+
+// Annotated and aligned object for testing aligned allocations.
+constexpr int kLargeAlignment = 2 * __STDCPP_DEFAULT_NEW_ALIGNMENT__;
+struct alignas(kLargeAlignment) AlignedAdvancedChecks {
+  ADVANCED_MEMORY_SAFETY_CHECKS();
+};
+
+// The macro may hook memory allocation/deallocation but should forward the
+// request to PartitionAlloc or any other allocator via
+// |HandleMemorySafetyCheckedOperator***|.
+TEST(MemorySafetyCheckTest, AllocatorFunctions) {
+  static_assert(
+      !is_memory_safety_checked<DefaultChecks,
+                                MemorySafetyCheck::kForcePartitionAlloc>);
+  static_assert(
+      is_memory_safety_checked<AdvancedChecks,
+                               MemorySafetyCheck::kForcePartitionAlloc>);
+  static_assert(
+      is_memory_safety_checked<AlignedAdvancedChecks,
+                               MemorySafetyCheck::kForcePartitionAlloc>);
+
+  // void* operator new(std::size_t count);
+  auto* ptr1 = new DefaultChecks();
+  auto* ptr2 = new AdvancedChecks();
+  EXPECT_NE(ptr1, nullptr);
+  EXPECT_NE(ptr2, nullptr);
+
+// AdvancedChecks is kForcePartitionAlloc.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  EXPECT_TRUE(partition_alloc::IsManagedByPartitionAlloc(
+      reinterpret_cast<uintptr_t>(ptr2)));
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+  // void operator delete(void* ptr);
+  delete ptr1;
+  delete ptr2;
+
+  // void* operator new(std::size_t count, std::align_val_t alignment)
+  ptr1 = new (std::align_val_t(64)) DefaultChecks();
+  ptr2 = new (std::align_val_t(64)) AdvancedChecks();
+  EXPECT_NE(ptr1, nullptr);
+  EXPECT_NE(ptr2, nullptr);
+
+// AdvancedChecks is kForcePartitionAlloc.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  EXPECT_TRUE(partition_alloc::IsManagedByPartitionAlloc(
+      reinterpret_cast<uintptr_t>(ptr2)));
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+  // void operator delete(void* ptr, std::align_val_t alignment)
+  ::operator delete(ptr1, std::align_val_t(64));
+  AdvancedChecks::operator delete(ptr2, std::align_val_t(64));
+
+  // void* operator new(std::size_t count, std::align_val_t alignment)
+  auto* ptr3 = new AlignedAdvancedChecks();
+  EXPECT_NE(ptr3, nullptr);
+
+// AlignedAdvancedChecks is kForcePartitionAlloc.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  EXPECT_TRUE(partition_alloc::IsManagedByPartitionAlloc(
+      reinterpret_cast<uintptr_t>(ptr3)));
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+  // void operator delete(void* ptr, std::align_val_t alignment)
+  delete ptr3;
+
+  // void* operator new(std::size_t, void* ptr)
+  alignas(AlignedAdvancedChecks) char data[32];
+  ptr1 = new (data) DefaultChecks();
+  ptr2 = new (data) AdvancedChecks();
+  ptr3 = new (data) AlignedAdvancedChecks();
+}
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+TEST(MemorySafetyCheckTest, SchedulerLoopQuarantine) {
+  static_assert(
+      !is_memory_safety_checked<DefaultChecks,
+                                MemorySafetyCheck::kSchedulerLoopQuarantine>);
+  static_assert(
+      is_memory_safety_checked<AdvancedChecks,
+                               MemorySafetyCheck::kSchedulerLoopQuarantine>);
+
+  constexpr size_t kCapacityInBytes = 1024;
+
+  auto* root =
+      base::internal::GetPartitionRootForMemorySafetyCheckedAllocation();
+  auto& list = root->GetSchedulerLoopQuarantineForTesting();
+
+  size_t original_capacity_in_bytes = list.GetCapacityInBytes();
+  list.SetCapacityInBytesForTesting(kCapacityInBytes);
+
+  auto* ptr1 = new DefaultChecks();
+  EXPECT_NE(ptr1, nullptr);
+  delete ptr1;
+  EXPECT_FALSE(list.IsQuarantinedForTesting(ptr1));
+
+  auto* ptr2 = new AdvancedChecks();
+  EXPECT_NE(ptr2, nullptr);
+  delete ptr2;
+  EXPECT_TRUE(list.IsQuarantinedForTesting(ptr2));
+
+  list.Purge();
+  list.SetCapacityInBytesForTesting(original_capacity_in_bytes);
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+}  // namespace
diff --git a/base/memory/scoped_refptr.h b/base/memory/scoped_refptr.h
index 79979fe..547e9e9 100644
--- a/base/memory/scoped_refptr.h
+++ b/base/memory/scoped_refptr.h
@@ -27,17 +27,10 @@
 template <class>
 class RefCountedDeleteOnSequence;
 class SequencedTaskRunner;
-class WrappedPromise;
 
 template <typename T>
 scoped_refptr<T> AdoptRef(T* t);
 
-namespace internal {
-
-class BasePromise;
-
-}  // namespace internal
-
 namespace subtle {
 
 enum AdoptRefTag { kAdoptRefTag };
@@ -125,7 +118,7 @@
 template <typename T>
 scoped_refptr<T> AdoptRef(T* obj) {
   using Tag = std::decay_t<decltype(subtle::GetRefCountPreference<T>())>;
-  static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+  static_assert(std::is_same_v<subtle::StartRefCountFromOneTag, Tag>,
                 "Use AdoptRef only if the reference count starts from one.");
 
   DCHECK(obj);
@@ -254,8 +247,7 @@
 
   // Copy conversion constructor.
   template <typename U,
-            typename = typename std::enable_if<
-                std::is_convertible<U*, T*>::value>::type>
+            typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
   scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
 
   // Move constructor. This is required in addition to the move conversion
@@ -264,8 +256,7 @@
 
   // Move conversion constructor.
   template <typename U,
-            typename = typename std::enable_if<
-                std::is_convertible<U*, T*>::value>::type>
+            typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
   scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
     r.ptr_ = nullptr;
   }
@@ -342,11 +333,6 @@
   friend scoped_refptr<U> base::AdoptRef(U*);
   friend class ::base::SequencedTaskRunner;
 
-  // Friend access so these classes can use the constructor below as part of a
-  // binary size optimization.
-  friend class ::base::internal::BasePromise;
-  friend class ::base::WrappedPromise;
-
   scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
 
   // Friend required for move constructors that set r.ptr_ to null.
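Since AdoptRef() statically requires a type whose reference count starts at one, here is a hedged sketch of the intended usage; `Foo` is hypothetical, and REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() is assumed to be the existing //base macro that selects StartRefCountFromOneTag.

#include "base/memory/ref_counted.h"

class Foo : public base::RefCounted<Foo> {
 public:
  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
  Foo() = default;

 private:
  friend class base::RefCounted<Foo>;
  ~Foo() = default;
};

void Example() {
  // The raw pointer already carries the initial reference, so AdoptRef()
  // takes ownership without another AddRef(); MakeRefCounted<Foo>() is the
  // usual shorthand for the same thing.
  scoped_refptr<Foo> foo = base::AdoptRef(new Foo());
}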
diff --git a/base/memory/shared_memory_mapping.h b/base/memory/shared_memory_mapping.h
index 011e1d8..d546deb 100644
--- a/base/memory/shared_memory_mapping.h
+++ b/base/memory/shared_memory_mapping.h
@@ -114,7 +114,7 @@
   // large enough to contain a T, or nullptr otherwise.
   template <typename T>
   const T* GetMemoryAs() const {
-    static_assert(std::is_trivially_copyable<T>::value,
+    static_assert(std::is_trivially_copyable_v<T>,
                   "Copying non-trivially-copyable object across memory spaces "
                   "is dangerous");
     if (!IsValid())
@@ -132,7 +132,7 @@
   // page-aligned.
   template <typename T>
   span<const T> GetMemoryAsSpan() const {
-    static_assert(std::is_trivially_copyable<T>::value,
+    static_assert(std::is_trivially_copyable_v<T>,
                   "Copying non-trivially-copyable object across memory spaces "
                   "is dangerous");
     if (!IsValid())
@@ -146,7 +146,7 @@
   // first element, if any, is guaranteed to be page-aligned.
   template <typename T>
   span<const T> GetMemoryAsSpan(size_t count) const {
-    static_assert(std::is_trivially_copyable<T>::value,
+    static_assert(std::is_trivially_copyable_v<T>,
                   "Copying non-trivially-copyable object across memory spaces "
                   "is dangerous");
     if (!IsValid())
@@ -189,7 +189,7 @@
   // enough to contain a T, or nullptr otherwise.
   template <typename T>
   T* GetMemoryAs() const {
-    static_assert(std::is_trivially_copyable<T>::value,
+    static_assert(std::is_trivially_copyable_v<T>,
                   "Copying non-trivially-copyable object across memory spaces "
                   "is dangerous");
     if (!IsValid())
@@ -206,7 +206,7 @@
   // The first element, if any, is guaranteed to be page-aligned.
   template <typename T>
   span<T> GetMemoryAsSpan() const {
-    static_assert(std::is_trivially_copyable<T>::value,
+    static_assert(std::is_trivially_copyable_v<T>,
                   "Copying non-trivially-copyable object across memory spaces "
                   "is dangerous");
     if (!IsValid())
@@ -220,7 +220,7 @@
   // element, if any, is guaranteed to be page-aligned.
   template <typename T>
   span<T> GetMemoryAsSpan(size_t count) const {
-    static_assert(std::is_trivially_copyable<T>::value,
+    static_assert(std::is_trivially_copyable_v<T>,
                   "Copying non-trivially-copyable object across memory spaces "
                   "is dangerous");
     if (!IsValid())
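A hedged sketch of the trivially-copyable requirement enforced by these static_asserts; the struct and region size are illustrative, and the Create()/Map() shapes follow base's shared memory regions.

#include "base/containers/span.h"
#include "base/memory/read_only_shared_memory_region.h"

struct TelemetrySample {  // Trivially copyable, so GetMemoryAsSpan() compiles.
  int32_t id;
  float value;
};

void Example() {
  base::MappedReadOnlyRegion mapped =
      base::ReadOnlySharedMemoryRegion::Create(16 * sizeof(TelemetrySample));
  // View the writable side of the mapping as a span of samples; a type with a
  // user-provided copy constructor would trip the static_assert instead.
  base::span<TelemetrySample> samples =
      mapped.mapping.GetMemoryAsSpan<TelemetrySample>();
  samples[0] = {1, 0.5f};
}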
diff --git a/base/memory/stack_allocated.h b/base/memory/stack_allocated.h
index 4eee701..b12adee 100644
--- a/base/memory/stack_allocated.h
+++ b/base/memory/stack_allocated.h
@@ -5,6 +5,8 @@
 #ifndef BASE_MEMORY_STACK_ALLOCATED_H_
 #define BASE_MEMORY_STACK_ALLOCATED_H_
 
+#include <stddef.h>
+
 #if defined(__clang__)
 #define STACK_ALLOCATED_IGNORE(reason) \
   __attribute__((annotate("stack_allocated_ignore")))
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 996866b..ce81874 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -176,29 +176,36 @@
 class SupportsWeakPtrBase {
  public:
   // A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
-  // conversion will only compile if there is exists a Base which inherits
-  // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
-  // function that makes calling this easier.
+  // conversion will only compile if Derived singly inherits from
+  // SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper function
+  // that makes calling this easier.
   //
   // Precondition: t != nullptr
   template<typename Derived>
   static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
-    static_assert(
-        std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
-        "AsWeakPtr argument must inherit from SupportsWeakPtr");
-    return AsWeakPtrImpl<Derived>(t);
-  }
-
- private:
-  // This template function uses type inference to find a Base of Derived
-  // which is an instance of SupportsWeakPtr<Base>. We can then safely
-  // static_cast the Base* to a Derived*.
-  template <typename Derived, typename Base>
-  static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
-    WeakPtr<Base> weak = t->AsWeakPtr();
+    static_assert(std::is_base_of_v<internal::SupportsWeakPtrBase, Derived>,
+                  "AsWeakPtr argument must inherit from SupportsWeakPtr");
+    using Base = typename decltype(ExtractSinglyInheritedBase(t))::Base;
+    // Ensure SupportsWeakPtr<Base>::AsWeakPtr() is called even if the subclass
+    // hides or overloads it.
+    WeakPtr<Base> weak = static_cast<SupportsWeakPtr<Base>*>(t)->AsWeakPtr();
     return WeakPtr<Derived>(weak.CloneWeakReference(),
                             static_cast<Derived*>(weak.ptr_));
   }
+
+ private:
+  // This class can only be instantiated if the constructor argument inherits
+  // from SupportsWeakPtr<T> in exactly one way.
+  template <typename T>
+  struct ExtractSinglyInheritedBase;
+  template <typename T>
+  struct ExtractSinglyInheritedBase<SupportsWeakPtr<T>> {
+    using Base = T;
+    explicit ExtractSinglyInheritedBase(SupportsWeakPtr<T>*);
+  };
+  template <typename T>
+  ExtractSinglyInheritedBase(SupportsWeakPtr<T>*)
+      -> ExtractSinglyInheritedBase<SupportsWeakPtr<T>>;
 };
 
 // Forward declaration from safe_ptr.h.
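A hedged sketch of what the ExtractSinglyInheritedBase deduction guide accepts and rejects; the types mirror those in the no-compile test added below.

#include "base/memory/weak_ptr.h"

struct Producer : base::SupportsWeakPtr<Producer> {};
struct DerivedProducer : Producer {};

void Example() {
  DerivedProducer producer;
  // Compiles: the only SupportsWeakPtr<> base is SupportsWeakPtr<Producer>,
  // so Base is deduced as Producer and the result is downcast to
  // DerivedProducer.
  base::WeakPtr<DerivedProducer> weak = base::AsWeakPtr(&producer);

  // Does not compile: two SupportsWeakPtr<> bases leave the deduction guide
  // with no unique SupportsWeakPtr<T>* overload to choose.
  // struct Ambiguous : Producer, base::SupportsWeakPtr<Ambiguous> {};
  // base::WeakPtr<Ambiguous> bad = base::AsWeakPtr(&ambiguous);
}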
diff --git a/base/memory/weak_ptr_nocompile.nc b/base/memory/weak_ptr_nocompile.nc
new file mode 100644
index 0000000..84e36ed
--- /dev/null
+++ b/base/memory/weak_ptr_nocompile.nc
@@ -0,0 +1,120 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+struct Producer : SupportsWeakPtr<Producer> {};
+struct DerivedProducer : Producer {};
+struct OtherDerivedProducer : Producer {};
+struct MultiplyDerivedProducer : Producer,
+                                 SupportsWeakPtr<MultiplyDerivedProducer> {};
+struct Unrelated {};
+struct DerivedUnrelated : Unrelated {};
+
+void DowncastDisallowed() {
+  Producer f;
+  WeakPtr<Producer> ptr = f.AsWeakPtr();
+  {
+    WeakPtr<DerivedProducer> derived_ptr = ptr;  // expected-error {{no viable conversion from 'WeakPtr<Producer>' to 'WeakPtr<DerivedProducer>'}}
+  }
+  {
+    WeakPtr<DerivedProducer> derived_ptr =
+        static_cast<WeakPtr<DerivedProducer> >(ptr);  // expected-error {{no matching conversion for static_cast from 'WeakPtr<Producer>' to 'WeakPtr<DerivedProducer>'}}
+  }
+}
+
+void RefDowncastDisallowed() {
+  Producer f;
+  WeakPtr<Producer> ptr = f.AsWeakPtr();
+  {
+    WeakPtr<DerivedProducer>& derived_ptr = ptr;  // expected-error {{non-const lvalue reference to type 'WeakPtr<DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<Producer>'}}
+  }
+  {
+    WeakPtr<DerivedProducer>& derived_ptr =
+        static_cast<WeakPtr<DerivedProducer>&>(ptr);  // expected-error {{non-const lvalue reference to type 'WeakPtr<DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<Producer>'}}
+  }
+}
+
+void AsWeakPtrDowncastDisallowed() {
+  Producer f;
+  WeakPtr<DerivedProducer> ptr =
+      SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);  // expected-error {{no matching function for call to 'StaticAsWeakPtr'}}
+}
+
+void UnsafeDowncastViaAsWeakPtrDisallowed() {
+  Producer f;
+  {
+    WeakPtr<DerivedProducer> ptr = AsWeakPtr(&f);  // expected-error {{no viable conversion from 'WeakPtr<base::Producer>' to 'WeakPtr<DerivedProducer>'}}
+  }
+  {
+    WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);  // expected-error {{no matching function for call to 'AsWeakPtr'}}
+  }
+  {
+    WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);  // expected-error {{no viable conversion from 'WeakPtr<base::Producer>' to 'WeakPtr<DerivedProducer>'}}
+  }
+}
+
+void UnsafeSidecastViaAsWeakPtrDisallowed() {
+  DerivedProducer f;
+  {
+    WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr(&f);  // expected-error {{no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<OtherDerivedProducer>'}}
+  }
+  {
+    WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);  // expected-error {{no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<OtherDerivedProducer>'}}
+  }
+  {
+    WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);  // expected-error {{no matching function for call to 'AsWeakPtr'}}
+  }
+}
+
+void UnrelatedCastViaAsWeakPtrDisallowed() {
+  DerivedProducer f;
+  {
+    WeakPtr<Unrelated> ptr = AsWeakPtr(&f);  // expected-error {{no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<Unrelated>'}}
+  }
+  {
+    WeakPtr<Unrelated> ptr = AsWeakPtr<Unrelated>(&f);  // expected-error {{no matching function for call to 'AsWeakPtr'}}
+  }
+}
+
+void AsWeakPtrWithoutSupportsWeakPtrDisallowed() {
+  {
+    Unrelated f;
+    WeakPtr<Unrelated> ptr = AsWeakPtr(&f);  // expected-error@*:* {{AsWeakPtr argument must inherit from SupportsWeakPtr}}
+    // expected-error@*:* {{no viable constructor or deduction guide for deduction of template arguments of 'ExtractSinglyInheritedBase'}}
+    // expected-error@*:* {{static_cast from 'base::Unrelated *' to 'SupportsWeakPtr<Base> *' (aka 'SupportsWeakPtr<int> *'), which are not related by inheritance, is not allowed}}
+  }
+  {
+    DerivedUnrelated f;
+    WeakPtr<Unrelated> ptr = AsWeakPtr(&f);  // expected-error@*:* {{AsWeakPtr argument must inherit from SupportsWeakPtr}}
+    // expected-error@*:* {{no viable constructor or deduction guide for deduction of template arguments of 'ExtractSinglyInheritedBase'}}
+    // expected-error@*:* {{static_cast from 'base::DerivedUnrelated *' to 'SupportsWeakPtr<Base> *' (aka 'SupportsWeakPtr<int> *'), which are not related by inheritance, is not allowed}}
+  }
+}
+
+void AsWeakPtrWithAmbiguousAncestorsDisallowed() {
+  MultiplyDerivedProducer f;
+  WeakPtr<MultiplyDerivedProducer> ptr = AsWeakPtr(&f);  // expected-error@*:* {{no viable constructor or deduction guide for deduction of template arguments of 'ExtractSinglyInheritedBase'}}
+  // expected-error@*:* {{static_cast from 'base::MultiplyDerivedProducer *' to 'SupportsWeakPtr<Base> *' (aka 'SupportsWeakPtr<int> *'), which are not related by inheritance, is not allowed}}
+}
+
+void VendingMutablePtrsFromConstFactoryDisallowed() {
+  {
+    Unrelated unrelated;
+    const WeakPtrFactory<const Unrelated> factory(&unrelated);
+    factory.GetMutableWeakPtr();  // expected-error {{no matching member function for call to 'GetMutableWeakPtr'}}
+  }
+  {
+    DerivedUnrelated derived_unrelated;
+    const WeakPtrFactory<DerivedUnrelated> factory(&derived_unrelated);
+    factory.GetMutableWeakPtr<Unrelated>();  // expected-error {{no matching member function for call to 'GetMutableWeakPtr'}}
+  }
+}
+
+}  // namespace base
diff --git a/base/memory/weak_ptr_unittest.nc b/base/memory/weak_ptr_unittest.nc
deleted file mode 100644
index 6bd21da..0000000
--- a/base/memory/weak_ptr_unittest.nc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/memory/weak_ptr.h"
-
-namespace base {
-
-struct Producer : SupportsWeakPtr<Producer> {};
-struct DerivedProducer : Producer {};
-struct OtherDerivedProducer : Producer {};
-struct MultiplyDerivedProducer : Producer,
-                                 SupportsWeakPtr<MultiplyDerivedProducer> {};
-struct Unrelated {};
-struct DerivedUnrelated : Unrelated {};
-
-#if defined(NCTEST_AUTO_DOWNCAST)  // [r"no viable conversion from 'WeakPtr<Producer>' to 'WeakPtr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<Producer> ptr = f.AsWeakPtr();
-  WeakPtr<DerivedProducer> derived_ptr = ptr;
-}
-
-#elif defined(NCTEST_STATIC_DOWNCAST)  // [r"no matching conversion for static_cast from 'WeakPtr<Producer>' to 'WeakPtr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<Producer> ptr = f.AsWeakPtr();
-  WeakPtr<DerivedProducer> derived_ptr =
-      static_cast<WeakPtr<DerivedProducer> >(ptr);
-}
-
-#elif defined(NCTEST_AUTO_REF_DOWNCAST)  // [r"fatal error: non-const lvalue reference to type 'WeakPtr<DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<Producer>'"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<Producer> ptr = f.AsWeakPtr();
-  WeakPtr<DerivedProducer>& derived_ptr = ptr;
-}
-
-#elif defined(NCTEST_STATIC_REF_DOWNCAST)  // [r"fatal error: non-const lvalue reference to type 'WeakPtr<DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<Producer>'"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<Producer> ptr = f.AsWeakPtr();
-  WeakPtr<DerivedProducer>& derived_ptr =
-      static_cast<WeakPtr<DerivedProducer>&>(ptr);
-}
-
-#elif defined(NCTEST_STATIC_ASWEAKPTR_DOWNCAST)  // [r"no matching function"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<DerivedProducer> ptr =
-      SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
-}
-
-#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST)  // [r"no viable conversion from 'WeakPtr<base::Producer>' to 'WeakPtr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<DerivedProducer> ptr = AsWeakPtr(&f);
-}
-
-#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_DOWNCAST)  // [r"no matching function"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
-}
-
-#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST)  // [r"no viable conversion from 'WeakPtr<base::Producer>' to 'WeakPtr<DerivedProducer>'"]
-
-void WontCompile() {
-  Producer f;
-  WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
-}
-
-#elif defined(NCTEST_UNSAFE_HELPER_CAST)  // [r"no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<OtherDerivedProducer>'"]
-
-void WontCompile() {
-  DerivedProducer f;
-  WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr(&f);
-}
-
-#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_SIDECAST)  // [r"fatal error: no matching function for call to 'AsWeakPtr'"]
-
-void WontCompile() {
-  DerivedProducer f;
-  WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
-}
-
-#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST)  // [r"no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<OtherDerivedProducer>'"]
-
-void WontCompile() {
-  DerivedProducer f;
-  WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
-}
-
-#elif defined(NCTEST_UNRELATED_HELPER)  // [r"no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<Unrelated>'"]
-
-void WontCompile() {
-  DerivedProducer f;
-  WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
-}
-
-#elif defined(NCTEST_UNRELATED_INSTANTIATED_HELPER)  // [r"no matching function"]
-
-void WontCompile() {
-  DerivedProducer f;
-  WeakPtr<Unrelated> ptr = AsWeakPtr<Unrelated>(&f);
-}
-
-#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER)  // [r"fatal error: static assertion failed due to requirement 'std::is_base_of<base::internal::SupportsWeakPtrBase, base::Unrelated>::value': AsWeakPtr argument must inherit from SupportsWeakPtr"]
-
-void WontCompile() {
-  Unrelated f;
-  WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
-}
-
-#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER)  // [r"fatal error: static assertion failed due to requirement 'std::is_base_of<base::internal::SupportsWeakPtrBase, base::DerivedUnrelated>::value': AsWeakPtr argument must inherit from SupportsWeakPtr"]
-
-void WontCompile() {
-  DerivedUnrelated f;
-  WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
-}
-
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: no matching function for call to 'AsWeakPtrImpl'"]
-
-void WontCompile() {
-  MultiplyDerivedProducer f;
-  WeakPtr<MultiplyDerivedProducer> ptr = AsWeakPtr(&f);
-}
-
-#elif defined(NCTEST_GETMUTABLEWEAKPTR_CONST_T)  // [r"fatal error: no matching member function for call to 'GetMutableWeakPtr'"]
-
-void WontCompile() {
-  Unrelated unrelated;
-  const WeakPtrFactory<const Unrelated> factory(&unrelated);
-  factory.GetMutableWeakPtr();
-}
-
-#elif defined(NCTEST_GETMUTABLEWEAKPTR_NOT_T)  // [r"fatal error: no matching member function for call to 'GetMutableWeakPtr'"]
-
-void WontCompile() {
-  DerivedUnrelated derived_unrelated;
-  const WeakPtrFactory<DerivedUnrelated> factory(&derived_unrelated);
-  factory.GetMutableWeakPtr<Unrelated>();
-}
-
-#endif
-
-}
diff --git a/base/message_loop/message_pump.cc b/base/message_loop/message_pump.cc
index c17b447..0634f90 100644
--- a/base/message_loop/message_pump.cc
+++ b/base/message_loop/message_pump.cc
@@ -9,16 +9,22 @@
 #include "base/message_loop/message_pump_for_io.h"
 #include "base/message_loop/message_pump_for_ui.h"
 #include "base/notreached.h"
+#include "base/task/task_features.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/message_pump_apple.h"
 #endif
 
 namespace base {
 
 namespace {
 
+std::atomic_bool g_align_wake_ups = false;
+#if BUILDFLAG(IS_WIN)
+bool g_explicit_high_resolution_timer_win = true;
+#endif  // BUILDFLAG(IS_WIN)
+
 MessagePump::MessagePumpFactory* message_pump_for_ui_factory_ = nullptr;
 
 }  // namespace
@@ -45,7 +51,7 @@
       if (message_pump_for_ui_factory_)
         return message_pump_for_ui_factory_();
 #if BUILDFLAG(IS_APPLE)
-      return message_pump_mac::Create();
+      return message_pump_apple::Create();
 #elif BUILDFLAG(IS_NACL) || BUILDFLAG(IS_AIX)
       // Currently NaCl and AIX don't have a UI MessagePump.
       // TODO(abarth): Figure out if we need this.
@@ -82,4 +88,32 @@
   }
 }
 
+// static
+void MessagePump::InitializeFeatures() {
+  g_align_wake_ups = FeatureList::IsEnabled(kAlignWakeUps);
+#if BUILDFLAG(IS_WIN)
+  g_explicit_high_resolution_timer_win =
+      FeatureList::IsEnabled(kExplicitHighResolutionTimerWin);
+#endif
+}
+
+TimeTicks MessagePump::AdjustDelayedRunTime(TimeTicks earliest_time,
+                                            TimeTicks run_time,
+                                            TimeTicks latest_time) {
+  // Windows relies on the low-resolution timer rather than manual wake-up
+  // alignment.
+#if BUILDFLAG(IS_WIN)
+  if (g_explicit_high_resolution_timer_win) {
+    return earliest_time;
+  }
+#else  // BUILDFLAG(IS_WIN)
+  if (g_align_wake_ups.load(std::memory_order_relaxed)) {
+    TimeTicks aligned_run_time = earliest_time.SnappedToNextTick(
+        TimeTicks(), GetTaskLeewayForCurrentThread());
+    return std::min(aligned_run_time, latest_time);
+  }
+#endif
+  return run_time;
+}
+
 }  // namespace base
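A hedged illustration of the non-Windows alignment branch: SnappedToNextTick() rounds the earliest time up to the next multiple of the leeway (measured from the given phase), and the result is clamped to the latest acceptable time. The values here are arbitrary.

#include <algorithm>

#include "base/time/time.h"

base::TimeTicks AlignedWakeUpExample() {
  const base::TimeTicks now = base::TimeTicks::Now();
  const base::TimeTicks earliest = now + base::Milliseconds(3);
  const base::TimeTicks latest = now + base::Milliseconds(20);
  const base::TimeDelta leeway = base::Milliseconds(8);

  // With kAlignWakeUps enabled, the wake-up moves to the next 8 ms tick after
  // |earliest|, but never past |latest|.
  const base::TimeTicks aligned =
      earliest.SnappedToNextTick(base::TimeTicks(), leeway);
  return std::min(aligned, latest);
}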
diff --git a/base/message_loop/message_pump.h b/base/message_loop/message_pump.h
index fb26cc2..0d3645f 100644
--- a/base/message_loop/message_pump.h
+++ b/base/message_loop/message_pump.h
@@ -31,6 +31,8 @@
   // Returns true if the MessagePumpForUI has been overidden.
   static bool IsMessagePumpForUIFactoryOveridden();
 
+  static void InitializeFeatures();
+
   // Creates the default MessagePump based on |type|. Caller owns return value.
   static std::unique_ptr<MessagePump> Create(MessagePumpType type);
 
@@ -57,6 +59,12 @@
       // delayed tasks.
       TimeTicks delayed_run_time;
 
+      // |leeway| determines the preferred time range for scheduling
+      // work. A larger leeway provides more freedom to schedule work at
+      // an optimal time for power consumption. This field is ignored
+      // for immediate work.
+      TimeDelta leeway;
+
       // A recent view of TimeTicks::Now(). Only valid if |delayed_run_time|
       // isn't null nor max. MessagePump impls should use remaining_delay()
       // instead of resampling Now() if they wish to sleep for a TimeDelta.
@@ -251,6 +259,11 @@
   // entered.
   virtual void ScheduleDelayedWork(
       const Delegate::NextWorkInfo& next_work_info) = 0;
+
+  // Returns an adjusted |run_time| based on alignment policies of the pump.
+  virtual TimeTicks AdjustDelayedRunTime(TimeTicks earliest_time,
+                                         TimeTicks run_time,
+                                         TimeTicks latest_time);
 };
 
 }  // namespace base
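A hedged sketch of a delegate filling in the new |leeway| field; the 8 ms value is illustrative.

#include "base/message_loop/message_pump.h"
#include "base/time/time.h"

void ReportDelayedWorkExample() {
  base::MessagePump::Delegate::NextWorkInfo info;
  info.delayed_run_time = base::TimeTicks::Now() + base::Milliseconds(50);
  // A non-zero leeway lets the pump coalesce this wake-up with nearby timers
  // (e.g. via timer slack or wake-up alignment); it is ignored for immediate
  // work.
  info.leeway = base::Milliseconds(8);
}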
diff --git a/base/message_loop/message_pump_apple.h b/base/message_loop/message_pump_apple.h
new file mode 100644
index 0000000..2fe5b55
--- /dev/null
+++ b/base/message_loop/message_pump_apple.h
@@ -0,0 +1,437 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The basis for all native run loops on macOS/iOS is the CFRunLoop.  It can
+// be used directly, it can be used as the driving force behind the similar
+// Foundation NSRunLoop, and it can be used to implement higher-level event
+// loops such as the NSApplication event loop.
+//
+// This file introduces a basic CFRunLoop-based implementation of the
+// MessagePump interface called CFRunLoopBase.  CFRunLoopBase contains all of
+// the machinery necessary to dispatch events to a delegate, but does not
+// implement the specific run loop.  Concrete subclasses must provide their own
+// DoRun and DoQuit implementations.
+//
+// A concrete subclass that just runs a CFRunLoop loop is provided in
+// MessagePumpCFRunLoop.  For an NSRunLoop, the similar MessagePumpNSRunLoop is
+// provided.
+//
+// For the application's event loop, an implementation based on AppKit's
+// NSApplication event system is provided in MessagePumpNSApplication.
+//
+// Typically, MessagePumpNSApplication only makes sense on a Cocoa application's
+// main thread.  If a CFRunLoop-based message pump is needed on any other
+// thread, one of the other concrete subclasses is preferable.
+// To make choosing the right pump easy, message_pump_apple::Create() is
+// defined; it returns a new NSApplication-based or NSRunLoop-based MessagePump
+// subclass depending on which thread it is called on.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_APPLE_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_APPLE_H_
+
+#include "base/message_loop/message_pump.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <memory>
+
+#include "base/apple/scoped_cftyperef.h"
+#include "base/containers/stack.h"
+#include "base/memory/raw_ptr.h"
+#include "base/run_loop.h"
+#include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+#if defined(__OBJC__)
+#if BUILDFLAG(IS_IOS)
+#import <Foundation/Foundation.h>
+#else
+#import <AppKit/AppKit.h>
+
+// Clients must subclass NSApplication and implement this protocol if they use
+// MessagePumpMac.
+@protocol CrAppProtocol
+// Must return true if -[NSApplication sendEvent:] is currently on the stack.
+// See the comment for |CreateAutoreleasePool()| in the cc file for why this is
+// necessary.
+- (BOOL)isHandlingSendEvent;
+@end
+#endif  // BUILDFLAG(IS_IOS)
+#endif  // defined(__OBJC__)
+
+namespace base {
+
+class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
+ public:
+  MessagePumpCFRunLoopBase(const MessagePumpCFRunLoopBase&) = delete;
+  MessagePumpCFRunLoopBase& operator=(const MessagePumpCFRunLoopBase&) = delete;
+
+  static void InitializeFeatures();
+
+  // MessagePump:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(
+      const Delegate::NextWorkInfo& next_work_info) override;
+  TimeTicks AdjustDelayedRunTime(TimeTicks earliest_time,
+                                 TimeTicks run_time,
+                                 TimeTicks latest_time) override;
+
+#if BUILDFLAG(IS_IOS)
+  // Some iOS message pumps do not support calling |Run()| to spin the main
+  // message loop directly.  Instead, call |Attach()| to set up a delegate, then
+  // |Detach()| before destroying the message pump.  These methods do nothing if
+  // the message pump supports calling |Run()| and |Quit()|.
+  virtual void Attach(Delegate* delegate);
+  virtual void Detach();
+#endif  // BUILDFLAG(IS_IOS)
+
+ protected:
+  // Needs access to CreateAutoreleasePool.
+  friend class OptionalAutoreleasePool;
+  friend class TestMessagePumpCFRunLoopBase;
+
+  // Tasks will be pumped in the run loop modes described by
+  // |initial_mode_mask|, which maps bits to the index of an internal array of
+  // run loop mode identifiers.
+  explicit MessagePumpCFRunLoopBase(int initial_mode_mask);
+  ~MessagePumpCFRunLoopBase() override;
+
+  // Subclasses should implement the work they need to do in MessagePump::Run
+  // in the DoRun method.  MessagePumpCFRunLoopBase::Run calls DoRun directly.
+  // This arrangement is used because MessagePumpCFRunLoopBase needs to set
+  // up and tear down things before and after the "meat" of DoRun.
+  virtual void DoRun(Delegate* delegate) = 0;
+
+  // Similar to DoRun, this allows subclasses to perform custom handling when
+  // quitting a run loop. Return true if the quit took effect immediately;
+  // otherwise call OnDidQuit() when the quit is actually applied (e.g., a
+  // nested native runloop exited).
+  virtual bool DoQuit() = 0;
+
+  // Should be called by subclasses to signal when a deferred quit takes place.
+  void OnDidQuit();
+
+  // Accessors for private data members to be used by subclasses.
+  CFRunLoopRef run_loop() const { return run_loop_.get(); }
+  int nesting_level() const { return nesting_level_; }
+  int run_nesting_level() const { return run_nesting_level_; }
+  bool keep_running() const { return keep_running_; }
+
+#if BUILDFLAG(IS_IOS)
+  void OnAttach();
+  void OnDetach();
+#endif
+
+  // Sets this pump's delegate.  Signals the appropriate sources if
+  // |delegateless_work_| is true.  |delegate| can be NULL.
+  void SetDelegate(Delegate* delegate);
+
+  // Return whether an autorelease pool should be created to wrap around any
+  // work being performed. If false is returned to prevent an autorelease pool
+  // from being created, any objects autoreleased by work will fall into the
+  // current autorelease pool.
+  virtual bool ShouldCreateAutoreleasePool();
+
+  // Enable and disable entries in |enabled_modes_| to match |mode_mask|.
+  void SetModeMask(int mode_mask);
+
+  // Get the current mode mask from |enabled_modes_|.
+  int GetModeMask() const;
+
+ protected:
+  raw_ptr<Delegate> delegate() { return delegate_; }
+
+ private:
+  class ScopedModeEnabler;
+
+  // The maximum number of run loop modes that can be monitored.
+  static constexpr int kNumModes = 3;
+
+  // Timer callback scheduled by ScheduleDelayedWork.  This does not do any
+  // work, but it signals |work_source_| so that delayed work can be performed
+  // within the appropriate priority constraints.
+  static void RunDelayedWorkTimer(CFRunLoopTimerRef timer, void* info);
+
+  // Perform highest-priority work.  This is associated with |work_source_|
+  // signalled by ScheduleWork or RunDelayedWorkTimer.  The static method calls
+  // the instance method; the instance method returns true if it resignalled
+  // |work_source_| to be called again from the loop.
+  static void RunWorkSource(void* info);
+  bool RunWork();
+
+  // Perform idle-priority work.  This is normally called by PreWaitObserver,
+  // but can also be invoked from RunNestingDeferredWork when returning from a
+  // nested loop.  When this function actually does perform idle work, it will
+  // re-signal the |work_source_|.
+  void RunIdleWork();
+
+  // Perform work that may have been deferred because it was not runnable
+  // within a nested run loop.  This is associated with
+  // |nesting_deferred_work_source_| and is signalled by
+  // MaybeScheduleNestingDeferredWork when returning from a nested loop,
+  // so that an outer loop will be able to perform the necessary tasks if it
+  // permits nestable tasks.
+  static void RunNestingDeferredWorkSource(void* info);
+  void RunNestingDeferredWork();
+
+  // Called before the run loop goes to sleep to notify delegate.
+  void BeforeWait();
+
+  // Schedules possible nesting-deferred work to be processed before the run
+  // loop goes to sleep, exits, or begins processing sources at the top of its
+  // loop.  If this function detects that a nested loop had run since the
+  // previous attempt to schedule nesting-deferred work, it will schedule a
+  // call to RunNestingDeferredWorkSource.
+  void MaybeScheduleNestingDeferredWork();
+
+  // Observer callback responsible for performing idle-priority work, before
+  // the run loop goes to sleep.  Associated with |pre_wait_observer_|.
+  static void PreWaitObserver(CFRunLoopObserverRef observer,
+                              CFRunLoopActivity activity,
+                              void* info);
+
+  static void AfterWaitObserver(CFRunLoopObserverRef observer,
+                                CFRunLoopActivity activity,
+                                void* info);
+
+  // Observer callback called before the run loop processes any sources.
+  // Associated with |pre_source_observer_|.
+  static void PreSourceObserver(CFRunLoopObserverRef observer,
+                                CFRunLoopActivity activity,
+                                void* info);
+
+  // Observer callback called when the run loop starts and stops, at the
+  // beginning and end of calls to CFRunLoopRun.  This is used to maintain
+  // |nesting_level_|.  Associated with |enter_exit_observer_|.
+  static void EnterExitObserver(CFRunLoopObserverRef observer,
+                                CFRunLoopActivity activity,
+                                void* info);
+
+  // Called by EnterExitObserver after performing maintenance on
+  // |nesting_level_|. This allows subclasses an opportunity to perform
+  // additional processing on the basis of run loops starting and stopping.
+  virtual void EnterExitRunLoop(CFRunLoopActivity activity);
+
+  // Gets rid of the top work item scope.
+  void PopWorkItemScope();
+
+  // Starts tracking a new work item.
+  void PushWorkItemScope();
+
+  // The thread's run loop.
+  apple::ScopedCFTypeRef<CFRunLoopRef> run_loop_;
+
+  // The enabled modes. Posted tasks may run in any non-null entry.
+  std::unique_ptr<ScopedModeEnabler> enabled_modes_[kNumModes];
+
+  // The timer, sources, and observers are described above alongside their
+  // callbacks.
+  apple::ScopedCFTypeRef<CFRunLoopTimerRef> delayed_work_timer_;
+  apple::ScopedCFTypeRef<CFRunLoopSourceRef> work_source_;
+  apple::ScopedCFTypeRef<CFRunLoopSourceRef> nesting_deferred_work_source_;
+  apple::ScopedCFTypeRef<CFRunLoopObserverRef> pre_wait_observer_;
+  apple::ScopedCFTypeRef<CFRunLoopObserverRef> after_wait_observer_;
+  apple::ScopedCFTypeRef<CFRunLoopObserverRef> pre_source_observer_;
+  apple::ScopedCFTypeRef<CFRunLoopObserverRef> enter_exit_observer_;
+
+  // (weak) Delegate passed as an argument to the innermost Run call.
+  raw_ptr<Delegate> delegate_ = nullptr;
+
+  // Time at which `delayed_work_timer_` is set to fire.
+  base::TimeTicks delayed_work_scheduled_at_ = base::TimeTicks::Max();
+  base::TimeDelta delayed_work_leeway_;
+
+  // The recursion depth of the currently-executing CFRunLoopRun loop on the
+  // run loop's thread.  0 if no run loops are running inside of whatever scope
+  // the object was created in.
+  int nesting_level_ = 0;
+
+  // The recursion depth (calculated in the same way as |nesting_level_|) of the
+  // innermost executing CFRunLoopRun loop started by a call to Run.
+  int run_nesting_level_ = 0;
+
+  // The deepest (numerically highest) recursion depth encountered since the
+  // most recent attempt to run nesting-deferred work.
+  int deepest_nesting_level_ = 0;
+
+  // Whether we should continue running application tasks. Set to false when
+  // Quit() is called for the innermost run loop.
+  bool keep_running_ = true;
+
+  // "Delegateless" work flags are set when work is ready to be performed but
+  // must wait until a delegate is available to process it.  This can happen
+  // when a MessagePumpCFRunLoopBase is instantiated and work arrives without
+  // any call to Run on the stack.  The Run method will check for delegateless
+  // work on entry and redispatch it as needed once a delegate is available.
+  bool delegateless_work_ = false;
+
+  // Used to keep track of the native event work items processed by the message
+  // pump. Made of optionals because tracking can be suspended when it's
+  // determined the loop is not processing a native event but the depth of the
+  // stack should match |nesting_level_| at all times. A nullopt is also used
+  // as a stand-in during delegateless operation.
+  base::stack<absl::optional<base::MessagePump::Delegate::ScopedDoWorkItem>>
+      stack_;
+};
+
+class BASE_EXPORT MessagePumpCFRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpCFRunLoop();
+
+  MessagePumpCFRunLoop(const MessagePumpCFRunLoop&) = delete;
+  MessagePumpCFRunLoop& operator=(const MessagePumpCFRunLoop&) = delete;
+
+  ~MessagePumpCFRunLoop() override;
+
+  void DoRun(Delegate* delegate) override;
+  bool DoQuit() override;
+
+ private:
+  void EnterExitRunLoop(CFRunLoopActivity activity) override;
+
+  // True if Quit is called to stop the innermost MessagePump
+  // (|innermost_quittable_|) but some other CFRunLoopRun loop
+  // (|nesting_level_|) is running inside the MessagePump's innermost Run call.
+  bool quit_pending_;
+};
+
+class BASE_EXPORT MessagePumpNSRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpNSRunLoop();
+
+  MessagePumpNSRunLoop(const MessagePumpNSRunLoop&) = delete;
+  MessagePumpNSRunLoop& operator=(const MessagePumpNSRunLoop&) = delete;
+
+  ~MessagePumpNSRunLoop() override;
+
+  void DoRun(Delegate* delegate) override;
+  bool DoQuit() override;
+
+ private:
+  // A source that doesn't do anything but provide something signalable
+  // attached to the run loop.  This source will be signalled when Quit
+  // is called, to cause the loop to wake up so that it can stop.
+  apple::ScopedCFTypeRef<CFRunLoopSourceRef> quit_source_;
+};
+
+#if BUILDFLAG(IS_IOS)
+// This is a fake message pump.  It attaches sources to the main thread's
+// CFRunLoop, so PostTask() will work, but it is unable to drive the loop
+// directly, so calling Run() or Quit() are errors.
+class MessagePumpUIApplication : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpUIApplication();
+
+  MessagePumpUIApplication(const MessagePumpUIApplication&) = delete;
+  MessagePumpUIApplication& operator=(const MessagePumpUIApplication&) = delete;
+
+  ~MessagePumpUIApplication() override;
+  void DoRun(Delegate* delegate) override;
+  bool DoQuit() override;
+
+  // MessagePumpCFRunLoopBase.
+  // MessagePumpUIApplication cannot spin the main message loop directly.
+  // Instead, call |Attach()| to set up a delegate.  It is an error to call
+  // |Run()|.
+  void Attach(Delegate* delegate) override;
+  void Detach() override;
+
+ private:
+  absl::optional<RunLoop> run_loop_;
+};
+
+#else
+
+// While in scope, permits posted tasks to be run in private AppKit run loop
+// modes that would otherwise make the UI unresponsive. E.g., menu fade out.
+class BASE_EXPORT ScopedPumpMessagesInPrivateModes {
+ public:
+  ScopedPumpMessagesInPrivateModes();
+
+  ScopedPumpMessagesInPrivateModes(const ScopedPumpMessagesInPrivateModes&) =
+      delete;
+  ScopedPumpMessagesInPrivateModes& operator=(
+      const ScopedPumpMessagesInPrivateModes&) = delete;
+
+  ~ScopedPumpMessagesInPrivateModes();
+
+  int GetModeMaskForTest();
+};
+
+class MessagePumpNSApplication : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpNSApplication();
+
+  MessagePumpNSApplication(const MessagePumpNSApplication&) = delete;
+  MessagePumpNSApplication& operator=(const MessagePumpNSApplication&) = delete;
+
+  ~MessagePumpNSApplication() override;
+
+  void DoRun(Delegate* delegate) override;
+  bool DoQuit() override;
+
+ private:
+  friend class ScopedPumpMessagesInPrivateModes;
+
+  void EnterExitRunLoop(CFRunLoopActivity activity) override;
+
+  // True if DoRun is managing its own run loop as opposed to letting
+  // -[NSApplication run] handle it.  The outermost run loop in the application
+  // is managed by -[NSApplication run], inner run loops are handled by a loop
+  // in DoRun.
+  bool running_own_loop_ = false;
+
+  // True if Quit() was called while a modal window was shown and needed to be
+  // deferred.
+  bool quit_pending_ = false;
+};
+
+class MessagePumpCrApplication : public MessagePumpNSApplication {
+ public:
+  MessagePumpCrApplication();
+
+  MessagePumpCrApplication(const MessagePumpCrApplication&) = delete;
+  MessagePumpCrApplication& operator=(const MessagePumpCrApplication&) = delete;
+
+  ~MessagePumpCrApplication() override;
+
+ protected:
+  // Returns false if NSApp is currently in the middle of calling -sendEvent.
+  // Requires NSApp implementing CrAppProtocol.
+  bool ShouldCreateAutoreleasePool() override;
+};
+#endif  // BUILDFLAG(IS_IOS)
+
+namespace message_pump_apple {
+
+// If not on the main thread, returns a new instance of
+// MessagePumpNSRunLoop.
+//
+// On the main thread, if NSApp exists and conforms to
+// CrAppProtocol, creates an instance of MessagePumpCrApplication.
+//
+// Otherwise creates an instance of MessagePumpNSApplication using a
+// default NSApplication.
+BASE_EXPORT std::unique_ptr<MessagePump> Create();
+
+#if !BUILDFLAG(IS_IOS)
+// If a pump is created before the required CrAppProtocol is
+// created, the wrong MessagePump subclass could be used.
+// UsingCrApp() returns false if the message pump was created before
+// NSApp was initialized, or if NSApp does not implement
+// CrAppProtocol.  NSApp must be initialized before calling.
+BASE_EXPORT bool UsingCrApp();
+
+// Wrapper to query -[NSApp isHandlingSendEvent] from C++ code.
+// Requires NSApp to implement CrAppProtocol.
+BASE_EXPORT bool IsHandlingSendEvent();
+#endif  // !BUILDFLAG(IS_IOS)
+
+}  // namespace message_pump_apple
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_APPLE_H_
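A hedged usage sketch of the selection logic documented above; in Chrome itself the pump is normally obtained through the generic MessagePump::Create() entry point.

#include "base/message_loop/message_pump.h"
#include "base/message_loop/message_pump_apple.h"
#include "base/message_loop/message_pump_type.h"

void CreatePumpExample() {
  // On the main thread with a CrAppProtocol-conforming NSApp this returns a
  // MessagePumpCrApplication; on any other thread, a MessagePumpNSRunLoop.
  std::unique_ptr<base::MessagePump> pump = base::message_pump_apple::Create();

  // Equivalent generic path used on Apple platforms for the UI pump type.
  std::unique_ptr<base::MessagePump> ui_pump =
      base::MessagePump::Create(base::MessagePumpType::UI);
}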
diff --git a/base/message_loop/message_pump_apple.mm b/base/message_loop/message_pump_apple.mm
new file mode 100644
index 0000000..c599875
--- /dev/null
+++ b/base/message_loop/message_pump_apple.mm
@@ -0,0 +1,959 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/message_loop/message_pump_apple.h"
+
+#import <Foundation/Foundation.h>
+
+#include <atomic>
+#include <limits>
+#include <memory>
+
+#include "base/apple/call_with_eh_frame.h"
+#include "base/apple/scoped_cftyperef.h"
+#include "base/apple/scoped_nsautorelease_pool.h"
+#include "base/auto_reset.h"
+#include "base/check_op.h"
+#include "base/feature_list.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/scoped_policy.h"
+#include "base/memory/stack_allocated.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/notreached.h"
+#include "base/run_loop.h"
+#include "base/task/task_features.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+#if !BUILDFLAG(IS_IOS)
+#import <AppKit/AppKit.h>
+#endif  // !BUILDFLAG(IS_IOS)
+
+namespace base {
+
+namespace {
+
+// Caches the state of the "TimerSlackMac" feature for efficiency.
+std::atomic_bool g_timer_slack = false;
+
+// Mask that determines which modes to use.
+enum { kCommonModeMask = 0b0000'0001, kAllModesMask = 0b0000'0111 };
+
+// Modes to use for MessagePumpNSApplication that are considered "safe".
+// Currently just the common mode. Ideally, messages would be pumped in all
+// modes, but that interacts badly with app modal dialogs (e.g. NSAlert).
+enum { kNSApplicationModalSafeModeMask = 0b0000'0001 };
+
+void NoOp(void* info) {}
+
+constexpr CFTimeInterval kCFTimeIntervalMax =
+    std::numeric_limits<CFTimeInterval>::max();
+
+#if !BUILDFLAG(IS_IOS)
+// Set to true if message_pump_apple::Create() is called before NSApp is
+// initialized.  Only accessed from the main thread.
+bool g_not_using_cr_app = false;
+
+// The MessagePump controlling [NSApp run].
+MessagePumpNSApplication* g_app_pump;
+#endif  // !BUILDFLAG(IS_IOS)
+
+}  // namespace
+
+// A scoper for an optional autorelease pool.
+class OptionalAutoreleasePool {
+  STACK_ALLOCATED();
+
+ public:
+  explicit OptionalAutoreleasePool(MessagePumpCFRunLoopBase* pump) {
+    if (pump->ShouldCreateAutoreleasePool()) {
+      pool_.emplace();
+    }
+  }
+
+  OptionalAutoreleasePool(const OptionalAutoreleasePool&) = delete;
+  OptionalAutoreleasePool& operator=(const OptionalAutoreleasePool&) = delete;
+
+ private:
+  absl::optional<base::apple::ScopedNSAutoreleasePool> pool_;
+};
+
+class MessagePumpCFRunLoopBase::ScopedModeEnabler {
+ public:
+  ScopedModeEnabler(MessagePumpCFRunLoopBase* owner, int mode_index)
+      : owner_(owner), mode_index_(mode_index) {
+    CFRunLoopRef loop = owner_->run_loop_.get();
+    CFRunLoopAddTimer(loop, owner_->delayed_work_timer_.get(), mode());
+    CFRunLoopAddSource(loop, owner_->work_source_.get(), mode());
+    CFRunLoopAddSource(loop, owner_->nesting_deferred_work_source_.get(),
+                       mode());
+    CFRunLoopAddObserver(loop, owner_->pre_wait_observer_.get(), mode());
+    CFRunLoopAddObserver(loop, owner_->after_wait_observer_.get(), mode());
+    CFRunLoopAddObserver(loop, owner_->pre_source_observer_.get(), mode());
+    CFRunLoopAddObserver(loop, owner_->enter_exit_observer_.get(), mode());
+  }
+
+  ScopedModeEnabler(const ScopedModeEnabler&) = delete;
+  ScopedModeEnabler& operator=(const ScopedModeEnabler&) = delete;
+
+  ~ScopedModeEnabler() {
+    CFRunLoopRef loop = owner_->run_loop_.get();
+    CFRunLoopRemoveObserver(loop, owner_->enter_exit_observer_.get(), mode());
+    CFRunLoopRemoveObserver(loop, owner_->pre_source_observer_.get(), mode());
+    CFRunLoopRemoveObserver(loop, owner_->pre_wait_observer_.get(), mode());
+    CFRunLoopRemoveObserver(loop, owner_->after_wait_observer_.get(), mode());
+    CFRunLoopRemoveSource(loop, owner_->nesting_deferred_work_source_.get(),
+                          mode());
+    CFRunLoopRemoveSource(loop, owner_->work_source_.get(), mode());
+    CFRunLoopRemoveTimer(loop, owner_->delayed_work_timer_.get(), mode());
+  }
+
+  // This function knows about the AppKit RunLoop modes observed to potentially
+  // run tasks posted to Chrome's main thread task runner. Some are internal to
+  // AppKit but must be observed to keep Chrome's UI responsive. Others that may
+  // be interesting, but are not watched:
+  //  - com.apple.hitoolbox.windows.transitionmode
+  //  - com.apple.hitoolbox.windows.flushmode
+  const CFStringRef& mode() const {
+    static const CFStringRef modes[] = {
+        // The standard Core Foundation "common modes" constant. Must always be
+        // first in this list to match the value of kCommonModeMask.
+        kCFRunLoopCommonModes,
+
+        // Process work when NSMenus are fading out.
+        CFSTR("com.apple.hitoolbox.windows.windowfadingmode"),
+
+        // Process work when AppKit is highlighting an item on the main menubar.
+        CFSTR("NSUnhighlightMenuRunLoopMode"),
+    };
+    static_assert(std::size(modes) == kNumModes, "mode size mismatch");
+    static_assert((1 << kNumModes) - 1 == kAllModesMask,
+                  "kAllModesMask not large enough");
+
+    return modes[mode_index_];
+  }
+
+ private:
+  const raw_ptr<MessagePumpCFRunLoopBase> owner_;  // Weak. Owns this.
+  const int mode_index_;
+};
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::Run(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+  // nesting_level_ will be incremented in EnterExitRunLoop, so set
+  // run_nesting_level_ accordingly.
+  int last_run_nesting_level = run_nesting_level_;
+  run_nesting_level_ = nesting_level_ + 1;
+
+  Delegate* last_delegate = delegate_;
+  SetDelegate(delegate);
+
+  ScheduleWork();
+  DoRun(delegate);
+
+  // Restore the previous state of the object.
+  SetDelegate(last_delegate);
+  run_nesting_level_ = last_run_nesting_level;
+}
+
+void MessagePumpCFRunLoopBase::Quit() {
+  if (DoQuit()) {
+    OnDidQuit();
+  }
+}
+
+void MessagePumpCFRunLoopBase::OnDidQuit() {
+  keep_running_ = false;
+}
+
+// May be called on any thread.
+void MessagePumpCFRunLoopBase::ScheduleWork() {
+  CFRunLoopSourceSignal(work_source_.get());
+  CFRunLoopWakeUp(run_loop_.get());
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
+    const Delegate::NextWorkInfo& next_work_info) {
+  DCHECK(!next_work_info.is_immediate());
+
+  // The tolerance needs to be set before the fire date or it may be ignored.
+  if (g_timer_slack.load(std::memory_order_relaxed) &&
+      !next_work_info.delayed_run_time.is_max() &&
+      delayed_work_leeway_ != next_work_info.leeway) {
+    if (!next_work_info.leeway.is_zero()) {
+      // Specify slack based on |next_work_info|.
+      CFRunLoopTimerSetTolerance(delayed_work_timer_.get(),
+                                 next_work_info.leeway.InSecondsF());
+    } else {
+      CFRunLoopTimerSetTolerance(delayed_work_timer_.get(), 0);
+    }
+    delayed_work_leeway_ = next_work_info.leeway;
+  }
+
+  // No-op if the delayed run time hasn't changed.
+  if (next_work_info.delayed_run_time != delayed_work_scheduled_at_) {
+    if (next_work_info.delayed_run_time.is_max()) {
+      CFRunLoopTimerSetNextFireDate(delayed_work_timer_.get(),
+                                    kCFTimeIntervalMax);
+    } else {
+      const double delay_seconds =
+          next_work_info.remaining_delay().InSecondsF();
+      CFRunLoopTimerSetNextFireDate(delayed_work_timer_.get(),
+                                    CFAbsoluteTimeGetCurrent() + delay_seconds);
+    }
+
+    delayed_work_scheduled_at_ = next_work_info.delayed_run_time;
+  }
+}
+
+TimeTicks MessagePumpCFRunLoopBase::AdjustDelayedRunTime(
+    TimeTicks earliest_time,
+    TimeTicks run_time,
+    TimeTicks latest_time) {
+  if (g_timer_slack.load(std::memory_order_relaxed)) {
+    return earliest_time;
+  }
+  return MessagePump::AdjustDelayedRunTime(earliest_time, run_time,
+                                           latest_time);
+}
+
+#if BUILDFLAG(IS_IOS)
+void MessagePumpCFRunLoopBase::Attach(Delegate* delegate) {}
+
+void MessagePumpCFRunLoopBase::Detach() {}
+#endif  // BUILDFLAG(IS_IOS)
+
+// Must be called on the run loop thread.
+MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase(int initial_mode_mask) {
+  run_loop_.reset(CFRunLoopGetCurrent(), base::scoped_policy::RETAIN);
+
+  // Set a repeating timer with a preposterous firing time and interval.  The
+  // timer will effectively never fire as-is.  The firing time will be adjusted
+  // as needed when ScheduleDelayedWork is called.
+  CFRunLoopTimerContext timer_context = {0};
+  timer_context.info = this;
+  delayed_work_timer_.reset(
+      CFRunLoopTimerCreate(/*allocator=*/nullptr,
+                           /*fireDate=*/kCFTimeIntervalMax,
+                           /*interval=*/kCFTimeIntervalMax,
+                           /*flags=*/0,
+                           /*order=*/0,
+                           /*callout=*/RunDelayedWorkTimer,
+                           /*context=*/&timer_context));
+
+  CFRunLoopSourceContext source_context = {0};
+  source_context.info = this;
+  source_context.perform = RunWorkSource;
+  work_source_.reset(CFRunLoopSourceCreate(/*allocator=*/nullptr,
+                                           /*order=*/1,
+                                           /*context=*/&source_context));
+  source_context.perform = RunNestingDeferredWorkSource;
+  nesting_deferred_work_source_.reset(
+      CFRunLoopSourceCreate(/*allocator=*/nullptr,
+                            /*order=*/0,
+                            /*context=*/&source_context));
+
+  CFRunLoopObserverContext observer_context = {0};
+  observer_context.info = this;
+  pre_wait_observer_.reset(
+      CFRunLoopObserverCreate(/*allocator=*/nullptr,
+                              /*activities=*/kCFRunLoopBeforeWaiting,
+                              /*repeats=*/true,
+                              /*order=*/0,
+                              /*callout=*/PreWaitObserver,
+                              /*context=*/&observer_context));
+  after_wait_observer_.reset(CFRunLoopObserverCreate(
+      /*allocator=*/nullptr,
+      /*activities=*/kCFRunLoopAfterWaiting,
+      /*repeats=*/true,
+      /*order=*/0,
+      /*callout=*/AfterWaitObserver,
+      /*context=*/&observer_context));
+  pre_source_observer_.reset(
+      CFRunLoopObserverCreate(/*allocator=*/nullptr,
+                              /*activities=*/kCFRunLoopBeforeSources,
+                              /*repeats=*/true,
+                              /*order=*/0,
+                              /*callout=*/PreSourceObserver,
+                              /*context=*/&observer_context));
+  enter_exit_observer_.reset(
+      CFRunLoopObserverCreate(/*allocator=*/nullptr,
+                              /*activities=*/kCFRunLoopEntry | kCFRunLoopExit,
+                              /*repeats=*/true,
+                              /*order=*/0,
+                              /*callout=*/EnterExitObserver,
+                              /*context=*/&observer_context));
+  SetModeMask(initial_mode_mask);
+}
+
+// Ideally called on the run loop thread.  If other run loops were running
+// lower on the run loop thread's stack when this object was created, the
+// same number of run loops must be running when this object is destroyed.
+MessagePumpCFRunLoopBase::~MessagePumpCFRunLoopBase() {
+  SetModeMask(0);
+}
+
+// static
+void MessagePumpCFRunLoopBase::InitializeFeatures() {
+  g_timer_slack.store(FeatureList::IsEnabled(kTimerSlackMac),
+                      std::memory_order_relaxed);
+}
+
+#if BUILDFLAG(IS_IOS)
+void MessagePumpCFRunLoopBase::OnAttach() {
+  CHECK_EQ(nesting_level_, 0);
+  // On iOS, the MessagePump is attached while the run loop is already running.
+  nesting_level_ = 1;
+
+  // There could be some native work done after attaching to the loop and before
+  // |work_source_| is invoked.
+  PushWorkItemScope();
+}
+
+void MessagePumpCFRunLoopBase::OnDetach() {
+  // This function is called on shutdown. This can happen with
+  // `nesting_level_` at either 0 or >= 1:
+  //   `nesting_level_ == 0`: when this is detached as part of tear down
+  //   outside of a run loop (e.g. ~TaskEnvironment).
+  //   `nesting_level_ >= 1`: when this is detached as part of a native
+  //   shutdown notification run from the message pump itself. Nesting levels
+  //   higher than 1 can happen in legitimate nesting situations, like the
+  //   browser being dismissed while displaying a long-press context menu
+  //   (CRWContextMenuController).
+  CHECK_GE(nesting_level_, 0);
+}
+#endif  // BUILDFLAG(IS_IOS)
+
+void MessagePumpCFRunLoopBase::SetDelegate(Delegate* delegate) {
+  delegate_ = delegate;
+
+  if (delegate) {
+    // If any work showed up but could not be dispatched for want of a
+    // delegate, set it up for dispatch again now that a delegate is
+    // available.
+    if (delegateless_work_) {
+      CFRunLoopSourceSignal(work_source_.get());
+      delegateless_work_ = false;
+    }
+  }
+}
+
+// Base version creates an autorelease pool.
+bool MessagePumpCFRunLoopBase::ShouldCreateAutoreleasePool() {
+  return true;
+}
+
+void MessagePumpCFRunLoopBase::SetModeMask(int mode_mask) {
+  for (size_t i = 0; i < kNumModes; ++i) {
+    bool enable = mode_mask & (0x1 << i);
+    if (enable == !enabled_modes_[i]) {
+      enabled_modes_[i] =
+          enable ? std::make_unique<ScopedModeEnabler>(this, i) : nullptr;
+    }
+  }
+}
+
+int MessagePumpCFRunLoopBase::GetModeMask() const {
+  int mask = 0;
+  for (size_t i = 0; i < kNumModes; ++i) {
+    mask |= enabled_modes_[i] ? (0x1 << i) : 0;
+  }
+  return mask;
+}
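+// A minimal usage sketch of the two methods above, assuming |pump| refers to
+// a MessagePumpCFRunLoopBase from code with access to these members: each bit
+// of the mask enables the run loop mode at the same index in |modes| (see
+// ScopedModeEnabler::mode()).
+//   pump->SetModeMask(kNSApplicationModalSafeModeMask);  // bit 0 only.
+//   pump->SetModeMask(kAllModesMask);  // every known mode, private included.
+//   int mask = pump->GetModeMask();    // reads back the enabled bits.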
+
+void MessagePumpCFRunLoopBase::PopWorkItemScope() {
+  // A WorkItemScope should never have been pushed unless the loop was entered.
+  DCHECK_NE(nesting_level_, 0);
+  // If no WorkItemScope was pushed it cannot be popped.
+  DCHECK_GT(stack_.size(), 0u);
+
+  stack_.pop();
+}
+
+void MessagePumpCFRunLoopBase::PushWorkItemScope() {
+  // A WorkItemScope should never be pushed unless the loop was entered.
+  DCHECK_NE(nesting_level_, 0);
+
+  // See RunWork() comments on why the size of |stack_| is never bigger than
+  // |nesting_level_| even in nested loops.
+  DCHECK_LT(stack_.size(), static_cast<size_t>(nesting_level_));
+
+  if (delegate_) {
+    stack_.push(delegate_->BeginWorkItem());
+  } else {
+    stack_.push(absl::nullopt);
+  }
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(CFRunLoopTimerRef timer,
+                                                   void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  // The timer fired, assume we have work and let RunWork() figure out what to
+  // do and what to schedule after.
+  base::apple::CallWithEHFrame(^{
+    // It would be incorrect to expect that `self->delayed_work_scheduled_at_`
+    // is smaller than or equal to `TimeTicks::Now()` because the fire date of a
+    // CFRunLoopTimer can be adjusted slightly.
+    // https://developer.apple.com/documentation/corefoundation/1543570-cfrunlooptimercreate?language=objc
+    DCHECK(!self->delayed_work_scheduled_at_.is_max());
+
+    self->delayed_work_scheduled_at_ = base::TimeTicks::Max();
+    self->RunWork();
+  });
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::apple::CallWithEHFrame(^{
+    self->RunWork();
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::RunWorkSource and RunDelayedWorkTimer.
+bool MessagePumpCFRunLoopBase::RunWork() {
+  if (!delegate_) {
+    // This point can be reached with a nullptr |delegate_| if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.  Arrange to come back
+    // here when a delegate is available.
+    delegateless_work_ = true;
+    return false;
+  }
+  if (!keep_running()) {
+    return false;
+  }
+
+  // The NSApplication-based run loop only drains the autorelease pool at each
+  // UI event (NSEvent).  The autorelease pool is not drained for each
+  // CFRunLoopSource target that's run.  Use a local pool for any autoreleased
+  // objects if the app is not currently handling a UI event to ensure they're
+  // released promptly even in the absence of UI events.
+  OptionalAutoreleasePool autorelease_pool(this);
+
+  // Pop the current work item scope as it captures any native work happening
+  // *between* DoWork()s. This DoWork() happens in sequence with that native
+  // work, not nested within it.
+  PopWorkItemScope();
+  Delegate::NextWorkInfo next_work_info = delegate_->DoWork();
+  // DoWork() (and its own work item coverage) is over so push a new scope to
+  // cover any native work that could possibly happen before the next RunWork().
+  PushWorkItemScope();
+
+  if (next_work_info.is_immediate()) {
+    CFRunLoopSourceSignal(work_source_.get());
+    return true;
+  } else {
+    // This adjusts the next delayed wake up time (potentially cancels an
+    // already scheduled wake up if there is no delayed work).
+    ScheduleDelayedWork(next_work_info);
+    return false;
+  }
+}
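+// The Pop/Push pair in RunWork() keeps |stack_| aligned with native work: the
+// number of open work item scopes never exceeds |nesting_level_| (see the
+// DCHECK in PushWorkItemScope()). A sketch of one outer-loop iteration:
+//   AfterWaitObserver -> PushWorkItemScope()   // covers native work.
+//   RunWork           -> Pop, DoWork(), Push   // DoWork() tracked on its own.
+//   PreWaitObserver   -> PopWorkItemScope()    // loop is about to sleep.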
+
+void MessagePumpCFRunLoopBase::RunIdleWork() {
+  if (!delegate_) {
+    // This point can be reached with a nullptr delegate_ if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.
+    return;
+  }
+  if (!keep_running()) {
+    return;
+  }
+  // The NSApplication-based run loop only drains the autorelease pool at each
+  // UI event (NSEvent).  The autorelease pool is not drained for each
+  // CFRunLoopSource target that's run.  Use a local pool for any autoreleased
+  // objects if the app is not currently handling a UI event to ensure they're
+  // released promptly even in the absence of UI events.
+  OptionalAutoreleasePool autorelease_pool(this);
+  bool did_work = delegate_->DoIdleWork();
+  if (did_work) {
+    CFRunLoopSourceSignal(work_source_.get());
+  }
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource(void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::apple::CallWithEHFrame(^{
+    self->RunNestingDeferredWork();
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource.
+void MessagePumpCFRunLoopBase::RunNestingDeferredWork() {
+  if (!delegate_) {
+    // This point can be reached with a nullptr |delegate_| if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.  There's no sense in
+    // attempting to do any work or signalling the work sources because
+    // without a delegate, work is not possible.
+    return;
+  }
+
+  // Attempt to do work; if there's any more work to do, this call will
+  // re-signal |work_source_| and keep things going. Otherwise, PreWaitObserver
+  // will be invoked by the native pump to declare us idle.
+  RunWork();
+}
+
+void MessagePumpCFRunLoopBase::BeforeWait() {
+  if (!delegate_) {
+    // This point can be reached with a nullptr |delegate_| if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.
+    return;
+  }
+  delegate_->BeforeWait();
+}
+
+// Called before the run loop goes to sleep or exits, or processes sources.
+void MessagePumpCFRunLoopBase::MaybeScheduleNestingDeferredWork() {
+  // deepest_nesting_level_ is set as run loops are entered.  If the deepest
+  // level encountered is deeper than the current level, a nested loop
+  // (relative to the current level) ran since the last time nesting-deferred
+  // work was scheduled.  When that situation is encountered, schedule
+  // nesting-deferred work in case any work was deferred because nested work
+  // was disallowed.
+  if (deepest_nesting_level_ > nesting_level_) {
+    deepest_nesting_level_ = nesting_level_;
+    CFRunLoopSourceSignal(nesting_deferred_work_source_.get());
+  }
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreWaitObserver(CFRunLoopObserverRef observer,
+                                               CFRunLoopActivity activity,
+                                               void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::apple::CallWithEHFrame(^{
+    // Current work item tracking needs to go away since execution will stop.
+    // Matches the PushWorkItemScope() in AfterWaitObserver() (with an arbitrary
+    // amount of matching Pop/Push in between when running work items).
+    self->PopWorkItemScope();
+
+    // Attempt to do some idle work before going to sleep.
+    self->RunIdleWork();
+
+    // The run loop is about to go to sleep.  If any of the work done since it
+    // started or woke up resulted in a nested run loop running,
+    // nesting-deferred work may have accumulated.  Schedule it for processing
+    // if appropriate.
+    self->MaybeScheduleNestingDeferredWork();
+
+    // Notify the delegate that the loop is about to sleep.
+    self->BeforeWait();
+  });
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::AfterWaitObserver(CFRunLoopObserverRef observer,
+                                                 CFRunLoopActivity activity,
+                                                 void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::apple::CallWithEHFrame(^{
+    // Emerging from sleep, any work happening after this (outside of a
+    // RunWork()) should be considered native work. Matching PopWorkItemScope()
+    // is in BeforeWait().
+    self->PushWorkItemScope();
+  });
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreSourceObserver(CFRunLoopObserverRef observer,
+                                                 CFRunLoopActivity activity,
+                                                 void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+  // The run loop has reached the top of the loop and is about to begin
+  // processing sources.  If the last iteration of the loop at this nesting
+  // level did not sleep or exit, nesting-deferred work may have accumulated
+  // if a nested loop ran.  Schedule nesting-deferred work for processing if
+  // appropriate.
+  base::apple::CallWithEHFrame(^{
+    self->MaybeScheduleNestingDeferredWork();
+  });
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::EnterExitObserver(CFRunLoopObserverRef observer,
+                                                 CFRunLoopActivity activity,
+                                                 void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+  switch (activity) {
+    case kCFRunLoopEntry:
+      ++self->nesting_level_;
+
+      // There could be some native work done after entering the loop and before
+      // the next observer.
+      self->PushWorkItemScope();
+      if (self->nesting_level_ > self->deepest_nesting_level_) {
+        self->deepest_nesting_level_ = self->nesting_level_;
+      }
+      break;
+
+    case kCFRunLoopExit:
+      // Not all run loops go to sleep.  If a run loop is stopped before it
+      // goes to sleep due to a CFRunLoopStop call, or if the timeout passed
+      // to CFRunLoopRunInMode expires, the run loop may proceed directly from
+      // handling sources to exiting without any sleep.  This most commonly
+      // occurs when CFRunLoopRunInMode is passed a timeout of 0, causing it
+      // to make a single pass through the loop and exit without sleep.  Some
+      // native loops use CFRunLoop in this way.  Because PreWaitObserver will
+      // not be called in these cases, MaybeScheduleNestingDeferredWork needs
+      // to be called here, as the run loop exits.
+      //
+      // MaybeScheduleNestingDeferredWork consults self->nesting_level_
+      // to determine whether to schedule nesting-deferred work.  It expects
+      // the nesting level to be set to the depth of the loop that is going
+      // to sleep or exiting.  It must be called before decrementing the
+      // value so that the value still corresponds to the level of the exiting
+      // loop.
+      base::apple::CallWithEHFrame(^{
+        self->MaybeScheduleNestingDeferredWork();
+      });
+
+      // Current work item tracking needs to go away since execution will stop.
+      self->PopWorkItemScope();
+
+      --self->nesting_level_;
+      break;
+
+    default:
+      break;
+  }
+
+  base::apple::CallWithEHFrame(^{
+    self->EnterExitRunLoop(activity);
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitRunLoop.  The default
+// implementation is a no-op.
+void MessagePumpCFRunLoopBase::EnterExitRunLoop(CFRunLoopActivity activity) {}
+
+MessagePumpCFRunLoop::MessagePumpCFRunLoop()
+    : MessagePumpCFRunLoopBase(kCommonModeMask), quit_pending_(false) {}
+
+MessagePumpCFRunLoop::~MessagePumpCFRunLoop() = default;
+
+// Called by MessagePumpCFRunLoopBase::DoRun.  If other CFRunLoopRun loops were
+// running lower on the run loop thread's stack when this object was created,
+// the same number of CFRunLoopRun loops must be running for the outermost call
+// to Run.  Run/DoRun are reentrant after that point.
+void MessagePumpCFRunLoop::DoRun(Delegate* delegate) {
+  // This is functionally identical to calling CFRunLoopRun(), except that
+  // autorelease pool management is introduced.
+  int result;
+  do {
+    OptionalAutoreleasePool autorelease_pool(this);
+    result =
+        CFRunLoopRunInMode(kCFRunLoopDefaultMode, kCFTimeIntervalMax, false);
+  } while (result != kCFRunLoopRunStopped && result != kCFRunLoopRunFinished);
+}
+
+// Must be called on the run loop thread.
+bool MessagePumpCFRunLoop::DoQuit() {
+  // Stop the innermost run loop managed by this MessagePumpCFRunLoop object.
+  if (nesting_level() == run_nesting_level()) {
+    // This object is running the innermost loop, just stop it.
+    CFRunLoopStop(run_loop());
+    return true;
+  } else {
+    // There's another loop running inside the loop managed by this object.
+    // In other words, someone else called CFRunLoopRunInMode on the same
+    // thread, deeper on the stack than the deepest Run call.  Don't preempt
+    // other run loops, just mark this object to quit the innermost Run as
+    // soon as the other inner loops not managed by Run are done.
+    quit_pending_ = true;
+    return false;
+  }
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitObserver.
+void MessagePumpCFRunLoop::EnterExitRunLoop(CFRunLoopActivity activity) {
+  if (activity == kCFRunLoopExit && nesting_level() == run_nesting_level() &&
+      quit_pending_) {
+    // Quit was called while loops other than those managed by this object
+    // were running further inside a run loop managed by this object.  Now
+    // that all unmanaged inner run loops are gone, stop the loop running
+    // just inside Run.
+    CFRunLoopStop(run_loop());
+    quit_pending_ = false;
+    OnDidQuit();
+  }
+}
+
+MessagePumpNSRunLoop::MessagePumpNSRunLoop()
+    : MessagePumpCFRunLoopBase(kCommonModeMask) {
+  CFRunLoopSourceContext source_context = {0};
+  source_context.perform = NoOp;
+  quit_source_.reset(CFRunLoopSourceCreate(/*allocator=*/nullptr,
+                                           /*order=*/0,
+                                           /*context=*/&source_context));
+  CFRunLoopAddSource(run_loop(), quit_source_.get(), kCFRunLoopCommonModes);
+}
+
+MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
+  CFRunLoopRemoveSource(run_loop(), quit_source_.get(), kCFRunLoopCommonModes);
+}
+
+void MessagePumpNSRunLoop::DoRun(Delegate* delegate) {
+  while (keep_running()) {
+    // NSRunLoop manages autorelease pools itself.
+    [NSRunLoop.currentRunLoop runMode:NSDefaultRunLoopMode
+                           beforeDate:NSDate.distantFuture];
+  }
+}
+
+bool MessagePumpNSRunLoop::DoQuit() {
+  CFRunLoopSourceSignal(quit_source_.get());
+  CFRunLoopWakeUp(run_loop());
+  return true;
+}
+
+#if BUILDFLAG(IS_IOS)
+MessagePumpUIApplication::MessagePumpUIApplication()
+    : MessagePumpCFRunLoopBase(kCommonModeMask) {}
+
+MessagePumpUIApplication::~MessagePumpUIApplication() = default;
+
+void MessagePumpUIApplication::DoRun(Delegate* delegate) {
+  NOTREACHED();
+}
+
+bool MessagePumpUIApplication::DoQuit() {
+  NOTREACHED();
+  return false;
+}
+
+void MessagePumpUIApplication::Attach(Delegate* delegate) {
+  DCHECK(!run_loop_);
+  run_loop_.emplace();
+
+  CHECK(run_loop_->BeforeRun());
+  SetDelegate(delegate);
+
+  OnAttach();
+}
+
+void MessagePumpUIApplication::Detach() {
+  DCHECK(run_loop_);
+  run_loop_->AfterRun();
+  SetDelegate(nullptr);
+  run_loop_.reset();
+
+  OnDetach();
+}
+
+#else
+
+ScopedPumpMessagesInPrivateModes::ScopedPumpMessagesInPrivateModes() {
+  DCHECK(g_app_pump);
+  DCHECK_EQ(kNSApplicationModalSafeModeMask, g_app_pump->GetModeMask());
+  // Pumping events in private runloop modes is known to interact badly with
+  // app modal windows like NSAlert.
+  if (NSApp.modalWindow) {
+    return;
+  }
+  g_app_pump->SetModeMask(kAllModesMask);
+}
+
+ScopedPumpMessagesInPrivateModes::~ScopedPumpMessagesInPrivateModes() {
+  DCHECK(g_app_pump);
+  g_app_pump->SetModeMask(kNSApplicationModalSafeModeMask);
+}
+
+int ScopedPumpMessagesInPrivateModes::GetModeMaskForTest() {
+  return g_app_pump ? g_app_pump->GetModeMask() : -1;
+}
+
+MessagePumpNSApplication::MessagePumpNSApplication()
+    : MessagePumpCFRunLoopBase(kNSApplicationModalSafeModeMask) {
+  DCHECK_EQ(nullptr, g_app_pump);
+  g_app_pump = this;
+}
+
+MessagePumpNSApplication::~MessagePumpNSApplication() {
+  DCHECK_EQ(this, g_app_pump);
+  g_app_pump = nullptr;
+}
+
+void MessagePumpNSApplication::DoRun(Delegate* delegate) {
+  bool last_running_own_loop_ = running_own_loop_;
+
+  // NSApp must be initialized by calling:
+  // [{some class which implements CrAppProtocol} sharedApplication]
+  // Most likely candidates are CrApplication or BrowserCrApplication.
+  // These can be initialized from C++ code by calling
+  // RegisterCrApp() or RegisterBrowserCrApp().
+  CHECK(NSApp);
+
+  if (!NSApp.running) {
+    running_own_loop_ = false;
+    // NSApplication manages autorelease pools itself when run this way.
+    [NSApp run];
+  } else {
+    running_own_loop_ = true;
+    while (keep_running()) {
+      OptionalAutoreleasePool autorelease_pool(this);
+      NSEvent* event = [NSApp nextEventMatchingMask:NSEventMaskAny
+                                          untilDate:NSDate.distantFuture
+                                             inMode:NSDefaultRunLoopMode
+                                            dequeue:YES];
+      if (event) {
+        [NSApp sendEvent:event];
+      }
+    }
+  }
+
+  running_own_loop_ = last_running_own_loop_;
+}
+
+bool MessagePumpNSApplication::DoQuit() {
+  // If the app is displaying a modal window in a native run loop, we can only
+  // quit our run loop after the window is closed. Otherwise the [NSApplication
+  // stop] below will apply to the modal window run loop instead. To work around
+  // this, the quit is applied when we re-enter our own run loop after the
+  // window is gone (see MessagePumpNSApplication::EnterExitRunLoop).
+  if (nesting_level() > run_nesting_level() && NSApp.modalWindow != nil) {
+    quit_pending_ = true;
+    return false;
+  }
+
+  if (!running_own_loop_) {
+    [NSApp stop:nil];
+  }
+
+  // Send a fake event to wake the loop up.
+  [NSApp postEvent:[NSEvent otherEventWithType:NSEventTypeApplicationDefined
+                                      location:NSZeroPoint
+                                 modifierFlags:0
+                                     timestamp:0
+                                  windowNumber:0
+                                       context:nil
+                                       subtype:0
+                                         data1:0
+                                         data2:0]
+           atStart:NO];
+  return true;
+}
+
+void MessagePumpNSApplication::EnterExitRunLoop(CFRunLoopActivity activity) {
+  // If we previously tried quitting while a modal window was active, check if
+  // the window is gone now and we're no longer nested in a system run loop.
+  if (activity == kCFRunLoopEntry && quit_pending_ &&
+      nesting_level() <= run_nesting_level() && NSApp.modalWindow == nil) {
+    quit_pending_ = false;
+    if (DoQuit()) {
+      OnDidQuit();
+    }
+  }
+}
+
+MessagePumpCrApplication::MessagePumpCrApplication() = default;
+
+MessagePumpCrApplication::~MessagePumpCrApplication() = default;
+
+// Prevents an autorelease pool from being created if the app is in the midst of
+// handling a UI event because various parts of AppKit depend on objects that
+// are created while handling a UI event to be autoreleased in the event loop.
+// An example of this is NSWindowController. When a window with a window
+// controller is closed it goes through a stack like this:
+// (Several stack frames elided for clarity)
+//
+// #0 [NSWindowController autorelease]
+// #1 DoAClose
+// #2 MessagePumpCFRunLoopBase::DoWork()
+// #3 [NSRunLoop run]
+// #4 [NSButton performClick:]
+// #5 [NSWindow sendEvent:]
+// #6 [NSApp sendEvent:]
+// #7 [NSApp run]
+//
+// -performClick: spins a nested run loop. If the pool created in DoWork was a
+// standard NSAutoreleasePool, it would release the objects that were
+// autoreleased into it once DoWork released it. This would cause the window
+// controller, which autoreleased itself in frame #0, to release itself, and
+// possibly free itself. Unfortunately this window controller controls the
+// window in frame #5. When the stack is unwound to frame #5, the window would
+// no longer exist and crashes may occur. Apple gets around this by never
+// releasing the pool it creates in frame #4, and letting frame #7 clean it up
+// when it cleans up the pool that wraps frame #7. When an autorelease pool is
+// released it releases all other pools that were created after it on the
+// autorelease pool stack.
+//
+// CrApplication is responsible for setting handlingSendEvent to true just
+// before it sends the event through the event handling mechanism, and
+// returning it to its previous value once the event has been sent.
+bool MessagePumpCrApplication::ShouldCreateAutoreleasePool() {
+  if (message_pump_apple::IsHandlingSendEvent()) {
+    return false;
+  }
+  return MessagePumpNSApplication::ShouldCreateAutoreleasePool();
+}
+
+#endif  // BUILDFLAG(IS_IOS)
+
+namespace message_pump_apple {
+
+std::unique_ptr<MessagePump> Create() {
+  if (NSThread.isMainThread) {
+#if BUILDFLAG(IS_IOS)
+    return std::make_unique<MessagePumpUIApplication>();
+#else
+    if ([NSApp conformsToProtocol:@protocol(CrAppProtocol)]) {
+      return std::make_unique<MessagePumpCrApplication>();
+    }
+
+    // The main-thread MessagePump implementations REQUIRE an NSApp.
+    // Executables which have specific requirements for their
+    // NSApplication subclass should initialize appropriately before
+    // creating an event loop.
+    [NSApplication sharedApplication];
+    g_not_using_cr_app = true;
+    return std::make_unique<MessagePumpNSApplication>();
+#endif
+  }
+
+  return std::make_unique<MessagePumpNSRunLoop>();
+}
+
+#if !BUILDFLAG(IS_IOS)
+
+bool UsingCrApp() {
+  DCHECK(NSThread.isMainThread);
+
+  // If NSApp is still not initialized, then the subclass used cannot
+  // be determined.
+  DCHECK(NSApp);
+
+  // The pump was created using MessagePumpNSApplication.
+  if (g_not_using_cr_app) {
+    return false;
+  }
+
+  return [NSApp conformsToProtocol:@protocol(CrAppProtocol)];
+}
+
+bool IsHandlingSendEvent() {
+  DCHECK([NSApp conformsToProtocol:@protocol(CrAppProtocol)]);
+  NSObject<CrAppProtocol>* app = static_cast<NSObject<CrAppProtocol>*>(NSApp);
+  return [app isHandlingSendEvent];
+}
+
+#endif  // !BUILDFLAG(IS_IOS)
+
+}  // namespace message_pump_apple
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_apple_unittest.mm b/base/message_loop/message_pump_apple_unittest.mm
new file mode 100644
index 0000000..b68124b
--- /dev/null
+++ b/base/message_loop/message_pump_apple_unittest.mm
@@ -0,0 +1,163 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_apple.h"
+
+#include "base/apple/scoped_cftyperef.h"
+#include "base/cancelable_callback.h"
+#include "base/functional/bind.h"
+#include "base/task/current_thread.h"
+#include "base/task/single_thread_task_runner.h"
+#include "base/test/bind.h"
+#include "base/test/task_environment.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+@interface TestModalAlertCloser : NSObject
+- (void)runTestThenCloseAlert:(NSAlert*)alert;
+@end
+
+namespace {
+
+// Internal constants from message_pump_apple.mm.
+constexpr int kAllModesMask = 0b0000'0111;
+constexpr int kNSApplicationModalSafeModeMask = 0b0000'0001;
+
+}  // namespace
+
+namespace base {
+
+namespace {
+
+// Posted tasks are only executed while the message pump has a delegate, that
+// is, while a base::RunLoop is running. So, to test whether posted tasks are
+// run by CFRunLoopRunInMode and *not* by the regular RunLoop, we need to be
+// inside a task that is itself calling CFRunLoopRunInMode.
+// This function posts |task| and runs the given |mode|.
+void RunTaskInMode(CFRunLoopMode mode, OnceClosure task) {
+  // Since this task is "ours" rather than a system task, allow nesting.
+  CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop allow;
+  CancelableOnceClosure cancelable(std::move(task));
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(FROM_HERE,
+                                                        cancelable.callback());
+  while (CFRunLoopRunInMode(mode, 0, true) == kCFRunLoopRunHandledSource)
+    ;
+}
+
+}  // namespace
+
+// Tests the correct behavior of ScopedPumpMessagesInPrivateModes.
+TEST(MessagePumpAppleTest, ScopedPumpMessagesInPrivateModes) {
+  test::SingleThreadTaskEnvironment task_environment(
+      test::SingleThreadTaskEnvironment::MainThreadType::UI);
+
+  CFRunLoopMode kRegular = kCFRunLoopDefaultMode;
+  CFRunLoopMode kPrivate = CFSTR("NSUnhighlightMenuRunLoopMode");
+
+  // Work is seen when running in the default mode.
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+      FROM_HERE,
+      BindOnce(&RunTaskInMode, kRegular, MakeExpectedRunClosure(FROM_HERE)));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+  // But not seen when running in a private mode.
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+      FROM_HERE,
+      BindOnce(&RunTaskInMode, kPrivate, MakeExpectedNotRunClosure(FROM_HERE)));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+  {
+    ScopedPumpMessagesInPrivateModes allow_private;
+    // Now the work should be seen.
+    SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+        FROM_HERE,
+        BindOnce(&RunTaskInMode, kPrivate, MakeExpectedRunClosure(FROM_HERE)));
+    EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+    // The regular mode should also work the same.
+    SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+        FROM_HERE,
+        BindOnce(&RunTaskInMode, kRegular, MakeExpectedRunClosure(FROM_HERE)));
+    EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+  }
+
+  // The scoper is now out of scope, so private modes should no longer see it.
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+      FROM_HERE,
+      BindOnce(&RunTaskInMode, kPrivate, MakeExpectedNotRunClosure(FROM_HERE)));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+  // Only regular modes see it.
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+      FROM_HERE,
+      BindOnce(&RunTaskInMode, kRegular, MakeExpectedRunClosure(FROM_HERE)));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+}
+
+// Tests that private message loop modes are not pumped while a modal dialog is
+// present.
+TEST(MessagePumpAppleTest, ScopedPumpMessagesAttemptWithModalDialog) {
+  test::SingleThreadTaskEnvironment task_environment(
+      test::SingleThreadTaskEnvironment::MainThreadType::UI);
+
+  {
+    base::ScopedPumpMessagesInPrivateModes allow_private;
+    // No modal window, so all modes should be pumped.
+    EXPECT_EQ(kAllModesMask, allow_private.GetModeMaskForTest());
+  }
+
+  NSAlert* alert = [[NSAlert alloc] init];
+  [alert addButtonWithTitle:@"OK"];
+  TestModalAlertCloser* closer = [[TestModalAlertCloser alloc] init];
+  [closer performSelector:@selector(runTestThenCloseAlert:)
+               withObject:alert
+               afterDelay:0
+                  inModes:@[ NSModalPanelRunLoopMode ]];
+  NSInteger result = [alert runModal];
+  EXPECT_EQ(NSAlertFirstButtonReturn, result);
+}
+
+TEST(MessagePumpAppleTest, QuitWithModalWindow) {
+  test::SingleThreadTaskEnvironment task_environment(
+      test::SingleThreadTaskEnvironment::MainThreadType::UI);
+  NSWindow* window =
+      [[NSWindow alloc] initWithContentRect:NSMakeRect(0, 0, 100, 100)
+                                  styleMask:NSWindowStyleMaskBorderless
+                                    backing:NSBackingStoreBuffered
+                                      defer:NO];
+  window.releasedWhenClosed = NO;
+
+  // Check that quitting the run loop while a modal window is shown applies to
+  // |run_loop| rather than the internal NSApplication modal run loop.
+  RunLoop run_loop;
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+      FROM_HERE, base::BindLambdaForTesting([&] {
+        CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop allow;
+        ScopedPumpMessagesInPrivateModes pump_private;
+        [NSApp runModalForWindow:window];
+      }));
+  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
+      FROM_HERE, base::BindLambdaForTesting([&] {
+        [NSApp stopModal];
+        run_loop.Quit();
+      }));
+
+  EXPECT_NO_FATAL_FAILURE(run_loop.Run());
+}
+
+}  // namespace base
+
+@implementation TestModalAlertCloser
+
+- (void)runTestThenCloseAlert:(NSAlert*)alert {
+  EXPECT_TRUE([NSApp modalWindow]);
+  {
+    base::ScopedPumpMessagesInPrivateModes allow_private;
+    // With a modal window, only safe modes should be pumped.
+    EXPECT_EQ(kNSApplicationModalSafeModeMask,
+              allow_private.GetModeMaskForTest());
+  }
+  [[alert buttons][0] performClick:nil];
+}
+
+@end
diff --git a/base/message_loop/message_pump_default.cc b/base/message_loop/message_pump_default.cc
index bd1b5fd..c3efad1 100644
--- a/base/message_loop/message_pump_default.cc
+++ b/base/message_loop/message_pump_default.cc
@@ -12,9 +12,9 @@
 #if BUILDFLAG(IS_APPLE)
 #include <mach/thread_policy.h>
 
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_mach_port.h"
-#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/apple/mach_logging.h"
+#include "base/apple/scoped_mach_port.h"
+#include "base/apple/scoped_nsautorelease_pool.h"
 #include "base/threading/threading_features.h"
 #endif
 
@@ -34,7 +34,7 @@
 
   for (;;) {
 #if BUILDFLAG(IS_APPLE)
-    mac::ScopedNSAutoreleasePool autorelease_pool;
+    apple::ScopedNSAutoreleasePool autorelease_pool;
 #endif
 
     Delegate::NextWorkInfo next_work_info = delegate->DoWork();
diff --git a/base/message_loop/message_pump_for_ui.h b/base/message_loop/message_pump_for_ui.h
index 0bca2f1..27b65e7 100644
--- a/base/message_loop/message_pump_for_ui.h
+++ b/base/message_loop/message_pump_for_ui.h
@@ -36,7 +36,7 @@
 using MessagePumpForUI = MessagePumpForUI;
 #elif BUILDFLAG(IS_APPLE)
 // MessagePumpForUI isn't bound to a specific impl on Mac. While each impl can
-// be represented by a plain MessagePump: message_pump_mac::Create() must be
+// be represented by a plain MessagePump: message_pump_apple::Create() must be
 // used to instantiate the right impl.
 using MessagePumpForUI = MessagePump;
 #elif BUILDFLAG(IS_NACL) || BUILDFLAG(IS_AIX)
diff --git a/base/message_loop/message_pump_io_ios.cc b/base/message_loop/message_pump_io_ios.cc
index 1ce11b5..999a929 100644
--- a/base/message_loop/message_pump_io_ios.cc
+++ b/base/message_loop/message_pump_io_ios.cc
@@ -91,9 +91,9 @@
 
   CFFileDescriptorRef fdref = controller->fdref_.get();
   if (fdref == NULL) {
-    base::ScopedCFTypeRef<CFFileDescriptorRef> scoped_fdref(
-        CFFileDescriptorCreate(
-            kCFAllocatorDefault, fd, false, HandleFdIOEvent, &source_context));
+    apple::ScopedCFTypeRef<CFFileDescriptorRef> scoped_fdref(
+        CFFileDescriptorCreate(kCFAllocatorDefault, fd, false, HandleFdIOEvent,
+                               &source_context));
     if (scoped_fdref == NULL) {
       NOTREACHED() << "CFFileDescriptorCreate failed";
       return false;
@@ -102,9 +102,9 @@
     CFFileDescriptorEnableCallBacks(scoped_fdref, callback_types);
 
     // TODO(wtc): what should the 'order' argument be?
-    base::ScopedCFTypeRef<CFRunLoopSourceRef> scoped_fd_source(
-        CFFileDescriptorCreateRunLoopSource(
-            kCFAllocatorDefault, scoped_fdref, 0));
+    apple::ScopedCFTypeRef<CFRunLoopSourceRef> scoped_fd_source(
+        CFFileDescriptorCreateRunLoopSource(kCFAllocatorDefault, scoped_fdref,
+                                            0));
     if (scoped_fd_source == NULL) {
       NOTREACHED() << "CFFileDescriptorCreateRunLoopSource failed";
       return false;
@@ -154,7 +154,7 @@
   // Ensure that |fdref| will remain live for the duration of this function
   // call even if |controller| is deleted or |StopWatchingFileDescriptor()| is
   // called, either of which will cause |fdref| to be released.
-  ScopedCFTypeRef<CFFileDescriptorRef> scoped_fdref(
+  apple::ScopedCFTypeRef<CFFileDescriptorRef> scoped_fdref(
       fdref, base::scoped_policy::RETAIN);
 
   int fd = CFFileDescriptorGetNativeDescriptor(fdref);
diff --git a/base/message_loop/message_pump_io_ios.h b/base/message_loop/message_pump_io_ios.h
index 791cb9e..806ed24 100644
--- a/base/message_loop/message_pump_io_ios.h
+++ b/base/message_loop/message_pump_io_ios.h
@@ -5,11 +5,11 @@
 #ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_IO_IOS_H_
 #define BASE_MESSAGE_LOOP_MESSAGE_PUMP_IO_IOS_H_
 
+#include "base/apple/scoped_cffiledescriptorref.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/base_export.h"
-#include "base/mac/scoped_cffiledescriptorref.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/message_pump_apple.h"
 #include "base/message_loop/watchable_io_message_pump_posix.h"
 #include "base/threading/thread_checker.h"
 
@@ -53,10 +53,10 @@
     void OnFileCanWriteWithoutBlocking(int fd, MessagePumpIOSForIO* pump);
 
     bool is_persistent_ = false;  // false if this event is one-shot.
-    base::mac::ScopedCFFileDescriptorRef fdref_;
+    apple::ScopedCFFileDescriptorRef fdref_;
     CFOptionFlags callback_types_ = 0;
-    base::ScopedCFTypeRef<CFRunLoopSourceRef> fd_source_;
-    base::WeakPtr<MessagePumpIOSForIO> pump_;
+    apple::ScopedCFTypeRef<CFRunLoopSourceRef> fd_source_;
+    WeakPtr<MessagePumpIOSForIO> pump_;
     FdWatcher* watcher_ = nullptr;
   };
 
diff --git a/base/message_loop/message_pump_kqueue.cc b/base/message_loop/message_pump_kqueue.cc
index 47aaf21..f07086c 100644
--- a/base/message_loop/message_pump_kqueue.cc
+++ b/base/message_loop/message_pump_kqueue.cc
@@ -8,14 +8,15 @@
 
 #include <atomic>
 
+#include "base/apple/mach_logging.h"
+#include "base/apple/scoped_nsautorelease_pool.h"
 #include "base/auto_reset.h"
 #include "base/feature_list.h"
 #include "base/logging.h"
 #include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_nsautorelease_pool.h"
 #include "base/notreached.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/task/task_features.h"
 #include "base/time/time_override.h"
 
 namespace base {
@@ -32,6 +33,9 @@
 // Caches the state of the "UseSimplifiedMessagePumpKqueueLoop".
 std::atomic_bool g_use_simplified_version = false;
 
+// Caches the state of the "TimerSlackMac" feature for efficiency.
+std::atomic_bool g_timer_slack = false;
+
 #if DCHECK_IS_ON()
 // Prior to macOS 10.14, kqueue timers may spuriously wake up, because earlier
 // wake ups race with timer resets in the kernel. As of macOS 10.14, updating a
@@ -130,7 +134,7 @@
   // using an EVFILT_USER event, especially when triggered across threads.
   kern_return_t kr = mach_port_allocate(
       mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
-      base::mac::ScopedMachReceiveRight::Receiver(wakeup_).get());
+      base::apple::ScopedMachReceiveRight::Receiver(wakeup_).get());
   MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_allocate";
 
   // Configure the event to directly receive the Mach message as part of the
@@ -153,6 +157,8 @@
   g_use_simplified_version.store(
       base::FeatureList::IsEnabled(kUseSimplifiedMessagePumpKqueueLoop),
       std::memory_order_relaxed);
+  g_timer_slack.store(FeatureList::IsEnabled(kTimerSlackMac),
+                      std::memory_order_relaxed);
 }
 
 void MessagePumpKqueue::Run(Delegate* delegate) {
@@ -162,7 +168,7 @@
     RunSimplified(delegate);
   } else {
     while (keep_running_) {
-      mac::ScopedNSAutoreleasePool pool;
+      apple::ScopedNSAutoreleasePool pool;
 
       bool do_more_work = DoInternalWork(delegate, nullptr);
       if (!keep_running_)
@@ -195,7 +201,7 @@
   DoInternalWork(delegate, nullptr);
 
   while (keep_running_) {
-    mac::ScopedNSAutoreleasePool pool;
+    apple::ScopedNSAutoreleasePool pool;
 
     Delegate::NextWorkInfo next_work_info = delegate->DoWork();
     if (!keep_running_)
@@ -271,6 +277,16 @@
   return true;
 }
 
+TimeTicks MessagePumpKqueue::AdjustDelayedRunTime(TimeTicks earliest_time,
+                                                  TimeTicks run_time,
+                                                  TimeTicks latest_time) {
+  if (g_timer_slack.load(std::memory_order_relaxed)) {
+    return earliest_time;
+  }
+  return MessagePump::AdjustDelayedRunTime(earliest_time, run_time,
+                                           latest_time);
+}
+
 bool MessagePumpKqueue::WatchFileDescriptor(int fd,
                                             bool persistent,
                                             int mode,
@@ -319,6 +335,7 @@
 }
 
 void MessagePumpKqueue::SetWakeupTimerEvent(const base::TimeTicks& wakeup_time,
+                                            base::TimeDelta leeway,
                                             kevent64_s* timer_event) {
   // The ident of the wakeup timer. There's only the one timer as the pair
   // (ident, filter) is the identity of the event.
@@ -341,6 +358,13 @@
     // timer is set immediately.
     timer_event->fflags = NOTE_USECONDS;
     timer_event->data = (wakeup_time - base::TimeTicks::Now()).InMicroseconds();
+
+    if (!leeway.is_zero() && g_timer_slack.load(std::memory_order_relaxed)) {
+      // Specify slack based on |leeway|.
+      // See "man kqueue" in recent macOSen for documentation.
+      timer_event->fflags |= NOTE_LEEWAY;
+      timer_event->ext[1] = static_cast<uint64_t>(leeway.InMicroseconds());
+    }
   }
 }
 
@@ -417,7 +441,8 @@
   unsigned int flags = immediate ? KEVENT_FLAG_IMMEDIATE : 0;
 
   if (!immediate) {
-    MaybeUpdateWakeupTimer(next_work_info->delayed_run_time);
+    MaybeUpdateWakeupTimer(next_work_info->delayed_run_time,
+                           next_work_info->leeway);
     DCHECK_EQ(scheduled_wakeup_time_, next_work_info->delayed_run_time);
     delegate->BeforeWait();
   }
@@ -514,7 +539,8 @@
 }
 
 void MessagePumpKqueue::MaybeUpdateWakeupTimer(
-    const base::TimeTicks& wakeup_time) {
+    const base::TimeTicks& wakeup_time,
+    base::TimeDelta leeway) {
   if (wakeup_time == scheduled_wakeup_time_) {
     // No change in the timer setting necessary.
     return;
@@ -525,7 +551,7 @@
     if (scheduled_wakeup_time_ != base::TimeTicks::Max()) {
       // Clear the timer.
       kevent64_s timer{};
-      SetWakeupTimerEvent(wakeup_time, &timer);
+      SetWakeupTimerEvent(wakeup_time, leeway, &timer);
       int rv = ChangeOneEvent(kqueue_, &timer);
       PCHECK(rv == 0) << "kevent64, delete timer";
       --event_count_;
@@ -533,7 +559,7 @@
   } else {
     // Set/reset the timer.
     kevent64_s timer{};
-    SetWakeupTimerEvent(wakeup_time, &timer);
+    SetWakeupTimerEvent(wakeup_time, leeway, &timer);
     int rv = ChangeOneEvent(kqueue_, &timer);
     PCHECK(rv == 0) << "kevent64, set timer";
 
diff --git a/base/message_loop/message_pump_kqueue.h b/base/message_loop/message_pump_kqueue.h
index 177f02c..f62f44e 100644
--- a/base/message_loop/message_pump_kqueue.h
+++ b/base/message_loop/message_pump_kqueue.h
@@ -11,10 +11,10 @@
 
 #include <vector>
 
+#include "base/apple/scoped_mach_port.h"
 #include "base/containers/id_map.h"
 #include "base/files/scoped_file.h"
 #include "base/location.h"
-#include "base/mac/scoped_mach_port.h"
 #include "base/memory/raw_ptr.h"
 #include "base/memory/weak_ptr.h"
 #include "base/message_loop/message_pump.h"
@@ -115,6 +115,9 @@
   void ScheduleWork() override;
   void ScheduleDelayedWork(
       const Delegate::NextWorkInfo& next_work_info) override;
+  TimeTicks AdjustDelayedRunTime(TimeTicks earliest_time,
+                                 TimeTicks run_time,
+                                 TimeTicks latest_time) override;
 
   // Begins watching the Mach receive right named by |port|. The |controller|
   // can be used to stop watching for incoming messages, and new message
@@ -154,14 +157,16 @@
   // scheduled wakeup. Clears the wakeup timer if |wakeup_time| is
   // base::TimeTicks::Max().
   // Updates |scheduled_wakeup_time_| to follow.
-  void MaybeUpdateWakeupTimer(const base::TimeTicks& wakeup_time);
+  void MaybeUpdateWakeupTimer(const base::TimeTicks& wakeup_time,
+                              base::TimeDelta leeway);
 
   void SetWakeupTimerEvent(const base::TimeTicks& wakeup_time,
+                           base::TimeDelta leeway,
                            kevent64_s* timer_event);
 
   // Receive right to which an empty Mach message is sent to wake up the pump
   // in response to ScheduleWork().
-  mac::ScopedMachReceiveRight wakeup_;
+  apple::ScopedMachReceiveRight wakeup_;
   // Scratch buffer that is used to receive the message sent to |wakeup_|.
   mach_msg_empty_rcv_t wakeup_buffer_;
 
diff --git a/base/message_loop/message_pump_kqueue_unittest.cc b/base/message_loop/message_pump_kqueue_unittest.cc
index fb7c3c3..d993fba 100644
--- a/base/message_loop/message_pump_kqueue_unittest.cc
+++ b/base/message_loop/message_pump_kqueue_unittest.cc
@@ -28,16 +28,16 @@
 
   MessagePumpKqueue* pump() { return pump_; }
 
-  static void CreatePortPair(mac::ScopedMachReceiveRight* receive,
-                             mac::ScopedMachSendRight* send) {
+  static void CreatePortPair(apple::ScopedMachReceiveRight* receive,
+                             apple::ScopedMachSendRight* send) {
     mach_port_options_t options{};
     options.flags = MPO_INSERT_SEND_RIGHT;
-    mac::ScopedMachReceiveRight port;
+    apple::ScopedMachReceiveRight port;
     kern_return_t kr = mach_port_construct(
         mach_task_self(), &options, 0,
-        mac::ScopedMachReceiveRight::Receiver(*receive).get());
+        apple::ScopedMachReceiveRight::Receiver(*receive).get());
     ASSERT_EQ(kr, KERN_SUCCESS);
-    *send = mac::ScopedMachSendRight(receive->get());
+    *send = apple::ScopedMachSendRight(receive->get());
   }
 
   static mach_msg_return_t SendEmptyMessage(mach_port_t remote_port,
@@ -79,8 +79,8 @@
 };
 
 TEST_F(MessagePumpKqueueTest, MachPortBasicWatch) {
-  mac::ScopedMachReceiveRight port;
-  mac::ScopedMachSendRight send_right;
+  apple::ScopedMachReceiveRight port;
+  apple::ScopedMachSendRight send_right;
   CreatePortPair(&port, &send_right);
 
   mach_msg_id_t msgid = 'helo';
@@ -110,8 +110,8 @@
 }
 
 TEST_F(MessagePumpKqueueTest, MachPortStopWatching) {
-  mac::ScopedMachReceiveRight port;
-  mac::ScopedMachSendRight send_right;
+  apple::ScopedMachReceiveRight port;
+  apple::ScopedMachSendRight send_right;
   CreatePortPair(&port, &send_right);
 
   RunLoop run_loop;
@@ -141,8 +141,8 @@
 }
 
 TEST_F(MessagePumpKqueueTest, MultipleMachWatchers) {
-  mac::ScopedMachReceiveRight port1, port2;
-  mac::ScopedMachSendRight send_right1, send_right2;
+  apple::ScopedMachReceiveRight port1, port2;
+  apple::ScopedMachSendRight send_right1, send_right2;
   CreatePortPair(&port1, &send_right1);
   CreatePortPair(&port2, &send_right2);
 
diff --git a/base/message_loop/message_pump_mac.h b/base/message_loop/message_pump_mac.h
deleted file mode 100644
index 6196737..0000000
--- a/base/message_loop/message_pump_mac.h
+++ /dev/null
@@ -1,434 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The basis for all native run loops on the Mac is the CFRunLoop.  It can be
-// used directly, it can be used as the driving force behind the similar
-// Foundation NSRunLoop, and it can be used to implement higher-level event
-// loops such as the NSApplication event loop.
-//
-// This file introduces a basic CFRunLoop-based implementation of the
-// MessagePump interface called CFRunLoopBase.  CFRunLoopBase contains all
-// of the machinery necessary to dispatch events to a delegate, but does not
-// implement the specific run loop.  Concrete subclasses must provide their
-// own DoRun and DoQuit implementations.
-//
-// A concrete subclass that just runs a CFRunLoop loop is provided in
-// MessagePumpCFRunLoop.  For an NSRunLoop, the similar MessagePumpNSRunLoop
-// is provided.
-//
-// For the application's event loop, an implementation based on AppKit's
-// NSApplication event system is provided in MessagePumpNSApplication.
-//
-// Typically, MessagePumpNSApplication only makes sense on a Cocoa
-// application's main thread.  If a CFRunLoop-based message pump is needed on
-// any other thread, one of the other concrete subclasses is preferable.
-// message_pump_mac::Create is defined, which returns a new NSApplication-based
-// or NSRunLoop-based MessagePump subclass depending on which thread it is
-// called on.
-
-#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
-#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
-
-#include "base/message_loop/message_pump.h"
-
-#include <CoreFoundation/CoreFoundation.h>
-
-#include <memory>
-
-#include "base/containers/stack.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/memory/raw_ptr.h"
-#include "base/run_loop.h"
-#include "build/build_config.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-
-#if defined(__OBJC__)
-#if BUILDFLAG(IS_IOS)
-#import <Foundation/Foundation.h>
-#else
-#import <AppKit/AppKit.h>
-
-// Clients must subclass NSApplication and implement this protocol if they use
-// MessagePumpMac.
-@protocol CrAppProtocol
-// Must return true if -[NSApplication sendEvent:] is currently on the stack.
-// See the comment for |CreateAutoreleasePool()| in the cc file for why this is
-// necessary.
-- (BOOL)isHandlingSendEvent;
-@end
-#endif  // BUILDFLAG(IS_IOS)
-#endif  // defined(__OBJC__)
-
-namespace base {
-
-class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
- public:
-  MessagePumpCFRunLoopBase(const MessagePumpCFRunLoopBase&) = delete;
-  MessagePumpCFRunLoopBase& operator=(const MessagePumpCFRunLoopBase&) = delete;
-
-  static void InitializeFeatures();
-
-  // MessagePump:
-  void Run(Delegate* delegate) override;
-  void Quit() override;
-  void ScheduleWork() override;
-  void ScheduleDelayedWork(
-      const Delegate::NextWorkInfo& next_work_info) override;
-
-#if BUILDFLAG(IS_IOS)
-  // Some iOS message pumps do not support calling |Run()| to spin the main
-  // message loop directly.  Instead, call |Attach()| to set up a delegate, then
-  // |Detach()| before destroying the message pump.  These methods do nothing if
-  // the message pump supports calling |Run()| and |Quit()|.
-  virtual void Attach(Delegate* delegate);
-  virtual void Detach();
-#endif  // BUILDFLAG(IS_IOS)
-
- protected:
-  // Needs access to CreateAutoreleasePool.
-  friend class OptionalAutoreleasePool;
-  friend class TestMessagePumpCFRunLoopBase;
-
-  // Tasks will be pumped in the run loop modes described by
-  // |initial_mode_mask|, which maps bits to the index of an internal array of
-  // run loop mode identifiers.
-  explicit MessagePumpCFRunLoopBase(int initial_mode_mask);
-  ~MessagePumpCFRunLoopBase() override;
-
-  // Subclasses should implement the work they need to do in MessagePump::Run
-  // in the DoRun method.  MessagePumpCFRunLoopBase::Run calls DoRun directly.
-  // This arrangement is used because MessagePumpCFRunLoopBase needs to set
-  // up and tear down things before and after the "meat" of DoRun.
-  virtual void DoRun(Delegate* delegate) = 0;
-
-  // Similar to DoRun, this allows subclasses to perform custom handling when
-  // quitting a run loop. Return true if the quit took effect immediately;
-  // otherwise call OnDidQuit() when the quit is actually applied (e.g., a
-  // nested native runloop exited).
-  virtual bool DoQuit() = 0;
-
-  // Should be called by subclasses to signal when a deferred quit takes place.
-  void OnDidQuit();
-
-  // Accessors for private data members to be used by subclasses.
-  CFRunLoopRef run_loop() const { return run_loop_; }
-  int nesting_level() const { return nesting_level_; }
-  int run_nesting_level() const { return run_nesting_level_; }
-  bool keep_running() const { return keep_running_; }
-
-#if BUILDFLAG(IS_IOS)
-  void OnAttach();
-  void OnDetach();
-#endif
-
-  // Sets this pump's delegate.  Signals the appropriate sources if
-  // |delegateless_work_| is true.  |delegate| can be NULL.
-  void SetDelegate(Delegate* delegate);
-
-  // Return whether an autorelease pool should be created to wrap around any
-  // work being performed. If false is returned to prevent an autorelease pool
-  // from being created, any objects autoreleased by work will fall into the
-  // current autorelease pool.
-  virtual bool ShouldCreateAutoreleasePool();
-
-  // Enable and disable entries in |enabled_modes_| to match |mode_mask|.
-  void SetModeMask(int mode_mask);
-
-  // Get the current mode mask from |enabled_modes_|.
-  int GetModeMask() const;
-
- protected:
-  raw_ptr<Delegate> delegate() { return delegate_; }
-
- private:
-  class ScopedModeEnabler;
-
-  // The maximum number of run loop modes that can be monitored.
-  static constexpr int kNumModes = 4;
-
-  // Timer callback scheduled by ScheduleDelayedWork.  This does not do any
-  // work, but it signals |work_source_| so that delayed work can be performed
-  // within the appropriate priority constraints.
-  static void RunDelayedWorkTimer(CFRunLoopTimerRef timer, void* info);
-
-  // Perform highest-priority work.  This is associated with |work_source_|
-  // signalled by ScheduleWork or RunDelayedWorkTimer.  The static method calls
-  // the instance method; the instance method returns true if it resignalled
-  // |work_source_| to be called again from the loop.
-  static void RunWorkSource(void* info);
-  bool RunWork();
-
-  // Perform idle-priority work.  This is normally called by PreWaitObserver,
-  // but can also be invoked from RunNestingDeferredWork when returning from a
-  // nested loop.  When this function actually does perform idle work, it will
-  // re-signal the |work_source_|.
-  void RunIdleWork();
-
-  // Perform work that may have been deferred because it was not runnable
-  // within a nested run loop.  This is associated with
-  // |nesting_deferred_work_source_| and is signalled by
-  // MaybeScheduleNestingDeferredWork when returning from a nested loop,
-  // so that an outer loop will be able to perform the necessary tasks if it
-  // permits nestable tasks.
-  static void RunNestingDeferredWorkSource(void* info);
-  void RunNestingDeferredWork();
-
-  // Called before the run loop goes to sleep to notify delegate.
-  void BeforeWait();
-
-  // Schedules possible nesting-deferred work to be processed before the run
-  // loop goes to sleep, exits, or begins processing sources at the top of its
-  // loop.  If this function detects that a nested loop had run since the
-  // previous attempt to schedule nesting-deferred work, it will schedule a
-  // call to RunNestingDeferredWorkSource.
-  void MaybeScheduleNestingDeferredWork();
-
-  // Observer callback responsible for performing idle-priority work, before
-  // the run loop goes to sleep.  Associated with |pre_wait_observer_|.
-  static void PreWaitObserver(CFRunLoopObserverRef observer,
-                              CFRunLoopActivity activity, void* info);
-
-  static void AfterWaitObserver(CFRunLoopObserverRef observer,
-                                CFRunLoopActivity activity,
-                                void* info);
-
-  // Observer callback called before the run loop processes any sources.
-  // Associated with |pre_source_observer_|.
-  static void PreSourceObserver(CFRunLoopObserverRef observer,
-                                CFRunLoopActivity activity, void* info);
-
-  // Observer callback called when the run loop starts and stops, at the
-  // beginning and end of calls to CFRunLoopRun.  This is used to maintain
-  // |nesting_level_|.  Associated with |enter_exit_observer_|.
-  static void EnterExitObserver(CFRunLoopObserverRef observer,
-                                CFRunLoopActivity activity, void* info);
-
-  // Called by EnterExitObserver after performing maintenance on
-  // |nesting_level_|. This allows subclasses an opportunity to perform
-  // additional processing on the basis of run loops starting and stopping.
-  virtual void EnterExitRunLoop(CFRunLoopActivity activity);
-
-  // Gets rid of the top work item scope.
-  void PopWorkItemScope();
-
-  // Starts tracking a new work item.
-  void PushWorkItemScope();
-
-  // The thread's run loop.
-  base::ScopedCFTypeRef<CFRunLoopRef> run_loop_;
-
-  // The enabled modes. Posted tasks may run in any non-null entry.
-  std::unique_ptr<ScopedModeEnabler> enabled_modes_[kNumModes];
-
-  // The timer, sources, and observers are described above alongside their
-  // callbacks.
-  base::ScopedCFTypeRef<CFRunLoopTimerRef> delayed_work_timer_;
-  base::ScopedCFTypeRef<CFRunLoopSourceRef> work_source_;
-  base::ScopedCFTypeRef<CFRunLoopSourceRef> nesting_deferred_work_source_;
-  base::ScopedCFTypeRef<CFRunLoopObserverRef> pre_wait_observer_;
-  base::ScopedCFTypeRef<CFRunLoopObserverRef> after_wait_observer_;
-  base::ScopedCFTypeRef<CFRunLoopObserverRef> pre_source_observer_;
-  base::ScopedCFTypeRef<CFRunLoopObserverRef> enter_exit_observer_;
-
-  // (weak) Delegate passed as an argument to the innermost Run call.
-  raw_ptr<Delegate> delegate_ = nullptr;
-
-  // Time at which `delayed_work_timer_` is set to fire.
-  base::TimeTicks delayed_work_scheduled_at_ = base::TimeTicks::Max();
-
-  // The recursion depth of the currently-executing CFRunLoopRun loop on the
-  // run loop's thread.  0 if no run loops are running inside of whatever scope
-  // the object was created in.
-  int nesting_level_ = 0;
-
-  // The recursion depth (calculated in the same way as |nesting_level_|) of the
-  // innermost executing CFRunLoopRun loop started by a call to Run.
-  int run_nesting_level_ = 0;
-
-  // The deepest (numerically highest) recursion depth encountered since the
-  // most recent attempt to run nesting-deferred work.
-  int deepest_nesting_level_ = 0;
-
-  // Whether we should continue running application tasks. Set to false when
-  // Quit() is called for the innermost run loop.
-  bool keep_running_ = true;
-
-  // "Delegateless" work flags are set when work is ready to be performed but
-  // must wait until a delegate is available to process it.  This can happen
-  // when a MessagePumpCFRunLoopBase is instantiated and work arrives without
-  // any call to Run on the stack.  The Run method will check for delegateless
-  // work on entry and redispatch it as needed once a delegate is available.
-  bool delegateless_work_ = false;
-
-  // Used to keep track of the native event work items processed by the message
-  // pump. Made of optionals because tracking can be suspended when it's
-  // determined the loop is not processing a native event but the depth of the
-  // stack should match |nesting_level_| at all times. A nullopt is also used
-  // as a stand-in during delegateless operation.
-  base::stack<absl::optional<base::MessagePump::Delegate::ScopedDoWorkItem>>
-      stack_;
-};
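Note: the callbacks declared above are static because CoreFoundation invokes plain C function pointers; each one recovers the owning pump through the `info` pointer stored in the corresponding source/observer/timer context and then calls the matching instance method. A minimal sketch of that trampoline pattern, using invented names rather than anything from this header, might look like:

    // Compile on macOS with -framework CoreFoundation.
    #include <CoreFoundation/CoreFoundation.h>

    class PumpSketch {
     public:
      PumpSketch() {
        CFRunLoopSourceContext context = {0};
        context.info = this;  // Handed back to Trampoline() by CoreFoundation.
        context.perform = &PumpSketch::Trampoline;
        source_ = CFRunLoopSourceCreate(/*allocator=*/nullptr, /*order=*/0,
                                        &context);
        CFRunLoopAddSource(CFRunLoopGetCurrent(), source_,
                           kCFRunLoopDefaultMode);
      }
      ~PumpSketch() {
        CFRunLoopRemoveSource(CFRunLoopGetCurrent(), source_,
                              kCFRunLoopDefaultMode);
        CFRelease(source_);
      }
      // May be called from any thread; DoWork() runs on the loop's thread.
      void Signal() { CFRunLoopSourceSignal(source_); }

     private:
      static void Trampoline(void* info) {
        static_cast<PumpSketch*>(info)->DoWork();  // static -> instance hop.
      }
      void DoWork() { /* perform one unit of work */ }
      CFRunLoopSourceRef source_;
    };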
-
-class BASE_EXPORT MessagePumpCFRunLoop : public MessagePumpCFRunLoopBase {
- public:
-  MessagePumpCFRunLoop();
-
-  MessagePumpCFRunLoop(const MessagePumpCFRunLoop&) = delete;
-  MessagePumpCFRunLoop& operator=(const MessagePumpCFRunLoop&) = delete;
-
-  ~MessagePumpCFRunLoop() override;
-
-  void DoRun(Delegate* delegate) override;
-  bool DoQuit() override;
-
- private:
-  void EnterExitRunLoop(CFRunLoopActivity activity) override;
-
-  // True if Quit is called to stop the innermost MessagePump
-  // (|innermost_quittable_|) but some other CFRunLoopRun loop
-  // (|nesting_level_|) is running inside the MessagePump's innermost Run call.
-  bool quit_pending_;
-};
-
-class BASE_EXPORT MessagePumpNSRunLoop : public MessagePumpCFRunLoopBase {
- public:
-  MessagePumpNSRunLoop();
-
-  MessagePumpNSRunLoop(const MessagePumpNSRunLoop&) = delete;
-  MessagePumpNSRunLoop& operator=(const MessagePumpNSRunLoop&) = delete;
-
-  ~MessagePumpNSRunLoop() override;
-
-  void DoRun(Delegate* delegate) override;
-  bool DoQuit() override;
-
- private:
-  // A source that doesn't do anything but provide something signalable
-  // attached to the run loop.  This source will be signalled when Quit
-  // is called, to cause the loop to wake up so that it can stop.
-  base::ScopedCFTypeRef<CFRunLoopSourceRef> quit_source_;
-};
-
-#if BUILDFLAG(IS_IOS)
-// This is a fake message pump.  It attaches sources to the main thread's
-// CFRunLoop, so PostTask() will work, but it is unable to drive the loop
-// directly, so calling Run() or Quit() are errors.
-class MessagePumpUIApplication : public MessagePumpCFRunLoopBase {
- public:
-  MessagePumpUIApplication();
-
-  MessagePumpUIApplication(const MessagePumpUIApplication&) = delete;
-  MessagePumpUIApplication& operator=(const MessagePumpUIApplication&) = delete;
-
-  ~MessagePumpUIApplication() override;
-  void DoRun(Delegate* delegate) override;
-  bool DoQuit() override;
-
-  // MessagePumpCFRunLoopBase.
-  // MessagePumpUIApplication can not spin the main message loop directly.
-  // Instead, call |Attach()| to set up a delegate.  It is an error to call
-  // |Run()|.
-  void Attach(Delegate* delegate) override;
-  void Detach() override;
-
- private:
-  absl::optional<RunLoop> run_loop_;
-};
-
-#else
-
-// While in scope, permits posted tasks to be run in private AppKit run loop
-// modes that would otherwise make the UI unresponsive. E.g., menu fade out.
-class BASE_EXPORT ScopedPumpMessagesInPrivateModes {
- public:
-  ScopedPumpMessagesInPrivateModes();
-
-  ScopedPumpMessagesInPrivateModes(const ScopedPumpMessagesInPrivateModes&) =
-      delete;
-  ScopedPumpMessagesInPrivateModes& operator=(
-      const ScopedPumpMessagesInPrivateModes&) = delete;
-
-  ~ScopedPumpMessagesInPrivateModes();
-
-  int GetModeMaskForTest();
-};
-
-class MessagePumpNSApplication : public MessagePumpCFRunLoopBase {
- public:
-  MessagePumpNSApplication();
-
-  MessagePumpNSApplication(const MessagePumpNSApplication&) = delete;
-  MessagePumpNSApplication& operator=(const MessagePumpNSApplication&) = delete;
-
-  ~MessagePumpNSApplication() override;
-
-  void DoRun(Delegate* delegate) override;
-  bool DoQuit() override;
-
- private:
-  friend class ScopedPumpMessagesInPrivateModes;
-
-  void EnterExitRunLoop(CFRunLoopActivity activity) override;
-
-  // True if DoRun is managing its own run loop as opposed to letting
-  // -[NSApplication run] handle it.  The outermost run loop in the application
-  // is managed by -[NSApplication run], inner run loops are handled by a loop
-  // in DoRun.
-  bool running_own_loop_ = false;
-
-  // True if Quit() was called while a modal window was shown and needed to be
-  // deferred.
-  bool quit_pending_ = false;
-};
-
-class MessagePumpCrApplication : public MessagePumpNSApplication {
- public:
-  MessagePumpCrApplication();
-
-  MessagePumpCrApplication(const MessagePumpCrApplication&) = delete;
-  MessagePumpCrApplication& operator=(const MessagePumpCrApplication&) = delete;
-
-  ~MessagePumpCrApplication() override;
-
- protected:
-  // Returns false if NSApp is currently in the middle of calling -sendEvent.
-  // Requires NSApp implementing CrAppProtocol.
-  bool ShouldCreateAutoreleasePool() override;
-};
-#endif  // BUILDFLAG(IS_IOS)
-
-namespace message_pump_mac {
-
-// If not on the main thread, returns a new instance of
-// MessagePumpNSRunLoop.
-//
-// On the main thread, if NSApp exists and conforms to
-// CrAppProtocol, creates an instance of MessagePumpCrApplication.
-//
-// Otherwise creates an instance of MessagePumpNSApplication using a
-// default NSApplication.
-BASE_EXPORT std::unique_ptr<MessagePump> Create();
-
-#if !BUILDFLAG(IS_IOS)
-// If a pump is created before the required CrAppProtocol is
-// created, the wrong MessagePump subclass could be used.
-// UsingCrApp() returns false if the message pump was created before
-// NSApp was initialized, or if NSApp does not implement
-// CrAppProtocol.  NSApp must be initialized before calling.
-BASE_EXPORT bool UsingCrApp();
-
-// Wrapper to query -[NSApp isHandlingSendEvent] from C++ code.
-// Requires NSApp to implement CrAppProtocol.
-BASE_EXPORT bool IsHandlingSendEvent();
-#endif  // !BUILDFLAG(IS_IOS)
-
-}  // namespace message_pump_mac
-
-// Tasks posted to the message loop are posted under this mode, as well
-// as kCFRunLoopCommonModes.
-extern const CFStringRef BASE_EXPORT kMessageLoopExclusiveRunLoopMode;
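Because the exclusive mode only has Chrome's own work sources attached to it, spinning it drains pending Chromium tasks without letting unrelated run loop sources fire. A hedged usage sketch (assuming the pump has already been created on the current thread; the function name is illustrative, not from this header):

    #include <CoreFoundation/CoreFoundation.h>

    #include "base/message_loop/message_pump_mac.h"

    // Runs only already-signalled Chromium work sources on this thread's loop.
    void DrainChromiumWorkSources() {
      while (CFRunLoopRunInMode(base::kMessageLoopExclusiveRunLoopMode,
                                /*seconds=*/0,
                                /*returnAfterSourceHandled=*/true) ==
             kCFRunLoopRunHandledSource) {
      }
    }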
-
-}  // namespace base
-
-#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
diff --git a/base/message_loop/message_pump_mac.mm b/base/message_loop/message_pump_mac.mm
deleted file mode 100644
index 442fceb..0000000
--- a/base/message_loop/message_pump_mac.mm
+++ /dev/null
@@ -1,952 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import "base/message_loop/message_pump_mac.h"
-
-#import <Foundation/Foundation.h>
-
-#include <atomic>
-#include <limits>
-#include <memory>
-
-#include "base/auto_reset.h"
-#include "base/check_op.h"
-#include "base/feature_list.h"
-#include "base/mac/call_with_eh_frame.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/mac/scoped_nsautorelease_pool.h"
-#include "base/memory/raw_ptr.h"
-#include "base/memory/scoped_policy.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/notreached.h"
-#include "base/run_loop.h"
-#include "base/threading/platform_thread.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-
-#if !BUILDFLAG(IS_IOS)
-#import <AppKit/AppKit.h>
-#endif  // !BUILDFLAG(IS_IOS)
-
-namespace base {
-
-const CFStringRef kMessageLoopExclusiveRunLoopMode =
-    CFSTR("kMessageLoopExclusiveRunLoopMode");
-
-namespace {
-
-// Enables two optimizations in MessagePumpCFRunLoop:
-// - Skip calling CFRunLoopTimerSetNextFireDate if the next delayed wake up
-//  time hasn't changed.
-// - Cancel an already scheduled timer wake up if there is no delayed work.
-BASE_FEATURE(kMessagePumpMacDelayedWorkOptimizations,
-             "MessagePumpMacDelayedWorkOptimizations",
-             base::FEATURE_ENABLED_BY_DEFAULT);
-
-// Caches the state of the "MessagePumpMacDelayedWorkOptimizations"
-// feature for efficiency.
-std::atomic_bool g_enable_optimizations = false;
-
-// Mask that determines which modes to use.
-enum { kCommonModeMask = 0x1, kAllModesMask = 0xf };
-
-// Modes to use for MessagePumpNSApplication that are considered "safe".
-// Currently just common and exclusive modes. Ideally, messages would be pumped
-// in all modes, but that interacts badly with app modal dialogs (e.g. NSAlert).
-enum { kNSApplicationModalSafeModeMask = 0x3 };
-
-void NoOp(void* info) {
-}
-
-constexpr CFTimeInterval kCFTimeIntervalMax =
-    std::numeric_limits<CFTimeInterval>::max();
-
-#if !BUILDFLAG(IS_IOS)
-// Set to true if message_pump_mac::Create() is called before NSApp is
-// initialized.  Only accessed from the main thread.
-bool g_not_using_cr_app = false;
-
-// The MessagePump controlling [NSApp run].
-MessagePumpNSApplication* g_app_pump;
-#endif  // !BUILDFLAG(IS_IOS)
-
-}  // namespace
-
-// A scoper for an optional autorelease pool.
-class OptionalAutoreleasePool {
- public:
-  explicit OptionalAutoreleasePool(MessagePumpCFRunLoopBase* pump) {
-    if (pump->ShouldCreateAutoreleasePool()) {
-      pool_.emplace();
-    }
-  }
-
-  OptionalAutoreleasePool(const OptionalAutoreleasePool&) = delete;
-  OptionalAutoreleasePool& operator=(const OptionalAutoreleasePool&) = delete;
-
- private:
-  absl::optional<base::mac::ScopedNSAutoreleasePool> pool_;
-};
-
-class MessagePumpCFRunLoopBase::ScopedModeEnabler {
- public:
-  ScopedModeEnabler(MessagePumpCFRunLoopBase* owner, int mode_index)
-      : owner_(owner), mode_index_(mode_index) {
-    CFRunLoopRef loop = owner_->run_loop_;
-    CFRunLoopAddTimer(loop, owner_->delayed_work_timer_, mode());
-    CFRunLoopAddSource(loop, owner_->work_source_, mode());
-    CFRunLoopAddSource(loop, owner_->nesting_deferred_work_source_, mode());
-    CFRunLoopAddObserver(loop, owner_->pre_wait_observer_, mode());
-    CFRunLoopAddObserver(loop, owner_->after_wait_observer_, mode());
-    CFRunLoopAddObserver(loop, owner_->pre_source_observer_, mode());
-    CFRunLoopAddObserver(loop, owner_->enter_exit_observer_, mode());
-  }
-
-  ScopedModeEnabler(const ScopedModeEnabler&) = delete;
-  ScopedModeEnabler& operator=(const ScopedModeEnabler&) = delete;
-
-  ~ScopedModeEnabler() {
-    CFRunLoopRef loop = owner_->run_loop_;
-    CFRunLoopRemoveObserver(loop, owner_->enter_exit_observer_, mode());
-    CFRunLoopRemoveObserver(loop, owner_->pre_source_observer_, mode());
-    CFRunLoopRemoveObserver(loop, owner_->pre_wait_observer_, mode());
-    CFRunLoopRemoveObserver(loop, owner_->after_wait_observer_, mode());
-    CFRunLoopRemoveSource(loop, owner_->nesting_deferred_work_source_, mode());
-    CFRunLoopRemoveSource(loop, owner_->work_source_, mode());
-    CFRunLoopRemoveTimer(loop, owner_->delayed_work_timer_, mode());
-  }
-
-  // This function knows about the AppKit RunLoop modes observed to potentially
-  // run tasks posted to Chrome's main thread task runner. Some are internal to
-  // AppKit but must be observed to keep Chrome's UI responsive. Others that may
-  // be interesting, but are not watched:
-  //  - com.apple.hitoolbox.windows.transitionmode
-  //  - com.apple.hitoolbox.windows.flushmode
-  const CFStringRef& mode() const {
-    static const CFStringRef modes[] = {
-        // The standard Core Foundation "common modes" constant. Must always be
-        // first in this list to match the value of kCommonModeMask.
-        kCFRunLoopCommonModes,
-
-        // Mode that only sees Chrome work sources.
-        kMessageLoopExclusiveRunLoopMode,
-
-        // Process work when NSMenus are fading out.
-        CFSTR("com.apple.hitoolbox.windows.windowfadingmode"),
-
-        // Process work when AppKit is highlighting an item on the main menubar.
-        CFSTR("NSUnhighlightMenuRunLoopMode"),
-    };
-    static_assert(std::size(modes) == kNumModes, "mode size mismatch");
-    static_assert((1 << kNumModes) - 1 == kAllModesMask,
-                  "kAllModesMask not large enough");
-
-    return modes[mode_index_];
-  }
-
- private:
-  const raw_ptr<MessagePumpCFRunLoopBase> owner_;  // Weak. Owns this.
-  const int mode_index_;
-};
-
-// Must be called on the run loop thread.
-void MessagePumpCFRunLoopBase::Run(Delegate* delegate) {
-  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
-  // nesting_level_ will be incremented in EnterExitRunLoop, so set
-  // run_nesting_level_ accordingly.
-  int last_run_nesting_level = run_nesting_level_;
-  run_nesting_level_ = nesting_level_ + 1;
-
-  Delegate* last_delegate = delegate_;
-  SetDelegate(delegate);
-
-  ScheduleWork();
-  DoRun(delegate);
-
-  // Restore the previous state of the object.
-  SetDelegate(last_delegate);
-  run_nesting_level_ = last_run_nesting_level;
-}
-
-void MessagePumpCFRunLoopBase::Quit() {
-  if (DoQuit())
-    OnDidQuit();
-}
-
-void MessagePumpCFRunLoopBase::OnDidQuit() {
-  keep_running_ = false;
-}
-
-// May be called on any thread.
-void MessagePumpCFRunLoopBase::ScheduleWork() {
-  CFRunLoopSourceSignal(work_source_);
-  CFRunLoopWakeUp(run_loop_);
-}
-
-// Must be called on the run loop thread.
-void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
-    const Delegate::NextWorkInfo& next_work_info) {
-  DCHECK(!next_work_info.is_immediate());
-
-  if (g_enable_optimizations.load(std::memory_order_relaxed)) {
-    // No-op if the delayed run time hasn't changed.
-    if (next_work_info.delayed_run_time == delayed_work_scheduled_at_)
-      return;
-  } else {
-    // Preserve the old behavior of not adjusting the timer when
-    // `delayed_run_time.is_max()`.
-    //
-    // TODO(crbug.com/1335524): Remove this once the
-    // "MessagePumpMacDelayedWorkOptimizations" feature is shipped.
-    if (next_work_info.delayed_run_time.is_max())
-      return;
-  }
-
-  if (next_work_info.delayed_run_time.is_max()) {
-    CFRunLoopTimerSetNextFireDate(delayed_work_timer_, kCFTimeIntervalMax);
-  } else {
-    const double delay_seconds = next_work_info.remaining_delay().InSecondsF();
-
-    CFRunLoopTimerSetNextFireDate(delayed_work_timer_,
-                                  CFAbsoluteTimeGetCurrent() + delay_seconds);
-  }
-
-  delayed_work_scheduled_at_ = next_work_info.delayed_run_time;
-}
-
-#if BUILDFLAG(IS_IOS)
-void MessagePumpCFRunLoopBase::Attach(Delegate* delegate) {}
-
-void MessagePumpCFRunLoopBase::Detach() {}
-#endif  // BUILDFLAG(IS_IOS)
-
-// Must be called on the run loop thread.
-MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase(int initial_mode_mask) {
-  run_loop_.reset(CFRunLoopGetCurrent(), base::scoped_policy::RETAIN);
-
-  // Set a repeating timer with a preposterous firing time and interval.  The
-  // timer will effectively never fire as-is.  The firing time will be adjusted
-  // as needed when ScheduleDelayedWork is called.
-  CFRunLoopTimerContext timer_context = {0};
-  timer_context.info = this;
-  delayed_work_timer_.reset(
-      CFRunLoopTimerCreate(/*allocator=*/nullptr,
-                           /*fireDate=*/kCFTimeIntervalMax,
-                           /*interval=*/kCFTimeIntervalMax,
-                           /*flags=*/0,
-                           /*order=*/0,
-                           /*callout=*/RunDelayedWorkTimer,
-                           /*context=*/&timer_context));
-  CFRunLoopTimerSetTolerance(delayed_work_timer_, 0);
-
-  CFRunLoopSourceContext source_context = {0};
-  source_context.info = this;
-  source_context.perform = RunWorkSource;
-  work_source_.reset(CFRunLoopSourceCreate(/*allocator=*/nullptr,
-                                           /*order=*/1,
-                                           /*context=*/&source_context));
-  source_context.perform = RunNestingDeferredWorkSource;
-  nesting_deferred_work_source_.reset(
-      CFRunLoopSourceCreate(/*allocator=*/nullptr,
-                            /*order=*/0,
-                            /*context=*/&source_context));
-
-  CFRunLoopObserverContext observer_context = {0};
-  observer_context.info = this;
-  pre_wait_observer_.reset(
-      CFRunLoopObserverCreate(/*allocator=*/nullptr,
-                              /*activities=*/kCFRunLoopBeforeWaiting,
-                              /*repeats=*/true,
-                              /*order=*/0,
-                              /*callout=*/PreWaitObserver,
-                              /*context=*/&observer_context));
-  after_wait_observer_.reset(CFRunLoopObserverCreate(
-      /*allocator=*/nullptr,
-      /*activities=*/kCFRunLoopAfterWaiting,
-      /*repeats=*/true,
-      /*order=*/0,
-      /*callout=*/AfterWaitObserver,
-      /*context=*/&observer_context));
-  pre_source_observer_.reset(
-      CFRunLoopObserverCreate(/*allocator=*/nullptr,
-                              /*activities=*/kCFRunLoopBeforeSources,
-                              /*repeats=*/true,
-                              /*order=*/0,
-                              /*callout=*/PreSourceObserver,
-                              /*context=*/&observer_context));
-  enter_exit_observer_.reset(
-      CFRunLoopObserverCreate(/*allocator=*/nullptr,
-                              /*activities=*/kCFRunLoopEntry | kCFRunLoopExit,
-                              /*repeats=*/true,
-                              /*order=*/0,
-                              /*callout=*/EnterExitObserver,
-                              /*context=*/&observer_context));
-  SetModeMask(initial_mode_mask);
-}
-
-// Ideally called on the run loop thread.  If other run loops were running
-// lower on the run loop thread's stack when this object was created, the
-// same number of run loops must be running when this object is destroyed.
-MessagePumpCFRunLoopBase::~MessagePumpCFRunLoopBase() {
-  SetModeMask(0);
-}
-
-// static
-void MessagePumpCFRunLoopBase::InitializeFeatures() {
-  g_enable_optimizations.store(
-      base::FeatureList::IsEnabled(kMessagePumpMacDelayedWorkOptimizations),
-      std::memory_order_relaxed);
-}
-
-#if BUILDFLAG(IS_IOS)
-void MessagePumpCFRunLoopBase::OnAttach() {
-  CHECK_EQ(nesting_level_, 0);
-  // On iOS: the MessagePump is attached while it's already running.
-  nesting_level_ = 1;
-
-  // There could be some native work done after attaching to the loop and before
-  // |work_source_| is invoked.
-  PushWorkItemScope();
-}
-
-void MessagePumpCFRunLoopBase::OnDetach() {
-  // This function is called on shutdown. It can happen at either
-  // `nesting_level_` >= 1 or 0:
-  //   `nesting_level_ == 0`: when this is detached as part of tear down
-  //   outside of a run loop (e.g. ~TaskEnvironment).
-  //   `nesting_level_ >= 1`: when this is detached as part of a native
-  //   shutdown notification run from the message pump itself. Nesting levels
-  //   higher than 1 can happen in legitimate nesting situations like the
-  //   browser being dismissed while displaying a long press context menu
-  //   (CRWContextMenuController).
-  CHECK_GE(nesting_level_, 0);
-}
-#endif  // BUILDFLAG(IS_IOS)
-
-void MessagePumpCFRunLoopBase::SetDelegate(Delegate* delegate) {
-  delegate_ = delegate;
-
-  if (delegate) {
-    // If any work showed up but could not be dispatched for want of a
-    // delegate, set it up for dispatch again now that a delegate is
-    // available.
-    if (delegateless_work_) {
-      CFRunLoopSourceSignal(work_source_);
-      delegateless_work_ = false;
-    }
-  }
-}
-
-// Base version creates an autorelease pool.
-bool MessagePumpCFRunLoopBase::ShouldCreateAutoreleasePool() {
-  return true;
-}
-
-void MessagePumpCFRunLoopBase::SetModeMask(int mode_mask) {
-  for (size_t i = 0; i < kNumModes; ++i) {
-    bool enable = mode_mask & (0x1 << i);
-    if (enable == !enabled_modes_[i]) {
-      enabled_modes_[i] =
-          enable ? std::make_unique<ScopedModeEnabler>(this, i) : nullptr;
-    }
-  }
-}
-
-int MessagePumpCFRunLoopBase::GetModeMask() const {
-  int mask = 0;
-  for (size_t i = 0; i < kNumModes; ++i)
-    mask |= enabled_modes_[i] ? (0x1 << i) : 0;
-  return mask;
-}
-
-void MessagePumpCFRunLoopBase::PopWorkItemScope() {
-  // A WorkItemScope should never have been pushed unless the loop was entered.
-  DCHECK_NE(nesting_level_, 0);
-  // If no WorkItemScope was pushed it cannot be popped.
-  DCHECK_GT(stack_.size(), 0u);
-
-  stack_.pop();
-}
-
-void MessagePumpCFRunLoopBase::PushWorkItemScope() {
-  // A WorkItemScope should never be pushed unless the loop was entered.
-  DCHECK_NE(nesting_level_, 0);
-
-  // See RunWork() comments on why the size of |stack| is never bigger than
-  // |nesting_level_| even in nested loops.
-  DCHECK_LT(stack_.size(), static_cast<size_t>(nesting_level_));
-
-  if (delegate_) {
-    stack_.push(delegate_->BeginWorkItem());
-  } else {
-    stack_.push(absl::nullopt);
-  }
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(CFRunLoopTimerRef timer,
-                                                   void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-  // The timer fired, assume we have work and let RunWork() figure out what to
-  // do and what to schedule after.
-  base::mac::CallWithEHFrame(^{
-    // It would be incorrect to expect that `self->delayed_work_scheduled_at_`
-    // is smaller than or equal to `TimeTicks::Now()` because the fire date of a
-    // CFRunLoopTimer can be adjusted slightly.
-    // https://developer.apple.com/documentation/corefoundation/1543570-cfrunlooptimercreate?language=objc
-    DCHECK(!self->delayed_work_scheduled_at_.is_max());
-
-    self->delayed_work_scheduled_at_ = base::TimeTicks::Max();
-    self->RunWork();
-  });
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-  base::mac::CallWithEHFrame(^{
-    self->RunWork();
-  });
-}
-
-// Called by MessagePumpCFRunLoopBase::RunWorkSource and RunDelayedWorkTimer.
-bool MessagePumpCFRunLoopBase::RunWork() {
-  if (!delegate_) {
-    // This point can be reached with a nullptr |delegate_| if Run is not on the
-    // stack but foreign code is spinning the CFRunLoop.  Arrange to come back
-    // here when a delegate is available.
-    delegateless_work_ = true;
-    return false;
-  }
-  if (!keep_running())
-    return false;
-
-  // The NSApplication-based run loop only drains the autorelease pool at each
-  // UI event (NSEvent).  The autorelease pool is not drained for each
-  // CFRunLoopSource target that's run.  Use a local pool for any autoreleased
-  // objects if the app is not currently handling a UI event to ensure they're
-  // released promptly even in the absence of UI events.
-  OptionalAutoreleasePool autorelease_pool(this);
-
-  // Pop the current work item scope as it captures any native work happening
-  // *between* DoWork()'s. This DoWork() happens in sequence to that native
-  // work, not nested within it.
-  PopWorkItemScope();
-  Delegate::NextWorkInfo next_work_info = delegate_->DoWork();
-  // DoWork() (and its own work item coverage) is over so push a new scope to
-  // cover any native work that could possibly happen before the next RunWork().
-  PushWorkItemScope();
-
-  if (next_work_info.is_immediate()) {
-    CFRunLoopSourceSignal(work_source_);
-    return true;
-  } else {
-    // This adjusts the next delayed wake up time (potentially cancels an
-    // already scheduled wake up if there is no delayed work).
-    ScheduleDelayedWork(next_work_info);
-    return false;
-  }
-}
-
-void MessagePumpCFRunLoopBase::RunIdleWork() {
-  if (!delegate_) {
-    // This point can be reached with a nullptr delegate_ if Run is not on the
-    // stack but foreign code is spinning the CFRunLoop.
-    return;
-  }
-  if (!keep_running())
-    return;
-  // The NSApplication-based run loop only drains the autorelease pool at each
-  // UI event (NSEvent).  The autorelease pool is not drained for each
-  // CFRunLoopSource target that's run.  Use a local pool for any autoreleased
-  // objects if the app is not currently handling a UI event to ensure they're
-  // released promptly even in the absence of UI events.
-  OptionalAutoreleasePool autorelease_pool(this);
-  bool did_work = delegate_->DoIdleWork();
-  if (did_work)
-    CFRunLoopSourceSignal(work_source_);
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource(void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-  base::mac::CallWithEHFrame(^{
-    self->RunNestingDeferredWork();
-  });
-}
-
-// Called by MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource.
-void MessagePumpCFRunLoopBase::RunNestingDeferredWork() {
-  if (!delegate_) {
-    // This point can be reached with a nullptr |delegate_| if Run is not on the
-    // stack but foreign code is spinning the CFRunLoop.  There's no sense in
-    // attempting to do any work or signalling the work sources because
-    // without a delegate, work is not possible.
-    return;
-  }
-
-  // Attempt to do work; if there's any more work to do, this call will
-  // re-signal |work_source_| and keep things going. Otherwise, PreWaitObserver
-  // will be invoked by the native pump to declare us idle.
-  RunWork();
-}
-
-void MessagePumpCFRunLoopBase::BeforeWait() {
-  if (!delegate_) {
-    // This point can be reached with a nullptr |delegate_| if Run is not on the
-    // stack but foreign code is spinning the CFRunLoop.
-    return;
-  }
-  delegate_->BeforeWait();
-}
-
-// Called before the run loop goes to sleep or exits, or processes sources.
-void MessagePumpCFRunLoopBase::MaybeScheduleNestingDeferredWork() {
-  // deepest_nesting_level_ is set as run loops are entered.  If the deepest
-  // level encountered is deeper than the current level, a nested loop
-  // (relative to the current level) ran since the last time nesting-deferred
-  // work was scheduled.  When that situation is encountered, schedule
-  // nesting-deferred work in case any work was deferred because nested work
-  // was disallowed.
-  if (deepest_nesting_level_ > nesting_level_) {
-    deepest_nesting_level_ = nesting_level_;
-    CFRunLoopSourceSignal(nesting_deferred_work_source_);
-  }
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::PreWaitObserver(CFRunLoopObserverRef observer,
-                                               CFRunLoopActivity activity,
-                                               void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-  base::mac::CallWithEHFrame(^{
-    // Current work item tracking needs to go away since execution will stop.
-    // Matches the PushWorkItemScope() in AfterWaitObserver() (with an arbitrary
-    // amount of matching Pop/Push in between when running work items).
-    self->PopWorkItemScope();
-
-    // Attempt to do some idle work before going to sleep.
-    self->RunIdleWork();
-
-    // The run loop is about to go to sleep.  If any of the work done since it
-    // started or woke up resulted in a nested run loop running,
-    // nesting-deferred work may have accumulated.  Schedule it for processing
-    // if appropriate.
-    self->MaybeScheduleNestingDeferredWork();
-
-    // Notify the delegate that the loop is about to sleep.
-    self->BeforeWait();
-  });
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::AfterWaitObserver(CFRunLoopObserverRef observer,
-                                                 CFRunLoopActivity activity,
-                                                 void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-  base::mac::CallWithEHFrame(^{
-    // Emerging from sleep, any work happening after this (outside of a
-    // RunWork()) should be considered native work. Matching PopWorkItemScope()
-    // is in BeforeWait().
-    self->PushWorkItemScope();
-  });
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::PreSourceObserver(CFRunLoopObserverRef observer,
-                                                 CFRunLoopActivity activity,
-                                                 void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-
-  // The run loop has reached the top of the loop and is about to begin
-  // processing sources.  If the last iteration of the loop at this nesting
-  // level did not sleep or exit, nesting-deferred work may have accumulated
-  // if a nested loop ran.  Schedule nesting-deferred work for processing if
-  // appropriate.
-  base::mac::CallWithEHFrame(^{
-    self->MaybeScheduleNestingDeferredWork();
-  });
-}
-
-// Called from the run loop.
-// static
-void MessagePumpCFRunLoopBase::EnterExitObserver(CFRunLoopObserverRef observer,
-                                                 CFRunLoopActivity activity,
-                                                 void* info) {
-  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-
-  switch (activity) {
-    case kCFRunLoopEntry:
-      ++self->nesting_level_;
-
-      // There could be some native work done after entering the loop and before
-      // the next observer.
-      self->PushWorkItemScope();
-      if (self->nesting_level_ > self->deepest_nesting_level_) {
-        self->deepest_nesting_level_ = self->nesting_level_;
-      }
-      break;
-
-    case kCFRunLoopExit:
-      // Not all run loops go to sleep.  If a run loop is stopped before it
-      // goes to sleep due to a CFRunLoopStop call, or if the timeout passed
-      // to CFRunLoopRunInMode expires, the run loop may proceed directly from
-      // handling sources to exiting without any sleep.  This most commonly
-      // occurs when CFRunLoopRunInMode is passed a timeout of 0, causing it
-      // to make a single pass through the loop and exit without sleep.  Some
-      // native loops use CFRunLoop in this way.  Because PreWaitObserver will
-      // not be called in these cases, MaybeScheduleNestingDeferredWork needs
-      // to be called here, as the run loop exits.
-      //
-      // MaybeScheduleNestingDeferredWork consults self->nesting_level_
-      // to determine whether to schedule nesting-deferred work.  It expects
-      // the nesting level to be set to the depth of the loop that is going
-      // to sleep or exiting.  It must be called before decrementing the
-      // value so that the value still corresponds to the level of the exiting
-      // loop.
-      base::mac::CallWithEHFrame(^{
-        self->MaybeScheduleNestingDeferredWork();
-      });
-
-      // Current work item tracking needs to go away since execution will stop.
-      self->PopWorkItemScope();
-
-      --self->nesting_level_;
-      break;
-
-    default:
-      break;
-  }
-
-  base::mac::CallWithEHFrame(^{
-    self->EnterExitRunLoop(activity);
-  });
-}
-
-// Called by MessagePumpCFRunLoopBase::EnterExitRunLoop.  The default
-// implementation is a no-op.
-void MessagePumpCFRunLoopBase::EnterExitRunLoop(CFRunLoopActivity activity) {
-}
-
-MessagePumpCFRunLoop::MessagePumpCFRunLoop()
-    : MessagePumpCFRunLoopBase(kCommonModeMask), quit_pending_(false) {}
-
-MessagePumpCFRunLoop::~MessagePumpCFRunLoop() = default;
-
-// Called by MessagePumpCFRunLoopBase::DoRun.  If other CFRunLoopRun loops were
-// running lower on the run loop thread's stack when this object was created,
-// the same number of CFRunLoopRun loops must be running for the outermost call
-// to Run.  Run/DoRun are reentrant after that point.
-void MessagePumpCFRunLoop::DoRun(Delegate* delegate) {
-  // This is completely identical to calling CFRunLoopRun(), except autorelease
-  // pool management is introduced.
-  int result;
-  do {
-    OptionalAutoreleasePool autorelease_pool(this);
-    result = CFRunLoopRunInMode(kCFRunLoopDefaultMode,
-                                kCFTimeIntervalMax,
-                                false);
-  } while (result != kCFRunLoopRunStopped && result != kCFRunLoopRunFinished);
-}
-
-// Must be called on the run loop thread.
-bool MessagePumpCFRunLoop::DoQuit() {
-  // Stop the innermost run loop managed by this MessagePumpCFRunLoop object.
-  if (nesting_level() == run_nesting_level()) {
-    // This object is running the innermost loop, just stop it.
-    CFRunLoopStop(run_loop());
-    return true;
-  } else {
-    // There's another loop running inside the loop managed by this object.
-    // In other words, someone else called CFRunLoopRunInMode on the same
-    // thread, deeper on the stack than the deepest Run call.  Don't preempt
-    // other run loops, just mark this object to quit the innermost Run as
-    // soon as the other inner loops not managed by Run are done.
-    quit_pending_ = true;
-    return false;
-  }
-}
-
-// Called by MessagePumpCFRunLoopBase::EnterExitObserver.
-void MessagePumpCFRunLoop::EnterExitRunLoop(CFRunLoopActivity activity) {
-  if (activity == kCFRunLoopExit &&
-      nesting_level() == run_nesting_level() &&
-      quit_pending_) {
-    // Quit was called while loops other than those managed by this object
-    // were running further inside a run loop managed by this object.  Now
-    // that all unmanaged inner run loops are gone, stop the loop running
-    // just inside Run.
-    CFRunLoopStop(run_loop());
-    quit_pending_ = false;
-    OnDidQuit();
-  }
-}
-
-MessagePumpNSRunLoop::MessagePumpNSRunLoop()
-    : MessagePumpCFRunLoopBase(kCommonModeMask) {
-  CFRunLoopSourceContext source_context = {0};
-  source_context.perform = NoOp;
-  quit_source_.reset(CFRunLoopSourceCreate(/*allocator=*/nullptr,
-                                           /*order=*/0,
-                                           /*context=*/&source_context));
-  CFRunLoopAddSource(run_loop(), quit_source_, kCFRunLoopCommonModes);
-}
-
-MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
-  CFRunLoopRemoveSource(run_loop(), quit_source_, kCFRunLoopCommonModes);
-}
-
-void MessagePumpNSRunLoop::DoRun(Delegate* delegate) {
-  while (keep_running()) {
-    // NSRunLoop manages autorelease pools itself.
-    [NSRunLoop.currentRunLoop runMode:NSDefaultRunLoopMode
-                           beforeDate:NSDate.distantFuture];
-  }
-}
-
-bool MessagePumpNSRunLoop::DoQuit() {
-  CFRunLoopSourceSignal(quit_source_);
-  CFRunLoopWakeUp(run_loop());
-  return true;
-}
-
-#if BUILDFLAG(IS_IOS)
-MessagePumpUIApplication::MessagePumpUIApplication()
-    : MessagePumpCFRunLoopBase(kCommonModeMask) {}
-
-MessagePumpUIApplication::~MessagePumpUIApplication() = default;
-
-void MessagePumpUIApplication::DoRun(Delegate* delegate) {
-  NOTREACHED();
-}
-
-bool MessagePumpUIApplication::DoQuit() {
-  NOTREACHED();
-  return false;
-}
-
-void MessagePumpUIApplication::Attach(Delegate* delegate) {
-  DCHECK(!run_loop_);
-  run_loop_.emplace();
-
-  CHECK(run_loop_->BeforeRun());
-  SetDelegate(delegate);
-
-  OnAttach();
-}
-
-void MessagePumpUIApplication::Detach() {
-  DCHECK(run_loop_);
-  run_loop_->AfterRun();
-  SetDelegate(nullptr);
-  run_loop_.reset();
-
-  OnDetach();
-}
-
-#else
-
-ScopedPumpMessagesInPrivateModes::ScopedPumpMessagesInPrivateModes() {
-  DCHECK(g_app_pump);
-  DCHECK_EQ(kNSApplicationModalSafeModeMask, g_app_pump->GetModeMask());
-  // Pumping events in private runloop modes is known to interact badly with
-  // app modal windows like NSAlert.
-  if (NSApp.modalWindow) {
-    return;
-  }
-  g_app_pump->SetModeMask(kAllModesMask);
-}
-
-ScopedPumpMessagesInPrivateModes::~ScopedPumpMessagesInPrivateModes() {
-  DCHECK(g_app_pump);
-  g_app_pump->SetModeMask(kNSApplicationModalSafeModeMask);
-}
-
-int ScopedPumpMessagesInPrivateModes::GetModeMaskForTest() {
-  return g_app_pump ? g_app_pump->GetModeMask() : -1;
-}
-
-MessagePumpNSApplication::MessagePumpNSApplication()
-    : MessagePumpCFRunLoopBase(kNSApplicationModalSafeModeMask) {
-  DCHECK_EQ(nullptr, g_app_pump);
-  g_app_pump = this;
-}
-
-MessagePumpNSApplication::~MessagePumpNSApplication() {
-  DCHECK_EQ(this, g_app_pump);
-  g_app_pump = nullptr;
-}
-
-void MessagePumpNSApplication::DoRun(Delegate* delegate) {
-  bool last_running_own_loop_ = running_own_loop_;
-
-  // NSApp must be initialized by calling:
-  // [{some class which implements CrAppProtocol} sharedApplication]
-  // Most likely candidates are CrApplication or BrowserCrApplication.
-  // These can be initialized from C++ code by calling
-  // RegisterCrApp() or RegisterBrowserCrApp().
-  CHECK(NSApp);
-
-  if (!NSApp.running) {
-    running_own_loop_ = false;
-    // NSApplication manages autorelease pools itself when run this way.
-    [NSApp run];
-  } else {
-    running_own_loop_ = true;
-    while (keep_running()) {
-      OptionalAutoreleasePool autorelease_pool(this);
-      NSEvent* event = [NSApp nextEventMatchingMask:NSEventMaskAny
-                                          untilDate:NSDate.distantFuture
-                                             inMode:NSDefaultRunLoopMode
-                                            dequeue:YES];
-      if (event) {
-        [NSApp sendEvent:event];
-      }
-    }
-  }
-
-  running_own_loop_ = last_running_own_loop_;
-}
-
-bool MessagePumpNSApplication::DoQuit() {
-  // If the app is displaying a modal window in a native run loop, we can only
-  // quit our run loop after the window is closed. Otherwise the [NSApplication
-  // stop] below will apply to the modal window run loop instead. To work around
-  // this, the quit is applied when we re-enter our own run loop after the
-  // window is gone (see MessagePumpNSApplication::EnterExitRunLoop).
-  if (nesting_level() > run_nesting_level() && NSApp.modalWindow != nil) {
-    quit_pending_ = true;
-    return false;
-  }
-
-  if (!running_own_loop_) {
-    [NSApp stop:nil];
-  }
-
-  // Send a fake event to wake the loop up.
-  [NSApp postEvent:[NSEvent otherEventWithType:NSEventTypeApplicationDefined
-                                      location:NSZeroPoint
-                                 modifierFlags:0
-                                     timestamp:0
-                                  windowNumber:0
-                                       context:nil
-                                       subtype:0
-                                         data1:0
-                                         data2:0]
-           atStart:NO];
-  return true;
-}
-
-void MessagePumpNSApplication::EnterExitRunLoop(CFRunLoopActivity activity) {
-  // If we previously tried quitting while a modal window was active, check if
-  // the window is gone now and we're no longer nested in a system run loop.
-  if (activity == kCFRunLoopEntry && quit_pending_ &&
-      nesting_level() <= run_nesting_level() && NSApp.modalWindow == nil) {
-    quit_pending_ = false;
-    if (DoQuit())
-      OnDidQuit();
-  }
-}
-
-MessagePumpCrApplication::MessagePumpCrApplication() = default;
-
-MessagePumpCrApplication::~MessagePumpCrApplication() = default;
-
-// Prevents an autorelease pool from being created if the app is in the midst of
-// handling a UI event because various parts of AppKit depend on objects that
-// are created while handling a UI event to be autoreleased in the event loop.
-// An example of this is NSWindowController. When a window with a window
-// controller is closed it goes through a stack like this:
-// (Several stack frames elided for clarity)
-//
-// #0 [NSWindowController autorelease]
-// #1 DoAClose
-// #2 MessagePumpCFRunLoopBase::DoWork()
-// #3 [NSRunLoop run]
-// #4 [NSButton performClick:]
-// #5 [NSWindow sendEvent:]
-// #6 [NSApp sendEvent:]
-// #7 [NSApp run]
-//
-// -performClick: spins a nested run loop. If the pool created in DoWork was a
-// standard NSAutoreleasePool, it would release the objects that were
-// autoreleased into it once DoWork released it. This would cause the window
-// controller, which autoreleased itself in frame #0, to release itself, and
-// possibly free itself. Unfortunately this window controller controls the
-// window in frame #5. When the stack is unwound to frame #5, the window would
-// no longer exist and crashes may occur. Apple gets around this by never
-// releasing the pool it creates in frame #4, and letting frame #7 clean it up
-// when it cleans up the pool that wraps frame #7. When an autorelease pool is
-// released it releases all other pools that were created after it on the
-// autorelease pool stack.
-//
-// CrApplication is responsible for setting handlingSendEvent to true just
-// before it sends the event through the event handling mechanism, and
-// returning it to its previous value once the event has been sent.
-bool MessagePumpCrApplication::ShouldCreateAutoreleasePool() {
-  if (message_pump_mac::IsHandlingSendEvent()) {
-    return false;
-  }
-  return MessagePumpNSApplication::ShouldCreateAutoreleasePool();
-}
-
-#endif  // BUILDFLAG(IS_IOS)
-
-namespace message_pump_mac {
-
-std::unique_ptr<MessagePump> Create() {
-  if (NSThread.isMainThread) {
-#if BUILDFLAG(IS_IOS)
-    return std::make_unique<MessagePumpUIApplication>();
-#else
-    if ([NSApp conformsToProtocol:@protocol(CrAppProtocol)])
-      return std::make_unique<MessagePumpCrApplication>();
-
-    // The main-thread MessagePump implementations REQUIRE an NSApp.
-    // Executables which have specific requirements for their
-    // NSApplication subclass should initialize appropriately before
-    // creating an event loop.
-    [NSApplication sharedApplication];
-    g_not_using_cr_app = true;
-    return std::make_unique<MessagePumpNSApplication>();
-#endif
-  }
-
-  return std::make_unique<MessagePumpNSRunLoop>();
-}
-
-#if !BUILDFLAG(IS_IOS)
-
-bool UsingCrApp() {
-  DCHECK(NSThread.isMainThread);
-
-  // If NSApp is still not initialized, then the subclass used cannot
-  // be determined.
-  DCHECK(NSApp);
-
-  // The pump was created using MessagePumpNSApplication.
-  if (g_not_using_cr_app) {
-    return false;
-  }
-
-  return [NSApp conformsToProtocol:@protocol(CrAppProtocol)];
-}
-
-bool IsHandlingSendEvent() {
-  DCHECK([NSApp conformsToProtocol:@protocol(CrAppProtocol)]);
-  NSObject<CrAppProtocol>* app = static_cast<NSObject<CrAppProtocol>*>(NSApp);
-  return [app isHandlingSendEvent];
-}
-
-#endif  // !BUILDFLAG(IS_IOS)
-
-}  // namespace message_pump_mac
-
-}  // namespace base
diff --git a/base/message_loop/message_pump_mac_unittest.mm b/base/message_loop/message_pump_mac_unittest.mm
deleted file mode 100644
index ebfdde8..0000000
--- a/base/message_loop/message_pump_mac_unittest.mm
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop/message_pump_mac.h"
-
-#include "base/cancelable_callback.h"
-#include "base/functional/bind.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/task/current_thread.h"
-#include "base/task/single_thread_task_runner.h"
-#include "base/test/bind.h"
-#include "base/test/task_environment.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-@interface TestModalAlertCloser : NSObject
-- (void)runTestThenCloseAlert:(NSAlert*)alert;
-@end
-
-namespace {
-
-// Internal constants from message_pump_mac.mm.
-constexpr int kAllModesMask = 0xf;
-constexpr int kNSApplicationModalSafeModeMask = 0x3;
-
-}  // namespace
-
-namespace base {
-
-namespace {
-
-// Posted tasks are only executed while the message pump has a delegate, i.e.
-// when a base::RunLoop is running. So, in order to test whether posted tasks
-// are run by CFRunLoopRunInMode and *not* by the regular RunLoop, we need to
-// be inside a task that is also calling CFRunLoopRunInMode.
-// This function posts |task| and runs the given |mode|.
-void RunTaskInMode(CFRunLoopMode mode, OnceClosure task) {
-  // Since this task is "ours" rather than a system task, allow nesting.
-  CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop allow;
-  CancelableOnceClosure cancelable(std::move(task));
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(FROM_HERE,
-                                                        cancelable.callback());
-  while (CFRunLoopRunInMode(mode, 0, true) == kCFRunLoopRunHandledSource)
-    ;
-}
-
-}  // namespace
-
-// Tests the correct behavior of ScopedPumpMessagesInPrivateModes.
-TEST(MessagePumpMacTest, ScopedPumpMessagesInPrivateModes) {
-  test::SingleThreadTaskEnvironment task_environment(
-      test::SingleThreadTaskEnvironment::MainThreadType::UI);
-
-  CFRunLoopMode kRegular = kCFRunLoopDefaultMode;
-  CFRunLoopMode kPrivate = CFSTR("NSUnhighlightMenuRunLoopMode");
-
-  // Work is seen when running in the default mode.
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-      FROM_HERE,
-      BindOnce(&RunTaskInMode, kRegular, MakeExpectedRunClosure(FROM_HERE)));
-  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
-
-  // But not seen when running in a private mode.
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-      FROM_HERE,
-      BindOnce(&RunTaskInMode, kPrivate, MakeExpectedNotRunClosure(FROM_HERE)));
-  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
-
-  {
-    ScopedPumpMessagesInPrivateModes allow_private;
-    // Now the work should be seen.
-    SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-        FROM_HERE,
-        BindOnce(&RunTaskInMode, kPrivate, MakeExpectedRunClosure(FROM_HERE)));
-    EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
-
-    // The regular mode should also work the same.
-    SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-        FROM_HERE,
-        BindOnce(&RunTaskInMode, kRegular, MakeExpectedRunClosure(FROM_HERE)));
-    EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
-  }
-
-  // And now the scoper is out of scope, private modes should no longer see it.
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-      FROM_HERE,
-      BindOnce(&RunTaskInMode, kPrivate, MakeExpectedNotRunClosure(FROM_HERE)));
-  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
-
-  // Only regular modes see it.
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-      FROM_HERE,
-      BindOnce(&RunTaskInMode, kRegular, MakeExpectedRunClosure(FROM_HERE)));
-  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
-}
-
-// Tests that private message loop modes are not pumped while a modal dialog is
-// present.
-TEST(MessagePumpMacTest, ScopedPumpMessagesAttemptWithModalDialog) {
-  test::SingleThreadTaskEnvironment task_environment(
-      test::SingleThreadTaskEnvironment::MainThreadType::UI);
-
-  {
-    base::ScopedPumpMessagesInPrivateModes allow_private;
-    // No modal window, so all modes should be pumped.
-    EXPECT_EQ(kAllModesMask, allow_private.GetModeMaskForTest());
-  }
-
-  NSAlert* alert = [[NSAlert alloc] init];
-  [alert addButtonWithTitle:@"OK"];
-  TestModalAlertCloser* closer = [[TestModalAlertCloser alloc] init];
-  [closer performSelector:@selector(runTestThenCloseAlert:)
-               withObject:alert
-               afterDelay:0
-                  inModes:@[ NSModalPanelRunLoopMode ]];
-  NSInteger result = [alert runModal];
-  EXPECT_EQ(NSAlertFirstButtonReturn, result);
-}
-
-TEST(MessagePumpMacTest, QuitWithModalWindow) {
-  test::SingleThreadTaskEnvironment task_environment(
-      test::SingleThreadTaskEnvironment::MainThreadType::UI);
-  NSWindow* window =
-      [[NSWindow alloc] initWithContentRect:NSMakeRect(0, 0, 100, 100)
-                                  styleMask:NSWindowStyleMaskBorderless
-                                    backing:NSBackingStoreBuffered
-                                      defer:NO];
-  window.releasedWhenClosed = NO;
-
-  // Check that quitting the run loop while a modal window is shown applies to
-  // |run_loop| rather than the internal NSApplication modal run loop.
-  RunLoop run_loop;
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-      FROM_HERE, base::BindLambdaForTesting([&] {
-        CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop allow;
-        ScopedPumpMessagesInPrivateModes pump_private;
-        [NSApp runModalForWindow:window];
-      }));
-  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-      FROM_HERE, base::BindLambdaForTesting([&] {
-        [NSApp stopModal];
-        run_loop.Quit();
-      }));
-
-  EXPECT_NO_FATAL_FAILURE(run_loop.Run());
-}
-
-}  // namespace base
-
-@implementation TestModalAlertCloser
-
-- (void)runTestThenCloseAlert:(NSAlert*)alert {
-  EXPECT_TRUE([NSApp modalWindow]);
-  {
-    base::ScopedPumpMessagesInPrivateModes allow_private;
-    // With a modal window, only safe modes should be pumped.
-    EXPECT_EQ(kNSApplicationModalSafeModeMask,
-              allow_private.GetModeMaskForTest());
-  }
-  [[alert buttons][0] performClick:nil];
-}
-
-@end
diff --git a/base/message_loop/message_pump_unittest.cc b/base/message_loop/message_pump_unittest.cc
index 44cc1c1..11418ff 100644
--- a/base/message_loop/message_pump_unittest.cc
+++ b/base/message_loop/message_pump_unittest.cc
@@ -282,7 +282,8 @@
   // Return an immediate task with |yield_to_native| set.
   AddPreDoWorkExpectations(delegate);
   EXPECT_CALL(delegate, DoWork).WillOnce(Invoke([] {
-    return MessagePump::Delegate::NextWorkInfo{TimeTicks(), TimeTicks(),
+    return MessagePump::Delegate::NextWorkInfo{TimeTicks(), TimeDelta(),
+                                               TimeTicks(),
                                                /* yield_to_native = */ true};
   }));
   AddPostDoWorkExpectations(delegate);
@@ -292,8 +293,31 @@
   EXPECT_CALL(delegate, DoWork).WillOnce(Invoke([this] {
     message_pump_->Quit();
     auto now = TimeTicks::Now();
-    return MessagePump::Delegate::NextWorkInfo{now + Milliseconds(1), now,
-                                               true};
+    return MessagePump::Delegate::NextWorkInfo{now + Milliseconds(1),
+                                               TimeDelta(), now, true};
+  }));
+  EXPECT_CALL(delegate, DoIdleWork()).Times(AnyNumber());
+
+  message_pump_->ScheduleWork();
+  message_pump_->Run(&delegate);
+}
+
+TEST_P(MessagePumpTest, LeewaySmokeTest) {
+  // The handling of the "leeway" in the NextWorkInfo is only implemented on
+  // mac. However, since we inject a fake one for testing, this is hard to
+  // test. This test ensures that setting this value doesn't cause any
+  // MessagePump to explode.
+  testing::StrictMock<MockMessagePumpDelegate> delegate(GetParam());
+
+  testing::InSequence sequence;
+
+  AddPreDoWorkExpectations(delegate);
+  // Return a delayed task with |yield_to_native| set, and exit.
+  EXPECT_CALL(delegate, DoWork).WillOnce(Invoke([this] {
+    message_pump_->Quit();
+    auto now = TimeTicks::Now();
+    return MessagePump::Delegate::NextWorkInfo{now + Milliseconds(1),
+                                               Milliseconds(8), now};
   }));
   EXPECT_CALL(delegate, DoIdleWork()).Times(AnyNumber());
 
diff --git a/base/message_loop/message_pump_win.cc b/base/message_loop/message_pump_win.cc
index e6d84c4..645217f 100644
--- a/base/message_loop/message_pump_win.cc
+++ b/base/message_loop/message_pump_win.cc
@@ -9,6 +9,7 @@
 #include <type_traits>
 
 #include "base/auto_reset.h"
+#include "base/check.h"
 #include "base/debug/alias.h"
 #include "base/feature_list.h"
 #include "base/functional/bind.h"
@@ -50,7 +51,7 @@
 
   // A saturated_cast with an unsigned destination automatically clamps negative
   // values at zero.
-  static_assert(!std::is_signed<DWORD>::value, "DWORD is unexpectedly signed");
+  static_assert(!std::is_signed_v<DWORD>, "DWORD is unexpectedly signed");
   return saturated_cast<DWORD>(timeout_ms);
 }
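The comment above leans on a property of base::saturated_cast that is easy to check in isolation: casting to an unsigned destination clamps negative inputs to 0 and out-of-range positive inputs to the destination's maximum. A small sketch (using uint32_t in place of the Windows DWORD typedef; the function name is invented, not part of this file):

    #include <cstdint>

    #include "base/numerics/safe_conversions.h"

    void SaturatedCastSketch() {
      uint32_t clamped_low = base::saturated_cast<uint32_t>(-5);  // 0
      uint32_t clamped_high =
          base::saturated_cast<uint32_t>(int64_t{1} << 40);  // 4294967295
      (void)clamped_low;
      (void)clamped_high;
    }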
 
@@ -90,7 +91,7 @@
 MessagePumpForUI::MessagePumpForUI() {
   bool succeeded = message_window_.Create(
       BindRepeating(&MessagePumpForUI::MessageCallback, Unretained(this)));
-  DCHECK(succeeded);
+  CHECK(succeeded);
 }
 
 MessagePumpForUI::~MessagePumpForUI() = default;
diff --git a/base/metrics/DEPS b/base/metrics/DEPS
deleted file mode 100644
index d1dc1b1..0000000
--- a/base/metrics/DEPS
+++ /dev/null
@@ -1,10 +0,0 @@
-specific_include_rules = {
-  "statistics_recorder.*": [
-    # Explicitly allow absl::Mutex for targeted use in StatisticsRecorder.
-    # Note: absl::Mutex is currently not generally allowed in Chromium (see
-    # src/DEPS) but this specific use has been explicitly discussed and agreed
-    # on cxx@chromium.org here:
-    # https://groups.google.com/a/chromium.org/g/cxx/c/bIlGr1URn8I/m/NbawW6rBDwAJ
-    '+third_party/abseil-cpp/absl/synchronization/mutex.h',
-  ]
-}
diff --git a/base/metrics/dummy_histogram.cc b/base/metrics/dummy_histogram.cc
index 1de4814..dd19905 100644
--- a/base/metrics/dummy_histogram.cc
+++ b/base/metrics/dummy_histogram.cc
@@ -56,6 +56,10 @@
   std::unique_ptr<SampleCountIterator> ExtractingIterator() override {
     return std::make_unique<DummySampleCountIterator>();
   }
+  bool IsDefinitelyEmpty() const override {
+    NOTREACHED();
+    return true;
+  }
   bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override {
     return true;
   }
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index c65b7ba..c38e1d9 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -174,26 +174,6 @@
 }
 
 #if BUILDFLAG(USE_BLINK)
-#if BUILDFLAG(IS_POSIX)
-// Exits the process gracefully if the parent process is dead. We've seen cases
-// where the child will still be executing after its parent process has died.
-// In those cases, if we hit an error that would otherwise result in a CHECK,
-// this function can be used to exit gracefully instead of producing a crash
-// report. Note: This function calls Sleep() so should not be called in a code
-// path that wouldn't otherwise result in a CHECK().
-void ExitGracefullyIfParentProcessIsDead() {
-  // The parent process crash may not be visible immediately so loop for 100ms.
-  for (int i = 0; i < 100; i++) {
-    // If the parent process has died, getppid() will return 1, meaning we were
-    // orphaned and parented to init.
-    if (getppid() == 1) {
-      base::Process::TerminateCurrentProcessImmediately(0);
-    }
-    PlatformThread::Sleep(base::Milliseconds(1));
-  }
-}
-#endif  // BUILDFLAG(IS_POSIX)
-
 // Returns whether the operation succeeded.
 bool DeserializeGUIDFromStringPieces(StringPiece first,
                                      StringPiece second,
@@ -671,13 +651,6 @@
     std::string switch_value =
         cmd_line.GetSwitchValueASCII(switches::kFieldTrialHandle);
     bool result = CreateTrialsFromSwitchValue(switch_value, fd_key);
-#if BUILDFLAG(IS_POSIX)
-    if (!result) {
-      // This may be an error mapping the shared memory segment if the parent
-      // process just died. Exit gracefully in this case.
-      ExitGracefullyIfParentProcessIsDead();
-    }
-#endif  // BUILDFLAG(IS_POSIX)
     CHECK(result);
   }
 #endif  // BUILDFLAG(USE_BLINK)
@@ -1103,9 +1076,11 @@
   win::ScopedHandle scoped_handle(handle);
 #elif BUILDFLAG(IS_APPLE) && BUILDFLAG(USE_BLINK)
   auto* rendezvous = MachPortRendezvousClient::GetInstance();
-  if (!rendezvous)
-    return ReadOnlySharedMemoryRegion();
-  mac::ScopedMachSendRight scoped_handle = rendezvous->TakeSendRight(
+  if (!rendezvous) {
+    LOG(ERROR) << "Mach rendezvous failed, terminating process (parent died?)";
+    base::Process::TerminateCurrentProcessImmediately(0);
+  }
+  apple::ScopedMachSendRight scoped_handle = rendezvous->TakeSendRight(
       static_cast<MachPortsForRendezvous::key_type>(field_trial_handle));
   if (!scoped_handle.is_valid())
     return ReadOnlySharedMemoryRegion();
diff --git a/base/metrics/field_trial_params.cc b/base/metrics/field_trial_params.cc
index 5dfeaf3..37f90ef 100644
--- a/base/metrics/field_trial_params.cc
+++ b/base/metrics/field_trial_params.cc
@@ -31,7 +31,7 @@
                      const std::string& value_as_string,
                      const std::string& default_value_as_string) {
   UmaHistogramSparse("Variations.FieldTriamParamsLogInvalidValue",
-                     static_cast<int>(base::HashName(
+                     static_cast<int>(base::HashFieldTrialName(
                          FeatureList::GetFieldTrial(feature)->trial_name())));
   // To anyone noticing these crash dumps in the wild, these parameters come
   // from server-side experiment configuration. If you're seeing an increase it
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
index e15465b..1cf2bf4 100644
--- a/base/metrics/field_trial_params.h
+++ b/base/metrics/field_trial_params.h
@@ -109,6 +109,15 @@
     const std::string& param_name,
     bool default_value);
 
+// Same as GetFieldTrialParamValueByFeature(), but additionally converts the
+// string value into a base::TimeDelta and returns it if the conversion
+// succeeds. Otherwise, it returns `default_value`. If the string value is not
+// empty and the conversion fails, a warning is logged.
+BASE_EXPORT base::TimeDelta GetFieldTrialParamByFeatureAsTimeDelta(
+    const Feature& feature,
+    const std::string& param_name,
+    base::TimeDelta default_value);
+
 // Shared declaration for various FeatureParam<T> types.
 //
 // This template is defined for the following types T:
@@ -124,12 +133,12 @@
 //
 // Getting a param value from a FeatureParam<T> will have the same semantics as
 // GetFieldTrialParamValueByFeature(), see that function's comments for details.
-template <typename T, bool IsEnum = std::is_enum<T>::value>
+template <typename T, bool IsEnum = std::is_enum_v<T>>
 struct FeatureParam {
   // Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
   // in its definition so that evaluation is deferred until the template is
   // instantiated.
-  static_assert(!std::is_same<T, T>::value, "unsupported FeatureParam<> type");
+  static_assert(!std::is_same_v<T, T>, "unsupported FeatureParam<> type");
 };
 
 // Declares a string-valued parameter. Example:
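A minimal usage sketch of the newly declared GetFieldTrialParamByFeatureAsTimeDelta() (not part of this patch; the feature and parameter names below are purely illustrative):

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/time/time.h"

// Hypothetical feature used only for illustration.
BASE_FEATURE(kExampleFeature,
             "ExampleFeature",
             base::FEATURE_DISABLED_BY_DEFAULT);

base::TimeDelta GetExamplePollInterval() {
  // Falls back to 30 seconds when the param is missing or unparsable; per the
  // new comment, a non-empty but unparsable value is also logged as a warning.
  return base::GetFieldTrialParamByFeatureAsTimeDelta(
      kExampleFeature, "poll_interval", base::Seconds(30));
}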
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index b3f5a53..9c3645d 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -738,7 +738,7 @@
   }
 
  private:
-  raw_ptr<const DescriptionPair> descriptions_;
+  raw_ptr<const DescriptionPair, AllowPtrArithmetic> descriptions_;
 };
 
 LinearHistogram::~LinearHistogram() = default;
diff --git a/base/metrics/histogram_functions.h b/base/metrics/histogram_functions.h
index 2717fbc..e46afc1 100644
--- a/base/metrics/histogram_functions.h
+++ b/base/metrics/histogram_functions.h
@@ -69,7 +69,7 @@
 // Keep them synchronized.
 template <typename T>
 void UmaHistogramEnumeration(const std::string& name, T sample) {
-  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  static_assert(std::is_enum_v<T>, "T is not an enum.");
   // This also ensures that an enumeration that doesn't define kMaxValue fails
   // with a semi-useful error ("no member named 'kMaxValue' in ...").
   static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
@@ -83,7 +83,7 @@
 
 template <typename T>
 void UmaHistogramEnumeration(const char* name, T sample) {
-  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  static_assert(std::is_enum_v<T>, "T is not an enum.");
   // This also ensures that an enumeration that doesn't define kMaxValue fails
   // with a semi-useful error ("no member named 'kMaxValue' in ...").
   static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
@@ -113,7 +113,7 @@
 // otherwise functionally equivalent to the above.
 template <typename T>
 void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
-  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  static_assert(std::is_enum_v<T>, "T is not an enum.");
   DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
   DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
   return UmaHistogramExactLinear(name, static_cast<int>(sample),
@@ -122,7 +122,7 @@
 
 template <typename T>
 void UmaHistogramEnumeration(const char* name, T sample, T enum_size) {
-  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  static_assert(std::is_enum_v<T>, "T is not an enum.");
   DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
   DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
   return UmaHistogramExactLinear(name, static_cast<int>(sample),
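For reference, a small sketch of the enum overload these static_asserts guard (the enum is illustrative and not part of this change):

#include "base/metrics/histogram_functions.h"

// UmaHistogramEnumeration(name, sample) requires an enum type (now checked
// via std::is_enum_v<T>) that defines kMaxValue.
enum class ExampleResult {
  kSuccess = 0,
  kFailure = 1,
  kMaxValue = kFailure,
};

void RecordExampleResult(ExampleResult result) {
  base::UmaHistogramEnumeration("Example.Result", result);
}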
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
index 5ec9643..91b0ff9 100644
--- a/base/metrics/histogram_macros_internal.h
+++ b/base/metrics/histogram_macros_internal.h
@@ -40,7 +40,7 @@
 template <typename Enum>
 struct EnumSizeTraits<
     Enum,
-    std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
+    std::enable_if_t<std::is_enum_v<decltype(Enum::kMaxValue)>>> {
   static constexpr Enum Count() {
     // If you're getting
     //   note: integer value X is outside the valid range of values [0, X] for
@@ -141,9 +141,9 @@
 #define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary,  \
                                                   flag)                    \
   do {                                                                     \
-    static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value,    \
+    static_assert(!std::is_enum_v<std::decay_t<decltype(sample)>>,         \
                   "|sample| should not be an enum type!");                 \
-    static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value,  \
+    static_assert(!std::is_enum_v<std::decay_t<decltype(boundary)>>,       \
                   "|boundary| should not be an enum type!");               \
     STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
         name, Add(sample),                                                 \
@@ -157,9 +157,9 @@
 #define INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG(                      \
     name, sample, count, boundary, scale, flag)                                \
   do {                                                                         \
-    static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value,        \
+    static_assert(!std::is_enum_v<std::decay_t<decltype(sample)>>,             \
                   "|sample| should not be an enum type!");                     \
-    static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value,      \
+    static_assert(!std::is_enum_v<std::decay_t<decltype(boundary)>>,           \
                   "|boundary| should not be an enum type!");                   \
     class ScaledLinearHistogramInstance : public base::ScaledLinearHistogram { \
      public:                                                                   \
@@ -206,12 +206,12 @@
   do {                                                                         \
     using decayed_sample = std::decay<decltype(sample)>::type;                 \
     using decayed_boundary = std::decay<decltype(boundary)>::type;             \
-    static_assert(!std::is_enum<decayed_boundary>::value ||                    \
-                      std::is_enum<decayed_sample>::value,                     \
-                  "Unexpected: |boundary| is enum, but |sample| is not.");     \
-    static_assert(!std::is_enum<decayed_sample>::value ||                      \
-                      !std::is_enum<decayed_boundary>::value ||                \
-                      std::is_same<decayed_sample, decayed_boundary>::value,   \
+    static_assert(                                                             \
+        !std::is_enum_v<decayed_boundary> || std::is_enum_v<decayed_sample>,   \
+        "Unexpected: |boundary| is enum, but |sample| is not.");               \
+    static_assert(!std::is_enum_v<decayed_sample> ||                           \
+                      !std::is_enum_v<decayed_boundary> ||                     \
+                      std::is_same_v<decayed_sample, decayed_boundary>,        \
                   "|sample| and |boundary| shouldn't be of different enums");  \
     static_assert(                                                             \
         static_cast<uintmax_t>(boundary) <                                     \
@@ -227,7 +227,7 @@
                                                         scale, flag)         \
   do {                                                                       \
     using decayed_sample = std::decay<decltype(sample)>::type;               \
-    static_assert(std::is_enum<decayed_sample>::value,                       \
+    static_assert(std::is_enum_v<decayed_sample>,                            \
                   "Unexpected: |sample| is not at enum.");                   \
     constexpr auto boundary = base::internal::EnumSizeTraits<                \
         std::decay_t<decltype(sample)>>::Count();                            \
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
index 58069b3..76aa633 100644
--- a/base/metrics/histogram_samples.cc
+++ b/base/metrics/histogram_samples.cc
@@ -293,6 +293,10 @@
   DCHECK(success);
 }
 
+bool HistogramSamples::IsDefinitelyEmpty() const {
+  return sum() == 0 && redundant_count() == 0;
+}
+
 void HistogramSamples::Serialize(Pickle* pickle) const {
   pickle->WriteInt64(sum());
   pickle->WriteInt(redundant_count());
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index 3fe2504..5b3496f 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -165,6 +165,22 @@
   // enforced by a DCHECK in the destructor).
   virtual std::unique_ptr<SampleCountIterator> ExtractingIterator() = 0;
 
+  // Returns true if |this| is empty (has no samples, has a |sum| of zero, and
+  // has a |redundant_count| of zero), which is indicative that the caller does
+  // not need to process |this|.
+  // - Note 1: This should only be called when |this| is only manipulated on one
+  // thread at a time (e.g., the underlying data does not change on another
+  // thread). If this is not the case, then the returned value cannot be trusted
+  // at all.
+  // - Note 2: For performance reasons, this is not guaranteed to return the
+  // correct value. If false is returned, |this| may or may not be empty.
+  // However, if true is returned, then |this| is guaranteed to be empty (no
+  // false positives). Of course, this assumes that "Note 1" is respected.
+  // - Note 3: The base implementation of this method checks for |sum| and
+  // |redundant_count|, but the child implementations should also check for
+  // samples.
+  virtual bool IsDefinitelyEmpty() const;
+
   void Serialize(Pickle* pickle) const;
 
   // Returns ASCII representation of histograms data for histogram samples.
@@ -257,7 +273,7 @@
   //   external object. The callers guarantees the value will outlive this
   //   instance.
   std::unique_ptr<Metadata> meta_owned_;
-  raw_ptr<Metadata, LeakedDanglingUntriaged> meta_;
+  raw_ptr<Metadata> meta_;
 };
 
 class BASE_EXPORT SampleCountIterator {
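The intended use of IsDefinitelyEmpty() is as a cheap pre-check before work that requires a lock; a hedged sketch of the pattern, mirroring the persistent_histogram_allocator.cc change later in this patch (|histogram| is assumed to be a HistogramBase* owned elsewhere):

std::unique_ptr<base::HistogramSamples> samples = histogram->SnapshotDelta();
if (samples->IsDefinitelyEmpty()) {
  // Guaranteed empty (no false positives), so skip the StatisticsRecorder
  // lookup and merge entirely.
  return;
}
// Otherwise, merge |samples| into the matching StatisticsRecorder histogram.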
diff --git a/base/metrics/histogram_threadsafe_unittest.cc b/base/metrics/histogram_threadsafe_unittest.cc
index 8e8ac76..85b7c48 100644
--- a/base/metrics/histogram_threadsafe_unittest.cc
+++ b/base/metrics/histogram_threadsafe_unittest.cc
@@ -175,7 +175,8 @@
         std::make_unique<PersistentMemoryAllocator>(
             /*base=*/const_cast<void*>(allocator->data()), allocator->size(),
             /*page_size=*/0, /*id=*/0,
-            /*name=*/"GlobalHistogramAllocatorView", /*readonly=*/false);
+            /*name=*/"GlobalHistogramAllocatorView",
+            PersistentMemoryAllocator::kReadWrite);
     allocator_view_ =
         std::make_unique<PersistentHistogramAllocator>(std::move(memory_view));
   }
@@ -281,6 +282,59 @@
     histograms_.emplace_back(std::move(subprocess_numeric_histogram));
     histograms_.emplace_back(std::move(subprocess_sparse_histogram));
 
+    // Lastly, again, create two additional *different* histogram objects that
+    // point to the same underlying data as the first two (|numeric_histogram|
+    // and |sparse_histogram|). Unlike above, this is not necessarily done to
+    // simulate subprocess histograms, but rather to verify that different
+    // histogram objects created through the *same* allocator work correctly
+    // together. In particular, the sparse histogram found here will use the
+    // same "data manager" (see base::PersistentSparseHistogramDataManager) as
+    // the original |sparse_histogram|. This is in contrast to the "subprocess"
+    // histograms above, which will use a different "data manager" since those
+    // histogram objects were created through a different allocator
+    // (allocator_view_). In production, this is what happens when we try to
+    // merge the histograms of a child process multiple times concurrently
+    // (e.g. while we are merging the histograms of a certain child process in
+    // the background, the browser is backgrounded, triggering another merge but
+    // on the main thread).
+    PersistentHistogramAllocator::Iterator hist_it2(
+        GlobalHistogramAllocator::Get());
+    std::unique_ptr<HistogramBase> numeric_histogram2;
+    std::unique_ptr<HistogramBase> sparse_histogram2;
+    while (true) {
+      // GetNext() creates a new histogram instance that points to the same
+      // underlying data as the histogram the iterator is pointing to.
+      std::unique_ptr<HistogramBase> histogram = hist_it2.GetNext();
+      if (!histogram) {
+        break;
+      }
+
+      // Make sure the "local heap" histograms are not in persistent memory.
+      EXPECT_NE(local_heap_histogram_name, histogram->histogram_name());
+      EXPECT_NE(local_heap_sparse_histogram_name, histogram->histogram_name());
+
+      if (histogram->histogram_name() == numeric_histogram_name) {
+        numeric_histogram2 = std::move(histogram);
+      } else if (histogram->histogram_name() == sparse_histogram_name) {
+        sparse_histogram2 = std::move(histogram);
+      }
+    }
+    // Make sure we found the histograms, and ensure that they are not the same
+    // histogram objects. Assertions verifying that they actually point to the
+    // same underlying data are deferred until later (so as not to disturb the
+    // sample counts).
+    EXPECT_TRUE(numeric_histogram2);
+    EXPECT_TRUE(sparse_histogram2);
+    histograms.push_back(numeric_histogram2.get());
+    histograms.push_back(sparse_histogram2.get());
+    EXPECT_NE(numeric_histogram, numeric_histogram2.get());
+    EXPECT_NE(sparse_histogram, sparse_histogram2.get());
+
+    // Store the histograms in |histograms_| so that they are not freed during
+    // the test.
+    histograms_.emplace_back(std::move(numeric_histogram2));
+    histograms_.emplace_back(std::move(sparse_histogram2));
+
     return histograms;
   }
 
@@ -367,22 +421,24 @@
     HistogramBase::Count logged_total_samples_count = 0;
     std::vector<HistogramBase::Count> logged_bucket_counts(
         /*value=*/kHistogramMax, 0);
-    // We ignore the last two histograms since they are the same as the first
+    // We ignore the last four histograms since they are the same as the first
     // two (they are simulations of histogram instances from a subprocess that
-    // point to the same underlying data). Otherwise, we will be counting the
-    // samples from those histograms twice.
-    for (size_t i = 0; i < histograms.size() - 2; ++i) {
+    // point to the same underlying data, and different histogram instances that
+    // are created from the same allocator). Otherwise, we will be counting the
+    // samples from those histograms thrice.
+    for (size_t i = 0; i < histograms.size() - 4; ++i) {
       HistogramBase* histogram = histograms[i];
       ASSERT_EQ(histogram->SnapshotDelta()->TotalCount(), 0);
       std::unique_ptr<HistogramSamples> logged_samples =
           histogram->SnapshotSamples();
       // Each individual histograms should have been emitted to a specific
-      // amount of times. Non-"local heap" histograms were emitted to twice as
-      // much because they appeared twice in the |histograms| array -- once as a
-      // normal histogram, and once as a simulation of a subprocess histogram.
+      // amount of times. Non-"local heap" histograms were emitted three times
+      // as often because they appeared thrice in the |histograms| array -- once as
+      // a normal histogram, once as a simulation of a subprocess histogram, and
+      // once as a duplicate histogram created from the same allocator.
       size_t expected_logged_samples_count = kNumThreads * kNumEmissions;
       if (!strstr(histogram->histogram_name(), "LocalHeap")) {
-        expected_logged_samples_count *= 2;
+        expected_logged_samples_count *= 3;
       }
       ASSERT_EQ(static_cast<size_t>(logged_samples->TotalCount()),
                 expected_logged_samples_count);
@@ -402,10 +458,10 @@
       ASSERT_EQ(logged_bucket_counts[i], real_bucket_counts[i]);
     }
 
-    // Finally, verify that our "subprocess histograms" actually point to the
-    // same underlying data as the "main browser" histograms, despite being
-    // different instances (this was verified earlier). This is done at the end
-    // of the test so as to not mess up the sample counts.
+    // Verify that our "subprocess histograms" actually point to the same
+    // underlying data as the "main browser" histograms, despite being different
+    // instances (this was verified earlier). This is done at the end of the
+    // test so as to not mess up the sample counts.
     HistogramBase* numeric_histogram = histograms[0];
     HistogramBase* subprocess_numeric_histogram = histograms[4];
     HistogramBase* sparse_histogram = histograms[1];
@@ -418,6 +474,21 @@
     ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 1);
     ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
     ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);
+
+    // Verify that our "duplicate histograms" created from the same allocator
+    // actually point to the same underlying data as the "main" histograms,
+    // despite being different instances (this was verified earlier). This is
+    // done at the end of the test so as to not mess up the sample counts.
+    HistogramBase* numeric_histogram2 = histograms[6];
+    HistogramBase* sparse_histogram2 = histograms[7];
+    ASSERT_EQ(numeric_histogram2->SnapshotDelta()->TotalCount(), 0);
+    ASSERT_EQ(sparse_histogram2->SnapshotDelta()->TotalCount(), 0);
+    numeric_histogram->Add(0);
+    sparse_histogram->Add(0);
+    ASSERT_EQ(numeric_histogram2->SnapshotDelta()->TotalCount(), 1);
+    ASSERT_EQ(sparse_histogram2->SnapshotDelta()->TotalCount(), 1);
+    ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
+    ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);
   }
 }
 
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 626909f..5cde264 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -307,6 +307,29 @@
   EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
 }
 
+// Check that IsDefinitelyEmpty() works with the results of SnapshotDelta().
+TEST_P(HistogramTest, IsDefinitelyEmpty_SnapshotDelta) {
+  HistogramBase* histogram = Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+                                                   HistogramBase::kNoFlags);
+  // No samples initially.
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+
+  // Verify when |histogram| is using SingleSample.
+  histogram->Add(1);
+  EXPECT_FALSE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  histogram->Add(10);
+  histogram->Add(10);
+  EXPECT_FALSE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+
+  // Verify when |histogram| uses a counts array instead of SingleSample.
+  histogram->Add(1);
+  histogram->Add(50);
+  EXPECT_FALSE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+}
+
 TEST_P(HistogramTest, ExponentialRangesTest) {
   // Check that we got a nice exponential when there was enough room.
   BucketRanges ranges(9);
diff --git a/base/metrics/histogram_unittest.nc b/base/metrics/histogram_unittest.nc
index c677106..5b5acad 100644
--- a/base/metrics/histogram_unittest.nc
+++ b/base/metrics/histogram_unittest.nc
@@ -67,7 +67,7 @@
   UmaHistogramEnumeration("", NoMaxValue::kMoo);
 }
 
-#elif defined(NCTEST_FUNCTION_INT_AS_ENUM)  // [r"static assertion failed due to requirement 'std::is_enum<int>::value'"]
+#elif defined(NCTEST_FUNCTION_INT_AS_ENUM)  // [r"static assertion failed due to requirement 'std::is_enum_v<int>'"]
 
 void WontCompile() {
   UmaHistogramEnumeration("", 1, 2);
diff --git a/base/metrics/metrics_hashes.cc b/base/metrics/metrics_hashes.cc
index 82726bd..0632fd2 100644
--- a/base/metrics/metrics_hashes.cc
+++ b/base/metrics/metrics_hashes.cc
@@ -50,7 +50,7 @@
   return DigestToUInt32(digest);
 }
 
-uint32_t HashName(base::StringPiece name) {
+uint32_t HashFieldTrialName(base::StringPiece name) {
   // SHA-1 is designed to produce a uniformly random spread in its output space,
   // even for nearly-identical inputs.
   unsigned char sha1_hash[base::kSHA1Length];
diff --git a/base/metrics/metrics_hashes.h b/base/metrics/metrics_hashes.h
index fb0433b..4977461 100644
--- a/base/metrics/metrics_hashes.h
+++ b/base/metrics/metrics_hashes.h
@@ -22,7 +22,7 @@
 
 // Computes a uint32_t hash of a given string based on its SHA1 hash. Suitable
 // for uniquely identifying field trial names and group names.
-BASE_EXPORT uint32_t HashName(base::StringPiece name);
+BASE_EXPORT uint32_t HashFieldTrialName(base::StringPiece name);
 
 }  // namespace base
 
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index dbe9df0..a3c8a1c 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -15,7 +15,6 @@
 #include "base/files/memory_mapped_file.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/shared_memory_mapping.h"
 #include "base/memory/writable_shared_memory_region.h"
 #include "base/metrics/histogram.h"
@@ -26,10 +25,8 @@
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/notreached.h"
-#include "base/numerics/safe_conversions.h"
 #include "base/pickle.h"
 #include "base/process/process_handle.h"
-#include "base/strings/strcat.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_split.h"
@@ -100,6 +97,62 @@
   return bucket_count * kBytesPerBucket;
 }
 
+void MergeSamplesToExistingHistogram(
+    HistogramBase* existing,
+    const HistogramBase* histogram,
+    std::unique_ptr<HistogramSamples> samples) {
+#if !BUILDFLAG(IS_NACL)
+  // If the passed |histogram| does not match with |existing| (i.e. the one
+  // registered with the global StatisticsRecorder) due to not being the same
+  // type of histogram or due to specifying different buckets, then unexpected
+  // things may happen further down the line. This may be indicative that a
+  // child process is emitting a histogram with different parameters than the
+  // browser process, for example.
+  // TODO(crbug/1432981): Remove this. Used to investigate failures when merging
+  // histograms from an allocator to the global StatisticsRecorder.
+  bool histograms_match = true;
+  HistogramType existing_type = existing->GetHistogramType();
+  if (histogram->GetHistogramType() != existing_type) {
+    // Different histogram types.
+    histograms_match = false;
+  } else if (existing_type == HistogramType::HISTOGRAM ||
+             existing_type == HistogramType::LINEAR_HISTOGRAM ||
+             existing_type == HistogramType::BOOLEAN_HISTOGRAM ||
+             existing_type == HistogramType::CUSTOM_HISTOGRAM) {
+    // Only numeric histograms make use of BucketRanges.
+    const BucketRanges* existing_buckets =
+        static_cast<const Histogram*>(existing)->bucket_ranges();
+    const BucketRanges* histogram_buckets =
+        static_cast<const Histogram*>(histogram)->bucket_ranges();
+    // DCHECK because HasValidChecksum() recomputes the checksum which can be
+    // expensive to do in a loop.
+    DCHECK(existing_buckets->HasValidChecksum() &&
+           histogram_buckets->HasValidChecksum());
+
+    if (existing_buckets->checksum() != histogram_buckets->checksum()) {
+      // Different buckets.
+      histograms_match = false;
+    }
+  }
+
+  if (!histograms_match) {
+    // If the histograms do not match, then the call to AddSamples() below might
+    // trigger a NOTREACHED(). Include the histogram name here for debugging
+    // purposes. This is not done in GetOrCreateStatisticsRecorderHistogram()
+    // directly, since that could incorrectly create crash reports for enum
+    // histograms that have newly appended entries (different bucket max and
+    // count).
+    SCOPED_CRASH_KEY_STRING256("PersistentHistogramAllocator", "histogram",
+                               existing->histogram_name());
+    existing->AddSamples(*samples);
+    return;
+  }
+#endif  // !BUILDFLAG(IS_NACL)
+
+  // Merge the delta from the passed object to the one in the SR.
+  existing->AddSamples(*samples);
+}
+
 }  // namespace
 
 PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
@@ -109,116 +162,108 @@
 PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
     default;
 
-PersistentSampleMapRecords*
-PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
-                                                          const void* user) {
+std::unique_ptr<PersistentSampleMapRecords>
+PersistentSparseHistogramDataManager::CreateSampleMapRecords(uint64_t id) {
   base::AutoLock auto_lock(lock_);
-  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
+  return std::make_unique<PersistentSampleMapRecords>(
+      this, id, GetSampleMapRecordsWhileLocked(id));
 }
 
-PersistentSampleMapRecords*
+std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
 PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
     uint64_t id) {
-  auto found = sample_records_.find(id);
-  if (found != sample_records_.end())
-    return found->second.get();
-
-  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
-  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
-  return samples.get();
+  auto* samples = &sample_records_[id];
+  if (!samples->get()) {
+    *samples = std::make_unique<std::vector<ReferenceAndSample>>();
+  }
+  return samples->get();
 }
 
-bool PersistentSparseHistogramDataManager::LoadRecords(
-    PersistentSampleMapRecords* sample_map_records) {
-  // DataManager must be locked in order to access the found_ field of any
-  // PersistentSampleMapRecords object.
+std::vector<PersistentMemoryAllocator::Reference>
+PersistentSparseHistogramDataManager::LoadRecords(
+    PersistentSampleMapRecords* sample_map_records,
+    absl::optional<HistogramBase::Sample> until_value) {
+  // DataManager must be locked in order to access the |sample_records_|
+  // vectors.
   base::AutoLock auto_lock(lock_);
-  bool found = false;
-
-  // If there are already "found" entries for the passed object, move them.
-  if (!sample_map_records->found_.empty()) {
-    sample_map_records->records_.reserve(sample_map_records->records_.size() +
-                                         sample_map_records->found_.size());
-    sample_map_records->records_.insert(sample_map_records->records_.end(),
-                                        sample_map_records->found_.begin(),
-                                        sample_map_records->found_.end());
-    sample_map_records->found_.clear();
-    found = true;
-  }
 
   // Acquiring a lock is a semi-expensive operation so load some records with
   // each call. More than this number may be loaded if it takes longer to
   // find at least one matching record for the passed object.
-  const int kMinimumNumberToLoad = 10;
+  const size_t kMinimumNumberToLoad = 10;
   const uint64_t match_id = sample_map_records->sample_map_id_;
 
-  // Loop while no enty is found OR we haven't yet loaded the minimum number.
-  // This will continue reading even after a match is found.
-  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
+  // Loop while no entry is found OR we haven't yet loaded the minimum number.
+  // This will continue reading even after a match is found. Note that it is
+  // possible that entries for the passed object were already found in a
+  // different call.
+  auto& found_records = *sample_map_records->records_;
+  bool found = (found_records.size() > sample_map_records->seen_);
+  size_t new_records = 0;
+  while (!found || new_records < kMinimumNumberToLoad) {
     // Get the next sample-record. The iterator will always resume from where
     // it left off even if it previously had nothing further to return.
     uint64_t found_id;
+    HistogramBase::Sample value;
     PersistentMemoryAllocator::Reference ref =
         PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
-                                                     &found_id);
+                                                     &found_id, &value);
 
     // Stop immediately if there are none.
-    if (!ref)
+    if (!ref) {
       break;
+    }
+    ++new_records;
 
     // The sample-record could be for any sparse histogram. Add the reference
     // to the appropriate collection for later use.
     if (found_id == match_id) {
-      sample_map_records->records_.push_back(ref);
+      found_records.emplace_back(ref, value);
       found = true;
     } else {
-      PersistentSampleMapRecords* samples =
+      std::vector<ReferenceAndSample>* samples =
           GetSampleMapRecordsWhileLocked(found_id);
-      DCHECK(samples);
-      samples->found_.push_back(ref);
+      CHECK(samples);
+      samples->emplace_back(ref, value);
     }
   }
 
-  return found;
+  // Return all references found that have not yet been seen by
+  // |sample_map_records|, up until |until_value| (if applicable).
+  std::vector<PersistentMemoryAllocator::Reference> new_references;
+  CHECK_GE(found_records.size(), sample_map_records->seen_);
+  auto new_found_records = base::make_span(found_records)
+                               .subspan(/*offset=*/sample_map_records->seen_);
+  new_references.reserve(new_found_records.size());
+  for (const auto& new_record : new_found_records) {
+    new_references.push_back(new_record.reference);
+    // References after |until_value| may also have been found. Stop here
+    // immediately in that case, since the caller does not expect any more
+    // samples after |until_value|.
+    if (until_value.has_value() && new_record.value == until_value.value()) {
+      break;
+    }
+  }
+  return new_references;
 }
 
-
 PersistentSampleMapRecords::PersistentSampleMapRecords(
     PersistentSparseHistogramDataManager* data_manager,
-    uint64_t sample_map_id)
-    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
+    uint64_t sample_map_id,
+    std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
+        records)
+    : data_manager_(data_manager),
+      sample_map_id_(sample_map_id),
+      records_(records) {}
 
 PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;
 
-PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
-    const void* user) {
-  DCHECK(!user_);
-  user_ = user;
-  seen_ = 0;
-  return this;
-}
-
-void PersistentSampleMapRecords::Release(const void* user) {
-  DCHECK_EQ(user_, user);
-  user_ = nullptr;
-}
-
-PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
-  DCHECK(user_);
-
-  // If there are no unseen records, lock and swap in all the found ones.
-  if (records_.size() == seen_) {
-    if (!data_manager_->LoadRecords(this))
-      return false;
-  }
-
-  // Return the next record. Records *must* be returned in the same order
-  // they are found in the persistent memory in order to ensure that all
-  // objects using this data always have the same state. Race conditions
-  // can cause duplicate records so using the "first found" is the only
-  // guarantee that all objects always access the same one.
-  DCHECK_LT(seen_, records_.size());
-  return records_[seen_++];
+std::vector<PersistentMemoryAllocator::Reference>
+PersistentSampleMapRecords::GetNextRecords(
+    absl::optional<HistogramBase::Sample> until_value) {
+  auto references = data_manager_->LoadRecords(this, until_value);
+  seen_ += references.size();
+  return references;
 }
 
 PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
@@ -455,52 +500,14 @@
     HistogramBase* histogram) {
   DCHECK(histogram);
 
-  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
-  if (!existing) {
-    // The above should never fail but if it does, no real harm is done.
-    // The data won't be merged but it also won't be recorded as merged
-    // so a future try, if successful, will get what was missed. If it
-    // continues to fail, some metric data will be lost but that is better
-    // than crashing.
+  // Return immediately if the histogram has no samples since the last delta
+  // snapshot. This is to prevent looking up or registering the histogram with
+  // the StatisticsRecorder, which requires acquiring a lock.
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  if (samples->IsDefinitelyEmpty()) {
     return;
   }
 
-  // TODO(crbug/1432981): Remove this. Used to investigate unexpected failures.
-  HistogramType type = existing->GetHistogramType();
-  if ((type == HistogramType::HISTOGRAM ||
-       type == HistogramType::LINEAR_HISTOGRAM ||
-       type == HistogramType::BOOLEAN_HISTOGRAM ||
-       type == HistogramType::CUSTOM_HISTOGRAM) &&
-      histogram->GetHistogramType() == type) {
-    const BucketRanges* existing_buckets =
-        static_cast<Histogram*>(existing)->bucket_ranges();
-    const BucketRanges* histogram_buckets =
-        static_cast<Histogram*>(histogram)->bucket_ranges();
-    DCHECK(existing_buckets->HasValidChecksum() &&
-           histogram_buckets->HasValidChecksum());
-
-    // If the buckets do not match, then the call to AddSamples() below should
-    // trigger a NOTREACHED(). This may be indicative that a child process is
-    // emitting a histogram with different parameters than the browser
-    // process, for example.
-    if (!existing_buckets->Equals(histogram_buckets)) {
-#if !BUILDFLAG(IS_NACL)
-      SCOPED_CRASH_KEY_STRING256("PersistentHistogramAllocator", "histogram",
-                                 existing->histogram_name());
-#endif  // !BUILDFLAG(IS_NACL)
-      existing->AddSamples(*histogram->SnapshotDelta());
-      return;
-    }
-  }
-
-  // Merge the delta from the passed object to the one in the SR.
-  existing->AddSamples(*histogram->SnapshotDelta());
-}
-
-void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
-    const HistogramBase* histogram) {
-  DCHECK(histogram);
-
   HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
   if (!existing) {
     // The above should never fail but if it does, no real harm is done.
@@ -508,14 +515,34 @@
     return;
   }
 
-  // Merge the delta from the passed object to the one in the SR.
-  existing->AddSamples(*histogram->SnapshotFinalDelta());
+  MergeSamplesToExistingHistogram(existing, histogram, std::move(samples));
 }
 
-PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
-    uint64_t id,
-    const void* user) {
-  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
+void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
+    const HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  // Return immediately if the histogram has no samples. This is to prevent
+  // looking up or registering the histogram with the StatisticsRecorder, which
+  // requires acquiring a lock.
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotFinalDelta();
+  if (samples->IsDefinitelyEmpty()) {
+    return;
+  }
+
+  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (!existing) {
+    // The above should never fail but if it does, no real harm is done.
+    // Some metric data will be lost but that is better than crashing.
+    return;
+  }
+
+  MergeSamplesToExistingHistogram(existing, histogram, std::move(samples));
+}
+
+std::unique_ptr<PersistentSampleMapRecords>
+PersistentHistogramAllocator::CreateSampleMapRecords(uint64_t id) {
+  return sparse_histogram_data_manager_.CreateSampleMapRecords(id);
 }
 
 void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
@@ -591,8 +618,12 @@
             histogram_maximum);
   const BucketRanges* ranges;
   if (ranges_manager_) {
-    ranges = ranges_manager_->RegisterOrDeleteDuplicateRanges(
-        created_ranges.release());
+    ranges =
+        ranges_manager_->GetOrRegisterCanonicalRanges(created_ranges.get());
+    if (ranges == created_ranges.get()) {
+      // `ranges_manager_` took ownership of `created_ranges`.
+      created_ranges.release();
+    }
   } else {
     ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
         created_ranges.release());
@@ -679,8 +710,9 @@
 
   HistogramBase* existing =
       StatisticsRecorder::FindHistogram(histogram->histogram_name());
-  if (existing)
+  if (existing) {
     return existing;
+  }
 
   // Adding the passed histogram to the SR would cause a problem if the
   // allocator that holds it eventually goes away. Instead, create a new
@@ -700,7 +732,11 @@
   return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
 }
 
-GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;
+GlobalHistogramAllocator::~GlobalHistogramAllocator() {
+  // GlobalHistogramAllocator should never be destroyed because Histogram
+  // objects may keep pointers to its memory.
+  NOTREACHED();
+}
 
 // static
 void GlobalHistogramAllocator::CreateWithPersistentMemory(
@@ -709,9 +745,8 @@
     size_t page_size,
     uint64_t id,
     StringPiece name) {
-  Set(WrapUnique(
-      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
-          base, size, page_size, id, name, false))));
+  Set(new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
+      base, size, page_size, id, name, PersistentMemoryAllocator::kReadWrite)));
 }
 
 // static
@@ -719,8 +754,8 @@
     size_t size,
     uint64_t id,
     StringPiece name) {
-  Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
+  Set(new GlobalHistogramAllocator(
+      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name)));
 }
 
 #if !BUILDFLAG(IS_NACL)
@@ -740,7 +775,8 @@
 
   std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
   bool success = false;
-  if (file.created()) {
+  const bool file_created = file.created();
+  if (file_created) {
     success = mmfile->Initialize(std::move(file), {0, size},
                                  MemoryMappedFile::READ_WRITE_EXTEND);
   } else {
@@ -748,12 +784,19 @@
   }
   if (!success ||
       !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+    if (file_created) {
+      // If we created the file, but it couldn't be used, delete it.
+      // This could happen if we were able to create a file of all-zeroes, but
+      // couldn't write to it due to lack of disk space.
+      base::DeleteFile(file_path);
+    }
     return false;
   }
 
-  Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), 0, id,
-                                                      name, false))));
+  Set(new GlobalHistogramAllocator(
+      std::make_unique<FilePersistentMemoryAllocator>(
+          std::move(mmfile), 0, id, name,
+          PersistentMemoryAllocator::kReadWrite)));
   Get()->SetPersistentLocation(file_path);
   return true;
 }
@@ -897,20 +940,19 @@
     return;
   }
 
-  Set(WrapUnique(new GlobalHistogramAllocator(
+  Set(new GlobalHistogramAllocator(
       std::make_unique<WritableSharedPersistentMemoryAllocator>(
-          std::move(mapping), 0, StringPiece()))));
+          std::move(mapping), 0, StringPiece())));
 }
 
 // static
-void GlobalHistogramAllocator::Set(
-    std::unique_ptr<GlobalHistogramAllocator> allocator) {
+void GlobalHistogramAllocator::Set(GlobalHistogramAllocator* allocator) {
   // Releasing or changing an allocator is extremely dangerous because it
   // likely has histograms stored within it. If the backing memory is also
   // also released, future accesses to those histograms will seg-fault.
   CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
   subtle::Release_Store(&g_histogram_allocator,
-                        reinterpret_cast<intptr_t>(allocator.release()));
+                        reinterpret_cast<intptr_t>(allocator));
   size_t existing = StatisticsRecorder::GetHistogramCount();
 
   DVLOG_IF(1, existing)
@@ -924,8 +966,7 @@
 }
 
 // static
-std::unique_ptr<GlobalHistogramAllocator>
-GlobalHistogramAllocator::ReleaseForTesting() {
+GlobalHistogramAllocator* GlobalHistogramAllocator::ReleaseForTesting() {
   GlobalHistogramAllocator* histogram_allocator = Get();
   if (!histogram_allocator)
     return nullptr;
@@ -942,7 +983,8 @@
   }
 
   subtle::Release_Store(&g_histogram_allocator, 0);
-  return WrapUnique(histogram_allocator);
+  ANNOTATE_LEAKING_OBJECT_PTR(histogram_allocator);
+  return histogram_allocator;
 }
 
 void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
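The GetOrRegisterCanonicalRanges() call above replaces RegisterOrDeleteDuplicateRanges(); a hedged sketch of the ownership handshake it implies (function and variable names are illustrative, and the BucketRanges setup is elided to a comment):

const base::BucketRanges* GetCanonicalRanges(
    base::RangesManager* ranges_manager,
    size_t bucket_count) {
  auto created_ranges = std::make_unique<base::BucketRanges>(bucket_count + 1);
  // (Bucket boundaries would be filled in and the checksum reset here.)
  const base::BucketRanges* ranges =
      ranges_manager->GetOrRegisterCanonicalRanges(created_ranges.get());
  if (ranges == created_ranges.get()) {
    // The manager took ownership of |created_ranges|, so release the
    // unique_ptr to avoid a double free; otherwise an equivalent,
    // previously registered BucketRanges was returned instead.
    created_ranges.release();
  }
  return ranges;
}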
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 5c30fb5..fdf136f 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -20,6 +20,7 @@
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
 #include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
 
@@ -30,13 +31,7 @@
 class WritableSharedMemoryRegion;
 
 // A data manager for sparse histograms so each instance of such doesn't have
-// to separately iterate over the entire memory segment. Though this class
-// will generally be accessed through the PersistentHistogramAllocator above,
-// it can be used independently on any PersistentMemoryAllocator (making it
-// useable for testing). This object supports only one instance of a sparse
-// histogram for a given id. Tests that create multiple identical histograms,
-// perhaps to simulate multiple processes, should create a separate manager
-// for each.
+// to separately iterate over the entire memory segment.
 class BASE_EXPORT PersistentSparseHistogramDataManager {
  public:
   // Constructs the data manager. The allocator must live longer than any
@@ -51,13 +46,11 @@
 
   ~PersistentSparseHistogramDataManager();
 
-  // Returns the object that manages the persistent-sample-map records for a
-  // given |id|. Only one |user| of this data is allowed at a time. This does
-  // an automatic Acquire() on the records. The user must call Release() on
-  // the returned object when it is finished with it. Ownership of the records
-  // object stays with this manager.
-  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
-                                                  const void* user);
+  // Returns an object that manages persistent-sample-map records for a given
+  // |id|. The returned object queries |this| for records. Hence, the returned
+  // object must not outlive |this|.
+  std::unique_ptr<PersistentSampleMapRecords> CreateSampleMapRecords(
+      uint64_t id);
 
   // Convenience method that gets the object for a given reference so callers
   // don't have to also keep their own pointer to the appropriate allocator.
@@ -69,17 +62,31 @@
  private:
   friend class PersistentSampleMapRecords;
 
-  // Gets the object holding records for a given sample-map id.
-  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id)
+  struct ReferenceAndSample {
+    PersistentMemoryAllocator::Reference reference;
+    HistogramBase::Sample value;
+  };
+
+  // Gets the vector holding records for a given sample-map id.
+  std::vector<ReferenceAndSample>* GetSampleMapRecordsWhileLocked(uint64_t id)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  // Loads sample-map records looking for those belonging to the specified
-  // |load_id|. Records found for other sample-maps are held for later use
-  // without having to iterate again. This should be called only from a
-  // PersistentSampleMapRecords object because those objects have a contract
-  // that there are no other threads accessing the internal records_ field
-  // of the object that is passed in.
-  bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
+  // Returns sample-map records belonging to the specified |sample_map_records|.
+  // Only records found that were not yet seen by |sample_map_records| will be
+  // returned, determined by its |seen_| field. Records found for other
+  // sample-maps are held for later use without having to iterate again. This
+  // should be called only from a PersistentSampleMapRecords object because
+  // those objects have a contract that there are no other threads accessing the
+  // internal records_ field of the object that is passed in. If |until_value|
+  // is set and a sample is found with said value, the search will stop early
+  // and the last entry in the returned vector will be that sample.
+  // Note: The returned vector is not guaranteed to contain all unseen records
+  // for |sample_map_records|. If this is needed, then repeatedly call this
+  // until an empty vector is returned, which definitely means that
+  // |sample_map_records| has seen all its records.
+  std::vector<PersistentMemoryAllocator::Reference> LoadRecords(
+      PersistentSampleMapRecords* sample_map_records,
+      absl::optional<HistogramBase::Sample> until_value);
 
   // Weak-pointer to the allocator used by the sparse histograms.
   raw_ptr<PersistentMemoryAllocator> allocator_;
@@ -88,7 +95,7 @@
   PersistentMemoryAllocator::Iterator record_iterator_ GUARDED_BY(lock_);
 
   // Mapping of sample-map IDs to their sample records.
-  std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
+  std::map<uint64_t, std::unique_ptr<std::vector<ReferenceAndSample>>>
       sample_records_ GUARDED_BY(lock_);
 
   base::Lock lock_;
@@ -105,9 +112,12 @@
   // Constructs an instance of this class. The manager object must live longer
   // than all instances of this class that reference it, which is not usually
   // a problem since these objects are generally managed from within that
-  // manager instance.
-  PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
-                             uint64_t sample_map_id);
+  // manager instance. The same caveats apply for the |records| vector.
+  PersistentSampleMapRecords(
+      PersistentSparseHistogramDataManager* data_manager,
+      uint64_t sample_map_id,
+      std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
+          records);
 
   PersistentSampleMapRecords(const PersistentSampleMapRecords&) = delete;
   PersistentSampleMapRecords& operator=(const PersistentSampleMapRecords&) =
@@ -115,17 +125,16 @@
 
   ~PersistentSampleMapRecords();
 
-  // Resets the internal state for a new object using this data. The return
-  // value is "this" as a convenience.
-  PersistentSampleMapRecords* Acquire(const void* user);
-
-  // Indicates that the using object is done with this data.
-  void Release(const void* user);
-
-  // Gets the next reference to a persistent sample-map record. The type and
-  // layout of the data being referenced is defined entirely within the
-  // PersistentSampleMap class.
-  PersistentMemoryAllocator::Reference GetNext();
+  // Gets next references to persistent sample-map records. If |until_value| is
+  // passed, and said value is found, then it will be the last element in the
+  // returned vector. The type and layout of the data being referenced is
+  // defined entirely within the PersistentSampleMap class.
+  // Note: The returned vector is not guaranteed to contain all unseen records
+  // for |this|. If this is needed, then repeatedly call this until an empty
+  // vector is returned, which definitely means that |this| has seen all its
+  // records.
+  std::vector<PersistentMemoryAllocator::Reference> GetNextRecords(
+      absl::optional<HistogramBase::Sample> until_value);
 
   // Creates a new persistent sample-map record for sample |value| and returns
   // a reference to it.
@@ -151,25 +160,15 @@
   // ID of PersistentSampleMap to which these records apply.
   const uint64_t sample_map_id_;
 
-  // The current user of this set of records. It is used to ensure that no
-  // more than one object is using these records at a given time.
-  raw_ptr<const void> user_ = nullptr;
-
-  // This is the count of how many "records" have already been read by the
-  // owning sample-map.
+  // This is the count of how many "records" have already been read by |this|.
   size_t seen_ = 0;
 
-  // This is the set of records previously found for a sample map. Because
-  // there is ever only one object with a given ID (typically a hash of a
-  // histogram name) and because the parent SparseHistogram has acquired
-  // its own lock before accessing the PersistentSampleMap it controls, this
-  // list can be accessed without acquiring any additional lock.
-  std::vector<PersistentMemoryAllocator::Reference> records_;
-
-  // This is the set of records found during iteration through memory. It
-  // is appended in bulk to "records". Access to this vector can be done
-  // only while holding the parent manager's lock.
-  std::vector<PersistentMemoryAllocator::Reference> found_;
+  // This is the set of records found during iteration through memory, owned by
+  // the parent manager. When GetNextRecords() is called, this is looked up to
+  // find new references. Access to this vector should only be done while
+  // holding the parent manager's lock.
+  raw_ptr<std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>>
+      records_;
 };
 
 
@@ -245,7 +244,7 @@
   // This method will return null if any problem is detected with the data.
   std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
 
-  // Allocate a new persistent histogram. The returned histogram will not
+  // Allocates a new persistent histogram. The returned histogram will not
   // be able to be located by other allocators until it is "finalized".
   std::unique_ptr<HistogramBase> AllocateHistogram(
       HistogramType histogram_type,
@@ -274,15 +273,14 @@
   void MergeHistogramFinalDeltaToStatisticsRecorder(
       const HistogramBase* histogram);
 
-  // Returns the object that manages the persistent-sample-map records for a
-  // given |id|. Only one |user| of this data is allowed at a time. This does
-  // an automatic Acquire() on the records. The user must call Release() on
-  // the returned object when it is finished with it. Ownership stays with
-  // this allocator.
-  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
-                                                  const void* user);
+  // Returns an object that manages persistent-sample-map records for a given
+  // |id|. The returned object queries |sparse_histogram_data_manager_| for
+  // records. Hence, the returned object must not outlive
+  // |sparse_histogram_data_manager_| (and hence |this|).
+  std::unique_ptr<PersistentSampleMapRecords> CreateSampleMapRecords(
+      uint64_t id);
 
-  // Create internal histograms for tracking memory use and allocation sizes
+  // Creates internal histograms for tracking memory use and allocation sizes
   // for allocator of |name| (which can simply be the result of Name()). This
   // is done seperately from construction for situations such as when the
   // histograms will be backed by memory provided by this very allocator.
@@ -296,6 +294,13 @@
 
   // Sets the internal |ranges_manager_|, which will be used by the allocator to
   // register BucketRanges. Takes ownership of the passed |ranges_manager|.
+  //
+  // WARNING: Since histograms may be created from |this| from multiple threads,
+  // for example through a direct call to CreateHistogram(), or while iterating
+  // through |this|, then the passed manager may also be accessed concurrently.
+  // Hence, care must be taken to ensure that either:
+  //   1) The passed manager is threadsafe (see ThreadSafeRangesManager), or
+  //   2) |this| is not used concurrently.
   void SetRangesManager(RangesManager* ranges_manager);
 
   // Clears the internal |last_created_| reference so testing can validate
@@ -446,8 +451,9 @@
   // ever one allocator for all such histograms created by a single process.
   // This takes ownership of the object and should be called as soon as
   // possible during startup to capture as many histograms as possible and
-  // while operating single-threaded so there are no race-conditions.
-  static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
+  // while operating single-threaded so there are no race-conditions. Note that
+  // the `allocator` will never be destroyed, even in tests.
+  static void Set(GlobalHistogramAllocator* allocator);
 
   // Gets a pointer to the global histogram allocator. Returns null if none
   // exists.
@@ -456,8 +462,9 @@
   // This access to the persistent allocator is only for testing; it extracts
   // the current allocator completely. This allows easy creation of histograms
   // within persistent memory segments which can then be extracted and used in
-  // other ways.
-  static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
+  // other ways. Do not destroy the returned allocator, since already-created
+  // histograms may still hold pointers into its memory.
+  static GlobalHistogramAllocator* ReleaseForTesting();
 
   // Stores a pathname to which the contents of this allocator should be saved
   // in order to persist the data for later use.
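
A minimal sketch of the new raw-pointer contract for GlobalHistogramAllocator::Set() and ReleaseForTesting() (illustrative only; the allocator passed to Set() is intentionally never destroyed):

  GlobalHistogramAllocator* old_allocator =
      GlobalHistogramAllocator::ReleaseForTesting();
  GlobalHistogramAllocator::CreateWithLocalMemory(64 << 10, 0, "");
  // ... exercise code against the temporary allocator ...
  GlobalHistogramAllocator::Set(old_allocator);  // Never deleted.
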
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
index c57954d..ef2cecb 100644
--- a/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -7,13 +7,12 @@
 #include "base/files/file.h"
 #include "base/files/file_util.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/raw_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -28,7 +27,7 @@
       const PersistentHistogramAllocatorTest&) = delete;
 
  protected:
-  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+  constexpr static int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
 
   PersistentHistogramAllocatorTest()
       : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
@@ -39,12 +38,15 @@
   }
 
   void CreatePersistentHistogramAllocator() {
-    allocator_memory_.reset(new char[kAllocatorMemorySize]);
+    // The GlobalHistogramAllocator is never deleted, so this test
+    // intentionally leaks the memory allocated for it.
+    allocator_memory_ = new char[kAllocatorMemorySize];
+    ANNOTATE_LEAKING_OBJECT_PTR(allocator_memory_);
 
     GlobalHistogramAllocator::ReleaseForTesting();
-    memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
+    memset(allocator_memory_, 0, kAllocatorMemorySize);
     GlobalHistogramAllocator::CreateWithPersistentMemory(
-        allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
+        allocator_memory_, kAllocatorMemorySize, 0, 0,
         "PersistentHistogramAllocatorTest");
     allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
   }
@@ -55,7 +57,7 @@
   }
 
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
-  std::unique_ptr<char[]> allocator_memory_;
+  raw_ptr<char> allocator_memory_ = nullptr;
   raw_ptr<PersistentMemoryAllocator> allocator_ = nullptr;
 };
 
@@ -111,7 +113,8 @@
   std::unique_ptr<HistogramBase> recovered;
   PersistentHistogramAllocator recovery(
       std::make_unique<PersistentMemoryAllocator>(
-          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false));
+          allocator_memory_, kAllocatorMemorySize, 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
   PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
 
   recovered = histogram_iter.GetNext();
@@ -228,7 +231,7 @@
   std::unique_ptr<StatisticsRecorder> local_sr =
       StatisticsRecorder::CreateTemporaryForTesting();
   EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
-  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+  GlobalHistogramAllocator* old_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
   GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
   ASSERT_TRUE(GlobalHistogramAllocator::Get());
@@ -258,20 +261,21 @@
   // Destroy the local SR and ensure that we're back to the initial state and
   // restore the global allocator. Histograms created in the local SR will
   // become unmanaged.
-  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+  GlobalHistogramAllocator* new_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
   local_sr.reset();
   EXPECT_EQ(global_sr_initial_histogram_count,
             StatisticsRecorder::GetHistogramCount());
   EXPECT_EQ(global_sr_initial_bucket_ranges_count,
             StatisticsRecorder::GetBucketRanges().size());
-  GlobalHistogramAllocator::Set(std::move(old_allocator));
+  GlobalHistogramAllocator::Set(old_allocator);
 
   // Create a "recovery" allocator using the same memory as the local one.
   PersistentHistogramAllocator recovery1(
       std::make_unique<PersistentMemoryAllocator>(
           const_cast<void*>(new_allocator->memory_allocator()->data()),
-          new_allocator->memory_allocator()->size(), 0, 0, "", false));
+          new_allocator->memory_allocator()->size(), 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
   PersistentHistogramAllocator::Iterator histogram_iter1(&recovery1);
 
   // Get the histograms that were created locally (and forgotten) and merge
@@ -325,7 +329,8 @@
   PersistentHistogramAllocator recovery2(
       std::make_unique<PersistentMemoryAllocator>(
           const_cast<void*>(new_allocator->memory_allocator()->data()),
-          new_allocator->memory_allocator()->size(), 0, 0, "", false));
+          new_allocator->memory_allocator()->size(), 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
   PersistentHistogramAllocator::Iterator histogram_iter2(&recovery2);
   while (true) {
     recovered = histogram_iter2.GetNext();
@@ -355,6 +360,170 @@
   EXPECT_EQ(1, snapshot->GetCount(7));
 }
 
+// Verify that when merging histograms from an allocator with the global
+// StatisticsRecorder, if the histogram has no samples to be merged, then it
+// is skipped (no lookup/registration of the histogram with the SR).
+TEST_F(PersistentHistogramAllocatorTest,
+       StatisticsRecorderMerge_IsDefinitelyEmpty) {
+  const size_t global_sr_initial_histogram_count =
+      StatisticsRecorder::GetHistogramCount();
+  const size_t global_sr_initial_bucket_ranges_count =
+      StatisticsRecorder::GetBucketRanges().size();
+
+  // Create a local StatisticsRecorder in which the newly created histograms
+  // will be recorded. The global allocator must be replaced after because the
+  // act of releasing will cause the active SR to forget about all histograms
+  // in the released memory.
+  std::unique_ptr<StatisticsRecorder> local_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+  EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+  GlobalHistogramAllocator* old_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
+  ASSERT_TRUE(GlobalHistogramAllocator::Get());
+
+  // Create a bunch of histograms, and call SnapshotDelta() on all of them so
+  // that their next SnapshotDelta() calls return an empty HistogramSamples.
+  LinearHistogram::FactoryGet("SRTLinearHistogram1", 1, 10, 10, 0);
+  HistogramBase* histogram2 =
+      LinearHistogram::FactoryGet("SRTLinearHistogram2", 1, 10, 10, 0);
+  histogram2->Add(3);
+  histogram2->SnapshotDelta();
+  HistogramBase* histogram3 =
+      LinearHistogram::FactoryGet("SRTLinearHistogram3", 1, 10, 10, 0);
+  histogram3->Add(1);
+  histogram3->Add(10);
+  histogram3->SnapshotDelta();
+  SparseHistogram::FactoryGet("SRTSparseHistogram1", 0);
+  HistogramBase* sparse_histogram2 =
+      SparseHistogram::FactoryGet("SRTSparseHistogram2", 0);
+  sparse_histogram2->Add(3);
+  sparse_histogram2->SnapshotDelta();
+  HistogramBase* sparse_histogram3 =
+      SparseHistogram::FactoryGet("SRTSparseHistogram3", 0);
+  sparse_histogram3->Add(1);
+  sparse_histogram3->Add(10);
+  sparse_histogram3->SnapshotDelta();
+
+  EXPECT_EQ(6U, StatisticsRecorder::GetHistogramCount());
+
+  // Destroy the local SR and ensure that we're back to the initial state and
+  // restore the global allocator. Histograms created in the local SR will
+  // become unmanaged.
+  GlobalHistogramAllocator* new_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  local_sr.reset();
+  EXPECT_EQ(global_sr_initial_histogram_count,
+            StatisticsRecorder::GetHistogramCount());
+  EXPECT_EQ(global_sr_initial_bucket_ranges_count,
+            StatisticsRecorder::GetBucketRanges().size());
+  GlobalHistogramAllocator::Set(old_allocator);
+
+  // Create a "recovery" allocator using the same memory as the local one.
+  PersistentHistogramAllocator recovery1(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(new_allocator->memory_allocator()->data()),
+          new_allocator->memory_allocator()->size(), 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
+  PersistentHistogramAllocator::Iterator histogram_iter1(&recovery1);
+
+  // Get the histograms that were created locally (and forgotten) and attempt
+  // to merge them into the global SR. Since their deltas are all empty, nothing
+  // should end up being registered with the SR.
+  while (true) {
+    std::unique_ptr<HistogramBase> recovered = histogram_iter1.GetNext();
+    if (!recovered) {
+      break;
+    }
+
+    recovery1.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+    HistogramBase* found =
+        StatisticsRecorder::FindHistogram(recovered->histogram_name());
+    EXPECT_FALSE(found);
+  }
+  EXPECT_EQ(global_sr_initial_histogram_count,
+            StatisticsRecorder::GetHistogramCount());
+
+  // Same as above, but with MergeHistogramFinalDeltaToStatisticsRecorder()
+  // instead of MergeHistogramDeltaToStatisticsRecorder().
+  PersistentHistogramAllocator recovery2(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(new_allocator->memory_allocator()->data()),
+          new_allocator->memory_allocator()->size(), 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
+  PersistentHistogramAllocator::Iterator histogram_iter2(&recovery2);
+  while (true) {
+    std::unique_ptr<HistogramBase> recovered = histogram_iter2.GetNext();
+    if (!recovered) {
+      break;
+    }
+
+    recovery2.MergeHistogramFinalDeltaToStatisticsRecorder(recovered.get());
+    HistogramBase* found =
+        StatisticsRecorder::FindHistogram(recovered->histogram_name());
+    EXPECT_FALSE(found);
+  }
+  EXPECT_EQ(global_sr_initial_histogram_count,
+            StatisticsRecorder::GetHistogramCount());
+}
+
+TEST_F(PersistentHistogramAllocatorTest, MultipleSameSparseHistograms) {
+  const std::string kSparseHistogramName = "SRTSparseHistogram";
+
+  // Create a temporary SR so that histograms created during this test aren't
+  // leaked to other tests.
+  std::unique_ptr<StatisticsRecorder> local_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+
+  // Create a sparse histogram.
+  HistogramBase* sparse = SparseHistogram::FactoryGet(kSparseHistogramName, 0);
+
+  // Get the sparse histogram that was created above. We should have two
+  // distinct objects, but both representing and pointing to the same data.
+  PersistentHistogramAllocator::Iterator iter(GlobalHistogramAllocator::Get());
+  std::unique_ptr<HistogramBase> sparse2;
+  while (true) {
+    sparse2 = iter.GetNext();
+    if (!sparse2 || kSparseHistogramName == sparse2->histogram_name()) {
+      break;
+    }
+  }
+  ASSERT_TRUE(sparse2);
+  EXPECT_NE(sparse, sparse2.get());
+
+  // Verify that both objects can coexist, i.e., samples emitted from one can be
+  // found by the other and vice versa.
+  sparse->AddCount(1, 3);
+  std::unique_ptr<HistogramSamples> snapshot =
+      sparse->SnapshotUnloggedSamples();
+  std::unique_ptr<HistogramSamples> snapshot2 =
+      sparse2->SnapshotUnloggedSamples();
+  EXPECT_EQ(snapshot->TotalCount(), 3);
+  EXPECT_EQ(snapshot2->TotalCount(), 3);
+  EXPECT_EQ(snapshot->GetCount(1), 3);
+  EXPECT_EQ(snapshot2->GetCount(1), 3);
+  snapshot = sparse->SnapshotDelta();
+  snapshot2 = sparse2->SnapshotDelta();
+  EXPECT_EQ(snapshot->TotalCount(), 3);
+  EXPECT_EQ(snapshot2->TotalCount(), 0);
+  EXPECT_EQ(snapshot->GetCount(1), 3);
+  EXPECT_EQ(snapshot2->GetCount(1), 0);
+
+  sparse2->AddCount(2, 6);
+  snapshot = sparse->SnapshotUnloggedSamples();
+  snapshot2 = sparse2->SnapshotUnloggedSamples();
+  EXPECT_EQ(snapshot->TotalCount(), 6);
+  EXPECT_EQ(snapshot2->TotalCount(), 6);
+  EXPECT_EQ(snapshot->GetCount(2), 6);
+  EXPECT_EQ(snapshot2->GetCount(2), 6);
+  snapshot2 = sparse2->SnapshotDelta();
+  snapshot = sparse->SnapshotDelta();
+  EXPECT_EQ(snapshot->TotalCount(), 0);
+  EXPECT_EQ(snapshot2->TotalCount(), 6);
+  EXPECT_EQ(snapshot->GetCount(2), 0);
+  EXPECT_EQ(snapshot2->GetCount(2), 6);
+}
+
 TEST_F(PersistentHistogramAllocatorTest, CustomRangesManager) {
   const char LinearHistogramName[] = "TestLinearHistogram";
   const size_t global_sr_initial_bucket_ranges_count =
@@ -367,7 +536,7 @@
   std::unique_ptr<StatisticsRecorder> local_sr =
       StatisticsRecorder::CreateTemporaryForTesting();
   EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
-  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+  GlobalHistogramAllocator* old_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
   GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
   ASSERT_TRUE(GlobalHistogramAllocator::Get());
@@ -383,18 +552,19 @@
   // Destroy the local SR and ensure that we're back to the initial state and
   // restore the global allocator. The histogram created in the local SR will
   // become unmanaged.
-  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+  GlobalHistogramAllocator* new_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
   local_sr.reset();
   EXPECT_EQ(global_sr_initial_bucket_ranges_count,
             StatisticsRecorder::GetBucketRanges().size());
-  GlobalHistogramAllocator::Set(std::move(old_allocator));
+  GlobalHistogramAllocator::Set(old_allocator);
 
   // Create a "recovery" allocator using the same memory as the local one.
   PersistentHistogramAllocator recovery(
       std::make_unique<PersistentMemoryAllocator>(
           const_cast<void*>(new_allocator->memory_allocator()->data()),
-          new_allocator->memory_allocator()->size(), 0, 0, "", false));
+          new_allocator->memory_allocator()->size(), 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
 
   // Set a custom RangesManager for the recovery allocator so that the
   // BucketRanges are not registered with the global SR.
@@ -499,8 +669,9 @@
 
   // Create an allocator and iterator using the file's data.
   PersistentHistogramAllocator new_file_allocator(
-      std::make_unique<PersistentMemoryAllocator>(data.get(), temp_size, 0, 0,
-                                                  "", false));
+      std::make_unique<PersistentMemoryAllocator>(
+          data.get(), temp_size, 0, 0, "",
+          PersistentMemoryAllocator::kReadWrite));
   PersistentHistogramAllocator::Iterator it(&new_file_allocator);
 
   // Verify that |kHistogramName| is in the file.
diff --git a/base/metrics/persistent_histogram_storage.cc b/base/metrics/persistent_histogram_storage.cc
index b6a6115..ef71fa5 100644
--- a/base/metrics/persistent_histogram_storage.cc
+++ b/base/metrics/persistent_histogram_storage.cc
@@ -130,6 +130,9 @@
   // Save data using the current time as the filename. The actual filename
   // doesn't matter (so long as it ends with the correct extension) but this
   // works as well as anything.
+  //
+  // NOTE: Cannot use `UnlocalizedTimeFormatWithPattern()` here since `//base`
+  // cannot depend on `//base:i18n`.
   Time::Exploded exploded;
   Time::Now().LocalExplode(&exploded);
   const FilePath file_path =
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index e3a9fef..997afbe 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -72,8 +72,11 @@
 constexpr uint32_t kFlagFull = 1 << 1;
 
 // Errors that are logged in the "errors" histogram.
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
 enum AllocatorError : int {
   kMemoryIsCorrupt = 1,
+  kMaxValue = kMemoryIsCorrupt,
 };
 
 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
@@ -309,20 +312,20 @@
                                                      size_t page_size,
                                                      uint64_t id,
                                                      base::StringPiece name,
-                                                     bool readonly)
+                                                     AccessMode access_mode)
     : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
                                 size,
                                 page_size,
                                 id,
                                 name,
-                                readonly) {}
+                                access_mode) {}
 
 PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
                                                      size_t size,
                                                      size_t page_size,
                                                      uint64_t id,
                                                      base::StringPiece name,
-                                                     bool readonly)
+                                                     AccessMode access_mode)
     : mem_base_(static_cast<char*>(memory.base)),
       mem_type_(memory.type),
       mem_size_(checked_cast<uint32_t>(size)),
@@ -332,11 +335,7 @@
 #else
       vm_page_size_(SysInfo::VMAllocationGranularity()),
 #endif
-      readonly_(readonly),
-      corrupt_(false),
-      allocs_histogram_(nullptr),
-      used_histogram_(nullptr),
-      errors_histogram_(nullptr) {
+      access_mode_(access_mode) {
   // These asserts ensure that the structures are 32/64-bit agnostic and meet
   // all the requirements of use within the allocator. They access private
   // definitions and so cannot be moved to the global scope.
@@ -353,6 +352,7 @@
                 "\"queue\" is not aligned properly; must be at end of struct");
 
   // Ensure that memory segment is of acceptable size.
+  const bool readonly = access_mode == kReadOnly;
   CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
 
   // These atomics operate inter-process and so must be lock-free.
@@ -361,9 +361,12 @@
   DCHECK(BlockHeader().next.is_lock_free());
   CHECK(corrupt_.is_lock_free());
 
+  // When calling SetCorrupt() during initialization, don't write to the memory
+  // in kReadOnly and kReadWriteExisting modes.
+  const bool allow_write_for_set_corrupt = (access_mode == kReadWrite);
   if (shared_meta()->cookie != kGlobalCookie) {
-    if (readonly) {
-      SetCorrupt();
+    if (access_mode != kReadWrite) {
+      SetCorrupt(allow_write_for_set_corrupt);
       return;
     }
 
@@ -387,7 +390,8 @@
         first_block->type_id.load(std::memory_order_relaxed) != 0 ||
         first_block->next != 0) {
       // ...or something malicious has been playing with the metadata.
-      SetCorrupt();
+      CHECK(allow_write_for_set_corrupt);
+      SetCorrupt(allow_write_for_set_corrupt);
     }
 
     // This is still safe to do even if corruption has been detected.
@@ -423,26 +427,26 @@
         shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
         shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
         shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
-      SetCorrupt();
+      SetCorrupt(allow_write_for_set_corrupt);
     }
     if (!readonly) {
       // The allocator is attaching to a previously initialized segment of
       // memory. If the initialization parameters differ, make the best of it
-      // by reducing the local construction parameters to match those of
-      // the actual memory area. This ensures that the local object never
-      // tries to write outside of the original bounds.
+      // by reducing the local construction parameters to match those of the
+      // actual memory area. This ensures that the local object never tries to
+      // write outside of the original bounds.
       // Because the fields are const to ensure that no code other than the
-      // constructor makes changes to them as well as to give optimization
-      // hints to the compiler, it's necessary to const-cast them for changes
-      // here.
+      // constructor makes changes to them as well as to give optimization hints
+      // to the compiler, it's necessary to const-cast them for changes here.
       if (shared_meta()->size < mem_size_)
         *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
       if (shared_meta()->page_size < mem_page_)
         *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
 
       // Ensure that settings are still valid after the above adjustments.
-      if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
-        SetCorrupt();
+      if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly)) {
+        SetCorrupt(allow_write_for_set_corrupt);
+      }
     }
   }
 }
@@ -477,8 +481,9 @@
 
 void PersistentMemoryAllocator::CreateTrackingHistograms(
     base::StringPiece name) {
-  if (name.empty() || readonly_)
+  if (name.empty() || access_mode_ == kReadOnly) {
     return;
+  }
   std::string name_string(name);
 
 #if 0
@@ -497,8 +502,9 @@
       HistogramBase::kUmaTargetedHistogramFlag);
 
   DCHECK(!errors_histogram_);
-  errors_histogram_ = SparseHistogram::FactoryGet(
-      "UMA.PersistentAllocator." + name_string + ".Errors",
+  errors_histogram_ = LinearHistogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Errors", 1,
+      AllocatorError::kMaxValue + 1, AllocatorError::kMaxValue + 2,
       HistogramBase::kUmaTargetedHistogramFlag);
 }
 
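
With the single-entry AllocatorError enum above (kMaxValue == kMemoryIsCorrupt == 1), the new FactoryGet() call resolves to the sketch below; the extra buckets are the usual underflow and overflow buckets of a linear enum histogram:

  errors_histogram_ = LinearHistogram::FactoryGet(
      "UMA.PersistentAllocator." + name_string + ".Errors",
      /*minimum=*/1, /*maximum=*/2, /*bucket_count=*/3,
      HistogramBase::kUmaTargetedHistogramFlag);
  // Buckets: [0, 1) underflow, [1, 2) for kMemoryIsCorrupt, [2, max] overflow.
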
@@ -566,7 +572,7 @@
                                            uint32_t to_type_id,
                                            uint32_t from_type_id,
                                            bool clear) {
-  DCHECK(!readonly_);
+  DCHECK_NE(access_mode_, kReadOnly);
   volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
   if (!block)
     return false;
@@ -641,7 +647,7 @@
 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
     size_t req_size,
     uint32_t type_id) {
-  DCHECK(!readonly_);
+  DCHECK_NE(access_mode_, kReadOnly);
 
   // Validate req_size to ensure it won't overflow when used as 32-bit value.
   if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
@@ -815,7 +821,7 @@
 }
 
 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
-  DCHECK(!readonly_);
+  DCHECK_NE(access_mode_, kReadOnly);
   if (IsCorrupt())
     return;
   volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
@@ -876,7 +882,7 @@
 // iteration, this method may be called by other "const" methods. In this
 // case, it's safe to discard the constness and modify the local flag and
 // maybe even the shared flag if the underlying data isn't actually read-only.
-void PersistentMemoryAllocator::SetCorrupt() const {
+void PersistentMemoryAllocator::SetCorrupt(bool allow_write) const {
   if (!corrupt_.load(std::memory_order_relaxed) &&
       !CheckFlag(
           const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
@@ -886,16 +892,19 @@
   }
 
   corrupt_.store(true, std::memory_order_relaxed);
-  if (!readonly_) {
+  if (allow_write && access_mode_ != kReadOnly) {
     SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
             kFlagCorrupt);
   }
 }
 
 bool PersistentMemoryAllocator::IsCorrupt() const {
-  if (corrupt_.load(std::memory_order_relaxed) ||
-      CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
-    SetCorrupt();  // Make sure all indicators are set.
+  if (corrupt_.load(std::memory_order_relaxed)) {
+    return true;
+  }
+  if (CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
+    // Set the local flag if we found the flag in the data.
+    SetCorrupt(/*allow_write=*/false);
     return true;
   }
   return false;
@@ -962,8 +971,8 @@
 void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
   // Generally there is nothing to do as every write is done through volatile
   // memory with atomic instructions to guarantee consistency. This (virtual)
-  // method exists so that derivced classes can do special things, such as
-  // tell the OS to write changes to disk now rather than when convenient.
+  // method exists so that derived classes can do special things, such as tell
+  // the OS to write changes to disk now rather than when convenient.
 }
 
 void PersistentMemoryAllocator::RecordError(int error) const {
@@ -992,7 +1001,7 @@
 }
 
 void PersistentMemoryAllocator::UpdateTrackingHistograms() {
-  DCHECK(!readonly_);
+  DCHECK_NE(access_mode_, kReadOnly);
   if (used_histogram_) {
     MemoryInfo meminfo;
     GetMemoryInfo(&meminfo);
@@ -1014,7 +1023,7 @@
                                 0,
                                 id,
                                 name,
-                                false) {}
+                                kReadWrite) {}
 
 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
   DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
@@ -1093,7 +1102,7 @@
                                 0,
                                 id,
                                 name,
-                                false),
+                                kReadWrite),
       shared_memory_(std::move(memory)) {}
 
 WritableSharedPersistentMemoryAllocator::
@@ -1118,7 +1127,7 @@
           0,
           id,
           name,
-          true),
+          kReadOnly),
       shared_memory_(std::move(memory)) {}
 
 ReadOnlySharedPersistentMemoryAllocator::
@@ -1138,14 +1147,14 @@
     size_t max_size,
     uint64_t id,
     base::StringPiece name,
-    bool read_only)
+    AccessMode access_mode)
     : PersistentMemoryAllocator(
           Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
           max_size != 0 ? max_size : file->length(),
           0,
           id,
           name,
-          read_only),
+          access_mode),
       mapped_file_(std::move(file)) {}
 
 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
@@ -1153,8 +1162,8 @@
 // static
 bool FilePersistentMemoryAllocator::IsFileAcceptable(
     const MemoryMappedFile& file,
-    bool read_only) {
-  return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
+    bool readonly) {
+  return IsMemoryAcceptable(file.data(), file.length(), 0, readonly);
 }
 
 void FilePersistentMemoryAllocator::Cache() {
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index 6001a2c..97fc517 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -273,6 +273,16 @@
     kSizeAny = 1  // Constant indicating that any array size is acceptable.
   };
 
+  // Indicates the mode for accessing the underlying data.
+  enum AccessMode {
+    kReadOnly,
+    kReadWrite,
+    // Opens existing, already-initialized data in R/W mode. If the passed data
+    // does not appear to have been initialized, nothing is written to it and
+    // the allocator marks itself as corrupt instead.
+    kReadWriteExisting,
+  };
+
   // This is the standard file extension (suitable for being passed to the
   // AddExtension() method of base::FilePath) for dumps of persistent memory.
   static const base::FilePath::CharType kFileExtension[];
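
Illustrative construction under each AccessMode (a sketch only; assumes |base| points at a mapped segment of |size| bytes that outlives the allocators):

  // Initializes the segment if it has not been initialized yet.
  PersistentMemoryAllocator writer(base, size, /*page_size=*/0, /*id=*/0,
                                   "Example",
                                   PersistentMemoryAllocator::kReadWrite);
  // Never writes to the segment; store as a const object.
  const PersistentMemoryAllocator reader(
      base, size, 0, 0, "", PersistentMemoryAllocator::kReadOnly);
  // Attaches read/write, but marks the allocator corrupt instead of
  // initializing the segment if it was never initialized.
  PersistentMemoryAllocator attacher(
      base, size, 0, 0, "", PersistentMemoryAllocator::kReadWriteExisting);
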
@@ -287,9 +297,9 @@
   // creation of the segment and can be checked by the caller for consistency.
   // The |name|, if provided, is used to distinguish histograms for this
   // allocator. Only the primary owner of the segment should define this value;
-  // other processes can learn it from the shared state. If the underlying
-  // memory is |readonly| then no changes will be made to it. The resulting
-  // object should be stored as a "const" pointer.
+  // other processes can learn it from the shared state. If the access mode
+  // is kReadOnly then no changes will be made to it. The resulting object
+  // should be stored as a "const" pointer.
   //
   // PersistentMemoryAllocator does NOT take ownership of the memory block.
   // The caller must manage it and ensure it stays available throughout the
@@ -304,9 +314,12 @@
   // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
   // method below) before construction if the definition of the segment can
   // vary in any way at run-time. Invalid memory segments will cause a crash.
-  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
-                            uint64_t id, base::StringPiece name,
-                            bool readonly);
+  PersistentMemoryAllocator(void* base,
+                            size_t size,
+                            size_t page_size,
+                            uint64_t id,
+                            base::StringPiece name,
+                            AccessMode access_mode);
 
   PersistentMemoryAllocator(const PersistentMemoryAllocator&) = delete;
   PersistentMemoryAllocator& operator=(const PersistentMemoryAllocator&) =
@@ -329,7 +342,7 @@
   const char* Name() const;
 
   // Is this segment open only for read?
-  bool IsReadonly() const { return readonly_; }
+  bool IsReadonly() const { return access_mode_ == kReadOnly; }
 
   // Manage the saved state of the memory.
   void SetMemoryState(uint8_t memory_state);
@@ -354,8 +367,11 @@
   // The |sync| parameter indicates if this call should block until the flush
   // is complete but is only advisory and may or may not have an effect
   // depending on the capabilities of the OS. Synchronous flushes are allowed
-  // only from theads that are allowed to do I/O but since |sync| is only
+  // only from threads that are allowed to do I/O but since |sync| is only
   // advisory, all flushes should be done on IO-capable threads.
+  // TODO: Since |sync| is ignored on Windows, consider making it re-post on a
+  // background thread with |sync| set to true so that |sync| is not just
+  // advisory.
   void Flush(bool sync);
 
   // Direct access to underlying memory segment. If the segment is shared
@@ -409,16 +425,16 @@
   // based on knowledge of how the allocator is being used.
   template <typename T>
   T* GetAsObject(Reference ref) {
-    static_assert(std::is_standard_layout<T>::value, "only standard objects");
-    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+    static_assert(std::is_standard_layout_v<T>, "only standard objects");
+    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
     static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
     return const_cast<T*>(reinterpret_cast<volatile T*>(
         GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
   }
   template <typename T>
   const T* GetAsObject(Reference ref) const {
-    static_assert(std::is_standard_layout<T>::value, "only standard objects");
-    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+    static_assert(std::is_standard_layout_v<T>, "only standard objects");
+    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
     static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
     return const_cast<const T*>(reinterpret_cast<const volatile T*>(
         GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
@@ -437,13 +453,13 @@
   // as char, float, double, or (u)intXX_t.
   template <typename T>
   T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
-    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
     return const_cast<T*>(reinterpret_cast<volatile T*>(
         GetBlockData(ref, type_id, count * sizeof(T))));
   }
   template <typename T>
   const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
-    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
     return const_cast<const T*>(reinterpret_cast<const volatile T*>(
         GetBlockData(ref, type_id, count * sizeof(T))));
   }
@@ -500,7 +516,8 @@
   // If there is some indication that the memory has become corrupted,
   // calling this will attempt to prevent further damage by indicating to
   // all processes that something is not as expected.
-  void SetCorrupt() const;
+  // If `allow_write` is false, the corrupt bit will not be written to the data.
+  void SetCorrupt(bool allow_write = true) const;
 
   // This can be called to determine if corruption has been detected in the
   // segment, possibly by a malicious actor. Once detected, future allocations
@@ -642,9 +659,12 @@
   // Constructs the allocator. Everything is the same as the public allocator
   // except |memory| which is a structure with additional information besides
   // the base address.
-  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
-                            uint64_t id, base::StringPiece name,
-                            bool readonly);
+  PersistentMemoryAllocator(Memory memory,
+                            size_t size,
+                            size_t page_size,
+                            uint64_t id,
+                            base::StringPiece name,
+                            AccessMode access_mode);
 
   // Implementation of Flush that accepts how much to flush.
   virtual void FlushPartial(size_t length, bool sync);
@@ -724,12 +744,17 @@
   // Returns the metadata version used in this allocator.
   uint32_t version() const;
 
-  const bool readonly_;                // Indicates access to read-only memory.
-  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.
+  const AccessMode access_mode_;
 
-  raw_ptr<HistogramBase> allocs_histogram_;  // Histogram recording allocs.
-  raw_ptr<HistogramBase> used_histogram_;    // Histogram recording used space.
-  raw_ptr<HistogramBase> errors_histogram_;  // Histogram recording errors.
+  // Local version of "corrupted" flag.
+  mutable std::atomic<bool> corrupt_ = false;
+
+  // Histogram recording allocs.
+  raw_ptr<HistogramBase> allocs_histogram_ = nullptr;
+  // Histogram recording used space.
+  raw_ptr<HistogramBase> used_histogram_ = nullptr;
+  // Histogram recording errors.
+  raw_ptr<HistogramBase> errors_histogram_ = nullptr;
 
   friend class metrics::FileMetricsProvider;
   friend class PersistentMemoryAllocatorTest;
@@ -835,7 +860,7 @@
                                 size_t max_size,
                                 uint64_t id,
                                 base::StringPiece name,
-                                bool read_only);
+                                AccessMode access_mode);
 
   FilePersistentMemoryAllocator(const FilePersistentMemoryAllocator&) = delete;
   FilePersistentMemoryAllocator& operator=(
@@ -921,7 +946,7 @@
   // The underlying object that does the actual allocation of memory. Its
   // lifetime must exceed that of all DelayedPersistentAllocation objects
   // that use it.
-  const raw_ptr<PersistentMemoryAllocator, LeakedDanglingUntriaged> allocator_;
+  const raw_ptr<PersistentMemoryAllocator> allocator_;
 
   // The desired type and size of the allocated segment plus the offset
   // within it for the defined request.
@@ -933,8 +958,7 @@
   // stored once the allocation is complete. If multiple delayed allocations
   // share the same pointer then an allocation on one will amount to an
   // allocation for all.
-  const raw_ptr<volatile std::atomic<Reference>, LeakedDanglingUntriaged>
-      reference_;
+  const raw_ptr<volatile std::atomic<Reference>, AllowPtrArithmetic> reference_;
 
   // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
 };
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index 82aaee5..50c21e5 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -83,7 +83,7 @@
     ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
     allocator_ = std::make_unique<PersistentMemoryAllocator>(
         mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, TEST_ID,
-        TEST_NAME, false);
+        TEST_NAME, PersistentMemoryAllocator::kReadWrite);
   }
 
   void TearDown() override {
@@ -232,7 +232,8 @@
   // Create second allocator (read/write) using the same memory segment.
   std::unique_ptr<PersistentMemoryAllocator> allocator2(
       new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
-                                    TEST_MEMORY_PAGE, 0, "", false));
+                                    TEST_MEMORY_PAGE, 0, "",
+                                    PersistentMemoryAllocator::kReadWrite));
   EXPECT_EQ(TEST_ID, allocator2->Id());
   EXPECT_FALSE(allocator2->used_histogram_);
 
@@ -247,7 +248,8 @@
   // Create a third allocator (read-only) using the same memory segment.
   std::unique_ptr<const PersistentMemoryAllocator> allocator3(
       new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
-                                    TEST_MEMORY_PAGE, 0, "", true));
+                                    TEST_MEMORY_PAGE, 0, "",
+                                    PersistentMemoryAllocator::kReadOnly));
   EXPECT_EQ(TEST_ID, allocator3->Id());
   EXPECT_FALSE(allocator3->used_histogram_);
 
@@ -307,7 +309,12 @@
       : SimpleThread(name, Options()),
         count_(0),
         iterable_(0),
-        allocator_(base, size, page_size, 0, std::string(), false) {}
+        allocator_(base,
+                   size,
+                   page_size,
+                   0,
+                   "",
+                   PersistentMemoryAllocator::kReadWrite) {}
 
   void Run() override {
     for (;;) {
@@ -757,7 +764,8 @@
   const size_t mmlength = mmfile->length();
   EXPECT_GE(meminfo1.total, mmlength);
 
-  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
+  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "",
+                                     FilePersistentMemoryAllocator::kReadWrite);
   EXPECT_FALSE(file.IsReadonly());
   EXPECT_EQ(TEST_ID, file.Id());
   EXPECT_FALSE(file.IsFull());
@@ -813,8 +821,9 @@
     ASSERT_TRUE(mmfile->Initialize(
         File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
         region, MemoryMappedFile::READ_WRITE_EXTEND));
-    FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
-                                            "", false);
+    FilePersistentMemoryAllocator allocator(
+        std::move(mmfile), region.size, 0, "",
+        FilePersistentMemoryAllocator::kReadWrite);
     EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
 
     allocator.Allocate(111, 111);
@@ -879,8 +888,10 @@
     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
       // Make sure construction doesn't crash. It will, however, cause
       // error messages warning about a corrupted memory segment.
-      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
-                                              read_only);
+      FilePersistentMemoryAllocator allocator(
+          std::move(mmfile), 0, 0, "",
+          read_only ? FilePersistentMemoryAllocator::kReadOnly
+                    : FilePersistentMemoryAllocator::kReadWrite);
       // Also make sure that iteration doesn't crash.
       PersistentMemoryAllocator::Iterator iter(&allocator);
       uint32_t type_id;
@@ -921,8 +932,10 @@
     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
       // Make sure construction doesn't crash. It will, however, cause
       // error messages warning about a corrupted memory segment.
-      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
-                                              read_only);
+      FilePersistentMemoryAllocator allocator(
+          std::move(mmfile), 0, 0, "",
+          read_only ? FilePersistentMemoryAllocator::kReadOnly
+                    : FilePersistentMemoryAllocator::kReadWrite);
       EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
     } else {
       // For filesize >= minsize, the file must be acceptable. This
@@ -981,8 +994,10 @@
       ASSERT_TRUE(
           FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only));
 
-      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
-                                              read_only);
+      FilePersistentMemoryAllocator allocator(
+          std::move(mmfile), 0, 0, "",
+          read_only ? FilePersistentMemoryAllocator::kReadOnly
+                    : FilePersistentMemoryAllocator::kReadWrite);
 
       PersistentMemoryAllocator::Iterator iter(&allocator);
       uint32_t type_id;
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
index 259e53d..adf9367 100644
--- a/base/metrics/persistent_sample_map.cc
+++ b/base/metrics/persistent_sample_map.cc
@@ -124,10 +124,7 @@
     Metadata* meta)
     : HistogramSamples(id, meta), allocator_(allocator) {}
 
-PersistentSampleMap::~PersistentSampleMap() {
-  if (records_)
-    records_->Release(this);
-}
+PersistentSampleMap::~PersistentSampleMap() = default;
 
 void PersistentSampleMap::Accumulate(Sample value, Count count) {
   // We have to do the following atomically, because even if the caller is using
@@ -149,7 +146,8 @@
 Count PersistentSampleMap::TotalCount() const {
   // Have to override "const" in order to make sure all samples have been
   // loaded before trying to iterate over the map.
-  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(
+      /*until_value=*/absl::nullopt);
 
   Count count = 0;
   for (const auto& entry : sample_counts_) {
@@ -161,28 +159,41 @@
 std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
   // Have to override "const" in order to make sure all samples have been
   // loaded before trying to iterate over the map.
-  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(
+      /*until_value=*/absl::nullopt);
   return std::make_unique<PersistentSampleMapIterator>(sample_counts_);
 }
 
 std::unique_ptr<SampleCountIterator> PersistentSampleMap::ExtractingIterator() {
   // Make sure all samples have been loaded before trying to iterate over the
   // map.
-  ImportSamples(-1, true);
+  ImportSamples(/*until_value=*/absl::nullopt);
   return std::make_unique<ExtractingPersistentSampleMapIterator>(
       sample_counts_);
 }
 
+bool PersistentSampleMap::IsDefinitelyEmpty() const {
+  // Not implemented.
+  NOTREACHED();
+
+  // Always return false. If we are wrong, this will just make the caller
+  // perform some extra work thinking that |this| is non-empty.
+  return false;
+}
+
 // static
 PersistentMemoryAllocator::Reference
 PersistentSampleMap::GetNextPersistentRecord(
     PersistentMemoryAllocator::Iterator& iterator,
-    uint64_t* sample_map_id) {
+    uint64_t* sample_map_id,
+    Sample* value) {
   const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
-  if (!record)
+  if (!record) {
     return 0;
+  }
 
   *sample_map_id = record->id;
+  *value = record->value;
   return iterator.GetAsReference(record);
 }
 
@@ -244,7 +255,7 @@
     return it->second;
 
   // Import any new samples from persistent memory looking for the value.
-  return ImportSamples(value, false);
+  return ImportSamples(/*until_value=*/value);
 }
 
 Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
@@ -255,7 +266,7 @@
 
   // Create a new record in persistent memory for the value. |records_| will
   // have been initialized by the GetSampleCountStorage() call above.
-  DCHECK(records_);
+  CHECK(records_);
   PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
   if (!ref) {
     // If a new record could not be created then the underlying allocator is
@@ -276,7 +287,7 @@
   // Thread-safety within a process where multiple threads use the same
   // histogram object is delegated to the controlling histogram object which,
   // for sparse histograms, is a lock object.
-  count_pointer = ImportSamples(value, false);
+  count_pointer = ImportSamples(/*until_value=*/value);
   DCHECK(count_pointer);
   return count_pointer;
 }
@@ -287,47 +298,51 @@
   // and if both were to grab the records object, there would be a conflict.
   // Use of a histogram, and thus a call to this method, won't occur until
   // after the histogram has been de-dup'd.
-  if (!records_)
-    records_ = allocator_->UseSampleMapRecords(id(), this);
-  return records_;
+  if (!records_) {
+    records_ = allocator_->CreateSampleMapRecords(id());
+  }
+  return records_.get();
 }
 
-Count* PersistentSampleMap::ImportSamples(Sample until_value,
-                                          bool import_everything) {
-  Count* found_count = nullptr;
-  PersistentMemoryAllocator::Reference ref;
+Count* PersistentSampleMap::ImportSamples(absl::optional<Sample> until_value) {
+  std::vector<PersistentMemoryAllocator::Reference> refs;
   PersistentSampleMapRecords* records = GetRecords();
-  while ((ref = records->GetNext()) != 0) {
-    SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
-    if (!record)
-      continue;
+  while (!(refs = records->GetNextRecords(until_value)).empty()) {
+    // GetNextRecords() returns a list of new unseen records belonging to this
+    // map. Iterate through them all and store them internally. Note that if
+    // |until_value| was found, it will be the last element in |refs|.
+    for (auto ref : refs) {
+      SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
+      if (!record) {
+        continue;
+      }
 
-    DCHECK_EQ(id(), record->id);
+      DCHECK_EQ(id(), record->id);
 
-    // Check if the record's value is already known.
-    if (!Contains(sample_counts_, record->value)) {
-      // No: Add it to map of known values.
-      sample_counts_[record->value] = &record->count;
-    } else {
-      // Yes: Ignore it; it's a duplicate caused by a race condition -- see
-      // code & comment in GetOrCreateSampleCountStorage() for details.
-      // Check that nothing ever operated on the duplicate record.
-      DCHECK_EQ(0, record->count);
-    }
+      // Check if the record's value is already known.
+      if (!Contains(sample_counts_, record->value)) {
+        // No: Add it to map of known values.
+        sample_counts_[record->value] = &record->count;
+      } else {
+        // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+        // code & comment in GetOrCreateSampleCountStorage() for details.
+        // Check that nothing ever operated on the duplicate record.
+        DCHECK_EQ(0, record->count);
+      }
 
-    // Check if it's the value being searched for and, if so, keep a pointer
-    // to return later. Stop here unless everything is being imported.
-    // Because race conditions can cause multiple records for a single value,
-    // be sure to return the first one found.
-    if (record->value == until_value) {
-      if (!found_count)
-        found_count = &record->count;
-      if (!import_everything)
-        break;
+      // Check if it's the value being searched for and, if so, stop here.
+      // Because race conditions can cause multiple records for a single value,
+      // be sure to return the first one found.
+      if (until_value.has_value() && record->value == until_value.value()) {
+        // Ensure that this was the last value in |refs|.
+        CHECK_EQ(refs.back(), ref);
+
+        return &record->count;
+      }
     }
   }
 
-  return found_count;
+  return nullptr;
 }
 
 }  // namespace base
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
index cd336a2..d3eb4c3 100644
--- a/base/metrics/persistent_sample_map.h
+++ b/base/metrics/persistent_sample_map.h
@@ -20,6 +20,7 @@
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/persistent_memory_allocator.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
 
@@ -48,13 +49,16 @@
   HistogramBase::Count TotalCount() const override;
   std::unique_ptr<SampleCountIterator> Iterator() const override;
   std::unique_ptr<SampleCountIterator> ExtractingIterator() override;
+  bool IsDefinitelyEmpty() const override;
 
   // Uses a persistent-memory |iterator| to locate and return information about
-  // the next record holding information for a PersistentSampleMap. The record
+  // the next record holding information for a PersistentSampleMap (in
+  // particular, the reference and the sample |value| it holds). The record
   // could be for any Map so return the |sample_map_id| as well.
   static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
       PersistentMemoryAllocator::Iterator& iterator,
-      uint64_t* sample_map_id);
+      uint64_t* sample_map_id,
+      HistogramBase::Sample* value);
 
   // Creates a new record in an |allocator| storing count information for a
   // specific sample |value| of a histogram with the given |sample_map_id|.
@@ -81,15 +85,15 @@
   // |records_| member after first initializing it if necessary.
   PersistentSampleMapRecords* GetRecords();
 
-  // Imports samples from persistent memory by iterating over all sample
-  // records found therein, adding them to the sample_counts_ map. If a
-  // count for the sample |until_value| is found, stop the import and return
-  // a pointer to that counter. If that value is not found, null will be
-  // returned after all currently available samples have been loaded. Pass
-  // true for |import_everything| to force the importing of all available
-  // samples even if a match is found.
-  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
-                                      bool import_everything);
+  // Imports samples from persistent memory by iterating over all sample records
+  // found therein, adding them to the sample_counts_ map. If a count for the
+  // sample |until_value| is found, stop the import and return a pointer to that
+  // counter. If that value is not found, null will be returned after all
+  // currently available samples have been loaded. Pass a nullopt for
+  // |until_value| to force the importing of all available samples (null will
+  // always be returned in this case).
+  HistogramBase::Count* ImportSamples(
+      absl::optional<HistogramBase::Sample> until_value);
 
   // All created/loaded sample values and their associated counts. The storage
   // for the actual Count numbers is owned by the |records_| object and its
@@ -98,14 +102,13 @@
 
   // The allocator that manages histograms inside persistent memory. This is
   // owned externally and is expected to live beyond the life of this object.
-  raw_ptr<PersistentHistogramAllocator, LeakedDanglingUntriaged> allocator_;
+  raw_ptr<PersistentHistogramAllocator> allocator_;
 
-  // The object that manages sample records inside persistent memory. This is
-  // owned by the |allocator_| object (above) and so, like it, is expected to
-  // live beyond the life of this object. This value is lazily-initialized on
-  // first use via the GetRecords() accessor method.
-  raw_ptr<PersistentSampleMapRecords, LeakedDanglingUntriaged> records_ =
-      nullptr;
+  // The object that manages sample records inside persistent memory. The
+  // underlying data used is owned by the |allocator_| object (above). This
+  // value is lazily-initialized on first use via the GetRecords() accessor
+  // method.
+  std::unique_ptr<PersistentSampleMapRecords> records_ = nullptr;
 };
 
 }  // namespace base
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
index 80d85cd..9f6d76f 100644
--- a/base/metrics/persistent_sample_map_unittest.cc
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -25,7 +25,8 @@
   return std::make_unique<PersistentHistogramAllocator>(
       std::make_unique<PersistentMemoryAllocator>(
           const_cast<void*>(original->data()), original->length(), 0,
-          original->Id(), original->Name(), false));
+          original->Id(), original->Name(),
+          PersistentMemoryAllocator::kReadWrite));
 }
 
 TEST(PersistentSampleMapTest, AccumulateTest) {
diff --git a/base/metrics/ranges_manager.cc b/base/metrics/ranges_manager.cc
index 801501f..e8d99c6 100644
--- a/base/metrics/ranges_manager.cc
+++ b/base/metrics/ranges_manager.cc
@@ -9,8 +9,9 @@
 RangesManager::RangesManager() = default;
 
 RangesManager::~RangesManager() {
-  if (!do_not_release_ranges_on_destroy_for_testing_)
+  if (!do_not_release_ranges_on_destroy_for_testing_) {
     ReleaseBucketRanges();
+  }
 }
 
 size_t RangesManager::BucketRangesHash::operator()(
@@ -24,39 +25,73 @@
   return a->Equals(b);
 }
 
-const BucketRanges* RangesManager::RegisterOrDeleteDuplicateRanges(
+const BucketRanges* RangesManager::GetOrRegisterCanonicalRanges(
     const BucketRanges* ranges) {
+  // Note: This code is run in a critical lock path from StatisticsRecorder
+  // so we intentionally don't use a CHECK() here.
   DCHECK(ranges->HasValidChecksum());
 
   // Attempt to insert |ranges| into the set of registered BucketRanges. If an
   // equivalent one already exists (one with the exact same ranges), this
   // fetches the pre-existing one and does not insert the passed |ranges|.
-  const BucketRanges* const registered = *ranges_.insert(ranges).first;
-
-  // If there is already a registered equivalent BucketRanges, delete the passed
-  // |ranges|.
-  if (registered != ranges)
-    delete ranges;
-
-  return registered;
+  return *GetRanges().insert(ranges).first;
 }
 
 std::vector<const BucketRanges*> RangesManager::GetBucketRanges() const {
   std::vector<const BucketRanges*> out;
-  out.reserve(ranges_.size());
-  out.assign(ranges_.begin(), ranges_.end());
+  out.reserve(GetRanges().size());
+  out.assign(GetRanges().begin(), GetRanges().end());
   return out;
 }
 
 void RangesManager::ReleaseBucketRanges() {
-  for (auto* range : ranges_) {
+  for (auto* range : GetRanges()) {
     delete range;
   }
-  ranges_.clear();
+  GetRanges().clear();
+}
+
+RangesManager::RangesMap& RangesManager::GetRanges() {
+  return ranges_;
+}
+
+const RangesManager::RangesMap& RangesManager::GetRanges() const {
+  return ranges_;
 }
 
 void RangesManager::DoNotReleaseRangesOnDestroyForTesting() {
   do_not_release_ranges_on_destroy_for_testing_ = true;
 }
 
+ThreadSafeRangesManager::ThreadSafeRangesManager() = default;
+
+ThreadSafeRangesManager::~ThreadSafeRangesManager() = default;
+
+const BucketRanges* ThreadSafeRangesManager::GetOrRegisterCanonicalRanges(
+    const BucketRanges* ranges) {
+  base::AutoLock auto_lock(lock_);
+  return RangesManager::GetOrRegisterCanonicalRanges(ranges);
+}
+
+std::vector<const BucketRanges*> ThreadSafeRangesManager::GetBucketRanges()
+    const {
+  base::AutoLock auto_lock(lock_);
+  return RangesManager::GetBucketRanges();
+}
+
+void ThreadSafeRangesManager::ReleaseBucketRanges() {
+  base::AutoLock auto_lock(lock_);
+  RangesManager::ReleaseBucketRanges();
+}
+
+RangesManager::RangesMap& ThreadSafeRangesManager::GetRanges() {
+  lock_.AssertAcquired();
+  return RangesManager::GetRanges();
+}
+
+const RangesManager::RangesMap& ThreadSafeRangesManager::GetRanges() const {
+  lock_.AssertAcquired();
+  return RangesManager::GetRanges();
+}
+
 }  // namespace base
diff --git a/base/metrics/ranges_manager.h b/base/metrics/ranges_manager.h
index 1b7f888..e6e706b 100644
--- a/base/metrics/ranges_manager.h
+++ b/base/metrics/ranges_manager.h
@@ -6,8 +6,11 @@
 #define BASE_METRICS_RANGES_MANAGER_H_
 
 #include <unordered_set>
+#include <vector>
+
 #include "base/base_export.h"
 #include "base/metrics/bucket_ranges.h"
+#include "base/synchronization/lock.h"
 
 namespace base {
 
@@ -24,19 +27,19 @@
   RangesManager(const RangesManager&) = delete;
   RangesManager& operator=(const RangesManager&) = delete;
 
-  ~RangesManager();
+  virtual ~RangesManager();
 
-  // Registers a BucketRanges. If an equivalent BucketRanges is already
-  // registered, then the argument |ranges| will be deleted. The returned value
-  // is always the registered BucketRanges (either the argument, or the
-  // pre-existing one). Registering a BucketRanges passes the ownership, and
-  // will be released when the RangesManager is released.
-  const BucketRanges* RegisterOrDeleteDuplicateRanges(
+  // Gets the canonical BucketRanges object corresponding to `ranges`. If one
+  // does not exist, then `ranges` will be registered with this object, which
+  // will take ownership of it. Returns a pointer to the canonical ranges
+  // object. If it's different than `ranges`, the caller is responsible for
+  // deleting `ranges`.
+  virtual const BucketRanges* GetOrRegisterCanonicalRanges(
       const BucketRanges* ranges);
 
   // Gets all registered BucketRanges. The order of returned BucketRanges is not
   // guaranteed.
-  std::vector<const BucketRanges*> GetBucketRanges() const;
+  virtual std::vector<const BucketRanges*> GetBucketRanges() const;
 
   // Some tests may instantiate temporary StatisticsRecorders, each having their
   // own RangesManager. During the tests, ranges may get registered with a
@@ -45,11 +48,7 @@
   // deleted.
   void DoNotReleaseRangesOnDestroyForTesting();
 
- private:
-  // Removes all registered BucketRanges and destroys them. This is called in
-  // the destructor.
-  void ReleaseBucketRanges();
-
+ protected:
   // Used to get the hash of a BucketRanges, which is simply its checksum.
   struct BucketRangesHash {
     size_t operator()(const BucketRanges* a) const;
@@ -66,6 +65,14 @@
       unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
           RangesMap;
 
+  // Removes all registered BucketRanges and destroys them. This is called in
+  // the destructor.
+  virtual void ReleaseBucketRanges();
+
+  virtual RangesMap& GetRanges();
+  virtual const RangesMap& GetRanges() const;
+
+ private:
   // The set of unique BucketRanges registered to the RangesManager.
   RangesMap ranges_;
 
@@ -74,6 +81,31 @@
   bool do_not_release_ranges_on_destroy_for_testing_ = false;
 };
 
+class BASE_EXPORT ThreadSafeRangesManager final : public RangesManager {
+ public:
+  ThreadSafeRangesManager();
+
+  ThreadSafeRangesManager(const ThreadSafeRangesManager&) = delete;
+  ThreadSafeRangesManager& operator=(const ThreadSafeRangesManager&) = delete;
+
+  ~ThreadSafeRangesManager() override;
+
+  // RangesManager:
+  const BucketRanges* GetOrRegisterCanonicalRanges(
+      const BucketRanges* ranges) override;
+  std::vector<const BucketRanges*> GetBucketRanges() const override;
+
+ protected:
+  // RangesManager:
+  void ReleaseBucketRanges() override;
+  RangesMap& GetRanges() override;
+  const RangesMap& GetRanges() const override;
+
+ private:
+  // Used to protect access to |ranges_|.
+  mutable base::Lock lock_;
+};
+
 }  // namespace base
 
 #endif  // BASE_METRICS_RANGES_MANAGER_H_
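The header comment above spells out the new ownership contract: unlike RegisterOrDeleteDuplicateRanges(), GetOrRegisterCanonicalRanges() never deletes a duplicate, so the caller must. A minimal caller-side sketch of that contract (the helper function is hypothetical; it mirrors how StatisticsRecorder uses the API later in this CL):

#include "base/metrics/bucket_ranges.h"
#include "base/metrics/ranges_manager.h"

// Sketch only: caller-side handling of GetOrRegisterCanonicalRanges().
const base::BucketRanges* RegisterThreeBucketRanges(
    base::RangesManager& manager) {
  base::BucketRanges* ranges = new base::BucketRanges(3);
  ranges->ResetChecksum();
  const base::BucketRanges* canonical =
      manager.GetOrRegisterCanonicalRanges(ranges);
  if (canonical != ranges) {
    // An equivalent BucketRanges was already registered. The manager did not
    // take ownership of |ranges|, so the caller deletes it.
    delete ranges;
  }
  // |canonical| is owned by |manager| for the manager's lifetime.
  return canonical;
}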
diff --git a/base/metrics/ranges_manager_unittest.cc b/base/metrics/ranges_manager_unittest.cc
index e0906d9..6d839e1 100644
--- a/base/metrics/ranges_manager_unittest.cc
+++ b/base/metrics/ranges_manager_unittest.cc
@@ -13,7 +13,7 @@
 
 using testing::UnorderedElementsAre;
 
-TEST(RangesManagerTest, RegisterBucketRanges) {
+TEST(RangesManagerTest, GetOrRegisterCanonicalRanges) {
   RangesManager ranges_manager;
 
   // Create some BucketRanges. We call |ResetChecksum| to calculate and set
@@ -27,14 +27,14 @@
   ranges2->ResetChecksum();
 
   // Register new ranges.
-  EXPECT_EQ(ranges1, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges1));
-  EXPECT_EQ(ranges2, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges2));
+  EXPECT_EQ(ranges1, ranges_manager.GetOrRegisterCanonicalRanges(ranges1));
+  EXPECT_EQ(ranges2, ranges_manager.GetOrRegisterCanonicalRanges(ranges2));
   EXPECT_THAT(ranges_manager.GetBucketRanges(),
               UnorderedElementsAre(ranges1, ranges2));
 
   // Register |ranges1| again. The registered BucketRanges set should not change
   // as |ranges1| is already registered.
-  EXPECT_EQ(ranges1, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges1));
+  EXPECT_EQ(ranges1, ranges_manager.GetOrRegisterCanonicalRanges(ranges1));
   EXPECT_THAT(ranges_manager.GetBucketRanges(),
               UnorderedElementsAre(ranges1, ranges2));
 
@@ -45,12 +45,13 @@
   EXPECT_EQ(0, ranges1->range(1));
   EXPECT_EQ(0, ranges1->range(2));
 
-  // Register a new |ranges3| that is equivalent to |ranges1| (same ranges). We
-  // expect that |ranges3| is deleted (verified by LeakSanitizer bots) and that
-  // |ranges1| is returned by |RegisterOrDeleteDuplicateRanges|.
+  // Register a new |ranges3| that is equivalent to |ranges1| (same ranges). If
+  // GetOrRegisterCanonicalRanges() returns a different object than the param
+  // (as asserted here), we are responsible for deleting the object (below).
   BucketRanges* ranges3 = new BucketRanges(3);
   ranges3->ResetChecksum();
-  EXPECT_EQ(ranges1, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges3));
+  ASSERT_EQ(ranges1, ranges_manager.GetOrRegisterCanonicalRanges(ranges3));
+  delete ranges3;
   EXPECT_THAT(ranges_manager.GetBucketRanges(),
               UnorderedElementsAre(ranges1, ranges2));
 }
@@ -68,7 +69,7 @@
   ranges->ResetChecksum();
 
   // Register new range.
-  EXPECT_EQ(ranges, ranges_manager->RegisterOrDeleteDuplicateRanges(ranges));
+  EXPECT_EQ(ranges, ranges_manager->GetOrRegisterCanonicalRanges(ranges));
   EXPECT_THAT(ranges_manager->GetBucketRanges(), UnorderedElementsAre(ranges));
 
   // Explicitly destroy |ranges_manager|.
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
index f0eed32..427db0c 100644
--- a/base/metrics/sample_map.cc
+++ b/base/metrics/sample_map.cc
@@ -4,6 +4,8 @@
 
 #include "base/metrics/sample_map.h"
 
+#include <type_traits>
+
 #include "base/check.h"
 #include "base/numerics/safe_conversions.h"
 
@@ -138,21 +140,39 @@
   return std::make_unique<ExtractingSampleMapIterator>(sample_counts_);
 }
 
+bool SampleMap::IsDefinitelyEmpty() const {
+  // If |sample_counts_| is empty (no entry was ever inserted), then return
+  // true. If it does contain some entries, then it may or may not have samples
+  // (e.g. it's possible all entries have a bucket count of 0). Just return
+  // false in this case. If we are wrong, this will just make the caller perform
+  // some extra work thinking that |this| is non-empty.
+  return HistogramSamples::IsDefinitelyEmpty() && sample_counts_.empty();
+}
+
 bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
   Sample min;
   int64_t max;
   Count count;
   for (; !iter->Done(); iter->Next()) {
     iter->Get(&min, &max, &count);
-    if (strict_cast<int64_t>(min) + 1 != max)
+    if (strict_cast<int64_t>(min) + 1 != max) {
       return false;  // SparseHistogram only supports bucket with size 1.
+    }
+
+    // Note that we do not need to check that count != 0, since Next() above
+    // will skip empty buckets.
 
     // We do not have to do the following atomically -- if the caller needs
     // thread safety, they should use a lock. And since this is in local memory,
     // if a lock is used, we know the value would not be concurrently modified
     // by a different process (in contrast to PersistentSampleMap, where the
     // value in shared memory may be modified concurrently by a subprocess).
-    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
+    Count& sample_ref = sample_counts_[min];
+    if (op == HistogramSamples::ADD) {
+      sample_ref = base::WrappingAdd(sample_ref, count);
+    } else {
+      sample_ref = base::WrappingSub(sample_ref, count);
+    }
   }
   return true;
 }
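The switch to base::WrappingAdd/WrappingSub above makes the bucket-count arithmetic well defined on overflow (two's-complement wraparound) instead of relying on signed overflow, which is undefined behavior. A standalone sketch of the same wrapping behavior using plain unsigned arithmetic rather than the base/numerics helpers (names here are hypothetical):

#include <cstdint>

// Sketch only: modular (wrapping) int32_t addition/subtraction. Overflow in
// unsigned arithmetic is well defined; the conversion back to int32_t is
// two's-complement on all Chromium targets.
int32_t WrappingAddSketch(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int32_t WrappingSubSketch(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) -
                              static_cast<uint32_t>(b));
}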
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
index 7caf976..e62e6a7 100644
--- a/base/metrics/sample_map.h
+++ b/base/metrics/sample_map.h
@@ -39,6 +39,7 @@
   HistogramBase::Count TotalCount() const override;
   std::unique_ptr<SampleCountIterator> Iterator() const override;
   std::unique_ptr<SampleCountIterator> ExtractingIterator() override;
+  bool IsDefinitelyEmpty() const override;
 
  protected:
   // Performs arithemetic. |op| is ADD or SUBTRACT.
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
index 0192748..b9f6fda 100644
--- a/base/metrics/sample_vector.cc
+++ b/base/metrics/sample_vector.cc
@@ -80,7 +80,7 @@
     }
   }
 
-  raw_ptr<T> counts_;
+  raw_ptr<T, AllowPtrArithmetic> counts_;
   size_t counts_size_;
   raw_ptr<const BucketRanges> bucket_ranges_;
 
@@ -462,6 +462,18 @@
 
 SampleVector::~SampleVector() = default;
 
+bool SampleVector::IsDefinitelyEmpty() const {
+  // If we are still using SingleSample, and it has a count of 0, then |this|
+  // has no samples. If we are not using SingleSample, always return false, even
+  // though it is possible that |this| has no samples (e.g. we are using a
+  // counts array and all the bucket counts are 0). If we are wrong, this will
+  // just make the caller perform some extra work thinking that |this| is
+  // non-empty.
+  AtomicSingleSample sample = single_sample();
+  return HistogramSamples::IsDefinitelyEmpty() && !sample.IsDisabled() &&
+         sample.Load().count == 0;
+}
+
 bool SampleVector::MountExistingCountsStorage() const {
   // There is never any existing storage other than what is already in use.
   return counts() != nullptr;
@@ -592,6 +604,15 @@
 
 PersistentSampleVector::~PersistentSampleVector() = default;
 
+bool PersistentSampleVector::IsDefinitelyEmpty() const {
+  // Not implemented.
+  NOTREACHED();
+
+  // Always return false. If we are wrong, this will just make the caller
+  // perform some extra work thinking that |this| is non-empty.
+  return false;
+}
+
 bool PersistentSampleVector::MountExistingCountsStorage() const {
   // There is no early exit if counts is not yet mounted because, given that
   // this is a virtual function, it's more efficient to do that at the call-
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
index 0f22647..b1059ec 100644
--- a/base/metrics/sample_vector.h
+++ b/base/metrics/sample_vector.h
@@ -113,7 +113,7 @@
   mutable std::atomic<HistogramBase::AtomicCount*> counts_{nullptr};
 
   // Shares the same BucketRanges with Histogram object.
-  const raw_ptr<const BucketRanges, LeakedDanglingUntriaged> bucket_ranges_;
+  const raw_ptr<const BucketRanges> bucket_ranges_;
 };
 
 // A sample vector that uses local memory for the counts array.
@@ -125,6 +125,9 @@
   SampleVector& operator=(const SampleVector&) = delete;
   ~SampleVector() override;
 
+  // HistogramSamples:
+  bool IsDefinitelyEmpty() const override;
+
  private:
   FRIEND_TEST_ALL_PREFIXES(SampleVectorTest, GetPeakBucketSize);
 
@@ -165,6 +168,9 @@
   PersistentSampleVector& operator=(const PersistentSampleVector&) = delete;
   ~PersistentSampleVector() override;
 
+  // HistogramSamples:
+  bool IsDefinitelyEmpty() const override;
+
  private:
   // SampleVectorBase:
   bool MountExistingCountsStorage() const override;
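IsDefinitelyEmpty() is deliberately conservative: a false "maybe non-empty" answer only costs the caller some extra work, while a false "empty" answer would drop samples, so it is never allowed. A hypothetical caller-side sketch of how such a check is typically used (the function name is illustrative, not part of this CL):

#include <memory>

#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"

// Sketch only: skip expensive work when the snapshot is provably empty;
// a "maybe non-empty" result just falls through to the slow path.
void MaybeUploadDelta(base::HistogramBase& histogram) {
  std::unique_ptr<base::HistogramSamples> delta = histogram.SnapshotDelta();
  if (delta->IsDefinitelyEmpty()) {
    return;  // Nothing to report; false positives are excluded by contract.
  }
  // Slow path: serialize and upload |delta| (elided).
}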
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index 7bd3775..835f8af 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -195,6 +195,26 @@
   EXPECT_EQ(10, samples->sum());
 }
 
+// Check that IsDefinitelyEmpty() works with the results of SnapshotDelta().
+TEST_P(SparseHistogramTest, IsDefinitelyEmpty_SnapshotDelta) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+
+  // No samples initially.
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+
+  histogram->Add(1);
+  EXPECT_FALSE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  histogram->Add(10);
+  histogram->Add(10);
+  EXPECT_FALSE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  histogram->Add(1);
+  histogram->Add(50);
+  EXPECT_FALSE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+  EXPECT_TRUE(histogram->SnapshotDelta()->IsDefinitelyEmpty());
+}
+
 TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
   std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index c2c33e7..8069a51 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -4,10 +4,9 @@
 
 #include "base/metrics/statistics_recorder.h"
 
-#include <memory>
-
 #include "base/at_exit.h"
 #include "base/barrier_closure.h"
+#include "base/command_line.h"
 #include "base/containers/contains.h"
 #include "base/debug/leak_annotations.h"
 #include "base/json/string_escape.h"
@@ -18,14 +17,39 @@
 #include "base/metrics/metrics_hashes.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/record_histogram_checker.h"
+#include "base/rand_util.h"
 #include "base/ranges/algorithm.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/values.h"
+#include "build/build_config.h"
 
 namespace base {
 namespace {
 
+// Whether a 50/50 trial for using an R/W lock should be run.
+// Restrict it to Windows for now as other platforms show poor results.
+#if BUILDFLAG(IS_WIN)
+constexpr bool kRunRwLockTrial = true;
+#else
+constexpr bool kRunRwLockTrial = false;
+#endif  // BUILDFLAG(IS_WIN)
+
+// Whether the R/W lock should be used when the trial is not active.
+// Only enabled on Windows for now, since other platforms show poor results.
+#if BUILDFLAG(IS_WIN)
+constexpr bool kUseRwLockByDefault = true;
+#else
+constexpr bool kUseRwLockByDefault = false;
+#endif  // BUILDFLAG(IS_WIN)
+
+bool EnableBenchmarking() {
+  // TODO(asvitkine): If this code ends up not being temporary, refactor it to
+  // not duplicate the constant name. (Right now it's at a different layer.)
+  return CommandLine::InitializedForCurrentProcess() &&
+         CommandLine::ForCurrentProcess()->HasSwitch("enable-benchmarking");
+}
+
 bool HistogramNameLesser(const base::HistogramBase* a,
                          const base::HistogramBase* b) {
   return strcmp(a->histogram_name(), b->histogram_name()) < 0;
@@ -34,7 +58,7 @@
 }  // namespace
 
 // static
-LazyInstance<absl::Mutex>::Leaky StatisticsRecorder::lock_ =
+LazyInstance<StatisticsRecorder::SrLock>::Leaky StatisticsRecorder::lock_ =
     LAZY_INSTANCE_INITIALIZER;
 
 // static
@@ -78,14 +102,14 @@
 }
 
 StatisticsRecorder::~StatisticsRecorder() {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   DCHECK_EQ(this, top_);
   top_ = previous_;
 }
 
 // static
 void StatisticsRecorder::EnsureGlobalRecorderWhileLocked() {
-  lock_.Get().AssertHeld();
+  AssertLockHeld();
   if (top_) {
     return;
   }
@@ -99,7 +123,7 @@
 // static
 void StatisticsRecorder::RegisterHistogramProvider(
     const WeakPtr<HistogramProvider>& provider) {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
   top_->providers_.push_back(provider);
 }
@@ -107,33 +131,54 @@
 // static
 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
     HistogramBase* histogram) {
-  // Declared before |auto_lock| to ensure correct destruction order.
+  uint64_t hash = histogram->name_hash();
+
+  // Ensure that histograms use HashMetricName() to compute their hash, since
+  // that function is used to look up histograms.
+  DCHECK_EQ(hash, HashMetricName(histogram->histogram_name()));
+
+  // Declared before |auto_lock| so that the histogram is deleted after the lock
+  // is released (no point in holding the lock longer than needed).
   std::unique_ptr<HistogramBase> histogram_deleter;
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
 
-  const char* const name = histogram->histogram_name();
-  HistogramBase*& registered = top_->histograms_[name];
+  HistogramBase*& registered = top_->histograms_[hash];
 
   if (!registered) {
-    // |name| is guaranteed to never change or be deallocated so long
-    // as the histogram is alive (which is forever).
     registered = histogram;
     ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
     // If there are callbacks for this histogram, we set the kCallbackExists
     // flag.
-    if (base::Contains(top_->observers_, name))
+    if (base::Contains(top_->observers_, hash)) {
+      // Note: SetFlags() does not write to persistent memory, it only writes to
+      // an in-memory version of the flags.
       histogram->SetFlags(HistogramBase::kCallbackExists);
+    }
 
     return histogram;
   }
 
+  // Assert that there was no collision. Note that this is intentionally a
+  // DCHECK because 1) this is expensive to call repeatedly, and 2) this
+  // comparison may cause a read in persistent memory, which can cause I/O (this
+  // is bad because |lock_| is currently being held).
+  //
+  // If you are a developer adding a new histogram and this DCHECK is being hit,
+  // you are unluckily a victim of a hash collision. For now, the best solution
+  // is to rename the histogram. Reach out to [email protected] if
+  // you are unsure!
+  DCHECK_EQ(strcmp(histogram->histogram_name(), registered->histogram_name()),
+            0)
+      << "Histogram name hash collision between " << histogram->histogram_name()
+      << " and " << registered->histogram_name() << " (hash = " << hash << ")";
+
   if (histogram == registered) {
     // The histogram was registered before.
     return histogram;
   }
 
-  // We already have one histogram with this name.
+  // We already have a histogram with this name.
   histogram_deleter.reset(histogram);
   return registered;
 }
@@ -141,14 +186,20 @@
 // static
 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
     const BucketRanges* ranges) {
-  const absl::MutexLock auto_lock(lock_.Pointer());
-  EnsureGlobalRecorderWhileLocked();
+  const BucketRanges* registered;
+  {
+    const SrAutoWriterLock auto_lock(GetLock());
+    EnsureGlobalRecorderWhileLocked();
 
-  const BucketRanges* const registered =
-      top_->ranges_manager_.RegisterOrDeleteDuplicateRanges(ranges);
+    registered = top_->ranges_manager_.GetOrRegisterCanonicalRanges(ranges);
+  }
 
-  if (registered == ranges)
+  // Delete the duplicate ranges outside the lock to reduce contention.
+  if (registered != ranges) {
+    delete ranges;
+  } else {
     ANNOTATE_LEAKING_OBJECT_PTR(ranges);
+  }
 
   return registered;
 }
@@ -185,7 +236,7 @@
 
 // static
 std::vector<const BucketRanges*> StatisticsRecorder::GetBucketRanges() {
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -198,15 +249,17 @@
 
 // static
 HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
-  // This must be called *before* the lock is acquired below because it may
-  // call back into StatisticsRecorder to register histograms. Those called
-  // methods will acquire the lock at that time.
+  uint64_t hash = HashMetricName(name);
+
+  // This must be called *before* the lock is acquired below because it may call
+  // back into StatisticsRecorder to register histograms. Those called methods
+  // will acquire the lock at that time.
   ImportGlobalPersistentHistograms();
 
   // Acquire the lock in "read" mode since we're only reading the data, not
   // modifying anything. This allows multiple readers to look up histograms
   // concurrently.
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -214,14 +267,13 @@
     return nullptr;
   }
 
-  const HistogramMap::const_iterator it = const_top->histograms_.find(name);
-  return it != const_top->histograms_.end() ? it->second : nullptr;
+  return const_top->FindHistogramByHashInternal(hash, name);
 }
 
 // static
 StatisticsRecorder::HistogramProviders
 StatisticsRecorder::GetHistogramProviders() {
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -288,29 +340,74 @@
 
 // static
 void StatisticsRecorder::InitLogOnShutdown() {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   InitLogOnShutdownWhileLocked();
 }
 
 // static
+StringPiece StatisticsRecorder::GetLockTrialGroup() {
+  if (kRunRwLockTrial && !EnableBenchmarking()) {
+    return lock_.Get().use_shared_mutex() ? "Enabled" : "Disabled";
+  }
+  return StringPiece();
+}
+
+// static
+bool StatisticsRecorder::SrLock::ShouldUseSharedMutex() {
+  // Force deterministic results for benchmarks.
+  if (kRunRwLockTrial && !EnableBenchmarking()) {
+    return RandInt(0, 1) == 1;
+  }
+  return kUseRwLockByDefault;
+}
+
+HistogramBase* StatisticsRecorder::FindHistogramByHashInternal(
+    uint64_t hash,
+    StringPiece name) const {
+  AssertLockHeld();
+  const HistogramMap::const_iterator it = histograms_.find(hash);
+  if (it == histograms_.end()) {
+    return nullptr;
+  }
+  // Assert that there was no collision. Note that this is intentionally a
+  // DCHECK because 1) this is expensive to call repeatedly, and 2) this
+  // comparison may cause a read in persistent memory, which can cause I/O (this
+  // is bad because |lock_| is currently being held).
+  //
+  // If you are a developer adding a new histogram and this DCHECK is being hit,
+  // you are unluckily a victim of a hash collision. For now, the best solution
+  // is to rename the histogram. Reach out to [email protected] if
+  // you are unsure!
+  DCHECK_EQ(name, it->second->histogram_name())
+      << "Histogram name hash collision between " << name << " and "
+      << it->second->histogram_name() << " (hash = " << hash << ")";
+  return it->second;
+}
+
+// static
 void StatisticsRecorder::AddHistogramSampleObserver(
     const std::string& name,
     StatisticsRecorder::ScopedHistogramSampleObserver* observer) {
   DCHECK(observer);
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  uint64_t hash = HashMetricName(name);
+
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
 
-  auto iter = top_->observers_.find(name);
+  auto iter = top_->observers_.find(hash);
   if (iter == top_->observers_.end()) {
     top_->observers_.insert(
-        {name, base::MakeRefCounted<HistogramSampleObserverList>()});
+        {hash, base::MakeRefCounted<HistogramSampleObserverList>()});
   }
 
-  top_->observers_[name]->AddObserver(observer);
+  top_->observers_[hash]->AddObserver(observer);
 
-  const HistogramMap::const_iterator it = top_->histograms_.find(name);
-  if (it != top_->histograms_.end())
-    it->second->SetFlags(HistogramBase::kCallbackExists);
+  HistogramBase* histogram = top_->FindHistogramByHashInternal(hash, name);
+  if (histogram) {
+    // Note: SetFlags() does not write to persistent memory, it only writes to
+    // an in-memory version of the flags.
+    histogram->SetFlags(HistogramBase::kCallbackExists);
+  }
 
   have_active_callbacks_.store(
       global_sample_callback() || !top_->observers_.empty(),
@@ -321,21 +418,26 @@
 void StatisticsRecorder::RemoveHistogramSampleObserver(
     const std::string& name,
     StatisticsRecorder::ScopedHistogramSampleObserver* observer) {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  uint64_t hash = HashMetricName(name);
+
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
 
-  auto iter = top_->observers_.find(name);
+  auto iter = top_->observers_.find(hash);
   DCHECK(iter != top_->observers_.end());
 
   auto result = iter->second->RemoveObserver(observer);
   if (result ==
       HistogramSampleObserverList::RemoveObserverResult::kWasOrBecameEmpty) {
-    top_->observers_.erase(name);
+    top_->observers_.erase(hash);
 
     // We also clear the flag from the histogram (if it exists).
-    const HistogramMap::const_iterator it = top_->histograms_.find(name);
-    if (it != top_->histograms_.end())
-      it->second->ClearFlags(HistogramBase::kCallbackExists);
+    HistogramBase* histogram = top_->FindHistogramByHashInternal(hash, name);
+    if (histogram) {
+      // Note: ClearFlags() does not write to persistent memory, it only writes
+      // to an in-memory version of the flags.
+      histogram->ClearFlags(HistogramBase::kCallbackExists);
+    }
   }
 
   have_active_callbacks_.store(
@@ -349,7 +451,9 @@
     const char* histogram_name,
     uint64_t name_hash,
     HistogramBase::Sample sample) {
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  DCHECK_EQ(name_hash, HashMetricName(histogram_name));
+
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -357,7 +461,7 @@
     return;
   }
 
-  auto it = const_top->observers_.find(histogram_name);
+  auto it = const_top->observers_.find(name_hash);
 
   // Ensure that this observer is still registered, as it might have been
   // unregistered before we acquired the lock.
@@ -372,7 +476,7 @@
 // static
 void StatisticsRecorder::SetGlobalSampleCallback(
     const GlobalSampleCallback& new_global_sample_callback) {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
 
   DCHECK(!global_sample_callback() || !new_global_sample_callback);
@@ -385,7 +489,7 @@
 
 // static
 size_t StatisticsRecorder::GetHistogramCount() {
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -397,29 +501,32 @@
 
 // static
 void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
 
-  const HistogramMap::iterator found = top_->histograms_.find(name);
-  if (found == top_->histograms_.end())
+  uint64_t hash = HashMetricName(name);
+  HistogramBase* base = top_->FindHistogramByHashInternal(hash, name);
+  if (!base) {
     return;
+  }
 
-  HistogramBase* const base = found->second;
   if (base->GetHistogramType() != SPARSE_HISTOGRAM) {
-    // When forgetting a histogram, it's likely that other information is
-    // also becoming invalid. Clear the persistent reference that may no
-    // longer be valid. There's no danger in this as, at worst, duplicates
-    // will be created in persistent memory.
+    // When forgetting a histogram, it's likely that other information is also
+    // becoming invalid. Clear the persistent reference that may no longer be
+    // valid. There's no danger in this as, at worst, duplicates will be created
+    // in persistent memory.
     static_cast<Histogram*>(base)->bucket_ranges()->set_persistent_reference(0);
   }
 
-  top_->histograms_.erase(found);
+  // This performs another lookup in the map, but this is fine since this is
+  // only used in tests.
+  top_->histograms_.erase(hash);
 }
 
 // static
 std::unique_ptr<StatisticsRecorder>
 StatisticsRecorder::CreateTemporaryForTesting() {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   std::unique_ptr<StatisticsRecorder> temporary_recorder =
       WrapUnique(new StatisticsRecorder());
   temporary_recorder->ranges_manager_
@@ -430,14 +537,14 @@
 // static
 void StatisticsRecorder::SetRecordChecker(
     std::unique_ptr<RecordHistogramChecker> record_checker) {
-  const absl::MutexLock auto_lock(lock_.Pointer());
+  const SrAutoWriterLock auto_lock(GetLock());
   EnsureGlobalRecorderWhileLocked();
   top_->record_checker_ = std::move(record_checker);
 }
 
 // static
 bool StatisticsRecorder::ShouldRecordHistogram(uint32_t histogram_hash) {
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -455,7 +562,7 @@
 
   Histograms out;
 
-  const absl::ReaderMutexLock auto_lock(lock_.Pointer());
+  const SrAutoReaderLock auto_lock(GetLock());
 
   // Manipulate |top_| through a const variable to ensure it is not mutated.
   const auto* const_top = top_;
@@ -465,9 +572,12 @@
 
   out.reserve(const_top->histograms_.size());
   for (const auto& entry : const_top->histograms_) {
+    // Note: HasFlags() does not read to persistent memory, it only reads an
+    // in-memory version of the flags.
     bool is_persistent = entry.second->HasFlags(HistogramBase::kIsPersistent);
-    if (!include_persistent && is_persistent)
+    if (!include_persistent && is_persistent) {
       continue;
+    }
     out.push_back(entry.second);
   }
 
@@ -520,7 +630,7 @@
 }
 
 StatisticsRecorder::StatisticsRecorder() {
-  lock_.Get().AssertHeld();
+  AssertLockHeld();
   previous_ = top_;
   top_ = this;
   InitLogOnShutdownWhileLocked();
@@ -528,7 +638,7 @@
 
 // static
 void StatisticsRecorder::InitLogOnShutdownWhileLocked() {
-  lock_.Get().AssertHeld();
+  AssertLockHeld();
   if (!is_vlog_initialized_ && VLOG_IS_ON(1)) {
     is_vlog_initialized_ = true;
     const auto dump_to_vlog = [](void*) {
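The statistics_recorder.cc changes above key the histogram and observer maps by the 64-bit HashMetricName() hash instead of the name; collisions are not resolved, only DCHECKed. A standalone sketch of that lookup pattern (a toy registry, not the StatisticsRecorder API; HashName()/Entry are assumptions):

#include <cassert>
#include <cstdint>
#include <string>
#include <string_view>
#include <unordered_map>

// Sketch only: registry keyed by a precomputed name hash.
struct Entry {
  std::string name;
};

class HashKeyedRegistry {
 public:
  Entry* Find(uint64_t hash, std::string_view name) {
    auto it = entries_.find(hash);
    if (it == entries_.end())
      return nullptr;
    // A hit must carry the same name; a different name with the same hash is
    // a collision, which the real code only DCHECKs.
    assert(it->second.name == name);
    return &it->second;
  }

  Entry& GetOrInsert(uint64_t hash, std::string_view name) {
    auto [it, inserted] = entries_.try_emplace(hash, Entry{std::string(name)});
    if (!inserted)
      assert(it->second.name == name);
    return it->second;
  }

 private:
  std::unordered_map<uint64_t, Entry> entries_;
};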
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index b4b873e..76bc21c 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -12,18 +12,20 @@
 
 #include <stdint.h>
 
-#include <atomic>
+#include <atomic>  // For std::memory_order_*.
 #include <memory>
+#include <shared_mutex>
 #include <string>
 #include <unordered_map>
-#include <unordered_set>
 #include <vector>
 
+#include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/functional/callback.h"
 #include "base/gtest_prod_util.h"
 #include "base/lazy_instance.h"
 #include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ref.h"
 #include "base/memory/weak_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/ranges_manager.h"
@@ -33,7 +35,6 @@
 #include "base/synchronization/lock.h"
 #include "base/thread_annotations.h"
 #include "base/types/pass_key.h"
-#include "third_party/abseil-cpp/absl/synchronization/mutex.h"
 
 namespace base {
 
@@ -174,7 +175,7 @@
   //
   // This method is thread safe.
   static Histograms GetHistograms(bool include_persistent = true)
-      LOCKS_EXCLUDED(lock_.Pointer());
+      LOCKS_EXCLUDED(GetLock());
 
   // Gets BucketRanges used by all histograms registered. The order of returned
   // BucketRanges is not guaranteed.
@@ -323,7 +324,135 @@
     return have_active_callbacks_.load(std::memory_order_relaxed);
   }
 
+#ifdef ARCH_CPU_64_BITS
+  static base::TimeDelta GetAndClearTotalWaitTime() {
+    return lock_.Get().GetAndClearTotalWaitTime();
+  }
+#endif  // ARCH_CPU_64_BITS
+
+  // Returns the synthetic trial group name for the R/W lock trial being run,
+  // or an empty string if no trial is running and nothing should be reported.
+  static StringPiece GetLockTrialGroup();
+
  private:
+  // Wrapper lock class that provides A/B testing between a base::Lock and a
+  // std::shared_mutex and tracks lock wait times. Additionally, allows the use
+  // of thread locking annotations, which are not otherwise supported by
+  // std::shared_mutex.
+  //
+  // Note: std::shared_mutex is currently not generally allowed in Chromium but
+  // this specific use has been explicitly discussed and agreed on
+  // [email protected] here:
+  // https://groups.google.com/a/chromium.org/g/cxx/c/bIlGr1URn8I/m/ftvVCQPiAQAJ
+  class BASE_EXPORT LOCKABLE SrLock {
+   public:
+    SrLock() : use_shared_mutex_(ShouldUseSharedMutex()) {}
+    ~SrLock() = default;
+
+    void Acquire() EXCLUSIVE_LOCK_FUNCTION() {
+      TimeTicks start = TimeTicks::Now();
+      if (use_shared_mutex_) {
+        mutex_.lock();
+      } else {
+        lock_.Acquire();
+      }
+      IncrementLockWaitTime(TimeTicks::Now() - start);
+    }
+
+    void Release() UNLOCK_FUNCTION() {
+      if (use_shared_mutex_) {
+        mutex_.unlock();
+      } else {
+        lock_.Release();
+      }
+    }
+
+    void AcquireShared() SHARED_LOCK_FUNCTION() {
+      TimeTicks start = TimeTicks::Now();
+      if (use_shared_mutex_) {
+        mutex_.lock_shared();
+      } else {
+        lock_.Acquire();
+      }
+      IncrementLockWaitTime(TimeTicks::Now() - start);
+    }
+
+    void ReleaseShared() UNLOCK_FUNCTION() {
+      if (use_shared_mutex_) {
+        mutex_.unlock_shared();
+      } else {
+        lock_.Release();
+      }
+    }
+
+    void AssertAcquired() {
+      if (use_shared_mutex_) {
+        // Not available with std::shared_mutex. This can be implemented on top
+        // of that API, similar to what base::Lock does.
+      } else {
+        lock_.AssertAcquired();
+      }
+    }
+
+#ifdef ARCH_CPU_64_BITS
+    TimeDelta GetAndClearTotalWaitTime() {
+      return Microseconds(
+          subtle::NoBarrier_AtomicExchange(&total_lock_wait_time_micros_, 0));
+    }
+#endif  // ARCH_CPU_64_BITS
+
+    bool use_shared_mutex() const { return use_shared_mutex_; }
+
+   private:
+    // Determines if the shared mutex should be used. Should only be called
+    // once when the lock is created.
+    static bool ShouldUseSharedMutex();
+
+    void IncrementLockWaitTime(TimeDelta delta) {
+#ifdef ARCH_CPU_64_BITS
+      subtle::NoBarrier_AtomicIncrement(&total_lock_wait_time_micros_,
+                                        delta.InMicroseconds());
+#endif  // ARCH_CPU_64_BITS
+    }
+
+#ifdef ARCH_CPU_64_BITS
+    // Cumulative wait time on acquiring the lock (both R and W modes) since
+    // the last call to GetAndClearTotalWaitTime().
+    // Note: Requires 64-bit arch for atomic increments.
+    subtle::Atomic64 total_lock_wait_time_micros_ = 0;
+#endif  // ARCH_CPU_64_BITS
+
+    // If true, |mutex_| will be used in R/W mode; otherwise |lock_| is used.
+    const bool use_shared_mutex_;
+    std::shared_mutex mutex_;
+    Lock lock_;
+  };
+
+  class SCOPED_LOCKABLE SrAutoReaderLock {
+   public:
+    explicit SrAutoReaderLock(SrLock& lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+        : lock_(lock) {
+      lock_->AcquireShared();
+    }
+
+    ~SrAutoReaderLock() UNLOCK_FUNCTION() { lock_->ReleaseShared(); }
+
+   private:
+    raw_ref<SrLock> lock_;
+  };
+
+  using SrAutoWriterLock = internal::BasicAutoLock<SrLock>;
+  static SrLock& GetLock() { return lock_.Get(); }
+  static void AssertLockHeld() { lock_.Get().AssertAcquired(); }
+
+  // Returns the histogram registered with |hash|, if there is one. Returns
+  // nullptr otherwise.
+  // Note: |name| is only used in DCHECK builds to assert that there was no
+  // collision (i.e. different histograms with the same hash).
+  HistogramBase* FindHistogramByHashInternal(uint64_t hash,
+                                             StringPiece name) const
+      EXCLUSIVE_LOCKS_REQUIRED(GetLock());
+
   // Adds an observer to be notified when a new sample is recorded on the
   // histogram referred to by |histogram_name|. Can be called before or after
   // the histogram is created.
@@ -343,14 +472,15 @@
 
   typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
 
-  typedef std::unordered_map<StringPiece, HistogramBase*, StringPieceHash>
-      HistogramMap;
+  // A map of histogram name hash (see HashMetricName()) to histogram object.
+  typedef std::unordered_map<uint64_t, HistogramBase*> HistogramMap;
 
-  // A map of histogram name to registered observers. If the histogram isn't
-  // created yet, the observers will be added after creation.
+  // A map of histogram name hash (see HashMetricName()) to registered
+  // observers. If the histogram isn't created yet, the observers will be added
+  // after creation.
   using HistogramSampleObserverList =
       base::ObserverListThreadSafe<ScopedHistogramSampleObserver>;
-  typedef std::unordered_map<std::string,
+  typedef std::unordered_map<uint64_t,
                              scoped_refptr<HistogramSampleObserverList>>
       ObserverMap;
 
@@ -360,7 +490,7 @@
   // Initializes the global recorder if it doesn't already exist. Safe to call
   // multiple times.
   static void EnsureGlobalRecorderWhileLocked()
-      EXCLUSIVE_LOCKS_REQUIRED(lock_.Pointer());
+      EXCLUSIVE_LOCKS_REQUIRED(GetLock());
 
   // Gets histogram providers.
   //
@@ -368,8 +498,7 @@
   static HistogramProviders GetHistogramProviders();
 
   // Imports histograms from global persistent memory.
-  static void ImportGlobalPersistentHistograms()
-      LOCKS_EXCLUDED(lock_.Pointer());
+  static void ImportGlobalPersistentHistograms() LOCKS_EXCLUDED(GetLock());
 
   // Constructs a new StatisticsRecorder and sets it as the current global
   // recorder.
@@ -377,12 +506,12 @@
   // This singleton instance should be started during the single-threaded
   // portion of startup and hence it is not thread safe. It initializes globals
   // to provide support for all future calls.
-  StatisticsRecorder() EXCLUSIVE_LOCKS_REQUIRED(lock_.Pointer());
+  StatisticsRecorder() EXCLUSIVE_LOCKS_REQUIRED(GetLock());
 
   // Initialize implementation but without lock. Caller should guard
   // StatisticsRecorder by itself if needed (it isn't in unit tests).
   static void InitLogOnShutdownWhileLocked()
-      EXCLUSIVE_LOCKS_REQUIRED(lock_.Pointer());
+      EXCLUSIVE_LOCKS_REQUIRED(GetLock());
 
   HistogramMap histograms_;
   ObserverMap observers_;
@@ -394,7 +523,9 @@
   raw_ptr<StatisticsRecorder> previous_ = nullptr;
 
   // Global lock for internal synchronization.
-  static LazyInstance<absl::Mutex>::Leaky lock_;
+  // Note: Care must be taken to not read or write anything to persistent memory
+  // while holding this lock, as that could cause a file I/O stall.
+  static LazyInstance<SrLock>::Leaky lock_;
 
   // Global lock for internal synchronization of histogram snapshots.
   static LazyInstance<base::Lock>::Leaky snapshot_lock_;
@@ -408,7 +539,7 @@
   // Current global recorder. This recorder is used by static methods. When a
   // new global recorder is created by CreateTemporaryForTesting(), then the
   // previous global recorder is referenced by top_->previous_.
-  static StatisticsRecorder* top_ GUARDED_BY(lock_.Pointer());
+  static StatisticsRecorder* top_ GUARDED_BY(GetLock());
 
   // Tracks whether InitLogOnShutdownWhileLocked() has registered a logging
   // function that will be called when the program finishes.
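The SrLock introduced above hides either a base::Lock or a std::shared_mutex behind one interface so the two can be A/B tested; reads take the lock in shared mode, writes in exclusive mode. A simplified standalone sketch of the reader/writer RAII shape, using std::shared_mutex only (no trial, no wait-time accounting; SketchRegistry is not a Chromium type):

#include <cstddef>
#include <shared_mutex>
#include <vector>

// Sketch only: reader/writer guards mirroring the SrAutoReaderLock /
// SrAutoWriterLock split above.
class SketchRegistry {
 public:
  void Register(int value) {
    std::unique_lock exclusive(mutex_);  // Writer: exclusive mode.
    values_.push_back(value);
  }

  size_t Count() const {
    std::shared_lock shared(mutex_);  // Reader: shared mode, many at once.
    return values_.size();
  }

 private:
  mutable std::shared_mutex mutex_;
  std::vector<int> values_;
};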
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index 130083a5..d9c16db 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -103,15 +103,23 @@
     // Note: We can't clear |top_| in the locked block, because the
     // StatisticsRecorder destructor expects that the lock isn't already held.
     {
-      const absl::MutexLock auto_lock(StatisticsRecorder::lock_.Pointer());
+      const StatisticsRecorder::SrAutoWriterLock auto_lock(
+          StatisticsRecorder::GetLock());
       statistics_recorder_.reset(StatisticsRecorder::top_);
+      if (statistics_recorder_) {
+        // Prevent releasing ranges in test to avoid dangling pointers in
+        // created histogram objects.
+        statistics_recorder_->ranges_manager_
+            .DoNotReleaseRangesOnDestroyForTesting();
+      }
     }
     statistics_recorder_.reset();
     DCHECK(!HasGlobalRecorder());
   }
 
   bool HasGlobalRecorder() {
-    const absl::ReaderMutexLock auto_lock(StatisticsRecorder::lock_.Pointer());
+    const StatisticsRecorder::SrAutoReaderLock auto_lock(
+        StatisticsRecorder::GetLock());
     return StatisticsRecorder::top_ != nullptr;
   }
 
@@ -138,7 +146,6 @@
   const bool use_persistent_histogram_allocator_;
 
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
-  std::unique_ptr<GlobalHistogramAllocator> old_global_allocator_;
 
  private:
   LogStateSaver log_state_saver_;
@@ -218,12 +225,13 @@
 
   // Create a new global allocator using the same memory as the old one. Any
   // old one is kept around so the memory doesn't get released.
-  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator* old_global_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
   if (use_persistent_histogram_allocator_) {
     GlobalHistogramAllocator::CreateWithPersistentMemory(
-        const_cast<void*>(old_global_allocator_->data()),
-        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
-        old_global_allocator_->Name());
+        const_cast<void*>(old_global_allocator->data()),
+        old_global_allocator->length(), 0, old_global_allocator->Id(),
+        old_global_allocator->Name());
   }
 
   // Reset statistics-recorder to validate operation from a clean start.
@@ -413,12 +421,13 @@
 
   // Create a new global allocator using the same memory as the old one. Any
   // old one is kept around so the memory doesn't get released.
-  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator* old_global_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
   if (use_persistent_histogram_allocator_) {
     GlobalHistogramAllocator::CreateWithPersistentMemory(
-        const_cast<void*>(old_global_allocator_->data()),
-        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
-        old_global_allocator_->Name());
+        const_cast<void*>(old_global_allocator->data()),
+        old_global_allocator->length(), 0, old_global_allocator->Id(),
+        old_global_allocator->Name());
   }
 
   // Reset statistics-recorder to validate operation from a clean start.
@@ -774,8 +783,7 @@
 
 class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
  public:
-  explicit TestHistogramProvider(
-      std::unique_ptr<PersistentHistogramAllocator> allocator)
+  explicit TestHistogramProvider(PersistentHistogramAllocator* allocator)
       : allocator_(std::move(allocator)) {
     StatisticsRecorder::RegisterHistogramProvider(weak_factory_.GetWeakPtr());
   }
@@ -794,7 +802,7 @@
   }
 
  private:
-  std::unique_ptr<PersistentHistogramAllocator> allocator_;
+  const raw_ptr<PersistentHistogramAllocator> allocator_;
   WeakPtrFactory<TestHistogramProvider> weak_factory_{this};
 };
 
@@ -804,7 +812,7 @@
       StatisticsRecorder::CreateTemporaryForTesting();
 
   // Extract any existing global allocator so a new one can be created.
-  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+  GlobalHistogramAllocator* old_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
 
   // Create a histogram inside a new allocator for testing.
@@ -813,13 +821,13 @@
   histogram->Add(3);
 
   // Undo back to the starting point.
-  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+  GlobalHistogramAllocator* new_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
-  GlobalHistogramAllocator::Set(std::move(old_allocator));
+  GlobalHistogramAllocator::Set(old_allocator);
   temp_sr.reset();
 
   // Create a provider that can supply histograms to the current SR.
-  TestHistogramProvider provider(std::move(new_allocator));
+  TestHistogramProvider provider(new_allocator);
 
   // Verify that the created histogram is no longer known.
   ASSERT_FALSE(StatisticsRecorder::FindHistogram(histogram->histogram_name()));
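The new base/moving_window.h below computes sliding-window Min/Max with the classic monotonic-queue technique: each element enters and leaves the candidate buffer at most once, so maintenance is amortized O(1) per sample. A standalone sketch of that idea for a window maximum, separate from the MovingWindow class itself (function name is illustrative):

#include <cstddef>
#include <deque>
#include <vector>

// Sketch only: sliding-window maximum via a monotonic deque. Candidate
// indices are kept with strictly decreasing values; the front is always the
// current window maximum.
std::vector<int> SlidingWindowMax(const std::vector<int>& values,
                                  size_t window_size) {
  std::deque<size_t> candidates;
  std::vector<int> result;
  for (size_t i = 0; i < values.size(); ++i) {
    // Drop the front candidate once it falls out of the window.
    if (!candidates.empty() && candidates.front() + window_size <= i)
      candidates.pop_front();
    // Drop smaller candidates: they can never be the maximum again.
    while (!candidates.empty() && values[candidates.back()] <= values[i])
      candidates.pop_back();
    candidates.push_back(i);
    if (i + 1 >= window_size)
      result.push_back(values[candidates.front()]);
  }
  return result;
}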
diff --git a/base/moving_window.h b/base/moving_window.h
new file mode 100644
index 0000000..283cb04
--- /dev/null
+++ b/base/moving_window.h
@@ -0,0 +1,727 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MOVING_WINDOW_H_
+#define BASE_MOVING_WINDOW_H_
+
+#include <math.h>
+#include <stddef.h>
+
+#include <cmath>
+#include <functional>
+#include <limits>
+#include <vector>
+
+#include "base/check_op.h"
+#include "base/memory/raw_ref.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// Class to efficiently calculate statistics in a sliding window.
+// This class isn't thread safe.
+// Supported statistics are Min/Max/Mean/Deviation.
+// You can also iterate through the items in the window.
+// The class is modular: required features must be specified in the template
+// arguments.
+// Features that are not listed don't consume any memory or CPU cycles.
+//
+// Usage:
+// base::MovingWindow<int,
+//                    base::MovingWindowFeatures::Min,
+//                    base::MovingWindowFeatures::Max>
+//                    moving_window(window_size);
+//
+// Following convenience shortcuts are provided with predefined sets of
+// features:
+// MovingMax/MovingMin/MovingAverage/MovingAverageDeviation/MovingMinMax.
+//
+// Methods:
+// Constructor:
+//   MovingWindow(size_t window_size);
+//
+// Window update (available for all templates):
+//   void AddSample(T value);
+//   size_t Count() const;
+//   void Reset();
+//
+// Available for MovingWindowFeatures::Min:
+//    T Min() const;
+//
+// Available for MovingWindowFeatures::Max:
+//    T Max() const;
+//
+// Available for MovingWindowFeatures::Mean:
+//    U Mean<U>() const;
+//
+// Available for MovingWindowFeatures::Deviation:
+//    U Deviation<U>() const;
+//
+// Available for MovingWindowFeatures::Iteration. Iterating through the window:
+//    iterator begin() const;
+//    iterator end() const;
+//    size_t size() const;
+
+// Features supported by the class.
+struct MovingWindowFeatures {
+  struct Min {
+    static bool has_min;
+  };
+
+  struct Max {
+    static bool has_max;
+  };
+
+  // Need to specify a type capable of holding a sum of all elements in the
+  // window.
+  template <typename SumType>
+  struct Mean {
+    static SumType has_mean;
+  };
+
+  // Need to specify a type capable of holding a sum of squares of all elements
+  // in the window.
+  template <typename SumType>
+  struct Deviation {
+    static SumType has_deviation;
+  };
+
+  struct Iteration {
+    static bool has_iteration;
+  };
+};
+
+// Main template.
+template <typename T, typename... Features>
+class MovingWindow;
+
+// Convenience shortcuts.
+template <typename T>
+using MovingMax = MovingWindow<T, MovingWindowFeatures::Max>;
+
+template <typename T>
+using MovingMin = MovingWindow<T, MovingWindowFeatures::Min>;
+
+template <typename T>
+using MovingMinMax =
+    MovingWindow<T, MovingWindowFeatures::Min, MovingWindowFeatures::Max>;
+
+template <typename T, typename SumType>
+using MovingAverage = MovingWindow<T, MovingWindowFeatures::Mean<SumType>>;
+
+template <typename T>
+using MovingAverageDeviation =
+    MovingWindow<T,
+                 MovingWindowFeatures::Mean<T>,
+                 MovingWindowFeatures::Deviation<double>>;
+
+namespace internal {
+
+// Class responsible only for calculating maximum in the window.
+// It's reused to calculate both min and max via inverting the comparator.
+template <typename T, typename Comparator>
+class MovingExtremumBase {
+ public:
+  explicit MovingExtremumBase(size_t window_size)
+      : window_size_(window_size),
+        values_(window_size),
+        added_at_(window_size),
+        last_idx_(window_size - 1),
+        compare_(Comparator()) {}
+  ~MovingExtremumBase() = default;
+
+  // Add new sample to the stream.
+  void AddSample(const T& value, size_t total_added) {
+    // Remove old elements from the back of the window.
+    while (size_ > 0 && added_at_[begin_idx_] + window_size_ <= total_added) {
+      ++begin_idx_;
+      if (begin_idx_ == window_size_) {
+        begin_idx_ = 0;
+      }
+      --size_;
+    }
+    // Remove small elements from the front of the window because they can never
+    // become the maximum in the window since the currently added element is
+    // bigger than them and will leave the window later.
+    while (size_ > 0 && compare_(values_[last_idx_], value)) {
+      if (last_idx_ == 0) {
+        last_idx_ = window_size_;
+      }
+      --last_idx_;
+      --size_;
+    }
+    DCHECK_LT(size_, window_size_);
+    ++last_idx_;
+    if (last_idx_ == window_size_) {
+      last_idx_ = 0;
+    }
+    values_[last_idx_] = value;
+    added_at_[last_idx_] = total_added;
+    ++size_;
+  }
+
+  // Get the maximum of the last `window_size` elements.
+  T Value() const {
+    DCHECK_GT(size_, 0u);
+    return values_[begin_idx_];
+  }
+
+  // Clear all samples.
+  void Reset() {
+    size_ = 0;
+    begin_idx_ = 0;
+    last_idx_ = window_size_ - 1;
+  }
+
+ private:
+  const size_t window_size_;
+  // Circular buffer with some values in the window.
+  // Only possible candidates for maximum are stored:
+  // values form a non-increasing sequence.
+  std::vector<T> values_;
+  // Circular buffer storing when numbers in `values_` were added.
+  std::vector<size_t> added_at_;
+  // Begin of the circular buffers above.
+  size_t begin_idx_ = 0;
+  // Last occupied position.
+  size_t last_idx_;
+  // How many elements are stored in the circular buffers above.
+  size_t size_ = 0;
+  // Template parameter comparator.
+  const Comparator compare_;
+};
+
+// Null implementation of the above class to be used when feature is disabled.
+template <typename T>
+struct NullExtremumImpl {
+  explicit NullExtremumImpl(size_t) {}
+  ~NullExtremumImpl() = default;
+  void AddSample(const T&, size_t) {}
+  void Reset() {}
+};
+
+// Class to hold the moving window.
+// It's used to calculate replaced element for Mean/Deviation calculations.
+template <typename T>
+class MovingWindowBase {
+ public:
+  explicit MovingWindowBase(size_t window_size) : values_(window_size) {}
+
+  ~MovingWindowBase() = default;
+
+  void AddSample(const T& sample) {
+    values_[cur_idx_] = sample;
+    ++cur_idx_;
+    if (cur_idx_ == values_.size()) {
+      cur_idx_ = 0;
+    }
+  }
+
+  // Whether the window has been filled a whole number of times.
+  bool IsLastIdx() const { return cur_idx_ + 1 == values_.size(); }
+
+  void Reset() {
+    cur_idx_ = 0;
+    std::fill(values_.begin(), values_.end(), T());
+  }
+
+  T GetValue() const { return values_[cur_idx_]; }
+
+  T operator[](size_t idx) const { return values_[idx]; }
+
+  size_t Size() const { return values_.size(); }
+
+  // The index that will be overwritten by the next element.
+  size_t CurIdx() const { return cur_idx_; }
+
+ private:
+  // Circular buffer.
+  std::vector<T> values_;
+  // Where the buffer begins.
+  size_t cur_idx_ = 0;
+};
+
+// Null implementation of the above class to be used when feature is disabled.
+template <typename T>
+struct NullWindowImpl {
+  explicit NullWindowImpl(size_t) {}
+  ~NullWindowImpl() = default;
+  void AddSample(const T& sample) {}
+  bool IsLastIdx() const { return false; }
+  void Reset() {}
+  T GetValue() const { return T(); }
+};
+
+// Performs division allowing the class to work with more types.
+// General template.
+template <typename SumType, typename ReturnType>
+struct DivideInternal {
+  static ReturnType Compute(const SumType& sum, const size_t count) {
+    return static_cast<ReturnType>(sum) / static_cast<ReturnType>(count);
+  }
+};
+
+// Class to calculate moving mean.
+template <typename T, typename SumType, bool IsFloating>
+class MovingMeanBase {
+ public:
+  explicit MovingMeanBase(size_t window_size) : sum_() {}
+
+  ~MovingMeanBase() = default;
+
+  void AddSample(const T& sample, const T& replaced_value, bool is_last_idx) {
+    sum_ += sample - replaced_value;
+  }
+
+  template <typename ReturnType = SumType>
+  ReturnType Mean(const size_t count) const {
+    if (count == 0) {
+      return ReturnType();
+    }
+    return DivideInternal<SumType, ReturnType>::Compute(sum_, count);
+  }
+  void Reset() { sum_ = SumType(); }
+
+  SumType Sum() const { return sum_; }
+
+ private:
+  SumType sum_;
+};
+
+// Class to calculate moving mean.
+// Variant for float types with running sum to avoid rounding errors
+// accumulation.
+template <typename T, typename SumType>
+class MovingMeanBase<T, SumType, true> {
+ public:
+  explicit MovingMeanBase(size_t window_size) : sum_(), running_sum_() {}
+
+  ~MovingMeanBase() = default;
+
+  void AddSample(const T& sample, const T& replaced_value, bool is_last_idx) {
+    running_sum_ += sample;
+    if (is_last_idx) {
+      // Replace sum with running sum to avoid rounding errors accumulation.
+      sum_ = running_sum_;
+      running_sum_ = SumType();
+    } else {
+      sum_ += sample - replaced_value;
+    }
+  }
+
+  template <typename ReturnType = SumType>
+  ReturnType Mean(const size_t count) const {
+    if (count == 0) {
+      return ReturnType();
+    }
+    return DivideInternal<SumType, ReturnType>::Compute(sum_, count);
+  }
+
+  void Reset() { sum_ = running_sum_ = SumType(); }
+
+  SumType Sum() const { return sum_; }
+
+ private:
+  SumType sum_;
+  SumType running_sum_;
+};
+
+// Null implementation of the above class to be used when feature is disabled.
+template <typename T>
+struct NullMeanImpl {
+  explicit NullMeanImpl(size_t window_size) {}
+  ~NullMeanImpl() = default;
+
+  void AddSample(const T& sample, const T&, bool) {}
+
+  void Reset() {}
+};
+
+// Computes the main deviation formula, allowing the class to work with more
+// types: the square root of (mean of squared values minus the squared mean).
+// General template.
+template <typename SumType, typename ReturnType>
+struct DeivationInternal {
+  static ReturnType Compute(const SumType& sum_squares,
+                            const SumType& square_of_sum,
+                            const size_t count) {
+    return static_cast<ReturnType>(
+        std::sqrt((static_cast<double>(sum_squares) -
+                   static_cast<double>(square_of_sum) / count) /
+                  count));
+  }
+};
+
+// Class to compute square of the number.
+// General template
+template <typename T, typename SquareType>
+struct SquareInternal {
+  static SquareType Compute(const T& sample) {
+    return static_cast<SquareType>(sample) * sample;
+  }
+};
+
+// Class to calculate moving deviation.
+template <typename T, typename SumType, bool IsFloating>
+class MovingDeviationBase {
+ public:
+  explicit MovingDeviationBase(size_t window_size) : sum_sq_() {}
+  ~MovingDeviationBase() = default;
+  void AddSample(const T& sample, const T& replaced_value, bool is_last_idx) {
+    sum_sq_ += SquareInternal<T, SumType>::Compute(sample) -
+               SquareInternal<T, SumType>::Compute(replaced_value);
+  }
+
+  template <typename ReturnType, typename U>
+  ReturnType Deviation(const size_t count, const U& sum) const {
+    if (count == 0) {
+      return ReturnType();
+    }
+    return DeivationInternal<SumType, ReturnType>::Compute(
+        sum_sq_, SquareInternal<U, SumType>::Compute(sum), count);
+  }
+  void Reset() { sum_sq_ = SumType(); }
+
+ private:
+  SumType sum_sq_;
+};
+
+// Class to calculate moving deviation.
+// Variant for float types with running sum to avoid rounding errors
+// accumulation.
+template <typename T, typename SumType>
+class MovingDeviationBase<T, SumType, true> {
+ public:
+  explicit MovingDeviationBase(size_t window_size)
+      : sum_sq_(), running_sum_() {}
+  ~MovingDeviationBase() = default;
+  void AddSample(const T& sample, const T& replaced_value, bool is_last_idx) {
+    SumType square = SquareInternal<T, SumType>::Compute(sample);
+    running_sum_ += square;
+    if (is_last_idx) {
+      // Replace sum with running sum to avoid rounding errors accumulation.
+      sum_sq_ = running_sum_;
+      running_sum_ = SumType();
+    } else {
+      sum_sq_ += square - SquareInternal<T, SumType>::Compute(replaced_value);
+    }
+  }
+
+  template <typename ReturnType, typename U>
+  ReturnType Deviation(const size_t count, const U& sum) const {
+    if (count == 0) {
+      return ReturnType();
+    }
+    return DeivationInternal<SumType, ReturnType>::Compute(
+        sum_sq_, SquareInternal<U, SumType>::Compute(sum), count);
+  }
+  void Reset() { running_sum_ = sum_sq_ = SumType(); }
+
+ private:
+  SumType sum_sq_;
+  SumType running_sum_;
+};
+
+// Null implementation of the above class to be used when feature is disabled.
+template <typename T>
+struct NullDeviationImpl {
+ public:
+  explicit NullDeviationImpl(size_t window_size) {}
+  ~NullDeviationImpl() = default;
+  void AddSample(const T&, const T&, bool) {}
+  void Reset() {}
+};
+
+// Template helpers.
+
+// Gets all enabled features in one struct.
+template <typename... Features>
+struct EnabledFeatures : public Features... {};
+
+// Checks if specific member is present.
+template <typename T, typename = void>
+struct has_member_min : std::false_type {};
+template <typename T>
+struct has_member_min<T, decltype((void)T::has_min, void())> : std::true_type {
+};
+
+template <typename T, typename = void>
+struct has_member_max : std::false_type {};
+template <typename T>
+struct has_member_max<T, decltype((void)T::has_max, void())> : std::true_type {
+};
+
+template <typename T, typename = void>
+struct has_member_mean : std::false_type {};
+template <typename T>
+struct has_member_mean<T, decltype((void)T::has_mean, void())>
+    : std::true_type {};
+
+template <typename T, typename = void>
+struct has_memeber_deviation : std::false_type {};
+template <typename T>
+struct has_memeber_deviation<T, decltype((void)T::has_deviation, void())>
+    : std::true_type {};
+
+template <typename T, typename = void>
+struct has_member_iteration : std::false_type {};
+template <typename T>
+struct has_member_iteration<T, decltype((void)T::has_iteration, void())>
+    : std::true_type {};
+
+// Gets the type of the member if present.
+// Can't just use decltype, because the member might be absent.
+template <typename T, typename = void>
+struct get_type_mean {
+  typedef void type;
+};
+template <typename T>
+struct get_type_mean<T, decltype((void)T::has_mean, void())> {
+  typedef decltype(T::has_mean) type;
+};
+
+template <typename T, typename = void>
+struct get_type_deviation {
+  typedef void type;
+};
+template <typename T>
+struct get_type_deviation<T, decltype((void)T::has_deviation, void())> {
+  typedef decltype(T::has_deviation) type;
+};
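+
+// Illustrative sketch (hypothetical feature struct, not part of this header):
+// a feature opts in by declaring the corresponding static member, and for the
+// mean/deviation features the member's declared type also selects the
+// accumulator type, e.g.
+//
+//   struct MyMeanFeature {
+//     static double has_mean;  // Presence enables Mean(); double is the
+//                              // accumulator type.
+//   };
+//
+// With such a type, has_member_mean<MyMeanFeature>::value is true and
+// get_type_mean<MyMeanFeature>::type is double; both keep their defaults
+// (false / void) for types that do not declare the member.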
+
+// Performs division allowing the class to work with more types.
+// Specific template for TimeDelta.
+template <>
+struct DivideInternal<TimeDelta, TimeDelta> {
+  static TimeDelta Compute(const TimeDelta& sum, const size_t count) {
+    return sum / count;
+  }
+};
+
+// Computes the main deviation formula, allowing the class to work with more
+// types. The deviation is the square root of the mean of squared values minus
+// the squared mean value.
+// Specific template for TimeDelta.
+template <>
+struct DeivationInternal<double, TimeDelta> {
+  static TimeDelta Compute(const double sum_squares,
+                           const double square_of_sum,
+                           const size_t count) {
+    return Seconds(std::sqrt((sum_squares - square_of_sum / count) / count));
+  }
+};
+
+// Class to compute the square of a number.
+// Specific template for TimeDelta.
+template <>
+struct SquareInternal<TimeDelta, double> {
+  static double Compute(const TimeDelta& sample) {
+    return sample.InSecondsF() * sample.InSecondsF();
+  }
+};
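+//
+// Worked example for the TimeDelta specializations above (illustrative only):
+// for samples of 400ms and 200ms, sum_squares = 0.4^2 + 0.2^2 = 0.2,
+// square_of_sum = (0.4 + 0.2)^2 = 0.36 and count = 2, so the deviation is
+// Seconds(sqrt((0.2 - 0.36 / 2) / 2)) = Seconds(0.1), i.e. 100ms around the
+// 300ms mean.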
+
+}  // namespace internal
+
+// Implementation of the main class.
+template <typename T, typename... Features>
+class MovingWindow {
+ public:
+  // List of all requested features.
+  using EnabledFeatures = internal::EnabledFeatures<Features...>;
+
+  explicit MovingWindow(size_t window_size)
+      : min_impl_(window_size),
+        max_impl_(window_size),
+        mean_impl_(window_size),
+        deviation_impl_(window_size),
+        window_impl_(window_size) {}
+
+  // Adds sample to the window.
+  void AddSample(const T& sample) {
+    ++total_added_;
+    min_impl_.AddSample(sample, total_added_);
+    max_impl_.AddSample(sample, total_added_);
+    mean_impl_.AddSample(sample, window_impl_.GetValue(),
+                         window_impl_.IsLastIdx());
+    deviation_impl_.AddSample(sample, window_impl_.GetValue(),
+                              window_impl_.IsLastIdx());
+    window_impl_.AddSample(sample);
+  }
+
+  // Returns the number of elements seen so far in the stream (might be bigger
+  // than the window size).
+  size_t Count() const { return total_added_; }
+
+  // Calculates min in the window. Template to disable when feature isn't
+  // requested.
+  template <typename U = EnabledFeatures,
+            std::enable_if_t<internal::has_member_min<U>::value, int> = 0>
+  T Min() const {
+    return min_impl_.Value();
+  }
+
+  // Calculates max in the window. Template to disable when feature isn't
+  // requested.
+  template <typename U = EnabledFeatures,
+            std::enable_if_t<internal::has_member_max<U>::value, int> = 0>
+  T Max() const {
+    return max_impl_.Value();
+  }
+
+  // Calculates mean in the window. Template to disable when feature isn't
+  // requested.
+  template <typename ReturnType = T,
+            typename U = EnabledFeatures,
+            std::enable_if_t<internal::has_member_mean<U>::value, int> = 0>
+  ReturnType Mean() const {
+    return mean_impl_.template Mean<ReturnType>(
+        std::min(total_added_, window_impl_.Size()));
+  }
+
+  // Calculates deviation in the window. Template to disable when feature isn't
+  // requested.
+  template <
+      typename ReturnType = T,
+      typename U = EnabledFeatures,
+      std::enable_if_t<internal::has_memeber_deviation<U>::value, int> = 0>
+  ReturnType Deviation() const {
+    const size_t count = std::min(total_added_, window_impl_.Size());
+    return deviation_impl_.template Deviation<ReturnType>(count,
+                                                          mean_impl_.Sum());
+  }
+
+  // Resets the state to an empty window.
+  void Reset() {
+    min_impl_.Reset();
+    max_impl_.Reset();
+    mean_impl_.Reset();
+    deviation_impl_.Reset();
+    window_impl_.Reset();
+    total_added_ = 0;
+  }
+
+  // Iterator implementation. Iterates over the window from the oldest to the
+  // newest sample.
+  class iterator {
+   public:
+    ~iterator() = default;
+
+    const T operator*() {
+      DCHECK_LT(idx_, window_impl_->Size());
+      return (*window_impl_)[idx_];
+    }
+
+    iterator& operator++() {
+      ++idx_;
+      // Wrap around the circular buffer.
+      if (idx_ == window_impl_->Size()) {
+        idx_ = 0;
+      }
+      // The only way to arrive at the current element is to come around
+      // after iterating through the whole window.
+      if (idx_ == window_impl_->CurIdx()) {
+        idx_ = kInvalidIndex;
+      }
+      return *this;
+    }
+
+    bool operator==(const iterator& other) const { return idx_ == other.idx_; }
+
+   private:
+    iterator(const internal::MovingWindowBase<T>& window, size_t idx)
+        : window_impl_(window), idx_(idx) {}
+
+    static const size_t kInvalidIndex = std::numeric_limits<size_t>::max();
+
+    raw_ref<const internal::MovingWindowBase<T>> window_impl_;
+    size_t idx_;
+
+    friend class MovingWindow<T, Features...>;
+  };
+
+  // Begin iterator. Template to enable only if iteration feature is requested.
+  template <typename U = EnabledFeatures,
+            std::enable_if_t<internal::has_member_iteration<U>::value, int> = 0>
+  iterator begin() const {
+    if (total_added_ == 0) {
+      return end();
+    }
+    // Before the window is fully filled, the oldest element is at index 0.
+    size_t idx =
+        (total_added_ < window_impl_.Size()) ? 0 : window_impl_.CurIdx();
+
+    return iterator(window_impl_, idx);
+  }
+
+  // End iterator. Template to enable only if iteration feature is requested.
+  template <typename U = EnabledFeatures,
+            std::enable_if_t<internal::has_member_iteration<U>::value, int> = 0>
+  iterator end() const {
+    return iterator(window_impl_, iterator::kInvalidIndex);
+  }
+
+  // Size of the collection. Template to enable only if iteration feature is
+  // requested.
+  template <typename U = EnabledFeatures,
+            std::enable_if_t<internal::has_member_iteration<U>::value, int> = 0>
+  size_t size() const {
+    return std::min(total_added_, window_impl_.Size());
+  }
+
+ private:
+  // Member for calculating min.
+  // Conditionally enabled on Min feature.
+  typename std::conditional<internal::has_member_min<EnabledFeatures>::value,
+                            internal::MovingExtremumBase<T, std::greater<T>>,
+                            internal::NullExtremumImpl<T>>::type min_impl_;
+
+  // Member for calculating max.
+  // Conditionally enabled on Max feature.
+  typename std::conditional<internal::has_member_max<EnabledFeatures>::value,
+                            internal::MovingExtremumBase<T, std::less<T>>,
+                            internal::NullExtremumImpl<T>>::type max_impl_;
+
+  // Type of the sum value in the Mean implementation. It might need to reuse
+  // the deviation sum type, because enabling only the deviation feature also
+  // enables the mean member (deviation calculation depends on mean
+  // calculation).
+  using MeanSumType = typename std::conditional<
+      internal::has_member_mean<EnabledFeatures>::value,
+      typename internal::get_type_mean<EnabledFeatures>::type,
+      typename internal::get_type_deviation<EnabledFeatures>::type>::type;
+  // Member for calculating mean.
+  // Conditionally enabled on Mean or Deviation feature (because deviation
+  // calculation depends on mean calculation).
+  typename std::conditional<
+      internal::has_member_mean<EnabledFeatures>::value ||
+          internal::has_memeber_deviation<EnabledFeatures>::value,
+      internal::
+          MovingMeanBase<T, MeanSumType, std::is_floating_point_v<MeanSumType>>,
+      internal::NullMeanImpl<T>>::type mean_impl_;
+
+  // Member for calculating deviation.
+  // Conditionally enabled on Deviation feature.
+  typename std::conditional<
+      internal::has_memeber_deviation<EnabledFeatures>::value,
+      internal::MovingDeviationBase<
+          T,
+          typename internal::get_type_deviation<EnabledFeatures>::type,
+          std::is_floating_point_v<
+              typename internal::get_type_deviation<EnabledFeatures>::type>>,
+      internal::NullDeviationImpl<T>>::type deviation_impl_;
+
+  // Member for storing the moving window.
+  // Conditionally enabled on Mean, Deviation or Iteration feature since
+  // they need the elements in the window.
+  // Min and Max features store elements internally so they don't need this.
+  typename std::conditional<
+      internal::has_member_mean<EnabledFeatures>::value ||
+          internal::has_memeber_deviation<EnabledFeatures>::value ||
+          internal::has_member_iteration<EnabledFeatures>::value,
+      internal::MovingWindowBase<T>,
+      internal::NullWindowImpl<T>>::type window_impl_;
+  // Total number of added elements.
+  size_t total_added_ = 0;
+};
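+
+// Usage sketch (see base/moving_window_unittest.cc for complete examples): the
+// convenience aliases used there (MovingMin, MovingMax, MovingAverage,
+// MovingAverageDeviation) wrap common feature combinations, e.g.
+//
+//   MovingAverage<int, int64_t> avg(4u);  // Window of 4, int64_t accumulator.
+//   avg.AddSample(1);
+//   avg.AddSample(2);
+//   double mean = avg.Mean<double>();     // 1.5
+//
+// while MovingWindow<int, MovingWindowFeatures::Iteration> additionally allows
+// range-for iteration over the samples currently held in the window.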
+
+}  // namespace base
+
+#endif  // BASE_MOVING_WINDOW_H_
diff --git a/base/moving_window_unittest.cc b/base/moving_window_unittest.cc
new file mode 100644
index 0000000..a306d5a
--- /dev/null
+++ b/base/moving_window_unittest.cc
@@ -0,0 +1,204 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/moving_window.h"
+
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+constexpr int kTestValues[] = {
+    33, 1, 2, 7, 5, 2, 4, 45, 1000, 1, 100, 2, 200, 2,  2, 2, 300, 4, 1,
+    2,  3, 4, 5, 6, 7, 8, 9,  10,   9, 8,   7, 6,   5,  4, 3, 2,   1, 1,
+    2,  1, 4, 2, 1, 8, 1, 2,  1,    4, 1,   2, 1,   16, 1, 2, 1};
+
+}  // namespace
+
+class MovingMaxTest : public testing::TestWithParam<unsigned int> {};
+
+INSTANTIATE_TEST_SUITE_P(All,
+                         MovingMaxTest,
+                         testing::ValuesIn({1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u,
+                                            10u, 17u, 20u, 100u}));
+
+TEST_P(MovingMaxTest, BlanketTest) {
+  const size_t window_size = GetParam();
+  MovingMax<int> window(window_size);
+  for (size_t i = 0; i < std::size(kTestValues); ++i) {
+    window.AddSample(kTestValues[i]);
+    int slow_max = kTestValues[i];
+    for (size_t j = 1; j < window_size && j <= i; ++j) {
+      slow_max = std::max(slow_max, kTestValues[i - j]);
+    }
+    EXPECT_EQ(window.Max(), slow_max);
+  }
+}
+
+TEST(MovingMax, SingleElementWindow) {
+  MovingMax<int> window(1u);
+  window.AddSample(100);
+  EXPECT_EQ(window.Max(), 100);
+  window.AddSample(1000);
+  EXPECT_EQ(window.Max(), 1000);
+  window.AddSample(1);
+  EXPECT_EQ(window.Max(), 1);
+  window.AddSample(3);
+  EXPECT_EQ(window.Max(), 3);
+  window.AddSample(4);
+  EXPECT_EQ(window.Max(), 4);
+}
+
+TEST(MovingMax, VeryLargeWindow) {
+  MovingMax<int> window(100u);
+  window.AddSample(100);
+  EXPECT_EQ(window.Max(), 100);
+  window.AddSample(1000);
+  EXPECT_EQ(window.Max(), 1000);
+  window.AddSample(1);
+  EXPECT_EQ(window.Max(), 1000);
+  window.AddSample(3);
+  EXPECT_EQ(window.Max(), 1000);
+  window.AddSample(4);
+  EXPECT_EQ(window.Max(), 1000);
+}
+
+TEST(MovingMax, Counts) {
+  MovingMax<int> window(3u);
+  EXPECT_EQ(window.Count(), 0u);
+  window.AddSample(100);
+  EXPECT_EQ(window.Count(), 1u);
+  window.AddSample(1000);
+  EXPECT_EQ(window.Count(), 2u);
+  window.AddSample(1);
+  EXPECT_EQ(window.Count(), 3u);
+  window.AddSample(3);
+  EXPECT_EQ(window.Count(), 4u);
+  window.AddSample(4);
+  EXPECT_EQ(window.Count(), 5u);
+}
+
+TEST(MovingAverage, Unrounded) {
+  MovingAverage<int, int64_t> window(4u);
+  window.AddSample(1);
+  EXPECT_EQ(window.Mean<double>(), 1.0);
+  window.AddSample(2);
+  EXPECT_EQ(window.Mean<double>(), 1.5);
+  window.AddSample(3);
+  EXPECT_EQ(window.Mean<double>(), 2.0);
+  window.AddSample(4);
+  EXPECT_EQ(window.Mean<double>(), 2.5);
+  window.AddSample(101);
+  EXPECT_EQ(window.Mean<double>(), 27.5);
+}
+
+class MovingMinTest : public testing::TestWithParam<unsigned int> {};
+
+INSTANTIATE_TEST_SUITE_P(All,
+                         MovingMinTest,
+                         testing::ValuesIn({1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u,
+                                            10u, 17u, 20u, 100u}));
+
+TEST_P(MovingMinTest, BlanketTest) {
+  const size_t window_size = GetParam();
+  MovingMin<int> window(window_size);
+  for (int repeats = 0; repeats < 2; ++repeats) {
+    for (size_t i = 0; i < std::size(kTestValues); ++i) {
+      window.AddSample(kTestValues[i]);
+      int slow_min = kTestValues[i];
+      for (size_t j = 1; j < window_size && j <= i; ++j) {
+        slow_min = std::min(slow_min, kTestValues[i - j]);
+      }
+      EXPECT_EQ(window.Min(), slow_min);
+    }
+    window.Reset();
+  }
+}
+
+class MovingAverageTest : public testing::TestWithParam<unsigned int> {};
+
+INSTANTIATE_TEST_SUITE_P(All,
+                         MovingAverageTest,
+                         testing::ValuesIn({1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u,
+                                            10u, 17u, 20u, 100u}));
+
+TEST_P(MovingAverageTest, BlanketTest) {
+  const size_t window_size = GetParam();
+  MovingAverage<int, int64_t> window(window_size);
+  for (int repeats = 0; repeats < 2; ++repeats) {
+    for (size_t i = 0; i < std::size(kTestValues); ++i) {
+      window.AddSample(kTestValues[i]);
+      int slow_mean = 0;
+      for (size_t j = 0; j < window_size && j <= i; ++j) {
+        slow_mean += kTestValues[i - j];
+      }
+      slow_mean /= std::min(window_size, i + 1);
+      EXPECT_EQ(window.Mean(), slow_mean);
+    }
+    window.Reset();
+  }
+}
+
+class MovingDeviationTest : public testing::TestWithParam<unsigned int> {};
+
+INSTANTIATE_TEST_SUITE_P(All,
+                         MovingDeviationTest,
+                         testing::ValuesIn({1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u, 9u,
+                                            10u, 17u, 20u, 100u}));
+
+TEST_P(MovingDeviationTest, BlanketTest) {
+  const size_t window_size = GetParam();
+  MovingAverageDeviation<double> window(window_size);
+  for (int repeats = 0; repeats < 2; ++repeats) {
+    for (size_t i = 0; i < std::size(kTestValues); ++i) {
+      window.AddSample(kTestValues[i]);
+      double slow_deviation = 0;
+      double mean = window.Mean();
+      for (size_t j = 0; j < window_size && j <= i; ++j) {
+        slow_deviation +=
+            (kTestValues[i - j] - mean) * (kTestValues[i - j] - mean);
+      }
+      slow_deviation /= std::min(window_size, i + 1);
+      slow_deviation = sqrt(slow_deviation);
+      double fast_deviation = window.Deviation();
+      EXPECT_TRUE(std::abs(fast_deviation - slow_deviation) < 1e-9);
+    }
+    window.Reset();
+  }
+}
+
+TEST(MovingWindowTest, Iteration) {
+  const size_t kWindowSize = 10;
+  MovingWindow<int, base::MovingWindowFeatures::Iteration> window(kWindowSize);
+  for (int repeats = 0; repeats < 2; ++repeats) {
+    for (size_t i = 0; i < std::size(kTestValues); ++i) {
+      window.AddSample(kTestValues[i]);
+      size_t j = 0;
+      const size_t in_window = std::min(i + 1, kWindowSize);
+      for (int value : window) {
+        ASSERT_LT(j, in_window);
+        EXPECT_EQ(value, kTestValues[i + j + 1 - in_window]);
+        ++j;
+      }
+      EXPECT_EQ(j, in_window);
+    }
+    window.Reset();
+  }
+}
+
+TEST(MovingMeanDeviation, WorksWithTimeDelta) {
+  MovingAverageDeviation<base::TimeDelta> window(2);
+  window.AddSample(base::Milliseconds(400));
+  window.AddSample(base::Milliseconds(200));
+  EXPECT_EQ(window.Mean(), base::Milliseconds(300));
+  EXPECT_EQ(window.Deviation(), base::Milliseconds(100));
+  window.AddSample(base::Seconds(40));
+  window.AddSample(base::Seconds(20));
+  EXPECT_EQ(window.Mean(), base::Seconds(30));
+  EXPECT_EQ(window.Deviation(), base::Seconds(10));
+}
+
+}  // namespace base
diff --git a/base/native_library_mac.mm b/base/native_library_mac.mm
index 452dcaf..55f8bdf 100644
--- a/base/native_library_mac.mm
+++ b/base/native_library_mac.mm
@@ -7,10 +7,10 @@
 #include <dlfcn.h>
 #include <mach-o/getsect.h>
 
+#include "base/apple/scoped_cftyperef.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/strings/strcat.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
@@ -64,7 +64,7 @@
     native_lib->objc_status = OBJC_UNKNOWN;
     return native_lib;
   }
-  ScopedCFTypeRef<CFURLRef> url(CFURLCreateFromFileSystemRepresentation(
+  apple::ScopedCFTypeRef<CFURLRef> url(CFURLCreateFromFileSystemRepresentation(
       kCFAllocatorDefault, (const UInt8*)library_path.value().c_str(),
       checked_cast<CFIndex>(library_path.value().length()), true));
   if (!url)
@@ -103,10 +103,10 @@
 
   // Get the function pointer using the right API for the type.
   if (library->type == BUNDLE) {
-    ScopedCFTypeRef<CFStringRef> symbol_name(CFStringCreateWithCString(
+    apple::ScopedCFTypeRef<CFStringRef> symbol_name(CFStringCreateWithCString(
         kCFAllocatorDefault, name, kCFStringEncodingUTF8));
     function_pointer =
-        CFBundleGetFunctionPointerForName(library->bundle, symbol_name);
+        CFBundleGetFunctionPointerForName(library->bundle, symbol_name.get());
   } else {
     function_pointer = dlsym(library->dylib, name);
   }
diff --git a/base/nix/mime_util_xdg_fuzzer.cc b/base/nix/mime_util_xdg_fuzzer.cc
new file mode 100644
index 0000000..3efd6a0
--- /dev/null
+++ b/base/nix/mime_util_xdg_fuzzer.cc
@@ -0,0 +1,38 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+#include <string>
+
+#include "base/containers/span.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/nix/mime_util_xdg.h"
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  base::ScopedTempDir temp_dir;
+  if (!temp_dir.CreateUniqueTempDir()) {
+    // Not a fuzzer error, so we return 0.
+    LOG(ERROR) << "Failed to create temp dir";
+    return 0;
+  }
+
+  // The parser reads file $XDG_DATA_DIRS/mime/mime.cache.
+  setenv("XDG_DATA_DIRS", temp_dir.GetPath().value().c_str(), 1);
+  base::FilePath mime_dir = temp_dir.GetPath().Append("mime");
+  base::FilePath mime_cache = mime_dir.Append("mime.cache");
+  if (!base::CreateDirectory(mime_dir) ||
+      !base::WriteFile(mime_cache, base::make_span(data, size))) {
+    LOG(ERROR) << "Failed to create " << mime_cache;
+    // Not a fuzzer error, so we return 0.
+    return 0;
+  }
+
+  base::FilePath dummy_path("foo.txt");
+  std::string type = base::nix::GetFileMimeType(dummy_path);
+  return 0;
+}
diff --git a/base/no_destructor_nocompile.nc b/base/no_destructor_nocompile.nc
new file mode 100644
index 0000000..90bf1da
--- /dev/null
+++ b/base/no_destructor_nocompile.nc
@@ -0,0 +1,18 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/no_destructor.h"
+
+namespace base {
+
+void WontCompile() {
+  // NoDestructor should only be used for non-trivially destructible types;
+  // trivial types can simply be directly declared as globals.
+  static NoDestructor<bool> x;  // expected-error@*:* {{static assertion failed due to requirement '!std::is_trivially_destructible_v<bool>'}}
+}
+
+}  // namespace base
diff --git a/base/no_destructor_unittest.nc b/base/no_destructor_unittest.nc
deleted file mode 100644
index f3ab468..0000000
--- a/base/no_destructor_unittest.nc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/no_destructor.h"
-
-#include <string>
-
-namespace base {
-
-#if defined(NCTEST_NODESTRUCTOR_REQUIRES_NONTRIVIAL_DESTRUCTOR) // [r"static assertion failed due to requirement '!std::is_trivially_destructible_v<bool>'"]
-
-// Attempt to make a NoDestructor for a type with a trivial destructor.
-void WontCompile() {
-  NoDestructor<bool> nd;
-}
-
-#endif
-
-}  // namespace base
diff --git a/base/numerics/BUILD.gn b/base/numerics/BUILD.gn
index 35d4b94..e6f2215 100644
--- a/base/numerics/BUILD.gn
+++ b/base/numerics/BUILD.gn
@@ -25,5 +25,6 @@
     "ranges.h",
     "safe_conversions.h",
     "safe_math.h",
+    "wrapping_math.h",
   ]
 }
diff --git a/base/numerics/checked_math.h b/base/numerics/checked_math.h
index c84a4f9..c25bef0 100644
--- a/base/numerics/checked_math.h
+++ b/base/numerics/checked_math.h
@@ -17,7 +17,7 @@
 
 template <typename T>
 class CheckedNumeric {
-  static_assert(std::is_arithmetic<T>::value,
+  static_assert(std::is_arithmetic_v<T>,
                 "CheckedNumeric<T>: T must be a numeric type.");
 
  public:
@@ -42,7 +42,7 @@
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to CheckedNumerics to make them easier to use.
   template <typename Src,
-            typename = std::enable_if_t<std::is_arithmetic<Src>::value>>
+            typename = std::enable_if_t<std::is_arithmetic_v<Src>>>
   // NOLINTNEXTLINE(google-explicit-constructor)
   constexpr CheckedNumeric(Src value) : state_(value) {}
 
@@ -144,14 +144,14 @@
 
   constexpr CheckedNumeric operator-() const {
     // Use an optimized code path for a known run-time variable.
-    if (!IsConstantEvaluated() && std::is_signed<T>::value &&
-        std::is_floating_point<T>::value) {
+    if (!IsConstantEvaluated() && std::is_signed_v<T> &&
+        std::is_floating_point_v<T>) {
       return FastRuntimeNegate();
     }
     // The negation of two's complement int min is int min.
     const bool is_valid =
         IsValid() &&
-        (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+        (!std::is_signed_v<T> || std::is_floating_point_v<T> ||
          NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
     return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
   }
diff --git a/base/numerics/checked_math_impl.h b/base/numerics/checked_math_impl.h
index fa45480..e8dbd3e 100644
--- a/base/numerics/checked_math_impl.h
+++ b/base/numerics/checked_math_impl.h
@@ -22,7 +22,7 @@
 
 template <typename T>
 constexpr bool CheckedAddImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+  static_assert(std::is_integral_v<T>, "Type must be integral");
   // Since the value of x+y is undefined if we have a signed type, we compute
   // it using the unsigned type of the same size.
   using UnsignedDst = typename std::make_unsigned<T>::type;
@@ -32,10 +32,11 @@
   const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
   // Addition is valid if the sign of (x + y) is equal to either that of x or
   // that of y.
-  if (std::is_signed<T>::value
+  if (std::is_signed_v<T>
           ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
-          : uresult < uy)  // Unsigned is either valid or underflow.
+          : uresult < uy) {  // Unsigned is either valid or underflow.
     return false;
+  }
   *result = static_cast<T>(uresult);
   return true;
 }
@@ -44,10 +45,10 @@
 struct CheckedAddOp {};
 
 template <typename T, typename U>
-struct CheckedAddOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedAddOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -85,7 +86,7 @@
 
 template <typename T>
 constexpr bool CheckedSubImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+  static_assert(std::is_integral_v<T>, "Type must be integral");
   // Since the value of x+y is undefined if we have a signed type, we compute
   // it using the unsigned type of the same size.
   using UnsignedDst = typename std::make_unsigned<T>::type;
@@ -95,10 +96,11 @@
   const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
   // Subtraction is valid if either x and y have same sign, or (x-y) and x have
   // the same sign.
-  if (std::is_signed<T>::value
+  if (std::is_signed_v<T>
           ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0
-          : x < y)
+          : x < y) {
     return false;
+  }
   *result = static_cast<T>(uresult);
   return true;
 }
@@ -107,10 +109,10 @@
 struct CheckedSubOp {};
 
 template <typename T, typename U>
-struct CheckedSubOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedSubOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -148,7 +150,7 @@
 
 template <typename T>
 constexpr bool CheckedMulImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+  static_assert(std::is_integral_v<T>, "Type must be integral");
   // Since the value of x*y is potentially undefined if we have a signed type,
   // we compute it using the unsigned type of the same size.
   using UnsignedDst = typename std::make_unsigned<T>::type;
@@ -157,13 +159,14 @@
   const UnsignedDst uy = SafeUnsignedAbs(y);
   const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
   const bool is_negative =
-      std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+      std::is_signed_v<T> && static_cast<SignedDst>(x ^ y) < 0;
   // We have a fast out for unsigned identity or zero on the second operand.
   // After that it's an unsigned overflow check on the absolute value, with
   // a +1 bound for a negative result.
-  if (uy > UnsignedDst(!std::is_signed<T>::value || is_negative) &&
-      ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy)
+  if (uy > UnsignedDst(!std::is_signed_v<T> || is_negative) &&
+      ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy) {
     return false;
+  }
   *result = static_cast<T>(is_negative ? 0 - uresult : uresult);
   return true;
 }
@@ -172,10 +175,10 @@
 struct CheckedMulOp {};
 
 template <typename T, typename U>
-struct CheckedMulOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedMulOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -217,10 +220,10 @@
 struct CheckedDivOp {};
 
 template <typename T, typename U>
-struct CheckedDivOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedDivOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -231,7 +234,7 @@
     // combination of types needed to trigger this case.
     using Promotion = typename BigEnoughPromotion<T, U>::type;
     if (BASE_NUMERICS_UNLIKELY(
-            (std::is_signed<T>::value && std::is_signed<U>::value &&
+            (std::is_signed_v<T> && std::is_signed_v<U> &&
              IsTypeInRangeForNumericType<T, Promotion>::value &&
              static_cast<Promotion>(x) ==
                  std::numeric_limits<Promotion>::lowest() &&
@@ -258,10 +261,10 @@
 struct CheckedModOp {};
 
 template <typename T, typename U>
-struct CheckedModOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedModOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -270,7 +273,7 @@
 
     using Promotion = typename BigEnoughPromotion<T, U>::type;
     if (BASE_NUMERICS_UNLIKELY(
-            (std::is_signed<T>::value && std::is_signed<U>::value &&
+            (std::is_signed_v<T> && std::is_signed_v<U> &&
              IsTypeInRangeForNumericType<T, Promotion>::value &&
              static_cast<Promotion>(x) ==
                  std::numeric_limits<Promotion>::lowest() &&
@@ -295,10 +298,10 @@
 // of bits in the promoted type are undefined. Shifts of negative values
 // are undefined. Otherwise it is defined when the result fits.
 template <typename T, typename U>
-struct CheckedLshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedLshOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = T;
   template <typename V>
   static constexpr bool Do(T x, U shift, V* result) {
@@ -313,9 +316,10 @@
     }
 
     // Handle the legal corner-case of a full-width signed shift of zero.
-    if (!std::is_signed<T>::value || x ||
-        as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits))
+    if (!std::is_signed_v<T> || x ||
+        as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits)) {
       return false;
+    }
     *result = 0;
     return true;
   }
@@ -328,10 +332,10 @@
 // of bits in the promoted type are undefined. Otherwise, it is always defined,
 // but a right shift of a negative value is implementation-dependent.
 template <typename T, typename U>
-struct CheckedRshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedRshOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = T;
   template <typename V>
   static constexpr bool Do(T x, U shift, V* result) {
@@ -354,10 +358,10 @@
 
 // For simplicity we support only unsigned integer results.
 template <typename T, typename U>
-struct CheckedAndOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedAndOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename std::make_unsigned<
       typename MaxExponentPromotion<T, U>::type>::type;
   template <typename V>
@@ -376,10 +380,10 @@
 
 // For simplicity we support only unsigned integers.
 template <typename T, typename U>
-struct CheckedOrOp<T,
-                   U,
-                   typename std::enable_if<std::is_integral<T>::value &&
-                                           std::is_integral<U>::value>::type> {
+struct CheckedOrOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename std::make_unsigned<
       typename MaxExponentPromotion<T, U>::type>::type;
   template <typename V>
@@ -398,10 +402,10 @@
 
 // For simplicity we support only unsigned integers.
 template <typename T, typename U>
-struct CheckedXorOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct CheckedXorOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename std::make_unsigned<
       typename MaxExponentPromotion<T, U>::type>::type;
   template <typename V>
@@ -424,8 +428,7 @@
 struct CheckedMaxOp<
     T,
     U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
+    std::enable_if_t<std::is_arithmetic_v<T> && std::is_arithmetic_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -448,8 +451,7 @@
 struct CheckedMinOp<
     T,
     U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
+    std::enable_if_t<std::is_arithmetic_v<T> && std::is_arithmetic_v<U>>> {
   using result_type = typename LowestValuePromotion<T, U>::type;
   template <typename V>
   static constexpr bool Do(T x, U y, V* result) {
@@ -465,22 +467,21 @@
 
 // This is just boilerplate that wraps the standard floating point arithmetic.
 // A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                              \
-  template <typename T, typename U>                                      \
-  struct Checked##NAME##Op<                                              \
-      T, U,                                                              \
-      typename std::enable_if<std::is_floating_point<T>::value ||        \
-                              std::is_floating_point<U>::value>::type> { \
-    using result_type = typename MaxExponentPromotion<T, U>::type;       \
-    template <typename V>                                                \
-    static constexpr bool Do(T x, U y, V* result) {                      \
-      using Promotion = typename MaxExponentPromotion<T, U>::type;       \
-      const Promotion presult = x OP y;                                  \
-      if (!IsValueInRangeForNumericType<V>(presult))                     \
-        return false;                                                    \
-      *result = static_cast<V>(presult);                                 \
-      return true;                                                       \
-    }                                                                    \
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                                 \
+  template <typename T, typename U>                                         \
+  struct Checked##NAME##Op<T, U,                                            \
+                           std::enable_if_t<std::is_floating_point_v<T> ||  \
+                                            std::is_floating_point_v<U>>> { \
+    using result_type = typename MaxExponentPromotion<T, U>::type;          \
+    template <typename V>                                                   \
+    static constexpr bool Do(T x, U y, V* result) {                         \
+      using Promotion = typename MaxExponentPromotion<T, U>::type;          \
+      const Promotion presult = x OP y;                                     \
+      if (!IsValueInRangeForNumericType<V>(presult))                        \
+        return false;                                                       \
+      *result = static_cast<V>(presult);                                    \
+      return true;                                                          \
+    }                                                                       \
   };
 
 BASE_FLOAT_ARITHMETIC_OPS(Add, +)
@@ -502,10 +503,10 @@
 template <typename NumericType>
 struct GetNumericRepresentation {
   static const NumericRepresentation value =
-      std::is_integral<NumericType>::value
+      std::is_integral_v<NumericType>
           ? NUMERIC_INTEGER
-          : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
-                                                        : NUMERIC_UNKNOWN);
+          : (std::is_floating_point_v<NumericType> ? NUMERIC_FLOATING
+                                                   : NUMERIC_UNKNOWN);
 };
 
 template <typename T,
@@ -520,7 +521,7 @@
   constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
       : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
         value_(WellDefinedConversionOrZero(value, is_valid_)) {
-    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+    static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
   }
 
   template <typename Src>
@@ -536,9 +537,8 @@
   template <typename Src>
   static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid) {
     using SrcType = typename internal::UnderlyingType<Src>::type;
-    return (std::is_integral<SrcType>::value || is_valid)
-               ? static_cast<T>(value)
-               : 0;
+    return (std::is_integral_v<SrcType> || is_valid) ? static_cast<T>(value)
+                                                     : 0;
   }
 
   // is_valid_ precedes value_ because member initializers in the constructors
diff --git a/base/numerics/clamped_math.h b/base/numerics/clamped_math.h
index a72e4a7..66112ed 100644
--- a/base/numerics/clamped_math.h
+++ b/base/numerics/clamped_math.h
@@ -17,7 +17,7 @@
 
 template <typename T>
 class ClampedNumeric {
-  static_assert(std::is_arithmetic<T>::value,
+  static_assert(std::is_arithmetic_v<T>,
                 "ClampedNumeric<T>: T must be a numeric type.");
 
  public:
diff --git a/base/numerics/clamped_math_impl.h b/base/numerics/clamped_math_impl.h
index 373b1ae..4608886 100644
--- a/base/numerics/clamped_math_impl.h
+++ b/base/numerics/clamped_math_impl.h
@@ -21,9 +21,9 @@
 namespace base {
 namespace internal {
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value &&
-                                  std::is_signed<T>::value>::type* = nullptr>
+template <
+    typename T,
+    std::enable_if_t<std::is_integral_v<T> && std::is_signed_v<T>>* = nullptr>
 constexpr T SaturatedNegWrapper(T value) {
   return IsConstantEvaluated() || !ClampedNegFastOp<T>::is_supported
              ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
@@ -32,22 +32,19 @@
              : ClampedNegFastOp<T>::Do(value);
 }
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value &&
-                                  !std::is_signed<T>::value>::type* = nullptr>
+template <
+    typename T,
+    std::enable_if_t<std::is_integral_v<T> && !std::is_signed_v<T>>* = nullptr>
 constexpr T SaturatedNegWrapper(T value) {
   return T(0);
 }
 
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
 constexpr T SaturatedNegWrapper(T value) {
   return -value;
 }
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
 constexpr T SaturatedAbsWrapper(T value) {
   // The calculation below is a static identity for unsigned types, but for
   // signed integer types it provides a non-branching, saturated absolute value.
@@ -62,9 +59,7 @@
       IsValueNegative<T>(static_cast<T>(SafeUnsignedAbs(value))));
 }
 
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
 constexpr T SaturatedAbsWrapper(T value) {
   return value < 0 ? -value : value;
 }
@@ -73,17 +68,17 @@
 struct ClampedAddOp {};
 
 template <typename T, typename U>
-struct ClampedAddOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedAddOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
     if (!IsConstantEvaluated() && ClampedAddFastOp<T, U>::is_supported)
       return ClampedAddFastOp<T, U>::template Do<V>(x, y);
 
-    static_assert(std::is_same<V, result_type>::value ||
+    static_assert(std::is_same_v<V, result_type> ||
                       IsTypeInRangeForNumericType<U, V>::value,
                   "The saturation result cannot be determined from the "
                   "provided types.");
@@ -99,17 +94,17 @@
 struct ClampedSubOp {};
 
 template <typename T, typename U>
-struct ClampedSubOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedSubOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
     if (!IsConstantEvaluated() && ClampedSubFastOp<T, U>::is_supported)
       return ClampedSubFastOp<T, U>::template Do<V>(x, y);
 
-    static_assert(std::is_same<V, result_type>::value ||
+    static_assert(std::is_same_v<V, result_type> ||
                       IsTypeInRangeForNumericType<U, V>::value,
                   "The saturation result cannot be determined from the "
                   "provided types.");
@@ -125,10 +120,10 @@
 struct ClampedMulOp {};
 
 template <typename T, typename U>
-struct ClampedMulOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedMulOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
@@ -148,10 +143,10 @@
 struct ClampedDivOp {};
 
 template <typename T, typename U>
-struct ClampedDivOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedDivOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
@@ -168,10 +163,10 @@
 struct ClampedModOp {};
 
 template <typename T, typename U>
-struct ClampedModOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedModOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
@@ -188,14 +183,14 @@
 // Left shift. Non-zero values saturate in the direction of the sign. A zero
 // shifted by any value always results in zero.
 template <typename T, typename U>
-struct ClampedLshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedLshOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = T;
   template <typename V = result_type>
   static constexpr V Do(T x, U shift) {
-    static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+    static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
     if (BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
       // Shift as unsigned to avoid undefined behavior.
       V result = static_cast<V>(as_unsigned(x) << shift);
@@ -212,14 +207,14 @@
 
 // Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
 template <typename T, typename U>
-struct ClampedRshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedRshOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = T;
   template <typename V = result_type>
   static constexpr V Do(T x, U shift) {
-    static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+    static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
     // Signed right shift is odd, because it saturates to -1 or 0.
     const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
     return BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
@@ -232,10 +227,10 @@
 struct ClampedAndOp {};
 
 template <typename T, typename U>
-struct ClampedAndOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedAndOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename std::make_unsigned<
       typename MaxExponentPromotion<T, U>::type>::type;
   template <typename V>
@@ -249,10 +244,10 @@
 
 // For simplicity we promote to unsigned integers.
 template <typename T, typename U>
-struct ClampedOrOp<T,
-                   U,
-                   typename std::enable_if<std::is_integral<T>::value &&
-                                           std::is_integral<U>::value>::type> {
+struct ClampedOrOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename std::make_unsigned<
       typename MaxExponentPromotion<T, U>::type>::type;
   template <typename V>
@@ -266,10 +261,10 @@
 
 // For simplicity we support only unsigned integers.
 template <typename T, typename U>
-struct ClampedXorOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
+struct ClampedXorOp<
+    T,
+    U,
+    std::enable_if_t<std::is_integral_v<T> && std::is_integral_v<U>>> {
   using result_type = typename std::make_unsigned<
       typename MaxExponentPromotion<T, U>::type>::type;
   template <typename V>
@@ -285,8 +280,7 @@
 struct ClampedMaxOp<
     T,
     U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
+    std::enable_if_t<std::is_arithmetic_v<T> && std::is_arithmetic_v<U>>> {
   using result_type = typename MaxExponentPromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
@@ -302,8 +296,7 @@
 struct ClampedMinOp<
     T,
     U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
+    std::enable_if_t<std::is_arithmetic_v<T> && std::is_arithmetic_v<U>>> {
   using result_type = typename LowestValuePromotion<T, U>::type;
   template <typename V = result_type>
   static constexpr V Do(T x, U y) {
@@ -314,17 +307,16 @@
 
 // This is just boilerplate that wraps the standard floating point arithmetic.
 // A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                              \
-  template <typename T, typename U>                                      \
-  struct Clamped##NAME##Op<                                              \
-      T, U,                                                              \
-      typename std::enable_if<std::is_floating_point<T>::value ||        \
-                              std::is_floating_point<U>::value>::type> { \
-    using result_type = typename MaxExponentPromotion<T, U>::type;       \
-    template <typename V = result_type>                                  \
-    static constexpr V Do(T x, U y) {                                    \
-      return saturated_cast<V>(x OP y);                                  \
-    }                                                                    \
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                                 \
+  template <typename T, typename U>                                         \
+  struct Clamped##NAME##Op<T, U,                                            \
+                           std::enable_if_t<std::is_floating_point_v<T> ||  \
+                                            std::is_floating_point_v<U>>> { \
+    using result_type = typename MaxExponentPromotion<T, U>::type;          \
+    template <typename V = result_type>                                     \
+    static constexpr V Do(T x, U y) {                                       \
+      return saturated_cast<V>(x OP y);                                     \
+    }                                                                       \
   };
 
 BASE_FLOAT_ARITHMETIC_OPS(Add, +)
diff --git a/base/numerics/ranges.h b/base/numerics/ranges.h
index 2d8c8b7..0d18964 100644
--- a/base/numerics/ranges.h
+++ b/base/numerics/ranges.h
@@ -12,7 +12,7 @@
 
 template <typename T>
 constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be arithmetic");
+  static_assert(std::is_arithmetic_v<T>, "Argument must be arithmetic");
   return std::abs(rhs - lhs) <= tolerance;
 }
 
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
index 2fc2009..83fbd1c 100644
--- a/base/numerics/safe_conversions.h
+++ b/base/numerics/safe_conversions.h
@@ -51,10 +51,9 @@
 struct IsValueInRangeFastOp<
     Dst,
     Src,
-    typename std::enable_if<
-        std::is_integral<Dst>::value && std::is_integral<Src>::value &&
-        std::is_signed<Dst>::value && std::is_signed<Src>::value &&
-        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+    std::enable_if_t<std::is_integral_v<Dst> && std::is_integral_v<Src> &&
+                     std::is_signed_v<Dst> && std::is_signed_v<Src> &&
+                     !IsTypeInRangeForNumericType<Dst, Src>::value>> {
   static constexpr bool is_supported = true;
 
   static constexpr bool Do(Src value) {
@@ -69,10 +68,9 @@
 struct IsValueInRangeFastOp<
     Dst,
     Src,
-    typename std::enable_if<
-        std::is_integral<Dst>::value && std::is_integral<Src>::value &&
-        !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
-        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+    std::enable_if_t<std::is_integral_v<Dst> && std::is_integral_v<Src> &&
+                     !std::is_signed_v<Dst> && std::is_signed_v<Src> &&
+                     !IsTypeInRangeForNumericType<Dst, Src>::value>> {
   static constexpr bool is_supported = true;
 
   static constexpr bool Do(Src value) {
@@ -148,7 +146,7 @@
              ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
                                                  : S<Dst>::Underflow())
              // Skip this check for integral Src, which cannot be NaN.
-             : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+             : (std::is_integral_v<Src> || !constraint.IsUnderflowFlagSet()
                     ? S<Dst>::Overflow()
                     : S<Dst>::NaN());
 }
@@ -169,9 +167,8 @@
 struct SaturateFastOp<
     Dst,
     Src,
-    typename std::enable_if<std::is_integral<Src>::value &&
-                            std::is_integral<Dst>::value &&
-                            SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+    std::enable_if_t<std::is_integral_v<Src> && std::is_integral_v<Dst> &&
+                     SaturateFastAsmOp<Dst, Src>::is_supported>> {
   static constexpr bool is_supported = true;
   static constexpr Dst Do(Src value) {
     return SaturateFastAsmOp<Dst, Src>::Do(value);
@@ -182,9 +179,8 @@
 struct SaturateFastOp<
     Dst,
     Src,
-    typename std::enable_if<std::is_integral<Src>::value &&
-                            std::is_integral<Dst>::value &&
-                            !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+    std::enable_if_t<std::is_integral_v<Src> && std::is_integral_v<Dst> &&
+                     !SaturateFastAsmOp<Dst, Src>::is_supported>> {
   static constexpr bool is_supported = true;
   static constexpr Dst Do(Src value) {
     // The exact order of the following is structured to hit the correct
@@ -209,8 +205,8 @@
 constexpr Dst saturated_cast(Src value) {
   using SrcType = typename UnderlyingType<Src>::type;
   return !IsConstantEvaluated() && SaturateFastOp<Dst, SrcType>::is_supported &&
-                 std::is_same<SaturationHandler<Dst>,
-                              SaturationDefaultLimits<Dst>>::value
+                 std::is_same_v<SaturationHandler<Dst>,
+                                SaturationDefaultLimits<Dst>>
              ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
              : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
                    static_cast<SrcType>(value),
@@ -225,7 +221,7 @@
 constexpr Dst strict_cast(Src value) {
   using SrcType = typename UnderlyingType<Src>::type;
   static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
-  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+  static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
 
   // If you got here from a compiler error, it's because you tried to assign
   // from a source type to a destination type that has insufficient range.
@@ -251,8 +247,8 @@
 struct IsNumericRangeContained<
     Dst,
     Src,
-    typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
-                            ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+    std::enable_if_t<ArithmeticOrUnderlyingEnum<Dst>::value &&
+                     ArithmeticOrUnderlyingEnum<Src>::value>> {
   static constexpr bool value =
       StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
       NUMERIC_RANGE_CONTAINED;
@@ -305,8 +301,7 @@
   // If none of that works, you may be better served with the checked_cast<> or
   // saturated_cast<> template functions for your particular use case.
   template <typename Dst,
-            typename std::enable_if<
-                IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
+            std::enable_if_t<IsNumericRangeContained<Dst, T>::value>* = nullptr>
   constexpr operator Dst() const {
     return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
   }
@@ -323,13 +318,12 @@
   return value;
 }
 
-#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)              \
-  template <typename L, typename R,                                     \
-            typename std::enable_if<                                    \
-                internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
-  constexpr bool operator OP(const L lhs, const R rhs) {                \
-    return SafeCompare<NAME, typename UnderlyingType<L>::type,          \
-                       typename UnderlyingType<R>::type>(lhs, rhs);     \
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)                     \
+  template <typename L, typename R,                                            \
+            std::enable_if_t<internal::Is##CLASS##Op<L, R>::value>* = nullptr> \
+  constexpr bool operator OP(const L lhs, const R rhs) {                       \
+    return SafeCompare<NAME, typename UnderlyingType<L>::type,                 \
+                       typename UnderlyingType<R>::type>(lhs, rhs);            \
   }
 
 BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
@@ -357,29 +351,50 @@
 using SizeT = StrictNumeric<size_t>;
 
 // floating -> integral conversions that saturate and thus can actually return
-// an integral type.  In most cases, these should be preferred over the std::
-// versions.
+// an integral type.
+//
+// Generally, what you want is saturated_cast<Dst>(std::nearbyint(x)), which
+// rounds correctly according to IEEE-754 (round to nearest, ties go to nearest
+// even number; this avoids bias). If your code is performance-critical
+// and you are sure that you will never overflow, you can use std::lrint()
+// or std::llrint(), which return a long or long long directly.
+//
+// Below are convenience functions around similar patterns, except that
+// they round in nonstandard directions and will generally be slower.
+
+// Rounds towards negative infinity (i.e., down).
 template <typename Dst = int,
           typename Src,
-          typename = std::enable_if_t<std::is_integral<Dst>::value &&
-                                      std::is_floating_point<Src>::value>>
+          typename = std::enable_if_t<std::is_integral_v<Dst> &&
+                                      std::is_floating_point_v<Src>>>
 Dst ClampFloor(Src value) {
   return saturated_cast<Dst>(std::floor(value));
 }
+
+// Rounds towards positive infinity (i.e., up).
 template <typename Dst = int,
           typename Src,
-          typename = std::enable_if_t<std::is_integral<Dst>::value &&
-                                      std::is_floating_point<Src>::value>>
+          typename = std::enable_if_t<std::is_integral_v<Dst> &&
+                                      std::is_floating_point_v<Src>>>
 Dst ClampCeil(Src value) {
   return saturated_cast<Dst>(std::ceil(value));
 }
+
+// Rounds towards nearest integer, with ties away from zero.
+// This means that 0.5 will be rounded to 1 and 1.5 will be rounded to 2.
+// Similarly, -0.5 will be rounded to -1 and -1.5 will be rounded to -2.
+//
+// This is normally not what you want accuracy-wise (it introduces a small bias
+// away from zero), and it is not the fastest option, but it is frequently what
+// existing code expects. Compare with saturated_cast<Dst>(std::nearbyint(x))
+// or std::lrint(x), which would round 0.5 and -0.5 to 0 but 1.5 to 2 and
+// -1.5 to -2.
 template <typename Dst = int,
           typename Src,
-          typename = std::enable_if_t<std::is_integral<Dst>::value &&
-                                      std::is_floating_point<Src>::value>>
+          typename = std::enable_if_t<std::is_integral_v<Dst> &&
+                                      std::is_floating_point_v<Src>>>
 Dst ClampRound(Src value) {
-  const Src rounded =
-      (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
+  const Src rounded = std::round(value);
   return saturated_cast<Dst>(rounded);
 }
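
Editor's note: a minimal sketch (not part of the CL) contrasting the tie-breaking behavior described in the comments above; it assumes base/numerics/safe_conversions.h is on the include path, and the main() and sample values are illustrative only.

// ClampRound rounds ties away from zero; saturated_cast<int>(std::nearbyint(x))
// rounds ties to nearest even under the default rounding mode.
#include <cmath>
#include <cstdio>

#include "base/numerics/safe_conversions.h"

int main() {
  const double values[] = {0.5, 1.5, -0.5, -1.5};
  for (double v : values) {
    int away_from_zero = base::ClampRound(v);                    // 1, 2, -1, -2
    int to_even = base::saturated_cast<int>(std::nearbyint(v));  // 0, 2, 0, -2
    std::printf("%+.1f -> ClampRound=%d nearbyint=%d\n", v, away_from_zero,
                to_even);
  }
  return 0;
}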
 
diff --git a/base/numerics/safe_conversions_arm_impl.h b/base/numerics/safe_conversions_arm_impl.h
index e4b5730..abbf71e 100644
--- a/base/numerics/safe_conversions_arm_impl.h
+++ b/base/numerics/safe_conversions_arm_impl.h
@@ -18,17 +18,17 @@
 template <typename Dst, typename Src>
 struct SaturateFastAsmOp {
   static constexpr bool is_supported =
-      kEnableAsmCode && std::is_signed<Src>::value &&
-      std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+      kEnableAsmCode && std::is_signed_v<Src> && std::is_integral_v<Dst> &&
+      std::is_integral_v<Src> &&
       IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
       IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
       !IsTypeInRangeForNumericType<Dst, Src>::value;
 
   __attribute__((always_inline)) static Dst Do(Src value) {
     int32_t src = value;
-    typename std::conditional<std::is_signed<Dst>::value, int32_t,
-                              uint32_t>::type result;
-    if (std::is_signed<Dst>::value) {
+    typename std::conditional<std::is_signed_v<Dst>, int32_t, uint32_t>::type
+        result;
+    if (std::is_signed_v<Dst>) {
       asm("ssat %[dst], %[shift], %[src]"
           : [dst] "=r"(result)
           : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index d0a9d1a..9231468 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -25,7 +25,7 @@
 // we can compute an analog using std::numeric_limits<>::digits.
 template <typename NumericType>
 struct MaxExponent {
-  static const int value = std::is_floating_point<NumericType>::value
+  static const int value = std::is_floating_point_v<NumericType>
                                ? std::numeric_limits<NumericType>::max_exponent
                                : std::numeric_limits<NumericType>::digits + 1;
 };
@@ -34,8 +34,8 @@
 // hacks.
 template <typename NumericType>
 struct IntegerBitsPlusSign {
-  static const int value = std::numeric_limits<NumericType>::digits +
-                           std::is_signed<NumericType>::value;
+  static const int value =
+      std::numeric_limits<NumericType>::digits + std::is_signed_v<NumericType>;
 };
 
 // Helper templates for integer manipulations.
@@ -47,17 +47,15 @@
 
 // Determines if a numeric value is negative without throwing compiler
 // warnings on: unsigned(value) < 0.
-template <typename T,
-          typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_signed_v<T>>* = nullptr>
 constexpr bool IsValueNegative(T value) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+  static_assert(std::is_arithmetic_v<T>, "Argument must be numeric.");
   return value < 0;
 }
 
-template <typename T,
-          typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<!std::is_signed_v<T>>* = nullptr>
 constexpr bool IsValueNegative(T) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+  static_assert(std::is_arithmetic_v<T>, "Argument must be numeric.");
   return false;
 }
 
@@ -68,7 +66,7 @@
 constexpr typename std::make_signed<T>::type ConditionalNegate(
     T x,
     bool is_negative) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+  static_assert(std::is_integral_v<T>, "Type must be integral");
   using SignedT = typename std::make_signed<T>::type;
   using UnsignedT = typename std::make_unsigned<T>::type;
   return static_cast<SignedT>((static_cast<UnsignedT>(x) ^
@@ -79,7 +77,7 @@
 // This performs a safe, absolute value via unsigned overflow.
 template <typename T>
 constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+  static_assert(std::is_integral_v<T>, "Type must be integral");
   using UnsignedT = typename std::make_unsigned<T>::type;
   return IsValueNegative(value)
              ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
@@ -136,10 +134,10 @@
 
 template <typename Dst,
           typename Src,
-          IntegerRepresentation DstSign = std::is_signed<Dst>::value
+          IntegerRepresentation DstSign = std::is_signed_v<Dst>
                                               ? INTEGER_REPRESENTATION_SIGNED
                                               : INTEGER_REPRESENTATION_UNSIGNED,
-          IntegerRepresentation SrcSign = std::is_signed<Src>::value
+          IntegerRepresentation SrcSign = std::is_signed_v<Src>
                                               ? INTEGER_REPRESENTATION_SIGNED
                                               : INTEGER_REPRESENTATION_UNSIGNED>
 struct StaticDstRangeRelationToSrcRange;
@@ -236,14 +234,12 @@
        SrcLimits::digits < DstLimits::digits)
           ? (DstLimits::digits - SrcLimits::digits)
           : 0;
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
 
   // Masks out the integer bits that are beyond the precision of the
   // intermediate type used for comparison.
   static constexpr T Adjust(T value) {
-    static_assert(std::is_same<T, Dst>::value, "");
+    static_assert(std::is_same_v<T, Dst>, "");
     static_assert(kShift < DstLimits::digits, "");
     using UnsignedDst = typename std::make_unsigned_t<T>;
     return static_cast<T>(ConditionalNegate(
@@ -252,10 +248,9 @@
   }
 
   template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
+            std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
   static constexpr T Adjust(T value) {
-    static_assert(std::is_same<T, Dst>::value, "");
+    static_assert(std::is_same_v<T, Dst>, "");
     static_assert(kShift == 0, "");
     return value;
   }
@@ -268,10 +263,10 @@
           typename Src,
           template <typename>
           class Bounds,
-          IntegerRepresentation DstSign = std::is_signed<Dst>::value
+          IntegerRepresentation DstSign = std::is_signed_v<Dst>
                                               ? INTEGER_REPRESENTATION_SIGNED
                                               : INTEGER_REPRESENTATION_UNSIGNED,
-          IntegerRepresentation SrcSign = std::is_signed<Src>::value
+          IntegerRepresentation SrcSign = std::is_signed_v<Src>
                                               ? INTEGER_REPRESENTATION_SIGNED
                                               : INTEGER_REPRESENTATION_UNSIGNED,
           NumericRangeRepresentation DstRange =
@@ -373,7 +368,7 @@
     bool ge_zero = false;
     // Converting floating-point to integer will discard fractional part, so
     // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
-    if (std::is_floating_point<Src>::value) {
+    if (std::is_floating_point_v<Src>) {
       ge_zero = value > Src(-1);
     } else {
       ge_zero = value >= Src(0);
@@ -399,8 +394,8 @@
           template <typename> class Bounds = std::numeric_limits,
           typename Src>
 constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
-  static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
-  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+  static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
+  static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
   static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
   return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
 }
@@ -412,7 +407,7 @@
 #define INTEGER_FOR_DIGITS_AND_SIGN(I)                          \
   template <>                                                   \
   struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
-                                 std::is_signed<I>::value> {    \
+                                 std::is_signed_v<I>> {         \
     using type = I;                                             \
   }
 
@@ -432,7 +427,7 @@
 static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
               "Max integer size not supported for this toolchain.");
 
-template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+template <typename Integer, bool IsSigned = std::is_signed_v<Integer>>
 struct TwiceWiderInteger {
   using type =
       typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
@@ -467,13 +462,13 @@
 template <typename Lhs,
           typename Rhs,
           ArithmeticPromotionCategory Promotion =
-              std::is_signed<Lhs>::value
-                  ? (std::is_signed<Rhs>::value
+              std::is_signed_v<Lhs>
+                  ? (std::is_signed_v<Rhs>
                          ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
                                 ? LEFT_PROMOTION
                                 : RIGHT_PROMOTION)
                          : LEFT_PROMOTION)
-                  : (std::is_signed<Rhs>::value
+                  : (std::is_signed_v<Rhs>
                          ? RIGHT_PROMOTION
                          : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
                                 ? LEFT_PROMOTION
@@ -495,16 +490,15 @@
     typename Lhs,
     typename Rhs = Lhs,
     bool is_intmax_type =
-        std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
-            IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+        std::is_integral_v<typename MaxExponentPromotion<Lhs, Rhs>::type> &&
+        IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
                 value == IntegerBitsPlusSign<intmax_t>::value,
-    bool is_max_exponent =
-        StaticDstRangeRelationToSrcRange<
-            typename MaxExponentPromotion<Lhs, Rhs>::type,
-            Lhs>::value ==
-        NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
-            typename MaxExponentPromotion<Lhs, Rhs>::type,
-            Rhs>::value == NUMERIC_RANGE_CONTAINED>
+    bool is_max_exponent = StaticDstRangeRelationToSrcRange<
+                               typename MaxExponentPromotion<Lhs, Rhs>::type,
+                               Lhs>::value == NUMERIC_RANGE_CONTAINED &&
+                           StaticDstRangeRelationToSrcRange<
+                               typename MaxExponentPromotion<Lhs, Rhs>::type,
+                               Rhs>::value == NUMERIC_RANGE_CONTAINED>
 struct BigEnoughPromotion;
 
 // The side with the max exponent is big enough.
@@ -519,8 +513,8 @@
 struct BigEnoughPromotion<Lhs, Rhs, false, false> {
   using type =
       typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
-                                 std::is_signed<Lhs>::value ||
-                                     std::is_signed<Rhs>::value>::type;
+                                 std::is_signed_v<Lhs> ||
+                                     std::is_signed_v<Rhs>>::type;
   static const bool is_contained = true;
 };
 
@@ -538,12 +532,11 @@
 template <typename T, typename Lhs, typename Rhs = Lhs>
 struct IsIntegerArithmeticSafe {
   static const bool value =
-      !std::is_floating_point<T>::value &&
-      !std::is_floating_point<Lhs>::value &&
-      !std::is_floating_point<Rhs>::value &&
-      std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+      !std::is_floating_point_v<T> && !std::is_floating_point_v<Lhs> &&
+      !std::is_floating_point_v<Rhs> &&
+      std::is_signed_v<T> >= std::is_signed_v<Lhs> &&
       IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
-      std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+      std::is_signed_v<T> >= std::is_signed_v<Rhs> &&
       IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
 };
 
@@ -552,8 +545,8 @@
 template <typename Lhs,
           typename Rhs,
           bool is_promotion_possible = IsIntegerArithmeticSafe<
-              typename std::conditional<std::is_signed<Lhs>::value ||
-                                            std::is_signed<Rhs>::value,
+              typename std::conditional<std::is_signed_v<Lhs> ||
+                                            std::is_signed_v<Rhs>,
                                         intmax_t,
                                         uintmax_t>::type,
               typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
@@ -563,8 +556,8 @@
 struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
   using type =
       typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
-                                 std::is_signed<Lhs>::value ||
-                                     std::is_signed<Rhs>::value>::type;
+                                 std::is_signed_v<Lhs> ||
+                                     std::is_signed_v<Rhs>>::type;
   static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
   static const bool is_contained = true;
 };
@@ -576,19 +569,19 @@
 };
 
 // Extracts the underlying type from an enum.
-template <typename T, bool is_enum = std::is_enum<T>::value>
+template <typename T, bool is_enum = std::is_enum_v<T>>
 struct ArithmeticOrUnderlyingEnum;
 
 template <typename T>
 struct ArithmeticOrUnderlyingEnum<T, true> {
   using type = typename std::underlying_type<T>::type;
-  static const bool value = std::is_arithmetic<type>::value;
+  static const bool value = std::is_arithmetic_v<type>;
 };
 
 template <typename T>
 struct ArithmeticOrUnderlyingEnum<T, false> {
   using type = T;
-  static const bool value = std::is_arithmetic<type>::value;
+  static const bool value = std::is_arithmetic_v<type>;
 };
 
 // The following are helper templates used in the CheckedNumeric class.
@@ -605,7 +598,7 @@
 template <typename T>
 struct UnderlyingType {
   using type = typename ArithmeticOrUnderlyingEnum<T>::type;
-  static const bool is_numeric = std::is_arithmetic<type>::value;
+  static const bool is_numeric = std::is_arithmetic_v<type>;
   static const bool is_checked = false;
   static const bool is_clamped = false;
   static const bool is_strict = false;
@@ -669,7 +662,7 @@
 constexpr typename std::make_signed<
     typename base::internal::UnderlyingType<Src>::type>::type
 as_signed(const Src value) {
-  static_assert(std::is_integral<decltype(as_signed(value))>::value,
+  static_assert(std::is_integral_v<decltype(as_signed(value))>,
                 "Argument must be a signed or unsigned integer type.");
   return static_cast<decltype(as_signed(value))>(value);
 }
@@ -681,7 +674,7 @@
 constexpr typename std::make_unsigned<
     typename base::internal::UnderlyingType<Src>::type>::type
 as_unsigned(const Src value) {
-  static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+  static_assert(std::is_integral_v<decltype(as_unsigned(value))>,
                 "Argument must be a signed or unsigned integer type.");
   return static_cast<decltype(as_unsigned(value))>(value);
 }
@@ -698,7 +691,7 @@
 
 template <typename L, typename R>
 struct IsLess {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   static constexpr bool Test(const L lhs, const R rhs) {
     return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -718,7 +711,7 @@
 
 template <typename L, typename R>
 struct IsLessOrEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   static constexpr bool Test(const L lhs, const R rhs) {
     return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -738,7 +731,7 @@
 
 template <typename L, typename R>
 struct IsGreater {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   static constexpr bool Test(const L lhs, const R rhs) {
     return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -758,7 +751,7 @@
 
 template <typename L, typename R>
 struct IsGreaterOrEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   static constexpr bool Test(const L lhs, const R rhs) {
     return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -768,7 +761,7 @@
 
 template <typename L, typename R>
 struct IsEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   static constexpr bool Test(const L lhs, const R rhs) {
     return DstRangeRelationToSrcRange<R>(lhs) ==
@@ -780,7 +773,7 @@
 
 template <typename L, typename R>
 struct IsNotEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   static constexpr bool Test(const L lhs, const R rhs) {
     return DstRangeRelationToSrcRange<R>(lhs) !=
@@ -794,7 +787,7 @@
 // Binary arithmetic operations.
 template <template <typename, typename> class C, typename L, typename R>
 constexpr bool SafeCompare(const L lhs, const R rhs) {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                 "Types must be numeric.");
   using Promotion = BigEnoughPromotion<L, R>;
   using BigType = typename Promotion::type;
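
Editor's note: a standalone sketch (not part of the CL) of the "absolute value via unsigned overflow" trick that SafeUnsignedAbs above relies on; the UnsignedAbs name and main() are invented for illustration, not Chromium APIs.

// Negating INT32_MIN as a signed int is undefined behavior; performing the
// subtraction in the unsigned domain is fully defined and yields the correct
// magnitude.
#include <cstdint>
#include <cstdio>
#include <limits>
#include <type_traits>

template <typename T>
constexpr std::make_unsigned_t<T> UnsignedAbs(T value) {
  static_assert(std::is_integral_v<T>, "Type must be integral");
  using UnsignedT = std::make_unsigned_t<T>;
  return value < 0 ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
                   : static_cast<UnsignedT>(value);
}

int main() {
  constexpr int32_t min = std::numeric_limits<int32_t>::min();
  static_assert(UnsignedAbs(min) == 2147483648u);  // |INT32_MIN| fits unsigned.
  std::printf("|%d| = %u\n", min, static_cast<unsigned>(UnsignedAbs(min)));
  return 0;
}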
diff --git a/base/numerics/safe_math_clang_gcc_impl.h b/base/numerics/safe_math_clang_gcc_impl.h
index b45388c..c5a89d9 100644
--- a/base/numerics/safe_math_clang_gcc_impl.h
+++ b/base/numerics/safe_math_clang_gcc_impl.h
@@ -136,7 +136,7 @@
 
 template <typename T>
 struct ClampedNegFastOp {
-  static const bool is_supported = std::is_signed<T>::value;
+  static const bool is_supported = std::is_signed_v<T>;
   __attribute__((always_inline)) static T Do(T value) {
     // Use this when there is no assembler path available.
     if (!ClampedSubFastAsmOp<T, T>::is_supported) {
diff --git a/base/numerics/safe_math_shared_impl.h b/base/numerics/safe_math_shared_impl.h
index 7ba4ed7..46ec883 100644
--- a/base/numerics/safe_math_shared_impl.h
+++ b/base/numerics/safe_math_shared_impl.h
@@ -115,8 +115,8 @@
 // However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
 // so the float versions will not compile.
 template <typename Numeric,
-          bool IsInteger = std::is_integral<Numeric>::value,
-          bool IsFloat = std::is_floating_point<Numeric>::value>
+          bool IsInteger = std::is_integral_v<Numeric>,
+          bool IsFloat = std::is_floating_point_v<Numeric>>
 struct UnsignedOrFloatForSize;
 
 template <typename Numeric>
@@ -134,36 +134,29 @@
 // exhibit well-defined overflow semantics and rely on the caller to detect
 // if an overflow occurred.
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
 constexpr T NegateWrapper(T value) {
   using UnsignedT = typename std::make_unsigned<T>::type;
   // This will compile to a NEG on Intel, and is normal negation on ARM.
   return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
 }
 
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
 constexpr T NegateWrapper(T value) {
   return -value;
 }
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
 constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
   return ~value;
 }
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
 constexpr T AbsWrapper(T value) {
   return static_cast<T>(SafeUnsignedAbs(value));
 }
 
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
 constexpr T AbsWrapper(T value) {
   return value < 0 ? -value : value;
 }
@@ -192,8 +185,7 @@
 #define BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, CMP_OP) \
   /* Binary arithmetic operator for all CLASS##Numeric operations. */          \
   template <typename L, typename R,                                            \
-            typename std::enable_if<Is##CLASS##Op<L, R>::value>::type* =       \
-                nullptr>                                                       \
+            std::enable_if_t<Is##CLASS##Op<L, R>::value>* = nullptr>           \
   constexpr CLASS##Numeric<                                                    \
       typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type>                    \
   operator OP(const L lhs, const R rhs) {                                      \
diff --git a/base/numerics/wrapping_math.h b/base/numerics/wrapping_math.h
new file mode 100644
index 0000000..891f017
--- /dev/null
+++ b/base/numerics/wrapping_math.h
@@ -0,0 +1,42 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_WRAPPING_MATH_H_
+#define BASE_NUMERICS_WRAPPING_MATH_H_
+
+#include <type_traits>
+
+namespace base {
+
+// Returns `a + b` with overflow defined to wrap around, i.e. modulo 2^N where N
+// is the bit width of `T`.
+template <typename T>
+inline constexpr T WrappingAdd(T a, T b) {
+  static_assert(std::is_integral_v<T>);
+  // Unsigned arithmetic wraps, so convert to the corresponding unsigned type.
+  // Note that, if `T` is smaller than `int`, e.g. `int16_t`, the values are
+  // promoted to `int`, which brings us back to undefined overflow. This is fine
+  // here because the sum of any two `int16_t`s fits in `int`, but `WrappingMul`
+  // will need a more complex implementation.
+  using Unsigned = std::make_unsigned_t<T>;
+  return static_cast<T>(static_cast<Unsigned>(a) + static_cast<Unsigned>(b));
+}
+
+// Returns `a - b` with overflow defined to wrap around, i.e. modulo 2^N where N
+// is the bit width of `T`.
+template <typename T>
+inline constexpr T WrappingSub(T a, T b) {
+  static_assert(std::is_integral_v<T>);
+  // Unsigned arithmetic wraps, so convert to the corresponding unsigned type.
+  // Note that, if `T` is smaller than `int`, e.g. `int16_t`, the values are
+  // promoted to `int`, which brings us back to undefined overflow. This is fine
+  // here because the difference of any two `int16_t`s fits in `int`, but
+  // `WrappingMul` will need a more complex implementation.
+  using Unsigned = std::make_unsigned_t<T>;
+  return static_cast<T>(static_cast<Unsigned>(a) - static_cast<Unsigned>(b));
+}
+
+}  // namespace base
+
+#endif  // BASE_NUMERICS_WRAPPING_MATH_H_
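
Editor's note: a small sketch (not part of the CL) showing the intended behavior of the WrappingAdd/WrappingSub helpers added above; it assumes the new header is on the include path and that out-of-range conversions to signed types are modular, as Chromium's toolchains guarantee.

#include <cstdint>
#include <limits>

#include "base/numerics/wrapping_math.h"

int main() {
  constexpr int32_t max = std::numeric_limits<int32_t>::max();
  constexpr int32_t min = std::numeric_limits<int32_t>::min();
  // Plain `+`/`-` on signed integers has undefined overflow; these wrap mod 2^32.
  static_assert(base::WrappingAdd(max, int32_t{1}) == min);
  static_assert(base::WrappingSub(min, int32_t{1}) == max);
  // Unsigned types wrap the same way the built-in operators already do.
  static_assert(base::WrappingAdd(uint8_t{255}, uint8_t{1}) == 0);
  return 0;
}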
diff --git a/base/observer_list.h b/base/observer_list.h
index 450ec23..f4aad8c 100644
--- a/base/observer_list.h
+++ b/base/observer_list.h
@@ -17,6 +17,7 @@
 
 #include "base/check.h"
 #include "base/check_op.h"
+#include "base/containers/cxx20_erase_vector.h"
 #include "base/dcheck_is_on.h"
 #include "base/debug/dump_without_crashing.h"
 #include "base/notreached.h"
@@ -342,10 +343,8 @@
     // Compact() is only ever called when the last iterator is destroyed.
     DETACH_FROM_SEQUENCE(iteration_sequence_checker_);
 
-    observers_.erase(
-        std::remove_if(observers_.begin(), observers_.end(),
-                       [](const auto& o) { return o.IsMarkedForRemoval(); }),
-        observers_.end());
+    base::EraseIf(observers_,
+                  [](const auto& o) { return o.IsMarkedForRemoval(); });
   }
 
   std::string GetObserversCreationStackString() const {
diff --git a/base/observer_list_internal.h b/base/observer_list_internal.h
index d19d2c7..9d8e676 100644
--- a/base/observer_list_internal.h
+++ b/base/observer_list_internal.h
@@ -42,7 +42,7 @@
   template <class ObserverType>
   static ObserverType* Get(const UncheckedObserverAdapter& adapter) {
     static_assert(
-        !std::is_base_of<CheckedObserver, ObserverType>::value,
+        !std::is_base_of_v<CheckedObserver, ObserverType>,
         "CheckedObserver classes must not use ObserverList<T>::Unchecked.");
     return static_cast<ObserverType*>(adapter.ptr_);
   }
@@ -102,7 +102,7 @@
   template <class ObserverType>
   static ObserverType* Get(const CheckedObserverAdapter& adapter) {
     static_assert(
-        std::is_base_of<CheckedObserver, ObserverType>::value,
+        std::is_base_of_v<CheckedObserver, ObserverType>,
         "Observers should inherit from base::CheckedObserver. "
         "Use ObserverList<T>::Unchecked to observe with raw pointers.");
     DCHECK(adapter.weak_ptr_);
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index 790cddc..09b7e25 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -43,7 +43,16 @@
 //      callback.
 //    * If one sequence is notifying observers concurrently with an observer
 //      removing itself from the observer list, the notifications will be
-//      silently dropped.
+//      silently dropped. However if the observer is currently inside a
+//      notification callback, the callback will finish running.
+//
+//   By default, observers can be removed from any sequence. However this can be
+//   error-prone since an observer may be running a callback when it's removed,
+//   in which case it isn't safe to delete until the callback is finished.
+//   Consider using the RemoveObserverPolicy::kAddingSequenceOnly template
+//   parameter, which will CHECK that observers are only removed from the
+//   sequence where they were added (which is also the sequence that runs
+//   callbacks).
 //
 //   The drawback of the threadsafe observer list is that notifications are not
 //   as real-time as the non-threadsafe version of this class. Notifications
@@ -98,8 +107,19 @@
 
 }  // namespace internal
 
-template <class ObserverType>
+enum class RemoveObserverPolicy {
+  // Observers can be removed from any sequence.
+  kAnySequence,
+  // Observers can only be removed from the sequence that added them.
+  kAddingSequenceOnly,
+};
+
+template <class ObserverType,
+          RemoveObserverPolicy RemovePolicy =
+              RemoveObserverPolicy::kAnySequence>
 class ObserverListThreadSafe : public internal::ObserverListThreadSafeBase {
+  using Self = ObserverListThreadSafe<ObserverType, RemovePolicy>;
+
  public:
   enum class AddObserverResult {
     kBecameNonEmpty,
@@ -160,7 +180,7 @@
             static_cast<const NotificationData*>(current_notification);
         task_runner->PostTask(
             current_notification->from_here,
-            BindOnce(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+            BindOnce(&Self::NotifyWrapper, this,
                      // While `observer` may be dangling, we pass it and
                      // check it wasn't deallocated in NotifyWrapper() which can
                      // check `observers_` to verify presence (the owner of the
@@ -184,6 +204,11 @@
   // observer won't stop it.
   RemoveObserverResult RemoveObserver(ObserverType* observer) {
     AutoLock auto_lock(lock_);
+    if constexpr (RemovePolicy == RemoveObserverPolicy::kAddingSequenceOnly) {
+      const auto it = observers_.find(observer);
+      CHECK(it == observers_.end() ||
+            it->second.task_runner->RunsTasksInCurrentSequence());
+    }
     observers_.erase(observer);
     return observers_.empty() ? RemoveObserverResult::kWasOrBecameEmpty
                               : RemoveObserverResult::kRemainsNonEmpty;
@@ -214,7 +239,7 @@
     for (const auto& observer : observers_) {
       observer.second.task_runner->PostTask(
           from_here,
-          BindOnce(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+          BindOnce(&Self::NotifyWrapper, this,
                    // While `observer.first` may be dangling, we pass it and
                    // check it wasn't deallocated in NotifyWrapper() which can
                    // check `observers_` to verify presence (the owner of the
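
Editor's note: a hypothetical usage sketch (not part of the CL) of the new RemoveObserverPolicy template parameter described above; MyObserver and MakeList are invented names for illustration.

#include "base/memory/scoped_refptr.h"
#include "base/observer_list_threadsafe.h"

class MyObserver {
 public:
  virtual ~MyObserver() = default;
  virtual void OnEvent(int value) = 0;
};

// Observers added to this list may only be removed on the sequence that added
// them; removing from any other sequence now CHECKs instead of racing with an
// in-flight notification callback.
using MyObserverList = base::ObserverListThreadSafe<
    MyObserver, base::RemoveObserverPolicy::kAddingSequenceOnly>;

scoped_refptr<MyObserverList> MakeList() {
  return base::MakeRefCounted<MyObserverList>();
}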
diff --git a/base/observer_list_threadsafe_unittest.cc b/base/observer_list_threadsafe_unittest.cc
index 9514546..cec0930 100644
--- a/base/observer_list_threadsafe_unittest.cc
+++ b/base/observer_list_threadsafe_unittest.cc
@@ -5,6 +5,7 @@
 #include "base/observer_list_threadsafe.h"
 
 #include <memory>
+#include <utility>
 #include <vector>
 
 #include "base/compiler_specific.h"
@@ -20,7 +21,9 @@
 #include "base/task/thread_pool.h"
 #include "base/task/thread_pool/thread_pool_instance.h"
 #include "base/test/bind.h"
+#include "base/test/gtest_util.h"
 #include "base/test/task_environment.h"
+#include "base/test/test_waitable_event.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
@@ -30,7 +33,7 @@
 namespace base {
 namespace {
 
-constexpr int kThreadRunTime = 2000;  // ms to run the multi-threaded test.
+constexpr int kThreadRunTime = 1000;  // ms to run the multi-threaded test.
 
 class Foo {
  public:
@@ -73,19 +76,26 @@
 
 // A task for use in the ThreadSafeObserver test which will add and remove
 // itself from the notification list repeatedly.
+template <RemoveObserverPolicy RemovePolicy =
+              RemoveObserverPolicy::kAnySequence>
 class AddRemoveThread : public Foo {
+  using Self = AddRemoveThread<RemovePolicy>;
+  using ObserverList = ObserverListThreadSafe<Foo, RemovePolicy>;
+
  public:
-  AddRemoveThread(ObserverListThreadSafe<Foo>* list, bool notify)
+  AddRemoveThread(ObserverList* list,
+                  bool notify,
+                  scoped_refptr<SingleThreadTaskRunner> removal_task_runner)
       : list_(list),
         task_runner_(ThreadPool::CreateSingleThreadTaskRunner(
             {},
             SingleThreadTaskRunnerThreadMode::DEDICATED)),
+        removal_task_runner_(std::move(removal_task_runner)),
         in_list_(false),
         start_(Time::Now()),
         do_notifies_(notify) {
     task_runner_->PostTask(
-        FROM_HERE,
-        base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+        FROM_HERE, base::BindOnce(&Self::AddTask, weak_factory_.GetWeakPtr()));
   }
 
   ~AddRemoveThread() override = default;
@@ -108,8 +118,12 @@
     }
 
     SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
-        FROM_HERE,
-        base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+        FROM_HERE, base::BindOnce(&Self::AddTask, weak_factory_.GetWeakPtr()));
+  }
+
+  void RemoveTask() {
+    list_->RemoveObserver(this);
+    in_list_ = false;
   }
 
   void Observe(int x) override {
@@ -120,20 +134,39 @@
     // This callback should fire on the appropriate thread
     EXPECT_TRUE(task_runner_->BelongsToCurrentThread());
 
-    list_->RemoveObserver(this);
-    in_list_ = false;
+    if (removal_task_runner_) {
+      // Remove the observer on a different thread, blocking the current thread
+      // until it's removed. Unretained is safe since the pointers are valid
+      // until the thread is unblocked.
+      base::TestWaitableEvent event;
+      removal_task_runner_->PostTask(
+          FROM_HERE, base::BindOnce(&Self::RemoveTask, base::Unretained(this))
+                         .Then(base::BindOnce(&base::TestWaitableEvent::Signal,
+                                              base::Unretained(&event))));
+      event.Wait();
+    } else {
+      // Remove the observer on the same thread.
+      RemoveTask();
+    }
+  }
+
+  scoped_refptr<SingleThreadTaskRunner> task_runner() const {
+    return task_runner_;
   }
 
  private:
-  raw_ptr<ObserverListThreadSafe<Foo>> list_;
+  raw_ptr<ObserverList> list_;
   scoped_refptr<SingleThreadTaskRunner> task_runner_;
+  // Optional task runner used to remove observers. This will be the main task
+  // runner of a different AddRemoveThread.
+  scoped_refptr<SingleThreadTaskRunner> removal_task_runner_;
   bool in_list_;  // Are we currently registered for notifications.
                   // in_list_ is only used on |this| thread.
   Time start_;    // The time we started the test.
 
   bool do_notifies_;    // Whether these threads should do notifications.
 
-  base::WeakPtrFactory<AddRemoveThread> weak_factory_{this};
+  base::WeakPtrFactory<Self> weak_factory_{this};
 };
 
 }  // namespace
@@ -253,27 +286,41 @@
 
 // A test driver for a multi-threaded notification loop.  Runs a number of
 // observer threads, each of which constantly adds/removes itself from the
-// observer list.  Optionally, if cross_thread_notifies is set to true, the
-// observer threads will also trigger notifications to all observers.
+// observer list.  Optionally, if `cross_thread_notifies` is set to true, the
+// observer threads will also trigger notifications to all observers, and if
+// `cross_thread_removes` is set to true, the observer threads will also remove
+// observers added by other threads.
+template <
+    RemoveObserverPolicy RemovePolicy = RemoveObserverPolicy::kAnySequence>
 static void ThreadSafeObserverHarness(int num_threads,
-                                      bool cross_thread_notifies) {
+                                      bool cross_thread_notifies = false,
+                                      bool cross_thread_removes = false) {
   test::TaskEnvironment task_environment;
 
-  scoped_refptr<ObserverListThreadSafe<Foo>> observer_list(
-      new ObserverListThreadSafe<Foo>);
+  auto observer_list =
+      base::MakeRefCounted<ObserverListThreadSafe<Foo, RemovePolicy>>();
+
   Adder a(1);
   Adder b(-1);
 
   observer_list->AddObserver(&a);
   observer_list->AddObserver(&b);
 
-  std::vector<std::unique_ptr<AddRemoveThread>> threaded_observer;
-  threaded_observer.reserve(num_threads);
+  using TestThread = AddRemoveThread<RemovePolicy>;
+  std::vector<std::unique_ptr<TestThread>> threaded_observers;
+  threaded_observers.reserve(num_threads);
+  scoped_refptr<SingleThreadTaskRunner> removal_task_runner;
   for (int index = 0; index < num_threads; index++) {
-    threaded_observer.push_back(std::make_unique<AddRemoveThread>(
-        observer_list.get(), cross_thread_notifies));
+    auto add_remove_thread =
+        std::make_unique<TestThread>(observer_list.get(), cross_thread_notifies,
+                                     std::move(removal_task_runner));
+    if (cross_thread_removes) {
+      // Save the task runner to pass to the next thread.
+      removal_task_runner = add_remove_thread->task_runner();
+    }
+    threaded_observers.push_back(std::move(add_remove_thread));
   }
-  ASSERT_EQ(static_cast<size_t>(num_threads), threaded_observer.size());
+  ASSERT_EQ(static_cast<size_t>(num_threads), threaded_observers.size());
 
   Time start = Time::Now();
   while (true) {
@@ -290,25 +337,60 @@
 
 TEST(ObserverListThreadSafeTest, CrossThreadObserver) {
   // Use 7 observer threads.  Notifications only come from the main thread.
-  ThreadSafeObserverHarness(7, false);
+  ThreadSafeObserverHarness(7);
 }
 
 TEST(ObserverListThreadSafeTest, CrossThreadNotifications) {
   // Use 3 observer threads.  Notifications will fire from the main thread and
   // all 3 observer threads.
-  ThreadSafeObserverHarness(3, true);
+  ThreadSafeObserverHarness(3, /*cross_thread_notifies=*/true);
+}
+
+TEST(ObserverListThreadSafeTest, CrossThreadRemoval) {
+  // Use 3 observer threads. Observers can be removed from any thread.
+  ThreadSafeObserverHarness(3, /*cross_thread_notifies=*/true,
+                            /*cross_thread_removes=*/true);
+}
+
+TEST(ObserverListThreadSafeTest, CrossThreadRemovalRestricted) {
+  // Use 3 observer threads. Observers must be removed from the thread that
+  // added them. This should succeed because the test doesn't break that
+  // restriction.
+  ThreadSafeObserverHarness<RemoveObserverPolicy::kAddingSequenceOnly>(
+      3, /*cross_thread_notifies=*/true, /*cross_thread_removes=*/false);
+}
+
+TEST(ObserverListThreadSafeDeathTest, CrossThreadRemovalRestricted) {
+  // Use 3 observer threads. Observers must be removed from the thread that
+  // added them. This should CHECK because the test breaks that restriction.
+  EXPECT_CHECK_DEATH(
+      ThreadSafeObserverHarness<RemoveObserverPolicy::kAddingSequenceOnly>(
+          3, /*cross_thread_notifies=*/true, /*cross_thread_removes=*/true));
 }
 
 TEST(ObserverListThreadSafeTest, OutlivesTaskEnvironment) {
   absl::optional<test::TaskEnvironment> task_environment(absl::in_place);
-  scoped_refptr<ObserverListThreadSafe<Foo>> observer_list(
-      new ObserverListThreadSafe<Foo>);
+  auto observer_list = base::MakeRefCounted<ObserverListThreadSafe<Foo>>();
 
   Adder a(1);
   observer_list->AddObserver(&a);
   task_environment.reset();
   // Test passes if we don't crash here.
   observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+  observer_list->RemoveObserver(&a);
+}
+
+TEST(ObserverListThreadSafeTest, OutlivesTaskEnvironmentRemovalRestricted) {
+  absl::optional<test::TaskEnvironment> task_environment(absl::in_place);
+  auto observer_list = base::MakeRefCounted<
+      ObserverListThreadSafe<Foo, RemoveObserverPolicy::kAddingSequenceOnly>>();
+
+  Adder a(1);
+  observer_list->AddObserver(&a);
+  task_environment.reset();
+  // Test passes if we don't crash here.
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+  observer_list->RemoveObserver(&a);
 }
 
 namespace {
diff --git a/base/observer_list_unittest.nc b/base/observer_list_unittest.nc
index 172337a..557de54 100644
--- a/base/observer_list_unittest.nc
+++ b/base/observer_list_unittest.nc
@@ -11,7 +11,7 @@
 
 namespace base {
 
-#if defined(NCTEST_CHECKED_OBSERVER_USING_UNCHECKED_LIST)  // [r"fatal error: static assertion failed due to requirement '!std::is_base_of<base::CheckedObserver, Observer>::value': CheckedObserver classes must not use ObserverList<T>::Unchecked."]
+#if defined(NCTEST_CHECKED_OBSERVER_USING_UNCHECKED_LIST)  // [r"fatal error: static assertion failed due to requirement '!std::is_base_of_v<base::CheckedObserver, Observer>': CheckedObserver classes must not use ObserverList<T>::Unchecked."]
 
 void WontCompile() {
   struct Observer : public CheckedObserver {
@@ -22,7 +22,7 @@
     observer.OnObserve();
 }
 
-#elif defined(NCTEST_UNCHECKED_OBSERVER_USING_CHECKED_LIST)  // [r"fatal error: static assertion failed due to requirement 'std::is_base_of<base::CheckedObserver, UncheckedObserver>::value': Observers should inherit from base::CheckedObserver. Use ObserverList<T>::Unchecked to observe with raw pointers."]
+#elif defined(NCTEST_UNCHECKED_OBSERVER_USING_CHECKED_LIST)  // [r"fatal error: static assertion failed due to requirement 'std::is_base_of_v<base::CheckedObserver, UncheckedObserver>': Observers should inherit from base::CheckedObserver. Use ObserverList<T>::Unchecked to observe with raw pointers."]
 
 void WontCompile() {
   struct UncheckedObserver {
diff --git a/base/parameter_pack.h b/base/parameter_pack.h
index 3eef16e..450718b 100644
--- a/base/parameter_pack.h
+++ b/base/parameter_pack.h
@@ -42,25 +42,23 @@
 struct ParameterPack {
   // Checks if |Type| occurs in the parameter pack.
   template <typename Type>
-  using HasType =
-      std::bool_constant<any_of({std::is_same<Type, Ts>::value...})>;
+  using HasType = std::bool_constant<any_of({std::is_same_v<Type, Ts>...})>;
 
   // Checks if the parameter pack only contains |Type|.
   template <typename Type>
-  using OnlyHasType =
-      std::bool_constant<all_of({std::is_same<Type, Ts>::value...})>;
+  using OnlyHasType = std::bool_constant<all_of({std::is_same_v<Type, Ts>...})>;
 
   // Checks if |Type| occurs only once in the parameter pack.
   template <typename Type>
   using IsUniqueInPack =
-      std::bool_constant<count({std::is_same<Type, Ts>::value...}, true) == 1>;
+      std::bool_constant<count({std::is_same_v<Type, Ts>...}, true) == 1>;
 
   // Returns the zero-based index of |Type| within |Pack...| or |pack_npos| if
   // it's not within the pack.
   template <typename Type>
   static constexpr size_t IndexInPack() {
     size_t index = 0;
-    for (bool value : {std::is_same<Type, Ts>::value...}) {
+    for (bool value : {std::is_same_v<Type, Ts>...}) {
       if (value)
         return index;
       index++;
@@ -74,7 +72,7 @@
 
   // Checks if every type in the parameter pack is the same.
   using IsAllSameType =
-      std::bool_constant<all_of({std::is_same<NthType<0>, Ts>::value...})>;
+      std::bool_constant<all_of({std::is_same_v<NthType<0>, Ts>...})>;
 };
 
 }  // namespace base
diff --git a/base/parameter_pack_unittest.cc b/base/parameter_pack_unittest.cc
index 4f06f8a..8d3172b 100644
--- a/base/parameter_pack_unittest.cc
+++ b/base/parameter_pack_unittest.cc
@@ -57,14 +57,11 @@
 
 TEST(ParameterPack, NthType) {
   static_assert(
-      std::is_same<int, ParameterPack<int, float, bool>::NthType<0>>::value,
-      "");
+      std::is_same_v<int, ParameterPack<int, float, bool>::NthType<0>>, "");
   static_assert(
-      std::is_same<float, ParameterPack<int, float, bool>::NthType<1>>::value,
-      "");
+      std::is_same_v<float, ParameterPack<int, float, bool>::NthType<1>>, "");
   static_assert(
-      std::is_same<bool, ParameterPack<int, float, bool>::NthType<2>>::value,
-      "");
+      std::is_same_v<bool, ParameterPack<int, float, bool>::NthType<2>>, "");
 }
 
 TEST(ParameterPack, IsAllSameType) {
diff --git a/base/path_service.cc b/base/path_service.cc
index cf47f34..6350fc8 100644
--- a/base/path_service.cc
+++ b/base/path_service.cc
@@ -321,7 +321,7 @@
 }
 
 // static
-bool PathService::IsOverriddenForTests(int key) {
+bool PathService::IsOverriddenForTesting(int key) {
   PathData* path_data = GetPathData();
   DCHECK(path_data);
 
diff --git a/base/path_service.h b/base/path_service.h
index 28f0112..7767022 100644
--- a/base/path_service.h
+++ b/base/path_service.h
@@ -57,6 +57,9 @@
                                         bool is_absolute,
                                         bool create);
 
+  // Returns whether an override is present for a special directory or file.
+  static bool IsOverriddenForTesting(int key);
+
   // To extend the set of supported keys, you can register a path provider,
   // which is just a function mirroring PathService::Get.  The ProviderFunc
   // returns false if it cannot provide a non-empty path for the given key.
@@ -83,9 +86,6 @@
   // Removes an override for a special directory or file. Returns true if there
   // was an override to remove or false if none was present.
   static bool RemoveOverrideForTests(int key);
-
-  // Returns whether an override is present for a special directory or file.
-  static bool IsOverriddenForTests(int key);
 };
 
 }  // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
index 868e16e..577fc03 100644
--- a/base/pending_task.cc
+++ b/base/pending_task.cc
@@ -8,27 +8,6 @@
 
 namespace base {
 
-namespace {
-
-// TODO(crbug.com/1153139): Reconcile with GetDefaultTaskLeeway() and
-// kMinLowResolutionThresholdMs once GetDefaultTaskLeeway() == 16ms.
-constexpr base::TimeDelta kMaxPreciseDelay = Milliseconds(64);
-
-subtle::DelayPolicy MaybeOverrideDelayPolicy(subtle::DelayPolicy delay_policy,
-                                             TimeTicks queue_time,
-                                             TimeTicks delayed_run_time) {
-  if (delayed_run_time.is_null())
-    return subtle::DelayPolicy::kFlexibleNoSooner;
-  DCHECK(!queue_time.is_null());
-  if (delayed_run_time - queue_time >= kMaxPreciseDelay &&
-      delay_policy == subtle::DelayPolicy::kPrecise) {
-    return subtle::DelayPolicy::kFlexibleNoSooner;
-  }
-  return delay_policy;
-}
-
-}  // namespace
-
 PendingTask::PendingTask() = default;
 
 PendingTask::PendingTask(const Location& posted_from,
@@ -42,9 +21,7 @@
       queue_time(queue_time),
       delayed_run_time(delayed_run_time),
       leeway(leeway),
-      delay_policy(MaybeOverrideDelayPolicy(delay_policy,
-                                            queue_time,
-                                            delayed_run_time)) {}
+      delay_policy(delay_policy) {}
 
 PendingTask::PendingTask(PendingTask&& other) = default;
 
diff --git a/base/pickle.cc b/base/pickle.cc
index 0776e11..11d89e4 100644
--- a/base/pickle.cc
+++ b/base/pickle.cc
@@ -4,10 +4,11 @@
 
 #include "base/pickle.h"
 
-#include <algorithm>  // for max()
+#include <algorithm>
 #include <cstdlib>
 #include <limits>
 #include <ostream>
+#include <type_traits>
 
 #include "base/bits.h"
 #include "base/numerics/safe_conversions.h"
@@ -28,13 +29,13 @@
 
 template <typename Type>
 inline bool PickleIterator::ReadBuiltinType(Type* result) {
+  static_assert(
+      std::is_integral_v<Type> && !std::is_same_v<Type, bool>,
+      "This method is only safe with to use with types without padding bits.");
   const char* read_from = GetReadPointerAndAdvance<Type>();
   if (!read_from)
     return false;
-  if (sizeof(Type) > sizeof(uint32_t))
-    memcpy(result, read_from, sizeof(*result));
-  else
-    *result = *reinterpret_cast<const Type*>(read_from);
+  memcpy(result, read_from, sizeof(*result));
   return true;
 }
 
@@ -79,7 +80,14 @@
 }
 
 bool PickleIterator::ReadBool(bool* result) {
-  return ReadBuiltinType(result);
+  // Not all bit patterns are valid bools. Avoid undefined behavior by reading a
+  // type with no padding bits, then converting to bool.
+  uint8_t v;
+  if (!ReadBuiltinType(&v)) {
+    return false;
+  }
+  *result = v != 0;
+  return true;
 }
 
 bool PickleIterator::ReadInt(int* result) {
@@ -242,6 +250,9 @@
   header_->payload_size = 0;
 }
 
+Pickle::Pickle(span<const uint8_t> data)
+    : Pickle(reinterpret_cast<const char*>(data.data()), data.size()) {}
+
 Pickle::Pickle(const char* data, size_t data_len)
     : header_(reinterpret_cast<Header*>(const_cast<char*>(data))),
       header_size_(0),
@@ -430,7 +441,7 @@
   }
 
   char* write = mutable_payload() + write_offset_;
-  memset(write + length, 0, data_len - length);  // Always initialize padding
+  std::fill(write + length, write + data_len, 0);  // Always initialize padding
   header_->payload_size = static_cast<uint32_t>(new_size);
   write_offset_ = new_size;
   return write;
@@ -441,7 +452,8 @@
       << "oops: pickle is readonly";
   MSAN_CHECK_MEM_IS_INITIALIZED(data, length);
   void* write = ClaimUninitializedBytesInternal(length);
-  memcpy(write, data, length);
+  std::copy(static_cast<const char*>(data),
+            static_cast<const char*>(data) + length, static_cast<char*>(write));
 }
 
 }  // namespace base
diff --git a/base/pickle.h b/base/pickle.h
index 65762e3..1464fe7 100644
--- a/base/pickle.h
+++ b/base/pickle.h
@@ -160,6 +160,9 @@
   // instead the data is merely referenced by this Pickle.  Only const methods
   // should be used on the Pickle when initialized this way.  The header
   // padding size is deduced from the data length.
+  explicit Pickle(span<const uint8_t> data);
+  // TODO(crbug.com/1490484): Migrate callers of this overload to the span
+  // version.
   Pickle(const char* data, size_t data_len);
 
   // Initializes a Pickle as a deep copy of another Pickle.
diff --git a/base/pickle_unittest.cc b/base/pickle_unittest.cc
index 41a642d..08d0f0b 100644
--- a/base/pickle_unittest.cc
+++ b/base/pickle_unittest.cc
@@ -634,4 +634,16 @@
   EXPECT_TRUE(iter.ReachedEnd());
 }
 
+// Test that reading a value other than 0 or 1 as a bool does not trigger
+// UBSan.
+TEST(PickleTest, NonCanonicalBool) {
+  Pickle pickle;
+  pickle.WriteInt(0xff);
+
+  PickleIterator iter(pickle);
+  bool b;
+  ASSERT_TRUE(iter.ReadBool(&b));
+  EXPECT_TRUE(b);
+}
+
 }  // namespace base
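
Editor's note: a sketch (not part of the CL) that exercises the new span-based Pickle constructor and the hardened ReadBool together; the test name is invented, and the cast assumes Pickle::data()/size() expose the serialized bytes.

#include <cstdint>

#include "base/containers/span.h"
#include "base/pickle.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(PickleSpanExample, RoundTrip) {
  base::Pickle writer;
  writer.WriteInt(42);
  writer.WriteBool(true);

  // Re-open the serialized bytes through the new span overload rather than the
  // (const char*, size_t) constructor slated for migration.
  base::Pickle reader(base::make_span(
      reinterpret_cast<const uint8_t*>(writer.data()), writer.size()));

  base::PickleIterator iter(reader);
  int value = 0;
  bool flag = false;
  ASSERT_TRUE(iter.ReadInt(&value));
  ASSERT_TRUE(iter.ReadBool(&flag));  // ReadBool now normalizes any byte value.
  EXPECT_EQ(42, value);
  EXPECT_TRUE(flag);
}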
diff --git a/base/posix/can_lower_nice_to.cc b/base/posix/can_lower_nice_to.cc
index f3ce094..123ee3a 100644
--- a/base/posix/can_lower_nice_to.cc
+++ b/base/posix/can_lower_nice_to.cc
@@ -28,8 +28,9 @@
   // the target value is within the range allowed by RLIMIT_NICE.
 
   // 1. Check for root user.
-  if (geteuid() == 0)
+  if (geteuid() == 0) {
     return true;
+  }
 
   // 2. Skip checking the CAP_SYS_NICE permission because it would require
   // libcap.so.
@@ -47,8 +48,9 @@
   //
   // So, we are allowed to reduce niceness to a minimum of NZERO - rlimit:
   struct rlimit rlim;
-  if (getrlimit(RLIMIT_NICE, &rlim) != 0)
+  if (getrlimit(RLIMIT_NICE, &rlim) != 0) {
     return false;
+  }
   const int lowest_nice_allowed = NZERO - static_cast<int>(rlim.rlim_cur);
 
   // And lowering niceness to |nice_value| is allowed if it is greater than or
diff --git a/base/posix/sysctl.cc b/base/posix/sysctl.cc
new file mode 100644
index 0000000..a6f6d0d
--- /dev/null
+++ b/base/posix/sysctl.cc
@@ -0,0 +1,60 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/sysctl.h"
+
+#include <sys/sysctl.h>
+
+#include <initializer_list>
+#include <string>
+
+#include "base/check_op.h"
+#include "base/functional/function_ref.h"
+#include "base/numerics/safe_conversions.h"
+#include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace {
+
+absl::optional<std::string> StringSysctlImpl(
+    base::FunctionRef<int(char* /*out*/, size_t* /*out_len*/)> sysctl_func) {
+  size_t buf_len;
+  int result = sysctl_func(nullptr, &buf_len);
+  if (result < 0 || buf_len < 1) {
+    return absl::nullopt;
+  }
+
+  std::string value(buf_len - 1, '\0');
+  result = sysctl_func(&value[0], &buf_len);
+  if (result < 0) {
+    return absl::nullopt;
+  }
+  CHECK_LE(buf_len - 1, value.size());
+  CHECK_EQ(value[buf_len - 1], '\0');
+  value.resize(buf_len - 1);
+
+  return value;
+}
+}  // namespace
+
+namespace base {
+
+absl::optional<std::string> StringSysctl(
+    const std::initializer_list<int>& mib) {
+  return StringSysctlImpl([mib](char* out, size_t* out_len) {
+    return sysctl(const_cast<int*>(std::data(mib)),
+                  checked_cast<unsigned int>(std::size(mib)), out, out_len,
+                  nullptr, 0);
+  });
+}
+
+#if !BUILDFLAG(IS_OPENBSD)
+absl::optional<std::string> StringSysctlByName(const char* name) {
+  return StringSysctlImpl([name](char* out, size_t* out_len) {
+    return sysctlbyname(name, out, out_len, nullptr, 0);
+  });
+}
+#endif
+
+}  // namespace base
diff --git a/base/posix/sysctl.h b/base/posix/sysctl.h
new file mode 100644
index 0000000..cb08e52
--- /dev/null
+++ b/base/posix/sysctl.h
@@ -0,0 +1,32 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_SYSCTL_H_
+#define BASE_POSIX_SYSCTL_H_
+
+#include <initializer_list>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+// NB: While a BSD utility file, this lives in /base/posix/ for simplicity as
+// there is no /base/bsd/.
+
+namespace base {
+
+// Returns the value returned by `sysctl` as a std::string, or nullopt on error.
+BASE_EXPORT absl::optional<std::string> StringSysctl(
+    const std::initializer_list<int>& mib);
+
+#if !BUILDFLAG(IS_OPENBSD)
+// Returns the value returned by `sysctlbyname` as a std::string, or nullopt
+// on error.
+BASE_EXPORT absl::optional<std::string> StringSysctlByName(const char* name);
+#endif
+
+}  // namespace base
+
+#endif  // BASE_POSIX_SYSCTL_H_
diff --git a/base/posix/sysctl_unittest.cc b/base/posix/sysctl_unittest.cc
new file mode 100644
index 0000000..c0b3690
--- /dev/null
+++ b/base/posix/sysctl_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/sysctl.h"
+
+#include <sys/sysctl.h>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+using SysctlTest = testing::Test;
+
+TEST(SysctlTest, MibSuccess) {
+  absl::optional<std::string> result1 = StringSysctl({CTL_HW, HW_MACHINE});
+  EXPECT_TRUE(result1);
+
+#if !BUILDFLAG(IS_OPENBSD)
+  absl::optional<std::string> result2 = StringSysctlByName("hw.machine");
+  EXPECT_TRUE(result2);
+
+  EXPECT_EQ(result1, result2);
+#endif
+}
+
+TEST(SysctlTest, MibFailure) {
+  absl::optional<std::string> result = StringSysctl({-1});
+  EXPECT_FALSE(result);
+
+#if !BUILDFLAG(IS_OPENBSD)
+  result = StringSysctlByName("banananananananana");
+  EXPECT_FALSE(result);
+#endif
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/power_monitor/battery_level_provider_mac.mm b/base/power_monitor/battery_level_provider_mac.mm
index d5578de..cf8390d 100644
--- a/base/power_monitor/battery_level_provider_mac.mm
+++ b/base/power_monitor/battery_level_provider_mac.mm
@@ -8,8 +8,8 @@
 #include <IOKit/IOKitLib.h>
 #include <IOKit/ps/IOPSKeys.h>
 
-#include "base/mac/foundation_util.h"
-#include "base/mac/scoped_cftyperef.h"
+#include "base/apple/foundation_util.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/mac/scoped_ioobject.h"
 
 namespace base {
@@ -21,7 +21,7 @@
 absl::optional<SInt64> GetValueAsSInt64(CFDictionaryRef description,
                                         CFStringRef key) {
   CFNumberRef number_ref =
-      base::mac::GetValueFromDictionary<CFNumberRef>(description, key);
+      base::apple::GetValueFromDictionary<CFNumberRef>(description, key);
 
   SInt64 value;
   if (number_ref && CFNumberGetValue(number_ref, kCFNumberSInt64Type, &value))
@@ -33,7 +33,7 @@
 absl::optional<bool> GetValueAsBoolean(CFDictionaryRef description,
                                        CFStringRef key) {
   CFBooleanRef boolean =
-      base::mac::GetValueFromDictionary<CFBooleanRef>(description, key);
+      base::apple::GetValueFromDictionary<CFBooleanRef>(description, key);
   if (!boolean)
     return absl::nullopt;
   return CFBooleanGetValue(boolean);
@@ -65,13 +65,13 @@
   const base::mac::ScopedIOObject<io_service_t> service(
       IOServiceGetMatchingService(kIOMasterPortDefault,
                                   IOServiceMatching("IOPMPowerSource")));
-  if (service == IO_OBJECT_NULL) {
+  if (!service) {
     // Macs without a battery don't necessarily provide the IOPMPowerSource
     // service (e.g. test bots). Don't report this as an error.
     return MakeBatteryState(/* battery_details=*/{});
   }
 
-  base::ScopedCFTypeRef<CFMutableDictionaryRef> dict;
+  apple::ScopedCFTypeRef<CFMutableDictionaryRef> dict;
   kern_return_t result =
       IORegistryEntryCreateCFProperties(service.get(), dict.InitializeInto(),
                                         /*allocator=*/nullptr, /*options=*/0);
@@ -82,7 +82,7 @@
   }
 
   absl::optional<bool> battery_installed =
-      GetValueAsBoolean(dict, CFSTR("BatteryInstalled"));
+      GetValueAsBoolean(dict.get(), CFSTR("BatteryInstalled"));
   if (!battery_installed.has_value()) {
     // Failing to access the BatteryInstalled property is unexpected.
     return absl::nullopt;
@@ -94,26 +94,26 @@
   }
 
   absl::optional<bool> external_connected =
-      GetValueAsBoolean(dict, CFSTR("ExternalConnected"));
+      GetValueAsBoolean(dict.get(), CFSTR("ExternalConnected"));
   if (!external_connected.has_value()) {
     // Failing to access the ExternalConnected property is unexpected.
     return absl::nullopt;
   }
 
   absl::optional<SInt64> current_capacity =
-      GetValueAsSInt64(dict, CFSTR("AppleRawCurrentCapacity"));
+      GetValueAsSInt64(dict.get(), CFSTR("AppleRawCurrentCapacity"));
   if (!current_capacity.has_value()) {
     return absl::nullopt;
   }
 
   absl::optional<SInt64> max_capacity =
-      GetValueAsSInt64(dict, CFSTR("AppleRawMaxCapacity"));
+      GetValueAsSInt64(dict.get(), CFSTR("AppleRawMaxCapacity"));
   if (!max_capacity.has_value()) {
     return absl::nullopt;
   }
 
   absl::optional<SInt64> voltage_mv =
-      GetValueAsSInt64(dict, CFSTR(kIOPSVoltageKey));
+      GetValueAsSInt64(dict.get(), CFSTR(kIOPSVoltageKey));
   if (!voltage_mv.has_value()) {
     return absl::nullopt;
   }
diff --git a/base/power_monitor/battery_state_sampler.cc b/base/power_monitor/battery_state_sampler.cc
index 3909fe4..48d0854 100644
--- a/base/power_monitor/battery_state_sampler.cc
+++ b/base/power_monitor/battery_state_sampler.cc
@@ -1,4 +1,4 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/power_monitor/battery_state_sampler.h b/base/power_monitor/battery_state_sampler.h
index 8d08fbc..eaa532a 100644
--- a/base/power_monitor/battery_state_sampler.h
+++ b/base/power_monitor/battery_state_sampler.h
@@ -1,4 +1,4 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/power_monitor/battery_state_sampler_mac.cc b/base/power_monitor/battery_state_sampler_mac.cc
index 02f2f0b..8b06f4d 100644
--- a/base/power_monitor/battery_state_sampler_mac.cc
+++ b/base/power_monitor/battery_state_sampler_mac.cc
@@ -1,4 +1,4 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/power_monitor/battery_state_sampler_unittest.cc b/base/power_monitor/battery_state_sampler_unittest.cc
index 17b7051..605a606 100644
--- a/base/power_monitor/battery_state_sampler_unittest.cc
+++ b/base/power_monitor/battery_state_sampler_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2022 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/power_monitor/iopm_power_source_sampling_event_source.cc b/base/power_monitor/iopm_power_source_sampling_event_source.cc
index 48c7bb6..27c6cf2 100644
--- a/base/power_monitor/iopm_power_source_sampling_event_source.cc
+++ b/base/power_monitor/iopm_power_source_sampling_event_source.cc
@@ -43,8 +43,8 @@
                                      dispatch_get_main_queue());
 
   kern_return_t result = IOServiceAddInterestNotification(
-      notify_port_.get(), service_, kIOGeneralInterest, OnNotification, this,
-      notification_.InitializeInto());
+      notify_port_.get(), service_.get(), kIOGeneralInterest, OnNotification,
+      this, notification_.InitializeInto());
 
   if (result != KERN_SUCCESS) {
     LOG(ERROR) << "Could not register to IOPMPowerSource notifications";
diff --git a/base/power_monitor/moving_average.cc b/base/power_monitor/moving_average.cc
deleted file mode 100644
index 9ead688..0000000
--- a/base/power_monitor/moving_average.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/power_monitor/moving_average.h"
-
-#include <algorithm>
-#include <limits>
-
-#include "base/check_op.h"
-#include "base/numerics/clamped_math.h"
-
-namespace {
-constexpr int kIntMax = std::numeric_limits<int>::max();
-constexpr int64_t kInt64Max = std::numeric_limits<int64_t>::max();
-}  // namespace
-
-namespace base {
-
-MovingAverage::MovingAverage(uint8_t window_size)
-    : window_size_(window_size), buffer_(window_size, 0) {
-  DCHECK_LE(kIntMax * window_size, kInt64Max);
-}
-
-MovingAverage::~MovingAverage() = default;
-
-void MovingAverage::AddSample(int sample) {
-  sum_ -= buffer_[index_];
-  buffer_[index_++] = sample;
-  sum_ += sample;
-  if (index_ == window_size_) {
-    full_ = true;
-    index_ = 0;
-  }
-}
-
-int MovingAverage::GetAverageRoundedDown() const {
-  if (Size() == 0 || uint64_t{Size()} > static_cast<uint64_t>(kInt64Max)) {
-    return 0;
-  }
-  return static_cast<int>(sum_ / static_cast<int64_t>(Size()));
-}
-
-int MovingAverage::GetAverageRoundedToClosest() const {
-  if (Size() == 0 || uint64_t{Size()} > static_cast<uint64_t>(kInt64Max))
-    return 0;
-  return static_cast<int>((base::ClampedNumeric<int64_t>(sum_) + Size() / 2) /
-                          static_cast<int64_t>(Size()));
-}
-
-double MovingAverage::GetUnroundedAverage() const {
-  if (Size() == 0)
-    return 0;
-  return sum_ / static_cast<double>(Size());
-}
-
-void MovingAverage::Reset() {
-  std::fill(buffer_.begin(), buffer_.end(), 0);
-  sum_ = 0;
-  index_ = 0;
-  full_ = false;
-}
-
-size_t MovingAverage::Size() const {
-  return full_ ? window_size_ : index_;
-}
-}  // namespace base
diff --git a/base/power_monitor/moving_average.h b/base/power_monitor/moving_average.h
deleted file mode 100644
index 991e03d..0000000
--- a/base/power_monitor/moving_average.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_POWER_MONITOR_MOVING_AVERAGE_H_
-#define BASE_POWER_MONITOR_MOVING_AVERAGE_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/base_export.h"
-
-namespace base {
-
-// Calculates average over a small fixed size window. If there are less than
-// window size elements, calculates average of all inserted elements so far.
-// This implementation support a maximum window size of 255.
-// Ported from third_party/webrtc/rtc_base/numerics/moving_average.h.
-class BASE_EXPORT MovingAverage {
- public:
-  // Maximum supported window size is 2^8 - 1 = 255.
-  explicit MovingAverage(uint8_t window_size);
-  ~MovingAverage();
-  // MovingAverage is neither copyable nor movable.
-  MovingAverage(const MovingAverage&) = delete;
-  MovingAverage& operator=(const MovingAverage&) = delete;
-
-  // Adds new sample. If the window is full, the oldest element is pushed out.
-  void AddSample(int sample);
-
-  // Returns rounded down average of last `window_size` elements or all
-  // elements if there are not enough of them.
-  int GetAverageRoundedDown() const;
-
-  // Same as above but rounded to the closest integer.
-  int GetAverageRoundedToClosest() const;
-
-  // Returns unrounded average over the window.
-  double GetUnroundedAverage() const;
-
-  // Resets to the initial state before any elements were added.
-  void Reset();
-
-  // Returns number of elements in the window.
-  size_t Size() const;
-
- private:
-  // Stores `window_size` used in the constructor.
-  uint8_t window_size_ = 0;
-  // New samples are added at this index. Counts modulo `window_size`.
-  uint8_t index_ = 0;
-  // Set to true when the `buffer_` is full. i.e, all elements contain a
-  // sample added by AddSample().
-  bool full_ = false;
-  // Sum of the samples in the moving window.
-  int64_t sum_ = 0;
-  // Circular buffer for all the samples in the moving window.
-  // Size is always `window_size`
-  std::vector<int> buffer_;
-};
-
-}  // namespace base
-
-#endif  // BASE_POWER_MONITOR_MOVING_AVERAGE_H_
diff --git a/base/power_monitor/moving_average_unittest.cc b/base/power_monitor/moving_average_unittest.cc
deleted file mode 100644
index a3bb66d..0000000
--- a/base/power_monitor/moving_average_unittest.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2020 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/power_monitor/moving_average.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace test {
-
-// Ported from third_party/webrtc/rtc_base/numerics/moving_average_unittest.cc.
-
-TEST(MovingAverageTest, EmptyAverage) {
-  MovingAverage moving_average(1);
-  EXPECT_EQ(0u, moving_average.Size());
-  EXPECT_EQ(0, moving_average.GetAverageRoundedDown());
-}
-
-TEST(MovingAverageTest, OneElement) {
-  MovingAverage moving_average(1);
-  moving_average.AddSample(3);
-  EXPECT_EQ(1u, moving_average.Size());
-  EXPECT_EQ(3, moving_average.GetAverageRoundedDown());
-}
-
-// Verify that Size() increases monotonically when samples are added up to the
-// window size. At that point the filter is full and shall return the window
-// size as Size() until Reset() is called.
-TEST(MovingAverageTest, Size) {
-  constexpr uint8_t kWindowSize = 3;
-  MovingAverage moving_average(kWindowSize);
-  EXPECT_EQ(0u, moving_average.Size());
-  moving_average.AddSample(1);
-  EXPECT_EQ(1u, moving_average.Size());
-  moving_average.AddSample(2);
-  EXPECT_EQ(2u, moving_average.Size());
-  moving_average.AddSample(3);
-  // Three samples have beend added and the filter is full (all elements in the
-  // buffer have been given a valid value).
-  EXPECT_EQ(kWindowSize, moving_average.Size());
-  // Adding a fourth sample will shift out the first sample (1) and the filter
-  // should now contain [4,2,3] => average is 9 / 3 = 3.
-  moving_average.AddSample(4);
-  EXPECT_EQ(kWindowSize, moving_average.Size());
-  EXPECT_EQ(3, moving_average.GetAverageRoundedToClosest());
-  moving_average.Reset();
-  EXPECT_EQ(0u, moving_average.Size());
-  EXPECT_EQ(0, moving_average.GetAverageRoundedToClosest());
-}
-
-TEST(MovingAverageTest, GetAverage) {
-  MovingAverage moving_average(255);
-  moving_average.AddSample(1);
-  moving_average.AddSample(1);
-  moving_average.AddSample(3);
-  moving_average.AddSample(3);
-  EXPECT_EQ(moving_average.GetAverageRoundedDown(), 2);
-  EXPECT_EQ(moving_average.GetAverageRoundedToClosest(), 2);
-}
-
-TEST(MovingAverageTest, GetAverageRoundedDownRounds) {
-  MovingAverage moving_average(255);
-  moving_average.AddSample(1);
-  moving_average.AddSample(2);
-  moving_average.AddSample(2);
-  moving_average.AddSample(2);
-  EXPECT_EQ(moving_average.GetAverageRoundedDown(), 1);
-}
-
-TEST(MovingAverageTest, GetAverageRoundedToClosestRounds) {
-  MovingAverage moving_average(255);
-  moving_average.AddSample(1);
-  moving_average.AddSample(2);
-  moving_average.AddSample(2);
-  moving_average.AddSample(2);
-  EXPECT_EQ(moving_average.GetAverageRoundedToClosest(), 2);
-}
-
-TEST(MovingAverageTest, Reset) {
-  MovingAverage moving_average(5);
-  moving_average.AddSample(1);
-  EXPECT_EQ(1, moving_average.GetAverageRoundedDown());
-  EXPECT_EQ(1, moving_average.GetAverageRoundedToClosest());
-
-  moving_average.Reset();
-
-  EXPECT_EQ(0, moving_average.GetAverageRoundedDown());
-  moving_average.AddSample(10);
-  EXPECT_EQ(10, moving_average.GetAverageRoundedDown());
-  EXPECT_EQ(10, moving_average.GetAverageRoundedToClosest());
-}
-
-TEST(MovingAverageTest, ManySamples) {
-  MovingAverage moving_average(10);
-  for (int i = 1; i < 11; i++) {
-    moving_average.AddSample(i);
-  }
-  EXPECT_EQ(moving_average.GetAverageRoundedDown(), 5);
-  EXPECT_EQ(moving_average.GetAverageRoundedToClosest(), 6);
-  for (int i = 1; i < 2001; i++) {
-    moving_average.AddSample(i);
-  }
-  EXPECT_EQ(moving_average.GetAverageRoundedDown(), 1995);
-  EXPECT_EQ(moving_average.GetAverageRoundedToClosest(), 1996);
-}
-
-TEST(MovingAverageTest, VerifyNoOverflow) {
-  constexpr int kMaxInt = std::numeric_limits<int>::max();
-  MovingAverage moving_average(255);
-  for (int i = 0; i < 255; i++) {
-    moving_average.AddSample(kMaxInt);
-  }
-  EXPECT_EQ(moving_average.GetAverageRoundedDown(), kMaxInt);
-  EXPECT_EQ(moving_average.GetAverageRoundedToClosest(), kMaxInt);
-  EXPECT_EQ(moving_average.GetUnroundedAverage(), kMaxInt);
-}
-
-}  // namespace test
-}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source.h b/base/power_monitor/power_monitor_device_source.h
index 3e4c5eb..d03e4d5 100644
--- a/base/power_monitor/power_monitor_device_source.h
+++ b/base/power_monitor/power_monitor_device_source.h
@@ -24,7 +24,7 @@
 #if BUILDFLAG(IS_MAC)
 #include <IOKit/IOTypes.h>
 
-#include "base/mac/scoped_cftyperef.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/mac/scoped_ionotificationportref.h"
 #include "base/power_monitor/battery_level_provider.h"
 #include "base/power_monitor/iopm_power_source_sampling_event_source.h"
diff --git a/base/power_monitor/power_monitor_device_source_mac.mm b/base/power_monitor/power_monitor_device_source_mac.mm
index 87bd223..f385178 100644
--- a/base/power_monitor/power_monitor_device_source_mac.mm
+++ b/base/power_monitor/power_monitor_device_source_mac.mm
@@ -7,8 +7,8 @@
 
 #include "base/power_monitor/power_monitor_device_source.h"
 
-#include "base/mac/foundation_util.h"
-#include "base/mac/scoped_cftyperef.h"
+#include "base/apple/foundation_util.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/power_monitor/power_monitor.h"
 #include "base/power_monitor/power_monitor_source.h"
 
diff --git a/base/power_monitor/speed_limit_observer_win.cc b/base/power_monitor/speed_limit_observer_win.cc
index 2454140..f7f7927 100644
--- a/base/power_monitor/speed_limit_observer_win.cc
+++ b/base/power_monitor/speed_limit_observer_win.cc
@@ -16,6 +16,9 @@
 
 #include "base/logging.h"
 #include "base/system/sys_info.h"
+#include "base/timer/elapsed_timer.h"
+#include "base/trace_event/base_tracing.h"
+#include "build/build_config.h"
 
 namespace {
 
@@ -27,7 +30,9 @@
 
 // Size of moving-average filter which is used to smooth out variations in
 // speed-limit estimates.
-constexpr size_t kMovingAverageWindowSize = 10;
+size_t kMovingAverageWindowSize = 10;
+
+constexpr const char kPowerTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("power");
 
 // From
 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa373184(v=vs.85).aspx.
@@ -65,6 +70,43 @@
   return true;
 }
 
+#if defined(ARCH_CPU_X86_FAMILY)
+// Returns the estimated CPU frequency by executing a tight loop of predictable
+// assembly instructions. The estimated frequency should be proportional to,
+// and of about the same magnitude as, the real CPU frequency. The measurement
+// should be long enough to avoid the Turbo Boost effect (~3ms) and short
+// enough to stay within the operating system scheduler quantum (~100ms).
+double EstimateCpuFrequency() {
+  // The heuristic to estimate CPU frequency is based on UIforETW code.
+  // see: https://github.com/google/UIforETW/blob/main/UIforETW/CPUFrequency.cpp
+  //      https://github.com/google/UIforETW/blob/main/UIforETW/SpinALot64.asm
+  base::ElapsedTimer timer;
+  const int kAmountOfIterations = 50000;
+  const int kAmountOfInstructions = 10;
+  for (int i = 0; i < kAmountOfIterations; ++i) {
+    __asm__ __volatile__(
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        "addl  %%eax, %%eax\n"
+        :
+        :
+        : "eax");
+  }
+
+  const base::TimeDelta elapsed = timer.Elapsed();
+  const double estimated_frequency =
+      (kAmountOfIterations * kAmountOfInstructions) / elapsed.InSecondsF();
+  return estimated_frequency;
+}
+#endif
+
 }  // namespace
 
 namespace base {
@@ -95,6 +137,24 @@
   // Get the latest estimated throttling level (value between 0.0 and 1.0).
   float throttling_level = EstimateThrottlingLevel();
 
+  // Emit trace events to investigate issues with power throttling. Run this
+  // block only if tracing is running to avoid executing expensive calls to
+  // EstimateCpuFrequency(...).
+  bool trace_events_enabled;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kPowerTraceCategory,
+                                     &trace_events_enabled);
+  if (trace_events_enabled) {
+    TRACE_COUNTER1(kPowerTraceCategory, "idleness", idleness_percent);
+    TRACE_COUNTER1(kPowerTraceCategory, "throttling_level",
+                   static_cast<unsigned int>(throttling_level * 100));
+
+#if defined(ARCH_CPU_X86_FAMILY)
+    double cpu_frequency = EstimateCpuFrequency();
+    TRACE_COUNTER1(kPowerTraceCategory, "frequency_mhz",
+                   static_cast<unsigned int>(cpu_frequency / 1'000'000));
+#endif
+  }
+
   // Ignore the value if the global idleness is above 90% or throttling value
   // is very small. This approach avoids false alarms and removes noise from the
   // measurements.
@@ -119,14 +179,14 @@
     return kSpeedLimitMax;
   }
 
-  // Add the latest speeed-limit value [0,100] to the MA filter and return its
+  // Add the latest speed-limit value [0,100] to the MA filter and return its
   // output after ensuring that the filter is full. We do this to avoid initial
   // false alarms at startup and after calling Reset() on the filter.
   moving_average_.AddSample(speed_limit);
-  if (moving_average_.Size() < kMovingAverageWindowSize) {
+  if (moving_average_.Count() < kMovingAverageWindowSize) {
     return kSpeedLimitMax;
   }
-  return moving_average_.GetAverageRoundedDown();
+  return moving_average_.Mean();
 }
 
 void SpeedLimitObserverWin::OnTimerTick() {
@@ -137,6 +197,9 @@
     speed_limit_ = speed_limit;
     callback_.Run(speed_limit_);
   }
+
+  TRACE_COUNTER1(kPowerTraceCategory, "speed_limit",
+                 static_cast<unsigned int>(speed_limit));
 }
 
 float SpeedLimitObserverWin::EstimateThrottlingLevel() {
@@ -155,13 +218,17 @@
   // Estimate the level of throttling by measuring how many CPUs that are not
   // in idle state and how "far away" they are from the most idle state. Local
   // tests have shown that `MaxIdleState` is typically 2 or 3 and
-  // `CurrentIdleState` switches to 2 or 1 when some sort of trottling starts
-  // to take place. `CurrentIdleState` equal to 0 can happen on devices where
-  // `MaxIdleState` equals 1 but it seems hard to provoke when `MaxIdleState`
-  // is larger than 1.
-  // The Intel Extreme Tuning Utility application has been used to monitor when
-  // any type of throttling (thermal, power-limit, PMAX etc) starts.
+  //
+  // `CurrentIdleState` switches to 2 or 1 when some sort of throttling starts
+  // to take place. The Intel Extreme Tuning Utility application has been used
+  // to monitor when any type of throttling (thermal, power-limit, PMAX etc)
+  // starts.
+  //
+  // `CurrentIdleState` contains the CPU C-State + 1. When `MaxIdleState` is
+  // 1, the `CurrentIdleState` will always be 0 and the C-States are not
+  // supported.
   int num_non_idle_cpus = 0;
+  int num_active_cpus = 0;
   float load_fraction_total = 0.0;
   for (size_t i = 0; i < num_cpus(); ++i) {
     // Amount of "non-idleness" is the distance from the max idle state.
@@ -178,11 +245,22 @@
     load_fraction_total += load_fraction;
     // Used for a sanity check only.
     num_non_idle_cpus += (info[i].CurrentIdleState < info[i].MaxIdleState);
+
+    // Count the number of CPUs that are in the C0 state (active). If
+    // `MaxIdleState` is 1, C-states are not supported and the CPU is
+    // considered active.
+    if (info[i].MaxIdleState == 1 || info[i].CurrentIdleState == 1) {
+      num_active_cpus++;
+    }
   }
+
   DCHECK_LE(load_fraction_total, static_cast<float>(num_non_idle_cpus))
       << " load_fraction_total: " << load_fraction_total
       << " num_non_idle_cpus:" << num_non_idle_cpus;
   throttling_level = (load_fraction_total / num_cpus());
+
+  TRACE_COUNTER1(kPowerTraceCategory, "num_active_cpus", num_active_cpus);
+
   return throttling_level;
 }
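
To make the frequency estimate above concrete, here is the arithmetic it performs, with illustrative numbers rather than measurements; it assumes the dependent addl chain retires roughly one instruction per cycle:

// 50,000 iterations x 10 dependent addl instructions = 500,000 instructions.
// If the loop takes 0.2 ms, the estimate is 500,000 / 0.0002 s = 2.5e9,
// i.e. roughly 2.5 GHz.
constexpr int kAmountOfIterations = 50000;
constexpr int kAmountOfInstructions = 10;

double EstimateFromElapsedSeconds(double elapsed_seconds) {
  return (kAmountOfIterations * kAmountOfInstructions) / elapsed_seconds;
}
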
 
diff --git a/base/power_monitor/speed_limit_observer_win.h b/base/power_monitor/speed_limit_observer_win.h
index aefd887..8c06b8b 100644
--- a/base/power_monitor/speed_limit_observer_win.h
+++ b/base/power_monitor/speed_limit_observer_win.h
@@ -7,7 +7,7 @@
 
 #include "base/base_export.h"
 #include "base/functional/callback.h"
-#include "base/power_monitor/moving_average.h"
+#include "base/moving_window.h"
 #include "base/power_monitor/power_observer.h"
 #include "base/time/time.h"
 #include "base/timer/timer.h"
@@ -54,7 +54,7 @@
   // sample rate is one sample per seconds but the existing choice is rather
   // ad-hoc and not based on any deeper analysis into exact frequency
   // characteristics of the underlying process.
-  MovingAverage moving_average_;
+  MovingAverage<int, int64_t> moving_average_;
   // Max speed-limit value is 100 (%) and it is also used in cases where the
   // native Windows API(s) fail.
   int speed_limit_ = PowerThermalObserver::kSpeedLimitMax;
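
A short sketch of how the replacement filter from base/moving_window.h is driven; AddSample(), Count() and Mean() are the calls visible in the .cc hunk above, while the window-size constructor argument is an assumption carried over from the old MovingAverage:

#include "base/moving_window.h"

void SmoothSpeedLimit() {
  constexpr size_t kWindow = 10;
  base::MovingAverage<int, int64_t> filter(kWindow);  // assumed constructor
  filter.AddSample(95);
  filter.AddSample(100);
  // Only trust the output once the window is full, as the observer does.
  if (filter.Count() >= kWindow) {
    int smoothed = filter.Mean();
    (void)smoothed;
  }
}
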
diff --git a/base/power_monitor/thermal_state_observer_mac.mm b/base/power_monitor/thermal_state_observer_mac.mm
index 63d9e24..44f2a65 100644
--- a/base/power_monitor/thermal_state_observer_mac.mm
+++ b/base/power_monitor/thermal_state_observer_mac.mm
@@ -14,8 +14,8 @@
 
 #include <memory>
 
+#include "base/apple/scoped_cftyperef.h"
 #include "base/logging.h"
-#include "base/mac/scoped_cftyperef.h"
 #include "base/power_monitor/power_monitor.h"
 #include "base/power_monitor/power_monitor_source.h"
 #include "base/power_monitor/power_observer.h"
@@ -111,7 +111,7 @@
 }
 
 int ThermalStateObserverMac::GetCurrentSpeedLimit() {
-  base::ScopedCFTypeRef<CFDictionaryRef> dictionary;
+  apple::ScopedCFTypeRef<CFDictionaryRef> dictionary;
   IOReturn result = IOPMCopyCPUPowerStatus(dictionary.InitializeInto());
   if (result != kIOReturnSuccess) {
     DVLOG(1) << __func__
diff --git a/base/process/environment_internal.cc b/base/process/environment_internal.cc
index ad3ab4a..c1dacc4 100644
--- a/base/process/environment_internal.cc
+++ b/base/process/environment_internal.cc
@@ -6,13 +6,17 @@
 
 #include <stddef.h>
 
+#include <vector>
+
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
 #include <string.h>
 #endif
 
-#include <vector>
+#if BUILDFLAG(IS_WIN)
+#include "base/check_op.h"
+#endif
 
 namespace base {
 namespace internal {
diff --git a/base/process/internal_linux.h b/base/process/internal_linux.h
index 98ed4d0..a2657a7 100644
--- a/base/process/internal_linux.h
+++ b/base/process/internal_linux.h
@@ -35,7 +35,7 @@
 extern const char kStatFile[];
 
 // Returns a FilePath to "/proc/pid".
-base::FilePath GetProcPidDir(pid_t pid);
+BASE_EXPORT base::FilePath GetProcPidDir(pid_t pid);
 
 // Reads a file from /proc into a string. This is allowed on any thread as
 // reading from /proc does not hit the disk. Returns true if the file can be
diff --git a/base/process/launch_win.cc b/base/process/launch_win.cc
index 384c771..eca51fa 100644
--- a/base/process/launch_win.cc
+++ b/base/process/launch_win.cc
@@ -57,7 +57,7 @@
 
   // Create the pipe for the child process's STDOUT.
   if (!CreatePipe(&out_read, &out_write, &sa_attr, 0)) {
-    NOTREACHED() << "Failed to create pipe";
+    DPLOG(ERROR) << "Failed to create pipe";
     return false;
   }
 
@@ -67,7 +67,7 @@
 
   // Ensure the read handles to the pipes are not inherited.
   if (!SetHandleInformation(out_read, HANDLE_FLAG_INHERIT, 0)) {
-    NOTREACHED() << "Failed to disabled pipe inheritance";
+    DPLOG(ERROR) << "Failed to disable pipe inheritance";
     return false;
   }
 
@@ -92,7 +92,7 @@
                      nullptr,
                      TRUE,  // Handles are inherited.
                      0, nullptr, nullptr, &start_info, &temp_process_info)) {
-    NOTREACHED() << "Failed to start process";
+    DPLOG(ERROR) << "Failed to start process";
     return false;
   }
 
diff --git a/base/process/memory.cc b/base/process/memory.cc
index b3ebf32..be9e151 100644
--- a/base/process/memory.cc
+++ b/base/process/memory.cc
@@ -7,14 +7,14 @@
 #include <string.h>
 
 #include "base/allocator/buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/debug/alias.h"
 #include "base/immediate_crash.h"
 #include "base/logging.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
 #endif
 
 #if BUILDFLAG(IS_WIN)
diff --git a/base/process/memory.h b/base/process/memory.h
index 88482bc..7f9c1f8 100644
--- a/base/process/memory.h
+++ b/base/process/memory.h
@@ -7,7 +7,7 @@
 
 #include <stddef.h>
 
-#include "base/allocator/partition_allocator/oom.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/oom.h"
 #include "base/base_export.h"
 #include "base/process/process_handle.h"
 #include "build/build_config.h"
diff --git a/base/process/memory_fuchsia.cc b/base/process/memory_fuchsia.cc
index 4e30f4a..b297e91 100644
--- a/base/process/memory_fuchsia.cc
+++ b/base/process/memory_fuchsia.cc
@@ -4,10 +4,10 @@
 
 #include "base/process/memory.h"
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #endif
 
 #include <stdlib.h>
diff --git a/base/process/memory_linux.cc b/base/process/memory_linux.cc
index a615585..b8652fd 100644
--- a/base/process/memory_linux.cc
+++ b/base/process/memory_linux.cc
@@ -8,8 +8,8 @@
 
 #include <new>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
diff --git a/base/process/memory_mac.mm b/base/process/memory_mac.mm
index bd8b20e..742e929 100644
--- a/base/process/memory_mac.mm
+++ b/base/process/memory_mac.mm
@@ -6,9 +6,9 @@
 
 #include <new>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "build/build_config.h"
 
 namespace base {
diff --git a/base/process/memory_unittest.cc b/base/process/memory_unittest.cc
index f842671..08b55ff 100644
--- a/base/process/memory_unittest.cc
+++ b/base/process/memory_unittest.cc
@@ -15,8 +15,8 @@
 #include <vector>
 
 #include "base/allocator/allocator_check.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/compiler_specific.h"
 #include "base/debug/alias.h"
 #include "base/memory/aligned_memory.h"
@@ -32,8 +32,8 @@
 #endif
 #if BUILDFLAG(IS_MAC)
 #include <malloc/malloc.h>
-#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_interception_apple.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "base/check_op.h"
 #include "base/process/memory_unittest_mac.h"
 #endif
diff --git a/base/process/memory_win.cc b/base/process/memory_win.cc
index 443bba1..d3f7d23 100644
--- a/base/process/memory_win.cc
+++ b/base/process/memory_win.cc
@@ -4,6 +4,12 @@
 
 #include "base/process/memory.h"
 
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
+
 #include <windows.h>  // Must be in front of other Windows header files.
 
 #include <new.h>
@@ -11,31 +17,6 @@
 #include <stddef.h>
 #include <stdlib.h>
 
-#if defined(__clang__)
-// This global constructor is trivial and non-racy (per being const).
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wglobal-constructors"
-#endif
-
-// malloc_unchecked is required to implement UncheckedMalloc properly.
-// It's provided by allocator_shim_win.cc but since that's not always present,
-// we provide a default that falls back to regular malloc.
-typedef void* (*MallocFn)(size_t);
-extern "C" void* (*const malloc_unchecked)(size_t);
-extern "C" void* (*const malloc_default)(size_t) = &malloc;
-
-#if defined(__clang__)
-#pragma clang diagnostic pop  // -Wglobal-constructors
-#endif
-
-#if defined(_M_IX86)
-#pragma comment(linker, "/alternatename:_malloc_unchecked=_malloc_default")
-#elif defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)
-#pragma comment(linker, "/alternatename:malloc_unchecked=malloc_default")
-#else
-#error Unsupported platform
-#endif
-
 namespace base {
 
 namespace {
@@ -62,14 +43,24 @@
   _set_new_mode(kCallNewHandlerOnAllocationFailure);
 }
 
-// Implemented using a weak symbol.
 bool UncheckedMalloc(size_t size, void** result) {
-  *result = malloc_unchecked(size);
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  *result = allocator_shim::UncheckedAlloc(size);
+#else
+  // malloc_unchecked is required to implement UncheckedMalloc properly.
+  // It's provided by allocator_shim_win.cc, but since that's not always
+  // present, fall back to regular malloc in that case.
+  *result = malloc(size);
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
   return *result != NULL;
 }
 
 void UncheckedFree(void* ptr) {
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  allocator_shim::UncheckedFree(ptr);
+#else
   free(ptr);
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
 }
 
 }  // namespace base
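
For context, the caller-side contract of these helpers is unchanged by the rewrite; a minimal sketch (error handling kept deliberately simple):

#include "base/process/memory.h"

void AllocateWithoutOomCrash() {
  // UncheckedMalloc() reports failure instead of terminating the process,
  // so the result must be checked before use.
  void* buffer = nullptr;
  if (base::UncheckedMalloc(64 * 1024, &buffer) && buffer) {
    // ... use up to 64 KiB at |buffer| ...
    base::UncheckedFree(buffer);
  }
}
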
diff --git a/base/process/process.h b/base/process/process.h
index 1332758..80f9ae4 100644
--- a/base/process/process.h
+++ b/base/process/process.h
@@ -31,16 +31,18 @@
 
 namespace base {
 
-#if BUILDFLAG(IS_APPLE)
-BASE_DECLARE_FEATURE(kMacAllowBackgroundingProcesses);
-#endif
-
 #if BUILDFLAG(IS_CHROMEOS)
 // OneGroupPerRenderer feature places each foreground renderer process into
 // its own cgroup. This will cause the scheduler to use the aggregate runtime
 // of all threads in the process when deciding on the next thread to schedule.
 // It will help guarantee fairness between renderers.
 BASE_EXPORT BASE_DECLARE_FEATURE(kOneGroupPerRenderer);
+
+// Sets all threads of a background process as backgrounded, which changes
+// thread attributes including cgroup and latency sensitivity. The nice value
+// is unchanged, since the background process is already governed by the
+// background CPU cgroup (via cgroup.procs).
+BASE_EXPORT BASE_DECLARE_FEATURE(kSetThreadBgForBgProcess);
 #endif
 
 #if BUILDFLAG(IS_WIN)
@@ -266,9 +268,7 @@
 #endif  // BUILDFLAG(IS_CHROMEOS)
 
 #if BUILDFLAG(IS_APPLE)
-  // Sets the `task_role_t` of the current task (the calling process) to
-  // TASK_DEFAULT_APPLICATION, if the MacSetDefaultTaskRole feature is
-  // enabled.
+  // Sets the priority of the current process to its default value.
   static void SetCurrentTaskDefaultRole();
 #endif  // BUILDFLAG(IS_MAC)
 
diff --git a/base/process/process_handle_freebsd.cc b/base/process/process_handle_freebsd.cc
index 585587a..07449df 100644
--- a/base/process/process_handle_freebsd.cc
+++ b/base/process/process_handle_freebsd.cc
@@ -11,6 +11,10 @@
 #include <sys/user.h>
 #include <unistd.h>
 
+#include "base/files/file_path.h"
+#include "base/posix/sysctl.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
 namespace base {
 
 ProcessId GetParentProcessId(ProcessHandle process) {
@@ -25,18 +29,10 @@
 }
 
 FilePath GetProcessExecutablePath(ProcessHandle process) {
-  char pathname[PATH_MAX];
-  size_t length;
-  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, process };
+  absl::optional<std::string> pathname =
+      base::StringSysctl({CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, process});
 
-  length = sizeof(pathname);
-
-  if (sysctl(mib, std::size(mib), pathname, &length, NULL, 0) < 0 ||
-      length == 0) {
-    return FilePath();
-  }
-
-  return FilePath(std::string(pathname));
+  return FilePath(pathname.value_or(std::string{}));
 }
 
 }  // namespace base
diff --git a/base/process/process_linux.cc b/base/process/process_linux.cc
index a6f1585..8597b4d 100644
--- a/base/process/process_linux.cc
+++ b/base/process/process_linux.cc
@@ -21,6 +21,8 @@
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_split.h"
 #include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/platform_thread_internal_posix.h"
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
 #include "build/chromeos_buildflags.h"
@@ -215,6 +217,26 @@
   DCHECK(IsValid());
 
 #if BUILDFLAG(IS_CHROMEOS)
+  // Go through all the threads for a process and set it as [un]backgrounded.
+  // Threads that are created after this call will also be [un]backgrounded by
+  // detecting that the main thread of the process has been [un]backgrounded.
+
+  // Should not be called concurrently with other functions
+  // like SetThreadType().
+  if (PlatformThreadChromeOS::IsThreadsBgFeatureEnabled()) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(
+        PlatformThreadChromeOS::GetCrossProcessThreadPrioritySequenceChecker());
+
+    int process_id = process_;
+    bool background = priority == Priority::kBestEffort;
+    internal::ForEachProcessTask(
+        process_,
+        [process_id, background](PlatformThreadId tid, const FilePath& path) {
+          PlatformThreadChromeOS::SetThreadBackgrounded(process_id, tid,
+                                                        background);
+        });
+  }
+
   if (CGroups::Get().enabled) {
     std::string pid = NumberToString(process_);
     const FilePath file =
diff --git a/base/process/process_mac.cc b/base/process/process_mac.cc
index 355859c..b8659de 100644
--- a/base/process/process_mac.cc
+++ b/base/process/process_mac.cc
@@ -13,9 +13,10 @@
 
 #include <iterator>
 #include <memory>
+#include <utility>
 
+#include "base/apple/mach_logging.h"
 #include "base/feature_list.h"
-#include "base/mac/mach_logging.h"
 #include "base/memory/free_deleter.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
@@ -29,17 +30,8 @@
              "MacSetDefaultTaskRole",
              FEATURE_DISABLED_BY_DEFAULT);
 
-// Returns the `task_role_t` of the process whose process ID is `pid`.
-absl::optional<task_role_t> GetTaskCategoryPolicyRole(
-    PortProvider* port_provider,
-    ProcessId pid) {
-  DCHECK(port_provider);
-
-  mach_port_t task_port = port_provider->TaskForPid(pid);
-  if (task_port == TASK_NULL) {
-    return absl::nullopt;
-  }
-
+// Returns the `task_role_t` of the process whose task port is `task_port`.
+absl::optional<task_role_t> GetTaskCategoryPolicyRole(mach_port_t task_port) {
   task_category_policy_data_t category_policy;
   mach_msg_type_number_t task_info_count = TASK_CATEGORY_POLICY_COUNT;
   boolean_t get_default = FALSE;
@@ -52,10 +44,103 @@
     MACH_LOG(ERROR, result) << "task_policy_get TASK_CATEGORY_POLICY";
     return absl::nullopt;
   }
-  DCHECK(!get_default);
+  CHECK(!get_default);
   return category_policy.role;
 }
 
+// Sets the task role for `task_port`.
+bool SetTaskCategoryPolicy(mach_port_t task_port, task_role_t task_role) {
+  task_category_policy task_category_policy{.role = task_role};
+  kern_return_t result =
+      task_policy_set(task_port, TASK_CATEGORY_POLICY,
+                      reinterpret_cast<task_policy_t>(&task_category_policy),
+                      TASK_CATEGORY_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_LOG(ERROR, result) << "task_policy_set TASK_CATEGORY_POLICY";
+    return false;
+  }
+  return true;
+}
+
+// Taken from task_policy_private.h.
+struct task_suppression_policy {
+  integer_t active;
+  integer_t lowpri_cpu;
+  integer_t timer_throttle;
+  integer_t disk_throttle;
+  integer_t cpu_limit;
+  integer_t suspend;
+  integer_t throughput_qos;
+  integer_t suppressed_cpu;
+  integer_t background_sockets;
+  integer_t reserved[7];
+};
+
+// Taken from task_policy_private.h.
+#define TASK_SUPPRESSION_POLICY_COUNT                                \
+  ((mach_msg_type_number_t)(sizeof(struct task_suppression_policy) / \
+                            sizeof(integer_t)))
+
+// Activates or deactivates the suppression policy to match the effect of App
+// Nap.
+bool SetTaskSuppressionPolicy(mach_port_t task_port, bool activate) {
+  task_suppression_policy suppression_policy = {
+      .active = activate,
+      .lowpri_cpu = activate,
+      .timer_throttle =
+          activate ? LATENCY_QOS_TIER_5 : LATENCY_QOS_TIER_UNSPECIFIED,
+      .disk_throttle = activate,
+      .cpu_limit = 0,                                    /* unused */
+      .suspend = false,                                  /* unused */
+      .throughput_qos = THROUGHPUT_QOS_TIER_UNSPECIFIED, /* unused */
+      .suppressed_cpu = activate,
+      .background_sockets = activate,
+  };
+  kern_return_t result =
+      task_policy_set(task_port, TASK_SUPPRESSION_POLICY,
+                      reinterpret_cast<task_policy_t>(&suppression_policy),
+                      TASK_SUPPRESSION_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_LOG(ERROR, result) << "task_policy_set TASK_SUPPRESSION_POLICY";
+    return false;
+  }
+  return true;
+}
+
+// Returns true if the task suppression policy is active for `task_port`.
+bool IsTaskSuppressionPolicyActive(mach_port_t task_port) {
+  task_suppression_policy suppression_policy = {
+      .active = false,
+  };
+
+  mach_msg_type_number_t task_info_count = TASK_SUPPRESSION_POLICY_COUNT;
+  boolean_t get_default = FALSE;
+
+  kern_return_t result =
+      task_policy_get(task_port, TASK_SUPPRESSION_POLICY,
+                      reinterpret_cast<task_policy_t>(&suppression_policy),
+                      &task_info_count, &get_default);
+  if (result != KERN_SUCCESS) {
+    MACH_LOG(ERROR, result) << "task_policy_get TASK_SUPPRESSION_POLICY";
+    return false;
+  }
+  CHECK(!get_default);
+
+  // Only check the `active` property as it is sufficient to discern the state,
+  // even though other properties could be used.
+  return suppression_policy.active;
+}
+
+// Sets the task role and the suppression policy for `task_port`.
+bool SetPriorityImpl(mach_port_t task_port,
+                     task_role_t task_role,
+                     bool activate_suppression_policy) {
+  // Do both operations, even if the first one fails.
+  bool succeeded = SetTaskCategoryPolicy(task_port, task_role);
+  succeeded &= SetTaskSuppressionPolicy(task_port, activate_suppression_policy);
+  return succeeded;
+}
+
 }  // namespace
 
 Time Process::CreationTime() const {
@@ -76,58 +161,70 @@
 }
 
 Process::Priority Process::GetPriority(PortProvider* port_provider) const {
-  DCHECK(IsValid());
-  DCHECK(port_provider);
+  CHECK(IsValid());
+  CHECK(port_provider);
 
-  // A process is backgrounded if the role is explicitly
-  // TASK_BACKGROUND_APPLICATION (as opposed to not being
-  // TASK_FOREGROUND_APPLICATION).
-  absl::optional<task_role_t> task_role =
-      GetTaskCategoryPolicyRole(port_provider, Pid());
-  if (task_role && *task_role == TASK_BACKGROUND_APPLICATION) {
-    return Priority::kBestEffort;
+  mach_port_t task_port = port_provider->TaskForPid(Pid());
+  if (task_port == TASK_NULL) {
+    // Upon failure, return the default value.
+    return Priority::kUserBlocking;
   }
+
+  absl::optional<task_role_t> task_role = GetTaskCategoryPolicyRole(task_port);
+  if (!task_role) {
+    // Upon failure, return the default value.
+    return Priority::kUserBlocking;
+  }
+  bool is_suppression_policy_active = IsTaskSuppressionPolicyActive(task_port);
+  if (*task_role == TASK_BACKGROUND_APPLICATION &&
+      is_suppression_policy_active) {
+    return Priority::kBestEffort;
+  } else if (*task_role == TASK_BACKGROUND_APPLICATION &&
+             !is_suppression_policy_active) {
+    return Priority::kUserVisible;
+  } else if (*task_role == TASK_FOREGROUND_APPLICATION &&
+             !is_suppression_policy_active) {
+    return Priority::kUserBlocking;
+  }
+
+  // It is possible to get a different state very early in the process lifetime,
+  // before SetCurrentTaskDefaultRole() has been invoked. Assume highest
+  // priority then.
   return Priority::kUserBlocking;
 }
 
 bool Process::SetPriority(PortProvider* port_provider, Priority priority) {
-  DCHECK(IsValid());
-  DCHECK(port_provider);
+  CHECK(IsValid());
+  CHECK(port_provider);
 
   if (!CanSetPriority()) {
     return false;
   }
 
   mach_port_t task_port = port_provider->TaskForPid(Pid());
-  if (task_port == TASK_NULL)
-    return false;
-
-  absl::optional<task_role_t> current_role =
-      GetTaskCategoryPolicyRole(port_provider, Pid());
-  if (!current_role) {
+  if (task_port == TASK_NULL) {
     return false;
   }
 
-  const bool background = priority == base::Process::Priority::kBestEffort;
-  if ((background && *current_role == TASK_BACKGROUND_APPLICATION) ||
-      (!background && *current_role == TASK_FOREGROUND_APPLICATION)) {
-    return true;
+  switch (priority) {
+    case Priority::kBestEffort:
+      // Activate the suppression policy.
+      // Note:
+      // App Nap keeps the task role to TASK_FOREGROUND_APPLICATION when it
+      // activates the suppression policy. Here TASK_BACKGROUND_APPLICATION is
+      // used instead to keep the kBestEffort role consistent with the value for
+      // kUserVisible (so that it is not greater than kUserVisible). This
+      // difference is unlikely to matter.
+      return SetPriorityImpl(task_port, TASK_BACKGROUND_APPLICATION, true);
+    case Priority::kUserVisible:
+      // Set a task role with a lower priority than kUserBlocking, but do not
+      // activate the suppression policy.
+      return SetPriorityImpl(task_port, TASK_BACKGROUND_APPLICATION, false);
+    case Priority::kUserBlocking:
+    default:
+      // Set the highest priority with the suppression policy inactive.
+      return SetPriorityImpl(task_port, TASK_FOREGROUND_APPLICATION, false);
   }
-
-  task_category_policy category_policy;
-  category_policy.role =
-      background ? TASK_BACKGROUND_APPLICATION : TASK_FOREGROUND_APPLICATION;
-  kern_return_t result =
-      task_policy_set(task_port, TASK_CATEGORY_POLICY,
-                      reinterpret_cast<task_policy_t>(&category_policy),
-                      TASK_CATEGORY_POLICY_COUNT);
-
-  if (result != KERN_SUCCESS) {
-    MACH_LOG(ERROR, result) << "task_policy_set TASK_CATEGORY_POLICY";
-    return false;
-  }
-
-  return true;
 }
 
 // static
@@ -136,11 +233,17 @@
     return;
   }
 
-  task_category_policy category_policy;
-  category_policy.role = TASK_DEFAULT_APPLICATION;
-  task_policy_set(mach_task_self(), TASK_CATEGORY_POLICY,
-                  reinterpret_cast<task_policy_t>(&category_policy),
-                  TASK_CATEGORY_POLICY_COUNT);
+  SetTaskCategoryPolicy(mach_task_self(), TASK_FOREGROUND_APPLICATION);
+
+  // Set the QoS settings to tier 0, to match the default value given to App Nap
+  // enabled applications.
+  task_qos_policy task_qos_policy = {
+      .task_latency_qos_tier = LATENCY_QOS_TIER_0,
+      .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0,
+  };
+  task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY,
+                  reinterpret_cast<task_policy_t>(&task_qos_policy),
+                  TASK_QOS_POLICY_COUNT);
 }
 
 }  // namespace base
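
Summarizing the mapping that SetPriority() now implements (a recap of the switch above, not additional behavior):

// Priority       -> task role                     suppression policy
// kBestEffort    -> TASK_BACKGROUND_APPLICATION   active
// kUserVisible   -> TASK_BACKGROUND_APPLICATION   inactive
// kUserBlocking  -> TASK_FOREGROUND_APPLICATION   inactive
//
// GetPriority() inverts this table and falls back to kUserBlocking when the
// task port or the task role cannot be obtained, or when an unexpected
// combination is observed.
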
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index a3b441d..bc62215 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -128,34 +128,6 @@
 }
 #endif
 
-#if BUILDFLAG(IS_WIN)
-double ProcessMetrics::GetPreciseCPUUsage(TimeDelta cumulative_cpu) {
-  TimeTicks time = TimeTicks::Now();
-
-  if (last_precise_cumulative_cpu_.is_zero()) {
-    // First call, just set the last values.
-    last_precise_cumulative_cpu_ = cumulative_cpu;
-    last_cpu_time_for_precise_cpu_usage_ = time;
-    return 0;
-  }
-
-  TimeDelta cpu_time_delta = cumulative_cpu - last_precise_cumulative_cpu_;
-  TimeDelta time_delta = time - last_cpu_time_for_precise_cpu_usage_;
-  DCHECK(!time_delta.is_zero());
-  if (time_delta.is_zero())
-    return 0;
-
-  last_precise_cumulative_cpu_ = cumulative_cpu;
-  last_cpu_time_for_precise_cpu_usage_ = time;
-
-  return 100.0 * cpu_time_delta / time_delta;
-}
-
-double ProcessMetrics::GetPreciseCPUUsage() {
-  return GetPreciseCPUUsage(GetPreciseCumulativeCPUUsage());
-}
-#endif  // BUILDFLAG(IS_WIN)
-
 #if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
     BUILDFLAG(IS_AIX)
 int ProcessMetrics::CalculateIdleWakeupsPerSecond(
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 4561afd..f65efa6 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -136,32 +136,6 @@
   // will result in a time delta of 2 seconds/per 1 wall-clock second.
   [[nodiscard]] TimeDelta GetCumulativeCPUUsage();
 
-#if BUILDFLAG(IS_WIN)
-  // TODO(pmonette): Remove the precise version of the CPU usage functions once
-  // we're validated that they are indeed better than the regular version above
-  // and that they can replace the old implementation.
-
-  // Returns the percentage of time spent executing, across all threads of the
-  // process, in the interval since the last time the method was called, using
-  // the current |cumulative_cpu|.
-  //
-  // Same as GetPlatformIndependentCPUUSage() but implemented using
-  // `QueryProcessCycleTime` for higher precision.
-  [[nodiscard]] double GetPreciseCPUUsage(TimeDelta cumulative_cpu);
-
-  // Same as the above, but automatically calls GetPreciseCumulativeCPUUsage()
-  // to determine the current cumulative CPU.
-  [[nodiscard]] double GetPreciseCPUUsage();
-
-  // Returns the cumulative CPU usage across all threads of the process since
-  // process start. In case of multi-core processors, a process can consume CPU
-  // at a rate higher than wall-clock time, e.g. two cores at full utilization
-  // will result in a time delta of 2 seconds/per 1 wall-clock second.
-  //
-  // This is implemented using `QueryProcessCycleTime` for higher precision.
-  [[nodiscard]] TimeDelta GetPreciseCumulativeCPUUsage();
-#endif  // BUILDFLAG(IS_WIN)
-
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
     BUILDFLAG(IS_AIX)
   // Emits the cumulative CPU usage for all currently active threads since they
@@ -196,10 +170,6 @@
   // measures such as placing DRAM in to self-refresh (also referred to as
   // auto-refresh), place interconnects into lower-power states etc"
   int GetPackageIdleWakeupsPerSecond();
-
-  // Returns "Energy Impact", a synthetic power estimation metric displayed by
-  // macOS in Activity Monitor and the battery menu.
-  int GetEnergyImpact();
 #endif
 
   // Retrieves accounting information for all I/O operations performed by the
@@ -270,11 +240,6 @@
   TimeDelta last_cumulative_cpu_;
 #endif
 
-#if BUILDFLAG(IS_WIN)
-  TimeTicks last_cpu_time_for_precise_cpu_usage_;
-  TimeDelta last_precise_cumulative_cpu_;
-#endif
-
 #if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
     BUILDFLAG(IS_AIX)
   // Same thing for idle wakeups.
@@ -286,9 +251,10 @@
   // And same thing for package idle exit wakeups.
   TimeTicks last_package_idle_wakeups_time_;
   uint64_t last_absolute_package_idle_wakeups_;
-  double last_energy_impact_;
-  // In mach_absolute_time units.
-  uint64_t last_energy_impact_time_;
+
+  // Works around a race condition when combining two task_info() calls to
+  // measure CPU time.
+  TimeDelta last_measured_cpu_;
 #endif
 
 #if BUILDFLAG(IS_MAC)
@@ -628,9 +594,7 @@
                                             mach_vm_size_t* size,
                                             mach_vm_address_t* address,
                                             vm_region_basic_info_64* info);
-#endif  // BUILDFLAG(IS_APPLE)
 
-#if BUILDFLAG(IS_MAC)
 // Returns info on the first memory region at or after |address|, including
 // resident memory and share mode. On Success, |size| reflects the size of the
 // memory region.
@@ -641,7 +605,7 @@
                                           mach_vm_size_t* size,
                                           mach_vm_address_t* address,
                                           vm_region_top_info_data_t* info);
-#endif  // BUILDFLAG(IS_MAC)
+#endif  // BUILDFLAG(IS_APPLE)
 
 }  // namespace base
 
diff --git a/base/process/process_metrics_apple.cc b/base/process/process_metrics_apple.cc
index be31cdc..54dd5f2 100644
--- a/base/process/process_metrics_apple.cc
+++ b/base/process/process_metrics_apple.cc
@@ -11,10 +11,10 @@
 #include <stdint.h>
 #include <sys/sysctl.h>
 
+#include "base/apple/mach_logging.h"
+#include "base/apple/scoped_mach_port.h"
 #include "base/logging.h"
 #include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_mach_port.h"
 #include "base/memory/ptr_util.h"
 #include "base/numerics/safe_math.h"
 #include "base/time/time.h"
@@ -128,7 +128,18 @@
   timeradd(&user_timeval, &task_timeval, &task_timeval);
   timeradd(&system_timeval, &task_timeval, &task_timeval);
 
-  return Microseconds(TimeValToMicroseconds(task_timeval));
+  const TimeDelta measured_cpu =
+      Microseconds(TimeValToMicroseconds(task_timeval));
+  if (measured_cpu < last_measured_cpu_) {
+    // When a thread terminates, its CPU time is immediately removed from the
+    // running thread times returned by TASK_THREAD_TIMES_INFO, but there can be
+    // a lag before it shows up in the terminated thread times returned by
+    // GetTaskInfo(). Make sure CPU usage doesn't appear to go backwards if
+    // GetCumulativeCPUUsage() is called in the interval.
+    return last_measured_cpu_;
+  }
+  last_measured_cpu_ = measured_cpu;
+  return measured_cpu;
 }
 
 int ProcessMetrics::GetPackageIdleWakeupsPerSecond() {
@@ -162,7 +173,7 @@
 
 // Bytes committed by the system.
 size_t GetSystemCommitCharge() {
-  base::mac::ScopedMachSendRight host(mach_host_self());
+  base::apple::ScopedMachSendRight host(mach_host_self());
   mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
   vm_statistics_data_t data;
   kern_return_t kr = host_statistics(
@@ -178,7 +189,7 @@
 bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
   struct host_basic_info hostinfo;
   mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
-  base::mac::ScopedMachSendRight host(mach_host_self());
+  base::apple::ScopedMachSendRight host(mach_host_self());
   int result = host_info(host.get(), HOST_BASIC_INFO,
                          reinterpret_cast<host_info_t>(&hostinfo), &count);
   if (result != KERN_SUCCESS) {
@@ -206,8 +217,25 @@
 #else
   static_assert(PAGE_SIZE % 1024 == 0, "Invalid page size");
 #endif
-  meminfo->free = saturated_cast<int>(
-      PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+
+  if (vm_info.speculative_count <= vm_info.free_count) {
+    meminfo->free = saturated_cast<int>(
+        PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+  } else {
+    // Inside the `host_statistics64` call above, `speculative_count` is
+    // computed later than `free_count`, so these values are snapshots of two
+    // (slightly) different points in time. As a result, it is possible for
+    // `speculative_count` to have increased significantly since `free_count`
+    // was computed, even to a point where `speculative_count` is greater than
+    // the computed value of `free_count`. See
+    // https://github.com/apple-oss-distributions/xnu/blob/aca3beaa3dfbd42498b42c5e5ce20a938e6554e5/osfmk/kern/host.c#L788
+    // In this case, 0 is the best approximation for `meminfo->free`. This is
+    // inexact, but even in the case where `speculative_count` is less than
+    // `free_count`, the computed `meminfo->free` will only be an approximation
+    // given that the two inputs come from different points in time.
+    meminfo->free = 0;
+  }
+
   meminfo->speculative =
       saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
   meminfo->file_backed =
@@ -228,18 +256,18 @@
   // The kernel always returns a null object for VM_REGION_TOP_INFO, but
   // balance it with a deallocate in case this ever changes. See 10.9.2
   // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
-  mac::ScopedMachSendRight object_name;
+  apple::ScopedMachSendRight object_name;
 
   kern_return_t kr =
 #if BUILDFLAG(IS_MAC)
       mach_vm_region(task, address, size, VM_REGION_TOP_INFO,
                      reinterpret_cast<vm_region_info_t>(info), &info_count,
-                     mac::ScopedMachSendRight::Receiver(object_name).get());
+                     apple::ScopedMachSendRight::Receiver(object_name).get());
 #else
       vm_region_64(task, reinterpret_cast<vm_address_t*>(address),
-                   reinterpret_cast<vm_size_t*>(size), VM_REGION_BASIC_INFO_64,
+                   reinterpret_cast<vm_size_t*>(size), VM_REGION_TOP_INFO,
                    reinterpret_cast<vm_region_info_t>(info), &info_count,
-                   mac::ScopedMachSendRight::Receiver(object_name).get());
+                   apple::ScopedMachSendRight::Receiver(object_name).get());
 #endif
   return ParseOutputFromMachVMRegion(kr);
 }
@@ -252,23 +280,54 @@
   // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
   // balance it with a deallocate in case this ever changes. See 10.9.2
   // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
-  mac::ScopedMachSendRight object_name;
+  apple::ScopedMachSendRight object_name;
 
   kern_return_t kr =
 #if BUILDFLAG(IS_MAC)
       mach_vm_region(task, address, size, VM_REGION_BASIC_INFO_64,
                      reinterpret_cast<vm_region_info_t>(info), &info_count,
-                     mac::ScopedMachSendRight::Receiver(object_name).get());
+                     apple::ScopedMachSendRight::Receiver(object_name).get());
 
 #else
       vm_region_64(task, reinterpret_cast<vm_address_t*>(address),
                    reinterpret_cast<vm_size_t*>(size), VM_REGION_BASIC_INFO_64,
                    reinterpret_cast<vm_region_info_t>(info), &info_count,
-                   mac::ScopedMachSendRight::Receiver(object_name).get());
+                   apple::ScopedMachSendRight::Receiver(object_name).get());
 #endif
   return ParseOutputFromMachVMRegion(kr);
 }
 
+int ProcessMetrics::GetOpenFdCount() const {
+#if BUILDFLAG(USE_BLINK)
+  // To get a true count of the number of open FDs, PROC_PIDLISTFDS is used.
+  // This is done twice: first to get the appropriate size of a buffer, and
+  // then to fill the buffer with the actual FD info.
+  //
+  // The buffer size returned in the first call is an estimate, based on the
+  // number of allocated fileproc structures in the kernel. This number can be
+  // greater than the actual number of open files, since the structures are
+  // allocated in slabs. The value returned in proc_bsdinfo::pbi_nfiles is
+  // also the number of allocated fileprocs, not the number in use.
+  //
+  // However, the buffer size returned in the second call is an accurate count
+  // of the number of open descriptors. The contents of the buffer are unused.
+  int rv = proc_pidinfo(process_, PROC_PIDLISTFDS, 0, nullptr, 0);
+  if (rv < 0) {
+    return -1;
+  }
+
+  std::unique_ptr<char[]> buffer(new char[static_cast<size_t>(rv)]);
+  rv = proc_pidinfo(process_, PROC_PIDLISTFDS, 0, buffer.get(), rv);
+  if (rv < 0) {
+    return -1;
+  }
+  return static_cast<int>(static_cast<unsigned long>(rv) / PROC_PIDLISTFD_SIZE);
+#else
+  NOTIMPLEMENTED_LOG_ONCE();
+  return -1;
+#endif  // BUILDFLAG(USE_BLINK)
+}
+
 int ProcessMetrics::GetOpenFdSoftLimit() const {
   return checked_cast<int>(GetMaxFds());
 }
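
The PROC_PIDLISTFDS pattern added above is self-contained enough to show in isolation. A minimal sketch under the same assumptions (macOS/iOS with libproc available; the function name is illustrative and not part of the CL):

#include <libproc.h>

#include <memory>

// Counts open file descriptors for |pid|, or returns -1 on error. The first
// proc_pidinfo() call only sizes the buffer (an estimate based on allocated
// fileproc slabs); the byte count returned by the second call is exact, so
// dividing it by PROC_PIDLISTFD_SIZE yields the true FD count.
int CountOpenFds(pid_t pid) {
  int bytes = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, nullptr, 0);
  if (bytes < 0) {
    return -1;
  }
  std::unique_ptr<char[]> buffer(new char[static_cast<size_t>(bytes)]);
  bytes = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, buffer.get(), bytes);
  if (bytes < 0) {
    return -1;
  }
  return static_cast<int>(static_cast<unsigned long>(bytes) /
                          PROC_PIDLISTFD_SIZE);
}
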
diff --git a/base/process/process_metrics_ios.cc b/base/process/process_metrics_ios.cc
index b3649ab..3f8e0ca 100644
--- a/base/process/process_metrics_ios.cc
+++ b/base/process/process_metrics_ios.cc
@@ -19,10 +19,4 @@
   return WrapUnique(new ProcessMetrics(process));
 }
 
-int ProcessMetrics::GetOpenFdCount() const {
-  // Provide a stub for now. -1 indicates an error.
-  NOTIMPLEMENTED_LOG_ONCE();
-  return -1;
-}
-
 }  // namespace base
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index f4b103b5..e681305 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -4,73 +4,13 @@
 
 #include "base/process/process_metrics.h"
 
-#include <AvailabilityMacros.h>
-#include <libproc.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mach/mach_vm.h>
-#include <mach/shared_region.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/sysctl.h>
 #include <memory>
 
-#include "base/logging.h"
-#include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/process/process_metrics_iocounters.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-
-namespace {
-
-// This is a standin for the private pm_task_energy_data_t struct.
-struct OpaquePMTaskEnergyData {
-  // Empirical size of the private struct.
-  uint8_t data[408];
-};
-
-// Sample everything but network usage, since fetching network
-// usage can hang.
-constexpr uint8_t kPMSampleFlags = 0xff & ~0x8;
-
-}  // namespace
-
-extern "C" {
-
-// From libpmsample.dylib
-int pm_sample_task(mach_port_t task,
-                   OpaquePMTaskEnergyData* pm_energy,
-                   uint64_t mach_time,
-                   uint8_t flags);
-
-// From libpmenergy.dylib
-double pm_energy_impact(OpaquePMTaskEnergyData* pm_energy);
-
-}  // extern "C"
 
 namespace base {
 
-namespace {
-
-double GetEnergyImpactInternal(mach_port_t task, uint64_t mach_time) {
-  OpaquePMTaskEnergyData energy_info{};
-
-  if (pm_sample_task(task, &energy_info, mach_time, kPMSampleFlags) != 0) {
-    return 0.0;
-  }
-  return pm_energy_impact(&energy_info);
-}
-
-}  // namespace
-
-// Getting a mach task from a pid for another process requires permissions in
-// general, so there doesn't really seem to be a way to do these (and spinning
-// up ps to fetch each stats seems dangerous to put in a base api for anyone to
-// call). Child processes ipc their port, so return something if available,
-// otherwise return 0.
-
 // static
 std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
     ProcessHandle process,
@@ -83,61 +23,8 @@
     : process_(process),
       last_absolute_idle_wakeups_(0),
       last_absolute_package_idle_wakeups_(0),
-      last_energy_impact_(0),
       port_provider_(port_provider) {}
 
-int ProcessMetrics::GetEnergyImpact() {
-  uint64_t now = mach_absolute_time();
-  if (last_energy_impact_ == 0) {
-    last_energy_impact_ = GetEnergyImpactInternal(TaskForPid(process_), now);
-    last_energy_impact_time_ = now;
-    return 0;
-  }
-
-  double total_energy_impact =
-      GetEnergyImpactInternal(TaskForPid(process_), now);
-  uint64_t delta = now - last_energy_impact_time_;
-  if (delta == 0) {
-    return 0;
-  }
-
-  // Scale by 100 since the histogram is integral.
-  double seconds_since_last_measurement =
-      base::TimeTicks::FromMachAbsoluteTime(delta).since_origin().InSecondsF();
-  int energy_impact = 100 * (total_energy_impact - last_energy_impact_) /
-                      seconds_since_last_measurement;
-  last_energy_impact_ = total_energy_impact;
-  last_energy_impact_time_ = now;
-
-  return energy_impact;
-}
-
-int ProcessMetrics::GetOpenFdCount() const {
-  // In order to get a true count of the open number of FDs, PROC_PIDLISTFDS
-  // is used. This is done twice: first to get the appropriate size of a
-  // buffer, and then secondly to fill the buffer with the actual FD info.
-  //
-  // The buffer size returned in the first call is an estimate, based on the
-  // number of allocated fileproc structures in the kernel. This number can be
-  // greater than the actual number of open files, since the structures are
-  // allocated in slabs. The value returned in proc_bsdinfo::pbi_nfiles is
-  // also the number of allocated fileprocs, not the number in use.
-  //
-  // However, the buffer size returned in the second call is an accurate count
-  // of the open number of descriptors. The contents of the buffer are unused.
-  int rv = proc_pidinfo(process_, PROC_PIDLISTFDS, 0, nullptr, 0);
-  if (rv < 0) {
-    return -1;
-  }
-
-  std::unique_ptr<char[]> buffer(new char[static_cast<size_t>(rv)]);
-  rv = proc_pidinfo(process_, PROC_PIDLISTFDS, 0, buffer.get(), rv);
-  if (rv < 0) {
-    return -1;
-  }
-  return static_cast<int>(static_cast<unsigned long>(rv) / PROC_PIDLISTFD_SIZE);
-}
-
 bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
   return false;
 }
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 3cc116f..3980249 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -39,15 +39,20 @@
 #include "base/process/internal_linux.h"
 #endif
 
-namespace base {
-namespace debug {
-
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||      \
     BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) || \
-    BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+    BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE)
+#define ENABLE_CPU_TESTS 1
+#else
+#define ENABLE_CPU_TESTS 0
+#endif
+
+namespace base::debug {
 
 namespace {
 
+#if ENABLE_CPU_TESTS
+
 void BusyWork(std::vector<std::string>* vec) {
   int64_t test_value = 0;
   for (int i = 0; i < 100000; ++i) {
@@ -56,11 +61,25 @@
   }
 }
 
-}  // namespace
+TimeDelta TestCumulativeCPU(ProcessMetrics* metrics, TimeDelta prev_cpu_usage) {
+  const TimeDelta current_cpu_usage = metrics->GetCumulativeCPUUsage();
+  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
+  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+  return current_cpu_usage;
+}
 
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
-        // BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) ||
-        // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+#endif  // ENABLE_CPU_TESTS
+
+std::unique_ptr<ProcessMetrics> CreateProcessMetricsForTest(
+    ProcessHandle handle) {
+#if BUILDFLAG(IS_MAC)
+  return ProcessMetrics::CreateProcessMetrics(handle, nullptr);
+#else
+  return ProcessMetrics::CreateProcessMetrics(handle);
+#endif
+}
+
+}  // namespace
 
 // Tests for SystemMetrics.
 // Exists as a class so it can be a friend of SystemMetrics.
@@ -345,19 +364,16 @@
 #endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
         // BUILDFLAG(IS_ANDROID)
 
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||      \
-    BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) || \
-    BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
-
+#if ENABLE_CPU_TESTS
 // Test that ProcessMetrics::GetPlatformIndependentCPUUsage() doesn't return
 // negative values when the number of threads running on the process decreases
 // between two successive calls to it.
 TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
   ProcessHandle handle = GetCurrentProcessHandle();
-  std::unique_ptr<ProcessMetrics> metrics(
-      ProcessMetrics::CreateProcessMetrics(handle));
+  std::unique_ptr<ProcessMetrics> metrics(CreateProcessMetricsForTest(handle));
 
   EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+
   Thread thread1("thread1");
   Thread thread2("thread2");
   Thread thread3("thread3");
@@ -378,31 +394,18 @@
   thread2.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec2));
   thread3.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec3));
 
-  TimeDelta prev_cpu_usage = metrics->GetCumulativeCPUUsage();
-  EXPECT_GE(prev_cpu_usage, TimeDelta());
-  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+  TimeDelta prev_cpu_usage = TestCumulativeCPU(metrics.get(), TimeDelta());
 
   thread1.Stop();
-  TimeDelta current_cpu_usage = metrics->GetCumulativeCPUUsage();
-  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
-  prev_cpu_usage = current_cpu_usage;
-  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+  prev_cpu_usage = TestCumulativeCPU(metrics.get(), prev_cpu_usage);
 
   thread2.Stop();
-  current_cpu_usage = metrics->GetCumulativeCPUUsage();
-  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
-  prev_cpu_usage = current_cpu_usage;
-  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+  prev_cpu_usage = TestCumulativeCPU(metrics.get(), prev_cpu_usage);
 
   thread3.Stop();
-  current_cpu_usage = metrics->GetCumulativeCPUUsage();
-  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
-  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+  prev_cpu_usage = TestCumulativeCPU(metrics.get(), prev_cpu_usage);
 }
-
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
-        // BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) ||
-        // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+#endif  // ENABLE_CPU_TESTS
 
 #if BUILDFLAG(IS_CHROMEOS)
 TEST_F(SystemMetricsTest, ParseZramMmStat) {
@@ -493,7 +496,8 @@
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
 TEST(ProcessMetricsTest, ParseProcStatCPU) {
   // /proc/self/stat for a process running "top".
-  const char kTopStat[] = "960 (top) S 16230 960 16230 34818 960 "
+  const char kTopStat[] =
+      "960 (top) S 16230 960 16230 34818 960 "
       "4202496 471 0 0 0 "
       "12 16 0 0 "  // <- These are the goods.
       "20 0 1 0 121946157 15077376 314 18446744073709551615 4194304 "
@@ -502,7 +506,8 @@
   EXPECT_EQ(12 + 16, ParseProcStatCPU(kTopStat));
 
   // cat /proc/self/stat on a random other machine I have.
-  const char kSelfStat[] = "5364 (cat) R 5354 5364 5354 34819 5364 "
+  const char kSelfStat[] =
+      "5364 (cat) R 5354 5364 5354 34819 5364 "
       "0 142 0 0 0 "
       "0 0 0 0 "  // <- No CPU, apparently.
       "16 0 1 0 1676099790 2957312 114 4294967295 134512640 134528148 "
@@ -512,7 +517,8 @@
 
   // Some weird long-running process with a weird name that I created for the
   // purposes of this test.
-  const char kWeirdNameStat[] = "26115 (Hello) You ()))  ) R 24614 26115 24614"
+  const char kWeirdNameStat[] =
+      "26115 (Hello) You ()))  ) R 24614 26115 24614"
       " 34839 26115 4218880 227 0 0 0 "
       "5186 11 0 0 "
       "20 0 1 0 36933953 4296704 90 18446744073709551615 4194304 4196116 "
@@ -546,7 +552,8 @@
 }
 #endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
 
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
+    (BUILDFLAG(IS_APPLE) && BUILDFLAG(USE_BLINK))
 namespace {
 
 // Keep these in sync so the GetChildOpenFdCount test can refer to correct test
@@ -580,8 +587,9 @@
 
 // Busy-wait for an event to be signaled.
 void WaitForEvent(const FilePath& signal_dir, const char* signal_file) {
-  while (!CheckEvent(signal_dir, signal_file))
+  while (!CheckEvent(signal_dir, signal_file)) {
     PlatformThread::Sleep(Milliseconds(10));
+  }
 }
 
 // Subprocess to test the number of open file descriptors.
@@ -607,8 +615,9 @@
   CHECK(SignalEvent(temp_path, kSignalClosed));
 
   // Wait to be terminated.
-  while (true)
+  while (true) {
     PlatformThread::Sleep(Seconds(1));
+  }
 }
 
 }  // namespace
@@ -626,11 +635,7 @@
   WaitForEvent(temp_path, kSignalReady);
 
   std::unique_ptr<ProcessMetrics> metrics =
-#if BUILDFLAG(IS_APPLE)
-      ProcessMetrics::CreateProcessMetrics(child.Handle(), nullptr);
-#else
-      ProcessMetrics::CreateProcessMetrics(child.Handle());
-#endif  // BUILDFLAG(IS_APPLE)
+      CreateProcessMetricsForTest(child.Handle());
 
   const int fd_count = metrics->GetOpenFdCount();
   EXPECT_GE(fd_count, 0);
@@ -651,11 +656,7 @@
 TEST(ProcessMetricsTest, GetOpenFdCount) {
   base::ProcessHandle process = base::GetCurrentProcessHandle();
   std::unique_ptr<base::ProcessMetrics> metrics =
-#if BUILDFLAG(IS_APPLE)
-      ProcessMetrics::CreateProcessMetrics(process, nullptr);
-#else
-      ProcessMetrics::CreateProcessMetrics(process);
-#endif  // BUILDFLAG(IS_APPLE)
+      CreateProcessMetricsForTest(process);
 
   ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -668,8 +669,7 @@
   EXPECT_GT(new_fd_count, 0);
   EXPECT_EQ(new_fd_count, fd_count + 1);
 }
-
-#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_APPLE)
 
 #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
 
@@ -758,12 +758,12 @@
           return entry.first == prev_entry.first;
         });
 
-    if (prev_it != prev_thread_times.end())
+    if (prev_it != prev_thread_times.end()) {
       EXPECT_GE(entry.second, prev_it->second);
+    }
   }
 }
 #endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
         // BUILDFLAG(IS_CHROMEOS)
 
-}  // namespace debug
-}  // namespace base
+}  // namespace base::debug
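
The no-going-backwards property exercised by TestCumulativeCPU() above comes from the clamp added in process_metrics_apple.cc earlier in this CL. A hedged sketch of that pattern in isolation (class and method names are illustrative):

#include <algorithm>
#include <cstdint>

// Clamps a freshly measured cumulative CPU time against the previous reading
// so callers never observe usage going backwards, e.g. when a terminated
// thread's time briefly disappears from the per-thread accounting.
class MonotonicCpuReading {
 public:
  int64_t Update(int64_t measured_us) {
    last_us_ = std::max(last_us_, measured_us);
    return last_us_;
  }

 private:
  int64_t last_us_ = 0;
};
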
diff --git a/base/process/process_metrics_win.cc b/base/process/process_metrics_win.cc
index 35fe6f6..556fa4c 100644
--- a/base/process/process_metrics_win.cc
+++ b/base/process/process_metrics_win.cc
@@ -120,6 +120,28 @@
   ULONG SystemCalls;
 };
 
+TimeDelta GetImpreciseCumulativeCPUUsage(const win::ScopedHandle& process) {
+  FILETIME creation_time;
+  FILETIME exit_time;
+  FILETIME kernel_time;
+  FILETIME user_time;
+
+  if (!process.is_valid()) {
+    return TimeDelta();
+  }
+
+  if (!GetProcessTimes(process.get(), &creation_time, &exit_time, &kernel_time,
+                       &user_time)) {
+    // This should never fail because we duplicate the handle to guarantee it
+    // will remain valid.
+    DCHECK(false);
+    return TimeDelta();
+  }
+
+  return TimeDelta::FromFileTime(kernel_time) +
+         TimeDelta::FromFileTime(user_time);
+}
+
 }  // namespace
 
 size_t GetMaxFds() {
@@ -140,31 +162,10 @@
 }
 
 TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
-  FILETIME creation_time;
-  FILETIME exit_time;
-  FILETIME kernel_time;
-  FILETIME user_time;
-
-  if (!process_.is_valid())
-    return TimeDelta();
-
-  if (!GetProcessTimes(process_.get(), &creation_time, &exit_time, &kernel_time,
-                       &user_time)) {
-    // This should never fail because we duplicate the handle to guarantee it
-    // will remain valid.
-    DCHECK(false);
-    return TimeDelta();
-  }
-
-  return TimeDelta::FromFileTime(kernel_time) +
-         TimeDelta::FromFileTime(user_time);
-}
-
-TimeDelta ProcessMetrics::GetPreciseCumulativeCPUUsage() {
 #if defined(ARCH_CPU_ARM64)
   // Precise CPU usage is not available on Arm CPUs because they don't support
   // constant rate TSC.
-  return GetCumulativeCPUUsage();
+  return GetImpreciseCumulativeCPUUsage(process_);
 #else   // !defined(ARCH_CPU_ARM64)
   if (!time_internal::HasConstantRateTSC())
     return GetCumulativeCPUUsage();
@@ -172,10 +173,10 @@
   const double tsc_ticks_per_second = time_internal::TSCTicksPerSecond();
   if (tsc_ticks_per_second == 0) {
     // TSC is only initialized once TSCTicksPerSecond() is called twice 50 ms
-    // apart on the same thread to get a baseline. This often doesn't happen in
-    // unit tests, and theoretically may happen in production if
-    // GetPreciseCumulativeCPUUsage() is called before any uses of ThreadTicks.
-    return GetCumulativeCPUUsage();
+    // apart on the same thread to get a baseline. In unit tests, that
+    // initialization frequently hasn't completed yet; in production, this can
+    // theoretically happen as well.
+    return GetImpreciseCumulativeCPUUsage(process_);
   }
 
   ULONG64 process_cycle_time = 0;
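
For context on the fallback path above, a hedged sketch of the GetProcessTimes()-based measurement that GetImpreciseCumulativeCPUUsage() wraps (the helper name and microsecond conversion are illustrative; the precise path instead divides QueryProcessCycleTime() cycles by the TSC frequency):

#include <windows.h>

#include <cstdint>

// Returns kernel + user CPU time for |process| in microseconds, or 0 on
// failure. FILETIME values are expressed in 100-nanosecond ticks.
int64_t CumulativeCpuMicroseconds(HANDLE process) {
  FILETIME creation_time, exit_time, kernel_time, user_time;
  if (!GetProcessTimes(process, &creation_time, &exit_time, &kernel_time,
                       &user_time)) {
    return 0;
  }
  auto to_microseconds = [](const FILETIME& ft) {
    ULARGE_INTEGER ticks;
    ticks.LowPart = ft.dwLowDateTime;
    ticks.HighPart = ft.dwHighDateTime;
    return static_cast<int64_t>(ticks.QuadPart / 10);  // 100 ns -> us.
  };
  return to_microseconds(kernel_time) + to_microseconds(user_time);
}
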
diff --git a/base/process/process_unittest.cc b/base/process/process_unittest.cc
index 3a98fa1..f8536e0 100644
--- a/base/process/process_unittest.cc
+++ b/base/process/process_unittest.cc
@@ -14,18 +14,21 @@
 #include "base/test/multiprocess_test.h"
 #include "base/test/test_timeouts.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/platform_thread_internal_posix.h"
 #include "base/threading/thread_local.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/multiprocess_func_list.h"
 
 #if BUILDFLAG(IS_CHROMEOS)
+#include <sys/resource.h>
 #include <unistd.h>
 
 #include <vector>
 
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
+#include "base/process/internal_linux.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
@@ -377,6 +380,249 @@
   EXPECT_EQ(old_os_priority, new_os_priority);
 }
 
+#if BUILDFLAG(IS_CHROMEOS)
+
+namespace {
+
+class FunctionTestThread : public PlatformThread::Delegate {
+ public:
+  FunctionTestThread() = default;
+
+  FunctionTestThread(const FunctionTestThread&) = delete;
+  FunctionTestThread& operator=(const FunctionTestThread&) = delete;
+
+  void ThreadMain() override {
+    PlatformThread::SetCurrentThreadType(ThreadType::kCompositing);
+    while (true) {
+      PlatformThread::Sleep(Milliseconds(100));
+    }
+  }
+};
+
+class RTFunctionTestThread : public PlatformThread::Delegate {
+ public:
+  RTFunctionTestThread() = default;
+
+  RTFunctionTestThread(const RTFunctionTestThread&) = delete;
+  RTFunctionTestThread& operator=(const RTFunctionTestThread&) = delete;
+
+  void ThreadMain() override {
+    PlatformThread::SetCurrentThreadType(ThreadType::kRealtimeAudio);
+    while (true) {
+      PlatformThread::Sleep(Milliseconds(100));
+    }
+  }
+};
+
+int create_threads_after_bg;
+bool bg_threads_created;
+bool prebg_threads_created;
+bool rt_threads_created;
+
+void sig_create_threads_after_bg(int signum) {
+  if (signum == SIGUSR1) {
+    create_threads_after_bg = true;
+  }
+}
+
+void sig_prebg_threads_created_handler(int signum) {
+  if (signum == SIGUSR1) {
+    prebg_threads_created = true;
+  }
+}
+
+void sig_bg_threads_created_handler(int signum) {
+  if (signum == SIGUSR2) {
+    bg_threads_created = true;
+  }
+}
+
+void sig_rt_threads_created_handler(int signum) {
+  if (signum == SIGUSR1) {
+    rt_threads_created = true;
+  }
+}
+
+}  // namespace
+
+MULTIPROCESS_TEST_MAIN(ProcessThreadBackgroundingMain) {
+  PlatformThreadHandle handle1, handle2, handle3;
+  FunctionTestThread thread1, thread2, thread3;
+  PlatformThread::SetCurrentThreadType(ThreadType::kCompositing);
+
+  // Register a signal handler to be told when to create threads after
+  // backgrounding.
+  signal(SIGUSR1, sig_create_threads_after_bg);
+
+  if (!PlatformThread::Create(0, &thread1, &handle1)) {
+    ADD_FAILURE() << "ProcessThreadBackgroundingMain: Failed to create thread1";
+    return 1;
+  }
+
+  if (!PlatformThread::Create(0, &thread2, &handle2)) {
+    ADD_FAILURE() << "ProcessThreadBackgroundingMain: Failed to create thread2";
+    return 1;
+  }
+
+  // Signal that the pre-backgrounding threads were created.
+  kill(getppid(), SIGUSR1);
+
+  // Wait for the signal to background.
+  while (create_threads_after_bg == 0) {
+    PlatformThread::Sleep(Milliseconds(100));
+  }
+
+  // Test creation of thread while process is backgrounded.
+  if (!PlatformThread::Create(0, &thread3, &handle3)) {
+    ADD_FAILURE() << "ProcessThreadBackgroundingMain: Failed to create thread3";
+    return 1;
+  }
+
+  // Signal that the thread after backgrounding was created.
+  kill(getppid(), SIGUSR2);
+
+  while (true) {
+    PlatformThread::Sleep(Milliseconds(100));
+  }
+}
+
+void AssertCompositingThreadProperties(int process_id, bool backgrounded) {
+  internal::ForEachProcessTask(
+      process_id, [process_id, backgrounded](PlatformThreadId tid,
+                                             const FilePath& path) {
+        EXPECT_EQ(PlatformThread::GetThreadTypeFromThreadId(process_id, tid),
+                  ThreadType::kCompositing);
+        EXPECT_EQ(PlatformThreadLinux::IsThreadBackgroundedForTest(tid),
+                  backgrounded);
+      });
+}
+
+// ProcessThreadBackgrounding: A test to create a process and verify
+// that the threads in the process are backgrounded correctly.
+TEST_F(ProcessTest, ProcessThreadBackgrounding) {
+  if (!PlatformThread::CanChangeThreadType(ThreadType::kDefault,
+                                           ThreadType::kCompositing)) {
+    return;
+  }
+
+  // Register signal handlers to be notified of events in child process.
+  signal(SIGUSR1, sig_prebg_threads_created_handler);
+  signal(SIGUSR2, sig_bg_threads_created_handler);
+
+  Process process(SpawnChild("ProcessThreadBackgroundingMain"));
+  EXPECT_TRUE(process.IsValid());
+
+  // Wait for the signal that the initial pre-backgrounding
+  // threads were created.
+  while (!prebg_threads_created) {
+    PlatformThread::Sleep(Milliseconds(100));
+  }
+
+  AssertCompositingThreadProperties(process.Pid(), false);
+
+  EXPECT_TRUE(process.SetPriority(Process::Priority::kBestEffort));
+
+  // Send a signal to create a thread while the process is backgrounded.
+  kill(process.Pid(), SIGUSR1);
+
+  // Wait for the signal that backgrounding completed
+  while (!bg_threads_created) {
+    PlatformThread::Sleep(Milliseconds(100));
+  }
+
+  AssertCompositingThreadProperties(process.Pid(), true);
+
+  EXPECT_TRUE(process.SetPriority(Process::Priority::kUserBlocking));
+  EXPECT_TRUE(process.GetPriority() == base::Process::Priority::kUserBlocking);
+
+  // Verify that type is restored to default after foregrounding.
+  AssertCompositingThreadProperties(process.Pid(), false);
+}
+
+bool IsThreadRT(PlatformThreadId thread_id) {
+  PlatformThreadId syscall_tid = thread_id;
+  int sched;
+
+  if (thread_id == PlatformThread::CurrentId()) {
+    syscall_tid = 0;
+  }
+
+  // Check if the thread is running in real-time mode
+  sched = sched_getscheduler(syscall_tid);
+  if (sched == -1) {
+    // The thread may disappear for any reason so ignore ESRCH.
+    DPLOG_IF(ERROR, errno != ESRCH)
+        << "Failed to call sched_getscheduler for thread id " << thread_id;
+    return false;
+  }
+  return sched == SCHED_RR || sched == SCHED_FIFO;
+}
+
+// Verify that all the threads in a process are kRealtimeAudio
+// and not backgrounded even though the process may be backgrounded.
+void AssertRTAudioThreadProperties(int process_id) {
+  internal::ForEachProcessTask(
+      process_id, [process_id](PlatformThreadId tid, const FilePath& path) {
+        EXPECT_EQ(PlatformThread::GetThreadTypeFromThreadId(process_id, tid),
+                  ThreadType::kRealtimeAudio);
+        EXPECT_EQ(IsThreadRT(tid), true);
+        EXPECT_EQ(PlatformThreadLinux::IsThreadBackgroundedForTest(tid), false);
+      });
+}
+
+MULTIPROCESS_TEST_MAIN(ProcessRTThreadBackgroundingMain) {
+  PlatformThreadHandle handle1;
+  RTFunctionTestThread thread1;
+  PlatformThread::SetCurrentThreadType(ThreadType::kRealtimeAudio);
+
+  if (!PlatformThread::Create(0, &thread1, &handle1)) {
+    ADD_FAILURE()
+        << "ProcessRTThreadBackgroundingMain: Failed to create thread1";
+    return 1;
+  }
+
+  // Signal that the RT thread was created.
+  kill(getppid(), SIGUSR1);
+
+  while (true) {
+    PlatformThread::Sleep(Milliseconds(100));
+  }
+}
+
+// Test the properties of kRealtimeAudio threads in a backgrounded process.
+TEST_F(ProcessTest, ProcessRTThreadBackgrounding) {
+  if (!PlatformThread::CanChangeThreadType(ThreadType::kDefault,
+                                           ThreadType::kCompositing)) {
+    return;
+  }
+
+  // Register a signal handler to detect when the child creates its RT thread.
+  signal(SIGUSR1, sig_rt_threads_created_handler);
+
+  Process process(SpawnChild("ProcessRTThreadBackgroundingMain"));
+  EXPECT_TRUE(process.IsValid());
+
+  // Wait for the signal that the threads were spawned.
+  while (!rt_threads_created) {
+    PlatformThread::Sleep(Milliseconds(100));
+  }
+
+  AssertRTAudioThreadProperties(process.Pid());
+
+  EXPECT_TRUE(process.SetPriority(Process::Priority::kBestEffort));
+  EXPECT_TRUE(process.GetPriority() == base::Process::Priority::kBestEffort);
+
+  // Verify that nothing changed while the process is kBestEffort.
+  AssertRTAudioThreadProperties(process.Pid());
+
+  EXPECT_TRUE(process.SetPriority(Process::Priority::kUserBlocking));
+  EXPECT_TRUE(process.GetPriority() == base::Process::Priority::kUserBlocking);
+
+  // Verify that nothing changed while the process is kUserBlocking.
+  AssertRTAudioThreadProperties(process.Pid());
+}
+
+#endif  // BUILDFLAG(IS_CHROMEOS)
+
 // Consumers can use WaitForExitWithTimeout(base::TimeDelta(), nullptr) to check
 // whether the process is still running. This may not be safe because of the
 // potential reusing of the process id. So we won't export Process::IsRunning()
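
The ChromeOS tests above coordinate parent and child purely through SIGUSR1/SIGUSR2. A condensed sketch of that handshake, using volatile sig_atomic_t for the handler-written flag (the tests use plain globals); all names below are illustrative:

#include <signal.h>
#include <unistd.h>

namespace {

// Written from the signal handler, so use the async-signal-safe flag type.
volatile sig_atomic_t g_child_ready = 0;

void OnChildReady(int signum) {
  if (signum == SIGUSR1) {
    g_child_ready = 1;
  }
}

}  // namespace

// Parent side: install the handler before spawning the child.
void InstallChildReadyHandler() {
  signal(SIGUSR1, OnChildReady);
}

// Parent side: poll until the child signals readiness with
// kill(getppid(), SIGUSR1), as ProcessThreadBackgroundingMain does above.
void WaitForChildReady() {
  while (!g_child_ready) {
    usleep(100 * 1000);  // Tests use PlatformThread::Sleep(Milliseconds(100)).
  }
}
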
diff --git a/base/profiler/chrome_unwinder_android.cc b/base/profiler/chrome_unwinder_android.cc
index da959ce..a13f2d1 100644
--- a/base/profiler/chrome_unwinder_android.cc
+++ b/base/profiler/chrome_unwinder_android.cc
@@ -323,17 +323,15 @@
     return absl::nullopt;
   }
 
-  const span<const FunctionTableEntry>::const_iterator
-      function_table_entry_start =
-          function_offset_table_indices.begin() +
-          checked_cast<ptrdiff_t>(page_start_instructions[page_number]);
-  const span<const FunctionTableEntry>::const_iterator
-      function_table_entry_end =
-          page_number == page_start_instructions.size() - 1
-              ? function_offset_table_indices.end()
-              : function_offset_table_indices.begin() +
-                    checked_cast<ptrdiff_t>(
-                        page_start_instructions[page_number + 1]);
+  const span<const FunctionTableEntry>::iterator function_table_entry_start =
+      function_offset_table_indices.begin() +
+      checked_cast<ptrdiff_t>(page_start_instructions[page_number]);
+  const span<const FunctionTableEntry>::iterator function_table_entry_end =
+      page_number == page_start_instructions.size() - 1
+          ? function_offset_table_indices.end()
+          : function_offset_table_indices.begin() +
+                checked_cast<ptrdiff_t>(
+                    page_start_instructions[page_number + 1]);
 
   // `std::upper_bound` finds first element that > target in range
   // [function_table_entry_start, function_table_entry_end).
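
The iterator change above keeps the existing lookup intact: std::upper_bound over the per-page slice of the function table finds the first entry past the target, and the entry just before it covers the instruction offset. A hedged sketch of that lookup (the struct layout and function name are illustrative, not the real definitions):

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative layout only; the real FunctionTableEntry is defined elsewhere.
struct FunctionTableEntry {
  uint16_t function_start_address_page_instruction_offset;
  uint16_t function_offset_table_byte_index;
};

// Returns the entry covering |instruction_offset| within the half-open slice
// [begin, end) for one page, assuming entries are sorted by start offset, or
// nullptr if the offset precedes every entry in the slice.
const FunctionTableEntry* FindCoveringEntry(
    const std::vector<FunctionTableEntry>& entries,
    size_t begin,
    size_t end,
    uint16_t instruction_offset) {
  auto first = entries.begin() + static_cast<ptrdiff_t>(begin);
  auto last = entries.begin() + static_cast<ptrdiff_t>(end);
  // upper_bound finds the first entry strictly greater than the target, so
  // the covering entry is the one immediately before it.
  auto it = std::upper_bound(
      first, last, instruction_offset,
      [](uint16_t offset, const FunctionTableEntry& entry) {
        return offset < entry.function_start_address_page_instruction_offset;
      });
  if (it == first) {
    return nullptr;
  }
  return &*(it - 1);
}
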
diff --git a/base/profiler/libunwindstack_unwinder_android.cc b/base/profiler/libunwindstack_unwinder_android.cc
index 1b42dd4..409e358 100644
--- a/base/profiler/libunwindstack_unwinder_android.cc
+++ b/base/profiler/libunwindstack_unwinder_android.cc
@@ -76,13 +76,26 @@
 #endif  // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
 }
 
+void WriteLibunwindstackTraceEventArgs(unwindstack::ErrorCode error_code,
+                                       std::optional<int> num_frames,
+                                       perfetto::EventContext& ctx) {
+  auto* track_event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
+  auto* libunwindstack_unwinder = track_event->set_libunwindstack_unwinder();
+  using ProtoEnum = perfetto::protos::pbzero::LibunwindstackUnwinder::ErrorCode;
+  libunwindstack_unwinder->set_error_code(static_cast<ProtoEnum>(error_code));
+  if (num_frames.has_value()) {
+    libunwindstack_unwinder->set_num_frames(*num_frames);
+  }
+}
+
 }  // namespace
 
 LibunwindstackUnwinderAndroid::LibunwindstackUnwinderAndroid()
-    : memory_regions_map_(NativeUnwinderAndroid::CreateMemoryRegionsMap(
-          /*use_updatable_maps=*/false)),
-      process_memory_(std::shared_ptr<unwindstack::Memory>(
-          memory_regions_map_->TakeMemory().release())) {
+    : memory_regions_map_(
+          static_cast<NativeUnwinderAndroidMemoryRegionsMapImpl*>(
+              NativeUnwinderAndroid::CreateMemoryRegionsMap(
+                  /*use_updatable_maps=*/false)
+                  .release())) {
   TRACE_EVENT_INSTANT(
       TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
       "LibunwindstackUnwinderAndroid::LibunwindstackUnwinderAndroid");
@@ -100,8 +113,8 @@
 unwindstack::JitDebug* LibunwindstackUnwinderAndroid::GetOrCreateJitDebug(
     unwindstack::ArchEnum arch) {
   if (!jit_debug_) {
-    jit_debug_ =
-        unwindstack::CreateJitDebug(arch, process_memory_, search_libs_);
+    jit_debug_ = unwindstack::CreateJitDebug(
+        arch, memory_regions_map_->memory(), search_libs_);
   }
   return jit_debug_.get();
 }
@@ -109,8 +122,8 @@
 unwindstack::DexFiles* LibunwindstackUnwinderAndroid::GetOrCreateDexFiles(
     unwindstack::ArchEnum arch) {
   if (!dex_files_) {
-    dex_files_ =
-        unwindstack::CreateDexFiles(arch, process_memory_, search_libs_);
+    dex_files_ = unwindstack::CreateDexFiles(
+        arch, memory_regions_map_->memory(), search_libs_);
   }
   return dex_files_.get();
 }
@@ -137,14 +150,19 @@
   std::unique_ptr<unwindstack::Regs> regs =
       CreateFromRegisterContext(thread_context);
   DCHECK(regs);
-  unwindstack::Unwinder unwinder(kMaxFrames, memory_regions_map_->GetMaps(),
-                                 regs.get(), process_memory_);
+
+  TRACE_EVENT_BEGIN(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
+                    "libunwindstack::Unwind");
+  unwindstack::Unwinder unwinder(kMaxFrames, memory_regions_map_->maps(),
+                                 regs.get(), memory_regions_map_->memory());
 
   unwinder.SetJitDebug(GetOrCreateJitDebug(regs->Arch()));
   unwinder.SetDexFiles(GetOrCreateDexFiles(regs->Arch()));
 
   unwinder.Unwind(/*initial_map_names_to_skip=*/nullptr,
                   /*map_suffixes_to_ignore=*/nullptr);
+  TRACE_EVENT_END(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"));
+
   // Currently libunwindstack doesn't support warnings.
   UnwindValues values =
       UnwindValues{unwinder.LastErrorCode(), /*unwinder.warnings()*/ 0,
@@ -152,9 +170,11 @@
 
   if (values.error_code != unwindstack::ERROR_NONE) {
     TRACE_EVENT_INSTANT(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
-                        "TryUnwind Failure", "error", values.error_code,
-                        "warning", values.warnings, "num_frames",
-                        values.frames.size());
+                        "Libunwindstack Failure",
+                        [&values](perfetto::EventContext& ctx) {
+                          WriteLibunwindstackTraceEventArgs(
+                              values.error_code, values.frames.size(), ctx);
+                        });
   }
   if (values.frames.empty()) {
     return UnwindResult::kCompleted;
@@ -182,15 +202,19 @@
         module = module_for_caching.get();
         module_cache()->AddCustomNativeModule(std::move(module_for_caching));
       }
-      // TODO(crbug/1446766): Cleanup after finishing crash investigation.
       if (frame.pc < frame.map_info->start() ||
           frame.pc >= frame.map_info->end()) {
-        TRACE_EVENT_INSTANT(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
-                            "PC out of map range");
+        TRACE_EVENT_INSTANT(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
+                            "PC out of map range",
+                            [&values](perfetto::EventContext& ctx) {
+                              WriteLibunwindstackTraceEventArgs(
+                                  values.error_code, std::nullopt, ctx);
+                            });
       }
     }
     stack->emplace_back(frame.pc, module, frame.function_name);
   }
   return UnwindResult::kCompleted;
 }
+
 }  // namespace base
diff --git a/base/profiler/libunwindstack_unwinder_android.h b/base/profiler/libunwindstack_unwinder_android.h
index bdf1995..9fffc02 100644
--- a/base/profiler/libunwindstack_unwinder_android.h
+++ b/base/profiler/libunwindstack_unwinder_android.h
@@ -8,13 +8,14 @@
 #include <memory>
 #include <vector>
 
-#include "base/profiler/native_unwinder_android_memory_regions_map.h"
+#include "base/profiler/native_unwinder_android_memory_regions_map_impl.h"
 #include "base/profiler/unwinder.h"
 #include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/DexFiles.h"
 #include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/JitDebug.h"
 #include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h"
 
 namespace base {
+
 // This unwinder uses the libunwindstack::Unwinder internally to provide the
 // base::Unwinder implementation. This is in contrast to
 // base::NativeUnwinderAndroid, which uses functions from libunwindstack
@@ -42,11 +43,8 @@
   unwindstack::JitDebug* GetOrCreateJitDebug(unwindstack::ArchEnum arch);
   unwindstack::DexFiles* GetOrCreateDexFiles(unwindstack::ArchEnum arch);
 
-  std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMap> memory_regions_map_;
-  // libunwindstack::Unwinder requires that process_memory be provided as a
-  // std::shared_ptr. Since this is a third_party library this exception to
-  // normal Chromium conventions of not using shared_ptr has to exist here.
-  std::shared_ptr<unwindstack::Memory> process_memory_;
+  std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl>
+      memory_regions_map_;
 
   std::unique_ptr<unwindstack::JitDebug> jit_debug_;
   std::unique_ptr<unwindstack::DexFiles> dex_files_;
diff --git a/base/profiler/module_cache.cc b/base/profiler/module_cache.cc
index adc9b8b..7911f89 100644
--- a/base/profiler/module_cache.cc
+++ b/base/profiler/module_cache.cc
@@ -8,12 +8,8 @@
 #include <utility>
 
 #include "base/check_op.h"
-#include "base/debug/crash_logging.h"
 #include "base/ranges/algorithm.h"
 #include "base/strings/strcat.h"
-#include "base/trace_event/base_tracing.h"
-#include "base/tracing_buildflags.h"
-#include "build/build_config.h"
 
 namespace base {
 
@@ -34,17 +30,6 @@
   }
 };
 
-// TODO(crbug/1446766): Cleanup after finishing crash investigation.
-#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(ENABLE_BASE_TRACING)
-std::string ModuleToString(const ModuleCache::Module& module) {
-  trace_event::TracedValueJSON value;
-  value.SetPointer("base", reinterpret_cast<void*>(module.GetBaseAddress()));
-  value.SetInteger("size", static_cast<int>(module.GetSize()));
-  value.SetString("path", module.GetDebugBasename().value());
-  return value.ToJSON();
-}
-#endif  // BUILDFLAG(IS_ANDROID) && BUILDFLAG(ENABLE_BASE_TRACING)
-
 }  // namespace
 
 std::string TransformModuleIDToSymbolServerFormat(StringPiece module_id) {
@@ -153,21 +138,6 @@
 }
 
 void ModuleCache::AddCustomNativeModule(std::unique_ptr<const Module> module) {
-// TODO(1446766): Cleanup after finishing crash investigation.
-#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(ENABLE_BASE_TRACING)
-  auto it = native_modules_.find(module);
-  if (it != native_modules_.end()) {
-    static auto* const crash_key = base::debug::AllocateCrashKeyString(
-        "module_cache", base::debug::CrashKeySize::Size1024);
-    trace_event::TracedValueJSON value;
-    value.SetString("new_module", ModuleToString(*module));
-
-    auto& existing_module = *it;
-    value.SetString("existing_module", ModuleToString(*existing_module));
-
-    base::debug::SetCrashKeyString(crash_key, value.ToJSON());
-  }
-#endif  // BUILDFLAG(IS_ANDROID) && BUILDFLAG(ENABLE_BASE_TRACING)
   const bool was_inserted = native_modules_.insert(std::move(module)).second;
   // |module| should have been inserted into |native_modules_|, indicating that
   // there was no equivalent module already present. While this scenario would
diff --git a/base/profiler/module_cache_apple.cc b/base/profiler/module_cache_apple.cc
new file mode 100644
index 0000000..5428779
--- /dev/null
+++ b/base/profiler/module_cache_apple.cc
@@ -0,0 +1,152 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/module_cache.h"
+
+#include <dlfcn.h>
+#include <mach-o/getsect.h>
+#include <string.h>
+#include <uuid/uuid.h>
+
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+#if defined(ARCH_CPU_64_BITS)
+using MachHeaderType = mach_header_64;
+using SegmentCommandType = segment_command_64;
+constexpr uint32_t kMachHeaderMagic = MH_MAGIC_64;
+constexpr uint32_t kSegmentCommand = LC_SEGMENT_64;
+#else
+using MachHeaderType = mach_header;
+using SegmentCommandType = segment_command;
+constexpr uint32_t kMachHeaderMagic = MH_MAGIC;
+constexpr uint32_t kSegmentCommand = LC_SEGMENT;
+#endif
+
+// Returns the unique build ID and text segment size for a module loaded at
+// |module_addr|. Returns the empty string and 0 if the function fails to get
+// the build ID or size.
+//
+// Build IDs are created by the concatenation of the module's GUID (Windows) /
+// UUID (Mac) and an "age" field that indicates how many times that GUID/UUID
+// has been reused. In Windows binaries, the "age" field is present in the
+// module header, but on the Mac, UUIDs are never reused and so the "age" value
+// appended to the UUID is always 0.
+void GetUniqueIdAndTextSize(const void* module_addr,
+                            std::string* unique_id,
+                            size_t* text_size) {
+  const MachHeaderType* mach_header =
+      reinterpret_cast<const MachHeaderType*>(module_addr);
+  DCHECK_EQ(mach_header->magic, kMachHeaderMagic);
+
+  size_t offset = sizeof(MachHeaderType);
+  size_t offset_limit = sizeof(MachHeaderType) + mach_header->sizeofcmds;
+  bool found_uuid = false;
+  bool found_text_size = false;
+
+  for (uint32_t i = 0; i < mach_header->ncmds; ++i) {
+    if (offset + sizeof(load_command) >= offset_limit) {
+      unique_id->clear();
+      *text_size = 0;
+      return;
+    }
+
+    const load_command* load_cmd = reinterpret_cast<const load_command*>(
+        reinterpret_cast<const uint8_t*>(mach_header) + offset);
+
+    if (offset + load_cmd->cmdsize > offset_limit) {
+      // This command runs off the end of the command list. This is malformed.
+      unique_id->clear();
+      *text_size = 0;
+      return;
+    }
+
+    if (load_cmd->cmd == LC_UUID) {
+      if (load_cmd->cmdsize < sizeof(uuid_command)) {
+        // This "UUID command" is too small. This is malformed.
+        unique_id->clear();
+      } else {
+        const uuid_command* uuid_cmd =
+            reinterpret_cast<const uuid_command*>(load_cmd);
+        static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t),
+                      "UUID field of UUID command should be 16 bytes.");
+        // The ID comprises the UUID concatenated with the Mac's "age" value
+        // which is always 0.
+        unique_id->assign(HexEncode(&uuid_cmd->uuid, sizeof(uuid_cmd->uuid)) +
+                          "0");
+      }
+      if (found_text_size) {
+        return;
+      }
+      found_uuid = true;
+    } else if (load_cmd->cmd == kSegmentCommand) {
+      const SegmentCommandType* segment_cmd =
+          reinterpret_cast<const SegmentCommandType*>(load_cmd);
+      if (strncmp(segment_cmd->segname, SEG_TEXT,
+                  sizeof(segment_cmd->segname)) == 0) {
+        *text_size = segment_cmd->vmsize;
+        // Compare result with library function call, which is slower than this
+        // code.
+        unsigned long text_size_from_libmacho;
+        DCHECK(getsegmentdata(mach_header, SEG_TEXT, &text_size_from_libmacho));
+        DCHECK_EQ(*text_size, text_size_from_libmacho);
+      }
+      if (found_uuid) {
+        return;
+      }
+      found_text_size = true;
+    }
+    offset += load_cmd->cmdsize;
+  }
+
+  if (!found_uuid) {
+    unique_id->clear();
+  }
+  if (!found_text_size) {
+    *text_size = 0;
+  }
+}
+
+}  // namespace
+
+class MacModule : public ModuleCache::Module {
+ public:
+  explicit MacModule(const Dl_info& dl_info)
+      : base_address_(reinterpret_cast<uintptr_t>(dl_info.dli_fbase)),
+        debug_basename_(FilePath(dl_info.dli_fname).BaseName()) {
+    GetUniqueIdAndTextSize(dl_info.dli_fbase, &id_, &size_);
+  }
+
+  MacModule(const MacModule&) = delete;
+  MacModule& operator=(const MacModule&) = delete;
+
+  // ModuleCache::Module
+  uintptr_t GetBaseAddress() const override { return base_address_; }
+  std::string GetId() const override { return id_; }
+  FilePath GetDebugBasename() const override { return debug_basename_; }
+  size_t GetSize() const override { return size_; }
+  bool IsNative() const override { return true; }
+
+ private:
+  uintptr_t base_address_;
+  std::string id_;
+  FilePath debug_basename_;
+  size_t size_;
+};
+
+// static
+std::unique_ptr<const ModuleCache::Module> ModuleCache::CreateModuleForAddress(
+    uintptr_t address) {
+  Dl_info info;
+  if (!dladdr(reinterpret_cast<const void*>(address), &info)) {
+    return nullptr;
+  }
+  return std::make_unique<MacModule>(info);
+}
+
+}  // namespace base
diff --git a/base/profiler/module_cache_mac.cc b/base/profiler/module_cache_mac.cc
deleted file mode 100644
index 891654d..0000000
--- a/base/profiler/module_cache_mac.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/profiler/module_cache.h"
-
-#include <dlfcn.h>
-#include <mach-o/getsect.h>
-#include <string.h>
-#include <uuid/uuid.h>
-
-#include "base/strings/string_number_conversions.h"
-#include "build/build_config.h"
-
-namespace base {
-
-namespace {
-
-#if defined(ARCH_CPU_64_BITS)
-using MachHeaderType = mach_header_64;
-using SegmentCommandType = segment_command_64;
-constexpr uint32_t kMachHeaderMagic = MH_MAGIC_64;
-constexpr uint32_t kSegmentCommand = LC_SEGMENT_64;
-#else
-using MachHeaderType = mach_header;
-using SegmentCommandType = segment_command;
-constexpr uint32_t kMachHeaderMagic = MH_MAGIC;
-constexpr uint32_t kSegmentCommand = LC_SEGMENT;
-#endif
-
-// Returns the unique build ID and text segment size for a module loaded at
-// |module_addr|. Returns the empty string and 0 if the function fails to get
-// the build ID or size.
-//
-// Build IDs are created by the concatenation of the module's GUID (Windows) /
-// UUID (Mac) and an "age" field that indicates how many times that GUID/UUID
-// has been reused. In Windows binaries, the "age" field is present in the
-// module header, but on the Mac, UUIDs are never reused and so the "age" value
-// appended to the UUID is always 0.
-void GetUniqueIdAndTextSize(const void* module_addr,
-                            std::string* unique_id,
-                            size_t* text_size) {
-  const MachHeaderType* mach_header =
-      reinterpret_cast<const MachHeaderType*>(module_addr);
-  DCHECK_EQ(mach_header->magic, kMachHeaderMagic);
-
-  size_t offset = sizeof(MachHeaderType);
-  size_t offset_limit = sizeof(MachHeaderType) + mach_header->sizeofcmds;
-  bool found_uuid = false;
-  bool found_text_size = false;
-
-  for (uint32_t i = 0; i < mach_header->ncmds; ++i) {
-    if (offset + sizeof(load_command) >= offset_limit) {
-      unique_id->clear();
-      *text_size = 0;
-      return;
-    }
-
-    const load_command* load_cmd = reinterpret_cast<const load_command*>(
-        reinterpret_cast<const uint8_t*>(mach_header) + offset);
-
-    if (offset + load_cmd->cmdsize > offset_limit) {
-      // This command runs off the end of the command list. This is malformed.
-      unique_id->clear();
-      *text_size = 0;
-      return;
-    }
-
-    if (load_cmd->cmd == LC_UUID) {
-      if (load_cmd->cmdsize < sizeof(uuid_command)) {
-        // This "UUID command" is too small. This is malformed.
-        unique_id->clear();
-      } else {
-        const uuid_command* uuid_cmd =
-            reinterpret_cast<const uuid_command*>(load_cmd);
-        static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t),
-                      "UUID field of UUID command should be 16 bytes.");
-        // The ID comprises the UUID concatenated with the Mac's "age" value
-        // which is always 0.
-        unique_id->assign(HexEncode(&uuid_cmd->uuid, sizeof(uuid_cmd->uuid)) +
-                          "0");
-      }
-      if (found_text_size)
-        return;
-      found_uuid = true;
-    } else if (load_cmd->cmd == kSegmentCommand) {
-      const SegmentCommandType* segment_cmd =
-          reinterpret_cast<const SegmentCommandType*>(load_cmd);
-      if (strncmp(segment_cmd->segname, SEG_TEXT,
-                  sizeof(segment_cmd->segname)) == 0) {
-        *text_size = segment_cmd->vmsize;
-        // Compare result with library function call, which is slower than this
-        // code.
-        unsigned long text_size_from_libmacho;
-        DCHECK(getsegmentdata(mach_header, SEG_TEXT, &text_size_from_libmacho));
-        DCHECK_EQ(*text_size, text_size_from_libmacho);
-      }
-      if (found_uuid)
-        return;
-      found_text_size = true;
-    }
-    offset += load_cmd->cmdsize;
-  }
-
-  if (!found_uuid) {
-    unique_id->clear();
-  }
-  if (!found_text_size) {
-    *text_size = 0;
-  }
-}
-
-}  // namespace
-
-class MacModule : public ModuleCache::Module {
- public:
-  MacModule(const Dl_info& dl_info)
-      : base_address_(reinterpret_cast<uintptr_t>(dl_info.dli_fbase)),
-        debug_basename_(FilePath(dl_info.dli_fname).BaseName()) {
-    GetUniqueIdAndTextSize(dl_info.dli_fbase, &id_, &size_);
-  }
-
-  MacModule(const MacModule&) = delete;
-  MacModule& operator=(const MacModule&) = delete;
-
-  // ModuleCache::Module
-  uintptr_t GetBaseAddress() const override { return base_address_; }
-  std::string GetId() const override { return id_; }
-  FilePath GetDebugBasename() const override { return debug_basename_; }
-  size_t GetSize() const override { return size_; }
-  bool IsNative() const override { return true; }
-
- private:
-  uintptr_t base_address_;
-  std::string id_;
-  FilePath debug_basename_;
-  size_t size_;
-};
-
-// static
-std::unique_ptr<const ModuleCache::Module> ModuleCache::CreateModuleForAddress(
-    uintptr_t address) {
-  Dl_info info;
-  if (!dladdr(reinterpret_cast<const void*>(address), &info))
-    return nullptr;
-  return std::make_unique<MacModule>(info);
-}
-
-}  // namespace base
diff --git a/base/profiler/native_unwinder_android.cc b/base/profiler/native_unwinder_android.cc
index e398ecb..38bf773 100644
--- a/base/profiler/native_unwinder_android.cc
+++ b/base/profiler/native_unwinder_android.cc
@@ -15,10 +15,11 @@
 #include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Regs.h"
 
 #include "base/memory/ptr_util.h"
+#include "base/metrics/metrics_hashes.h"
 #include "base/notreached.h"
 #include "base/profiler/module_cache.h"
 #include "base/profiler/native_unwinder_android_map_delegate.h"
-#include "base/profiler/native_unwinder_android_memory_regions_map.h"
+#include "base/profiler/native_unwinder_android_memory_regions_map_impl.h"
 #include "base/profiler/profile_builder.h"
 #include "build/build_config.h"
 
@@ -35,15 +36,27 @@
 
 class NonElfModule : public ModuleCache::Module {
  public:
-  explicit NonElfModule(unwindstack::MapInfo* map_info)
+  explicit NonElfModule(unwindstack::MapInfo* map_info,
+                        bool is_java_name_hashing_enabled)
       : start_(map_info->start()),
         size_(map_info->end() - start_),
-        map_info_name_(map_info->name()) {}
+        map_info_name_(map_info->name()),
+        is_java_name_hashing_enabled_(is_java_name_hashing_enabled) {}
   ~NonElfModule() override = default;
 
   uintptr_t GetBaseAddress() const override { return start_; }
 
-  std::string GetId() const override { return std::string(); }
+  std::string GetId() const override {
+    // We provide a non-empty string only if Java name hashing is enabled, to
+    // allow us to easily filter out the results from outside the experiment.
+    if (is_java_name_hashing_enabled_) {
+      // Synthetic build id to use for DEX files that provide hashed function
+      // names rather than instruction pointers.
+      return "44444444BC18564712E780518FB3032B999";
+    } else {
+      return "";
+    }
+  }
 
   FilePath GetDebugBasename() const override {
     return FilePath(map_info_name_);
@@ -59,6 +72,7 @@
   const uintptr_t start_;
   const size_t size_;
   const std::string map_info_name_;
+  const bool is_java_name_hashing_enabled_;
 };
 
 std::unique_ptr<unwindstack::Regs> CreateFromRegisterContext(
@@ -88,28 +102,6 @@
 #endif  // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
 }
 
-// The wrapper class exists to avoid the reference of concrete libunwindstack
-// types in chrome code. Only code in the stack unwinder DFM has the access to
-// third_party/libunwindstack/src/libunwindstack. Files within the stack
-// unwinder DFM can be found by searching `native_unwinder_android` source set
-// in `base/BUILD.gn`.
-class MemoryRegionsMap : public base::NativeUnwinderAndroidMemoryRegionsMap {
- public:
-  MemoryRegionsMap(std::unique_ptr<unwindstack::Maps> maps,
-                   std::unique_ptr<unwindstack::Memory> memory)
-      : maps_(std::move(maps)), memory_(std::move(memory)) {}
-
-  unwindstack::Maps* GetMaps() override { return maps_.get(); }
-  unwindstack::Memory* GetMemory() override { return memory_.get(); }
-  std::unique_ptr<unwindstack::Memory> TakeMemory() override {
-    return std::move(memory_);
-  }
-
- private:
-  std::unique_ptr<unwindstack::Maps> maps_;
-  std::unique_ptr<unwindstack::Memory> memory_;
-};
-
 }  // namespace
 
 UnwindStackMemoryAndroid::UnwindStackMemoryAndroid(uintptr_t stack_ptr,
@@ -141,16 +133,20 @@
   const bool success = maps->Parse();
   DCHECK(success);
 
-  return std::make_unique<MemoryRegionsMap>(
+  return std::make_unique<NativeUnwinderAndroidMemoryRegionsMapImpl>(
       std::move(maps), unwindstack::Memory::CreateLocalProcessMemory());
 }
 
 NativeUnwinderAndroid::NativeUnwinderAndroid(
     uintptr_t exclude_module_with_base_address,
-    NativeUnwinderAndroidMapDelegate* map_delegate)
-    : exclude_module_with_base_address_(exclude_module_with_base_address),
+    NativeUnwinderAndroidMapDelegate* map_delegate,
+    bool is_java_name_hashing_enabled)
+    : is_java_name_hashing_enabled_(is_java_name_hashing_enabled),
+      exclude_module_with_base_address_(exclude_module_with_base_address),
       map_delegate_(map_delegate),
-      memory_regions_map_(map_delegate->GetMapReference()) {
+      memory_regions_map_(
+          static_cast<NativeUnwinderAndroidMemoryRegionsMapImpl*>(
+              map_delegate->GetMapReference())) {
   DCHECK(map_delegate_);
   DCHECK(memory_regions_map_);
 }
@@ -183,14 +179,14 @@
     uint64_t cur_pc = regs->pc();
     uint64_t cur_sp = regs->sp();
     unwindstack::MapInfo* map_info =
-        memory_regions_map_->GetMaps()->Find(cur_pc).get();
+        memory_regions_map_->maps()->Find(cur_pc).get();
     if (map_info == nullptr ||
         map_info->flags() & unwindstack::MAPS_FLAGS_DEVICE_MAP) {
       break;
     }
 
-    unwindstack::Elf* elf = map_info->GetElf(
-        {memory_regions_map_->GetMemory(), [](unwindstack::Memory*) {}}, arch);
+    unwindstack::Elf* elf =
+        map_info->GetElf(memory_regions_map_->memory(), arch);
     if (!elf->valid())
       break;
 
@@ -232,7 +228,7 @@
 
     if (regs->dex_pc() != 0) {
       // Add a frame to represent the dex file.
-      EmitDexFrame(regs->dex_pc(), stack);
+      EmitDexFrame(regs->dex_pc(), arch, stack);
 
       // Clear the dex pc so that we don't repeat this frame later.
       regs->set_dex_pc(0);
@@ -254,16 +250,27 @@
 std::unique_ptr<const ModuleCache::Module>
 NativeUnwinderAndroid::TryCreateModuleForAddress(uintptr_t address) {
   unwindstack::MapInfo* map_info =
-      memory_regions_map_->GetMaps()->Find(address).get();
+      memory_regions_map_->maps()->Find(address).get();
   if (map_info == nullptr || !(map_info->flags() & PROT_EXEC) ||
       map_info->flags() & unwindstack::MAPS_FLAGS_DEVICE_MAP) {
     return nullptr;
   }
-  return std::make_unique<NonElfModule>(map_info);
+  return std::make_unique<NonElfModule>(map_info,
+                                        is_java_name_hashing_enabled_);
+}
+
+unwindstack::DexFiles* NativeUnwinderAndroid::GetOrCreateDexFiles(
+    unwindstack::ArchEnum arch) {
+  if (!dex_files_) {
+    dex_files_ = unwindstack::CreateDexFiles(
+        arch, memory_regions_map_->memory(), search_libs_);
+  }
+  return dex_files_.get();
 }
 
 void NativeUnwinderAndroid::EmitDexFrame(uintptr_t dex_pc,
-                                         std::vector<Frame>* stack) const {
+                                         unwindstack::ArchEnum arch,
+                                         std::vector<Frame>* stack) {
   const ModuleCache::Module* module =
       module_cache()->GetExistingModuleForAddress(dex_pc);
   if (!module) {
@@ -272,14 +279,27 @@
     // are used much less commonly, it's lazily added here instead of from
     // AddInitialModulesFromMaps().
     unwindstack::MapInfo* map_info =
-        memory_regions_map_->GetMaps()->Find(dex_pc).get();
+        memory_regions_map_->maps()->Find(dex_pc).get();
     if (map_info) {
-      auto new_module = std::make_unique<NonElfModule>(map_info);
+      auto new_module = std::make_unique<NonElfModule>(
+          map_info, is_java_name_hashing_enabled_);
       module = new_module.get();
       module_cache()->AddCustomNativeModule(std::move(new_module));
     }
   }
-  stack->emplace_back(dex_pc, module);
+
+  if (is_java_name_hashing_enabled_) {
+    unwindstack::SharedString function_name;
+    uint64_t function_offset = 0;
+    GetOrCreateDexFiles(arch)->GetFunctionName(
+        memory_regions_map_->maps(), dex_pc, &function_name, &function_offset);
+    stack->emplace_back(
+        HashMetricNameAs32Bits(static_cast<const std::string&>(function_name)),
+        module);
+
+  } else {
+    stack->emplace_back(dex_pc, module);
+  }
 }
 
 }  // namespace base
diff --git a/base/profiler/native_unwinder_android.h b/base/profiler/native_unwinder_android.h
index e08d3fd..4521a5c 100644
--- a/base/profiler/native_unwinder_android.h
+++ b/base/profiler/native_unwinder_android.h
@@ -10,12 +10,14 @@
 
 #include "base/memory/raw_ptr.h"
 #include "base/profiler/unwinder.h"
+#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/DexFiles.h"
 #include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h"
 
 namespace base {
 
 class NativeUnwinderAndroidMapDelegate;
 class NativeUnwinderAndroidMemoryRegionsMap;
+class NativeUnwinderAndroidMemoryRegionsMapImpl;
 
 // Implementation of unwindstack::Memory that restricts memory access to a stack
 // buffer, used by NativeUnwinderAndroid. While unwinding, only memory accesses
@@ -57,7 +59,8 @@
   // |map_delegate| is used to manage memory used by libunwindstack. It must
   // outlive this object.
   NativeUnwinderAndroid(uintptr_t exclude_module_with_base_address,
-                        NativeUnwinderAndroidMapDelegate* map_delegate);
+                        NativeUnwinderAndroidMapDelegate* map_delegate,
+                        bool is_java_name_hashing_enabled);
   ~NativeUnwinderAndroid() override;
 
   NativeUnwinderAndroid(const NativeUnwinderAndroid&) = delete;
@@ -75,12 +78,21 @@
       uintptr_t address) override;
 
  private:
+  unwindstack::DexFiles* GetOrCreateDexFiles(unwindstack::ArchEnum arch);
+
   void EmitDexFrame(uintptr_t dex_pc,
-                    std::vector<Frame>* stack) const;
+                    unwindstack::ArchEnum,
+                    std::vector<Frame>* stack);
+
+  const bool is_java_name_hashing_enabled_;
+  std::unique_ptr<unwindstack::DexFiles> dex_files_;
 
   const uintptr_t exclude_module_with_base_address_;
   raw_ptr<NativeUnwinderAndroidMapDelegate> map_delegate_;
-  const raw_ptr<NativeUnwinderAndroidMemoryRegionsMap> memory_regions_map_;
+  const raw_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl> memory_regions_map_;
+  // This is a vector (rather than an array) because it gets used in functions
+  // from libunwindstack.
+  const std::vector<std::string> search_libs_ = {"libart.so", "libartd.so"};
 };
 
 }  // namespace base
diff --git a/base/profiler/native_unwinder_android_memory_regions_map.h b/base/profiler/native_unwinder_android_memory_regions_map.h
index 6fbf52e..d54bcb79 100644
--- a/base/profiler/native_unwinder_android_memory_regions_map.h
+++ b/base/profiler/native_unwinder_android_memory_regions_map.h
@@ -5,13 +5,6 @@
 #ifndef BASE_PROFILER_NATIVE_UNWINDER_ANDROID_MEMORY_REGIONS_MAP_H_
 #define BASE_PROFILER_NATIVE_UNWINDER_ANDROID_MEMORY_REGIONS_MAP_H_
 
-#include <memory>
-
-namespace unwindstack {
-class Maps;
-class Memory;
-}  // namespace unwindstack
-
 namespace base {
 
 // NativeUnwinderAndroidMemoryRegionsMap is an opaque interface that hides
@@ -20,6 +13,9 @@
 // pass the underlying instances around without referencing libunwindstack.
 // NativeUnwinderAndroidMemoryRegionsMap's implementation must live in the
 // stack_unwinder dynamic feature module.
+//
+// Code within the dynamic feature module is expected to downcast to the
+// derived type to access the unwindstack types.
 class NativeUnwinderAndroidMemoryRegionsMap {
  public:
   NativeUnwinderAndroidMemoryRegionsMap() = default;
@@ -29,12 +25,6 @@
       const NativeUnwinderAndroidMemoryRegionsMap&) = delete;
   NativeUnwinderAndroidMemoryRegionsMap& operator=(
       const NativeUnwinderAndroidMemoryRegionsMap&) = delete;
-
-  virtual unwindstack::Maps* GetMaps() = 0;
-  virtual unwindstack::Memory* GetMemory() = 0;
-  // This function exists to provide a method for
-  // `LibunwindstackUnwinderAndroid` to take the ownership of `Memory`.
-  virtual std::unique_ptr<unwindstack::Memory> TakeMemory() = 0;
 };
 
 }  // namespace base
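Illustration for readers of this change: the comment added above says code inside the dynamic feature module is expected to downcast to the derived type. A minimal sketch of that pattern, mirroring the static_cast performed in NativeUnwinderAndroid's constructor in this CL (the `map_delegate` variable is assumed here, not part of this diff):

// Sketch only; assumes `map_delegate` is a NativeUnwinderAndroidMapDelegate*.
NativeUnwinderAndroidMemoryRegionsMap* opaque = map_delegate->GetMapReference();
auto* impl = static_cast<NativeUnwinderAndroidMemoryRegionsMapImpl*>(opaque);
unwindstack::Maps* maps = impl->maps();  // unwindstack types are now reachable.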
diff --git a/base/profiler/native_unwinder_android_memory_regions_map_impl.cc b/base/profiler/native_unwinder_android_memory_regions_map_impl.cc
new file mode 100644
index 0000000..b70cd93
--- /dev/null
+++ b/base/profiler/native_unwinder_android_memory_regions_map_impl.cc
@@ -0,0 +1,16 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/native_unwinder_android_memory_regions_map_impl.h"
+
+namespace base {
+NativeUnwinderAndroidMemoryRegionsMapImpl::
+    NativeUnwinderAndroidMemoryRegionsMapImpl(
+        std::unique_ptr<unwindstack::Maps> maps,
+        std::unique_ptr<unwindstack::Memory> memory)
+    : maps_(std::move(maps)), memory_(std::move(memory)) {}
+
+NativeUnwinderAndroidMemoryRegionsMapImpl::
+    ~NativeUnwinderAndroidMemoryRegionsMapImpl() = default;
+}  // namespace base
diff --git a/base/profiler/native_unwinder_android_memory_regions_map_impl.h b/base/profiler/native_unwinder_android_memory_regions_map_impl.h
new file mode 100644
index 0000000..42e6e75
--- /dev/null
+++ b/base/profiler/native_unwinder_android_memory_regions_map_impl.h
@@ -0,0 +1,40 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_NATIVE_UNWINDER_ANDROID_MEMORY_REGIONS_MAP_IMPL_H_
+#define BASE_PROFILER_NATIVE_UNWINDER_ANDROID_MEMORY_REGIONS_MAP_IMPL_H_
+
+#include "base/profiler/native_unwinder_android_memory_regions_map.h"
+
+#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Maps.h"
+#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h"
+
+namespace base {
+
+class NativeUnwinderAndroidMemoryRegionsMapImpl
+    : public NativeUnwinderAndroidMemoryRegionsMap {
+ public:
+  NativeUnwinderAndroidMemoryRegionsMapImpl(
+      std::unique_ptr<unwindstack::Maps> maps,
+      std::unique_ptr<unwindstack::Memory> memory);
+
+  ~NativeUnwinderAndroidMemoryRegionsMapImpl() override;
+
+  unwindstack::Maps* maps() { return maps_.get(); }
+  // We use a non-const reference here because some functions in libunwindstack
+  // expect that.
+  std::shared_ptr<unwindstack::Memory>& memory() { return memory_; }
+
+  void SetMapsForTesting(std::unique_ptr<unwindstack::Maps> maps) {
+    maps_ = std::move(maps);
+  }
+
+ private:
+  std::unique_ptr<unwindstack::Maps> maps_;
+  std::shared_ptr<unwindstack::Memory> memory_;
+};
+
+}  // namespace base
+
+#endif  // BASE_PROFILER_NATIVE_UNWINDER_ANDROID_MEMORY_REGIONS_MAP_IMPL_H_
diff --git a/base/profiler/native_unwinder_android_unittest.cc b/base/profiler/native_unwinder_android_unittest.cc
index 29124ae..c32f66a 100644
--- a/base/profiler/native_unwinder_android_unittest.cc
+++ b/base/profiler/native_unwinder_android_unittest.cc
@@ -18,7 +18,7 @@
 #include "base/functional/bind.h"
 #include "base/memory/raw_ptr.h"
 #include "base/profiler/native_unwinder_android_map_delegate.h"
-#include "base/profiler/native_unwinder_android_memory_regions_map.h"
+#include "base/profiler/native_unwinder_android_memory_regions_map_impl.h"
 #include "base/profiler/register_context.h"
 #include "base/profiler/stack_buffer.h"
 #include "base/profiler/stack_copier_signal.h"
@@ -52,32 +52,25 @@
   map_info.set_elf_offset(map_info.offset());
 }
 
-class NativeUnwinderAndroidMemoryRegionsMapForTesting
-    : public NativeUnwinderAndroidMemoryRegionsMap {
- public:
-  NativeUnwinderAndroidMemoryRegionsMapForTesting(unwindstack::Maps* maps,
-                                                  unwindstack::Memory* memory)
-      : maps_(maps), memory_(memory) {}
-  unwindstack::Maps* GetMaps() override { return maps_; }
-  unwindstack::Memory* GetMemory() override { return memory_; }
-  // This function is not expected to be called within the unittest, as
-  // `NativeUnwinderAndroidMemoryRegionsMapForTesting` does not own
-  // `unwindstack::Memory`.
-  std::unique_ptr<unwindstack::Memory> TakeMemory() override { return nullptr; }
-
- private:
-  raw_ptr<unwindstack::Maps> maps_;
-  raw_ptr<unwindstack::Memory> memory_;
-};
+std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl>
+CreateMemoryRegionsMap() {
+  std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMap> memory_regions_map =
+      NativeUnwinderAndroid::CreateMemoryRegionsMap();
+  std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl> downcast(
+      static_cast<NativeUnwinderAndroidMemoryRegionsMapImpl*>(
+          memory_regions_map.release()));
+  return downcast;
+}
 
 class NativeUnwinderAndroidMapDelegateForTesting
     : public NativeUnwinderAndroidMapDelegate {
  public:
   explicit NativeUnwinderAndroidMapDelegateForTesting(
-      std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMap> memory_regions_map)
+      std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl>
+          memory_regions_map)
       : memory_regions_map_(std::move(memory_regions_map)) {}
 
-  NativeUnwinderAndroidMemoryRegionsMap* GetMapReference() override {
+  NativeUnwinderAndroidMemoryRegionsMapImpl* GetMapReference() override {
     acquire_count_++;
     return memory_regions_map_.get();
   }
@@ -87,7 +80,7 @@
   uint32_t release_count() { return release_count_; }
 
  private:
-  const std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMap>
+  const std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl>
       memory_regions_map_;
 
   uint32_t acquire_count_ = 0u;
@@ -139,17 +132,26 @@
   return sample;
 }
 
-// Checks that the expected information is present in sampled frames.
 // TODO(https://crbug.com/1147315): After fix, re-enable on all ASAN bots.
-// TODO(https://crbug.com/1368981): After fix, re-enable on all bots except
-// if defined(ADDRESS_SANITIZER).
-TEST(NativeUnwinderAndroidTest, DISABLED_PlainFunction) {
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_PlainFunction DISABLED_PlainFunction
+#else
+#define MAYBE_PlainFunction PlainFunction
+#endif
+// Checks that the expected information is present in sampled frames.
+TEST(NativeUnwinderAndroidTest, MAYBE_PlainFunction) {
+  const auto sdk_version = base::android::BuildInfo::GetInstance()->sdk_int();
+  if (sdk_version < base::android::SDK_VERSION_NOUGAT) {
+    GTEST_SKIP();
+  }
+
   UnwindScenario scenario(BindRepeating(&CallWithPlainFunction));
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
+      CreateMemoryRegionsMap());
 
   ModuleCache module_cache;
-  auto unwinder = std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+  auto unwinder = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
 
   unwinder->Initialize(&module_cache);
   std::vector<Frame> sample =
@@ -173,19 +175,28 @@
                                scenario.GetOuterFunctionAddressRange()});
 }
 
+// TODO(https://crbug.com/1147315): After fix, re-enable on all ASAN bots.
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_Alloca DISABLED_Alloca
+#else
+#define MAYBE_Alloca Alloca
+#endif
 // Checks that the unwinder handles stacks containing dynamically-allocated
 // stack memory.
-// TODO(https://crbug.com/1147315): After fix, re-enable on all ASAN bots.
-// TODO(https://crbug.com/1368981): After fix, re-enable on all bots except
-// if defined(ADDRESS_SANITIZER).
-TEST(NativeUnwinderAndroidTest, DISABLED_Alloca) {
+TEST(NativeUnwinderAndroidTest, MAYBE_Alloca) {
+  const auto sdk_version = base::android::BuildInfo::GetInstance()->sdk_int();
+  if (sdk_version < base::android::SDK_VERSION_NOUGAT) {
+    GTEST_SKIP();
+  }
+
   UnwindScenario scenario(BindRepeating(&CallWithAlloca));
 
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
+      CreateMemoryRegionsMap());
 
   ModuleCache module_cache;
-  auto unwinder = std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+  auto unwinder = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
 
   unwinder->Initialize(&module_cache);
   std::vector<Frame> sample =
@@ -209,20 +220,29 @@
                                scenario.GetOuterFunctionAddressRange()});
 }
 
+// TODO(https://crbug.com/1147315): After fix, re-enable on all ASAN bots.
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_OtherLibrary DISABLED_OtherLibrary
+#else
+#define MAYBE_OtherLibrary OtherLibrary
+#endif
 // Checks that a stack that runs through another library produces a stack with
 // the expected functions.
-// TODO(https://crbug.com/1147315): After fix, re-enable on all ASAN bots.
-// TODO(https://crbug.com/1368981): After fix, re-enable on all bots except
-// if defined(ADDRESS_SANITIZER).
-TEST(NativeUnwinderAndroidTest, DISABLED_OtherLibrary) {
+TEST(NativeUnwinderAndroidTest, MAYBE_OtherLibrary) {
+  const auto sdk_version = base::android::BuildInfo::GetInstance()->sdk_int();
+  if (sdk_version < base::android::SDK_VERSION_NOUGAT) {
+    GTEST_SKIP();
+  }
+
   NativeLibrary other_library = LoadOtherLibrary();
   UnwindScenario scenario(
       BindRepeating(&CallThroughOtherLibrary, Unretained(other_library)));
 
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
+      CreateMemoryRegionsMap());
   ModuleCache module_cache;
-  auto unwinder = std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+  auto unwinder = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
 
   unwinder->Initialize(&module_cache);
   std::vector<Frame> sample =
@@ -249,16 +269,17 @@
       BindRepeating(&CallThroughOtherLibrary, Unretained(other_library)));
 
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
+      CreateMemoryRegionsMap());
   ModuleCache module_cache;
   unwindstack::MapInfo* other_library_map =
       map_delegate.GetMapReference()
-          ->GetMaps()
+          ->maps()
           ->Find(GetAddressInOtherLibrary(other_library))
           .get();
   ASSERT_NE(nullptr, other_library_map);
   auto unwinder = std::make_unique<NativeUnwinderAndroid>(
-      other_library_map->start(), &map_delegate);
+      other_library_map->start(), &map_delegate,
+      /*is_java_name_hashing_enabled=*/false);
   unwinder->Initialize(&module_cache);
 
   std::vector<Frame> sample =
@@ -291,7 +312,7 @@
       BindRepeating(&CallThroughOtherLibrary, Unretained(other_library)));
 
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
+      CreateMemoryRegionsMap());
 
   // Several unwinders are used to unwind different portions of the stack. Since
   // only 1 unwinder can be registered as a module provider, each unwinder uses
@@ -300,24 +321,26 @@
   // NativeUnwinderAndroid work with other unwinders, but doesn't reproduce what
   // happens in production.
   ModuleCache module_cache_for_all;
-  auto unwinder_for_all =
-      std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+  auto unwinder_for_all = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
   unwinder_for_all->Initialize(&module_cache_for_all);
 
   ModuleCache module_cache_for_native;
   auto unwinder_for_native = std::make_unique<NativeUnwinderAndroid>(
-      reinterpret_cast<uintptr_t>(&__executable_start), &map_delegate);
+      reinterpret_cast<uintptr_t>(&__executable_start), &map_delegate,
+      /*is_java_name_hashing_enabled=*/false);
   unwinder_for_native->Initialize(&module_cache_for_native);
 
   ModuleCache module_cache_for_chrome;
   unwindstack::MapInfo* other_library_map =
       map_delegate.GetMapReference()
-          ->GetMaps()
+          ->maps()
           ->Find(GetAddressInOtherLibrary(other_library))
           .get();
   ASSERT_NE(nullptr, other_library_map);
   auto unwinder_for_chrome = std::make_unique<NativeUnwinderAndroid>(
-      other_library_map->start(), &map_delegate);
+      other_library_map->start(), &map_delegate,
+      /*is_java_name_hashing_enabled=*/false);
   unwinder_for_chrome->Initialize(&module_cache_for_chrome);
 
   std::vector<Frame> sample = CaptureScenario(
@@ -365,21 +388,25 @@
 }
 
 // Checks that java frames can be unwound through.
-// Disabled, see: https://crbug.com/1076997
-TEST(NativeUnwinderAndroidTest, DISABLED_JavaFunction) {
+TEST(NativeUnwinderAndroidTest, JavaFunction) {
   auto* build_info = base::android::BuildInfo::GetInstance();
-  // Due to varying availability of compiled java unwind tables, unwinding is
-  // only expected to succeed on > SDK_VERSION_MARSHMALLOW.
-  bool can_always_unwind =
-      build_info->sdk_int() > base::android::SDK_VERSION_MARSHMALLOW;
+  const auto sdk_version = build_info->sdk_int();
+
+  // Skip this test on anything Android O or earlier, because Java unwinding
+  // fails on these.
+  if (sdk_version <= base::android::SDK_VERSION_OREO) {
+    GTEST_SKIP();
+  }
 
   UnwindScenario scenario(base::BindRepeating(callWithJavaFunction));
 
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
-  auto unwinder = std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+      CreateMemoryRegionsMap());
 
   ModuleCache module_cache;
+  auto unwinder = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
+
   unwinder->Initialize(&module_cache);
   std::vector<Frame> sample =
       CaptureScenario(&scenario, &module_cache,
@@ -389,8 +416,7 @@
                         ASSERT_TRUE(unwinder->CanUnwindFrom(sample->back()));
                         UnwindResult result = unwinder->TryUnwind(
                             thread_context, stack_top, sample);
-                        if (can_always_unwind)
-                          EXPECT_EQ(UnwindResult::kCompleted, result);
+                        EXPECT_EQ(UnwindResult::kCompleted, result);
                       }));
 
   // Check that all the modules are valid.
@@ -398,11 +424,9 @@
     EXPECT_NE(nullptr, frame.module);
 
   // The stack should contain a full unwind.
-  if (can_always_unwind) {
-    ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(),
-                                 scenario.GetSetupFunctionAddressRange(),
-                                 scenario.GetOuterFunctionAddressRange()});
-  }
+  ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(),
+                               scenario.GetSetupFunctionAddressRange(),
+                               scenario.GetOuterFunctionAddressRange()});
 }
 
 TEST(NativeUnwinderAndroidTest, UnwindStackMemoryTest) {
@@ -435,18 +459,20 @@
 
 // Checks the debug basename is the whole name for a non-ELF module.
 TEST(NativeUnwinderAndroidTest, ModuleDebugBasenameForNonElf) {
-  unwindstack::Maps maps;
+  auto maps = std::make_unique<unwindstack::Maps>();
 
   AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "[foo / bar]", {0xAA},
-             maps);
+             *maps);
 
   ModuleCache module_cache;
 
-  auto memory_regions_map = NativeUnwinderAndroid::CreateMemoryRegionsMap();
+  std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl>
+      memory_regions_map = CreateMemoryRegionsMap();
+  memory_regions_map->SetMapsForTesting(std::move(maps));
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      std::make_unique<NativeUnwinderAndroidMemoryRegionsMapForTesting>(
-          &maps, memory_regions_map->GetMemory()));
-  auto unwinder = std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+      std::move(memory_regions_map));
+  auto unwinder = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
   unwinder->Initialize(&module_cache);
 
   const ModuleCache::Module* module = module_cache.GetModuleForAddress(0x1000u);
@@ -457,17 +483,19 @@
 
 // Checks that modules are only created for executable memory regions.
 TEST(NativeUnwinderAndroidTest, ModulesCreatedOnlyForExecutableRegions) {
-  unwindstack::Maps maps;
-  AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "[a]", {0xAA}, maps);
-  AddMapInfo(0x2000u, 0x3000u, 0u, PROT_READ, "[b]", {0xAB}, maps);
-  AddMapInfo(0x3000u, 0x4000u, 0u, PROT_READ | PROT_EXEC, "[c]", {0xAC}, maps);
+  auto maps = std::make_unique<unwindstack::Maps>();
+  AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "[a]", {0xAA}, *maps);
+  AddMapInfo(0x2000u, 0x3000u, 0u, PROT_READ, "[b]", {0xAB}, *maps);
+  AddMapInfo(0x3000u, 0x4000u, 0u, PROT_READ | PROT_EXEC, "[c]", {0xAC}, *maps);
 
-  auto memory_regions_map = NativeUnwinderAndroid::CreateMemoryRegionsMap();
+  std::unique_ptr<NativeUnwinderAndroidMemoryRegionsMapImpl>
+      memory_regions_map = CreateMemoryRegionsMap();
+  memory_regions_map->SetMapsForTesting(std::move(maps));
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      std::make_unique<NativeUnwinderAndroidMemoryRegionsMapForTesting>(
-          &maps, memory_regions_map->GetMemory()));
+      std::move(memory_regions_map));
   ModuleCache module_cache;
-  auto unwinder = std::make_unique<NativeUnwinderAndroid>(0, &map_delegate);
+  auto unwinder = std::make_unique<NativeUnwinderAndroid>(
+      0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
   unwinder->Initialize(&module_cache);
 
   const ModuleCache::Module* module1 =
@@ -487,12 +515,12 @@
 TEST(NativeUnwinderAndroidTest,
      AcquireAndReleaseMemoryRegionsMapThroughMapDelegate) {
   NativeUnwinderAndroidMapDelegateForTesting map_delegate(
-      NativeUnwinderAndroid::CreateMemoryRegionsMap());
+      CreateMemoryRegionsMap());
 
   {
     ModuleCache module_cache;
     auto unwinder = std::make_unique<NativeUnwinderAndroid>(
-        /* exclude_module_with_base_address= */ 0, &map_delegate);
+        0, &map_delegate, /*is_java_name_hashing_enabled=*/false);
     unwinder->Initialize(&module_cache);
     EXPECT_EQ(1u, map_delegate.acquire_count());
     EXPECT_EQ(0u, map_delegate.release_count());
diff --git a/base/profiler/stack_copier_signal_unittest.cc b/base/profiler/stack_copier_signal_unittest.cc
index c232e2f..495edef 100644
--- a/base/profiler/stack_copier_signal_unittest.cc
+++ b/base/profiler/stack_copier_signal_unittest.cc
@@ -82,9 +82,6 @@
 #if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
     defined(THREAD_SANITIZER)
 #define MAYBE_CopyStack DISABLED_CopyStack
-#elif BUILDFLAG(IS_CHROMEOS_ASH)
-// https://crbug.com/1042974
-#define MAYBE_CopyStack DISABLED_CopyStack
 #elif BUILDFLAG(IS_LINUX)
 // We don't support getting the stack base address on Linux, and thus can't
 // copy the stack. // https://crbug.com/1394278
diff --git a/base/profiler/stack_sampling_profiler_test_util.cc b/base/profiler/stack_sampling_profiler_test_util.cc
index ac78123..eaedcc0 100644
--- a/base/profiler/stack_sampling_profiler_test_util.cc
+++ b/base/profiler/stack_sampling_profiler_test_util.cc
@@ -128,7 +128,8 @@
 std::unique_ptr<NativeUnwinderAndroid> CreateNativeUnwinderAndroidForTesting(
     uintptr_t exclude_module_with_base_address) {
   return std::make_unique<NativeUnwinderAndroid>(
-      exclude_module_with_base_address, GetMapDelegateForTesting());
+      exclude_module_with_base_address, GetMapDelegateForTesting(),
+      /*is_java_name_hashing_enabled=*/false);
 }
 
 std::unique_ptr<Unwinder> CreateChromeUnwinderAndroidForTesting(
diff --git a/base/profiler/suspendable_thread_delegate_mac.cc b/base/profiler/suspendable_thread_delegate_mac.cc
index a362145..760a61c 100644
--- a/base/profiler/suspendable_thread_delegate_mac.cc
+++ b/base/profiler/suspendable_thread_delegate_mac.cc
@@ -10,8 +10,8 @@
 
 #include <vector>
 
+#include "base/apple/mach_logging.h"
 #include "base/check.h"
-#include "base/mac/mach_logging.h"
 #include "base/profiler/profile_builder.h"
 #include "build/build_config.h"
 
diff --git a/base/profiler/win32_stack_frame_unwinder.cc b/base/profiler/win32_stack_frame_unwinder.cc
index be7e675..627f449 100644
--- a/base/profiler/win32_stack_frame_unwinder.cc
+++ b/base/profiler/win32_stack_frame_unwinder.cc
@@ -8,6 +8,7 @@
 
 #include <utility>
 
+#include "base/check_op.h"
 #include "base/notreached.h"
 #include "build/build_config.h"
 
diff --git a/base/rand_util.cc b/base/rand_util.cc
index 8796e22..056adc2 100644
--- a/base/rand_util.cc
+++ b/base/rand_util.cc
@@ -13,6 +13,7 @@
 
 #include "base/check_op.h"
 #include "base/strings/string_util.h"
+#include "base/time/time.h"
 
 namespace base {
 
@@ -49,6 +50,23 @@
   return BitsToOpenEndedUnitIntervalF(base::RandUint64());
 }
 
+TimeDelta RandTimeDelta(TimeDelta start, TimeDelta limit) {
+  // We must have a finite, non-empty, non-reversed interval.
+  CHECK_LT(start, limit);
+  CHECK(!start.is_min());
+  CHECK(!limit.is_max());
+
+  const int64_t range = (limit - start).InMicroseconds();
+  // Because of the `CHECK_LT()` above, range > 0, so this cast is safe.
+  const uint64_t delta_us = base::RandGenerator(static_cast<uint64_t>(range));
+  // ...and because `range` fits in an `int64_t`, so will `delta_us`.
+  return start + Microseconds(static_cast<int64_t>(delta_us));
+}
+
+TimeDelta RandTimeDeltaUpTo(TimeDelta limit) {
+  return RandTimeDelta(TimeDelta(), limit);
+}
+
 double BitsToOpenEndedUnitInterval(uint64_t bits) {
   // We try to get maximum precision by masking out as many bits as will fit
   // in the target type's mantissa, and raising it to an appropriate power to
diff --git a/base/rand_util.h b/base/rand_util.h
index c9dd124..47e7917 100644
--- a/base/rand_util.h
+++ b/base/rand_util.h
@@ -26,6 +26,8 @@
 
 namespace base {
 
+class TimeDelta;
+
 namespace internal {
 
 #if BUILDFLAG(IS_ANDROID)
@@ -48,6 +50,9 @@
 BASE_EXPORT uint64_t RandUint64();
 
 // Returns a random number between min and max (inclusive). Thread-safe.
+//
+// TODO(crbug.com/1488681): Change from fully-closed to half-closed (i.e.
+// exclude `max`) to parallel other APIs here.
 BASE_EXPORT int RandInt(int min, int max);
 
 // Returns a random number in range [0, range).  Thread-safe.
@@ -59,6 +64,16 @@
 // Returns a random float in range [0, 1). Thread-safe.
 BASE_EXPORT float RandFloat();
 
+// Returns a random duration in [`start`, `limit`). Thread-safe.
+//
+// REQUIRES: `start` < `limit`
+BASE_EXPORT TimeDelta RandTimeDelta(TimeDelta start, TimeDelta limit);
+
+// Returns a random duration in [`TimeDelta()`, `limit`). Thread-safe.
+//
+// REQUIRES: `limit.is_positive()`
+BASE_EXPORT TimeDelta RandTimeDeltaUpTo(TimeDelta limit);
+
 // Given input |bits|, convert with maximum precision to a double in
 // the range [0, 1). Thread-safe.
 BASE_EXPORT double BitsToOpenEndedUnitInterval(uint64_t bits);
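A brief usage sketch for the two helpers declared above (hypothetical call sites, not part of this change):

// Sketch only: jittered delays using the new helpers.
base::TimeDelta retry_delay =
    base::RandTimeDelta(base::Seconds(1), base::Seconds(5));  // in [1s, 5s)
base::TimeDelta poll_jitter =
    base::RandTimeDeltaUpTo(base::Seconds(2));                // in [0s, 2s)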
diff --git a/base/rand_util_unittest.cc b/base/rand_util_unittest.cc
index 4db69c4..03cd883 100644
--- a/base/rand_util_unittest.cc
+++ b/base/rand_util_unittest.cc
@@ -52,6 +52,33 @@
   EXPECT_LE(0.f, number);
 }
 
+TEST(RandUtilTest, RandTimeDelta) {
+  {
+    const auto delta =
+        base::RandTimeDelta(-base::Seconds(2), -base::Seconds(1));
+    EXPECT_GE(delta, -base::Seconds(2));
+    EXPECT_LT(delta, -base::Seconds(1));
+  }
+
+  {
+    const auto delta = base::RandTimeDelta(-base::Seconds(2), base::Seconds(2));
+    EXPECT_GE(delta, -base::Seconds(2));
+    EXPECT_LT(delta, base::Seconds(2));
+  }
+
+  {
+    const auto delta = base::RandTimeDelta(base::Seconds(1), base::Seconds(2));
+    EXPECT_GE(delta, base::Seconds(1));
+    EXPECT_LT(delta, base::Seconds(2));
+  }
+}
+
+TEST(RandUtilTest, RandTimeDeltaUpTo) {
+  const auto delta = base::RandTimeDeltaUpTo(base::Seconds(2));
+  EXPECT_FALSE(delta.is_negative());
+  EXPECT_LT(delta, base::Seconds(2));
+}
+
 TEST(RandUtilTest, BitsToOpenEndedUnitInterval) {
   // Force 64-bit precision, making sure we're not in an 80-bit FPU register.
   volatile double all_zeros = BitsToOpenEndedUnitInterval(0x0);
diff --git a/base/safe_numerics_unittest.cc b/base/safe_numerics_unittest.cc
index cb92cf3..daadf65 100644
--- a/base/safe_numerics_unittest.cc
+++ b/base/safe_numerics_unittest.cc
@@ -29,6 +29,7 @@
 #include "base/logging.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/numerics/safe_math.h"
+#include "base/numerics/wrapping_math.h"
 #include "base/test/gtest_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -71,36 +72,36 @@
 static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
 static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
 static_assert(
-    std::is_same<BigEnoughPromotion<int16_t, int8_t>::type, int16_t>::value,
+    std::is_same_v<BigEnoughPromotion<int16_t, int8_t>::type, int16_t>,
     "");
 static_assert(
-    std::is_same<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>::value,
+    std::is_same_v<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>,
     "");
 static_assert(
-    std::is_same<BigEnoughPromotion<intmax_t, int8_t>::type, intmax_t>::value,
+    std::is_same_v<BigEnoughPromotion<intmax_t, int8_t>::type, intmax_t>,
     "");
 static_assert(
-    std::is_same<BigEnoughPromotion<uintmax_t, int8_t>::type, uintmax_t>::value,
+    std::is_same_v<BigEnoughPromotion<uintmax_t, int8_t>::type, uintmax_t>,
     "");
 static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
 static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
 static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
 static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
 static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<int16_t, int8_t>::type,
-                 int32_t>::value,
+    std::is_same_v<FastIntegerArithmeticPromotion<int16_t, int8_t>::type,
+                   int32_t>,
     "");
 static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<int32_t, uint32_t>::type,
-                 int64_t>::value,
+    std::is_same_v<FastIntegerArithmeticPromotion<int32_t, uint32_t>::type,
+                   int64_t>,
     "");
 static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<intmax_t, int8_t>::type,
-                 intmax_t>::value,
+    std::is_same_v<FastIntegerArithmeticPromotion<intmax_t, int8_t>::type,
+                   intmax_t>,
     "");
 static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<uintmax_t, int8_t>::type,
-                 uintmax_t>::value,
+    std::is_same_v<FastIntegerArithmeticPromotion<uintmax_t, int8_t>::type,
+                   uintmax_t>,
     "");
 static_assert(FastIntegerArithmeticPromotion<int16_t, int8_t>::is_contained,
               "");
@@ -255,9 +256,9 @@
 static void TestSpecializedArithmetic(
     const char* dst,
     int line,
-    typename std::enable_if<numeric_limits<Dst>::is_integer &&
-                                numeric_limits<Dst>::is_signed,
-                            int>::type = 0) {
+    std::enable_if_t<numeric_limits<Dst>::is_integer &&
+                         numeric_limits<Dst>::is_signed,
+                     int> = 0) {
   using DstLimits = SaturationDefaultLimits<Dst>;
   TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::lowest()));
   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
@@ -460,9 +461,9 @@
 static void TestSpecializedArithmetic(
     const char* dst,
     int line,
-    typename std::enable_if<numeric_limits<Dst>::is_integer &&
-                                !numeric_limits<Dst>::is_signed,
-                            int>::type = 0) {
+    std::enable_if_t<numeric_limits<Dst>::is_integer &&
+                         !numeric_limits<Dst>::is_signed,
+                     int> = 0) {
   using DstLimits = SaturationDefaultLimits<Dst>;
   TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
   TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
@@ -618,7 +619,7 @@
 void TestSpecializedArithmetic(
     const char* dst,
     int line,
-    typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
+    std::enable_if_t<numeric_limits<Dst>::is_iec559, int> = 0) {
   using DstLimits = SaturationDefaultLimits<Dst>;
   TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
 
@@ -1082,7 +1083,7 @@
     } else if (numeric_limits<Src>::is_signed) {
       // This block reverses the Src to Dst relationship so we don't have to
       // complicate the test macros.
-      if (!std::is_same<Src, Dst>::value) {
+      if (!std::is_same_v<Src, Dst>) {
         TEST_EXPECTED_SUCCESS(CheckDiv(SrcLimits::lowest(), Dst(-1)));
       }
       TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
@@ -1419,12 +1420,11 @@
   return 0;
 }
 
-static_assert(
-    std::is_same<decltype(TestOverload(StrictNumeric<int>())), int>::value,
-    "");
-static_assert(std::is_same<decltype(TestOverload(StrictNumeric<size_t>())),
-                           size_t>::value,
+static_assert(std::is_same_v<decltype(TestOverload(StrictNumeric<int>())), int>,
               "");
+static_assert(
+    std::is_same_v<decltype(TestOverload(StrictNumeric<size_t>())), size_t>,
+    "");
 
 template <typename T>
 struct CastTest1 {
@@ -1543,12 +1543,11 @@
   auto int8_max = MakeCheckedNum(numeric_limits<int8_t>::max());
   auto double_max = MakeCheckedNum(numeric_limits<double>::max());
   static_assert(
-      std::is_same<int16_t,
-                   decltype(int8_min.ValueOrDie<int16_t>())::type>::value,
+      std::is_same_v<int16_t, decltype(int8_min.ValueOrDie<int16_t>())::type>,
       "ValueOrDie returning incorrect type.");
   static_assert(
-      std::is_same<int16_t,
-                   decltype(int8_min.ValueOrDefault<int16_t>(0))::type>::value,
+      std::is_same_v<int16_t,
+                     decltype(int8_min.ValueOrDefault<int16_t>(0))::type>,
       "ValueOrDefault returning incorrect type.");
   EXPECT_FALSE(IsValidForType<uint8_t>(int8_min));
   EXPECT_TRUE(IsValidForType<uint8_t>(int8_max));
@@ -1838,7 +1837,9 @@
   EXPECT_EQ(-100, ClampRound(-100.1f));
   EXPECT_EQ(-101, ClampRound(-100.5f));
   EXPECT_EQ(-101, ClampRound(-100.9f));
+  EXPECT_EQ(0, ClampRound(std::nextafter(-0.5f, 0.0f)));
   EXPECT_EQ(0, ClampRound(0.0f));
+  EXPECT_EQ(0, ClampRound(std::nextafter(0.5f, 0.0f)));
   EXPECT_EQ(100, ClampRound(100.1f));
   EXPECT_EQ(101, ClampRound(100.5f));
   EXPECT_EQ(101, ClampRound(100.9f));
@@ -1886,6 +1887,82 @@
   EXPECT_EQ(0, ClampRound<int64_t>(-kNaN));
 }
 
+template <typename T>
+void TestWrappingMathSigned() {
+  static_assert(std::is_signed_v<T>);
+  constexpr T kMinusTwo = -2;
+  constexpr T kMinusOne = -1;
+  constexpr T kZero = 0;
+  constexpr T kOne = 1;
+  constexpr T kTwo = 2;
+  constexpr T kThree = 3;
+  constexpr T kMax = std::numeric_limits<T>::max();
+  constexpr T kMin = std::numeric_limits<T>::min();
+
+  EXPECT_EQ(base::WrappingAdd(kOne, kTwo), kThree);
+  static_assert(base::WrappingAdd(kOne, kTwo) == kThree);
+  EXPECT_EQ(base::WrappingAdd(kMax, kOne), kMin);
+  static_assert(base::WrappingAdd(kMax, kOne) == kMin);
+  EXPECT_EQ(base::WrappingAdd(kMax, kTwo), kMin + 1);
+  static_assert(base::WrappingAdd(kMax, kTwo) == kMin + 1);
+  EXPECT_EQ(base::WrappingAdd(kMax, kMax), kMinusTwo);
+  static_assert(base::WrappingAdd(kMax, kMax) == kMinusTwo);
+  EXPECT_EQ(base::WrappingAdd(kMin, kMin), kZero);
+  static_assert(base::WrappingAdd(kMin, kMin) == kZero);
+
+  EXPECT_EQ(base::WrappingSub(kTwo, kOne), kOne);
+  static_assert(base::WrappingSub(kTwo, kOne) == kOne);
+  EXPECT_EQ(base::WrappingSub(kOne, kTwo), kMinusOne);
+  static_assert(base::WrappingSub(kOne, kTwo) == kMinusOne);
+  EXPECT_EQ(base::WrappingSub(kMin, kOne), kMax);
+  static_assert(base::WrappingSub(kMin, kOne) == kMax);
+  EXPECT_EQ(base::WrappingSub(kMin, kTwo), kMax - 1);
+  static_assert(base::WrappingSub(kMin, kTwo) == kMax - 1);
+  EXPECT_EQ(base::WrappingSub(kMax, kMin), kMinusOne);
+  static_assert(base::WrappingSub(kMax, kMin) == kMinusOne);
+  EXPECT_EQ(base::WrappingSub(kMin, kMax), kOne);
+  static_assert(base::WrappingSub(kMin, kMax) == kOne);
+}
+
+template <typename T>
+void TestWrappingMathUnsigned() {
+  static_assert(std::is_unsigned_v<T>);
+  constexpr T kZero = 0;
+  constexpr T kOne = 1;
+  constexpr T kTwo = 2;
+  constexpr T kThree = 3;
+  constexpr T kMax = std::numeric_limits<T>::max();
+
+  EXPECT_EQ(base::WrappingAdd(kOne, kTwo), kThree);
+  static_assert(base::WrappingAdd(kOne, kTwo) == kThree);
+  EXPECT_EQ(base::WrappingAdd(kMax, kOne), kZero);
+  static_assert(base::WrappingAdd(kMax, kOne) == kZero);
+  EXPECT_EQ(base::WrappingAdd(kMax, kTwo), kOne);
+  static_assert(base::WrappingAdd(kMax, kTwo) == kOne);
+  EXPECT_EQ(base::WrappingAdd(kMax, kMax), kMax - 1);
+  static_assert(base::WrappingAdd(kMax, kMax) == kMax - 1);
+
+  EXPECT_EQ(base::WrappingSub(kTwo, kOne), kOne);
+  static_assert(base::WrappingSub(kTwo, kOne) == kOne);
+  EXPECT_EQ(base::WrappingSub(kOne, kTwo), kMax);
+  static_assert(base::WrappingSub(kOne, kTwo) == kMax);
+  EXPECT_EQ(base::WrappingSub(kZero, kOne), kMax);
+  static_assert(base::WrappingSub(kZero, kOne) == kMax);
+  EXPECT_EQ(base::WrappingSub(kZero, kTwo), kMax - 1);
+  static_assert(base::WrappingSub(kZero, kTwo) == kMax - 1);
+}
+
+TEST(SafeNumerics, WrappingMath) {
+  TestWrappingMathSigned<int8_t>();
+  TestWrappingMathUnsigned<uint8_t>();
+  TestWrappingMathSigned<int16_t>();
+  TestWrappingMathUnsigned<uint16_t>();
+  TestWrappingMathSigned<int32_t>();
+  TestWrappingMathUnsigned<uint32_t>();
+  TestWrappingMathSigned<int64_t>();
+  TestWrappingMathUnsigned<uint64_t>();
+}
+
 #if defined(__clang__)
 #pragma clang diagnostic pop  // -Winteger-overflow
 #endif
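The new tests above pin down the semantics of `base::WrappingAdd()`/`WrappingSub()`: results wrap modulo 2^N for N-bit operands. A tiny sketch of the same behavior expressed with plain unsigned arithmetic, illustrative only and not necessarily how the helpers are implemented:

// Sketch only: WrappingAdd(int8_t{127}, int8_t{1}) == int8_t{-128}, i.e. the
// sum reduced modulo 2^8 and reinterpreted as a signed value.
uint8_t raw = static_cast<uint8_t>(int8_t{127}) + uint8_t{1};  // 128
int8_t wrapped = static_cast<int8_t>(raw);                     // -128 (INT8_MIN)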
diff --git a/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc b/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc
index a921945..bd281b8 100644
--- a/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc
+++ b/base/sampling_heap_profiler/lock_free_address_hash_set_unittest.cc
@@ -9,7 +9,7 @@
 #include <cinttypes>
 #include <memory>
 
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "base/debug/alias.h"
 #include "base/memory/raw_ptr.h"
 #include "base/threading/simple_thread.h"
diff --git a/base/sampling_heap_profiler/poisson_allocation_sampler.cc b/base/sampling_heap_profiler/poisson_allocation_sampler.cc
index 9bff244..cd00c4b 100644
--- a/base/sampling_heap_profiler/poisson_allocation_sampler.cc
+++ b/base/sampling_heap_profiler/poisson_allocation_sampler.cc
@@ -65,7 +65,7 @@
   // Clang's implementation of thread_local.
   static base::NoDestructor<
       base::allocator::dispatcher::ThreadLocalStorage<ThreadLocalData>>
-      thread_local_data;
+      thread_local_data("poisson_allocation_sampler");
   return thread_local_data->GetThreadLocalData();
 #else
   // Notes on TLS usage:
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler.cc b/base/sampling_heap_profiler/sampling_heap_profiler.cc
index 0b531aa..324378e 100644
--- a/base/sampling_heap_profiler/sampling_heap_profiler.cc
+++ b/base/sampling_heap_profiler/sampling_heap_profiler.cc
@@ -8,8 +8,9 @@
 #include <cmath>
 #include <utility>
 
-#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/dispatcher/tls.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "base/compiler_specific.h"
 #include "base/debug/stack_trace.h"
 #include "base/feature_list.h"
@@ -38,6 +39,22 @@
 
 namespace {
 
+struct ThreadLocalData {
+  const char* thread_name = nullptr;
+};
+
+ThreadLocalData* GetThreadLocalData() {
+#if USE_LOCAL_TLS_EMULATION()
+  static base::NoDestructor<
+      base::allocator::dispatcher::ThreadLocalStorage<ThreadLocalData>>
+      thread_local_data("sampling_heap_profiler");
+  return thread_local_data->GetThreadLocalData();
+#else
+  static thread_local ThreadLocalData thread_local_data;
+  return &thread_local_data;
+#endif
+}
+
 #if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
 BASE_FEATURE(kAvoidFramePointers,
              "AndroidHeapSamplerAvoidFramePointers",
@@ -58,7 +75,7 @@
     return thread_name;
 
   // prctl requires 16 bytes, snprintf requires 19, pthread_getname_np requires
-  // 64 on macOS, see PlatformThread::SetName in platform_thread_mac.mm.
+  // 64 on macOS, see PlatformThread::SetName in platform_thread_apple.mm.
   constexpr size_t kBufferLen = 64;
   char name[kBufferLen];
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
@@ -82,19 +99,19 @@
 }
 
 const char* UpdateAndGetThreadName(const char* name) {
-  static thread_local const char* thread_name;
+  ThreadLocalData* const thread_local_data = GetThreadLocalData();
   if (name)
-    thread_name = name;
-  if (!thread_name)
-    thread_name = GetAndLeakThreadName();
-  return thread_name;
+    thread_local_data->thread_name = name;
+  if (!thread_local_data->thread_name) {
+    thread_local_data->thread_name = GetAndLeakThreadName();
+  }
+  return thread_local_data->thread_name;
 }
 
 // Checks whether unwinding from this function works.
 [[maybe_unused]] StackUnwinder CheckForDefaultUnwindTables() {
-  void* stack[kMaxStackEntries];
-  size_t frame_count = base::debug::CollectStackTrace(const_cast<void**>(stack),
-                                                      kMaxStackEntries);
+  const void* stack[kMaxStackEntries];
+  size_t frame_count = base::debug::CollectStackTrace(stack, kMaxStackEntries);
   // First frame is the current function and can be found without unwind tables.
   return frame_count > 1 ? StackUnwinder::kDefault
                          : StackUnwinder::kUnavailable;
@@ -186,9 +203,9 @@
   return UpdateAndGetThreadName(nullptr);
 }
 
-void** SamplingHeapProfiler::CaptureStackTrace(void** frames,
-                                               size_t max_entries,
-                                               size_t* count) {
+const void** SamplingHeapProfiler::CaptureStackTrace(const void** frames,
+                                                     size_t max_entries,
+                                                     size_t* count) {
   // Skip top frames as they correspond to the profiler itself.
   size_t skip_frames = 3;
   size_t frame_count = 0;
@@ -250,10 +267,10 @@
 
 void SamplingHeapProfiler::CaptureNativeStack(const char* context,
                                               Sample* sample) {
-  void* stack[kMaxStackEntries];
+  const void* stack[kMaxStackEntries];
   size_t frame_count;
   // One frame is reserved for the thread name.
-  void** first_frame =
+  const void** first_frame =
       CaptureStackTrace(stack, kMaxStackEntries - 1, &frame_count);
   DCHECK_LT(frame_count, kMaxStackEntries);
   sample->stack.assign(first_frame, first_frame + frame_count);
@@ -305,6 +322,7 @@
 
 // static
 void SamplingHeapProfiler::Init() {
+  GetThreadLocalData();
   PoissonAllocationSampler::Init();
 }
 
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler.h b/base/sampling_heap_profiler/sampling_heap_profiler.h
index d0931a0..e273a01 100644
--- a/base/sampling_heap_profiler/sampling_heap_profiler.h
+++ b/base/sampling_heap_profiler/sampling_heap_profiler.h
@@ -47,7 +47,7 @@
     // Name of the thread that made the sampled allocation.
     const char* thread_name = nullptr;
     // Call stack of PC addresses responsible for the allocation.
-    std::vector<void*> stack;
+    std::vector<const void*> stack;
 
     // Public for testing.
     Sample(size_t size, size_t total, uint32_t ordinal);
@@ -100,7 +100,9 @@
   // Captures up to |max_entries| stack frames using the buffer pointed to by
   // |frames|. Puts the number of captured frames into the |count| output
   // parameter. Returns the pointer to the topmost frame.
-  void** CaptureStackTrace(void** frames, size_t max_entries, size_t* count);
+  const void** CaptureStackTrace(const void** frames,
+                                 size_t max_entries,
+                                 size_t* count);
 
   static void Init();
   static SamplingHeapProfiler* Get();
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc b/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
index 4c84d0a..dc006f4 100644
--- a/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
+++ b/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
@@ -8,7 +8,7 @@
 #include <cinttypes>
 
 #include "base/allocator/dispatcher/dispatcher.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
 #include "base/debug/alias.h"
 #include "base/memory/raw_ptr.h"
 #include "base/rand_util.h"
diff --git a/base/scoped_generic.h b/base/scoped_generic.h
index a347a95..4b034a18 100644
--- a/base/scoped_generic.h
+++ b/base/scoped_generic.h
@@ -262,7 +262,7 @@
 
   template <typename Void = void>
   typename std::enable_if_t<
-      std::is_base_of<ScopedGenericOwnershipTracking, Traits>::value,
+      std::is_base_of_v<ScopedGenericOwnershipTracking, Traits>,
       Void>
   TrackAcquire(const T& value) {
     if (value != traits_type::InvalidValue()) {
@@ -272,13 +272,13 @@
 
   template <typename Void = void>
   typename std::enable_if_t<
-      !std::is_base_of<ScopedGenericOwnershipTracking, Traits>::value,
+      !std::is_base_of_v<ScopedGenericOwnershipTracking, Traits>,
       Void>
   TrackAcquire(const T& value) {}
 
   template <typename Void = void>
   typename std::enable_if_t<
-      std::is_base_of<ScopedGenericOwnershipTracking, Traits>::value,
+      std::is_base_of_v<ScopedGenericOwnershipTracking, Traits>,
       Void>
   TrackRelease(const T& value) {
     if (value != traits_type::InvalidValue()) {
@@ -288,7 +288,7 @@
 
   template <typename Void = void>
   typename std::enable_if_t<
-      !std::is_base_of<ScopedGenericOwnershipTracking, Traits>::value,
+      !std::is_base_of_v<ScopedGenericOwnershipTracking, Traits>,
       Void>
   TrackRelease(const T& value) {}
 
diff --git a/base/scoped_observation.h b/base/scoped_observation.h
index 3cd6882..d5faef3 100644
--- a/base/scoped_observation.h
+++ b/base/scoped_observation.h
@@ -5,7 +5,7 @@
 #ifndef BASE_SCOPED_OBSERVATION_H_
 #define BASE_SCOPED_OBSERVATION_H_
 
-#include <stddef.h>
+#include <utility>
 
 #include "base/check.h"
 #include "base/check_op.h"
@@ -112,8 +112,7 @@
   // if currently observing. Does nothing otherwise.
   void Reset() {
     if (source_) {
-      Traits::RemoveObserver(source_, observer_);
-      source_ = nullptr;
+      Traits::RemoveObserver(std::exchange(source_, nullptr), observer_);
     }
   }
 
@@ -126,6 +125,11 @@
     return source_ == source;
   }
 
+  // Gets a pointer to the observed source, or nullptr if no source is being
+  // observed.
+  Source* GetSource() { return source_; }
+  const Source* GetSource() const { return source_; }
+
  private:
   using Traits = ScopedObservationTraits<Source, Observer>;
 
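A short usage sketch for the new `GetSource()` accessor added above (the observation object and its source/observer types are placeholders, not part of this change):

// Sketch only; `observation` is assumed to be a
// base::ScopedObservation<TestSource, TestSourceObserver>.
if (TestSource* source = observation.GetSource()) {
  // Non-null: still observing `source`.
} else {
  // Null: Reset() was called, or Observe() never was.
}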
diff --git a/base/scoped_observation_unittest.cc b/base/scoped_observation_unittest.cc
index 1ec7604..ef90b69 100644
--- a/base/scoped_observation_unittest.cc
+++ b/base/scoped_observation_unittest.cc
@@ -7,7 +7,6 @@
 #include "base/containers/contains.h"
 #include "base/ranges/algorithm.h"
 #include "base/scoped_observation_traits.h"
-#include "base/test/gtest_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -54,12 +53,18 @@
   {
     TestSourceObserver o1;
     TestScopedObservation obs(&o1);
+    const TestScopedObservation& cobs = obs;
     EXPECT_EQ(0u, s1.num_observers());
     EXPECT_FALSE(s1.HasObserver(&o1));
+    EXPECT_EQ(obs.GetSource(), nullptr);
+    EXPECT_EQ(cobs.GetSource(), nullptr);
 
     obs.Observe(&s1);
     EXPECT_EQ(1u, s1.num_observers());
     EXPECT_TRUE(s1.HasObserver(&o1));
+    TestSource* const got_source = obs.GetSource();
+    EXPECT_EQ(got_source, &s1);
+    EXPECT_EQ(cobs.GetSource(), &s1);
   }
 
   // Test that the observation is removed when it goes out of scope.
@@ -70,32 +75,50 @@
   TestSource s1;
   TestSourceObserver o1;
   TestScopedObservation obs(&o1);
+  const TestScopedObservation& cobs = obs;
   EXPECT_EQ(0u, s1.num_observers());
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
   obs.Reset();
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 
   obs.Observe(&s1);
   EXPECT_EQ(1u, s1.num_observers());
   EXPECT_TRUE(s1.HasObserver(&o1));
+  EXPECT_EQ(obs.GetSource(), &s1);
+  EXPECT_EQ(cobs.GetSource(), &s1);
 
   obs.Reset();
   EXPECT_EQ(0u, s1.num_observers());
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 
   // Safe to call with no observation.
   obs.Reset();
   EXPECT_EQ(0u, s1.num_observers());
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 }
 
 TEST(ScopedObservationTest, IsObserving) {
   TestSource s1;
   TestSourceObserver o1;
   TestScopedObservation obs(&o1);
-  EXPECT_FALSE(obs.IsObserving());
+  const TestScopedObservation& cobs = obs;
+  EXPECT_FALSE(cobs.IsObserving());
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 
   obs.Observe(&s1);
-  EXPECT_TRUE(obs.IsObserving());
+  EXPECT_TRUE(cobs.IsObserving());
+  EXPECT_EQ(obs.GetSource(), &s1);
+  EXPECT_EQ(cobs.GetSource(), &s1);
 
   obs.Reset();
-  EXPECT_FALSE(obs.IsObserving());
+  EXPECT_FALSE(cobs.IsObserving());
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 }
 
 TEST(ScopedObservationTest, IsObservingSource) {
@@ -103,16 +126,23 @@
   TestSource s2;
   TestSourceObserver o1;
   TestScopedObservation obs(&o1);
-  EXPECT_FALSE(obs.IsObservingSource(&s1));
-  EXPECT_FALSE(obs.IsObservingSource(&s2));
+  const TestScopedObservation& cobs = obs;
+  EXPECT_FALSE(cobs.IsObservingSource(&s1));
+  EXPECT_FALSE(cobs.IsObservingSource(&s2));
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 
   obs.Observe(&s1);
-  EXPECT_TRUE(obs.IsObservingSource(&s1));
-  EXPECT_FALSE(obs.IsObservingSource(&s2));
+  EXPECT_TRUE(cobs.IsObservingSource(&s1));
+  EXPECT_FALSE(cobs.IsObservingSource(&s2));
+  EXPECT_EQ(obs.GetSource(), &s1);
+  EXPECT_EQ(cobs.GetSource(), &s1);
 
   obs.Reset();
-  EXPECT_FALSE(obs.IsObservingSource(&s1));
-  EXPECT_FALSE(obs.IsObservingSource(&s2));
+  EXPECT_FALSE(cobs.IsObservingSource(&s1));
+  EXPECT_FALSE(cobs.IsObservingSource(&s2));
+  EXPECT_EQ(obs.GetSource(), nullptr);
+  EXPECT_EQ(cobs.GetSource(), nullptr);
 }
 
 namespace {
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index 04ca69f..bdb8180 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -14,7 +14,7 @@
 #include <limits>
 #include <memory>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/files/file_util.h"
 #include "base/memory/free_deleter.h"
 #include "base/sanitizer_buildflags.h"
diff --git a/base/std_clamp_unittest.cc b/base/std_clamp_unittest.cc
index 1340ff3..603378e 100644
--- a/base/std_clamp_unittest.cc
+++ b/base/std_clamp_unittest.cc
@@ -21,6 +21,7 @@
   int some_other_int;
 };
 
+// Verify libc++ hardening terminates instead of UB with invalid clamp args.
 TEST(ClampTest, Death) {
   EXPECT_DEATH_IF_SUPPORTED(std::ignore = std::clamp(3, 10, 0), "");
   EXPECT_DEATH_IF_SUPPORTED(std::ignore = std::clamp(3.0, 10.0, 0.0), "");
diff --git a/base/stl_util.h b/base/stl_util.h
index 46dfde1..7fdac12 100644
--- a/base/stl_util.h
+++ b/base/stl_util.h
@@ -21,8 +21,8 @@
 
 template <typename Iter>
 constexpr bool IsRandomAccessIter =
-    std::is_same<typename std::iterator_traits<Iter>::iterator_category,
-                 std::random_access_iterator_tag>::value;
+    std::is_same_v<typename std::iterator_traits<Iter>::iterator_category,
+                   std::random_access_iterator_tag>;
 
 }  // namespace internal
 
diff --git a/base/stl_util_unittest.cc b/base/stl_util_unittest.cc
index 305865a..0c99567 100644
--- a/base/stl_util_unittest.cc
+++ b/base/stl_util_unittest.cc
@@ -31,8 +31,8 @@
 TEST(STLUtilTest, GetUnderlyingContainer) {
   {
     std::queue<int> queue({1, 2, 3, 4, 5});
-    static_assert(std::is_same<decltype(GetUnderlyingContainer(queue)),
-                               const std::deque<int>&>::value,
+    static_assert(std::is_same_v<decltype(GetUnderlyingContainer(queue)),
+                                 const std::deque<int>&>,
                   "GetUnderlyingContainer(queue) should be of type deque");
     EXPECT_THAT(GetUnderlyingContainer(queue),
                 testing::ElementsAre(1, 2, 3, 4, 5));
@@ -46,8 +46,8 @@
   {
     base::queue<int> queue({1, 2, 3, 4, 5});
     static_assert(
-        std::is_same<decltype(GetUnderlyingContainer(queue)),
-                     const base::circular_deque<int>&>::value,
+        std::is_same_v<decltype(GetUnderlyingContainer(queue)),
+                       const base::circular_deque<int>&>,
         "GetUnderlyingContainer(queue) should be of type circular_deque");
     EXPECT_THAT(GetUnderlyingContainer(queue),
                 testing::ElementsAre(1, 2, 3, 4, 5));
@@ -56,8 +56,8 @@
   {
     std::vector<int> values = {1, 2, 3, 4, 5};
     std::priority_queue<int> queue(values.begin(), values.end());
-    static_assert(std::is_same<decltype(GetUnderlyingContainer(queue)),
-                               const std::vector<int>&>::value,
+    static_assert(std::is_same_v<decltype(GetUnderlyingContainer(queue)),
+                                 const std::vector<int>&>,
                   "GetUnderlyingContainer(queue) should be of type vector");
     EXPECT_THAT(GetUnderlyingContainer(queue),
                 testing::UnorderedElementsAre(1, 2, 3, 4, 5));
@@ -65,8 +65,8 @@
 
   {
     std::stack<int> stack({1, 2, 3, 4, 5});
-    static_assert(std::is_same<decltype(GetUnderlyingContainer(stack)),
-                               const std::deque<int>&>::value,
+    static_assert(std::is_same_v<decltype(GetUnderlyingContainer(stack)),
+                                 const std::deque<int>&>,
                   "GetUnderlyingContainer(stack) should be of type deque");
     EXPECT_THAT(GetUnderlyingContainer(stack),
                 testing::ElementsAre(1, 2, 3, 4, 5));
diff --git a/base/strings/escape.cc b/base/strings/escape.cc
index d03da4c..1e61d69 100644
--- a/base/strings/escape.cc
+++ b/base/strings/escape.cc
@@ -9,6 +9,7 @@
 #include "base/check_op.h"
 #include "base/feature_list.h"
 #include "base/features.h"
+#include "base/strings/string_number_conversions.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversion_utils.h"
@@ -19,13 +20,6 @@
 
 namespace {
 
-const char kHexString[] = "0123456789ABCDEF";
-inline char IntToHex(int i) {
-  DCHECK_GE(i, 0) << i << " not a hex value";
-  DCHECK_LE(i, 15) << i << " not a hex value";
-  return kHexString[i];
-}
-
 // A fast bit-vector map for ascii characters.
 //
 // Internally stores 256 bits in an array of 8 ints.
@@ -58,8 +52,7 @@
       escaped.push_back('%');
     } else if (charmap.Contains(c)) {
       escaped.push_back('%');
-      escaped.push_back(IntToHex(c >> 4));
-      escaped.push_back(IntToHex(c & 0xf));
+      AppendHexEncodedByte(c, escaped);
     } else {
       escaped.push_back(static_cast<char>(c));
     }
diff --git a/base/strings/safe_sprintf_unittest.cc b/base/strings/safe_sprintf_unittest.cc
index 9b94a99..e0482ba 100644
--- a/base/strings/safe_sprintf_unittest.cc
+++ b/base/strings/safe_sprintf_unittest.cc
@@ -12,7 +12,7 @@
 #include <limits>
 #include <memory>
 
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
 #include "base/check_op.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index cde0c65..735feda 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -120,21 +120,18 @@
 }
 
 std::string HexEncode(const void* bytes, size_t size) {
-  static const char kHexChars[] = "0123456789ABCDEF";
-
-  // Each input byte creates two output hex characters.
-  std::string ret(size * 2, '\0');
-
-  for (size_t i = 0; i < size; ++i) {
-    char b = reinterpret_cast<const char*>(bytes)[i];
-    ret[(i * 2)] = kHexChars[(b >> 4) & 0xf];
-    ret[(i * 2) + 1] = kHexChars[b & 0xf];
-  }
-  return ret;
+  return HexEncode(span(reinterpret_cast<const uint8_t*>(bytes), size));
 }
 
-std::string HexEncode(base::span<const uint8_t> bytes) {
-  return HexEncode(bytes.data(), bytes.size());
+std::string HexEncode(span<const uint8_t> bytes) {
+  // Each input byte creates two output hex characters.
+  std::string ret;
+  ret.reserve(bytes.size() * 2);
+
+  for (uint8_t byte : bytes) {
+    AppendHexEncodedByte(byte, ret);
+  }
+  return ret;
 }
 
 bool HexStringToInt(StringPiece input, int* output) {
@@ -165,7 +162,7 @@
                                                   std::back_inserter(*output));
 }
 
-bool HexStringToSpan(StringPiece input, base::span<uint8_t> output) {
+bool HexStringToSpan(StringPiece input, span<uint8_t> output) {
   if (input.size() / 2 != output.size())
     return false;
 
diff --git a/base/strings/string_number_conversions.h b/base/strings/string_number_conversions.h
index f8a3bd8..281d250 100644
--- a/base/strings/string_number_conversions.h
+++ b/base/strings/string_number_conversions.h
@@ -107,6 +107,22 @@
 BASE_EXPORT std::string HexEncode(const void* bytes, size_t size);
 BASE_EXPORT std::string HexEncode(base::span<const uint8_t> bytes);
 
+// Appends a hex representation of `byte`, as two uppercase (by default)
+// characters, to `output`. This is a useful primitive in larger conversion
+// routines.
+inline void AppendHexEncodedByte(uint8_t byte,
+                                 std::string& output,
+                                 bool uppercase = true) {
+  static constexpr char kHexCharsUpper[] = {'0', '1', '2', '3', '4', '5',
+                                            '6', '7', '8', '9', 'A', 'B',
+                                            'C', 'D', 'E', 'F'};
+  static constexpr char kHexCharsLower[] = {'0', '1', '2', '3', '4', '5',
+                                            '6', '7', '8', '9', 'a', 'b',
+                                            'c', 'd', 'e', 'f'};
+  const char* const hex_chars = uppercase ? kHexCharsUpper : kHexCharsLower;
+  output.append({hex_chars[byte >> 4], hex_chars[byte & 0xf]});
+}
+
 // Best effort conversion, see StringToInt above for restrictions.
 // Will only successfully parse hex values that will fit into |output|, i.e.
 // -0x80000000 < |input| < 0x7FFFFFFF.
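For orientation, a short sketch of how the new helper pair is meant to be used (illustrative only, not code from the tree; it assumes only the declarations shown above):

  #include <cstdint>
  #include <string>
  #include <vector>
  #include "base/strings/string_number_conversions.h"

  std::string DumpBytes(const std::vector<uint8_t>& bytes) {
    std::string hex;
    for (uint8_t b : bytes) {
      base::AppendHexEncodedByte(b, hex);  // Uppercase by default, e.g. "8A".
    }
    // Equivalent one-liner, since HexEncode() is now a loop over the helper:
    //   std::string hex = base::HexEncode(bytes);
    return hex;
  }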
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
index 7834139..c0e8806 100644
--- a/base/strings/string_number_conversions_unittest.cc
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -919,12 +919,29 @@
   EXPECT_EQ("1.33489033216e+12", NumberToString(input));
 }
 
+TEST(StringNumberConversionsTest, AppendHexEncodedByte) {
+  std::string hex;
+  AppendHexEncodedByte(0, hex);
+  AppendHexEncodedByte(0, hex, false);
+  AppendHexEncodedByte(1, hex);
+  AppendHexEncodedByte(1, hex, false);
+  AppendHexEncodedByte(0xf, hex);
+  AppendHexEncodedByte(0xf, hex, false);
+  AppendHexEncodedByte(0x8a, hex);
+  AppendHexEncodedByte(0x8a, hex, false);
+  AppendHexEncodedByte(0xe0, hex);
+  AppendHexEncodedByte(0xe0, hex, false);
+  AppendHexEncodedByte(0xff, hex);
+  AppendHexEncodedByte(0xff, hex, false);
+  EXPECT_EQ(hex, "000001010F0f8A8aE0e0FFff");
+}
+
 TEST(StringNumberConversionsTest, HexEncode) {
   std::string hex(HexEncode(nullptr, 0));
   EXPECT_EQ(hex.length(), 0U);
   unsigned char bytes[] = {0x01, 0xff, 0x02, 0xfe, 0x03, 0x80, 0x81};
   hex = HexEncode(bytes, sizeof(bytes));
-  EXPECT_EQ(hex.compare("01FF02FE038081"), 0);
+  EXPECT_EQ(hex, "01FF02FE038081");
 }
 
 // Test cases of known-bad strtod conversions that motivated the use of dmg_fp.
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index 2969ab7..6a8cd34 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -10,27 +10,10 @@
 #ifndef BASE_STRINGS_STRING_PIECE_H_
 #define BASE_STRINGS_STRING_PIECE_H_
 
-#include <functional>
-
 // Many files including this header rely on these being included due to IWYU
 // violations. Preserve the includes for now. As code is migrated away from this
 // header, we can incrementally fix the IWYU violations.
-#include "base/base_export.h"
 #include "base/check.h"
-#include "base/check_op.h"
-#include "base/compiler_specific.h"
-#include "base/cxx20_is_constant_evaluated.h"
 #include "base/strings/string_piece_forward.h"
-#include "base/strings/utf_ostream_operators.h"
-#include "build/build_config.h"
-
-namespace base {
-
-// Historically, `std::hash` did not support `base::StringPiece`. Now
-// `base::StringPiece` is `std::string_view`, so this is no longer necessary.
-// Replace uses of this type with the default hasher.
-using StringPieceHash = std::hash<StringPiece>;
-
-}  // namespace base
 
 #endif  // BASE_STRINGS_STRING_PIECE_H_
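With StringPieceHash gone, hash-based containers keyed on StringPiece simply drop the explicit hasher. A minimal before/after sketch (the container name is illustrative):

  #include <unordered_map>
  #include "base/strings/string_piece.h"

  // Before: std::unordered_map<base::StringPiece, int, base::StringPieceHash> counts;
  // After (base::StringPiece is std::string_view, so std::hash works by default):
  std::unordered_map<base::StringPiece, int> counts;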
diff --git a/base/strings/string_piece_unittest.cc b/base/strings/string_piece_unittest.cc
index 21c298b..7685950 100644
--- a/base/strings/string_piece_unittest.cc
+++ b/base/strings/string_piece_unittest.cc
@@ -684,6 +684,14 @@
   }
 }
 
+// Chromium development assumes StringPiece (which is std::string_view) is
+// implemented with an STL that enables hardening checks. We treat bugs that
+// trigger one of these conditions as functional rather than security bugs. If
+// this test fails on some embedder, it should not be disabled. Instead, the
+// embedder should fix their STL or build configuration to enable corresponding
+// hardening checks.
+//
+// See https://chromium.googlesource.com/chromium/src/+/main/docs/security/faq.md#indexing-a-container-out-of-bounds-hits-a-libcpp_verbose_abort_is-this-a-security-bug
 TEST(StringPieceTest, OutOfBoundsDeath) {
   {
     constexpr StringPiece piece;
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index 1279ea5..bee110b 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -115,7 +115,7 @@
 // ASCII-specific tolower.  The standard library's tolower is locale sensitive,
 // so we don't want to use it here.
 template <typename CharT,
-          typename = std::enable_if_t<std::is_integral<CharT>::value>>
+          typename = std::enable_if_t<std::is_integral_v<CharT>>>
 constexpr CharT ToLowerASCII(CharT c) {
   return internal::ToLowerASCII(c);
 }
@@ -123,7 +123,7 @@
 // ASCII-specific toupper.  The standard library's toupper is locale sensitive,
 // so we don't want to use it here.
 template <typename CharT,
-          typename = std::enable_if_t<std::is_integral<CharT>::value>>
+          typename = std::enable_if_t<std::is_integral_v<CharT>>>
 CharT ToUpperASCII(CharT c) {
   return (c >= 'a' && c <= 'z') ? static_cast<CharT>(c + 'A' - 'a') : c;
 }
diff --git a/base/strings/string_util_impl_helpers.h b/base/strings/string_util_impl_helpers.h
index 9741ef9..da4148a 100644
--- a/base/strings/string_util_impl_helpers.h
+++ b/base/strings/string_util_impl_helpers.h
@@ -519,7 +519,7 @@
     const bool is_strict_mode,
     std::vector<size_t>* offsets) {
   size_t substitutions = subst.size();
-  DCHECK_LT(substitutions, 11U);
+  DCHECK_LT(substitutions, 10U);
 
   size_t sub_length = 0;
   for (const auto& cur : subst) {
diff --git a/base/strings/string_util_internal.h b/base/strings/string_util_internal.h
index 978088f..25dbc41 100644
--- a/base/strings/string_util_internal.h
+++ b/base/strings/string_util_internal.h
@@ -15,7 +15,7 @@
 // ASCII-specific tolower.  The standard library's tolower is locale sensitive,
 // so we don't want to use it here.
 template <typename CharT,
-          typename = std::enable_if_t<std::is_integral<CharT>::value>>
+          typename = std::enable_if_t<std::is_integral_v<CharT>>>
 constexpr CharT ToLowerASCII(CharT c) {
   return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
 }
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index 4bb22e5..b4c797f 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -355,59 +355,53 @@
 TEST(StringUtilTest, as_wcstr) {
   char16_t rw_buffer[10] = {};
   static_assert(
-      std::is_same<wchar_t*, decltype(as_writable_wcstr(rw_buffer))>::value,
-      "");
+      std::is_same_v<wchar_t*, decltype(as_writable_wcstr(rw_buffer))>, "");
   EXPECT_EQ(static_cast<void*>(rw_buffer), as_writable_wcstr(rw_buffer));
 
   std::u16string rw_str(10, '\0');
-  static_assert(
-      std::is_same<wchar_t*, decltype(as_writable_wcstr(rw_str))>::value, "");
+  static_assert(std::is_same_v<wchar_t*, decltype(as_writable_wcstr(rw_str))>,
+                "");
   EXPECT_EQ(static_cast<const void*>(rw_str.data()), as_writable_wcstr(rw_str));
 
   const char16_t ro_buffer[10] = {};
-  static_assert(
-      std::is_same<const wchar_t*, decltype(as_wcstr(ro_buffer))>::value, "");
+  static_assert(std::is_same_v<const wchar_t*, decltype(as_wcstr(ro_buffer))>,
+                "");
   EXPECT_EQ(static_cast<const void*>(ro_buffer), as_wcstr(ro_buffer));
 
   const std::u16string ro_str(10, '\0');
-  static_assert(std::is_same<const wchar_t*, decltype(as_wcstr(ro_str))>::value,
-                "");
+  static_assert(std::is_same_v<const wchar_t*, decltype(as_wcstr(ro_str))>, "");
   EXPECT_EQ(static_cast<const void*>(ro_str.data()), as_wcstr(ro_str));
 
   StringPiece16 piece = ro_buffer;
-  static_assert(std::is_same<const wchar_t*, decltype(as_wcstr(piece))>::value,
-                "");
+  static_assert(std::is_same_v<const wchar_t*, decltype(as_wcstr(piece))>, "");
   EXPECT_EQ(static_cast<const void*>(piece.data()), as_wcstr(piece));
 }
 
 TEST(StringUtilTest, as_u16cstr) {
   wchar_t rw_buffer[10] = {};
   static_assert(
-      std::is_same<char16_t*, decltype(as_writable_u16cstr(rw_buffer))>::value,
-      "");
+      std::is_same_v<char16_t*, decltype(as_writable_u16cstr(rw_buffer))>, "");
   EXPECT_EQ(static_cast<void*>(rw_buffer), as_writable_u16cstr(rw_buffer));
 
   std::wstring rw_str(10, '\0');
   static_assert(
-      std::is_same<char16_t*, decltype(as_writable_u16cstr(rw_str))>::value,
-      "");
+      std::is_same_v<char16_t*, decltype(as_writable_u16cstr(rw_str))>, "");
   EXPECT_EQ(static_cast<const void*>(rw_str.data()),
             as_writable_u16cstr(rw_str));
 
   const wchar_t ro_buffer[10] = {};
   static_assert(
-      std::is_same<const char16_t*, decltype(as_u16cstr(ro_buffer))>::value,
-      "");
+      std::is_same_v<const char16_t*, decltype(as_u16cstr(ro_buffer))>, "");
   EXPECT_EQ(static_cast<const void*>(ro_buffer), as_u16cstr(ro_buffer));
 
   const std::wstring ro_str(10, '\0');
-  static_assert(
-      std::is_same<const char16_t*, decltype(as_u16cstr(ro_str))>::value, "");
+  static_assert(std::is_same_v<const char16_t*, decltype(as_u16cstr(ro_str))>,
+                "");
   EXPECT_EQ(static_cast<const void*>(ro_str.data()), as_u16cstr(ro_str));
 
   WStringPiece piece = ro_buffer;
-  static_assert(
-      std::is_same<const char16_t*, decltype(as_u16cstr(piece))>::value, "");
+  static_assert(std::is_same_v<const char16_t*, decltype(as_u16cstr(piece))>,
+                "");
   EXPECT_EQ(static_cast<const void*>(piece.data()), as_u16cstr(piece));
 }
 #endif  // defined(WCHAR_T_IS_UTF16)
diff --git a/base/strings/stringprintf.cc b/base/strings/stringprintf.cc
index 4ac965a..4cffac4 100644
--- a/base/strings/stringprintf.cc
+++ b/base/strings/stringprintf.cc
@@ -16,53 +16,39 @@
 
 namespace base {
 
-namespace {
-
-// Overloaded wrappers around vsnprintf and vswprintf. The buf_size parameter
-// is the size of the buffer. These return the number of characters in the
-// formatted string excluding the NUL terminator. If the buffer is not
-// large enough to accommodate the formatted string without truncation, they
-// return the number of characters that would be in the fully-formatted string
-// (vsnprintf, and vswprintf on Windows), or -1 (vswprintf on POSIX platforms).
-inline int vsnprintfT(char* buffer,
-                      size_t buf_size,
-                      const char* format,
-                      va_list argptr) {
-  return base::vsnprintf(buffer, buf_size, format, argptr);
+std::string StringPrintf(const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  std::string result;
+  StringAppendV(&result, format, ap);
+  va_end(ap);
+  return result;
 }
 
-#if BUILDFLAG(IS_WIN)
-inline int vsnprintfT(wchar_t* buffer,
-                      size_t buf_size,
-                      const wchar_t* format,
-                      va_list argptr) {
-  return base::vswprintf(buffer, buf_size, format, argptr);
+std::string StringPrintV(const char* format, va_list ap) {
+  std::string result;
+  StringAppendV(&result, format, ap);
+  return result;
 }
-inline int vsnprintfT(char16_t* buffer,
-                      size_t buf_size,
-                      const char16_t* format,
-                      va_list argptr) {
-  return base::vswprintf(reinterpret_cast<wchar_t*>(buffer), buf_size,
-                         reinterpret_cast<const wchar_t*>(format), argptr);
-}
-#endif
 
-// Templatized backend for StringPrintF/StringAppendF. This does not finalize
-// the va_list, the caller is expected to do that.
-template <class CharT>
-static void StringAppendVT(std::basic_string<CharT>* dst,
-                           const CharT* format,
-                           va_list ap) {
+void StringAppendF(std::string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+}
+
+void StringAppendV(std::string* dst, const char* format, va_list ap) {
   // First try with a small fixed size buffer.
   // This buffer size should be kept in sync with StringUtilTest.GrowBoundary
   // and StringUtilTest.StringPrintfBounds.
-  CharT stack_buf[1024];
+  char stack_buf[1024];
 
   va_list ap_copy;
   va_copy(ap_copy, ap);
 
   base::ScopedClearLastError last_error;
-  int result = vsnprintfT(stack_buf, std::size(stack_buf), format, ap_copy);
+  int result = vsnprintf(stack_buf, std::size(stack_buf), format, ap_copy);
   va_end(ap_copy);
 
   if (result >= 0 && static_cast<size_t>(result) < std::size(stack_buf)) {
@@ -76,13 +62,14 @@
   while (true) {
     if (result < 0) {
 #if BUILDFLAG(IS_WIN)
-      // On Windows, vsnprintfT always returns the number of characters in a
+      // On Windows, vsnprintf always returns the number of characters in a
       // fully-formatted string, so if we reach this point, something else is
       // wrong and no amount of buffer-doubling is going to fix it.
       return;
 #else
-      if (errno != 0 && errno != EOVERFLOW)
+      if (errno != 0 && errno != EOVERFLOW) {
         return;
+      }
       // Try doubling the buffer size.
       mem_length *= 2;
 #endif
@@ -93,18 +80,18 @@
 
     if (mem_length > 32 * 1024 * 1024) {
       // That should be plenty, don't try anything larger.  This protects
-      // against huge allocations when using vsnprintfT implementations that
+      // against huge allocations when using vsnprintf implementations that
       // return -1 for reasons other than overflow without setting errno.
       DLOG(WARNING) << "Unable to printf the requested string due to size.";
       return;
     }
 
-    std::vector<CharT> mem_buf(mem_length);
+    std::vector<char> mem_buf(mem_length);
 
     // NOTE: You can only use a va_list once.  Since we're in a while loop, we
     // need to make a new copy each time so we don't use up the original.
     va_copy(ap_copy, ap);
-    result = vsnprintfT(&mem_buf[0], mem_length, format, ap_copy);
+    result = vsnprintf(&mem_buf[0], mem_length, format, ap_copy);
     va_end(ap_copy);
 
     if ((result >= 0) && (static_cast<size_t>(result) < mem_length)) {
@@ -115,110 +102,4 @@
   }
 }
 
-}  // namespace
-
-std::string StringPrintf(const char* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  std::string result;
-  StringAppendV(&result, format, ap);
-  va_end(ap);
-  return result;
-}
-
-#if BUILDFLAG(IS_WIN)
-std::wstring StringPrintf(const wchar_t* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  std::wstring result;
-  StringAppendV(&result, format, ap);
-  va_end(ap);
-  return result;
-}
-
-std::u16string StringPrintf(const char16_t* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  std::u16string result;
-  StringAppendV(&result, format, ap);
-  va_end(ap);
-  return result;
-}
-#endif
-
-std::string StringPrintV(const char* format, va_list ap) {
-  std::string result;
-  StringAppendV(&result, format, ap);
-  return result;
-}
-
-const std::string& SStringPrintf(std::string* dst, const char* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  dst->clear();
-  StringAppendV(dst, format, ap);
-  va_end(ap);
-  return *dst;
-}
-
-#if BUILDFLAG(IS_WIN)
-const std::wstring& SStringPrintf(std::wstring* dst,
-                                  const wchar_t* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  dst->clear();
-  StringAppendV(dst, format, ap);
-  va_end(ap);
-  return *dst;
-}
-
-const std::u16string& SStringPrintf(std::u16string* dst,
-                                    const char16_t* format,
-                                    ...) {
-  va_list ap;
-  va_start(ap, format);
-  dst->clear();
-  StringAppendV(dst, format, ap);
-  va_end(ap);
-  return *dst;
-}
-#endif
-
-void StringAppendF(std::string* dst, const char* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  StringAppendV(dst, format, ap);
-  va_end(ap);
-}
-
-#if BUILDFLAG(IS_WIN)
-void StringAppendF(std::wstring* dst, const wchar_t* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  StringAppendV(dst, format, ap);
-  va_end(ap);
-}
-
-void StringAppendF(std::u16string* dst, const char16_t* format, ...) {
-  va_list ap;
-  va_start(ap, format);
-  StringAppendV(dst, format, ap);
-  va_end(ap);
-}
-#endif
-
-void StringAppendV(std::string* dst, const char* format, va_list ap) {
-  StringAppendVT(dst, format, ap);
-}
-
-#if BUILDFLAG(IS_WIN)
-void StringAppendV(std::wstring* dst, const wchar_t* format, va_list ap) {
-  StringAppendVT(dst, format, ap);
-}
-
-void StringAppendV(std::u16string* dst, const char16_t* format, va_list ap) {
-  StringAppendVT(dst, format, ap);
-}
-#endif
-
 }  // namespace base
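With the wide and UTF-16 overloads removed, callers that still need a wide result format in UTF-8 and convert at the edge. A minimal sketch (it assumes base::UTF8ToWide from utf_string_conversions.h, which is not part of this diff):

  #include <string>
  #include "base/strings/stringprintf.h"
  #include "base/strings/utf_string_conversions.h"

  std::wstring FormatWide(int value) {
    // Only the narrow overload remains; convert once after formatting.
    std::string utf8 = base::StringPrintf("value = %d", value);
    return base::UTF8ToWide(utf8);
  }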
diff --git a/base/strings/stringprintf.h b/base/strings/stringprintf.h
index 7894c49..5bd9474 100644
--- a/base/strings/stringprintf.h
+++ b/base/strings/stringprintf.h
@@ -5,70 +5,74 @@
 #ifndef BASE_STRINGS_STRINGPRINTF_H_
 #define BASE_STRINGS_STRINGPRINTF_H_
 
-#include <stdarg.h>   // va_list
+#include <stdarg.h>  // va_list
 
 #include <string>
+#include <string_view>
 
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "build/build_config.h"
 
 namespace base {
 
-// Return a C++ string given printf-like input.
+// Returns a C++ string given `printf()`-like input. The format string should be
+// a compile-time constant (like with `std::format()`).
+// TODO(crbug.com/1371963): Implement in terms of `std::format()`,
+// `absl::StrFormat()`, or similar.
 [[nodiscard]] BASE_EXPORT std::string StringPrintf(const char* format, ...)
     PRINTF_FORMAT(1, 2);
-#if BUILDFLAG(IS_WIN)
-// Note: Unfortunately compile time checking of the format string for UTF-16
-// strings is not supported by any compiler, thus these functions should be used
-// carefully and sparingly. Also applies to SStringPrintf and StringAppendV
-// below.
-[[nodiscard]] BASE_EXPORT std::wstring StringPrintf(const wchar_t* format, ...)
-    WPRINTF_FORMAT(1, 2);
-[[nodiscard]] BASE_EXPORT std::u16string StringPrintf(const char16_t* format,
-                                                      ...) WPRINTF_FORMAT(1, 2);
+
+// Returns a C++ string given `printf()`-like input. The format string must be a
+// run-time value (like with `std::vformat()`), or this will not compile.
+// Because this does not check arguments at compile-time, prefer
+// `StringPrintf()` whenever possible.
+template <typename... Args>
+[[nodiscard]] std::string StringPrintfNonConstexpr(std::string_view format,
+                                                   const Args&... args) {
+  // TODO(crbug.com/1371963): Implement in terms of `std::vformat()`,
+  // `absl::FormatUntyped()`, or similar.
+  return StringPrintf(format.data(), args...);
+}
+
+// If possible, guide users to use `StringPrintf()` instead of
+// `StringPrintfNonConstexpr()` when the format string is constexpr.
+//
+// It would be nice to do this with `std::enable_if`, but I don't know of a way;
+// whether a string constant's value is available at compile time is not
+// something easily obtained from the type system, and trying to pass various
+// forms of string constant to non-type template parameters produces a variety
+// of compile errors.
+#if HAS_ATTRIBUTE(enable_if)
+// Disable calling with a constexpr `std::string_view`.
+template <typename... Args>
+[[nodiscard]] std::string StringPrintfNonConstexpr(std::string_view format,
+                                                   const Args&... args)
+    __attribute__((enable_if(
+        [](std::string_view s) { return s.empty() || s[0] == s[0]; }(format),
+        "Use StringPrintf() for constexpr format strings"))) = delete;
+// Disable calling with a constexpr `char[]` or `char*`.
+template <typename... Args>
+[[nodiscard]] std::string StringPrintfNonConstexpr(const char* format,
+                                                   const Args&... args)
+    __attribute__((
+        enable_if([](const char* s) { return !!s; }(format),
+                  "Use StringPrintf() for constexpr format strings"))) = delete;
 #endif
 
-// Return a C++ string given vprintf-like input.
+// Returns a C++ string given `vprintf()`-like input.
 [[nodiscard]] BASE_EXPORT std::string StringPrintV(const char* format,
                                                    va_list ap)
     PRINTF_FORMAT(1, 0);
 
-// Store result into a supplied string and return it.
-BASE_EXPORT const std::string& SStringPrintf(std::string* dst,
-                                             const char* format,
-                                             ...) PRINTF_FORMAT(2, 3);
-#if BUILDFLAG(IS_WIN)
-BASE_EXPORT const std::wstring& SStringPrintf(std::wstring* dst,
-                                              const wchar_t* format,
-                                              ...) WPRINTF_FORMAT(2, 3);
-BASE_EXPORT const std::u16string& SStringPrintf(std::u16string* dst,
-                                                const char16_t* format,
-                                                ...) WPRINTF_FORMAT(2, 3);
-#endif
-
-// Append result to a supplied string.
+// Like `StringPrintf()`, but appends result to a supplied string.
+// TODO(crbug.com/1371963): Implement in terms of `std::format_to()`,
+// `absl::StrAppendFormat()`, or similar.
 BASE_EXPORT void StringAppendF(std::string* dst, const char* format, ...)
     PRINTF_FORMAT(2, 3);
-#if BUILDFLAG(IS_WIN)
-BASE_EXPORT void StringAppendF(std::wstring* dst, const wchar_t* format, ...)
-    WPRINTF_FORMAT(2, 3);
-BASE_EXPORT void StringAppendF(std::u16string* dst, const char16_t* format, ...)
-    WPRINTF_FORMAT(2, 3);
-#endif
 
-// Lower-level routine that takes a va_list and appends to a specified
-// string.  All other routines are just convenience wrappers around it.
+// Like `StringPrintV()`, but appends result to a supplied string.
 BASE_EXPORT void StringAppendV(std::string* dst, const char* format, va_list ap)
     PRINTF_FORMAT(2, 0);
-#if BUILDFLAG(IS_WIN)
-BASE_EXPORT void StringAppendV(std::wstring* dst,
-                               const wchar_t* format,
-                               va_list ap) WPRINTF_FORMAT(2, 0);
-BASE_EXPORT void StringAppendV(std::u16string* dst,
-                               const char16_t* format,
-                               va_list ap) WPRINTF_FORMAT(2, 0);
-#endif
 
 }  // namespace base
 
diff --git a/base/strings/stringprintf_nocompile.nc b/base/strings/stringprintf_nocompile.nc
new file mode 100644
index 0000000..03983fd
--- /dev/null
+++ b/base/strings/stringprintf_nocompile.nc
@@ -0,0 +1,30 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/strings/stringprintf.h"
+
+#include <string_view>
+#include <tuple>
+
+namespace base {
+
+void ConstexprStringView() {
+  static constexpr std::string_view kTest = "test %s";
+  std::ignore = StringPrintfNonConstexpr(kTest, "123");  // expected-error {{call to deleted function 'StringPrintfNonConstexpr'}}
+}
+
+void ConstexprCharArray() {
+  static constexpr char kTest[] = "test %s";
+  std::ignore = StringPrintfNonConstexpr(kTest, "123");  // expected-error {{call to deleted function 'StringPrintfNonConstexpr'}}
+}
+
+void ConstexprCharPointer() {
+  static constexpr const char* kTest = "test %s";
+  std::ignore = StringPrintfNonConstexpr(kTest, "123");  // expected-error {{call to deleted function 'StringPrintfNonConstexpr'}}
+}
+
+}  // namespace base
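The no-compile cases above reject constexpr format strings; the intended positive use is a pattern that is only known at run time. A hedged sketch (the pattern selection is illustrative, not from the tree):

  #include <string>
  #include "base/strings/stringprintf.h"

  std::string FormatRequest(bool verbose, int id) {
    // The pattern is a run-time value, so the compile-time-checked
    // StringPrintf() overload cannot be used; the deliberately named
    // NonConstexpr variant is the escape hatch.
    std::string pattern = verbose ? "request #%d (verbose)" : "request #%d";
    return base::StringPrintfNonConstexpr(pattern, id);
  }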
diff --git a/base/strings/stringprintf_unittest.cc b/base/strings/stringprintf_unittest.cc
index 270afe1..93e8b67 100644
--- a/base/strings/stringprintf_unittest.cc
+++ b/base/strings/stringprintf_unittest.cc
@@ -35,58 +35,24 @@
 
 TEST(StringPrintfTest, StringPrintfMisc) {
   EXPECT_EQ("123hello w", StringPrintf("%3d%2s %1c", 123, "hello", 'w'));
-#if BUILDFLAG(IS_WIN)
-  EXPECT_EQ(L"123hello w", StringPrintf(L"%3d%2ls %1lc", 123, L"hello", 'w'));
-  EXPECT_EQ(u"123hello w", StringPrintf(u"%3d%2ls %1lc", 123, u"hello", 'w'));
-#endif
 }
 
 TEST(StringPrintfTest, StringAppendfEmptyString) {
   std::string value("Hello");
   StringAppendF(&value, "%s", "");
   EXPECT_EQ("Hello", value);
-
-#if BUILDFLAG(IS_WIN)
-  std::wstring valuew(L"Hello");
-  StringAppendF(&valuew, L"%ls", L"");
-  EXPECT_EQ(L"Hello", valuew);
-
-  std::u16string value16(u"Hello");
-  StringAppendF(&value16, u"%ls", u"");
-  EXPECT_EQ(u"Hello", value16);
-#endif
 }
 
 TEST(StringPrintfTest, StringAppendfString) {
   std::string value("Hello");
   StringAppendF(&value, " %s", "World");
   EXPECT_EQ("Hello World", value);
-
-#if BUILDFLAG(IS_WIN)
-  std::wstring valuew(L"Hello");
-  StringAppendF(&valuew, L" %ls", L"World");
-  EXPECT_EQ(L"Hello World", valuew);
-
-  std::u16string value16(u"Hello");
-  StringAppendF(&value16, u" %ls", u"World");
-  EXPECT_EQ(u"Hello World", value16);
-#endif
 }
 
 TEST(StringPrintfTest, StringAppendfInt) {
   std::string value("Hello");
   StringAppendF(&value, " %d", 123);
   EXPECT_EQ("Hello 123", value);
-
-#if BUILDFLAG(IS_WIN)
-  std::wstring valuew(L"Hello");
-  StringAppendF(&valuew, L" %d", 123);
-  EXPECT_EQ(L"Hello 123", valuew);
-
-  std::u16string value16(u"Hello");
-  StringAppendF(&value16, u" %d", 123);
-  EXPECT_EQ(u"Hello 123", value16);
-#endif
 }
 
 // Make sure that lengths exactly around the initial buffer size are handled
@@ -105,23 +71,7 @@
   for (int i = 1; i < 3; i++) {
     src[kSrcLen - i] = 0;
     std::string out;
-    SStringPrintf(&out, "%s", src);
-    EXPECT_STREQ(src, out.c_str());
-
-#if BUILDFLAG(IS_WIN)
-    srcw[kSrcLen - i] = 0;
-    std::wstring outw;
-    SStringPrintf(&outw, L"%ls", srcw);
-    EXPECT_STREQ(srcw, outw.c_str());
-
-    src16[kSrcLen - i] = 0;
-    std::u16string out16;
-    SStringPrintf(&out16, u"%ls", src16);
-    // EXPECT_STREQ does not support const char16_t* strings yet.
-    // Dispatch to the const wchar_t* overload instead.
-    EXPECT_STREQ(reinterpret_cast<const wchar_t*>(src16),
-                 reinterpret_cast<const wchar_t*>(out16.c_str()));
-#endif
+    EXPECT_EQ(src, StringPrintf("%s", src));
   }
 }
 
@@ -134,9 +84,6 @@
 
   const char fmt[] = "%sB%sB%sB%sB%sB%sB%s";
 
-  std::string out;
-  SStringPrintf(&out, fmt, src, src, src, src, src, src, src);
-
   const int kRefSize = 320000;
   char* ref = new char[kRefSize];
 #if BUILDFLAG(IS_WIN)
@@ -145,7 +92,7 @@
   snprintf(ref, kRefSize, fmt, src, src, src, src, src, src, src);
 #endif
 
-  EXPECT_STREQ(ref, out.c_str());
+  EXPECT_EQ(ref, StringPrintf(fmt, src, src, src, src, src, src, src));
   delete[] ref;
 }
 
@@ -153,16 +100,6 @@
   std::string out;
   StringAppendVTestHelper(&out, "%d foo %s", 1, "bar");
   EXPECT_EQ("1 foo bar", out);
-
-#if BUILDFLAG(IS_WIN)
-  std::wstring outw;
-  StringAppendVTestHelper(&outw, L"%d foo %ls", 1, L"bar");
-  EXPECT_EQ(L"1 foo bar", outw);
-
-  std::u16string out16;
-  StringAppendVTestHelper(&out16, u"%d foo %ls", 1, u"bar");
-  EXPECT_EQ(u"1 foo bar", out16);
-#endif
 }
 
 // Test the boundary condition for the size of the string_util's
@@ -178,24 +115,9 @@
     src[i] = 'a';
   src[kBufLen - 1] = 0;
 
-  std::string out;
-  SStringPrintf(&out, "%s", src);
-
-  EXPECT_STREQ(src, out.c_str());
+  EXPECT_EQ(src, StringPrintf("%s", src));
 }
 
-#if BUILDFLAG(IS_WIN)
-TEST(StringPrintfTest, Invalid) {
-  wchar_t invalid[2];
-  invalid[0] = 0xffff;
-  invalid[1] = 0;
-
-  std::wstring out;
-  SStringPrintf(&out, L"%ls", invalid);
-  EXPECT_STREQ(invalid, out.c_str());
-}
-#endif
-
 // Test that StringPrintf and StringAppendV do not change errno.
 TEST(StringPrintfTest, StringPrintfErrno) {
   errno = 1;
diff --git a/base/strings/sys_string_conversions.h b/base/strings/sys_string_conversions.h
index 9bb872f..006f2e3 100644
--- a/base/strings/sys_string_conversions.h
+++ b/base/strings/sys_string_conversions.h
@@ -20,7 +20,7 @@
 #if BUILDFLAG(IS_APPLE)
 #include <CoreFoundation/CoreFoundation.h>
 
-#include "base/mac/scoped_cftyperef.h"
+#include "base/apple/scoped_cftyperef.h"
 
 #ifdef __OBJC__
 @class NSString;
@@ -63,10 +63,10 @@
 // Converts between strings and CFStringRefs/NSStrings.
 
 // Converts a string to a CFStringRef. Returns null on failure.
-[[nodiscard]] BASE_EXPORT ScopedCFTypeRef<CFStringRef> SysUTF8ToCFStringRef(
-    StringPiece utf8);
-[[nodiscard]] BASE_EXPORT ScopedCFTypeRef<CFStringRef> SysUTF16ToCFStringRef(
-    StringPiece16 utf16);
+[[nodiscard]] BASE_EXPORT apple::ScopedCFTypeRef<CFStringRef>
+SysUTF8ToCFStringRef(StringPiece utf8);
+[[nodiscard]] BASE_EXPORT apple::ScopedCFTypeRef<CFStringRef>
+SysUTF16ToCFStringRef(StringPiece16 utf16);
 
 // Converts a CFStringRef to a string. Returns an empty string on failure. It is
 // not valid to call these with a null `ref`.
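Call sites only need to re-spell the smart-pointer namespace; the conversion calls themselves are unchanged. A hedged, Apple-only sketch (function name is illustrative):

  #include "build/build_config.h"

  #if BUILDFLAG(IS_APPLE)
  #include <CoreFoundation/CoreFoundation.h>
  #include "base/strings/sys_string_conversions.h"

  CFStringRef CopyLabel(base::StringPiece utf8) {
    // base::ScopedCFTypeRef<CFStringRef> becomes base::apple::ScopedCFTypeRef.
    base::apple::ScopedCFTypeRef<CFStringRef> cf_label =
        base::SysUTF8ToCFStringRef(utf8);
    return cf_label.release();  // Transfer ownership to the caller.
  }
  #endif  // BUILDFLAG(IS_APPLE)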
diff --git a/base/strings/sys_string_conversions_apple.mm b/base/strings/sys_string_conversions_apple.mm
new file mode 100644
index 0000000..8d656df
--- /dev/null
+++ b/base/strings/sys_string_conversions_apple.mm
@@ -0,0 +1,181 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#import <Foundation/Foundation.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/apple/bridging.h"
+#include "base/apple/foundation_util.h"
+#include "base/apple/scoped_cftyperef.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+namespace {
+
+// Converts the supplied CFString into the specified encoding, and returns it as
+// a C++ library string of the template type. Returns an empty string on
+// failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template <typename StringType>
+StringType CFStringToStringWithEncodingT(CFStringRef cfstring,
+                                         CFStringEncoding encoding) {
+  CFIndex length = CFStringGetLength(cfstring);
+  if (length == 0) {
+    return StringType();
+  }
+
+  CFRange whole_string = CFRangeMake(0, length);
+  CFIndex out_size;
+  CFIndex converted = CFStringGetBytes(cfstring, whole_string, encoding,
+                                       /*lossByte=*/0,
+                                       /*isExternalRepresentation=*/false,
+                                       /*buffer=*/nullptr,
+                                       /*maxBufLen=*/0, &out_size);
+  if (converted == 0 || out_size <= 0) {
+    return StringType();
+  }
+
+  // `out_size` is the number of UInt8-sized units needed in the destination.
+  // A buffer allocated as UInt8 units might not be properly aligned to
+  // contain elements of StringType::value_type.  Use a container for the
+  // proper value_type, and convert `out_size` by figuring the number of
+  // value_type elements per UInt8.  Leave room for a NUL terminator.
+  size_t elements = static_cast<size_t>(out_size) * sizeof(UInt8) /
+                        sizeof(typename StringType::value_type) +
+                    1;
+
+  std::vector<typename StringType::value_type> out_buffer(elements);
+  converted =
+      CFStringGetBytes(cfstring, whole_string, encoding,
+                       /*lossByte=*/0,
+                       /*isExternalRepresentation=*/false,
+                       reinterpret_cast<UInt8*>(&out_buffer[0]), out_size,
+                       /*usedBufLen=*/nullptr);
+  if (converted == 0) {
+    return StringType();
+  }
+
+  out_buffer[elements - 1] = '\0';
+  return StringType(&out_buffer[0], elements - 1);
+}
+
+// Given a C++ library string `in` with an encoding specified by `in_encoding`,
+// converts it to `out_encoding` and returns it as a C++ library string of the
+// `OutStringType` template type. Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template <typename InStringType, typename OutStringType>
+OutStringType StringToStringWithEncodingsT(const InStringType& in,
+                                           CFStringEncoding in_encoding,
+                                           CFStringEncoding out_encoding) {
+  typename InStringType::size_type in_length = in.length();
+  if (in_length == 0) {
+    return OutStringType();
+  }
+
+  apple::ScopedCFTypeRef<CFStringRef> cfstring(CFStringCreateWithBytesNoCopy(
+      kCFAllocatorDefault, reinterpret_cast<const UInt8*>(in.data()),
+      checked_cast<CFIndex>(in_length *
+                            sizeof(typename InStringType::value_type)),
+      in_encoding,
+      /*isExternalRepresentation=*/false, kCFAllocatorNull));
+  if (!cfstring) {
+    return OutStringType();
+  }
+
+  return CFStringToStringWithEncodingT<OutStringType>(cfstring.get(),
+                                                      out_encoding);
+}
+
+// Given a StringPiece `in` with an encoding specified by `in_encoding`, returns
+// it as a CFStringRef. Returns null on failure.
+template <typename CharT>
+apple::ScopedCFTypeRef<CFStringRef> StringPieceToCFStringWithEncodingsT(
+    BasicStringPiece<CharT> in,
+    CFStringEncoding in_encoding) {
+  const auto in_length = in.length();
+  if (in_length == 0) {
+    return apple::ScopedCFTypeRef<CFStringRef>(CFSTR(""),
+                                               base::scoped_policy::RETAIN);
+  }
+
+  return apple::ScopedCFTypeRef<CFStringRef>(CFStringCreateWithBytes(
+      kCFAllocatorDefault, reinterpret_cast<const UInt8*>(in.data()),
+      checked_cast<CFIndex>(in_length * sizeof(CharT)), in_encoding, false));
+}
+
+}  // namespace
+
+// The CFStringEncodings used below specify the byte ordering explicitly,
+// otherwise CFString will be confused when strings don't carry BOMs, as they
+// typically won't.
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToUTF8(const std::wstring& wide) {
+  return StringToStringWithEncodingsT<std::wstring, std::string>(
+      wide, kCFStringEncodingUTF32LE, kCFStringEncodingUTF8);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysUTF8ToWide(StringPiece utf8) {
+  return StringToStringWithEncodingsT<StringPiece, std::wstring>(
+      utf8, kCFStringEncodingUTF8, kCFStringEncodingUTF32LE);
+}
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  return SysWideToUTF8(wide);
+}
+
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
+  return SysUTF8ToWide(native_mb);
+}
+
+apple::ScopedCFTypeRef<CFStringRef> SysUTF8ToCFStringRef(StringPiece utf8) {
+  return StringPieceToCFStringWithEncodingsT(utf8, kCFStringEncodingUTF8);
+}
+
+apple::ScopedCFTypeRef<CFStringRef> SysUTF16ToCFStringRef(StringPiece16 utf16) {
+  return StringPieceToCFStringWithEncodingsT(utf16, kCFStringEncodingUTF16LE);
+}
+
+NSString* SysUTF8ToNSString(StringPiece utf8) {
+  return base::apple::CFToNSOwnershipCast(SysUTF8ToCFStringRef(utf8).release());
+}
+
+NSString* SysUTF16ToNSString(StringPiece16 utf16) {
+  return base::apple::CFToNSOwnershipCast(
+      SysUTF16ToCFStringRef(utf16).release());
+}
+
+std::string SysCFStringRefToUTF8(CFStringRef ref) {
+  return CFStringToStringWithEncodingT<std::string>(ref, kCFStringEncodingUTF8);
+}
+
+std::u16string SysCFStringRefToUTF16(CFStringRef ref) {
+  return CFStringToStringWithEncodingT<std::u16string>(
+      ref, kCFStringEncodingUTF16LE);
+}
+
+std::string SysNSStringToUTF8(NSString* nsstring) {
+  if (!nsstring) {
+    return std::string();
+  }
+  return SysCFStringRefToUTF8(apple::NSToCFPtrCast(nsstring));
+}
+
+std::u16string SysNSStringToUTF16(NSString* nsstring) {
+  if (!nsstring) {
+    return std::u16string();
+  }
+  return SysCFStringRefToUTF16(apple::NSToCFPtrCast(nsstring));
+}
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions_apple_unittest.mm b/base/strings/sys_string_conversions_apple_unittest.mm
new file mode 100644
index 0000000..e4480ff
--- /dev/null
+++ b/base/strings/sys_string_conversions_apple_unittest.mm
@@ -0,0 +1,56 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#import <Foundation/Foundation.h>
+
+#include <string>
+
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(SysStrings, ConversionsFromNSString) {
+  EXPECT_STREQ("Hello, world!", SysNSStringToUTF8(@"Hello, world!").c_str());
+
+  // Conversions should be able to handle a NULL value without crashing.
+  EXPECT_STREQ("", SysNSStringToUTF8(nil).c_str());
+  EXPECT_EQ(std::u16string(), SysNSStringToUTF16(nil));
+}
+
+std::vector<std::string> GetRoundTripStrings() {
+  return {
+      "Hello, World!",  // ASCII / ISO-8859 string (also valid UTF-8)
+      "a\0b",           // UTF-8 with embedded NUL byte
+      "λf",             // lowercase lambda + 'f'
+      "χρώμιο",         // "chromium" in greek
+      "כרום",           // "chromium" in hebrew
+      "クロム",         // "chromium" in japanese
+
+      // Tarot card symbol "the morning", which is outside of the BMP and is not
+      // representable with one UTF-16 code unit.
+      "🃦",
+  };
+}
+
+TEST(SysStrings, RoundTripsFromUTF8) {
+  for (const auto& string8 : GetRoundTripStrings()) {
+    NSString* nsstring8 = SysUTF8ToNSString(string8);
+    std::string back8 = SysNSStringToUTF8(nsstring8);
+    EXPECT_EQ(string8, back8);
+  }
+}
+
+TEST(SysStrings, RoundTripsFromUTF16) {
+  for (const auto& string8 : GetRoundTripStrings()) {
+    std::u16string string16 = base::UTF8ToUTF16(string8);
+    NSString* nsstring16 = SysUTF16ToNSString(string16);
+    std::u16string back16 = SysNSStringToUTF16(nsstring16);
+    EXPECT_EQ(string16, back16);
+  }
+}
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions_mac.mm b/base/strings/sys_string_conversions_mac.mm
deleted file mode 100644
index 94116a6..0000000
--- a/base/strings/sys_string_conversions_mac.mm
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/strings/sys_string_conversions.h"
-
-#import <Foundation/Foundation.h>
-#include <stddef.h>
-
-#include <vector>
-
-#include "base/apple/bridging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/strings/string_piece.h"
-
-namespace base {
-
-namespace {
-
-// Converts the supplied CFString into the specified encoding, and returns it as
-// a C++ library string of the template type. Returns an empty string on
-// failure.
-//
-// Do not assert in this function since it is used by the assertion code!
-template <typename StringType>
-StringType CFStringToStringWithEncodingT(CFStringRef cfstring,
-                                         CFStringEncoding encoding) {
-  CFIndex length = CFStringGetLength(cfstring);
-  if (length == 0)
-    return StringType();
-
-  CFRange whole_string = CFRangeMake(0, length);
-  CFIndex out_size;
-  CFIndex converted = CFStringGetBytes(cfstring, whole_string, encoding,
-                                       /*lossByte=*/0,
-                                       /*isExternalRepresentation=*/false,
-                                       /*buffer=*/nullptr,
-                                       /*maxBufLen=*/0, &out_size);
-  if (converted == 0 || out_size <= 0)
-    return StringType();
-
-  // `out_size` is the number of UInt8-sized units needed in the destination.
-  // A buffer allocated as UInt8 units might not be properly aligned to
-  // contain elements of StringType::value_type.  Use a container for the
-  // proper value_type, and convert `out_size` by figuring the number of
-  // value_type elements per UInt8.  Leave room for a NUL terminator.
-  size_t elements = static_cast<size_t>(out_size) * sizeof(UInt8) /
-                        sizeof(typename StringType::value_type) +
-                    1;
-
-  std::vector<typename StringType::value_type> out_buffer(elements);
-  converted =
-      CFStringGetBytes(cfstring, whole_string, encoding,
-                       /*lossByte=*/0,
-                       /*isExternalRepresentation=*/false,
-                       reinterpret_cast<UInt8*>(&out_buffer[0]), out_size,
-                       /*usedBufLen=*/nullptr);
-  if (converted == 0)
-    return StringType();
-
-  out_buffer[elements - 1] = '\0';
-  return StringType(&out_buffer[0], elements - 1);
-}
-
-// Given a C++ library string `in` with an encoding specified by `in_encoding`,
-// converts it to `out_encoding` and returns it as a C++ library string of the
-// `OutStringType` template type. Returns an empty string on failure.
-//
-// Do not assert in this function since it is used by the assertion code!
-template <typename InStringType, typename OutStringType>
-OutStringType StringToStringWithEncodingsT(const InStringType& in,
-                                           CFStringEncoding in_encoding,
-                                           CFStringEncoding out_encoding) {
-  typename InStringType::size_type in_length = in.length();
-  if (in_length == 0)
-    return OutStringType();
-
-  base::ScopedCFTypeRef<CFStringRef> cfstring(CFStringCreateWithBytesNoCopy(
-      kCFAllocatorDefault, reinterpret_cast<const UInt8*>(in.data()),
-      checked_cast<CFIndex>(in_length *
-                            sizeof(typename InStringType::value_type)),
-      in_encoding,
-      /*isExternalRepresentation=*/false, kCFAllocatorNull));
-  if (!cfstring)
-    return OutStringType();
-
-  return CFStringToStringWithEncodingT<OutStringType>(cfstring, out_encoding);
-}
-
-// Given a StringPiece `in` with an encoding specified by `in_encoding`, returns
-// it as a CFStringRef. Returns null on failure.
-template <typename CharT>
-ScopedCFTypeRef<CFStringRef> StringPieceToCFStringWithEncodingsT(
-    BasicStringPiece<CharT> in,
-    CFStringEncoding in_encoding) {
-  const auto in_length = in.length();
-  if (in_length == 0)
-    return ScopedCFTypeRef<CFStringRef>(CFSTR(""), base::scoped_policy::RETAIN);
-
-  return ScopedCFTypeRef<CFStringRef>(CFStringCreateWithBytes(
-      kCFAllocatorDefault, reinterpret_cast<const UInt8*>(in.data()),
-      checked_cast<CFIndex>(in_length * sizeof(CharT)), in_encoding, false));
-}
-
-}  // namespace
-
-// The CFStringEncodings used below specify the byte ordering explicitly,
-// otherwise CFString will be confused when strings don't carry BOMs, as they
-// typically won't.
-
-// Do not assert in this function since it is used by the assertion code!
-std::string SysWideToUTF8(const std::wstring& wide) {
-  return StringToStringWithEncodingsT<std::wstring, std::string>(
-      wide, kCFStringEncodingUTF32LE, kCFStringEncodingUTF8);
-}
-
-// Do not assert in this function since it is used by the assertion code!
-std::wstring SysUTF8ToWide(StringPiece utf8) {
-  return StringToStringWithEncodingsT<StringPiece, std::wstring>(
-      utf8, kCFStringEncodingUTF8, kCFStringEncodingUTF32LE);
-}
-
-std::string SysWideToNativeMB(const std::wstring& wide) {
-  return SysWideToUTF8(wide);
-}
-
-std::wstring SysNativeMBToWide(StringPiece native_mb) {
-  return SysUTF8ToWide(native_mb);
-}
-
-ScopedCFTypeRef<CFStringRef> SysUTF8ToCFStringRef(StringPiece utf8) {
-  return StringPieceToCFStringWithEncodingsT(utf8, kCFStringEncodingUTF8);
-}
-
-ScopedCFTypeRef<CFStringRef> SysUTF16ToCFStringRef(StringPiece16 utf16) {
-  return StringPieceToCFStringWithEncodingsT(utf16, kCFStringEncodingUTF16LE);
-}
-
-NSString* SysUTF8ToNSString(StringPiece utf8) {
-  return base::apple::CFToNSOwnershipCast(SysUTF8ToCFStringRef(utf8).release());
-}
-
-NSString* SysUTF16ToNSString(StringPiece16 utf16) {
-  return base::apple::CFToNSOwnershipCast(
-      SysUTF16ToCFStringRef(utf16).release());
-}
-
-std::string SysCFStringRefToUTF8(CFStringRef ref) {
-  return CFStringToStringWithEncodingT<std::string>(ref, kCFStringEncodingUTF8);
-}
-
-std::u16string SysCFStringRefToUTF16(CFStringRef ref) {
-  return CFStringToStringWithEncodingT<std::u16string>(
-      ref, kCFStringEncodingUTF16LE);
-}
-
-std::string SysNSStringToUTF8(NSString* nsstring) {
-  if (!nsstring)
-    return std::string();
-  return SysCFStringRefToUTF8(apple::NSToCFPtrCast(nsstring));
-}
-
-std::u16string SysNSStringToUTF16(NSString* nsstring) {
-  if (!nsstring)
-    return std::u16string();
-  return SysCFStringRefToUTF16(apple::NSToCFPtrCast(nsstring));
-}
-
-}  // namespace base
diff --git a/base/strings/sys_string_conversions_mac_unittest.mm b/base/strings/sys_string_conversions_mac_unittest.mm
deleted file mode 100644
index f786ad4..0000000
--- a/base/strings/sys_string_conversions_mac_unittest.mm
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import <Foundation/Foundation.h>
-
-#include <string>
-
-#include "base/strings/sys_string_conversions.h"
-#include "base/strings/utf_string_conversions.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-TEST(SysStrings, ConversionsFromNSString) {
-  EXPECT_STREQ("Hello, world!", SysNSStringToUTF8(@"Hello, world!").c_str());
-
-  // Conversions should be able to handle a NULL value without crashing.
-  EXPECT_STREQ("", SysNSStringToUTF8(nil).c_str());
-  EXPECT_EQ(std::u16string(), SysNSStringToUTF16(nil));
-}
-
-std::vector<std::string> GetRoundTripStrings() {
-  return {
-      "Hello, World!",  // ASCII / ISO8859 string (also valid UTF-8)
-      "a\0b",           // UTF-8 with embedded NUL byte
-      "λf",             // lowercase lambda + 'f'
-      "χρώμιο",         // "chromium" in greek
-      "כרום",           // "chromium" in hebrew
-      "クロム",         // "chromium" in japanese
-
-      // Tarot card symbol "the morning", which does not fit in one UTF-16
-      // character.
-      "🃦",
-  };
-}
-
-TEST(SysStrings, RoundTripsFromUTF8) {
-  for (const auto& string8 : GetRoundTripStrings()) {
-    NSString* nsstring8 = SysUTF8ToNSString(string8);
-    std::string back8 = SysNSStringToUTF8(nsstring8);
-    EXPECT_EQ(string8, back8);
-  }
-}
-
-TEST(SysStrings, RoundTripsFromUTF16) {
-  for (const auto& string8 : GetRoundTripStrings()) {
-    std::u16string string16 = base::UTF8ToUTF16(string8);
-    NSString* nsstring16 = SysUTF16ToNSString(string16);
-    std::u16string back16 = SysNSStringToUTF16(nsstring16);
-    EXPECT_EQ(string16, back16);
-  }
-}
-
-}  // namespace base
diff --git a/base/strings/sys_string_conversions_unittest.cc b/base/strings/sys_string_conversions_unittest.cc
index 66e716d..85be25e 100644
--- a/base/strings/sys_string_conversions_unittest.cc
+++ b/base/strings/sys_string_conversions_unittest.cc
@@ -25,7 +25,8 @@
   EXPECT_EQ("Hello, world", SysWideToUTF8(L"Hello, world"));
   EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToUTF8(L"\x4f60\x597d"));
 
-  // >16 bits
+  // A value outside of the BMP and therefore not representable with one UTF-16
+  // code unit.
   EXPECT_EQ("\xF0\x90\x8C\x80", SysWideToUTF8(kSysWideOldItalicLetterA));
 
   // Error case. When Windows finds a UTF-16 character going off the end of
@@ -52,7 +53,9 @@
 TEST(SysStrings, SysUTF8ToWide) {
   EXPECT_EQ(L"Hello, world", SysUTF8ToWide("Hello, world"));
   EXPECT_EQ(L"\x4f60\x597d", SysUTF8ToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
-  // >16 bits
+
+  // A value outside of the BMP and therefore not representable with one UTF-16
+  // code unit.
   EXPECT_EQ(kSysWideOldItalicLetterA, SysUTF8ToWide("\xF0\x90\x8C\x80"));
 
   // Error case. When Windows finds an invalid UTF-8 character, it just skips
@@ -83,7 +86,8 @@
   EXPECT_EQ("Hello, world", SysWideToNativeMB(L"Hello, world"));
   EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToNativeMB(L"\x4f60\x597d"));
 
-  // >16 bits
+  // A value outside of the BMP and therefore not representable with one UTF-16
+  // code unit.
   EXPECT_EQ("\xF0\x90\x8C\x80", SysWideToNativeMB(kSysWideOldItalicLetterA));
 
   // Error case. When Windows finds a UTF-16 character going off the end of
@@ -114,7 +118,9 @@
 #endif
   EXPECT_EQ(L"Hello, world", SysNativeMBToWide("Hello, world"));
   EXPECT_EQ(L"\x4f60\x597d", SysNativeMBToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
-  // >16 bits
+
+  // A value outside of the BMP and therefore not representable with one UTF-16
+  // code unit.
   EXPECT_EQ(kSysWideOldItalicLetterA, SysNativeMBToWide("\xF0\x90\x8C\x80"));
 
   // Error case. When Windows finds an invalid UTF-8 character, it just skips
diff --git a/base/strings/to_string.h b/base/strings/to_string.h
index 288f1af..eae7b94 100644
--- a/base/strings/to_string.h
+++ b/base/strings/to_string.h
@@ -9,13 +9,18 @@
 #include <memory>
 #include <sstream>
 #include <string>
+#include <tuple>
 #include <type_traits>
+#include <utility>
 
 #include "base/template_util.h"
 #include "base/types/supports_ostream_operator.h"
 
 namespace base {
 
+template <typename... Ts>
+std::string ToString(const Ts&... values);
+
 namespace internal {
 
 template <typename T>
@@ -91,6 +96,23 @@
   }
 };
 
+// Tuples. Will recursively apply `ToString()` to each value in the tuple.
+template <typename... T>
+struct ToStringHelper<std::tuple<T...>> {
+  template <size_t... I>
+  static void StringifyHelper(const std::tuple<T...>& values,
+                              std::index_sequence<I...>,
+                              std::ostringstream& ss) {
+    ss << "<";
+    (..., (ss << (I == 0 ? "" : ", "), ss << ToString(std::get<I>(values))));
+    ss << ">";
+  }
+
+  static void Stringify(const std::tuple<T...>& v, std::ostringstream& ss) {
+    StringifyHelper(v, std::make_index_sequence<sizeof...(T)>(), ss);
+  }
+};
+
 }  // namespace internal
 
 // Converts any type to a string, preferring defined operator<<() or ToString()
@@ -98,9 +120,8 @@
 template <typename... Ts>
 std::string ToString(const Ts&... values) {
   std::ostringstream ss;
-  (internal::ToStringHelper<remove_cvref_t<decltype(values)>>::Stringify(values,
-                                                                         ss),
-   ...);
+  (..., internal::ToStringHelper<remove_cvref_t<decltype(values)>>::Stringify(
+            values, ss));
   return ss.str();
 }
 
diff --git a/base/strings/to_string_unittest.cc b/base/strings/to_string_unittest.cc
index e047eff..dca285f 100644
--- a/base/strings/to_string_unittest.cc
+++ b/base/strings/to_string_unittest.cc
@@ -91,6 +91,13 @@
   EXPECT_EQ(ToString("42 in hex is ", std::hex, 42), "42 in hex is 2a");
 }
 
+TEST(ToStringTest, Tuple) {
+  // Tuples should correctly format the contained types.
+  EXPECT_EQ(ToString(std::make_tuple(StreamableTestEnum::kGreeting,
+                                     HasToString(), "a string")),
+            "<hello, yay!, a string>");
+}
+
 void Func() {}
 
 TEST(ToStringTest, FunctionPointer) {
diff --git a/base/strings/utf_ostream_operators.cc b/base/strings/utf_ostream_operators.cc
index 2e28c99..39f93c4 100644
--- a/base/strings/utf_ostream_operators.cc
+++ b/base/strings/utf_ostream_operators.cc
@@ -5,6 +5,7 @@
 #include "base/strings/utf_ostream_operators.h"
 
 #include "base/strings/utf_string_conversions.h"
+#include "base/types/supports_ostream_operator.h"
 
 std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) {
   return out << (wstr ? std::wstring_view(wstr) : std::wstring_view());
diff --git a/base/strings/utf_string_conversion_utils.cc b/base/strings/utf_string_conversion_utils.cc
index d342ae0..8263204 100644
--- a/base/strings/utf_string_conversion_utils.cc
+++ b/base/strings/utf_string_conversion_utils.cc
@@ -11,13 +11,13 @@
 
 // CountUnicodeCharacters ------------------------------------------------------
 
-absl::optional<size_t> CountUnicodeCharacters(const char16_t* src,
-                                              size_t src_len,
+absl::optional<size_t> CountUnicodeCharacters(std::string_view text,
                                               size_t limit) {
   base_icu::UChar32 unused = 0;
   size_t count = 0;
-  for (size_t index = 0; count < limit && index < src_len; ++count, ++index) {
-    if (!ReadUnicodeCharacter(src, src_len, &index, &unused)) {
+  for (size_t index = 0; count < limit && index < text.size();
+       ++count, ++index) {
+    if (!ReadUnicodeCharacter(text.data(), text.size(), &index, &unused)) {
       return absl::nullopt;
     }
   }
diff --git a/base/strings/utf_string_conversion_utils.h b/base/strings/utf_string_conversion_utils.h
index 8658a19..01b5347 100644
--- a/base/strings/utf_string_conversion_utils.h
+++ b/base/strings/utf_string_conversion_utils.h
@@ -11,7 +11,9 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <limits>
 #include <string>
+#include <string_view>
 
 #include "base/base_export.h"
 #include "base/third_party/icu/icu_utf.h"
@@ -43,10 +45,9 @@
 // CountUnicodeCharacters ------------------------------------------------------
 
 // Returns the number of Unicode characters in `text`, up to the supplied
-// `limit`, if `text` contains valid UTF-16. Returns `nullopt` otherwise.
+// `limit`, if `text` contains valid UTF-8. Returns `nullopt` otherwise.
 BASE_EXPORT absl::optional<size_t> CountUnicodeCharacters(
-    const char16_t* src,
-    size_t src_len,
+    std::string_view text,
     size_t limit = std::numeric_limits<size_t>::max());
 
 // ReadUnicodeCharacter --------------------------------------------------------
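Call sites move from the (pointer, length) UTF-16 form to a single UTF-8 string_view argument. A minimal usage sketch (the helper name is illustrative):

  #include <cstddef>
  #include <string_view>
  #include "base/strings/utf_string_conversion_utils.h"

  size_t CountOrZero(std::string_view utf8) {
    // Counts Unicode code points in valid UTF-8; nullopt means invalid input.
    return base::CountUnicodeCharacters(utf8).value_or(0);
  }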
diff --git a/base/strings/utf_string_conversion_utils_unittest.cc b/base/strings/utf_string_conversion_utils_unittest.cc
index 2971d09..557eb5e 100644
--- a/base/strings/utf_string_conversion_utils_unittest.cc
+++ b/base/strings/utf_string_conversion_utils_unittest.cc
@@ -12,19 +12,24 @@
 namespace base {
 
 TEST(UtfStringConversionUtilsTest, CountUnicodeCharacters) {
-  struct TestCase {
-    std::u16string value;
+  const struct TestCase {
+    std::string value;
     size_t limit;
     absl::optional<size_t> count;
   } test_cases[] = {
-      {u"", 0, 0},           {u"abc", 1, 1},
-      {u"abc", 3, 3},        {u"abc", 0, 0},
-      {u"abc", 4, 3},        {u"abc\U0001F4A9", 4, 4},
-      {u"\U0001F4A9", 1, 1}, {{1, 0xD801u}, 5, absl::nullopt},
+      {"", 0, 0},
+      {"abc", 1, 1},
+      {"abc", 3, 3},
+      {"abc", 0, 0},
+      {"abc", 4, 3},
+      // The casts and u8 string literals are needed here so that we don't
+      // trigger linter errors about invalid ASCII values.
+      {reinterpret_cast<const char*>(u8"abc\U0001F4A9"), 4, 4},
+      {reinterpret_cast<const char*>(u8"\U0001F4A9"), 1, 1},
+      {{1, static_cast<char>(-1)}, 5, absl::nullopt},
   };
   for (const auto& test_case : test_cases) {
-    EXPECT_EQ(CountUnicodeCharacters(test_case.value.data(),
-                                     test_case.value.length(), test_case.limit),
+    EXPECT_EQ(CountUnicodeCharacters(test_case.value, test_case.limit),
               test_case.count);
   }
 }
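
The new signature takes a UTF-8 std::string_view plus an optional limit instead of a char16_t pointer/length pair. A minimal caller-side sketch of the new contract, assuming the function lives in namespace base as the unit test above implies (sketch only, not part of this change):

#include <string_view>
#include "base/strings/utf_string_conversion_utils.h"

size_t CodePointsOrZero(std::string_view utf8) {
  // Returns nullopt when `utf8` is not valid UTF-8 (e.g. a stray 0xFF byte).
  absl::optional<size_t> count = base::CountUnicodeCharacters(utf8);
  return count.value_or(0);
}

// CodePointsOrZero("abc")                 == 3
// CodePointsOrZero("abc\xF0\x9F\x92\xA9") == 4  (U+1F4A9 counts as one character)
// CodePointsOrZero("\xFF")                == 0  (invalid UTF-8)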
diff --git a/base/strings/utf_string_conversions.cc b/base/strings/utf_string_conversions.cc
index eca48f0..fa95913 100644
--- a/base/strings/utf_string_conversions.cc
+++ b/base/strings/utf_string_conversions.cc
@@ -12,6 +12,7 @@
 
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
+#include "base/strings/utf_ostream_operators.h"
 #include "base/strings/utf_string_conversion_utils.h"
 #include "base/third_party/icu/icu_utf.h"
 #include "build/build_config.h"
@@ -66,9 +67,9 @@
 // Convenience typedef that checks whether the passed in type is integral (i.e.
 // bool, char, int or their extended versions) and is of the correct size.
 template <typename Char, size_t N>
-using EnableIfBitsAre = std::enable_if_t<std::is_integral<Char>::value &&
-                                             CHAR_BIT * sizeof(Char) == N,
-                                         bool>;
+using EnableIfBitsAre =
+    std::enable_if_t<std::is_integral_v<Char> && CHAR_BIT * sizeof(Char) == N,
+                     bool>;
 
 template <typename Char, EnableIfBitsAre<Char, 8> = true>
 void UnicodeAppendUnsafe(Char* out,
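
The alias above only swaps std::is_integral<Char>::value for std::is_integral_v<Char>; behavior is unchanged. For reference, a standalone sketch of the same SFINAE dispatch-by-bit-width pattern; the AppendAs name is illustrative, not from base:

#include <climits>
#include <cstddef>
#include <type_traits>

template <typename Char, std::size_t N>
using EnableIfBitsAre =
    std::enable_if_t<std::is_integral_v<Char> && CHAR_BIT * sizeof(Char) == N,
                     bool>;

template <typename Char, EnableIfBitsAre<Char, 8> = true>
const char* AppendAs(Char) { return "8-bit overload"; }

template <typename Char, EnableIfBitsAre<Char, 16> = true>
const char* AppendAs(Char) { return "16-bit overload"; }

// AppendAs('a')   -> "8-bit overload"
// AppendAs(u'a')  -> "16-bit overload" (char16_t is 16 bits on these platforms)
// AppendAs(1.0)   -> ill-formed: double is not an integral type.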
diff --git a/base/strings/utf_string_conversions.h b/base/strings/utf_string_conversions.h
index 29a65ed..c476dbe 100644
--- a/base/strings/utf_string_conversions.h
+++ b/base/strings/utf_string_conversions.h
@@ -67,27 +67,24 @@
 
 // The conversion functions in this file should not be used to convert string
 // literals. Instead, the corresponding prefixes (e.g. u"" for UTF16 or L"" for
-// Wide) should be used. Deleting the overloads here catches these cases at
-// compile time.
+// Wide) should be used. Catch those cases with overloads that assert at compile
+// time.
 template <size_t N>
-std::u16string WideToUTF16(const wchar_t (&str)[N]) {
+[[noreturn]] std::u16string WideToUTF16(const wchar_t (&str)[N]) {
   static_assert(AlwaysFalse<decltype(N)>,
-                "Error: Use the u\"...\" prefix instead.");
-  return std::u16string();
+                "Error: Use u\"...\" to create a std::u16string literal.");
 }
 
 template <size_t N>
-std::u16string UTF8ToUTF16(const char (&str)[N]) {
+[[noreturn]] std::u16string UTF8ToUTF16(const char (&str)[N]) {
   static_assert(AlwaysFalse<decltype(N)>,
-                "Error: Use the u\"...\" prefix instead.");
-  return std::u16string();
+                "Error: Use u\"...\" to create a std::u16string literal.");
 }
 
 template <size_t N>
-std::u16string ASCIIToUTF16(const char (&str)[N]) {
+[[noreturn]] std::u16string ASCIIToUTF16(const char (&str)[N]) {
   static_assert(AlwaysFalse<decltype(N)>,
-                "Error: Use the u\"...\" prefix instead.");
-  return std::u16string();
+                "Error: Use u\"...\" to create a std::u16string literal.");
 }
 
 // Mutable character arrays are usually only populated during runtime. Continue
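
The rewritten overloads keep the compile-time trap for string literals but mark them [[noreturn]] and drop the unreachable return. A self-contained sketch of the technique, with an illustrative AlwaysFalse helper standing in for base's (assumed to behave the same way):

#include <cstddef>
#include <string>

// Dependent-false helper: the static_assert below only fires when the
// poisoned overload is actually instantiated.
template <typename T>
inline constexpr bool AlwaysFalse = false;

template <std::size_t N>
[[noreturn]] std::u16string UTF8ToUTF16(const char (&)[N]) {
  static_assert(AlwaysFalse<char[N]>,
                "Error: Use u\"...\" to create a std::u16string literal.");
}

// UTF8ToUTF16("hello")  -> fails to compile with the message above.
// A runtime `const char*` or StringPiece argument never matches this overload
// and resolves to the normal conversion function (not shown here).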
diff --git a/base/supports_user_data.cc b/base/supports_user_data.cc
index f8a569d..d8d2fb0 100644
--- a/base/supports_user_data.cc
+++ b/base/supports_user_data.cc
@@ -5,7 +5,6 @@
 #include "base/supports_user_data.h"
 
 #include "base/feature_list.h"
-#include "base/features.h"
 #include "base/sequence_checker.h"
 
 namespace base {
@@ -14,10 +13,7 @@
   return nullptr;
 }
 
-SupportsUserData::SupportsUserData()
-    : user_data_(FeatureList::IsEnabled(features::kSupportsUserDataFlatHashMap)
-                     ? MapVariants(FlatDataMap())
-                     : MapVariants(DataMap())) {
+SupportsUserData::SupportsUserData() {
   // Harmless to construct on a different execution sequence to subsequent
   // usage.
   DETACH_FROM_SEQUENCE(sequence_checker_);
@@ -30,15 +26,11 @@
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   // Avoid null keys; they are too vulnerable to collision.
   DCHECK(key);
-  return absl::visit(
-      [key](const auto& map) -> Data* {
-        auto found = map.find(key);
-        if (found != map.end()) {
-          return found->second.get();
-        }
-        return nullptr;
-      },
-      user_data_);
+  auto found = user_data_.find(key);
+  if (found != user_data_.end()) {
+    return found->second.get();
+  }
+  return nullptr;
 }
 
 std::unique_ptr<SupportsUserData::Data> SupportsUserData::TakeUserData(
@@ -46,28 +38,25 @@
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   // Null keys are too vulnerable to collision.
   CHECK(key);
-  return absl::visit(
-      [key](auto& map) -> std::unique_ptr<SupportsUserData::Data> {
-        auto found = map.find(key);
-        if (found != map.end()) {
-          std::unique_ptr<SupportsUserData::Data> deowned;
-          deowned.swap(found->second);
-          map.erase(key);
-          return deowned;
-        }
-        return nullptr;
-      },
-      user_data_);
+  auto found = user_data_.find(key);
+  if (found != user_data_.end()) {
+    std::unique_ptr<SupportsUserData::Data> deowned;
+    deowned.swap(found->second);
+    user_data_.erase(key);
+    return deowned;
+  }
+  return nullptr;
 }
 
 void SupportsUserData::SetUserData(const void* key,
                                    std::unique_ptr<Data> data) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  CHECK(!in_destructor_) << "Calling SetUserData() when SupportsUserData is "
+                            "being destroyed is not supported.";
   // Avoid null keys; they are too vulnerable to collision.
   DCHECK(key);
   if (data.get()) {
-    absl::visit([key, &data](auto& map) { map[key] = std::move(data); },
-                user_data_);
+    user_data_[key] = std::move(data);
   } else {
     RemoveUserData(key);
   }
@@ -75,26 +64,22 @@
 
 void SupportsUserData::RemoveUserData(const void* key) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-  absl::visit(
-      [key](auto& map) {
-        auto it = map.find(key);
-        if (it != map.end()) {
-          // Remove the entry from the map before deleting `owned_data` to avoid
-          // reentrancy issues when `owned_data` owns `this`. Otherwise:
-          //
-          // 1. `RemoveUserData()` calls `erase()`.
-          // 2. `erase()` deletes `owned_data`.
-          // 3. `owned_data` deletes `this`.
-          //
-          // At this point, `erase()` is still on the stack even though the
-          // backing map (owned by `this`) has already been destroyed, and it
-          // may simply crash, cause a use-after-free, or any other number of
-          // interesting things.
-          auto owned_data = std::move(it->second);
-          map.erase(it);
-        }
-      },
-      user_data_);
+  auto it = user_data_.find(key);
+  if (it != user_data_.end()) {
+    // Remove the entry from the map before deleting `owned_data` to avoid
+    // reentrancy issues when `owned_data` owns `this`. Otherwise:
+    //
+    // 1. `RemoveUserData()` calls `erase()`.
+    // 2. `erase()` deletes `owned_data`.
+    // 3. `owned_data` deletes `this`.
+    //
+    // At this point, `erase()` is still on the stack even though the
+    // backing map (owned by `this`) has already been destroyed, and it
+    // may simply crash, cause a use-after-free, or any other number of
+    // interesting things.
+    auto owned_data = std::move(it->second);
+    user_data_.erase(it);
+  }
 }
 
 void SupportsUserData::DetachFromSequence() {
@@ -102,24 +87,21 @@
 }
 
 void SupportsUserData::CloneDataFrom(const SupportsUserData& other) {
-  absl::visit(
-      [this](const auto& other_map) {
-        for (const auto& data_pair : other_map) {
-          auto cloned_data = data_pair.second->Clone();
-          if (cloned_data) {
-            SetUserData(data_pair.first, std::move(cloned_data));
-          }
-        }
-      },
-      other.user_data_);
+  for (const auto& data_pair : other.user_data_) {
+    auto cloned_data = data_pair.second->Clone();
+    if (cloned_data) {
+      SetUserData(data_pair.first, std::move(cloned_data));
+    }
+  }
 }
 
 SupportsUserData::~SupportsUserData() {
-  if (!absl::visit([](const auto& map) { return map.empty(); }, user_data_)) {
+  if (!user_data_.empty()) {
     DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   }
-  MapVariants local_user_data;
-  user_data_.swap(local_user_data);
+  in_destructor_ = true;
+  absl::flat_hash_map<const void*, std::unique_ptr<Data>> local_user_data;
+  user_data_.swap(local_user_data);
   // Now this->user_data_ is empty, and any destructors called transitively from
   // the destruction of |local_user_data| will see it that way instead of
   // examining a being-destroyed object.
@@ -127,7 +109,7 @@
 
 void SupportsUserData::ClearAllUserData() {
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-  absl::visit([](auto& map) { map.clear(); }, user_data_);
+  user_data_.clear();
 }
 
 }  // namespace base
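
The RemoveUserData() comment above spells out why ownership is detached and the map entry erased before the stored object's destructor runs. A generic sketch of that ordering, using an illustrative stand-in rather than the real SupportsUserData:

#include <map>
#include <memory>

// Illustrative stand-in; not the real base class.
class UserDataHost {
 public:
  struct Data { virtual ~Data() = default; };

  void Remove(const void* key) {
    auto it = data_.find(key);
    if (it == data_.end()) {
      return;
    }
    // Detach ownership first and erase the slot while the entry is still
    // alive, then let the entry's destructor run at end of scope. Even if
    // ~Data() deletes the object that owns this map, erase() has already
    // finished and is not on the stack when the map goes away. Letting
    // erase() destroy the entry directly would not have that guarantee.
    std::unique_ptr<Data> owned = std::move(it->second);
    data_.erase(it);
  }

  void Set(const void* key, std::unique_ptr<Data> data) {
    data_[key] = std::move(data);
  }

 private:
  std::map<const void*, std::unique_ptr<Data>> data_;
};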
diff --git a/base/supports_user_data.h b/base/supports_user_data.h
index cca7ed7..fc0d1e5 100644
--- a/base/supports_user_data.h
+++ b/base/supports_user_data.h
@@ -12,7 +12,6 @@
 #include "base/memory/scoped_refptr.h"
 #include "base/sequence_checker.h"
 #include "third_party/abseil-cpp/absl/container/flat_hash_map.h"
-#include "third_party/abseil-cpp/absl/types/variant.h"
 
 namespace base {
 
@@ -65,13 +64,9 @@
   void ClearAllUserData();
 
  private:
-  // Currently a variant for A/B testing purposes.
-  using DataMap = std::map<const void*, std::unique_ptr<Data>>;
-  using FlatDataMap = absl::flat_hash_map<const void*, std::unique_ptr<Data>>;
-  using MapVariants = absl::variant<DataMap, FlatDataMap>;
-
   // Externally-defined data accessible by key.
-  MapVariants user_data_;
+  absl::flat_hash_map<const void*, std::unique_ptr<Data>> user_data_;
+  bool in_destructor_ = false;
   // Guards usage of |user_data_|
   SEQUENCE_CHECKER(sequence_checker_);
 };
diff --git a/base/supports_user_data_unittest.cc b/base/supports_user_data_unittest.cc
index f3b93f8..d8be714 100644
--- a/base/supports_user_data_unittest.cc
+++ b/base/supports_user_data_unittest.cc
@@ -7,7 +7,8 @@
 #include "base/features.h"
 #include "base/memory/ptr_util.h"
 #include "base/memory/raw_ptr.h"
-#include "base/test/scoped_feature_list.h"
+#include "base/memory/raw_ref.h"
+#include "base/test/gtest_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -32,22 +33,9 @@
   raw_ptr<const void> key_;
 };
 
-class SupportsUserDataTest : public ::testing::TestWithParam<bool> {
- public:
-  SupportsUserDataTest() {
-    if (GetParam()) {
-      scoped_features_.InitWithFeatures(
-          {features::kSupportsUserDataFlatHashMap}, {});
-    } else {
-      scoped_features_.InitWithFeatures(
-          {}, {features::kSupportsUserDataFlatHashMap});
-    }
-  }
+using SupportsUserDataTest = ::testing::Test;
 
-  base::test::ScopedFeatureList scoped_features_;
-};
-
-TEST_P(SupportsUserDataTest, ClearWorksRecursively) {
+TEST_F(SupportsUserDataTest, ClearWorksRecursively) {
   char key = 0;  // Must outlive `supports_user_data`.
   TestSupportsUserData supports_user_data;
   supports_user_data.SetUserData(
@@ -57,7 +45,7 @@
 
 struct TestData : public SupportsUserData::Data {};
 
-TEST_P(SupportsUserDataTest, Movable) {
+TEST_F(SupportsUserDataTest, Movable) {
   TestSupportsUserData supports_user_data_1;
   char key1 = 0;
   supports_user_data_1.SetUserData(&key1, std::make_unique<TestData>());
@@ -73,7 +61,7 @@
   EXPECT_EQ(nullptr, supports_user_data_2.GetUserData(&key2));
 }
 
-TEST_P(SupportsUserDataTest, ClearAllUserData) {
+TEST_F(SupportsUserDataTest, ClearAllUserData) {
   TestSupportsUserData supports_user_data;
   char key1 = 0;
   supports_user_data.SetUserData(&key1, std::make_unique<TestData>());
@@ -89,7 +77,7 @@
   EXPECT_FALSE(supports_user_data.GetUserData(&key2));
 }
 
-TEST_P(SupportsUserDataTest, TakeUserData) {
+TEST_F(SupportsUserDataTest, TakeUserData) {
   TestSupportsUserData supports_user_data;
   char key1 = 0;
   supports_user_data.SetUserData(&key1, std::make_unique<TestData>());
@@ -120,16 +108,47 @@
 
 // Tests that removing a `SupportsUserData::Data` that owns a `SupportsUserData`
 // does not crash.
-TEST_P(SupportsUserDataTest, ReentrantRemoveUserData) {
+TEST_F(SupportsUserDataTest, ReentrantRemoveUserData) {
   DataOwnsSupportsUserData* data = new DataOwnsSupportsUserData;
   char key = 0;
   data->supports_user_data()->SetUserData(&key, WrapUnique(data));
   data->supports_user_data()->RemoveUserData(&key);
 }
 
-INSTANTIATE_TEST_SUITE_P(All,
-                         SupportsUserDataTest,
-                         testing::Values(false, true));
+TEST_F(SupportsUserDataTest, ReentrantSetUserDataDuringRemoval) {
+  static const char kKey = 0;
+
+  class ProblematicSet : public SupportsUserData::Data {
+   public:
+    explicit ProblematicSet(const void* const key,
+                            TestSupportsUserData& supports_user_data)
+        : key_(key), supports_user_data_(supports_user_data) {}
+
+    ~ProblematicSet() override {
+      supports_user_data_->SetUserData(
+          key_, std::make_unique<ProblematicSet>(key_, *supports_user_data_));
+    }
+
+   private:
+    const raw_ptr<const void> key_;
+    raw_ref<TestSupportsUserData> supports_user_data_;
+  };
+  {
+    absl::optional<TestSupportsUserData> supports_user_data;
+    supports_user_data.emplace();
+    // This awkward construction is required since death tests are typically
+    // implemented using `fork()`, so calling `SetUserData()` outside the
+    // `EXPECT_CHECK_DEATH()` macro will also crash the process that's trying to
+    // observe the crash.
+    EXPECT_CHECK_DEATH([&] {
+      supports_user_data->SetUserData(
+          &kKey, std::make_unique<ProblematicSet>(&kKey, *supports_user_data));
+      // Triggers the reentrant attempt to call `SetUserData()` during
+      // destruction.
+      supports_user_data.reset();
+    }());
+  }
+}
 
 }  // namespace
 }  // namespace base
diff --git a/base/synchronization/condition_variable_posix.cc b/base/synchronization/condition_variable_posix.cc
index 4a9d680..c497ea5 100644
--- a/base/synchronization/condition_variable_posix.cc
+++ b/base/synchronization/condition_variable_posix.cc
@@ -31,7 +31,7 @@
 // Use to evaluate if the hack is still needed. See https://crbug.com/517681.
 BASE_FEATURE(kSkipConditionVariableWakeupHack,
              "SkipConditionVariableWakeupHack",
-             base::FEATURE_DISABLED_BY_DEFAULT);
+             base::FEATURE_ENABLED_BY_DEFAULT);
 std::atomic_bool g_skip_wakeup_hack = false;
 #endif
 }  // namespace
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index 0ba6519..744aa9d 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -19,8 +19,8 @@
 #include <list>
 #include <memory>
 
+#include "base/apple/scoped_mach_port.h"
 #include "base/functional/callback_forward.h"
-#include "base/mac/scoped_mach_port.h"
 #include "base/memory/ref_counted.h"
 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
 #include <list>
@@ -203,7 +203,7 @@
     friend class RefCountedThreadSafe<ReceiveRight>;
     ~ReceiveRight();
 
-    mac::ScopedMachReceiveRight right_;
+    apple::ScopedMachReceiveRight right_;
   };
 
   const ResetPolicy policy_;
@@ -214,7 +214,7 @@
   // The send right used to signal the event. This can be disposed of with
   // the event, unlike the receive right, since a deleted event cannot be
   // signaled.
-  mac::ScopedMachSendRight send_right_;
+  apple::ScopedMachSendRight send_right_;
 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
   // On Windows, you must not close a HANDLE which is currently being waited on.
   // The MSDN documentation says that the resulting behaviour is 'undefined'.
diff --git a/base/synchronization/waitable_event_apple.cc b/base/synchronization/waitable_event_apple.cc
new file mode 100644
index 0000000..7d92456
--- /dev/null
+++ b/base/synchronization/waitable_event_apple.cc
@@ -0,0 +1,261 @@
+// Copyright 2017 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event.h"
+
+#include <mach/mach.h>
+#include <sys/event.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/apple/mach_logging.h"
+#include "base/files/scoped_file.h"
+#include "base/notreached.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/time/time.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+namespace base {
+
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+                             InitialState initial_state)
+    : policy_(reset_policy) {
+  mach_port_options_t options{};
+  options.flags = MPO_INSERT_SEND_RIGHT;
+  options.mpl.mpl_qlimit = 1;
+
+  mach_port_t name;
+  kern_return_t kr =
+      mach_port_construct(mach_task_self(), &options, /*context=*/0, &name);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_construct";
+
+  receive_right_ = new ReceiveRight(name);
+  send_right_.reset(name);
+
+  if (initial_state == InitialState::SIGNALED) {
+    Signal();
+  }
+}
+
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+  PeekPort(receive_right_->Name(), true);
+}
+
+void WaitableEvent::SignalImpl() {
+  mach_msg_empty_send_t msg{};
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
+  msg.header.msgh_size = sizeof(&msg);
+  msg.header.msgh_remote_port = send_right_.get();
+  // If the event is already signaled, this will time out because the queue
+  // has a length of one.
+  kern_return_t kr =
+      mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
+               /*rcv_size=*/0, /*rcv_name=*/MACH_PORT_NULL, /*timeout=*/0,
+               /*notify=*/MACH_PORT_NULL);
+  MACH_CHECK(kr == KERN_SUCCESS || kr == MACH_SEND_TIMED_OUT, kr) << "mach_msg";
+}
+
+bool WaitableEvent::IsSignaled() {
+  return PeekPort(receive_right_->Name(), policy_ == ResetPolicy::AUTOMATIC);
+}
+
+bool WaitableEvent::TimedWaitImpl(TimeDelta wait_delta) {
+  mach_msg_empty_rcv_t msg{};
+  msg.header.msgh_local_port = receive_right_->Name();
+
+  mach_msg_option_t options = MACH_RCV_MSG;
+
+  if (!wait_delta.is_max()) {
+    options |= MACH_RCV_TIMEOUT | MACH_RCV_INTERRUPT;
+  }
+
+  mach_msg_size_t rcv_size = sizeof(msg);
+  if (policy_ == ResetPolicy::MANUAL) {
+    // To avoid dequeuing the message, receive with a size of 0 and set
+    // MACH_RCV_LARGE to keep the message in the queue.
+    options |= MACH_RCV_LARGE;
+    rcv_size = 0;
+  }
+
+  // TimeTicks takes care of overflow but we special case is_max() nonetheless
+  // to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily (same for
+  // the increment step of the for loop if the condition variable returns
+  // early). Ref: https://crbug.com/910524#c7
+  const TimeTicks end_time =
+      wait_delta.is_max() ? TimeTicks::Max()
+                          : subtle::TimeTicksNowIgnoringOverride() + wait_delta;
+  // Fake |kr| value to bootstrap the for loop.
+  kern_return_t kr = MACH_RCV_INTERRUPTED;
+  for (mach_msg_timeout_t timeout =
+           wait_delta.is_max() ? MACH_MSG_TIMEOUT_NONE
+                               : saturated_cast<mach_msg_timeout_t>(
+                                     wait_delta.InMillisecondsRoundedUp());
+       // If the thread is interrupted during mach_msg(), the system call will
+       // be restarted. However, the libsyscall wrapper does not adjust the
+       // timeout by the amount of time already waited. Using MACH_RCV_INTERRUPT
+       // will instead return from mach_msg(), so that the call can be retried
+       // with an adjusted timeout.
+       kr == MACH_RCV_INTERRUPTED;
+       timeout = end_time.is_max()
+                     ? MACH_MSG_TIMEOUT_NONE
+                     : std::max(mach_msg_timeout_t{0},
+                                saturated_cast<mach_msg_timeout_t>(
+                                    (end_time -
+                                     subtle::TimeTicksNowIgnoringOverride())
+                                        .InMillisecondsRoundedUp()))) {
+    kr = mach_msg(&msg.header, options, /*send_size=*/0, rcv_size,
+                  receive_right_->Name(), timeout, /*notify=*/MACH_PORT_NULL);
+  }
+
+  if (kr == KERN_SUCCESS) {
+    return true;
+  } else if (rcv_size == 0 && kr == MACH_RCV_TOO_LARGE) {
+    return true;
+  } else {
+    MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
+    return false;
+  }
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, size_t count) {
+  DCHECK(count) << "Cannot wait on no events";
+  internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
+      FROM_HERE, BlockingType::MAY_BLOCK);
+  // On macOS 10.11+, using Mach port sets may cause system instability, per
+  // https://crbug.com/756102. On macOS 10.12+, a kqueue can be used
+  // instead to work around that.
+  enum WaitManyPrimitive {
+    KQUEUE,
+    PORT_SET,
+  };
+#if BUILDFLAG(IS_IOS)
+  const WaitManyPrimitive kPrimitive = PORT_SET;
+#else
+  const WaitManyPrimitive kPrimitive = KQUEUE;
+#endif
+  if (kPrimitive == KQUEUE) {
+    std::vector<kevent64_s> events(count);
+    for (size_t i = 0; i < count; ++i) {
+      EV_SET64(&events[i], raw_waitables[i]->receive_right_->Name(),
+               EVFILT_MACHPORT, EV_ADD, 0, 0, i, 0, 0);
+    }
+
+    std::vector<kevent64_s> out_events(count);
+
+    ScopedFD wait_many(kqueue());
+    PCHECK(wait_many.is_valid()) << "kqueue";
+
+    const int count_int = checked_cast<int>(count);
+    int rv = HANDLE_EINTR(kevent64(wait_many.get(), events.data(), count_int,
+                                   out_events.data(), count_int, /*flags=*/0,
+                                   /*timeout=*/nullptr));
+    PCHECK(rv > 0) << "kevent64";
+
+    size_t triggered = std::numeric_limits<size_t>::max();
+    for (size_t i = 0; i < static_cast<size_t>(rv); ++i) {
+      // WaitMany should return the lowest index in |raw_waitables| that was
+      // triggered.
+      size_t index = static_cast<size_t>(out_events[i].udata);
+      triggered = std::min(triggered, index);
+    }
+
+    if (raw_waitables[triggered]->policy_ == ResetPolicy::AUTOMATIC) {
+      // The message needs to be dequeued to reset the event.
+      PeekPort(raw_waitables[triggered]->receive_right_->Name(),
+               /*dequeue=*/true);
+    }
+
+    return triggered;
+  } else {
+    DCHECK_EQ(kPrimitive, PORT_SET);
+
+    kern_return_t kr;
+
+    apple::ScopedMachPortSet port_set;
+    {
+      mach_port_t name;
+      kr =
+          mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &name);
+      MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_allocate";
+      port_set.reset(name);
+    }
+
+    for (size_t i = 0; i < count; ++i) {
+      kr = mach_port_insert_member(mach_task_self(),
+                                   raw_waitables[i]->receive_right_->Name(),
+                                   port_set.get());
+      MACH_CHECK(kr == KERN_SUCCESS, kr) << "index " << i;
+    }
+
+    mach_msg_empty_rcv_t msg{};
+    // Wait on the port set. Only specify space enough for the header, to
+    // identify which port in the set is signaled. Otherwise, receiving from the
+    // port set may dequeue a message for a manual-reset event object, which
+    // would cause it to be reset.
+    kr = mach_msg(&msg.header,
+                  MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY,
+                  /*send_size=*/0, sizeof(msg.header), port_set.get(),
+                  /*timeout=*/0, /*notify=*/MACH_PORT_NULL);
+    MACH_CHECK(kr == MACH_RCV_TOO_LARGE, kr) << "mach_msg";
+
+    for (size_t i = 0; i < count; ++i) {
+      WaitableEvent* event = raw_waitables[i];
+      if (msg.header.msgh_local_port == event->receive_right_->Name()) {
+        if (event->policy_ == ResetPolicy::AUTOMATIC) {
+          // The message needs to be dequeued to reset the event.
+          PeekPort(msg.header.msgh_local_port, true);
+        }
+        return i;
+      }
+    }
+
+    NOTREACHED();
+    return 0;
+  }
+}
+
+// static
+bool WaitableEvent::PeekPort(mach_port_t port, bool dequeue) {
+  if (dequeue) {
+    mach_msg_empty_rcv_t msg{};
+    msg.header.msgh_local_port = port;
+    kern_return_t kr =
+        mach_msg(&msg.header, MACH_RCV_MSG | MACH_RCV_TIMEOUT, /*send_size=*/0,
+                 sizeof(msg), port, /*timeout=*/0, /*notify=*/MACH_PORT_NULL);
+    if (kr == KERN_SUCCESS) {
+      return true;
+    } else {
+      MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
+      return false;
+    }
+  } else {
+    mach_port_seqno_t seqno = 0;
+    mach_msg_size_t size;
+    mach_msg_id_t id;
+    mach_msg_trailer_t trailer;
+    mach_msg_type_number_t trailer_size = sizeof(trailer);
+    kern_return_t kr = mach_port_peek(
+        mach_task_self(), port, MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_NULL),
+        &seqno, &size, &id, reinterpret_cast<mach_msg_trailer_info_t>(&trailer),
+        &trailer_size);
+    if (kr == KERN_SUCCESS) {
+      return true;
+    } else {
+      MACH_CHECK(kr == KERN_FAILURE, kr) << "mach_port_peek";
+      return false;
+    }
+  }
+}
+
+WaitableEvent::ReceiveRight::ReceiveRight(mach_port_t name) : right_(name) {}
+
+WaitableEvent::ReceiveRight::~ReceiveRight() = default;
+
+}  // namespace base
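
For orientation, a minimal usage sketch of the cross-platform API this file implements (declared in base/synchronization/waitable_event.h); the behavior noted in the comments follows from the qlimit-1 Mach port scheme above:

#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"

bool ExampleWait() {
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);
  event.Signal();  // Enqueues the single Mach message (queue limit is 1).
  const bool signaled = event.TimedWait(base::Seconds(1));  // Returns promptly.
  // AUTOMATIC reset dequeued the message, so the event reads unsignaled again.
  return signaled && !event.IsSignaled();
}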
diff --git a/base/synchronization/waitable_event_mac.cc b/base/synchronization/waitable_event_mac.cc
deleted file mode 100644
index 4000796..0000000
--- a/base/synchronization/waitable_event_mac.cc
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/synchronization/waitable_event.h"
-
-#include <mach/mach.h>
-#include <sys/event.h>
-
-#include <limits>
-#include <memory>
-
-#include "base/files/scoped_file.h"
-#include "base/mac/mach_logging.h"
-#include "base/notreached.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/threading/scoped_blocking_call.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/time/time.h"
-#include "base/time/time_override.h"
-#include "build/build_config.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-
-namespace base {
-
-WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
-                             InitialState initial_state)
-    : policy_(reset_policy) {
-  mach_port_options_t options{};
-  options.flags = MPO_INSERT_SEND_RIGHT;
-  options.mpl.mpl_qlimit = 1;
-
-  mach_port_t name;
-  kern_return_t kr = mach_port_construct(mach_task_self(), &options, 0, &name);
-  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_construct";
-
-  receive_right_ = new ReceiveRight(name);
-  send_right_.reset(name);
-
-  if (initial_state == InitialState::SIGNALED)
-    Signal();
-}
-
-WaitableEvent::~WaitableEvent() = default;
-
-void WaitableEvent::Reset() {
-  PeekPort(receive_right_->Name(), true);
-}
-
-void WaitableEvent::SignalImpl() {
-  mach_msg_empty_send_t msg{};
-  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
-  msg.header.msgh_size = sizeof(&msg);
-  msg.header.msgh_remote_port = send_right_.get();
-  // If the event is already signaled, this will time out because the queue
-  // has a length of one.
-  kern_return_t kr =
-      mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg), 0,
-               MACH_PORT_NULL, 0, MACH_PORT_NULL);
-  MACH_CHECK(kr == KERN_SUCCESS || kr == MACH_SEND_TIMED_OUT, kr) << "mach_msg";
-}
-
-bool WaitableEvent::IsSignaled() {
-  return PeekPort(receive_right_->Name(), policy_ == ResetPolicy::AUTOMATIC);
-}
-
-bool WaitableEvent::TimedWaitImpl(TimeDelta wait_delta) {
-  mach_msg_empty_rcv_t msg{};
-  msg.header.msgh_local_port = receive_right_->Name();
-
-  mach_msg_option_t options = MACH_RCV_MSG;
-
-  if (!wait_delta.is_max())
-    options |= MACH_RCV_TIMEOUT | MACH_RCV_INTERRUPT;
-
-  mach_msg_size_t rcv_size = sizeof(msg);
-  if (policy_ == ResetPolicy::MANUAL) {
-    // To avoid dequeing the message, receive with a size of 0 and set
-    // MACH_RCV_LARGE to keep the message in the queue.
-    options |= MACH_RCV_LARGE;
-    rcv_size = 0;
-  }
-
-  // TimeTicks takes care of overflow but we special case is_max() nonetheless
-  // to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily (same for
-  // the increment step of the for loop if the condition variable returns
-  // early). Ref: https://crbug.com/910524#c7
-  const TimeTicks end_time =
-      wait_delta.is_max() ? TimeTicks::Max()
-                          : subtle::TimeTicksNowIgnoringOverride() + wait_delta;
-  // Fake |kr| value to boostrap the for loop.
-  kern_return_t kr = MACH_RCV_INTERRUPTED;
-  for (mach_msg_timeout_t timeout =
-           wait_delta.is_max() ? MACH_MSG_TIMEOUT_NONE
-                               : saturated_cast<mach_msg_timeout_t>(
-                                     wait_delta.InMillisecondsRoundedUp());
-       // If the thread is interrupted during mach_msg(), the system call will
-       // be restarted. However, the libsyscall wrapper does not adjust the
-       // timeout by the amount of time already waited. Using MACH_RCV_INTERRUPT
-       // will instead return from mach_msg(), so that the call can be retried
-       // with an adjusted timeout.
-       kr == MACH_RCV_INTERRUPTED;
-       timeout = end_time.is_max()
-                     ? MACH_MSG_TIMEOUT_NONE
-                     : std::max(mach_msg_timeout_t{0},
-                                saturated_cast<mach_msg_timeout_t>(
-                                    (end_time -
-                                     subtle::TimeTicksNowIgnoringOverride())
-                                        .InMillisecondsRoundedUp()))) {
-    kr = mach_msg(&msg.header, options, 0, rcv_size, receive_right_->Name(),
-                  timeout, MACH_PORT_NULL);
-  }
-
-  if (kr == KERN_SUCCESS) {
-    return true;
-  } else if (rcv_size == 0 && kr == MACH_RCV_TOO_LARGE) {
-    return true;
-  } else {
-    MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
-    return false;
-  }
-}
-
-// static
-size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, size_t count) {
-  DCHECK(count) << "Cannot wait on no events";
-  internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
-      FROM_HERE, BlockingType::MAY_BLOCK);
-  // On macOS 10.11+, using Mach port sets may cause system instability, per
-  // https://crbug.com/756102. On macOS 10.12+, a kqueue can be used
-  // instead to work around that.
-  enum WaitManyPrimitive {
-    KQUEUE,
-    PORT_SET,
-  };
-#if BUILDFLAG(IS_IOS)
-  const WaitManyPrimitive kPrimitive = PORT_SET;
-#else
-  const WaitManyPrimitive kPrimitive = KQUEUE;
-#endif
-  if (kPrimitive == KQUEUE) {
-    std::vector<kevent64_s> events(count);
-    for (size_t i = 0; i < count; ++i) {
-      EV_SET64(&events[i], raw_waitables[i]->receive_right_->Name(),
-               EVFILT_MACHPORT, EV_ADD, 0, 0, i, 0, 0);
-    }
-
-    std::vector<kevent64_s> out_events(count);
-
-    ScopedFD wait_many(kqueue());
-    PCHECK(wait_many.is_valid()) << "kqueue";
-
-    const int count_int = checked_cast<int>(count);
-    int rv = HANDLE_EINTR(kevent64(wait_many.get(), events.data(), count_int,
-                                   out_events.data(), count_int, 0, nullptr));
-    PCHECK(rv > 0) << "kevent64";
-
-    size_t triggered = std::numeric_limits<size_t>::max();
-    for (size_t i = 0; i < static_cast<size_t>(rv); ++i) {
-      // WaitMany should return the lowest index in |raw_waitables| that was
-      // triggered.
-      size_t index = static_cast<size_t>(out_events[i].udata);
-      triggered = std::min(triggered, index);
-    }
-
-    if (raw_waitables[triggered]->policy_ == ResetPolicy::AUTOMATIC) {
-      // The message needs to be dequeued to reset the event.
-      PeekPort(raw_waitables[triggered]->receive_right_->Name(), true);
-    }
-
-    return triggered;
-  } else {
-    DCHECK_EQ(kPrimitive, PORT_SET);
-
-    kern_return_t kr;
-
-    mac::ScopedMachPortSet port_set;
-    {
-      mach_port_t name;
-      kr =
-          mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &name);
-      MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_allocate";
-      port_set.reset(name);
-    }
-
-    for (size_t i = 0; i < count; ++i) {
-      kr = mach_port_insert_member(mach_task_self(),
-                                   raw_waitables[i]->receive_right_->Name(),
-                                   port_set.get());
-      MACH_CHECK(kr == KERN_SUCCESS, kr) << "index " << i;
-    }
-
-    mach_msg_empty_rcv_t msg{};
-    // Wait on the port set. Only specify space enough for the header, to
-    // identify which port in the set is signaled. Otherwise, receiving from the
-    // port set may dequeue a message for a manual-reset event object, which
-    // would cause it to be reset.
-    kr = mach_msg(&msg.header,
-                  MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY, 0,
-                  sizeof(msg.header), port_set.get(), 0, MACH_PORT_NULL);
-    MACH_CHECK(kr == MACH_RCV_TOO_LARGE, kr) << "mach_msg";
-
-    for (size_t i = 0; i < count; ++i) {
-      WaitableEvent* event = raw_waitables[i];
-      if (msg.header.msgh_local_port == event->receive_right_->Name()) {
-        if (event->policy_ == ResetPolicy::AUTOMATIC) {
-          // The message needs to be dequeued to reset the event.
-          PeekPort(msg.header.msgh_local_port, true);
-        }
-        return i;
-      }
-    }
-
-    NOTREACHED();
-    return 0;
-  }
-}
-
-// static
-bool WaitableEvent::PeekPort(mach_port_t port, bool dequeue) {
-  if (dequeue) {
-    mach_msg_empty_rcv_t msg{};
-    msg.header.msgh_local_port = port;
-    kern_return_t kr = mach_msg(&msg.header, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
-                                sizeof(msg), port, 0, MACH_PORT_NULL);
-    if (kr == KERN_SUCCESS) {
-      return true;
-    } else {
-      MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
-      return false;
-    }
-  } else {
-    mach_port_seqno_t seqno = 0;
-    mach_msg_size_t size;
-    mach_msg_id_t id;
-    mach_msg_trailer_t trailer;
-    mach_msg_type_number_t trailer_size = sizeof(trailer);
-    kern_return_t kr = mach_port_peek(
-        mach_task_self(), port, MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_NULL),
-        &seqno, &size, &id, reinterpret_cast<mach_msg_trailer_info_t>(&trailer),
-        &trailer_size);
-    if (kr == KERN_SUCCESS) {
-      return true;
-    } else {
-      MACH_CHECK(kr == KERN_FAILURE, kr) << "mach_port_peek";
-      return false;
-    }
-  }
-}
-
-WaitableEvent::ReceiveRight::ReceiveRight(mach_port_t name) : right_(name) {}
-
-WaitableEvent::ReceiveRight::~ReceiveRight() = default;
-
-}  // namespace base
diff --git a/base/synchronization/waitable_event_perftest.cc b/base/synchronization/waitable_event_perftest.cc
index 39cf7bc..8820a6e 100644
--- a/base/synchronization/waitable_event_perftest.cc
+++ b/base/synchronization/waitable_event_perftest.cc
@@ -61,7 +61,7 @@
 
   bool TimedWaitUntil(const TimeTicks& end_time) {
     ElapsedTimer timer;
-    const bool signaled = event_.TimedWait(end_time - timer.Begin());
+    const bool signaled = event_.TimedWait(end_time - timer.start_time());
     total_wait_time_ += timer.Elapsed();
     ++wait_samples_;
     return signaled;
diff --git a/base/synchronization/waitable_event_watcher_mac.cc b/base/synchronization/waitable_event_watcher_mac.cc
index f9bc3aa..f5e16e8 100644
--- a/base/synchronization/waitable_event_watcher_mac.cc
+++ b/base/synchronization/waitable_event_watcher_mac.cc
@@ -4,9 +4,9 @@
 
 #include "base/synchronization/waitable_event_watcher.h"
 
+#include "base/apple/scoped_dispatch_object.h"
 #include "base/functional/bind.h"
 #include "base/functional/callback.h"
-#include "base/mac/scoped_dispatch_object.h"
 
 namespace base {
 
@@ -14,7 +14,7 @@
   // A TYPE_MACH_RECV dispatch source on |receive_right_|. When a receive event
   // is delivered, the message queue will be peeked and the bound |callback_|
   // may be run. This will be null if nothing is currently being watched.
-  ScopedDispatchObject<dispatch_source_t> dispatch_source;
+  apple::ScopedDispatchObject<dispatch_source_t> dispatch_source;
 };
 
 WaitableEventWatcher::WaitableEventWatcher()
@@ -30,7 +30,7 @@
     scoped_refptr<SequencedTaskRunner> task_runner) {
   DCHECK(task_runner->RunsTasksInCurrentSequence());
   DCHECK(!storage_->dispatch_source ||
-         dispatch_source_testcancel(storage_->dispatch_source));
+         dispatch_source_testcancel(storage_->dispatch_source.get()));
 
   // Keep a reference to the receive right, so that if the event is deleted
   // out from under the watcher, a signal can still be observed.
@@ -53,7 +53,7 @@
   dispatch_source_t source = storage_->dispatch_source.get();
   mach_port_t name = receive_right_->Name();
 
-  dispatch_source_set_event_handler(storage_->dispatch_source, ^{
+  dispatch_source_set_event_handler(storage_->dispatch_source.get(), ^{
     // For automatic-reset events, only fire the callback if this watcher
     // can claim/dequeue the event. For manual-reset events, all watchers can
     // be called back.
@@ -68,7 +68,7 @@
     task_runner->PostTask(
         FROM_HERE, BindOnce(&WaitableEventWatcher::InvokeCallback, weak_this));
   });
-  dispatch_resume(storage_->dispatch_source);
+  dispatch_resume(storage_->dispatch_source.get());
 
   return true;
 }
@@ -77,7 +77,7 @@
   callback_.Reset();
   receive_right_ = nullptr;
   if (storage_->dispatch_source) {
-    dispatch_source_cancel(storage_->dispatch_source);
+    dispatch_source_cancel(storage_->dispatch_source.get());
     storage_->dispatch_source.reset();
   }
 }
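
The watcher changes above are mechanical (base::mac to base::apple, explicit .get() on the scoper), but the underlying pattern is a TYPE_MACH_RECV dispatch source. A generic libdispatch sketch of that pattern without base's scoper; the names are illustrative:

#include <dispatch/dispatch.h>
#include <mach/mach.h>

dispatch_source_t WatchReceiveRight(mach_port_t receive_right,
                                    dispatch_queue_t queue,
                                    void (^on_signal)(void)) {
  dispatch_source_t source = dispatch_source_create(
      DISPATCH_SOURCE_TYPE_MACH_RECV, receive_right, /*mask=*/0, queue);
  dispatch_source_set_event_handler(source, on_signal);
  dispatch_resume(source);  // Sources are created suspended.
  return source;  // Caller cancels with dispatch_source_cancel() when done.
}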
diff --git a/base/system/sys_info.cc b/base/system/sys_info.cc
index 4c6cbdf..82cebf2 100644
--- a/base/system/sys_info.cc
+++ b/base/system/sys_info.cc
@@ -84,42 +84,100 @@
   return IsLowEndDeviceImpl();
 }
 
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
 
 namespace {
 
-bool IsAndroid4GbOr6GbDevice() {
+enum class BucketizedSize {
+  k2GbOrLess,
+  k3Gb,
+  k4Gb,
+  k6Gb,
+  k8GbOrHigher,
+};
+
+BucketizedSize GetSystemRamBucketizedSize() {
+  int physical_memory = base::SysInfo::AmountOfPhysicalMemoryMB();
+
   // Because of Android carveouts, AmountOfPhysicalMemory() returns smaller
-  // than the actual memory size, So we will use a small lowerbound than 4GB
-  // to discriminate real 4GB devices from lower memory ones.
-  constexpr int kLowerBoundMB = 3.2 * 1024;
-  constexpr int kUpperBoundMB = 6 * 1024;
-  static bool is_4gb_or_6g_device =
-      kLowerBoundMB <= base::SysInfo::AmountOfPhysicalMemoryMB() &&
-      base::SysInfo::AmountOfPhysicalMemoryMB() <= kUpperBoundMB;
-  return is_4gb_or_6g_device;
+  // than the actual memory size, so we use a lower bound slightly smaller
+  // than "X" GB to discriminate real "X" GB devices from lower-memory ones.
+  // Addendum: This logic should also work for ChromeOS.
+
+  constexpr int kUpperBound2GB = 2 * 1024;  // inclusive
+  if (physical_memory <= kUpperBound2GB) {
+    return BucketizedSize::k2GbOrLess;
+  }
+
+  constexpr int kLowerBound3GB = kUpperBound2GB;  // exclusive
+  constexpr int kUpperBound3GB = 3.2 * 1024;      // inclusive
+  if (kLowerBound3GB < physical_memory && physical_memory <= kUpperBound3GB) {
+    return BucketizedSize::k3Gb;
+  }
+
+  constexpr int kLowerBound4GB = kUpperBound3GB;  // exclusive
+  constexpr int kUpperBound4GB = 4 * 1024;        // inclusive
+  if (kLowerBound4GB < physical_memory && physical_memory <= kUpperBound4GB) {
+    return BucketizedSize::k4Gb;
+  }
+
+  constexpr int kLowerBound6GB = kUpperBound4GB;  // exclusive
+  constexpr int kUpperBound6GB = 6.5 * 1024 - 1;  // inclusive
+  if (kLowerBound6GB < physical_memory && physical_memory <= kUpperBound6GB) {
+    return BucketizedSize::k6Gb;
+  }
+
+  return BucketizedSize::k8GbOrHigher;
+}
+
+BucketizedSize GetCachedSystemRamBucketizedSize() {
+  static BucketizedSize s_size = GetSystemRamBucketizedSize();
+  return s_size;
 }
 
 bool IsPartialLowEndModeOnMidRangeDevicesEnabled() {
   // TODO(crbug.com/1434873): make the feature not enable on 32-bit devices
   // before launching or going to high Stable %.
-  return IsAndroid4GbOr6GbDevice() &&
+  return SysInfo::Is4GbOr6GbDevice() &&
          base::FeatureList::IsEnabled(
              features::kPartialLowEndModeOnMidRangeDevices);
 }
 
+bool IsPartialLowEndModeOn3GbDevicesEnabled() {
+  return SysInfo::Is3GbDevice() &&
+         base::FeatureList::IsEnabled(features::kPartialLowEndModeOn3GbDevices);
+}
+
 }  // namespace
 
-#endif  // BUILDFLAG(IS_ANDROID)
+bool SysInfo::Is3GbDevice() {
+  return GetCachedSystemRamBucketizedSize() == BucketizedSize::k3Gb;
+}
+
+bool SysInfo::Is4GbDevice() {
+  return GetCachedSystemRamBucketizedSize() == BucketizedSize::k4Gb;
+}
+
+bool SysInfo::Is4GbOr6GbDevice() {
+  return GetCachedSystemRamBucketizedSize() == BucketizedSize::k4Gb ||
+         GetCachedSystemRamBucketizedSize() == BucketizedSize::k6Gb;
+}
+
+bool SysInfo::Is6GbDevice() {
+  return GetCachedSystemRamBucketizedSize() == BucketizedSize::k6Gb;
+}
+
+#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
 
 // TODO(crbug.com/1434873): This method is for chromium native code.
 // We need to update the java-side code, i.e.
 // base/android/java/src/org/chromium/base/SysUtils.java,
 // and to make the selected components in java to see this feature.
 bool SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled() {
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
   return base::SysInfo::IsLowEndDevice() ||
-         IsPartialLowEndModeOnMidRangeDevicesEnabled();
+         IsPartialLowEndModeOnMidRangeDevicesEnabled() ||
+         IsPartialLowEndModeOn3GbDevicesEnabled();
 #else
   return base::SysInfo::IsLowEndDevice();
 #endif
@@ -127,9 +185,10 @@
 
 bool SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
     const FeatureParam<bool>& param_for_exclusion) {
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
   return base::SysInfo::IsLowEndDevice() ||
-         (IsPartialLowEndModeOnMidRangeDevicesEnabled() &&
+         ((IsPartialLowEndModeOnMidRangeDevicesEnabled() ||
+           IsPartialLowEndModeOn3GbDevicesEnabled()) &&
           !param_for_exclusion.Get());
 #else
   return base::SysInfo::IsLowEndDevice();
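
As a worked example of the bucket boundaries introduced above (inclusive upper bounds of 2048, 3276 = 3.2 * 1024, 4096, and 6655 = 6.5 * 1024 - 1 MB), a few sample AmountOfPhysicalMemoryMB() readings land as follows; the sample values are illustrative:

//   1990 MB -> k2GbOrLess    (<= 2048)
//   2800 MB -> k3Gb          (2048 < x <= 3276)
//   3700 MB -> k4Gb          (3276 < x <= 4096)
//   5800 MB -> k6Gb          (4096 < x <= 6655)
//   7900 MB -> k8GbOrHigher  (> 6655)
// A phone sold as "4 GB" that reports ~3.7 GB after carveouts is therefore
// still classified as a 4 GB device, which is the point of the fuzzy bounds.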
diff --git a/base/system/sys_info.h b/base/system/sys_info.h
index b8295c7..26944d4 100644
--- a/base/system/sys_info.h
+++ b/base/system/sys_info.h
@@ -10,6 +10,7 @@
 
 #include <map>
 #include <string>
+#include <string_view>
 
 #include "base/base_export.h"
 #include "base/functional/callback_forward.h"
@@ -56,6 +57,9 @@
   // system, but could instead be the number of physical cores when
   // SetCpuSecurityMitigationsEnabled() has been invoked to indicate that CPU
   // security mitigations are enabled on Mac.
+  // On some platforms this may cache the resulting value in its implementation,
+  // e.g. on Linux/ChromeOS where this function cannot run in a sandbox and so
+  // a cached value must be returned.
   static int NumberOfProcessors();
 
   // Returns the number of the most efficient logical processors for the current
@@ -113,17 +117,54 @@
   // Returns a descriptive string for the current machine model or an empty
   // string if the machine model is unknown or an error occurred.
   // e.g. "MacPro1,1" on Mac, "iPhone9,3" on iOS or "Nexus 5" on Android. Only
-  // implemented on OS X, iOS, Android, Chrome OS and Windows. This returns an
+  // implemented on macOS, iOS, Android, Chrome OS and Windows. This returns an
   // empty string on other platforms.
+  //
+  // For macOS, a useful reference of the resulting strings returned by this
+  // function and their corresponding hardware can be found at
+  // https://everymac.com/systems/by_capability/mac-specs-by-machine-model-machine-id.html
   static std::string HardwareModelName();
 
+#if BUILDFLAG(IS_MAC)
+  struct HardwareModelNameSplit {
+    std::string category;
+    int model = 0;
+    int variant = 0;
+  };
+  // Hardware model names on the Mac are of the shape "Mac𝓍,𝓎" where the
+  // prefix is the general category, the 𝓍 is the model, and the 𝓎 is the
+  // variant. This function takes the hardware model name as returned by
+  // HardwareModelName() above, and returns it split into its constituent parts.
+  // Returns nullopt if the value cannot be parsed.
+  //
+  // /!\ WARNING
+  //
+  // This is NOT A USEFUL FUNCTION and SHOULD NOT BE USED. While the `model`
+  // value does inform as to what generation of hardware it is within the
+  // `category`, this is not useful in determining the capabilities of the
+  // hardware. Instead of using the `model` value, check the actual capabilities
+  // of the hardware to verify what it can do rather than relying on a hardware
+  // model name. In addition, while the `category` value used to have meaning
+  // and could be used to determine the type of hardware (e.g. desktop vs
+  // laptop), in 2022 Apple started using the generic category of "Mac", thus
+  // removing its usefulness when used alone. While the entire model string as
+  // returned by HardwareModelName() above can be useful for identifying a
+  // specific piece of equipment, splitting it apart is not useful.
+  //
+  // Do not add any further callers! When the aforementioned 2022-era hardware
+  // is the minimum requirement for Chromium, remove this function and adjust
+  // all callers appropriately.
+  static absl::optional<HardwareModelNameSplit> SplitHardwareModelNameDoNotUse(
+      std::string_view name);
+#endif
+
   struct HardwareInfo {
     std::string manufacturer;
     std::string model;
   };
   // Returns via |callback| a struct containing descriptive UTF-8 strings for
   // the current machine manufacturer and model, or empty strings if the
-  // information is unknown or an error occurred. Implemented on Windows, OS X,
+  // information is unknown or an error occurred. Implemented on Windows, macOS,
   // iOS, Linux, Chrome OS and Android.
   static void GetHardwareInfo(base::OnceCallback<void(HardwareInfo)> callback);
 
@@ -135,9 +176,10 @@
 
   // Retrieves detailed numeric values for the OS version.
   // DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
-  // for OS version-specific feature checks and workarounds. If you must use
-  // an OS version check instead of a feature check, use the base::mac::IsOS*
-  // family from base/mac/mac_util.h, or base::win::GetVersion from
+  // for OS version-specific feature checks and workarounds. If you must use an
+  // OS version check instead of a feature check, use
+  // base::mac::MacOSVersion()/MacOSMajorVersion() family from
+  // base/mac/mac_util.h, or base::win::GetVersion() from
   // base/win/windows_version.h.
   static void OperatingSystemVersionNumbers(int32_t* major_version,
                                             int32_t* minor_version,
@@ -205,7 +247,8 @@
   static std::string KernelVersion();
 
   // Crashes if running on Chrome OS non-test image. Use only for really
-  // sensitive and risky use cases.
+  // sensitive and risky use cases. Only works while running in verified mode;
+  // this check can easily be bypassed in dev mode.
   static void CrashIfChromeOSNonTestImage();
 #endif  // BUILDFLAG(IS_CHROMEOS)
 
@@ -254,6 +297,20 @@
   static bool IsLowEndDeviceOrPartialLowEndModeEnabled(
       const FeatureParam<bool>& param_for_exclusion);
 
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+  // Returns true for Android devices whose memory is X GB, considering
+  // carveouts. A carveout is memory reserved by the system, e.g. for
+  // drivers, MTE, etc. It's very common for a querying app to see hundreds
+  // of MB less than the actual physical memory installed on the system.
+  // Addendum: This logic should also work for ChromeOS.
+  static bool Is3GbDevice();
+  static bool Is4GbDevice();
+  static bool Is6GbDevice();
+  // Returns true for Android devices whose memory is 4GB or 6GB, considering
+  // carveouts.
+  static bool Is4GbOr6GbDevice();
+#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+
 #if BUILDFLAG(IS_MAC)
   // Indicates that CPU security mitigations are enabled for the current
   // process. This is used to control the behavior of NumberOfProcessors(), see
diff --git a/base/system/sys_info_chromeos.cc b/base/system/sys_info_chromeos.cc
index da4f2ce..3f8c6b5 100644
--- a/base/system/sys_info_chromeos.cc
+++ b/base/system/sys_info_chromeos.cc
@@ -58,7 +58,7 @@
     if (parsed_from_env) {
       double us = 0;
       if (StringToDouble(lsb_release_time_str, &us))
-        lsb_release_time_ = Time::FromDoubleT(us);
+        lsb_release_time_ = Time::FromSecondsSinceUnixEpoch(us);
     } else {
       // If the LSB_RELEASE and LSB_RELEASE_TIME environment variables are not
       // set, fall back to a blocking read of the lsb_release file. This should
@@ -172,6 +172,9 @@
 // static
 std::string SysInfo::HardwareModelName() {
   std::string board = GetLsbReleaseBoard();
+  if (board == "unknown") {
+    return "";
+  }
   // GetLsbReleaseBoard() may be suffixed with a "-signed-" and other extra
   // info. Strip it.
   const size_t index = board.find("-signed-");
diff --git a/base/system/sys_info_internal.h b/base/system/sys_info_internal.h
index bbf70a3..0c23b6d 100644
--- a/base/system/sys_info_internal.h
+++ b/base/system/sys_info_internal.h
@@ -44,9 +44,6 @@
 absl::optional<int> NumberOfProcessorsWhenCpuSecurityMitigationEnabled();
 #endif
 
-// Exposed for testing.
-BASE_EXPORT int NumberOfProcessors();
-
 #if BUILDFLAG(IS_APPLE)
 absl::optional<int> GetSysctlIntValue(const char* key_name);
 #endif
diff --git a/base/system/sys_info_ios.mm b/base/system/sys_info_ios.mm
index ed91821..be5788d 100644
--- a/base/system/sys_info_ios.mm
+++ b/base/system/sys_info_ios.mm
@@ -11,10 +11,11 @@
 #include <sys/sysctl.h>
 #include <sys/types.h>
 
+#include "base/apple/scoped_mach_port.h"
 #include "base/check_op.h"
-#include "base/mac/scoped_mach_port.h"
 #include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
+#include "base/posix/sysctl.h"
 #include "base/process/process_metrics.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
@@ -23,23 +24,6 @@
 
 namespace base {
 
-namespace {
-
-// Queries sysctlbyname() for the given key and returns the value from the
-// system or the empty string on failure.
-std::string GetSysctlValue(const char* key_name) {
-  char value[256];
-  size_t len = std::size(value);
-  if (sysctlbyname(key_name, &value, &len, nullptr, 0) == 0) {
-    DCHECK_GE(len, 1u);
-    DCHECK_EQ('\0', value[len - 1]);
-    return std::string(value, len - 1);
-  }
-  return std::string();
-}
-
-}  // namespace
-
 // static
 std::string SysInfo::OperatingSystemName() {
   static dispatch_once_t get_system_name_once;
@@ -96,21 +80,16 @@
 
 // static
 std::string SysInfo::GetIOSBuildNumber() {
-  int mib[2] = {CTL_KERN, KERN_OSVERSION};
-  unsigned int namelen = sizeof(mib) / sizeof(mib[0]);
-  size_t buffer_size = 0;
-  sysctl(mib, namelen, nullptr, &buffer_size, nullptr, 0);
-  char build_number[buffer_size];
-  int result = sysctl(mib, namelen, build_number, &buffer_size, nullptr, 0);
-  DCHECK(result == 0);
-  return build_number;
+  absl::optional<std::string> build_number =
+      StringSysctl({CTL_KERN, KERN_OSVERSION});
+  return build_number.value();
 }
 
 // static
 uint64_t SysInfo::AmountOfPhysicalMemoryImpl() {
   struct host_basic_info hostinfo;
   mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
-  base::mac::ScopedMachSendRight host(mach_host_self());
+  base::apple::ScopedMachSendRight host(mach_host_self());
   int result = host_info(host.get(), HOST_BASIC_INFO,
                          reinterpret_cast<host_info_t>(&hostinfo), &count);
   if (result != KERN_SUCCESS) {
@@ -133,7 +112,7 @@
 
 // static
 std::string SysInfo::CPUModelName() {
-  return GetSysctlValue("machdep.cpu.brand_string");
+  return StringSysctlByName("machdep.cpu.brand_string").value_or(std::string{});
 }
 
 // static
@@ -159,7 +138,7 @@
 #else
   // Note: This uses "hw.machine" instead of "hw.model" like the Mac code,
   // because "hw.model" doesn't always return the right string on some devices.
-  return GetSysctlValue("hw.machine");
+  return StringSysctl({CTL_HW, HW_MACHINE}).value_or(std::string{});
 #endif
 }
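
Both Apple files now route through the shared StringSysctl()/StringSysctlByName() helpers from base/posix/sysctl.h instead of per-file stack buffers. For reference, a standalone sketch of the usual two-pass sysctlbyname() string read (purely illustrative, not base's implementation):

#include <sys/sysctl.h>
#include <optional>
#include <string>

std::optional<std::string> ReadStringSysctl(const char* name) {
  size_t size = 0;
  if (sysctlbyname(name, nullptr, &size, nullptr, 0) != 0 || size == 0) {
    return std::nullopt;
  }
  std::string value(size, '\0');
  if (sysctlbyname(name, value.data(), &size, nullptr, 0) != 0) {
    return std::nullopt;
  }
  value.resize(size - 1);  // The reported size includes the trailing NUL.
  return value;
}

// ReadStringSysctl("machdep.cpu.brand_string") -> e.g. "Apple M1" on macOS.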
 
diff --git a/base/system/sys_info_mac.mm b/base/system/sys_info_mac.mm
index 22094e6..86249ff 100644
--- a/base/system/sys_info_mac.mm
+++ b/base/system/sys_info_mac.mm
@@ -12,15 +12,19 @@
 #include <sys/sysctl.h>
 #include <sys/types.h>
 
+#include <string_view>
+
+#include "base/apple/scoped_mach_port.h"
 #include "base/check_op.h"
 #include "base/debug/stack_trace.h"
 #include "base/feature_list.h"
 #include "base/mac/mac_util.h"
-#include "base/mac/scoped_mach_port.h"
 #include "base/no_destructor.h"
 #include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
+#include "base/posix/sysctl.h"
 #include "base/process/process_metrics.h"
+#include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
@@ -38,19 +42,6 @@
 // mitigations state changes after a call to NumberOfProcessors().
 bool g_is_cpu_security_mitigation_enabled_read = false;
 
-// Queries sysctlbyname() for the given key and returns the value from the
-// system or the empty string on failure.
-std::string GetSysctlStringValue(const char* key_name) {
-  char value[256];
-  size_t len = sizeof(value);
-  if (sysctlbyname(key_name, &value, &len, nullptr, 0) != 0)
-    return std::string();
-  DCHECK_GE(len, 1u);
-  DCHECK_LE(len, sizeof(value));
-  DCHECK_EQ('\0', value[len - 1]);
-  return std::string(value, len - 1);
-}
-
 }  // namespace
 
 namespace internal {
@@ -113,7 +104,7 @@
 uint64_t SysInfo::AmountOfPhysicalMemoryImpl() {
   struct host_basic_info hostinfo;
   mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
-  base::mac::ScopedMachSendRight host(mach_host_self());
+  base::apple::ScopedMachSendRight host(mach_host_self());
   int result = host_info(host.get(), HOST_BASIC_INFO,
                          reinterpret_cast<host_info_t>(&hostinfo), &count);
   if (result != KERN_SUCCESS) {
@@ -136,12 +127,45 @@
 
 // static
 std::string SysInfo::CPUModelName() {
-  return GetSysctlStringValue("machdep.cpu.brand_string");
+  return StringSysctlByName("machdep.cpu.brand_string").value_or(std::string{});
 }
 
 // static
 std::string SysInfo::HardwareModelName() {
-  return GetSysctlStringValue("hw.model");
+  // The old "hw.machine" and "hw.model" sysctls are discouraged in favor of the
+  // new "hw.product" and "hw.target". See
+  // https://github.com/apple-oss-distributions/xnu/blob/aca3beaa3dfbd42498b42c5e5ce20a938e6554e5/bsd/sys/sysctl.h#L1168-L1169
+  // and
+  // https://github.com/apple-oss-distributions/xnu/blob/aca3beaa3dfbd42498b42c5e5ce20a938e6554e5/bsd/kern/kern_mib.c#L534-L536
+  if (base::mac::MacOSMajorVersion() < 11) {
+    return StringSysctl({CTL_HW, HW_MODEL}).value_or(std::string{});
+  } else {
+    return StringSysctl({CTL_HW, HW_PRODUCT}).value_or(std::string{});
+  }
+}
+
+// static
+absl::optional<SysInfo::HardwareModelNameSplit>
+SysInfo::SplitHardwareModelNameDoNotUse(std::string_view name) {
+  size_t number_loc = name.find_first_of("0123456789");
+  if (number_loc == std::string::npos) {
+    return absl::nullopt;
+  }
+  size_t comma_loc = name.find(',', number_loc);
+  if (comma_loc == std::string::npos) {
+    return absl::nullopt;
+  }
+
+  HardwareModelNameSplit split;
+  const auto* begin = name.begin();
+  if (!StringToInt(std::string_view(begin + number_loc, begin + comma_loc),
+                   &split.model) ||
+      !StringToInt(std::string_view(begin + comma_loc + 1, name.end()),
+                   &split.variant)) {
+    return absl::nullopt;
+  }
+  split.category = name.substr(0, number_loc);
+  return split;
 }
 
 // static
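The callers above now go through the absl::optional-returning sysctl helpers from base/posix/sysctl.h instead of a local fixed-buffer helper. Below is a minimal sketch of that wrapper pattern, assuming the usual two-call sysctlbyname() protocol (query the length, then fetch the value); it is illustrative only and not the actual base/posix/sysctl.cc implementation.

#include <sys/sysctl.h>

#include <string>

#include "third_party/abseil-cpp/absl/types/optional.h"

// Sketch: returns the string value for `name`, or nullopt on failure.
absl::optional<std::string> StringSysctlByNameSketch(const char* name) {
  size_t len = 0;
  // First call: ask the kernel how many bytes the value needs.
  if (sysctlbyname(name, nullptr, &len, nullptr, 0) != 0 || len == 0) {
    return absl::nullopt;
  }
  std::string value(len, '\0');
  // Second call: copy the value into the buffer.
  if (sysctlbyname(name, value.data(), &len, nullptr, 0) != 0) {
    return absl::nullopt;
  }
  value.resize(len);
  // The reported length includes the trailing NUL; drop it.
  if (!value.empty() && value.back() == '\0') {
    value.pop_back();
  }
  return value;
}

Callers then chain .value_or(std::string{}) when an empty string is an acceptable fallback, as CPUModelName() and HardwareModelName() do above.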
diff --git a/base/system/sys_info_mac_unittest.mm b/base/system/sys_info_mac_unittest.mm
new file mode 100644
index 0000000..68442a9
--- /dev/null
+++ b/base/system/sys_info_mac_unittest.mm
@@ -0,0 +1,41 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/system/sys_info.h"
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+using SysInfoMacTest = testing::Test;
+
+TEST_F(SysInfoMacTest, SplitHardwareModelName) {
+  absl::optional<SysInfo::HardwareModelNameSplit> split_name =
+      SysInfo::SplitHardwareModelNameDoNotUse("");
+  EXPECT_EQ(absl::nullopt, split_name);
+
+  split_name = SysInfo::SplitHardwareModelNameDoNotUse("FooBar");
+  EXPECT_EQ(absl::nullopt, split_name);
+
+  split_name = SysInfo::SplitHardwareModelNameDoNotUse("BarFoo77");
+  EXPECT_EQ(absl::nullopt, split_name);
+
+  split_name = SysInfo::SplitHardwareModelNameDoNotUse("MacPro4,1");
+  EXPECT_EQ("MacPro", split_name.value().category);
+  EXPECT_EQ(4, split_name.value().model);
+  EXPECT_EQ(1, split_name.value().variant);
+
+  split_name = SysInfo::SplitHardwareModelNameDoNotUse("MacBookPro6,2");
+  EXPECT_EQ("MacBookPro", split_name.value().category);
+  EXPECT_EQ(6, split_name.value().model);
+  EXPECT_EQ(2, split_name.value().variant);
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/system/sys_info_openbsd.cc b/base/system/sys_info_openbsd.cc
index ad8310b..d7dfb5b 100644
--- a/base/system/sys_info_openbsd.cc
+++ b/base/system/sys_info_openbsd.cc
@@ -11,6 +11,7 @@
 #include <sys/sysctl.h>
 
 #include "base/notreached.h"
+#include "base/posix/sysctl.h"
 
 namespace {
 
@@ -64,14 +65,7 @@
 
 // static
 std::string SysInfo::CPUModelName() {
-  int mib[] = {CTL_HW, HW_MODEL};
-  char name[256];
-  size_t len = std::size(name);
-  if (sysctl(mib, std::size(mib), name, &len, NULL, 0) < 0) {
-    NOTREACHED();
-    return std::string();
-  }
-  return name;
+  return StringSysctl({CTL_HW, HW_MODEL}).value();
 }
 
 }  // namespace base
diff --git a/base/system/sys_info_posix.cc b/base/system/sys_info_posix.cc
index 4e3226c..4482b59 100644
--- a/base/system/sys_info_posix.cc
+++ b/base/system/sys_info_posix.cc
@@ -118,48 +118,61 @@
 namespace base {
 
 #if !BUILDFLAG(IS_OPENBSD)
+// static
 int SysInfo::NumberOfProcessors() {
 #if BUILDFLAG(IS_MAC)
   absl::optional<int> number_of_physical_cores =
       internal::NumberOfProcessorsWhenCpuSecurityMitigationEnabled();
-  if (number_of_physical_cores.has_value())
+  if (number_of_physical_cores.has_value()) {
     return number_of_physical_cores.value();
+  }
 #endif  // BUILDFLAG(IS_MAC)
 
-  // sysconf returns the number of "logical" (not "physical") processors on both
-  // Mac and Linux.  So we get the number of max available "logical" processors.
-  //
-  // Note that the number of "currently online" processors may be fewer than the
-  // returned value of NumberOfProcessors(). On some platforms, the kernel may
-  // make some processors offline intermittently, to save power when system
-  // loading is low.
-  //
-  // One common use case that needs to know the processor count is to create
-  // optimal number of threads for optimization. It should make plan according
-  // to the number of "max available" processors instead of "currently online"
-  // ones. The kernel should be smart enough to make all processors online when
-  // it has sufficient number of threads waiting to run.
-  long res = sysconf(_SC_NPROCESSORS_CONF);
-  if (res == -1) {
-    NOTREACHED();
-    return 1;
-  }
+  // The number of processors is cached because computing it inside the
+  // sandbox doesn't work on some platforms. The Mac-specific code above is
+  // left out of the cached block because changing its value at runtime is the
+  // best way to unit test its behavior.
+  static int cached_num_cpus = []() {
+    // sysconf returns the number of "logical" (not "physical") processors on
+    // both Mac and Linux.  So we get the number of max available "logical"
+    // processors.
+    //
+    // Note that the number of "currently online" processors may be fewer than
+    // the returned value of NumberOfProcessors(). On some platforms, the kernel
+    // may make some processors offline intermittently, to save power when
+    // system loading is low.
+    //
+    // One common use case that needs to know the processor count is to create
+    // optimal number of threads for optimization. It should make plan according
+    // to the number of "max available" processors instead of "currently online"
+    // ones. The kernel should be smart enough to make all processors online
+    // when it has sufficient number of threads waiting to run.
+    long res = sysconf(_SC_NPROCESSORS_CONF);
+    if (res == -1) {
+      // `res` can be -1 if this function is invoked under the sandbox, which
+      // should never happen.
+      NOTREACHED();
+      return 1;
+    }
 
-  int num_cpus = static_cast<int>(res);
+    int num_cpus = static_cast<int>(res);
 
 #if BUILDFLAG(IS_LINUX)
-  // Restrict the CPU count based on the process's CPU affinity mask, if
-  // available.
-  cpu_set_t* cpu_set = CPU_ALLOC(num_cpus);
-  size_t cpu_set_size = CPU_ALLOC_SIZE(num_cpus);
-  int ret = sched_getaffinity(0, cpu_set_size, cpu_set);
-  if (ret == 0) {
-    num_cpus = CPU_COUNT_S(cpu_set_size, cpu_set);
-  }
-  CPU_FREE(cpu_set);
+    // Restrict the CPU count based on the process's CPU affinity mask, if
+    // available.
+    cpu_set_t* cpu_set = CPU_ALLOC(num_cpus);
+    size_t cpu_set_size = CPU_ALLOC_SIZE(num_cpus);
+    int ret = sched_getaffinity(0, cpu_set_size, cpu_set);
+    if (ret == 0) {
+      num_cpus = CPU_COUNT_S(cpu_set_size, cpu_set);
+    }
+    CPU_FREE(cpu_set);
 #endif  // BUILDFLAG(IS_LINUX)
 
-  return num_cpus;
+    return num_cpus;
+  }();
+
+  return cached_num_cpus;
 }
 #endif  // !BUILDFLAG(IS_OPENBSD)
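The rewritten NumberOfProcessors() above uses a function-local static initialized by an immediately-invoked lambda, so the sysconf()/sched_getaffinity() work runs exactly once (before the sandbox can interfere) and the initialization is thread safe per the C++ standard. A minimal, self-contained illustration of the same pattern, using a hypothetical page-size lookup rather than anything from this CL:

#include <unistd.h>

int CachedPageSize() {
  // The lambda runs once, on the first call; later calls return the cached
  // value.
  static const int page_size = [] {
    long res = sysconf(_SC_PAGESIZE);
    return res > 0 ? static_cast<int>(res) : 4096;  // Assumed fallback value.
  }();
  return page_size;
}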
 
diff --git a/base/system/sys_info_unittest.cc b/base/system/sys_info_unittest.cc
index 3d9e2f8..499d2a0 100644
--- a/base/system/sys_info_unittest.cc
+++ b/base/system/sys_info_unittest.cc
@@ -376,11 +376,11 @@
 TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
   const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
   // Use a fake time that can be safely displayed as a string.
-  const Time lsb_release_time(Time::FromDoubleT(12345.6));
+  const Time lsb_release_time(Time::FromSecondsSinceUnixEpoch(12345.6));
   test::ScopedChromeOSVersionInfo version(kLsbRelease, lsb_release_time);
   Time parsed_lsb_release_time = SysInfo::GetLsbReleaseTime();
-  EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
-                   parsed_lsb_release_time.ToDoubleT());
+  EXPECT_DOUBLE_EQ(lsb_release_time.InSecondsFSinceUnixEpoch(),
+                   parsed_lsb_release_time.InSecondsFSinceUnixEpoch());
 }
 
 TEST_F(SysInfoTest, IsRunningOnChromeOS) {
diff --git a/base/task/bind_post_task.h b/base/task/bind_post_task.h
index 77fafbb..cc3b8ad 100644
--- a/base/task/bind_post_task.h
+++ b/base/task/bind_post_task.h
@@ -68,7 +68,7 @@
     scoped_refptr<TaskRunner> task_runner,
     OnceCallback<ReturnType(Args...)> callback,
     const Location& location = FROM_HERE) {
-  static_assert(std::is_same<ReturnType, void>::value,
+  static_assert(std::is_same_v<ReturnType, void>,
                 "OnceCallback must have void return type in order to produce a "
                 "closure for PostTask(). Use base::IgnoreResult() to drop the "
                 "return value if desired.");
@@ -89,7 +89,7 @@
     scoped_refptr<TaskRunner> task_runner,
     RepeatingCallback<ReturnType(Args...)> callback,
     const Location& location = FROM_HERE) {
-  static_assert(std::is_same<ReturnType, void>::value,
+  static_assert(std::is_same_v<ReturnType, void>,
                 "RepeatingCallback must have void return type in order to "
                 "produce a closure for PostTask(). Use base::IgnoreResult() to "
                 "drop the return value if desired.");
diff --git a/base/task/bind_post_task_unittest.nc b/base/task/bind_post_task_unittest.nc
index ce74248..239a752 100644
--- a/base/task/bind_post_task_unittest.nc
+++ b/base/task/bind_post_task_unittest.nc
@@ -17,7 +17,7 @@
   return 5;
 }
 
-#if defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK)  // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#if defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK)  // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
 // OnceCallback with non-void return type.
 void WontCompile() {
   OnceCallback<int()> cb = BindOnce(&ReturnInt);
@@ -25,7 +25,7 @@
   std::move(post_cb).Run();
 }
 
-#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK)  // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK)  // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
 // RepeatingCallback with non-void return type.
 void WontCompile() {
   RepeatingCallback<int()> cb = BindRepeating(&ReturnInt);
@@ -33,7 +33,7 @@
   std::move(post_cb).Run();
 }
 
-#elif defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT)  // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#elif defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT)  // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
 // OnceCallback with non-void return type.
 void WontCompile() {
   OnceCallback<int()> cb = BindOnce(&ReturnInt);
@@ -41,7 +41,7 @@
   std::move(post_cb).Run();
 }
 
-#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT)  // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT)  // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
 // RepeatingCallback with non-void return type.
 void WontCompile() {
   RepeatingCallback<int()> cb = BindRepeating(&ReturnInt);
diff --git a/base/task/current_thread.h b/base/task/current_thread.h
index c7e1134..7d80fd4 100644
--- a/base/task/current_thread.h
+++ b/base/task/current_thread.h
@@ -215,7 +215,7 @@
 
 #if BUILDFLAG(IS_OZONE) && !BUILDFLAG(IS_FUCHSIA) && !BUILDFLAG(IS_WIN)
   static_assert(
-      std::is_base_of<WatchableIOMessagePumpPosix, MessagePumpForUI>::value,
+      std::is_base_of_v<WatchableIOMessagePumpPosix, MessagePumpForUI>,
       "CurrentThreadForUI::WatchFileDescriptor is supported only"
       "by MessagePumpLibevent and MessagePumpGlib implementations.");
   bool WatchFileDescriptor(int fd,
diff --git a/base/task/deferred_sequenced_task_runner.cc b/base/task/deferred_sequenced_task_runner.cc
index f712b61..734e80c 100644
--- a/base/task/deferred_sequenced_task_runner.cc
+++ b/base/task/deferred_sequenced_task_runner.cc
@@ -102,6 +102,11 @@
   StartImpl();
 }
 
+bool DeferredSequencedTaskRunner::Started() const {
+  AutoLock lock(lock_);
+  return started_;
+}
+
 DeferredSequencedTaskRunner::~DeferredSequencedTaskRunner() = default;
 
 void DeferredSequencedTaskRunner::QueueDeferredTask(const Location& from_here,
diff --git a/base/task/deferred_sequenced_task_runner.h b/base/task/deferred_sequenced_task_runner.h
index d1b4ac7..7805be7 100644
--- a/base/task/deferred_sequenced_task_runner.h
+++ b/base/task/deferred_sequenced_task_runner.h
@@ -56,6 +56,9 @@
   void StartWithTaskRunner(
       scoped_refptr<SequencedTaskRunner> target_task_runner);
 
+  // Returns true if task execution has been started.
+  bool Started() const;
+
  private:
   struct DeferredTask  {
     DeferredTask();
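A hedged usage sketch of the new Started() accessor, mirroring the unit-test expectations below; the default constructor plus StartWithTaskRunner() combination is assumed from the existing DeferredSequencedTaskRunner API rather than introduced by this CL.

#include "base/check.h"
#include "base/functional/bind.h"
#include "base/task/deferred_sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"

void DeferredStartSketch() {
  auto runner = base::MakeRefCounted<base::DeferredSequencedTaskRunner>();
  // Tasks posted before Start() are queued, not run.
  runner->PostTask(FROM_HERE, base::BindOnce([] {}));
  CHECK(!runner->Started());
  runner->StartWithTaskRunner(
      base::SingleThreadTaskRunner::GetCurrentDefault());
  CHECK(runner->Started());  // Queued tasks are now handed to the target.
}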
diff --git a/base/task/deferred_sequenced_task_runner_unittest.cc b/base/task/deferred_sequenced_task_runner_unittest.cc
index 9360c0d..8bef777 100644
--- a/base/task/deferred_sequenced_task_runner_unittest.cc
+++ b/base/task/deferred_sequenced_task_runner_unittest.cc
@@ -69,17 +69,22 @@
   PostExecuteTask(1);
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
+  EXPECT_FALSE(runner_->Started());
 }
 
 TEST_F(DeferredSequencedTaskRunnerTest, Start) {
+  EXPECT_FALSE(runner_->Started());
   StartRunner();
+  EXPECT_TRUE(runner_->Started());
   PostExecuteTask(1);
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1));
 }
 
 TEST_F(DeferredSequencedTaskRunnerTest, StartWithMultipleElements) {
+  EXPECT_FALSE(runner_->Started());
   StartRunner();
+  EXPECT_TRUE(runner_->Started());
   for (int i = 1; i < 5; ++i)
     PostExecuteTask(i);
 
@@ -88,17 +93,22 @@
 }
 
 TEST_F(DeferredSequencedTaskRunnerTest, DeferredStart) {
+  EXPECT_FALSE(runner_->Started());
   PostExecuteTask(1);
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
+  EXPECT_FALSE(runner_->Started());
 
   StartRunner();
+  EXPECT_TRUE(runner_->Started());
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1));
+  EXPECT_TRUE(runner_->Started());
 
   PostExecuteTask(2);
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2));
+  EXPECT_TRUE(runner_->Started());
 }
 
 TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleElements) {
@@ -106,12 +116,15 @@
     PostExecuteTask(i);
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
+  EXPECT_FALSE(runner_->Started());
 
   StartRunner();
+  EXPECT_TRUE(runner_->Started());
   for (int i = 5; i < 9; ++i)
     PostExecuteTask(i);
   RunLoop().RunUntilIdle();
   EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_TRUE(runner_->Started());
 }
 
 TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleThreads) {
@@ -127,6 +140,9 @@
       thread2.task_runner()->PostTask(
           FROM_HERE, BindOnce(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
                               Unretained(this), 2 * i + 1));
+      if (i <= 2) {
+        EXPECT_FALSE(runner_->Started());
+      }
       if (i == 2) {
         thread1.task_runner()->PostTask(
             FROM_HERE, BindOnce(&DeferredSequencedTaskRunnerTest::StartRunner,
@@ -136,6 +152,7 @@
   }
 
   RunLoop().RunUntilIdle();
+  EXPECT_TRUE(runner_->Started());
   EXPECT_THAT(executed_task_ids_,
       testing::WhenSorted(testing::ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
 }
@@ -160,6 +177,7 @@
       // task |2 * i + 1| is executed.
       PostExecuteTask(2 * i + 1);
     }
+    EXPECT_FALSE(runner_->Started());
     StartRunner();
   }
 
@@ -167,6 +185,7 @@
   // |2 * i + 1| is executed.
   EXPECT_THAT(executed_task_ids_,
               testing::ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_TRUE(runner_->Started());
 }
 
 void GetRunsTasksInCurrentSequence(bool* result,
@@ -191,6 +210,7 @@
                runner, run_loop.QuitClosure()));
   run_loop.Run();
   EXPECT_FALSE(runs_task_in_current_thread);
+  EXPECT_FALSE(runner->Started());
 }
 
 TEST_F(DeferredSequencedTaskRunnerTest, StartWithTaskRunner) {
@@ -205,7 +225,9 @@
                          std::move(quit_closure).Run();
                        },
                        &run_called, run_loop.QuitClosure()));
+  EXPECT_FALSE(runner->Started());
   runner->StartWithTaskRunner(SingleThreadTaskRunner::GetCurrentDefault());
+  EXPECT_TRUE(runner->Started());
   run_loop.Run();
   EXPECT_TRUE(run_called);
 }
diff --git a/base/task/delay_policy.h b/base/task/delay_policy.h
index ef979fc..e56d0af 100644
--- a/base/task/delay_policy.h
+++ b/base/task/delay_policy.h
@@ -5,6 +5,8 @@
 #ifndef BASE_TASK_DELAY_POLICY_H_
 #define BASE_TASK_DELAY_POLICY_H_
 
+#include "base/time/time.h"
+
 namespace base {
 namespace subtle {
 
@@ -26,6 +28,15 @@
   kPrecise,
 };
 
+inline DelayPolicy MaybeOverrideDelayPolicy(DelayPolicy delay_policy,
+                                            TimeDelta delay,
+                                            TimeDelta max_precise_delay) {
+  if (delay >= max_precise_delay && delay_policy == DelayPolicy::kPrecise) {
+    return DelayPolicy::kFlexibleNoSooner;
+  }
+  return delay_policy;
+}
+
 }  // namespace subtle
 }  // namespace base
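A small illustration of the helper added above: a kPrecise delay policy is kept only while the requested delay is below the configured maximum precise delay, otherwise it is downgraded to kFlexibleNoSooner. The threshold used here is made up for the example.

#include "base/check.h"
#include "base/task/delay_policy.h"
#include "base/time/time.h"

void DelayPolicySketch() {
  constexpr base::TimeDelta kMaxPrecise = base::Milliseconds(16);  // Assumed.
  // Short precise delays stay precise.
  CHECK(base::subtle::MaybeOverrideDelayPolicy(
            base::subtle::DelayPolicy::kPrecise, base::Milliseconds(4),
            kMaxPrecise) == base::subtle::DelayPolicy::kPrecise);
  // Delays at or beyond the threshold lose precise semantics.
  CHECK(base::subtle::MaybeOverrideDelayPolicy(
            base::subtle::DelayPolicy::kPrecise, base::Milliseconds(32),
            kMaxPrecise) == base::subtle::DelayPolicy::kFlexibleNoSooner);
}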
 
diff --git a/base/task/sequence_manager/lazily_deallocated_deque.h b/base/task/sequence_manager/lazily_deallocated_deque.h
index 13a1fc7..3dc2574 100644
--- a/base/task/sequence_manager/lazily_deallocated_deque.h
+++ b/base/task/sequence_manager/lazily_deallocated_deque.h
@@ -319,7 +319,7 @@
     size_t capacity_;
     size_t front_index_;
     size_t back_index_;
-    raw_ptr<T> data_;
+    raw_ptr<T, AllowPtrArithmetic> data_;
     std::unique_ptr<Ring> next_;
   };
 
diff --git a/base/task/sequence_manager/sequence_manager.cc b/base/task/sequence_manager/sequence_manager.cc
index 6951ca3..8c7216b 100644
--- a/base/task/sequence_manager/sequence_manager.cc
+++ b/base/task/sequence_manager/sequence_manager.cc
@@ -96,8 +96,8 @@
 
 SequenceManager::PrioritySettings::~PrioritySettings() = default;
 
-SequenceManager::PrioritySettings::PrioritySettings(PrioritySettings&&) =
-    default;
+SequenceManager::PrioritySettings::PrioritySettings(
+    PrioritySettings&&) noexcept = default;
 
 SequenceManager::PrioritySettings& SequenceManager::PrioritySettings::operator=(
     PrioritySettings&&) = default;
diff --git a/base/task/sequence_manager/sequence_manager.h b/base/task/sequence_manager/sequence_manager.h
index 7d50609..85f86aa 100644
--- a/base/task/sequence_manager/sequence_manager.h
+++ b/base/task/sequence_manager/sequence_manager.h
@@ -93,7 +93,7 @@
 
     ~PrioritySettings();
 
-    PrioritySettings(PrioritySettings&&);
+    PrioritySettings(PrioritySettings&&) noexcept;
     PrioritySettings& operator=(PrioritySettings&&);
 
     TaskQueue::QueuePriority priority_count() const { return priority_count_; }
@@ -308,10 +308,6 @@
   // Removes an observer which reports task execution. Can only be called on the
   // same thread that `this` is running on.
   virtual void RemoveTaskObserver(TaskObserver* task_observer) = 0;
-
- protected:
-  virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
-      const TaskQueue::Spec& spec) = 0;
 };
 
 class BASE_EXPORT SequenceManager::Settings::Builder {
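Side note on the noexcept added to the move constructor above: standard containers use std::move_if_noexcept when they reallocate, so a noexcept move lets elements be moved rather than copied during growth. A generic illustration, unrelated to SequenceManager itself:

#include <type_traits>

struct Widget {
  Widget() = default;
  Widget(Widget&&) noexcept = default;  // Moved, not copied, on vector growth.
  Widget& operator=(Widget&&) = default;
};
static_assert(std::is_nothrow_move_constructible_v<Widget>,
              "reallocation can move Widget without copying");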
diff --git a/base/task/sequence_manager/sequence_manager_impl.cc b/base/task/sequence_manager/sequence_manager_impl.cc
index ee43e23..9434d13 100644
--- a/base/task/sequence_manager/sequence_manager_impl.cc
+++ b/base/task/sequence_manager/sequence_manager_impl.cc
@@ -22,6 +22,7 @@
 #include "base/rand_util.h"
 #include "base/ranges/algorithm.h"
 #include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
 #include "base/task/sequence_manager/task_time_observer.h"
 #include "base/task/sequence_manager/thread_controller_impl.h"
 #include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
@@ -299,6 +300,7 @@
   base::InitializeTaskLeeway();
   ApplyNoWakeUpsForCanceledTasks();
   TaskQueueImpl::InitializeFeatures();
+  MessagePump::InitializeFeatures();
   ThreadControllerWithMessagePumpImpl::InitializeFeatures();
 #if BUILDFLAG(IS_WIN)
   g_explicit_high_resolution_timer_win =
@@ -706,8 +708,7 @@
     SelectTaskOption option) const {
   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
 
-  if (auto priority =
-          main_thread_only().selector.GetHighestPendingPriority(option)) {
+  if (main_thread_only().selector.GetHighestPendingPriority(option)) {
     // If the selector has non-empty queues we trivially know there is immediate
     // work to be done. However we may want to yield to native work if it is
     // more important.
@@ -719,8 +720,7 @@
   // do this always.
   ReloadEmptyWorkQueues();
 
-  if (auto priority =
-          main_thread_only().selector.GetHighestPendingPriority(option)) {
+  if (main_thread_only().selector.GetHighestPendingPriority(option)) {
     return WakeUp{};
   }
 
@@ -1133,8 +1133,7 @@
 
 TaskQueue::Handle SequenceManagerImpl::CreateTaskQueue(
     const TaskQueue::Spec& spec) {
-  return TaskQueue::Handle(
-      std::make_unique<TaskQueue>(CreateTaskQueueImpl(spec), spec));
+  return TaskQueue::Handle(CreateTaskQueueImpl(spec));
 }
 
 std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
diff --git a/base/task/sequence_manager/sequence_manager_impl.h b/base/task/sequence_manager/sequence_manager_impl.h
index 925dd5a..8b72740 100644
--- a/base/task/sequence_manager/sequence_manager_impl.h
+++ b/base/task/sequence_manager/sequence_manager_impl.h
@@ -409,7 +409,7 @@
   void ReloadEmptyWorkQueues() const;
 
   std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
-      const TaskQueue::Spec& spec) override;
+      const TaskQueue::Spec& spec);
 
   // Periodically reclaims memory by sweeping away canceled tasks and shrinking
   // buffers.
diff --git a/base/task/sequence_manager/sequence_manager_impl_unittest.cc b/base/task/sequence_manager/sequence_manager_impl_unittest.cc
index 0dd9a6f..6241b1c 100644
--- a/base/task/sequence_manager/sequence_manager_impl_unittest.cc
+++ b/base/task/sequence_manager/sequence_manager_impl_unittest.cc
@@ -137,7 +137,7 @@
 }
 
 TaskQueueImpl* GetTaskQueueImpl(TaskQueue* task_queue) {
-  return task_queue->GetTaskQueueImplForTest();
+  return static_cast<TaskQueueImpl*>(task_queue);
 }
 
 constexpr TimeDelta kLeeway = kDefaultLeeway;
@@ -301,6 +301,7 @@
     auto thread_controller =
         std::make_unique<ThreadControllerWithMessagePumpImpl>(std::move(pump),
                                                               settings);
+    MessagePump::InitializeFeatures();
     ThreadControllerWithMessagePumpImpl::InitializeFeatures();
     sequence_manager_ = SequenceManagerForTest::Create(
         std::move(thread_controller), std::move(settings));
@@ -491,14 +492,13 @@
                          GetTestTypes(),
                          GetTestNameSuffix);
 
-void PostFromNestedRunloop(TaskQueue* runner,
+void PostFromNestedRunloop(scoped_refptr<SingleThreadTaskRunner> runner,
                            std::vector<std::pair<OnceClosure, bool>>* tasks) {
   for (std::pair<OnceClosure, bool>& pair : *tasks) {
     if (pair.second) {
-      runner->task_runner()->PostTask(FROM_HERE, std::move(pair.first));
+      runner->PostTask(FROM_HERE, std::move(pair.first));
     } else {
-      runner->task_runner()->PostNonNestableTask(FROM_HERE,
-                                                 std::move(pair.first));
+      runner->PostNonNestableTask(FROM_HERE, std::move(pair.first));
     }
   }
   RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
@@ -723,7 +723,7 @@
       std::make_pair(BindOnce(&TestTask, 6, &run_order), true));
 
   queue->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&PostFromNestedRunloop, Unretained(queue.get()),
+      FROM_HERE, BindOnce(&PostFromNestedRunloop, queue->task_runner(),
                           Unretained(&tasks_to_post_from_nested_loop)));
 
   RunLoop().RunUntilIdle();
@@ -733,8 +733,9 @@
 
 TEST_P(SequenceManagerTest, NonNestableTasksShutdownQueue) {
   // TestMockTimeTaskRunner doesn't support nested loops.
-  if (GetUnderlyingRunnerType() == RunnerType::kMockTaskRunner)
+  if (GetUnderlyingRunnerType() == RunnerType::kMockTaskRunner) {
     return;
+  }
   auto queue = CreateTaskQueue();
 
   std::vector<EnqueueOrder> run_order;
@@ -745,10 +746,10 @@
   tasks_to_post_from_nested_loop.emplace_back(
       BindOnce(&TestTask, 2, &run_order), true);
   tasks_to_post_from_nested_loop.emplace_back(
-      BindLambdaForTesting([&queue]() { queue->ShutdownTaskQueue(); }), true);
+      BindLambdaForTesting([&queue]() { queue.reset(); }), true);
 
   queue->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&PostFromNestedRunloop, Unretained(queue.get()),
+      FROM_HERE, BindOnce(&PostFromNestedRunloop, queue->task_runner(),
                           Unretained(&tasks_to_post_from_nested_loop)));
 
   RunLoop().RunUntilIdle();
@@ -861,7 +862,7 @@
                      true));
 
   queue->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&PostFromNestedRunloop, Unretained(queue.get()),
+      FROM_HERE, BindOnce(&PostFromNestedRunloop, queue->task_runner(),
                           Unretained(&tasks_to_post_from_nested_loop)));
   RunLoop().RunUntilIdle();
 
@@ -1811,7 +1812,7 @@
 
   queue->task_runner()->PostTask(FROM_HERE, BindOnce(&TestTask, 0, &run_order));
   queue->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&PostFromNestedRunloop, Unretained(queue.get()),
+      FROM_HERE, BindOnce(&PostFromNestedRunloop, queue->task_runner(),
                           Unretained(&tasks_to_post_from_nested_loop)));
   queue->task_runner()->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
 
@@ -2350,7 +2351,7 @@
   std::vector<EnqueueOrder> run_order;
   EXPECT_TRUE(runner1->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order)));
   EXPECT_TRUE(runner2->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order)));
-  queue->ShutdownTaskQueue();
+  queue.reset();
   EXPECT_FALSE(
       runner1->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order)));
   EXPECT_FALSE(
@@ -2378,7 +2379,7 @@
                                   BindOnce(&TestTask, 2, &run_order));
   queue3->task_runner()->PostTask(FROM_HERE,
                                   BindOnce(&TestTask, 3, &run_order));
-  queue2->ShutdownTaskQueue();
+  queue2.reset();
   RunLoop().RunUntilIdle();
 
   EXPECT_THAT(run_order, ElementsAre(1u, 3u));
@@ -2396,7 +2397,7 @@
   queues[0]->task_runner()->PostDelayedTask(
       FROM_HERE, BindOnce(&TestTask, 3, &run_order), Milliseconds(30));
 
-  queues[1]->ShutdownTaskQueue();
+  queues[1].reset();
   RunLoop().RunUntilIdle();
 
   FastForwardBy(Milliseconds(40));
@@ -2404,25 +2405,23 @@
 }
 
 namespace {
-void ShutdownQueue(TaskQueue* queue) {
-  queue->ShutdownTaskQueue();
-}
+void ShutdownQueue(TaskQueue::Handle queue) {}
 }  // namespace
 
 TEST_P(SequenceManagerTest, ShutdownTaskQueue_InTasks) {
   auto queues = CreateTaskQueues(3u);
+  auto runner1 = queues[1]->task_runner();
+  auto runner2 = queues[2]->task_runner();
 
   std::vector<EnqueueOrder> run_order;
   queues[0]->task_runner()->PostTask(FROM_HERE,
                                      BindOnce(&TestTask, 1, &run_order));
   queues[0]->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&ShutdownQueue, Unretained(queues[1].get())));
+      FROM_HERE, BindOnce(&ShutdownQueue, std::move(queues[1])));
   queues[0]->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&ShutdownQueue, Unretained(queues[2].get())));
-  queues[1]->task_runner()->PostTask(FROM_HERE,
-                                     BindOnce(&TestTask, 2, &run_order));
-  queues[2]->task_runner()->PostTask(FROM_HERE,
-                                     BindOnce(&TestTask, 3, &run_order));
+      FROM_HERE, BindOnce(&ShutdownQueue, std::move(queues[2])));
+  runner1->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+  runner2->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
 
   RunLoop().RunUntilIdle();
   ASSERT_THAT(run_order, ElementsAre(1u));
@@ -2444,24 +2443,20 @@
 
   // We retain a reference to the task queue even when the manager has deleted
   // its reference.
-  TaskQueue::Handle task_queue = CreateTaskQueue();
+  TaskQueue::Handle queue_to_delete = CreateTaskQueue();
 
   std::vector<bool> log;
   std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
 
-  // Inside a nested run loop, call task_queue->ShutdownTaskQueue, bookended
-  // by calls to HasOneRefTask to make sure the manager doesn't release its
-  // reference until the nested run loop exits.
-  // NB: This first HasOneRefTask is a sanity check.
+  // Inside a nested run loop, delete `queue_to_delete`, bookended by Nop tasks.
   tasks_to_post_from_nested_loop.push_back(
       std::make_pair(BindOnce(&NopTask), true));
   tasks_to_post_from_nested_loop.push_back(std::make_pair(
-      BindOnce(&TaskQueue::ShutdownTaskQueue, Unretained(task_queue.get())),
-      true));
+      BindLambdaForTesting([&] { queue_to_delete.reset(); }), true));
   tasks_to_post_from_nested_loop.push_back(
       std::make_pair(BindOnce(&NopTask), true));
   queue->task_runner()->PostTask(
-      FROM_HERE, BindOnce(&PostFromNestedRunloop, Unretained(queue.get()),
+      FROM_HERE, BindOnce(&PostFromNestedRunloop, queue->task_runner(),
                           Unretained(&tasks_to_post_from_nested_loop)));
   RunLoop().RunUntilIdle();
 
@@ -2487,7 +2482,6 @@
   EXPECT_THAT(run_order, ElementsAre(1u));
 
   sequence_manager()->ResetTimeDomain();
-  queue->ShutdownTaskQueue();
 }
 
 // Test that no wake up is scheduled for a delayed task in the future when a
@@ -2511,7 +2505,6 @@
   EXPECT_EQ(WakeUp{}, sequence_manager()->GetPendingWakeUp(&lazy_now2));
 
   sequence_manager()->ResetTimeDomain();
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest,
@@ -2544,7 +2537,6 @@
   EXPECT_THAT(run_order, ElementsAre(4u, 3u, 2u, 1u));
 
   sequence_manager()->ResetTimeDomain();
-  queue->ShutdownTaskQueue();
 }
 
 namespace {
@@ -2606,9 +2598,6 @@
   queue->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
   sequence_manager()->ReloadEmptyWorkQueues();
   Mock::VerifyAndClearExpectations(&throttler);
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, TaskQueueThrottler_DelayedTask) {
@@ -2655,9 +2644,6 @@
               GetNextAllowedWakeUp_DesiredWakeUpTime(start_time + delay1s));
   voter->SetVoteToEnable(true);
   Mock::VerifyAndClearExpectations(&throttler);
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, TaskQueueThrottler_OnWakeUp) {
@@ -2681,9 +2667,6 @@
   LazyNow lazy_now(mock_tick_clock());
   sequence_manager()->MoveReadyDelayedTasksToWorkQueues(&lazy_now);
   Mock::VerifyAndClearExpectations(&throttler);
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, TaskQueueThrottler_ResetThrottler) {
@@ -2714,9 +2697,6 @@
   // Next wake up should be back to normal.
   EXPECT_EQ((WakeUp{start_time + delay1s, kLeeway}),
             sequence_manager()->GetPendingWakeUp(&lazy_now));
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, TaskQueueThrottler_DelayedTaskMultipleQueues) {
@@ -2768,10 +2748,6 @@
               GetNextAllowedWakeUp_DesiredWakeUpTime(start_time + delay10s));
   voter1->SetVoteToEnable(true);
   Mock::VerifyAndClearExpectations(&throttler1);
-
-  // Tidy up.
-  queues[0]->ShutdownTaskQueue();
-  queues[1]->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, TaskQueueThrottler_DelayedWorkWhichCanRunNow) {
@@ -2798,9 +2774,6 @@
   LazyNow lazy_now(mock_tick_clock());
   queue->UpdateWakeUp(&lazy_now);
   Mock::VerifyAndClearExpectations(&throttler);
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 namespace {
@@ -2987,7 +2960,7 @@
   auto queue = CreateTaskQueue();
   bool did_shutdown = false;
   auto on_destroy = BindLambdaForTesting([&] {
-    queue->ShutdownTaskQueue();
+    queue.reset();
     did_shutdown = true;
   });
 
@@ -3010,7 +2983,7 @@
   auto queue = CreateTaskQueue();
   bool did_shutdown = false;
   auto on_destroy = BindLambdaForTesting([&] {
-    queue->ShutdownTaskQueue();
+    queue.reset();
     did_shutdown = true;
   });
 
@@ -3604,9 +3577,6 @@
       lazy_now2, SequencedTaskSource::SelectTaskOption::kDefault));
   sequence_manager()->DidRunTask(lazy_now2);
   EXPECT_EQ(absl::nullopt, sequence_manager()->GetPendingWakeUp(&lazy_now2));
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, DelayedTasksNotSelectedWithImmediateTask) {
@@ -3664,9 +3634,6 @@
       sequence_manager()->GetPendingWakeUp(
           &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
   sequence_manager()->DidRunTask(lazy_now2);
-
-  // Tidy up.
-  queue->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest,
@@ -3728,12 +3695,6 @@
   // No delayed tasks can be executed anymore.
   EXPECT_FALSE(sequence_manager()->SelectNextTask(lazy_now2));
   EXPECT_EQ(absl::nullopt, sequence_manager()->GetPendingWakeUp(&lazy_now2));
-
-  // Tidy up.
-  queues[0]->ShutdownTaskQueue();
-  queues[1]->ShutdownTaskQueue();
-  queues[2]->ShutdownTaskQueue();
-  queues[3]->ShutdownTaskQueue();
 }
 
 TEST_P(SequenceManagerTest, GetPendingWakeUp) {
@@ -4082,7 +4043,7 @@
   sequence_manager()->SetTimeDomain(domain.get());
 
   // Tidy up.
-  queue->ShutdownTaskQueue();
+  queue.reset();
   sequence_manager()->ResetTimeDomain();
 }
 
@@ -4479,10 +4440,13 @@
 }
 
 TEST_P(SequenceManagerTest, TaskQueueTaskRunnerDetach) {
-  TaskQueue::Handle queue1 = CreateTaskQueue();
-  EXPECT_TRUE(queue1->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask)));
-  queue1->ShutdownTaskQueue();
-  EXPECT_FALSE(queue1->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask)));
+  scoped_refptr<SingleThreadTaskRunner> task_runner;
+  {
+    TaskQueue::Handle queue1 = CreateTaskQueue();
+    task_runner = queue1->task_runner();
+    EXPECT_TRUE(task_runner->PostTask(FROM_HERE, BindOnce(&NopTask)));
+  }
+  EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&NopTask)));
 
   // Create without a sequence manager.
   std::unique_ptr<TaskQueueImpl> queue2 = std::make_unique<TaskQueueImpl>(
@@ -5118,7 +5082,7 @@
   queue_1->task_runner()->PostDelayedTask(
       FROM_HERE, PostOnDestruction(queue_2.get(), task.Get()), Minutes(1));
 
-  queue_1->ShutdownTaskQueue();
+  queue_1.reset();
 
   FastForwardUntilNoTasksRemain();
 }
@@ -5138,7 +5102,7 @@
 
   // Wakeup time needs to be adjusted to kDelay * 2 when the queue is
   // unregistered from the TimeDomain
-  queue_1->ShutdownTaskQueue();
+  queue_1.reset();
 
   RunLoop().RunUntilIdle();
 }
diff --git a/base/task/sequence_manager/task_queue.cc b/base/task/sequence_manager/task_queue.cc
index 19871f2..75bb6b7 100644
--- a/base/task/sequence_manager/task_queue.cc
+++ b/base/task/sequence_manager/task_queue.cc
@@ -18,8 +18,7 @@
 #include "base/trace_event/base_tracing.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
-namespace base {
-namespace sequence_manager {
+namespace base::sequence_manager {
 
 TaskQueue::QueueEnabledVoter::QueueEnabledVoter(
     WeakPtr<internal::TaskQueueImpl> task_queue)
@@ -43,28 +42,6 @@
   }
 }
 
-TaskQueue::TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
-                     const TaskQueue::Spec& spec)
-    : impl_(std::move(impl)),
-      sequence_manager_(impl_->GetSequenceManagerWeakPtr()),
-      associated_thread_((impl_->sequence_manager())
-                             ? impl_->sequence_manager()->associated_thread()
-                             : MakeRefCounted<internal::AssociatedThreadId>()),
-      default_task_runner_(impl_->CreateTaskRunner(kTaskTypeNone)),
-      name_(impl_->GetProtoName()) {}
-
-TaskQueue::~TaskQueue() {
-  // scoped_refptr guarantees us that this object isn't used.
-  if (!impl_)
-    return;
-  if (impl_->IsUnregistered())
-    return;
-
-  // If we've not been unregistered then this must occur on the main thread.
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  ShutdownTaskQueue();
-}
-
 TaskQueue::TaskTiming::TaskTiming(bool has_wall_time, bool has_thread_time)
     : has_wall_time_(has_wall_time), has_thread_time_(has_thread_time) {}
 
@@ -90,200 +67,39 @@
     end_thread_time_ = base::ThreadTicks::Now();
 }
 
-void TaskQueue::ShutdownTaskQueue() {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  // TODO(crbug.com/1413795): Fix that some task queues get shut down more than
-  // once.
-  if (!impl_) {
-    return;
-  }
-  if (!sequence_manager_) {
-    TakeTaskQueueImpl().reset();
-    return;
-  }
-  sequence_manager_->UnregisterTaskQueueImpl(TakeTaskQueueImpl());
-}
-
-scoped_refptr<SingleThreadTaskRunner> TaskQueue::CreateTaskRunner(
-    TaskType task_type) {
-  // We only need to lock if we're not on the main thread.
-  base::internal::CheckedAutoLockMaybe lock(IsOnMainThread() ? &impl_lock_
-                                                             : nullptr);
-  DCHECK(impl_);
-  return impl_->CreateTaskRunner(task_type);
-}
-
-std::unique_ptr<TaskQueue::QueueEnabledVoter>
-TaskQueue::CreateQueueEnabledVoter() {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->CreateQueueEnabledVoter();
-}
-
-bool TaskQueue::IsQueueEnabled() const {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->IsQueueEnabled();
-}
-
-bool TaskQueue::IsEmpty() const {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->IsEmpty();
-}
-
-size_t TaskQueue::GetNumberOfPendingTasks() const {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->GetNumberOfPendingTasks();
-}
-
-bool TaskQueue::HasTaskToRunImmediatelyOrReadyDelayedTask() const {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->HasTaskToRunImmediatelyOrReadyDelayedTask();
-}
-
-absl::optional<WakeUp> TaskQueue::GetNextDesiredWakeUp() {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->GetNextDesiredWakeUp();
-}
-
-void TaskQueue::UpdateWakeUp(LazyNow* lazy_now) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->UpdateWakeUp(lazy_now);
-}
-
-void TaskQueue::SetQueuePriority(TaskQueue::QueuePriority priority) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->SetQueuePriority(priority);
-}
-
-TaskQueue::QueuePriority TaskQueue::GetQueuePriority() const {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  // TODO(crbug.com/1413795): change this to DCHECK(impl_) since task queues
-  // should not be used after shutdown.
-  DCHECK(impl_);
-  return impl_->GetQueuePriority();
-}
-
-void TaskQueue::AddTaskObserver(TaskObserver* task_observer) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->AddTaskObserver(task_observer);
-}
-
-void TaskQueue::RemoveTaskObserver(TaskObserver* task_observer) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->RemoveTaskObserver(task_observer);
-}
-
-void TaskQueue::InsertFence(InsertFencePosition position) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->InsertFence(position);
-}
-
-void TaskQueue::InsertFenceAt(TimeTicks time) {
-  impl_->InsertFenceAt(time);
-}
-
-void TaskQueue::RemoveFence() {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->RemoveFence();
-}
-
-bool TaskQueue::HasActiveFence() {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->HasActiveFence();
-}
-
-bool TaskQueue::BlockedByFence() const {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->BlockedByFence();
-}
-
-const char* TaskQueue::GetName() const {
-  return perfetto::protos::pbzero::SequenceManagerTask::QueueName_Name(name_);
-}
-
-void TaskQueue::WriteIntoTrace(perfetto::TracedValue context) const {
-  auto dict = std::move(context).WriteDictionary();
-  dict.Add("name", name_);
-}
-
-void TaskQueue::SetThrottler(Throttler* throttler) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  // |throttler| is guaranteed to outlive TaskQueue and TaskQueueImpl lifecycle
-  // is controlled by |this|.
-  impl_->SetThrottler(throttler);
-}
-
-void TaskQueue::ResetThrottler() {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->ResetThrottler();
-}
-
-void TaskQueue::SetShouldReportPostedTasksWhenDisabled(bool should_report) {
-  impl_->SetShouldReportPostedTasksWhenDisabled(should_report);
-}
-
-bool TaskQueue::IsOnMainThread() const {
-  return associated_thread_->IsBoundToCurrentThread();
-}
-
-std::unique_ptr<internal::TaskQueueImpl> TaskQueue::TakeTaskQueueImpl() {
-  base::internal::CheckedAutoLock lock(impl_lock_);
-  DCHECK(impl_);
-  return std::move(impl_);
-}
-
-void TaskQueue::SetOnTaskStartedHandler(OnTaskStartedHandler handler) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->SetOnTaskStartedHandler(std::move(handler));
-}
-
-void TaskQueue::SetOnTaskCompletedHandler(OnTaskCompletedHandler handler) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->SetOnTaskCompletedHandler(std::move(handler));
-}
-
-std::unique_ptr<TaskQueue::OnTaskPostedCallbackHandle>
-TaskQueue::AddOnTaskPostedHandler(OnTaskPostedHandler handler) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  return impl_->AddOnTaskPostedHandler(std::move(handler));
-}
-
-void TaskQueue::SetTaskExecutionTraceLogger(TaskExecutionTraceLogger logger) {
-  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
-  DCHECK(impl_);
-  impl_->SetTaskExecutionTraceLogger(std::move(logger));
-}
-
-TaskQueue::Handle::Handle(std::unique_ptr<TaskQueue> task_queue)
-    : task_queue_(std::move(task_queue)) {}
+TaskQueue::Handle::Handle(std::unique_ptr<internal::TaskQueueImpl> task_queue)
+    : task_queue_(std::move(task_queue)),
+      sequence_manager_(task_queue_->GetSequenceManagerWeakPtr()) {}
 
 TaskQueue::Handle::Handle() = default;
 
-// TODO(crbug.com/1143007): Once TaskQueueImpl inherits from task queue, pass
-// `task_queue` to sequence manager.
-TaskQueue::Handle::~Handle() = default;
+TaskQueue::Handle::~Handle() {
+  reset();
+}
+
+TaskQueue* TaskQueue::Handle::get() const {
+  return task_queue_.get();
+}
+
+TaskQueue* TaskQueue::Handle::operator->() const {
+  return task_queue_.get();
+}
+
+void TaskQueue::Handle::reset() {
+  if (!task_queue_) {
+    return;
+  }
+  // The sequence manager has already unregistered the task queue.
+  if (task_queue_->IsUnregistered()) {
+    task_queue_.reset();
+    return;
+  }
+  CHECK(sequence_manager_);
+  sequence_manager_->UnregisterTaskQueueImpl(std::move(task_queue_));
+}
 
 TaskQueue::Handle::Handle(TaskQueue::Handle&& other) = default;
 
 TaskQueue::Handle& TaskQueue::Handle::operator=(TaskQueue::Handle&&) = default;
 
-}  // namespace sequence_manager
-}  // namespace base
+}  // namespace base::sequence_manager
diff --git a/base/task/sequence_manager/task_queue.h b/base/task/sequence_manager/task_queue.h
index b86014a..f828166 100644
--- a/base/task/sequence_manager/task_queue.h
+++ b/base/task/sequence_manager/task_queue.h
@@ -35,17 +35,23 @@
 using QueueName = ::perfetto::protos::pbzero::SequenceManagerTask::QueueName;
 
 namespace internal {
-class AssociatedThreadId;
 class SequenceManagerImpl;
 class TaskQueueImpl;
 }  // namespace internal
 
-// TODO(crbug.com/1143007): Make TaskQueue to actually be an interface for
-// TaskQueueImpl.
+// A `TaskQueue` represents an ordered list of tasks sharing common properties,
+// e.g. priority, throttling, etc. `TaskQueue`s are associated with a
+// `SequenceManager` instance, which chooses the next task from its set of
+// queues. `TaskQueue`s should typically be used on a single thread since most
+// methods are not thread safe (enforced via CHECKs), but cross-thread task
+// posting is supported with thread-safe task runners.
 //
-// NOTE: TaskQueue is destroyed when its Handle is destroyed, at which point
-// TaskQueueImpl gets unregistered, meaning it stops posting new tasks and is
-// scheduled for deletion after the current task finishes.
+// A `TaskQueue` is unregistered (stops accepting and running tasks) when either
+// its associated `TaskQueue::Handle` or `SequenceManager` is destroyed. If the
+// handle is destroyed while the `SequenceManager` is still alive, the
+// `SequenceManager` takes ownership of the queue and schedules it for deletion
+// after the current task finishes. Otherwise, if the handle outlives the
+// sequence manager, the queue is destroyed when the handle is destroyed.
 class BASE_EXPORT TaskQueue {
  public:
   // Interface that lets a task queue be throttled by changing the wake up time
@@ -96,33 +102,28 @@
   // unique_ptr-like interface, and it's responsible for managing the queue's
   // lifetime, ensuring the queue is properly unregistered with the queue's
   // `SequenceManager` when the handle is destroyed.
-  //
-  // TODO(crbug.com/1143007): As part of making TaskQueue an interface that
-  // TaskQueueImpl implements, this handle will need to pass the queue to
-  // sequence manager so it's not destroyed until the current task finishes.
   class BASE_EXPORT Handle {
    public:
     Handle();
-    explicit Handle(std::unique_ptr<TaskQueue> task_queue);
 
     Handle(Handle&&);
     Handle& operator=(Handle&&);
 
     ~Handle();
 
-    void reset() { task_queue_.reset(); }
-
-    TaskQueue* get() const { return task_queue_.get(); }
-    TaskQueue* operator->() const { return task_queue_.get(); }
+    void reset();
+    TaskQueue* get() const;
+    TaskQueue* operator->() const;
 
     explicit operator bool() const { return !!task_queue_; }
 
    private:
-    std::unique_ptr<TaskQueue> task_queue_;
-  };
+    friend class internal::SequenceManagerImpl;
+    explicit Handle(std::unique_ptr<internal::TaskQueueImpl> task_queue);
 
-  // Shuts down the queue. All tasks currently queued will be discarded.
-  virtual void ShutdownTaskQueue();
+    std::unique_ptr<internal::TaskQueueImpl> task_queue_;
+    WeakPtr<internal::SequenceManagerImpl> sequence_manager_;
+  };
 
   // Queues with higher priority (smaller number) are selected to run before
   // queues of lower priority. Note that there is no starvation protection,
@@ -173,13 +174,6 @@
     bool non_waking = false;
   };
 
-  // TODO(altimin): Make this private after TaskQueue/TaskQueueImpl refactoring.
-  TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
-            const TaskQueue::Spec& spec);
-  TaskQueue(const TaskQueue&) = delete;
-  TaskQueue& operator=(const TaskQueue&) = delete;
-  virtual ~TaskQueue();
-
   // Information about task execution.
   //
   // Wall-time related methods (start_time, end_time, wall_duration) can be
@@ -268,43 +262,44 @@
     bool enabled_ = true;
   };
 
+  TaskQueue(const TaskQueue&) = delete;
+  TaskQueue& operator=(const TaskQueue&) = delete;
+  virtual ~TaskQueue() = default;
+
   // Returns an interface that allows the caller to vote on whether or not this
   // TaskQueue is enabled. The TaskQueue will be enabled if there are no voters
   // or if all agree it should be enabled.
   // NOTE this must be called on the thread this TaskQueue was created by.
-  std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter();
+  virtual std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter() = 0;
 
   // NOTE this must be called on the thread this TaskQueue was created by.
-  bool IsQueueEnabled() const;
+  virtual bool IsQueueEnabled() const = 0;
 
   // Returns true if the queue is completely empty.
-  bool IsEmpty() const;
+  virtual bool IsEmpty() const = 0;
 
   // Returns the number of pending tasks in the queue.
-  size_t GetNumberOfPendingTasks() const;
+  virtual size_t GetNumberOfPendingTasks() const = 0;
 
   // Returns true iff this queue has immediate tasks or delayed tasks that are
   // ripe for execution. Ignores the queue's enabled state and fences.
   // NOTE: this must be called on the thread this TaskQueue was created by.
   // TODO(etiennep): Rename to HasReadyTask() and add LazyNow parameter.
-  bool HasTaskToRunImmediatelyOrReadyDelayedTask() const;
+  virtual bool HasTaskToRunImmediatelyOrReadyDelayedTask() const = 0;
 
   // Returns a wake-up for the next pending delayed task (pending delayed tasks
   // that are ripe may be ignored), ignoring Throttler is any. If there are no
   // such tasks (immediate tasks don't count) or the queue is disabled it
   // returns nullopt.
   // NOTE: this must be called on the thread this TaskQueue was created by.
-  absl::optional<WakeUp> GetNextDesiredWakeUp();
+  virtual absl::optional<WakeUp> GetNextDesiredWakeUp() = 0;
 
   // Can be called on any thread.
-  virtual const char* GetName() const;
-
-  // Serialise this object into a trace.
-  void WriteIntoTrace(perfetto::TracedValue context) const;
+  virtual const char* GetName() const = 0;
 
   // Set the priority of the queue to |priority|. NOTE this must be called on
   // the thread this TaskQueue was created by.
-  void SetQueuePriority(QueuePriority priority);
+  virtual void SetQueuePriority(QueuePriority priority) = 0;
 
   // Same as above but with an enum value as the priority.
   template <typename T, typename = typename std::enable_if_t<std::is_enum_v<T>>>
@@ -316,12 +311,12 @@
   }
 
   // Returns the current queue priority.
-  QueuePriority GetQueuePriority() const;
+  virtual QueuePriority GetQueuePriority() const = 0;
 
   // These functions can only be called on the same thread that the task queue
   // manager executes its tasks on.
-  void AddTaskObserver(TaskObserver* task_observer);
-  void RemoveTaskObserver(TaskObserver* task_observer);
+  virtual void AddTaskObserver(TaskObserver* task_observer) = 0;
+  virtual void RemoveTaskObserver(TaskObserver* task_observer) = 0;
 
   enum class InsertFencePosition {
     kNow,  // Tasks posted on the queue up till this point further may run.
@@ -345,60 +340,54 @@
   //
   // Only one fence can be scheduled at a time. Inserting a new fence
   // will automatically remove the previous one, regardless of fence type.
-  void InsertFence(InsertFencePosition position);
+  virtual void InsertFence(InsertFencePosition position) = 0;
 
   // Delayed fences are only allowed for queues created with
   // SetDelayedFencesAllowed(true) because this feature implies sampling Now()
   // (which isn't free) for every PostTask, even those with zero delay.
-  void InsertFenceAt(TimeTicks time);
+  virtual void InsertFenceAt(TimeTicks time) = 0;
 
   // Removes any previously added fence and unblocks execution of any tasks
   // blocked by it.
-  void RemoveFence();
+  virtual void RemoveFence() = 0;
 
   // Returns true if the queue has a fence but it isn't necessarily blocking
   // execution of tasks (it may be the case if tasks enqueue order hasn't
   // reached the number set for a fence).
-  bool HasActiveFence();
+  virtual bool HasActiveFence() = 0;
 
   // Returns true if the queue has a fence which is blocking execution of tasks.
-  bool BlockedByFence() const;
+  virtual bool BlockedByFence() const = 0;
 
   // Associates |throttler| to this queue. Only one throttler can be associated
   // with this queue. |throttler| must outlive this TaskQueue, or remain valid
   // until ResetThrottler().
-  void SetThrottler(Throttler* throttler);
+  virtual void SetThrottler(Throttler* throttler) = 0;
   // Disassociates the current throttler from this queue, if any.
-  void ResetThrottler();
+  virtual void ResetThrottler() = 0;
 
   // Updates the task queue's next wake up time in its time domain, taking into
   // account the desired run time of queued tasks and policies enforced by the
   // throttler if any.
-  void UpdateWakeUp(LazyNow* lazy_now);
+  virtual void UpdateWakeUp(LazyNow* lazy_now) = 0;
 
   // Controls whether or not the queue will emit traces events when tasks are
   // posted to it while disabled. This only applies for the current or next
   // period during which the queue is disabled. When the queue is re-enabled
   // this will revert back to the default value of false.
-  void SetShouldReportPostedTasksWhenDisabled(bool should_report);
+  virtual void SetShouldReportPostedTasksWhenDisabled(bool should_report) = 0;
 
   // Create a task runner for this TaskQueue which will annotate all
   // posted tasks with the given task type.
-  // May be called on any thread.
-  // NOTE: Task runners don't hold a reference to a TaskQueue, hence,
-  // it's required to retain that reference to prevent automatic graceful
-  // shutdown. Unique ownership of task queues will fix this issue soon.
-  scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(TaskType task_type);
+  // Must be called on the thread this task queue is associated with.
+  //
+  // NOTE: Task runners don't keep the TaskQueue alive, so a task queue can be
+  // deleted while its task runners still exist; posting a task then fails.
+  virtual scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
+      TaskType task_type) const = 0;
 
   // Default task runner which doesn't annotate tasks with a task type.
-  const scoped_refptr<SingleThreadTaskRunner>& task_runner() const {
-    return default_task_runner_;
-  }
-
-  // Checks whether or not this TaskQueue has a TaskQueueImpl.
-  // TODO(crbug.com/1143007): Remove this method when TaskQueueImpl inherits
-  // from TaskQueue and TaskQueue no longer owns an Impl.
-  bool HasImpl() { return !!impl_; }
+  virtual const scoped_refptr<SingleThreadTaskRunner>& task_runner() const = 0;
 
   using OnTaskStartedHandler =
       RepeatingCallback<void(const Task&, const TaskQueue::TaskTiming&)>;
@@ -410,14 +399,14 @@
 
   // Sets a handler to subscribe for notifications about started and completed
   // tasks.
-  void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
+  virtual void SetOnTaskStartedHandler(OnTaskStartedHandler handler) = 0;
 
   // |task_timing| may be passed in Running state and may not have the end time,
   // so that the handler can run an additional task that is counted as a part of
   // the main task.
   // The handler can call TaskTiming::RecordTaskEnd, which is optional, to
   // finalize the task, and use the resulting timing.
-  void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
+  virtual void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler) = 0;
 
   // RAII handle associated with an OnTaskPostedHandler. Unregisters the handler
   // upon destruction.
@@ -440,46 +429,15 @@
   // not be a null callback. Must be called on the thread this task queue is
   // associated with, and the handle returned must be destroyed on the same
   // thread.
-  [[nodiscard]] std::unique_ptr<OnTaskPostedCallbackHandle>
-  AddOnTaskPostedHandler(OnTaskPostedHandler handler);
+  [[nodiscard]] virtual std::unique_ptr<OnTaskPostedCallbackHandle>
+  AddOnTaskPostedHandler(OnTaskPostedHandler handler) = 0;
 
   // Set a callback to fill trace event arguments associated with the task
   // execution.
-  void SetTaskExecutionTraceLogger(TaskExecutionTraceLogger logger);
+  virtual void SetTaskExecutionTraceLogger(TaskExecutionTraceLogger logger) = 0;
 
-  // TODO(crbug.com/1143007): Remove this once TaskQueueImpl inherits TaskQueue.
-  internal::TaskQueueImpl* GetTaskQueueImplForTest() const {
-    return impl_.get();
-  }
-
- private:
-  friend class RefCountedThreadSafe<TaskQueue>;
-  friend class internal::SequenceManagerImpl;
-  friend class internal::TaskQueueImpl;
-
-  bool IsOnMainThread() const;
-
-  // TaskQueue has ownership of an underlying implementation but in certain
-  // cases (e.g. detached frames) their lifetime may diverge.
-  // This method should be used to take away the impl for graceful shutdown.
-  // TaskQueue will disregard any calls or posting tasks thereafter.
-  std::unique_ptr<internal::TaskQueueImpl> TakeTaskQueueImpl();
-
-  // |impl_| can be written to on the main thread but can be read from
-  // any thread.
-  // |impl_lock_| must be acquired when writing to |impl_| or when accessing
-  // it from non-main thread. Reading from the main thread does not require
-  // a lock.
-  mutable base::internal::CheckedLock impl_lock_{
-      base::internal::UniversalPredecessor{}};
-  std::unique_ptr<internal::TaskQueueImpl> impl_;
-
-  const WeakPtr<internal::SequenceManagerImpl> sequence_manager_;
-
-  const scoped_refptr<const internal::AssociatedThreadId> associated_thread_;
-  const scoped_refptr<SingleThreadTaskRunner> default_task_runner_;
-
-  QueueName name_;
+ protected:
+  TaskQueue() = default;
 };
 
 }  // namespace sequence_manager
diff --git a/base/task/sequence_manager/task_queue_impl.cc b/base/task/sequence_manager/task_queue_impl.cc
index 3ea4604..841d8e6 100644
--- a/base/task/sequence_manager/task_queue_impl.cc
+++ b/base/task/sequence_manager/task_queue_impl.cc
@@ -61,6 +61,9 @@
 bool g_is_remove_canceled_tasks_in_task_queue_enabled = true;
 bool g_is_sweep_cancelled_tasks_enabled =
     kSweepCancelledTasks.default_state == FEATURE_ENABLED_BY_DEFAULT;
+// An atomic is used here because the value is queried from other threads when
+// tasks are posted cross-thread, which can race with its initialization.
+std::atomic<base::TimeDelta> g_max_precise_delay{kDefaultMaxPreciseDelay};
 #if BUILDFLAG(IS_WIN)
 // An atomic is used here because the flag is queried from other threads when
 // tasks are posted cross-thread, which can race with its initialization.
@@ -185,6 +188,7 @@
   ApplyRemoveCanceledTasksInTaskQueue();
   g_is_sweep_cancelled_tasks_enabled =
       FeatureList::IsEnabled(kSweepCancelledTasks);
+  g_max_precise_delay = kMaxPreciseDelay.Get();
 #if BUILDFLAG(IS_WIN)
   g_explicit_high_resolution_timer_win.store(
       FeatureList::IsEnabled(kExplicitHighResolutionTimerWin),
@@ -229,7 +233,8 @@
               : AtomicFlagSet::AtomicFlag()),
       should_monitor_quiescence_(spec.should_monitor_quiescence),
       should_notify_observers_(spec.should_notify_observers),
-      delayed_fence_allowed_(spec.delayed_fence_allowed) {
+      delayed_fence_allowed_(spec.delayed_fence_allowed),
+      default_task_runner_(CreateTaskRunner(kTaskTypeNone)) {
   UpdateCrossThreadQueueStateLocked();
   // SequenceManager can't be set later, so we need to prevent task runners
   // from posting any tasks.
@@ -268,10 +273,16 @@
 
 scoped_refptr<SingleThreadTaskRunner> TaskQueueImpl::CreateTaskRunner(
     TaskType task_type) const {
+  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
   return MakeRefCounted<TaskRunner>(task_poster_, associated_thread_,
                                     task_type);
 }
 
+const scoped_refptr<SingleThreadTaskRunner>& TaskQueueImpl::task_runner()
+    const {
+  return default_task_runner_;
+}
+
 void TaskQueueImpl::UnregisterTaskQueue() {
   TRACE_EVENT0("base", "TaskQueueImpl::UnregisterTaskQueue");
   // Invalidate weak pointers now so no voters reference this in a partially
@@ -1059,12 +1070,11 @@
           delayed_task.delay_or_delayed_run_time)) {
     delay = absl::get<base::TimeDelta>(delayed_task.delay_or_delayed_run_time);
     delayed_task.delay_or_delayed_run_time = lazy_now->Now() + delay;
-  }
-#if BUILDFLAG(IS_WIN)
-  else if (!explicit_high_resolution_timer_win) {
+  } else {
     delay = absl::get<base::TimeTicks>(delayed_task.delay_or_delayed_run_time) -
             lazy_now->Now();
   }
+#if BUILDFLAG(IS_WIN)
   if (!explicit_high_resolution_timer_win &&
       delay < (2 * base::Milliseconds(Time::kMinLowResolutionThresholdMs))) {
     // Outside the kExplicitHighResolutionTimerWin experiment, we consider the
@@ -1075,6 +1085,9 @@
     resolution = WakeUpResolution::kHigh;
   }
 #endif  // BUILDFLAG(IS_WIN)
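+  // A delay longer than the configured maximum precise delay gains nothing
+  // from precise timing, so the delay policy may be relaxed here.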
+  delayed_task.delay_policy = subtle::MaybeOverrideDelayPolicy(
+      delayed_task.delay_policy, delay,
+      g_max_precise_delay.load(std::memory_order_relaxed));
   // leeway isn't specified yet since this may be called from any thread.
   return Task(std::move(delayed_task), sequence_number, EnqueueOrder(),
               lazy_now->Now(), resolution);
@@ -1257,7 +1270,7 @@
   DCHECK(!main_thread_only().throttler)
       << "Can't assign two different throttlers to "
          "base::sequence_manager:TaskQueue";
-
+  // `throttler` is guaranteed to outlive this object.
   main_thread_only().throttler = throttler;
 }
 
@@ -1552,6 +1565,10 @@
   voter_weak_ptr_factory_.BindToCurrentSequence(PassKey<TaskQueueImpl>());
 }
 
+TaskQueue::QueuePriority TaskQueueImpl::DefaultPriority() const {
+  return sequence_manager()->settings().priority_settings.default_priority();
+}
+
 TaskQueueImpl::DelayedIncomingQueue::DelayedIncomingQueue() = default;
 TaskQueueImpl::DelayedIncomingQueue::~DelayedIncomingQueue() = default;
 
@@ -1639,10 +1656,6 @@
     task_queue_impl_->RemoveOnTaskPostedHandler(this);
 }
 
-TaskQueue::QueuePriority TaskQueueImpl::DefaultPriority() const {
-  return sequence_manager()->settings().priority_settings.default_priority();
-}
-
 }  // namespace internal
 }  // namespace sequence_manager
 }  // namespace base
diff --git a/base/task/sequence_manager/task_queue_impl.h b/base/task/sequence_manager/task_queue_impl.h
index b19b27e..6544d53 100644
--- a/base/task/sequence_manager/task_queue_impl.h
+++ b/base/task/sequence_manager/task_queue_impl.h
@@ -74,7 +74,7 @@
 // queue is selected, it round-robins between the |immediate_work_queue| and
 // |delayed_work_queue|.  The reason for this is we want to make sure delayed
 // tasks (normally the most common type) don't starve out immediate work.
-class BASE_EXPORT TaskQueueImpl {
+class BASE_EXPORT TaskQueueImpl : public TaskQueue {
  public:
   // Initializes the state of all the task queue features. Must be invoked
   // after FeatureList initialization and while Chrome is still single-threaded.
@@ -95,7 +95,7 @@
 
   TaskQueueImpl(const TaskQueueImpl&) = delete;
   TaskQueueImpl& operator=(const TaskQueueImpl&) = delete;
-  ~TaskQueueImpl();
+  ~TaskQueueImpl() override;
 
   // Types of queues TaskQueueImpl is maintaining internally.
   enum class WorkQueueType { kImmediate, kDelayed };
@@ -124,35 +124,41 @@
   using TaskExecutionTraceLogger =
       RepeatingCallback<void(perfetto::EventContext&, const Task&)>;
 
-  // May be called from any thread.
-  scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
-      TaskType task_type) const;
-
   // TaskQueue implementation.
-  const char* GetName() const;
-  QueueName GetProtoName() const;
-  bool IsQueueEnabled() const;
-  void SetQueueEnabled(bool enabled);
-  void SetShouldReportPostedTasksWhenDisabled(bool should_report);
-  bool IsEmpty() const;
-  size_t GetNumberOfPendingTasks() const;
-  bool HasTaskToRunImmediatelyOrReadyDelayedTask() const;
-  absl::optional<WakeUp> GetNextDesiredWakeUp();
-  void SetQueuePriority(TaskQueue::QueuePriority priority);
-  TaskQueue::QueuePriority GetQueuePriority() const;
-  void AddTaskObserver(TaskObserver* task_observer);
-  void RemoveTaskObserver(TaskObserver* task_observer);
-  void InsertFence(TaskQueue::InsertFencePosition position);
-  void InsertFenceAt(TimeTicks time);
-  void RemoveFence();
-  bool HasActiveFence();
-  bool BlockedByFence() const;
-  void SetThrottler(TaskQueue::Throttler* throttler);
-  void ResetThrottler();
-  std::unique_ptr<TaskQueue::QueueEnabledVoter> CreateQueueEnabledVoter();
+  const char* GetName() const override;
+  bool IsQueueEnabled() const override;
+  bool IsEmpty() const override;
+  size_t GetNumberOfPendingTasks() const override;
+  bool HasTaskToRunImmediatelyOrReadyDelayedTask() const override;
+  absl::optional<WakeUp> GetNextDesiredWakeUp() override;
+  void SetQueuePriority(TaskQueue::QueuePriority priority) override;
+  TaskQueue::QueuePriority GetQueuePriority() const override;
+  void AddTaskObserver(TaskObserver* task_observer) override;
+  void RemoveTaskObserver(TaskObserver* task_observer) override;
+  void InsertFence(TaskQueue::InsertFencePosition position) override;
+  void InsertFenceAt(TimeTicks time) override;
+  void RemoveFence() override;
+  bool HasActiveFence() override;
+  bool BlockedByFence() const override;
+  void SetThrottler(TaskQueue::Throttler* throttler) override;
+  void ResetThrottler() override;
+  void UpdateWakeUp(LazyNow* lazy_now) override;
+  void SetShouldReportPostedTasksWhenDisabled(bool should_report) override;
+  scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
+      TaskType task_type) const override;
+  const scoped_refptr<SingleThreadTaskRunner>& task_runner() const override;
+  void SetOnTaskStartedHandler(OnTaskStartedHandler handler) override;
+  void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler) override;
+  [[nodiscard]] std::unique_ptr<TaskQueue::OnTaskPostedCallbackHandle>
+  AddOnTaskPostedHandler(OnTaskPostedHandler handler) override;
+  void SetTaskExecutionTraceLogger(TaskExecutionTraceLogger logger) override;
+  std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter() override;
 
+  void SetQueueEnabled(bool enabled);
   void UnregisterTaskQueue();
 
+  QueueName GetProtoName() const;
+
   // Returns true if a (potentially hypothetical) task with the specified
   // |enqueue_order| could run on the queue. Must be called from the main
   // thread.
@@ -245,36 +251,13 @@
   // addition MaybeShrinkQueue is called on all internal queues.
   void ReclaimMemory(TimeTicks now);
 
-  // Allows wrapping TaskQueue to set a handler to subscribe for notifications
-  // about started and completed tasks.
-  void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
   void OnTaskStarted(const Task& task,
                      const TaskQueue::TaskTiming& task_timing);
-
-  // |task_timing| may be passed in Running state and may not have the end time,
-  // so that the handler can run an additional task that is counted as a part of
-  // the main task.
-  // The handler can call TaskTiming::RecordTaskEnd, which is optional, to
-  // finalize the task, and use the resulting timing.
-  void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
   void OnTaskCompleted(const Task& task,
                        TaskQueue::TaskTiming* task_timing,
                        LazyNow* lazy_now);
   bool RequiresTaskTiming() const;
 
-  // Add a callback for adding custom functionality for processing posted task.
-  // Callback will be dispatched while holding a scheduler lock. As a result,
-  // callback should not call scheduler APIs directly, as this can lead to
-  // deadlocks. For example, PostTask should not be called directly and
-  // ScopedDeferTaskPosting::PostOrDefer should be used instead. `handler` must
-  // not be a null callback.
-  [[nodiscard]] std::unique_ptr<TaskQueue::OnTaskPostedCallbackHandle>
-  AddOnTaskPostedHandler(OnTaskPostedHandler handler);
-
-  // Set a callback to fill trace event arguments associated with the task
-  // execution.
-  void SetTaskExecutionTraceLogger(TaskExecutionTraceLogger logger);
-
   WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();
 
   SequenceManagerImpl* sequence_manager() const { return sequence_manager_; }
@@ -283,10 +266,9 @@
   // and this queue can be safely deleted on any thread.
   bool IsUnregistered() const;
 
-  // Updates this queue's next wake up time in the time domain,
-  // taking into account the desired run time of queued tasks and
-  // policies enforced by the Throttler.
-  void UpdateWakeUp(LazyNow* lazy_now);
+  // Called by the associated sequence manager when it becomes bound. Updates
+  // the weak pointer stored in voters with one bound to the correct thread.
+  void CompleteInitializationOnBoundThread();
 
   void AddQueueEnabledVoter(bool voter_is_enabled,
                             TaskQueue::QueueEnabledVoter& voter);
@@ -294,9 +276,6 @@
                                TaskQueue::QueueEnabledVoter& voter);
   void OnQueueEnabledVoteChanged(bool enabled);
 
-  // Called by the associated sequence manager when it becomes bound.
-  void CompleteInitializationOnBoundThread();
-
  protected:
   // Sets this queue's next wake up time to |wake_up| in the time domain.
   void SetNextWakeUp(LazyNow* lazy_now, absl::optional<WakeUp> wake_up);
@@ -639,6 +618,8 @@
   const bool should_notify_observers_;
   const bool delayed_fence_allowed_;
 
+  const scoped_refptr<SingleThreadTaskRunner> default_task_runner_;
+
   base::WeakPtrFactory<TaskQueueImpl> voter_weak_ptr_factory_{this};
 };
 
diff --git a/base/task/sequence_manager/task_queue_unittest.cc b/base/task/sequence_manager/task_queue_unittest.cc
index 0816cad..3fff097 100644
--- a/base/task/sequence_manager/task_queue_unittest.cc
+++ b/base/task/sequence_manager/task_queue_unittest.cc
@@ -81,7 +81,7 @@
       queue->CreateQueueEnabledVoter();
 
   voter->SetVoteToEnable(true);  // NOP
-  queue->ShutdownTaskQueue();
+  queue.reset();
 
   // This should complete without DCHECKing.
   voter.reset();
@@ -97,7 +97,7 @@
       queue->CreateQueueEnabledVoter();
 
   voter->SetVoteToEnable(false);
-  queue->ShutdownTaskQueue();
+  queue.reset();
 
   // This should complete without DCHECKing.
   voter.reset();
diff --git a/base/task/sequence_manager/tasks.cc b/base/task/sequence_manager/tasks.cc
index 79fdb2f..5bf10c7 100644
--- a/base/task/sequence_manager/tasks.cc
+++ b/base/task/sequence_manager/tasks.cc
@@ -39,7 +39,7 @@
   // and it may wrap around to a negative number during the static cast, hence,
   // TaskQueueImpl::DelayedIncomingQueue is especially sensitive to a potential
   // change of |PendingTask::sequence_num|'s type.
-  static_assert(std::is_same<decltype(sequence_num), int>::value, "");
+  static_assert(std::is_same_v<decltype(sequence_num), int>, "");
   sequence_num = static_cast<int>(sequence_order);
   this->is_high_res = resolution == WakeUpResolution::kHigh;
 }
diff --git a/base/task/sequence_manager/test/mock_time_message_pump_unittest.cc b/base/task/sequence_manager/test/mock_time_message_pump_unittest.cc
index 3c24e7c..b97350d 100644
--- a/base/task/sequence_manager/test/mock_time_message_pump_unittest.cc
+++ b/base/task/sequence_manager/test/mock_time_message_pump_unittest.cc
@@ -137,8 +137,8 @@
   const auto kStartTime = mock_clock.NowTicks();
   const auto kNextDelayedWorkTime = kStartTime + Seconds(2);
 
-  pump.ScheduleDelayedWork(
-      MessagePump::Delegate::NextWorkInfo{kNextDelayedWorkTime, kStartTime});
+  pump.ScheduleDelayedWork(MessagePump::Delegate::NextWorkInfo{
+      kNextDelayedWorkTime, TimeDelta(), kStartTime});
 
   EXPECT_THAT(pump.next_wake_up_time(), Eq(kNextDelayedWorkTime));
 }
diff --git a/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
index 0a95238..2826030 100644
--- a/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
+++ b/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
@@ -26,7 +26,7 @@
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
 #if BUILDFLAG(IS_IOS)
-#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/message_pump_apple.h"
 #elif BUILDFLAG(IS_ANDROID)
 #include "base/message_loop/message_pump_android.h"
 #endif
@@ -66,38 +66,22 @@
              base::FEATURE_DISABLED_BY_DEFAULT);
 #endif
 
-std::atomic_bool g_align_wake_ups = false;
 std::atomic_bool g_run_tasks_by_batches = false;
-#if BUILDFLAG(IS_WIN)
-bool g_explicit_high_resolution_timer_win = true;
-#endif  // BUILDFLAG(IS_WIN)
 
-TimeTicks WakeUpRunTime(const WakeUp& wake_up) {
-  // Windows relies on the low resolution timer rather than manual wake up
-  // alignment.
-#if BUILDFLAG(IS_WIN)
-  if (g_explicit_high_resolution_timer_win)
-    return wake_up.earliest_time();
-#else  // BUILDFLAG(IS_WIN)
-  if (g_align_wake_ups.load(std::memory_order_relaxed)) {
-    TimeTicks aligned_run_time = wake_up.earliest_time().SnappedToNextTick(
-        TimeTicks(), GetTaskLeewayForCurrentThread());
-    return std::min(aligned_run_time, wake_up.latest_time());
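+// Returns the leeway to forward to the MessagePump for |wake_up|: zero when
+// there is no wake-up or when its delay policy is kPrecise, otherwise the
+// wake-up's own leeway.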
+base::TimeDelta GetLeewayForWakeUp(absl::optional<WakeUp> wake_up) {
+  if (!wake_up || wake_up->delay_policy == subtle::DelayPolicy::kPrecise) {
+    return TimeDelta();
   }
-#endif
-  return wake_up.time;
+  return wake_up->leeway;
 }
 
 }  // namespace
 
 // static
 void ThreadControllerWithMessagePumpImpl::InitializeFeatures() {
-  g_align_wake_ups = FeatureList::IsEnabled(kAlignWakeUps);
   g_run_tasks_by_batches.store(FeatureList::IsEnabled(kRunTasksByBatches),
                                std::memory_order_relaxed);
 #if BUILDFLAG(IS_WIN)
-  g_explicit_high_resolution_timer_win =
-      FeatureList::IsEnabled(kExplicitHighResolutionTimerWin);
   g_use_less_high_res_timers.store(
       FeatureList::IsEnabled(kUseLessHighResTimers), std::memory_order_relaxed);
   if (FeatureList::IsEnabled(kAlwaysUseHighResTimers)) {
@@ -108,9 +92,6 @@
 
 // static
 void ThreadControllerWithMessagePumpImpl::ResetFeatures() {
-  g_align_wake_ups.store(
-      kAlignWakeUps.default_state == FEATURE_ENABLED_BY_DEFAULT,
-      std::memory_order_relaxed);
   g_run_tasks_by_batches.store(
       kRunTasksByBatches.default_state == FEATURE_ENABLED_BY_DEFAULT,
       std::memory_order_relaxed);
@@ -211,27 +192,27 @@
     LazyNow* lazy_now,
     absl::optional<WakeUp> wake_up) {
   DCHECK(!wake_up || !wake_up->is_immediate());
-  TimeTicks run_time =
-      wake_up.has_value() ? WakeUpRunTime(*wake_up) : TimeTicks::Max();
-  DCHECK_LT(lazy_now->Now(), run_time);
-
-  if (main_thread_only().next_delayed_do_work == run_time)
-    return;
-  main_thread_only().next_delayed_do_work = run_time;
-
   // It's very rare for PostDelayedTask to be called outside of a DoWork in
   // production, so most of the time this does nothing.
-  if (work_deduplicator_.OnDelayedWorkRequested() ==
+  if (work_deduplicator_.OnDelayedWorkRequested() !=
       ShouldScheduleWork::kScheduleImmediate) {
-    // Cap at one day but remember the exact time for the above equality check
-    // on the next round.
-    if (!run_time.is_max())
-      run_time = CapAtOneDay(run_time, lazy_now);
-    // |pump_| can't be null as all postTasks are cross-thread before binding,
-    // and delayed cross-thread postTasks do the thread hop through an immediate
-    // task.
-    pump_->ScheduleDelayedWork({run_time, lazy_now->Now()});
+    return;
   }
+  TimeTicks run_time =
+      wake_up.has_value()
+          ? pump_->AdjustDelayedRunTime(wake_up->earliest_time(), wake_up->time,
+                                        wake_up->latest_time())
+          : TimeTicks::Max();
+  DCHECK_LT(lazy_now->Now(), run_time);
+
+  if (!run_time.is_max()) {
+    run_time = CapAtOneDay(run_time, lazy_now);
+  }
+  // |pump_| can't be null as all postTasks are cross-thread before binding,
+  // and delayed cross-thread postTasks do the thread hop through an immediate
+  // task.
+  pump_->ScheduleDelayedWork(
+      {run_time, GetLeewayForWakeUp(wake_up), lazy_now->Now()});
 }
 
 bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
@@ -371,20 +352,19 @@
 
   // Special-casing here avoids unnecessarily sampling Now() when out of work.
   if (!next_wake_up) {
-    main_thread_only().next_delayed_do_work = TimeTicks::Max();
     next_work_info.delayed_run_time = TimeTicks::Max();
     return next_work_info;
   }
 
   // The MessagePump will schedule the wake up on our behalf, so we need to
-  // update |main_thread_only().next_delayed_do_work|.
-  main_thread_only().next_delayed_do_work = WakeUpRunTime(*next_wake_up);
+  // update |next_work_info.delayed_run_time|.
+  TimeTicks next_delayed_do_work = pump_->AdjustDelayedRunTime(
+      next_wake_up->earliest_time(), next_wake_up->time,
+      next_wake_up->latest_time());
 
   // Don't request a run time past |main_thread_only().quit_runloop_after|.
-  if (main_thread_only().next_delayed_do_work >
-      main_thread_only().quit_runloop_after) {
-    main_thread_only().next_delayed_do_work =
-        main_thread_only().quit_runloop_after;
+  if (next_delayed_do_work > main_thread_only().quit_runloop_after) {
+    next_delayed_do_work = main_thread_only().quit_runloop_after;
     // If we've passed |quit_runloop_after| there's no more work to do.
     if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after) {
       next_work_info.delayed_run_time = TimeTicks::Max();
@@ -392,8 +372,9 @@
     }
   }
 
-  next_work_info.delayed_run_time = CapAtOneDay(
-      main_thread_only().next_delayed_do_work, &continuation_lazy_now);
+  next_work_info.delayed_run_time =
+      CapAtOneDay(next_delayed_do_work, &continuation_lazy_now);
+  next_work_info.leeway = GetLeewayForWakeUp(next_wake_up);
   next_work_info.recent_now = continuation_lazy_now.Now();
   return next_work_info;
 }
diff --git a/base/task/sequence_manager/thread_controller_with_message_pump_impl.h b/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
index 7a48b52..72e313d 100644
--- a/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
+++ b/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
@@ -130,9 +130,6 @@
     // yield to the MessagePump after |work_batch_size| work items.
     base::TimeTicks yield_to_native_after_batch = base::TimeTicks();
 
-    // When the next scheduled delayed work should run, if any.
-    TimeTicks next_delayed_do_work = TimeTicks::Max();
-
     // The time after which the runloop should quit.
     TimeTicks quit_runloop_after = TimeTicks::Max();
 
diff --git a/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc b/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
index d06bdb2..5f3e48f 100644
--- a/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
+++ b/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
@@ -1087,6 +1087,7 @@
 
   scoped_feature_list.InitAndEnableFeature(kRunTasksByBatches);
   ThreadControllerWithMessagePumpImpl::InitializeFeatures();
+  MessagePump::InitializeFeatures();
 
   int task_counter = 0;
   for (int i = 0; i < 2; i++) {
@@ -1104,6 +1105,7 @@
 
   scoped_feature_list.InitAndEnableFeature(kRunTasksByBatches);
   ThreadControllerWithMessagePumpImpl::InitializeFeatures();
+  MessagePump::InitializeFeatures();
 
   int task_counter = 0;
   for (int i = 0; i < 2; i++) {
@@ -1123,6 +1125,7 @@
 
   scoped_feature_list.InitAndEnableFeature(kRunTasksByBatches);
   ThreadControllerWithMessagePumpImpl::InitializeFeatures();
+  MessagePump::InitializeFeatures();
 
   int task_counter = 0;
 
diff --git a/base/task/sequence_manager/wake_up_queue_unittest.cc b/base/task/sequence_manager/wake_up_queue_unittest.cc
index f0f0caa..f81a582 100644
--- a/base/task/sequence_manager/wake_up_queue_unittest.cc
+++ b/base/task/sequence_manager/wake_up_queue_unittest.cc
@@ -31,7 +31,7 @@
                        WakeUpQueue* wake_up_queue,
                        const TaskQueue::Spec& spec)
       : TaskQueueImpl(sequence_manager, wake_up_queue, spec) {}
-  ~TaskQueueImplForTest() = default;
+  ~TaskQueueImplForTest() override = default;
 
   using TaskQueueImpl::SetNextWakeUp;
 };
diff --git a/base/task/sequenced_task_runner.h b/base/task/sequenced_task_runner.h
index 03529c3..c19f24a 100644
--- a/base/task/sequenced_task_runner.h
+++ b/base/task/sequenced_task_runner.h
@@ -276,7 +276,7 @@
   //   the current thread.
   virtual bool RunsTasksInCurrentSequence() const = 0;
 
-  // Returns the default SequencedThreadTaskRunner for the current task. It
+  // Returns the default SequencedTaskRunner for the current task. It
   // should only be called if HasCurrentDefault() returns true (see the comment
   // there for the requirements).
   //
diff --git a/base/task/single_thread_task_runner.cc b/base/task/single_thread_task_runner.cc
index f11574e..2b0ea0f 100644
--- a/base/task/single_thread_task_runner.cc
+++ b/base/task/single_thread_task_runner.cc
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/task/single_thread_task_runner.h b/base/task/single_thread_task_runner.h
index e89ca0d..6db207f 100644
--- a/base/task/single_thread_task_runner.h
+++ b/base/task/single_thread_task_runner.h
@@ -115,8 +115,8 @@
     // We expect SingleThreadTaskRunner::CurrentHandleOverride to be only needed
     // under special circumstances. Require them to be enumerated as friends to
     // require //base/OWNERS review. Use
-    // SingleTaskRunner::CurrentHandleOverrideForTesting in unit tests to avoid
-    // the friend requirement.
+    // SingleThreadTaskRunner::CurrentHandleOverrideForTesting in unit tests to
+    // avoid the friend requirement.
 
     friend class blink::scheduler::MainThreadSchedulerImpl;
 
diff --git a/base/task/single_thread_task_runner_unittest.cc b/base/task/single_thread_task_runner_unittest.cc
index ee06919..8101c1d 100644
--- a/base/task/single_thread_task_runner_unittest.cc
+++ b/base/task/single_thread_task_runner_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/task/task_features.cc b/base/task/task_features.cc
index 3644fb4..75a0f90 100644
--- a/base/task/task_features.cc
+++ b/base/task/task_features.cc
@@ -9,6 +9,7 @@
 #include "base/base_export.h"
 #include "base/feature_list.h"
 #include "base/threading/platform_thread.h"
+#include "build/build_config.h"
 
 namespace base {
 
@@ -49,22 +50,33 @@
 
 const base::FeatureParam<TimeDelta> kTaskLeewayParam{&kAddTaskLeewayFeature,
                                                      "leeway", kDefaultLeeway};
+const base::FeatureParam<TimeDelta> kMaxPreciseDelay{
+    &kAddTaskLeewayFeature, "max_precise_delay", kDefaultMaxPreciseDelay};
 
 BASE_FEATURE(kAlignWakeUps, "AlignWakeUps", base::FEATURE_DISABLED_BY_DEFAULT);
 
+BASE_FEATURE(kTimerSlackMac,
+             "TimerSlackMac",
+             base::FEATURE_DISABLED_BY_DEFAULT);
+
 BASE_FEATURE(kExplicitHighResolutionTimerWin,
              "ExplicitHighResolutionTimerWin",
              base::FEATURE_ENABLED_BY_DEFAULT);
 
 BASE_FEATURE(kRunTasksByBatches,
              "RunTasksByBatches",
+#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS)
+             base::FEATURE_ENABLED_BY_DEFAULT);
+#else
              base::FEATURE_DISABLED_BY_DEFAULT);
-BASE_FEATURE(kThreadPoolCap,
-             "ThreadPoolCap",
+#endif
+
+BASE_FEATURE(kThreadPoolCap2,
+             "ThreadPoolCap2",
              base::FEATURE_DISABLED_BY_DEFAULT);
 
 const base::FeatureParam<int> kThreadPoolCapRestrictedCount{
-    &kThreadPoolCap, "restricted_count", 3};
+    &kThreadPoolCap2, "restricted_count", 3};
 
 // Leeway value applied to delayed tasks. An atomic is used here because the
 // value is queried from multiple threads.
diff --git a/base/task/task_features.h b/base/task/task_features.h
index 0eb8fb0..83b25d6 100644
--- a/base/task/task_features.h
+++ b/base/task/task_features.h
@@ -12,9 +12,8 @@
 
 namespace base {
 
-// Amount of threads that will be system-wide restricted from being used
-// by thread pools.
-BASE_EXPORT BASE_DECLARE_FEATURE(kThreadPoolCap);
+// Fixed number of threads that will be used as a cap for thread pools.
+BASE_EXPORT BASE_DECLARE_FEATURE(kThreadPoolCap2);
 
 extern const BASE_EXPORT base::FeatureParam<int> kThreadPoolCapRestrictedCount;
 
@@ -47,10 +46,19 @@
 #endif  // #if !BUILDFLAG(IS_WIN)
 extern const BASE_EXPORT base::FeatureParam<TimeDelta> kTaskLeewayParam;
 
+// We consider that delayed tasks with a delay above |kMaxPreciseDelay| never
+// need DelayPolicy::kPrecise. The default is slightly above the ~33.3 ms
+// period of a 30 Hz timer.
+constexpr TimeDelta kDefaultMaxPreciseDelay = Milliseconds(36);
+extern const BASE_EXPORT base::FeatureParam<TimeDelta> kMaxPreciseDelay;
+
 // Under this feature, wake ups are aligned at an 8ms boundary when allowed per
 // DelayPolicy.
 BASE_EXPORT BASE_DECLARE_FEATURE(kAlignWakeUps);
 
+// Under this feature, slack is added to Mac message pumps that support it when
+// allowed per DelayPolicy.
+BASE_EXPORT BASE_DECLARE_FEATURE(kTimerSlackMac);
+
 // Under this feature, tasks that need high resolution timer are determined
 // based on explicit DelayPolicy rather than based on a threshold.
 BASE_EXPORT BASE_DECLARE_FEATURE(kExplicitHighResolutionTimerWin);
diff --git a/base/task/task_runner.cc b/base/task/task_runner.cc
index 88f8d72..dc07eec 100644
--- a/base/task/task_runner.cc
+++ b/base/task/task_runner.cc
@@ -15,34 +15,6 @@
 
 namespace base {
 
-namespace {
-
-// TODO(akalin): There's only one other implementation of
-// PostTaskAndReplyImpl in post_task.cc.  Investigate whether it'll be
-// possible to merge the two.
-class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
- public:
-  explicit PostTaskAndReplyTaskRunner(TaskRunner* destination);
-
- private:
-  bool PostTask(const Location& from_here, OnceClosure task) override;
-
-  // Non-owning.
-  raw_ptr<TaskRunner, AcrossTasksDanglingUntriaged> destination_;
-};
-
-PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
-    TaskRunner* destination) : destination_(destination) {
-  DCHECK(destination_);
-}
-
-bool PostTaskAndReplyTaskRunner::PostTask(const Location& from_here,
-                                          OnceClosure task) {
-  return destination_->PostTask(from_here, std::move(task));
-}
-
-}  // namespace
-
 bool TaskRunner::PostTask(const Location& from_here, OnceClosure task) {
   return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
 }
@@ -50,7 +22,10 @@
 bool TaskRunner::PostTaskAndReply(const Location& from_here,
                                   OnceClosure task,
                                   OnceClosure reply) {
-  return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
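+  // PostTaskAndReplyImpl now takes the "post task" operation as a callable,
+  // which removes the need for a dedicated PostTaskAndReplyImpl subclass.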
+  return internal::PostTaskAndReplyImpl(
+      [this](const Location& location, OnceClosure task) {
+        return PostTask(location, std::move(task));
+      },
       from_here, std::move(task), std::move(reply));
 }
 
diff --git a/base/task/thread_pool.cc b/base/task/thread_pool.cc
index 762b188..bacdfd2 100644
--- a/base/task/thread_pool.cc
+++ b/base/task/thread_pool.cc
@@ -16,20 +16,6 @@
 
 namespace {
 
-class PostTaskAndReplyWithTraitsTaskRunner
-    : public internal::PostTaskAndReplyImpl {
- public:
-  explicit PostTaskAndReplyWithTraitsTaskRunner(const TaskTraits& traits)
-      : traits_(traits) {}
-
- private:
-  bool PostTask(const Location& from_here, OnceClosure task) override {
-    ThreadPool::PostTask(from_here, traits_, std::move(task));
-    return true;
-  }
-
-  const TaskTraits traits_;
-};
 
 internal::ThreadPoolImpl* GetThreadPoolImpl() {
   auto* instance = ThreadPoolInstance::Get();
@@ -87,7 +73,10 @@
                                   const TaskTraits& traits,
                                   OnceClosure task,
                                   OnceClosure reply) {
-  return PostTaskAndReplyWithTraitsTaskRunner(traits).PostTaskAndReply(
+  return internal::PostTaskAndReplyImpl(
+      [&traits](const Location& location, OnceClosure task) {
+        return ThreadPool::PostTask(location, traits, std::move(task));
+      },
       from_here, std::move(task), std::move(reply));
 }
 
diff --git a/base/task/thread_pool/delayed_task_manager.cc b/base/task/thread_pool/delayed_task_manager.cc
index 4b61aa6..be9a34e 100644
--- a/base/task/thread_pool/delayed_task_manager.cc
+++ b/base/task/thread_pool/delayed_task_manager.cc
@@ -73,6 +73,7 @@
     DCHECK(!service_thread_task_runner_);
     service_thread_task_runner_ = std::move(service_thread_task_runner);
     align_wake_ups_ = FeatureList::IsEnabled(kAlignWakeUps);
+    max_precise_delay = kMaxPreciseDelay.Get();
     std::tie(process_ripe_tasks_time, delay_policy) =
         GetTimeAndDelayPolicyToScheduleProcessRipeTasksLockRequired();
   }
@@ -97,6 +98,10 @@
   subtle::DelayPolicy delay_policy;
   {
     CheckedAutoLock auto_lock(queue_lock_);
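+    // Relax the delay policy for tasks whose delay exceeds the maximum
+    // precise delay; such tasks never need a precise wake-up.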
+    task.delay_policy = subtle::MaybeOverrideDelayPolicy(
+        task.delay_policy, task.delayed_run_time - task.queue_time,
+        max_precise_delay);
+
     auto [old_process_ripe_tasks_time, old_delay_policy] =
         GetTimeAndDelayPolicyToScheduleProcessRipeTasksLockRequired();
     delayed_task_queue_.insert(DelayedTask(std::move(task),
diff --git a/base/task/thread_pool/delayed_task_manager.h b/base/task/thread_pool/delayed_task_manager.h
index b055a50..05c1feb 100644
--- a/base/task/thread_pool/delayed_task_manager.h
+++ b/base/task/thread_pool/delayed_task_manager.h
@@ -15,6 +15,7 @@
 #include "base/synchronization/atomic_flag.h"
 #include "base/task/common/checked_lock.h"
 #include "base/task/delay_policy.h"
+#include "base/task/task_features.h"
 #include "base/task/thread_pool/task.h"
 #include "base/thread_annotations.h"
 #include "base/time/default_tick_clock.h"
@@ -135,6 +136,8 @@
       GUARDED_BY(queue_lock_);
 
   bool align_wake_ups_ GUARDED_BY(queue_lock_) = false;
+  base::TimeDelta max_precise_delay GUARDED_BY(queue_lock_) =
+      kDefaultMaxPreciseDelay;
 
   SEQUENCE_CHECKER(sequence_checker_);
 };
diff --git a/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc b/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc
index 695396b..e309cdf 100644
--- a/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc
+++ b/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc
@@ -8,6 +8,7 @@
 #include <string>
 #include <utility>
 
+#include "base/check.h"
 #include "base/debug/leak_annotations.h"
 #include "base/functional/bind.h"
 #include "base/functional/callback.h"
@@ -281,6 +282,9 @@
     WorkerThreadDelegate::OnMainEntry(worker);
 
     scoped_com_initializer_ = std::make_unique<win::ScopedCOMInitializer>();
+
+    // CHECK to make sure this COM thread is initialized correctly in an STA.
+    CHECK(scoped_com_initializer_->Succeeded());
   }
 
   RegisteredTaskSource GetWork(WorkerThread* worker) override {
diff --git a/base/task/thread_pool/priority_queue.cc b/base/task/thread_pool/priority_queue.cc
index 47e3d4c..b5e77a8 100644
--- a/base/task/thread_pool/priority_queue.cc
+++ b/base/task/thread_pool/priority_queue.cc
@@ -193,6 +193,13 @@
   is_flush_task_sources_on_destroy_enabled_ = true;
 }
 
+void PriorityQueue::swap(PriorityQueue& other) {
+  container_.swap(other.container_);
+  num_task_sources_per_priority_.swap(other.num_task_sources_per_priority_);
+  std::swap(is_flush_task_sources_on_destroy_enabled_,
+            other.is_flush_task_sources_on_destroy_enabled_);
+}
+
 void PriorityQueue::DecrementNumTaskSourcesForPriority(TaskPriority priority) {
   DCHECK_GT(num_task_sources_per_priority_[base::to_underlying(priority)], 0U);
   --num_task_sources_per_priority_[base::to_underlying(priority)];
diff --git a/base/task/thread_pool/priority_queue.h b/base/task/thread_pool/priority_queue.h
index ebafb54..fe4a28e 100644
--- a/base/task/thread_pool/priority_queue.h
+++ b/base/task/thread_pool/priority_queue.h
@@ -75,6 +75,8 @@
   // (TaskSource -> Task -> TaskRunner -> TaskSource...) during test teardown.
   void EnableFlushTaskSourcesOnDestroyForTesting();
 
+  void swap(PriorityQueue& other);
+
  private:
   // A class combining a TaskSource and the TaskSourceSortKey that determines
   // its position in a PriorityQueue.
diff --git a/base/task/thread_pool/semaphore.h b/base/task/thread_pool/semaphore.h
new file mode 100644
index 0000000..650a5d5
--- /dev/null
+++ b/base/task/thread_pool/semaphore.h
@@ -0,0 +1,99 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a clone of "v8/src/base/platform/semaphore.h" in v8.
+// Keep in sync, especially when fixing bugs.
+
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_THREAD_POOL_SEMAPHORE_H_
+#define BASE_TASK_THREAD_POOL_SEMAPHORE_H_
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#elif BUILDFLAG(IS_MAC) || BUILDFLAG(IS_IOS)
+#include <dispatch/dispatch.h>
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL) || BUILDFLAG(IS_FUCHSIA)
+#include <semaphore.h>
+#else
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#endif
+
+namespace base {
+class TimeDelta;
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
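+// Illustrative usage sketch (not part of the original header; DoWork() below
+// is a hypothetical helper): a semaphore initialized to N bounds the number
+// of threads concurrently inside a section.
+//
+//   base::internal::Semaphore slots(4);
+//   void DoBoundedWork() {
+//     slots.Wait();    // Take a slot; blocks while all 4 slots are in use.
+//     DoWork();        // Hypothetical work function.
+//     slots.Signal();  // Release the slot.
+//   }
+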
+class BASE_EXPORT Semaphore {
+ public:
+  explicit Semaphore(int count);
+  Semaphore(const Semaphore&) = delete;
+  Semaphore& operator=(const Semaphore&) = delete;
+  ~Semaphore();
+
+  // Increments the semaphore counter.
+  void Signal();
+
+  // Decrements the semaphore counter if it is positive, or blocks until it
+  // becomes positive and then decrements the counter.
+  //
+  // Wait's return "happens-after" |Signal| has completed. This means that it's
+  // safe for a Semaphore to synchronise its own destruction, like this:
+  //
+  //   Semaphore* s = new Semaphore(0);
+  //   SendToOtherThread(s);
+  //   s->Wait();
+  //   delete s;
+  void Wait();
+
+  // Like Wait() but returns after `timeout` time has passed. If the call times
+  // out, the return value is false and the counter is unchanged. Otherwise the
+  // semaphore counter is decremented and true is returned.
+  //
+  // Note: Timeout is checked to be no more than DWORD-size (24 days).
+  [[nodiscard]] bool TimedWait(TimeDelta timeout);
+
+#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_IOS)
+  using NativeHandle = dispatch_semaphore_t;
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL) || BUILDFLAG(IS_FUCHSIA)
+  using NativeHandle = sem_t;
+#elif BUILDFLAG(IS_WIN)
+  using NativeHandle = HANDLE;
+#else  // default implementation
+  using NativeHandle = struct DefaultSemaphore {
+   private:
+    friend class Semaphore;
+    DefaultSemaphore(int count) : condition_var(&lock), value(count) {}
+
+    Lock lock;
+    ConditionVariable condition_var GUARDED_BY(lock);
+    int value GUARDED_BY(lock);
+  };
+#endif
+
+ private:
+  NativeHandle& native_handle() { return native_handle_; }
+  const NativeHandle& native_handle() const { return native_handle_; }
+
+  NativeHandle native_handle_;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_THREAD_POOL_SEMAPHORE_H_
diff --git a/base/task/thread_pool/semaphore/semaphore_apple.cc b/base/task/thread_pool/semaphore/semaphore_apple.cc
new file mode 100644
index 0000000..6e541c4
--- /dev/null
+++ b/base/task/thread_pool/semaphore/semaphore_apple.cc
@@ -0,0 +1,46 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a clone of "v8/src/base/platform/semaphore.cc" in v8.
+// Keep in sync, especially when fixing bugs.
+
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/semaphore.h"
+
+#include <dispatch/dispatch.h>
+
+#include "base/check.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Semaphore::Semaphore(int count) {
+  native_handle_ = dispatch_semaphore_create(count);
+  CHECK(native_handle_);
+}
+
+Semaphore::~Semaphore() {
+  dispatch_release(native_handle_);
+}
+
+void Semaphore::Signal() {
+  dispatch_semaphore_signal(native_handle_);
+}
+
+void Semaphore::Wait() {
+  CHECK_EQ(dispatch_semaphore_wait(native_handle_, DISPATCH_TIME_FOREVER), 0);
+}
+
+bool Semaphore::TimedWait(TimeDelta timeout) {
+  dispatch_time_t wait_time =
+      dispatch_time(DISPATCH_TIME_NOW, timeout.InNanoseconds());
+  return dispatch_semaphore_wait(native_handle_, wait_time) == 0;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task/thread_pool/semaphore/semaphore_default.cc b/base/task/thread_pool/semaphore/semaphore_default.cc
new file mode 100644
index 0000000..645ec6b
--- /dev/null
+++ b/base/task/thread_pool/semaphore/semaphore_default.cc
@@ -0,0 +1,64 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a clone of "v8/src/base/platform/semaphore.cc" in v8.
+// Keep in sync, especially when fixing bugs.
+
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/semaphore.h"
+
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Semaphore::Semaphore(int count) : native_handle_(count) {
+  native_handle().condition_var.declare_only_used_while_idle();
+}
+
+Semaphore::~Semaphore() = default;
+
+void Semaphore::Signal() {
+  AutoLock lock(native_handle().lock);
+  ++native_handle().value;
+  native_handle().condition_var.Signal();
+}
+
+void Semaphore::Wait() {
+  AutoLock lock(native_handle().lock);
+  while (native_handle().value < 1) {
+    native_handle().condition_var.Wait();
+  }
+  --native_handle().value;
+}
+
+bool Semaphore::TimedWait(TimeDelta timeout) {
+  AutoLock lock(native_handle().lock);
+  const TimeTicks before_wait = TimeTicks::Now();
+  const TimeTicks wait_end = before_wait + timeout;
+  TimeDelta remaining_sleep = timeout;
+  while (native_handle().value < 1) {
+    native_handle().condition_var.TimedWait(remaining_sleep);
+
+    // Since condition variables experience spurious wakeups, adjust the
+    // remaining wait time to prepare for sleeping once more, and return if a
+    // timeout occurred.
+    remaining_sleep = wait_end - TimeTicks::Now();
+    if (!remaining_sleep.is_positive()) {
+      return false;
+    }
+  }
+  // The semaphore value became positive before the timeout expired; decrement
+  // it and report success.
+  --native_handle().value;
+  return true;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task/thread_pool/semaphore/semaphore_posix.cc b/base/task/thread_pool/semaphore/semaphore_posix.cc
new file mode 100644
index 0000000..db1fa4a
--- /dev/null
+++ b/base/task/thread_pool/semaphore/semaphore_posix.cc
@@ -0,0 +1,85 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a clone of "v8/src/base/platform/semaphore.cc" in v8.
+// Keep in sync, especially when fixing bugs.
+
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/semaphore.h"
+
+#include <errno.h>
+#include <semaphore.h>
+
+#include "base/check.h"
+#include "base/logging.h"
+#include "base/notreached.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+// Translates a base::TimeDelta (relative to now) to a struct timespec
+// containing that position in time relative to the Unix epoch.
+struct timespec TimeDeltaToAbsTimeSpec(base::TimeDelta time_delta) {
+  struct timespec now;
+  clock_gettime(CLOCK_REALTIME, &now);
+  struct timespec offset = time_delta.ToTimeSpec();
+  now.tv_sec += offset.tv_sec;
+  now.tv_nsec += offset.tv_nsec;
+  if (now.tv_nsec >= Time::kNanosecondsPerSecond) {
+    now.tv_sec++;
+    now.tv_nsec -= Time::kNanosecondsPerSecond;
+  }
+  return now;
+}
+}  // namespace
+
+Semaphore::Semaphore(int count) {
+  CHECK_GE(count, 0);
+  int result = sem_init(&native_handle_, 0, static_cast<unsigned int>(count));
+  CHECK_EQ(result, 0);
+}
+
+Semaphore::~Semaphore() {
+  int result = sem_destroy(&native_handle_);
+  CHECK_EQ(result, 0);
+}
+
+void Semaphore::Signal() {
+  int result = sem_post(&native_handle_);
+  CHECK_EQ(result, 0);
+}
+
+void Semaphore::Wait() {
+  int result = HANDLE_EINTR(sem_wait(&native_handle_));
+  if (result == 0) {
+    return;  // Semaphore was signalled.
+  }
+  PCHECK(false);
+}
+
+bool Semaphore::TimedWait(TimeDelta timeout) {
+  // Compute the time for end of timeout.
+  const struct timespec ts = TimeDeltaToAbsTimeSpec(timeout);
+
+  // Wait for semaphore signalled or timeout.
+  int result = HANDLE_EINTR(sem_timedwait(&native_handle_, &ts));
+  if (result == 0) {
+    return true;  // Semaphore was signalled.
+  }
+  if (result == -1 && errno == ETIMEDOUT) {
+    // Timed out while waiting for semaphore.
+    return false;
+  }
+  PCHECK(false);
+  return false;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task/thread_pool/semaphore/semaphore_unittest.cc b/base/task/thread_pool/semaphore/semaphore_unittest.cc
new file mode 100644
index 0000000..a1352a4
--- /dev/null
+++ b/base/task/thread_pool/semaphore/semaphore_unittest.cc
@@ -0,0 +1,93 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/semaphore.h"
+
+#include <memory>
+#include <vector>
+
+#include "base/functional/callback.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/test/bind.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+namespace {
+
+class SemaphoreTest : public PlatformTest {
+ protected:
+  raw_ptr<Thread> CreateThreadWithTask(RepeatingClosure& thread_task) {
+    std::unique_ptr<Thread> thread = std::make_unique<Thread>(
+        StringPrintf("SemTestThread%d", threadcounter++));
+
+    thread->Start();
+    thread->task_runner()->PostTask(FROM_HERE, thread_task);
+    threads_.push_back(std::move(thread));
+    return threads_.back().get();
+  }
+
+  int threadcounter = 0;
+  WaitableEvent shutdown_event_{};
+  std::vector<std::unique_ptr<Thread>> threads_{};
+};
+
+}  // namespace
+
+TEST_F(SemaphoreTest, TimedWaitFail) {
+  internal::Semaphore sem{0};
+  RepeatingClosure task = BindLambdaForTesting([&]() {
+    TimeTicks start_time = TimeTicks::Now();
+    EXPECT_FALSE(sem.TimedWait(TestTimeouts::tiny_timeout()));
+    EXPECT_GE(TimeTicks::Now() - start_time, TestTimeouts::tiny_timeout());
+  });
+
+  CreateThreadWithTask(task)->FlushForTesting();
+}
+
+TEST_F(SemaphoreTest, TimedWaitSuccess) {
+  internal::Semaphore sem{0};
+  RepeatingClosure task = BindLambdaForTesting(
+      [&]() { EXPECT_TRUE(sem.TimedWait(TestTimeouts::tiny_timeout())); });
+
+  sem.Signal();
+  CreateThreadWithTask(task)->FlushForTesting();
+}
+
+TEST_F(SemaphoreTest, PingPongCounter) {
+  internal::Semaphore sem{0};
+  int counter = 0;
+  RepeatingClosure task = BindLambdaForTesting([&]() {
+    while (!shutdown_event_.IsSignaled()) {
+      sem.Wait();
+      {
+        if (shutdown_event_.IsSignaled()) {
+          return;
+        }
+      }
+      ++counter;
+      if (counter > 999) {
+        shutdown_event_.Signal();
+      }
+      sem.Signal();
+      PlatformThread::Sleep(Microseconds(100));
+    }
+  });
+
+  sem.Signal();
+  raw_ptr<Thread> thread = CreateThreadWithTask(task);
+  raw_ptr<Thread> thread2 = CreateThreadWithTask(task);
+  thread->FlushForTesting();
+  thread2->FlushForTesting();
+  thread->Stop();
+  thread2->Stop();
+  EXPECT_EQ(counter, 1000);
+}
+
+}  // namespace base
diff --git a/base/task/thread_pool/semaphore/semaphore_win.cc b/base/task/thread_pool/semaphore/semaphore_win.cc
new file mode 100644
index 0000000..c28f7e5
--- /dev/null
+++ b/base/task/thread_pool/semaphore/semaphore_win.cc
@@ -0,0 +1,62 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a clone of "v8/src/base/platform/semaphore.cc" in v8.
+// Keep in sync, especially when fixing bugs.
+
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/semaphore.h"
+
+#include <windows.h>
+
+#include "base/check.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Semaphore::Semaphore(int count) {
+  CHECK_GE(count, 0);
+  native_handle_ = ::CreateSemaphoreA(
+      nullptr, count, std::numeric_limits<LONG>::max(), nullptr);
+  CHECK(!!native_handle_);
+}
+
+Semaphore::~Semaphore() {
+  const bool result = CloseHandle(native_handle_);
+  CHECK(result);
+}
+
+void Semaphore::Signal() {
+  const bool result = ReleaseSemaphore(native_handle_, 1, nullptr);
+  CHECK(result);
+}
+
+void Semaphore::Wait() {
+  const DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+  CHECK_EQ(result, WAIT_OBJECT_0);
+}
+
+bool Semaphore::TimedWait(TimeDelta timeout) {
+  const DWORD wait_ms = checked_cast<DWORD>(timeout.InMilliseconds());
+  const TimeTicks start = TimeTicks::Now();
+  DWORD result;
+  // WaitForSingleObject has been observed to wake up spuriously (on the order
+  // of 10ms before the requested timeout), so retry until at least |timeout|
+  // has passed.
+  do {
+    result = WaitForSingleObject(native_handle_, wait_ms);
+    if (result == WAIT_OBJECT_0) {
+      return true;
+    }
+  } while (TimeTicks::Now() <= start + timeout);
+  CHECK_EQ(result, static_cast<DWORD>(WAIT_TIMEOUT));
+  return false;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task/thread_pool/task.cc b/base/task/thread_pool/task.cc
index 59fa754..8fe92d4 100644
--- a/base/task/thread_pool/task.cc
+++ b/base/task/thread_pool/task.cc
@@ -6,17 +6,9 @@
 
 #include <utility>
 
-#include "base/atomic_sequence_num.h"
-
 namespace base {
 namespace internal {
 
-namespace {
-
-AtomicSequenceNumber g_sequence_nums_for_tracing;
-
-}  // namespace
-
 Task::Task(const Location& posted_from,
            OnceClosure task,
            TimeTicks queue_time,
@@ -39,15 +31,7 @@
                   queue_time,
                   delayed_run_time,
                   leeway,
-                  delay_policy) {
-  // ThreadPoolImpl doesn't use |sequence_num| but tracing (toplevel.flow)
-  // relies on it being unique. While this subtle dependency is a bit
-  // overreaching, ThreadPoolImpl is the only task system that doesn't use
-  // |sequence_num| and the dependent code rarely changes so this isn't worth a
-  // big change and faking it here isn't too bad for now (posting tasks is full
-  // of atomic ops already).
-  this->sequence_num = g_sequence_nums_for_tracing.GetNext();
-}
+                  delay_policy) {}
 
 // This should be "= default but MSVC has trouble with "noexcept = default" in
 // this case.
diff --git a/base/task/thread_pool/task_tracker.cc b/base/task/thread_pool/task_tracker.cc
index 94db917..a8a5fe6 100644
--- a/base/task/thread_pool/task_tracker.cc
+++ b/base/task/thread_pool/task_tracker.cc
@@ -309,6 +309,7 @@
   DCHECK(task);
   DCHECK(task->task);
 
+  task->sequence_num = sequence_nums_.GetNext();
   if (state_->HasShutdownStarted()) {
     // A non BLOCK_SHUTDOWN task is allowed to be posted iff shutdown hasn't
     // started and the task is not delayed.
diff --git a/base/task/thread_pool/task_tracker.h b/base/task/thread_pool/task_tracker.h
index b4267fa..eaf9791 100644
--- a/base/task/thread_pool/task_tracker.h
+++ b/base/task/thread_pool/task_tracker.h
@@ -12,6 +12,7 @@
 #include <queue>
 #include <string>
 
+#include "base/atomic_sequence_num.h"
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/containers/circular_deque.h"
@@ -268,6 +269,9 @@
   // completes.
   std::unique_ptr<WaitableEvent> shutdown_event_ GUARDED_BY(shutdown_lock_);
 
+  // Used to generate unique |PendingTask::sequence_num| when posting tasks.
+  AtomicSequenceNumber sequence_nums_;
+
   // Ensures all state (e.g. dangling cleaned up workers) is coalesced before
   // destroying the TaskTracker (e.g. in test environments).
   // Ref. https://crbug.com/827615.
diff --git a/base/task/thread_pool/thread_group.cc b/base/task/thread_pool/thread_group.cc
index 7e9dd36..f3f7e08 100644
--- a/base/task/thread_pool/thread_group.cc
+++ b/base/task/thread_pool/thread_group.cc
@@ -6,6 +6,7 @@
 
 #include <utility>
 
+#include "base/check.h"
 #include "base/feature_list.h"
 #include "base/functional/bind.h"
 #include "base/functional/callback_helpers.h"
@@ -64,12 +65,8 @@
 }
 
 ThreadGroup::ThreadGroup(TrackedRef<TaskTracker> task_tracker,
-                         TrackedRef<Delegate> delegate,
-                         ThreadGroup* predecessor_thread_group)
-    : task_tracker_(std::move(task_tracker)),
-      delegate_(std::move(delegate)),
-      lock_(predecessor_thread_group ? &predecessor_thread_group->lock_
-                                     : nullptr) {
+                         TrackedRef<Delegate> delegate)
+    : task_tracker_(std::move(task_tracker)), delegate_(std::move(delegate)) {
   DCHECK(task_tracker_);
 }
 
@@ -259,34 +256,29 @@
   EnsureEnoughWorkersLockRequired(executor);
 }
 
-void ThreadGroup::InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
-    ThreadGroup* destination_thread_group) {
-  CheckedAutoLock current_thread_group_lock(lock_);
-  CheckedAutoLock destination_thread_group_lock(
-      destination_thread_group->lock_);
-  destination_thread_group->priority_queue_ = std::move(priority_queue_);
-  replacement_thread_group_ = destination_thread_group;
-}
-
 void ThreadGroup::HandoffNonUserBlockingTaskSourcesToOtherThreadGroup(
     ThreadGroup* destination_thread_group) {
-  CheckedAutoLock current_thread_group_lock(lock_);
-  CheckedAutoLock destination_thread_group_lock(
-      destination_thread_group->lock_);
   PriorityQueue new_priority_queue;
   TaskSourceSortKey top_sort_key;
-  // This works because all USER_BLOCKING tasks are at the front of the queue.
-  while (!priority_queue_.IsEmpty() &&
-         (top_sort_key = priority_queue_.PeekSortKey()).priority() ==
-             TaskPriority::USER_BLOCKING) {
-    new_priority_queue.Push(priority_queue_.PopTaskSource(), top_sort_key);
+  {
+    // This works because all USER_BLOCKING tasks are at the front of the queue.
+    CheckedAutoLock current_thread_group_lock(lock_);
+    while (!priority_queue_.IsEmpty() &&
+           (top_sort_key = priority_queue_.PeekSortKey()).priority() ==
+               TaskPriority::USER_BLOCKING) {
+      new_priority_queue.Push(priority_queue_.PopTaskSource(), top_sort_key);
+    }
+    new_priority_queue.swap(priority_queue_);
   }
-  while (!priority_queue_.IsEmpty()) {
-    top_sort_key = priority_queue_.PeekSortKey();
-    destination_thread_group->priority_queue_.Push(
-        priority_queue_.PopTaskSource(), top_sort_key);
+  {
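+    // The remaining (non-USER_BLOCKING) task sources are pushed to the
+    // destination group under its own lock; the two group locks are never
+    // held at the same time.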
+    CheckedAutoLock destination_thread_group_lock(
+        destination_thread_group->lock_);
+    while (!new_priority_queue.IsEmpty()) {
+      top_sort_key = new_priority_queue.PeekSortKey();
+      destination_thread_group->priority_queue_.Push(
+          new_priority_queue.PopTaskSource(), top_sort_key);
+    }
   }
-  priority_queue_ = std::move(new_priority_queue);
 }
 
 bool ThreadGroup::ShouldYield(TaskSourceSortKey sort_key) {
@@ -331,6 +323,10 @@
   std::unique_ptr<win::ScopedWindowsThreadEnvironment> scoped_environment;
   if (environment == WorkerEnvironment::COM_MTA) {
     scoped_environment = std::make_unique<win::ScopedWinrtInitializer>();
+
+    // TODO(crbug.com/1498668): roll back the change or replace it with a CHECK
+    // before closing the bug.
+    DUMP_WILL_BE_CHECK(scoped_environment->Succeeded());
   }
 
   DCHECK(!scoped_environment || scoped_environment->Succeeded());
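Editor's note: the rewritten HandoffNonUserBlockingTaskSourcesToOtherThreadGroup above never holds both thread groups' locks at once. It drains the non-USER_BLOCKING task sources into a local queue while holding only the source group's lock, then pushes them into the destination while holding only the destination group's lock. Below is a minimal sketch of that two-phase handoff pattern; it is written in Java with java.util.concurrent locks and a plain Deque purely for illustration (not the actual CheckedAutoLock/PriorityQueue code), and it moves every element rather than filtering by priority.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative only: two groups, each guarding its own queue with its own lock.
class Group {
    final ReentrantLock lock = new ReentrantLock();
    final Deque<String> queue = new ArrayDeque<>();

    // Moves every queued element from this group to |destination| without ever
    // holding both locks at the same time, mirroring the two-phase handoff above.
    void handoffTo(Group destination) {
        Deque<String> drained = new ArrayDeque<>();
        lock.lock();
        try {
            drained.addAll(queue); // Phase 1: drain under the source lock only.
            queue.clear();
        } finally {
            lock.unlock();
        }
        destination.lock.lock();
        try {
            destination.queue.addAll(drained); // Phase 2: push under the destination lock only.
        } finally {
            destination.lock.unlock();
        }
    }
}

Because the locks are never nested, the predecessor_thread_group lock-ordering mechanism that the rest of this change deletes from the ThreadGroup constructor is no longer needed.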
diff --git a/base/task/thread_pool/thread_group.h b/base/task/thread_pool/thread_group.h
index 9dc58c8..a52b9cf 100644
--- a/base/task/thread_pool/thread_group.h
+++ b/base/task/thread_pool/thread_group.h
@@ -86,16 +86,6 @@
   virtual void PushTaskSourceAndWakeUpWorkers(
       RegisteredTaskSourceAndTransaction transaction_with_task_source) = 0;
 
-  // Removes all task sources from this ThreadGroup's PriorityQueue and enqueues
-  // them in another |destination_thread_group|. After this method is called,
-  // any task sources posted to this ThreadGroup will be forwarded to
-  // |destination_thread_group|.
-  //
-  // TODO(crbug.com/756547): Remove this method once the UseNativeThreadPool
-  // experiment is complete.
-  void InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
-      ThreadGroup* destination_thread_group);
-
   // Move all task sources except the ones with TaskPriority::USER_BLOCKING,
   // from this ThreadGroup's PriorityQueue to the |destination_thread_group|'s.
   void HandoffNonUserBlockingTaskSourcesToOtherThreadGroup(
@@ -175,16 +165,8 @@
     raw_ptr<ThreadGroup> destination_thread_group_ = nullptr;
   };
 
-  // |predecessor_thread_group| is a ThreadGroup whose lock can be acquired
-  // before the constructed ThreadGroup's lock. This is necessary to move all
-  // task sources from |predecessor_thread_group| to the constructed ThreadGroup
-  // and support the UseNativeThreadPool experiment.
-  //
-  // TODO(crbug.com/756547): Remove |predecessor_thread_group| once the
-  // experiment is complete.
   ThreadGroup(TrackedRef<TaskTracker> task_tracker,
-              TrackedRef<Delegate> delegate,
-              ThreadGroup* predecessor_thread_group = nullptr);
+              TrackedRef<Delegate> delegate);
 
 #if BUILDFLAG(IS_WIN)
   static std::unique_ptr<win::ScopedWindowsThreadEnvironment>
@@ -239,7 +221,7 @@
   // atomic, nor immutable after start. Since this lock is a bottleneck to post
   // and schedule work, only simple data structure manipulations are allowed
   // within its scope (no thread creation or wake up).
-  mutable CheckedLock lock_;
+  mutable CheckedLock lock_{};
 
   bool disable_fair_scheduling_ GUARDED_BY(lock_){false};
 
diff --git a/base/task/thread_pool/thread_group_impl.cc b/base/task/thread_pool/thread_group_impl.cc
index d36832c..5ed5e8c 100644
--- a/base/task/thread_pool/thread_group_impl.cc
+++ b/base/task/thread_pool/thread_group_impl.cc
@@ -340,11 +340,8 @@
                                  StringPiece thread_group_label,
                                  ThreadType thread_type_hint,
                                  TrackedRef<TaskTracker> task_tracker,
-                                 TrackedRef<Delegate> delegate,
-                                 ThreadGroup* predecessor_thread_group)
-    : ThreadGroup(std::move(task_tracker),
-                  std::move(delegate),
-                  predecessor_thread_group),
+                                 TrackedRef<Delegate> delegate)
+    : ThreadGroup(std::move(task_tracker), std::move(delegate)),
       histogram_label_(histogram_label),
       thread_group_label_(thread_group_label),
       thread_type_hint_(thread_type_hint),
@@ -987,9 +984,9 @@
   DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
   DCHECK(idle_workers_set_.IsEmpty());
 
-  // WorkerThread needs |lock_| as a predecessor for its thread lock
-  // because in WakeUpOneWorker, |lock_| is first acquired and then
-  // the thread lock is acquired when WakeUp is called on the worker.
+  // WorkerThread needs |lock_| as a predecessor for its thread lock because in
+  // GetWork(), |lock_| is first acquired and then the thread lock is acquired
+  // when GetLastUsedTime() is called on the worker by CanGetWorkLockRequired().
   scoped_refptr<WorkerThread> worker = MakeRefCounted<WorkerThread>(
       thread_type_hint_,
       std::make_unique<WorkerThreadDelegateImpl>(
diff --git a/base/task/thread_pool/thread_group_impl.h b/base/task/thread_pool/thread_group_impl.h
index 1211079..4c6de67 100644
--- a/base/task/thread_pool/thread_group_impl.h
+++ b/base/task/thread_pool/thread_group_impl.h
@@ -59,8 +59,7 @@
                   StringPiece thread_group_label,
                   ThreadType thread_type_hint,
                   TrackedRef<TaskTracker> task_tracker,
-                  TrackedRef<Delegate> delegate,
-                  ThreadGroup* predecessor_thread_group = nullptr);
+                  TrackedRef<Delegate> delegate);
 
   // Creates threads, allowing existing and future tasks to run. The thread
   // group runs at most |max_tasks| / `max_best_effort_tasks` unblocked task
@@ -90,9 +89,9 @@
 
   ThreadGroupImpl(const ThreadGroupImpl&) = delete;
   ThreadGroupImpl& operator=(const ThreadGroupImpl&) = delete;
-  // Destroying a ThreadGroupImpl returned by Create() is not allowed in
-  // production; it is always leaked. In tests, it can only be destroyed after
-  // JoinForTesting() has returned.
+  // Destroying a ThreadGroupImpl is not allowed in production; it is always
+  // leaked. In tests, it can only be destroyed after JoinForTesting() has
+  // returned.
   ~ThreadGroupImpl() override;
 
   // ThreadGroup:
@@ -156,9 +155,6 @@
   void MaintainAtLeastOneIdleWorkerLockRequired(
       ScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  // Returns true if worker cleanup is permitted.
-  bool CanWorkerCleanupForTestingLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
   // Creates a worker, adds it to the thread group, schedules its start and
   // returns it. Cannot be called before Start().
   scoped_refptr<WorkerThread> CreateAndRegisterWorkerLockRequired(
diff --git a/base/task/thread_pool/thread_pool_impl.cc b/base/task/thread_pool/thread_pool_impl.cc
index 7f15cb2..9bc25b8 100644
--- a/base/task/thread_pool/thread_pool_impl.cc
+++ b/base/task/thread_pool/thread_pool_impl.cc
@@ -150,8 +150,7 @@
                   "."),
         kUtilityPoolEnvironmentParams.name_suffix,
         kUtilityPoolEnvironmentParams.thread_type_hint,
-        task_tracker_->GetTrackedRef(), tracked_ref_factory_.GetTrackedRef(),
-        foreground_thread_group_.get());
+        task_tracker_->GetTrackedRef(), tracked_ref_factory_.GetTrackedRef());
     foreground_thread_group_
         ->HandoffNonUserBlockingTaskSourcesToOtherThreadGroup(
             utility_thread_group_.get());
@@ -181,27 +180,6 @@
 
   size_t foreground_threads = init_params.max_num_foreground_threads;
   size_t utility_threads = init_params.max_num_utility_threads;
-  // Set the size of each ThreadGroup such that N cores are left available
-  // for other threads. N is the number of threads that the application is
-  // expected to need to be responsive (currently configurable via field trial).
-  // The size of each ThreadGroup can grow beyond the value set here when tasks
-  // enter ScopedBlockingCall.
-  if (base::FeatureList::IsEnabled(kThreadPoolCap)) {
-    int restricted_threads = kThreadPoolCapRestrictedCount.Get();
-    int max_allowed_workers_per_pool =
-        (base::SysInfo::NumberOfProcessors() - restricted_threads);
-    // Set a positive minimum amount of workers per pool.
-    max_allowed_workers_per_pool = std::max(2, max_allowed_workers_per_pool);
-    foreground_threads =
-        std::min(init_params.max_num_foreground_threads,
-                 static_cast<size_t>(max_allowed_workers_per_pool));
-    utility_threads =
-        std::min(init_params.max_num_utility_threads,
-                 static_cast<size_t>(max_allowed_workers_per_pool));
-    max_best_effort_tasks =
-        std::min(max_best_effort_tasks,
-                 static_cast<size_t>(max_allowed_workers_per_pool));
-  }
 
   // On platforms that can't use the background thread priority, best-effort
   // tasks run in foreground pools. A cap is set on the number of best-effort
diff --git a/base/task/thread_pool/thread_pool_impl_unittest.cc b/base/task/thread_pool/thread_pool_impl_unittest.cc
index e0f712c..9064f7a 100644
--- a/base/task/thread_pool/thread_pool_impl_unittest.cc
+++ b/base/task/thread_pool/thread_pool_impl_unittest.cc
@@ -1465,9 +1465,12 @@
   // If the task following the priority update is expected to run in the
   // foreground group, it should be after the task posted to the TaskRunner
   // whose priority is updated to USER_VISIBLE.
-  expected_previous_event = CanUseBackgroundThreadTypeForWorkerThread()
-                                ? nullptr
-                                : &task_runners_and_events.back()->task_ran;
+  expected_previous_event =
+      CanUseBackgroundThreadTypeForWorkerThread() ||
+              (test->GetUseResourceEfficientThreadGroup() &&
+               CanUseUtilityThreadTypeForWorkerThread())
+          ? nullptr
+          : &task_runners_and_events.back()->task_ran;
 
   task_runners_and_events.push_back(std::make_unique<TaskRunnerAndEvents>(
       thread_pool->CreateUpdateableSequencedTaskRunner(
diff --git a/base/task/thread_pool/worker_thread.cc b/base/task/thread_pool/worker_thread.cc
index 84fb4db..1fd13ad 100644
--- a/base/task/thread_pool/worker_thread.cc
+++ b/base/task/thread_pool/worker_thread.cc
@@ -10,8 +10,8 @@
 #include <atomic>
 #include <utility>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
 #include "base/check_op.h"
 #include "base/compiler_specific.h"
 #include "base/debug/alias.h"
@@ -34,12 +34,12 @@
 #endif
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/apple/scoped_nsautorelease_pool.h"
 #endif
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
     PA_CONFIG(THREAD_CACHE_SUPPORTED)
-#include "base/allocator/partition_allocator/thread_cache.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
 #endif
 
 namespace base::internal {
@@ -442,7 +442,7 @@
   bool got_work_this_wakeup = false;
   while (!ShouldExit()) {
 #if BUILDFLAG(IS_APPLE)
-    mac::ScopedNSAutoreleasePool autorelease_pool;
+    apple::ScopedNSAutoreleasePool autorelease_pool;
 #endif
     absl::optional<WatchHangsInScope> hang_watch_scope;
     if (watch_for_hangs)
diff --git a/base/task/thread_pool/worker_thread_unittest.cc b/base/task/thread_pool/worker_thread_unittest.cc
index a2cc253..327af8a 100644
--- a/base/task/thread_pool/worker_thread_unittest.cc
+++ b/base/task/thread_pool/worker_thread_unittest.cc
@@ -11,10 +11,10 @@
 #include <utility>
 #include <vector>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim.h"
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
 #include "base/compiler_specific.h"
 #include "base/functional/bind.h"
 #include "base/functional/callback_helpers.h"
@@ -42,8 +42,8 @@
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
     PA_CONFIG(THREAD_CACHE_SUPPORTED)
-#include "base/allocator/partition_allocator/extended_api.h"
-#include "base/allocator/partition_allocator/thread_cache.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/extended_api.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/thread_cache.h"
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
         // PA_CONFIG(THREAD_CACHE_SUPPORTED)
 
diff --git a/base/template_util.h b/base/template_util.h
index 3ea6186..ff47ed4 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -45,11 +45,11 @@
 
 // The indirection with std::is_enum<T> is required, because instantiating
 // std::underlying_type_t<T> when T is not an enum is UB prior to C++20.
-template <typename T, bool = std::is_enum<T>::value>
+template <typename T, bool = std::is_enum_v<T>>
 struct IsScopedEnumImpl : std::false_type {};
 
 template <typename T>
-struct IsScopedEnumImpl<T, /*std::is_enum<T>::value=*/true>
+struct IsScopedEnumImpl<T, /*std::is_enum_v<T>=*/true>
     : std::negation<std::is_convertible<T, std::underlying_type_t<T>>> {};
 
 }  // namespace internal
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index 8857ef3..d1f93d4 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -30,30 +30,26 @@
 }
 
 TEST(TemplateUtil, RemoveCvRefT) {
-  static_assert(std::is_same<int, remove_cvref_t<const int>>::value, "");
-  static_assert(std::is_same<int, remove_cvref_t<const volatile int>>::value,
-                "");
-  static_assert(std::is_same<int, remove_cvref_t<int&>>::value, "");
-  static_assert(std::is_same<int, remove_cvref_t<const int&>>::value, "");
-  static_assert(std::is_same<int, remove_cvref_t<const volatile int&>>::value,
-                "");
-  static_assert(std::is_same<int, remove_cvref_t<int&&>>::value, "");
+  static_assert(std::is_same_v<int, remove_cvref_t<const int>>, "");
+  static_assert(std::is_same_v<int, remove_cvref_t<const volatile int>>, "");
+  static_assert(std::is_same_v<int, remove_cvref_t<int&>>, "");
+  static_assert(std::is_same_v<int, remove_cvref_t<const int&>>, "");
+  static_assert(std::is_same_v<int, remove_cvref_t<const volatile int&>>, "");
+  static_assert(std::is_same_v<int, remove_cvref_t<int&&>>, "");
   static_assert(
-      std::is_same<SimpleStruct, remove_cvref_t<const SimpleStruct&>>::value,
-      "");
-  static_assert(std::is_same<int*, remove_cvref_t<int*>>::value, "");
+      std::is_same_v<SimpleStruct, remove_cvref_t<const SimpleStruct&>>, "");
+  static_assert(std::is_same_v<int*, remove_cvref_t<int*>>, "");
 
   // Test references and pointers to arrays.
-  static_assert(std::is_same<int[3], remove_cvref_t<int[3]>>::value, "");
-  static_assert(std::is_same<int[3], remove_cvref_t<int(&)[3]>>::value, "");
-  static_assert(std::is_same<int(*)[3], remove_cvref_t<int(*)[3]>>::value, "");
+  static_assert(std::is_same_v<int[3], remove_cvref_t<int[3]>>, "");
+  static_assert(std::is_same_v<int[3], remove_cvref_t<int(&)[3]>>, "");
+  static_assert(std::is_same_v<int(*)[3], remove_cvref_t<int(*)[3]>>, "");
 
   // Test references and pointers to functions.
-  static_assert(std::is_same<void(int), remove_cvref_t<void(int)>>::value, "");
-  static_assert(std::is_same<void(int), remove_cvref_t<void (&)(int)>>::value,
+  static_assert(std::is_same_v<void(int), remove_cvref_t<void(int)>>, "");
+  static_assert(std::is_same_v<void(int), remove_cvref_t<void (&)(int)>>, "");
+  static_assert(std::is_same_v<void (*)(int), remove_cvref_t<void (*)(int)>>,
                 "");
-  static_assert(
-      std::is_same<void (*)(int), remove_cvref_t<void (*)(int)>>::value, "");
 }
 
 }  // namespace
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index e89151a..d6d708d 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -12,12 +12,17 @@
 
 if (is_android) {
   import("//build/config/android/rules.gni")
+  import("//third_party/jni_zero/jni_zero.gni")
 }
 
 if (is_ios) {
   import("//build/config/ios/rules.gni")
 }
 
+if (enable_base_tracing) {
+  import("//third_party/perfetto/gn/perfetto_cc_proto_descriptor.gni")
+}
+
 static_library("test_config") {
   testonly = true
   sources = [
@@ -98,6 +103,7 @@
     "power_monitor_test.h",
     "power_monitor_test_utils.cc",
     "power_monitor_test_utils.h",
+    "protobuf_matchers.h",
     "rectify_callback.h",
     "rectify_callback_internal.h",
     "repeating_test_future.h",
@@ -105,6 +111,8 @@
     "run_until.h",
     "scoped_amount_of_physical_memory_override.cc",
     "scoped_amount_of_physical_memory_override.h",
+    "scoped_block_tests_writing_to_special_dirs.cc",
+    "scoped_block_tests_writing_to_special_dirs.h",
     "scoped_command_line.cc",
     "scoped_command_line.h",
     "scoped_feature_list.cc",
@@ -186,6 +194,10 @@
       "test_trace_processor.cc",
       "test_trace_processor.h",
     ]
+    deps += [
+      ":amalgamated_perfetto_sql_stdlib",
+      ":gen_cc_chrome_track_event_descriptor",
+    ]
     if (is_ios) {
       deps += [
         ":test_trace_processor+bundle",
@@ -423,8 +435,10 @@
 if (is_fuchsia || is_linux || is_chromeos) {
   shared_library("malloc_wrapper") {
     testonly = true
-    sources = [ "malloc_wrapper.cc" ]
-    deps = [ "//base" ]
+    sources = [
+      "malloc_wrapper.cc",
+      "malloc_wrapper.h",
+    ]
   }
 }
 
@@ -453,10 +467,10 @@
 
     deps = [
       "//base:base_java",
-      "//base:jni_java",
       "//base:process_launcher_java",
       "//testing/android/native_test:native_main_runner_java",
       "//third_party/android_deps:com_google_code_findbugs_jsr305_java",
+      "//third_party/jni_zero:jni_zero_java",
     ]
 
     srcjar_deps = [ ":test_support_java_aidl" ]
@@ -499,10 +513,7 @@
 
   source_set("google_test_runner") {
     sources = [ "ios/google_test_runner.mm" ]
-    deps = [
-      ":google_test_runner_shared_headers",
-      "//base",
-    ]
+    deps = [ ":google_test_runner_shared_headers" ]
     frameworks = [ "UIKit.framework" ]
     configs += [ "//build/config/ios:xctest_config" ]
   }
@@ -516,6 +527,27 @@
 }
 
 if (enable_base_tracing) {
+  perfetto_cc_proto_descriptor("gen_cc_chrome_track_event_descriptor") {
+    descriptor_name = "chrome_track_event.descriptor"
+    descriptor_target = "//base/tracing/protos:chrome_track_event"
+  }
+
+  import("//base/tracing/stdlib/chrome/perfetto_sql_files.gni")
+  action("amalgamated_perfetto_sql_stdlib") {
+    script = "//third_party/perfetto/tools/gen_amalgamated_sql.py"
+    sources = rebase_path(chrome_stdlib_sql_files,
+                          ".",
+                          "//base/tracing/stdlib/chrome")
+    stdlib_header = "$root_gen_dir/base/test/perfetto_sql_stdlib.h"
+    outputs = [ stdlib_header ]
+    args = [
+             "--namespace",
+             "chrome_stdlib",
+             "--cpp-out",
+             rebase_path(stdlib_header, root_build_dir),
+           ] + rebase_path(sources, root_build_dir)
+  }
+
   # We encapsulate the trace processor in a separate shared library to prevent
   # any duplicate symbol issues. Perfetto symbols are exported by chromium’s
   # base via a public_dep on libperfetto; libtrace_processor also depends on
@@ -585,3 +617,43 @@
     testonly = true
   }
 }
+
+if (is_android) {
+  android_library("public_transit_java") {
+    testonly = true
+
+    deps = [
+      "//base:base_java",
+      "//base:base_java_test_support",
+      "//build/android:build_java",
+      "//third_party/android_deps:espresso_java",
+      "//third_party/android_deps:guava_android_java",
+      "//third_party/androidx:androidx_annotation_annotation_java",
+      "//third_party/androidx:androidx_core_core_java",
+      "//third_party/androidx:androidx_test_monitor_java",
+      "//third_party/hamcrest:hamcrest_java",
+      "//third_party/junit",
+    ]
+
+    sources = [
+      "android/javatests/src/org/chromium/base/test/transit/CallbackCondition.java",
+      "android/javatests/src/org/chromium/base/test/transit/Condition.java",
+      "android/javatests/src/org/chromium/base/test/transit/ConditionChecker.java",
+      "android/javatests/src/org/chromium/base/test/transit/ConditionWaiter.java",
+      "android/javatests/src/org/chromium/base/test/transit/ConditionalState.java",
+      "android/javatests/src/org/chromium/base/test/transit/Elements.java",
+      "android/javatests/src/org/chromium/base/test/transit/FacilityCheckIn.java",
+      "android/javatests/src/org/chromium/base/test/transit/FacilityCheckOut.java",
+      "android/javatests/src/org/chromium/base/test/transit/InstrumentationThreadCondition.java",
+      "android/javatests/src/org/chromium/base/test/transit/PublicTransitConfig.java",
+      "android/javatests/src/org/chromium/base/test/transit/StationFacility.java",
+      "android/javatests/src/org/chromium/base/test/transit/TransitAsserts.java",
+      "android/javatests/src/org/chromium/base/test/transit/TransitStation.java",
+      "android/javatests/src/org/chromium/base/test/transit/Transition.java",
+      "android/javatests/src/org/chromium/base/test/transit/TravelException.java",
+      "android/javatests/src/org/chromium/base/test/transit/Trip.java",
+      "android/javatests/src/org/chromium/base/test/transit/UiThreadCondition.java",
+      "android/javatests/src/org/chromium/base/test/transit/ViewConditions.java",
+    ]
+  }
+}
diff --git a/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java b/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java
index 452547d..32b962e 100644
--- a/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java
+++ b/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java
@@ -9,7 +9,7 @@
 import android.net.Uri;
 import android.provider.MediaStore;
 
-import org.chromium.base.annotations.CalledByNative;
+import org.jni_zero.CalledByNative;
 
 /**
  * Utilities for testing operations on content URI.
diff --git a/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java b/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java
index 3d97ef7..f0be175 100644
--- a/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java
+++ b/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java
@@ -7,9 +7,9 @@
 import android.os.Handler;
 import android.os.Process;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.CalledByNativeUnchecked;
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.CalledByNativeUnchecked;
+import org.jni_zero.JNINamespace;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
diff --git a/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java b/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java
index a558458..c59aac6 100644
--- a/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java
+++ b/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
 
 /**
  * Contains the result of a native main method that ran in a child process.
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java
index d0f21ba..774b3e2 100644
--- a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java
@@ -11,8 +11,9 @@
 import android.os.RemoteException;
 import android.util.SparseArray;
 
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.base.annotations.JNINamespace;
+import org.jni_zero.CalledByNative;
+import org.jni_zero.JNINamespace;
+
 import org.chromium.base.process_launcher.ChildConnectionAllocator;
 import org.chromium.base.process_launcher.ChildProcessConnection;
 import org.chromium.base.process_launcher.ChildProcessLauncher;
diff --git a/base/test/android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java b/base/test/android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java
index c8d38cb..5ccfd04 100644
--- a/base/test/android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java
+++ b/base/test/android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java
@@ -11,12 +11,10 @@
 import androidx.multidex.MultiDex;
 
 import org.chromium.base.Log;
-import org.chromium.build.annotations.MainDex;
 
 /**
  *  Performs multidex installation for non-isolated processes.
  */
-@MainDex
 public class ChromiumMultiDexInstaller {
     private static final String TAG = "base_multidex";
 
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java
index 6b66478..ef5e658 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java
@@ -21,6 +21,7 @@
 import android.os.Handler;
 import android.os.Looper;
 import android.os.SystemClock;
+import android.system.Os;
 import android.text.TextUtils;
 
 import androidx.core.content.ContextCompat;
@@ -38,20 +39,20 @@
 
 import org.chromium.base.ActivityState;
 import org.chromium.base.ApplicationStatus;
+import org.chromium.base.CommandLineInitUtil;
 import org.chromium.base.ContextUtils;
 import org.chromium.base.FileUtils;
 import org.chromium.base.LifetimeAssert;
 import org.chromium.base.Log;
 import org.chromium.base.library_loader.LibraryLoader;
 import org.chromium.base.metrics.UmaRecorderHolder;
-import org.chromium.base.multidex.ChromiumMultiDexInstaller;
 import org.chromium.base.test.util.CallbackHelper;
+import org.chromium.base.test.util.CommandLineFlags;
 import org.chromium.base.test.util.InMemorySharedPreferences;
 import org.chromium.base.test.util.InMemorySharedPreferencesContext;
 import org.chromium.base.test.util.MinAndroidSdkLevel;
 import org.chromium.base.test.util.ScalableTimeout;
 import org.chromium.build.BuildConfig;
-import org.chromium.build.annotations.MainDex;
 
 import java.io.File;
 import java.io.IOException;
@@ -61,21 +62,18 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Enumeration;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 /**
- * A custom AndroidJUnitRunner that supports multidex installer and lists out test information.
- * Also customizes various TestRunner and Instrumentation behaviors, like when Activities get
- * finished, and adds a timeout to waitForIdleSync.
+ * A custom AndroidJUnitRunner that supports incremental install and custom test listing. Also
+ * customizes various TestRunner and Instrumentation behaviors, like when Activities get finished,
+ * and adds a timeout to waitForIdleSync.
  *
- * Please beware that is this not a class runner. It is declared in test apk AndroidManifest.xml
+ * <p>Please beware that this is not a class runner. It is declared in test apk AndroidManifest.xml
  * <instrumentation>
  */
-@MainDex
 public class BaseChromiumAndroidJUnitRunner extends AndroidJUnitRunner {
     private static final String LIST_ALL_TESTS_FLAG =
             "org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList";
@@ -83,6 +81,8 @@
             "org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestListPackage";
     private static final String IS_UNIT_TEST_FLAG =
             "org.chromium.base.test.BaseChromiumAndroidJUnitRunner.IsUnitTest";
+    private static final String EXTRA_CLANG_COVERAGE_DEVICE_FILE =
+            "org.chromium.base.test.BaseChromiumAndroidJUnitRunner.ClangCoverageDeviceFile";
     /**
      * This flag is supported by AndroidJUnitRunner.
      *
@@ -119,26 +119,16 @@
 
     static InMemorySharedPreferencesContext sInMemorySharedPreferencesContext;
 
+    static {
+        CommandLineInitUtil.setFilenameOverrideForTesting(CommandLineFlags.getTestCmdLineFile());
+    }
+
     @Override
     public Application newApplication(ClassLoader cl, String className, Context context)
             throws ClassNotFoundException, IllegalAccessException, InstantiationException {
         Context targetContext = super.getTargetContext();
         boolean hasUnderTestApk =
                 !getContext().getPackageName().equals(targetContext.getPackageName());
-        // When there is an under-test APK, BuildConfig belongs to it and does not indicate whether
-        // the test apk is multidex. In this case, just assume it is.
-        boolean isTestMultidex = hasUnderTestApk || BuildConfig.IS_MULTIDEX_ENABLED;
-        if (isTestMultidex) {
-            if (hasUnderTestApk) {
-                // Need hacks to have multidex work when there is an under-test apk :(.
-                ChromiumMultiDexInstaller.install(
-                        new BaseChromiumRunnerCommon.MultiDexContextWrapper(
-                                getContext(), targetContext));
-                BaseChromiumRunnerCommon.reorderDexPathElements(cl, getContext(), targetContext);
-            } else {
-                ChromiumMultiDexInstaller.install(getContext());
-            }
-        }
 
         // Wrap |context| here so that calls to getSharedPreferences() from within
         // attachBaseContext() will hit our InMemorySharedPreferencesContext.
@@ -208,6 +198,7 @@
             // androidx.test.
             System.setProperty("org.mockito.android.target",
                     InstrumentationRegistry.getTargetContext().getCacheDir().getPath());
+            setClangCoverageEnvIfEnabled();
             super.onStart();
         }
     }
@@ -289,30 +280,25 @@
     }
 
     private Request createListTestRequest(Bundle arguments) {
-        ArrayList<DexFile> dexFiles = new ArrayList<>();
-        try {
-            Class<?> bootstrapClass =
-                    Class.forName("org.chromium.incrementalinstall.BootstrapApplication");
-            DexFile[] incrementalInstallDexes =
-                    (DexFile[]) bootstrapClass.getDeclaredField("sIncrementalDexFiles").get(null);
-            dexFiles.addAll(Arrays.asList(incrementalInstallDexes));
-        } catch (Exception e) {
-            // Not an incremental apk.
-            if (BuildConfig.IS_MULTIDEX_ENABLED
-                    && Build.VERSION.SDK_INT <= Build.VERSION_CODES.KITKAT) {
-                // Test listing fails for test classes that aren't in the main dex
-                // (crbug.com/903820).
-                addClassloaderDexFiles(dexFiles, getClass().getClassLoader());
-            }
-        }
-        RunnerArgs runnerArgs =
-                new RunnerArgs.Builder().fromManifest(this).fromBundle(this, arguments).build();
         TestRequestBuilder builder;
-        if (!dexFiles.isEmpty()) {
-            builder = new DexFileTestRequestBuilder(this, arguments, dexFiles);
+        if (BuildConfig.IS_INCREMENTAL_INSTALL) {
+            try {
+                Class<?> bootstrapClass =
+                        Class.forName("org.chromium.incrementalinstall.BootstrapApplication");
+                DexFile[] incrementalInstallDexes =
+                        (DexFile[])
+                                bootstrapClass.getDeclaredField("sIncrementalDexFiles").get(null);
+                builder =
+                        new DexFileTestRequestBuilder(
+                                this, arguments, Arrays.asList(incrementalInstallDexes));
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
         } else {
             builder = new TestRequestBuilder(this, arguments);
         }
+        RunnerArgs runnerArgs =
+                new RunnerArgs.Builder().fromManifest(this).fromBundle(this, arguments).build();
         builder.addFromRunnerArgs(runnerArgs);
         builder.addPathToScan(getContext().getPackageCodePath());
 
@@ -328,21 +314,19 @@
     }
 
     /**
-     * Wraps TestRequestBuilder to make it work with incremental install and for multidex <= K.
+     * Wraps TestRequestBuilder to make it work with incremental install.
      *
-     * TestRequestBuilder does not know to look through the incremental install dex files, and has
-     * no api for telling it to do so. This class checks to see if the list of tests was given
-     * by the runner (mHasClassList), and if not overrides the auto-detection logic in build()
-     * to manually scan all .dex files.
-     *
-     * On <= K, classes not in the main dex file are missed, so we manually list them by grabbing
-     * the loaded DexFiles from the ClassLoader.
+     * <p>TestRequestBuilder does not know to look through the incremental install dex files, and
+     * has no api for telling it to do so. This class checks to see if the list of tests was given
+     * by the runner (mHasClassList), and if not overrides the auto-detection logic in build() to
+     * manually scan all .dex files.
      */
     private static class DexFileTestRequestBuilder extends TestRequestBuilder {
         final List<String> mExcludedPrefixes = new ArrayList<String>();
         final List<String> mIncludedPrefixes = new ArrayList<String>();
         final List<DexFile> mDexFiles;
         boolean mHasClassList;
+        private ClassLoader mClassLoader = DexFileTestRequestBuilder.class.getClassLoader();
 
         DexFileTestRequestBuilder(Instrumentation instr, Bundle bundle, List<DexFile> dexFiles) {
             super(instr, bundle);
@@ -380,6 +364,12 @@
         }
 
         @Override
+        public TestRequestBuilder setClassLoader(ClassLoader loader) {
+            mClassLoader = loader;
+            return super.setClassLoader(loader);
+        }
+
+        @Override
         public Request build() {
             // If a test class was requested, then no need to iterate class loader.
             if (!mHasClassList) {
@@ -423,7 +413,7 @@
                         // android-kitkat-arm-rel from 41s -> 23s.
                         continue;
                     }
-                    if (!className.contains("$") && checkIfTest(className)) {
+                    if (!className.contains("$") && checkIfTest(className, mClassLoader)) {
                         addTestClass(className);
                     }
                 }
@@ -438,30 +428,6 @@
         return field.get(instance);
     }
 
-    private static void addClassloaderDexFiles(List<DexFile> dexFiles, ClassLoader cl) {
-        // The main apk appears in the classpath twice sometimes, so check for apk path rather
-        // than comparing DexFile instances (e.g. on kitkat without an apk-under-test).
-        Set<String> apkPaths = new HashSet<>();
-        try {
-            Object pathList = getField(cl.getClass().getSuperclass(), cl, "pathList");
-            Object[] dexElements =
-                    (Object[]) getField(pathList.getClass(), pathList, "dexElements");
-            for (Object dexElement : dexElements) {
-                DexFile dexFile = (DexFile) getField(dexElement.getClass(), dexElement, "dexFile");
-                // Prevent adding the main apk twice, and also skip any system libraries added due
-                // to <uses-library> manifest entries.
-                String apkPath = dexFile.getName();
-                if (!apkPaths.contains(apkPath) && !apkPath.startsWith("/system")) {
-                    dexFiles.add(dexFile);
-                    apkPaths.add(apkPath);
-                }
-            }
-        } catch (Exception e) {
-            // No way to recover and test listing will fail.
-            throw new RuntimeException(e);
-        }
-    }
-
     /**
      * ClassLoader that translates NoClassDefFoundError into ClassNotFoundException.
      *
@@ -492,24 +458,20 @@
         }
     }
 
-    private static boolean checkIfTest(String className) {
-        Class<?> loadedClass = tryLoadClass(className);
+    private static boolean checkIfTest(String className, ClassLoader classLoader) {
+        Class<?> loadedClass = tryLoadClass(className, classLoader);
         if (loadedClass != null && isTestClass(loadedClass)) {
             return true;
         }
         return false;
     }
 
-    private static Class<?> tryLoadClass(String className) {
+    private static Class<?> tryLoadClass(String className, ClassLoader classLoader) {
         try {
-            return Class.forName(
-                    className, false, BaseChromiumAndroidJUnitRunner.class.getClassLoader());
-        } catch (NoClassDefFoundError e) {
-            // Do nothing.
-        } catch (ClassNotFoundException e) {
-            // Do nothing.
+            return Class.forName(className, false, classLoader);
+        } catch (NoClassDefFoundError | ClassNotFoundException e) {
+            return null;
         }
-        return null;
     }
 
     // Copied from android.support.test.runner code.
@@ -589,6 +551,7 @@
         }
 
         try {
+            writeClangCoverageProfileIfEnabled();
             getTargetContext().getSystemService(JobScheduler.class).cancelAll();
             checkOrDeleteOnDiskSharedPreferences(true);
             UmaRecorderHolder.resetForTesting();
@@ -728,13 +691,9 @@
             if (file.getName().equals("lib")) {
                 continue;
             }
-            if (file.getName().equals("chromium_tests_root")) {
-                continue;
-            }
             if (file.getName().equals("incremental-install-files")) {
                 continue;
             }
-            // E.g. Legacy multidex files.
             if (file.getName().equals("code_cache")) {
                 continue;
             }
@@ -759,12 +718,6 @@
     }
 
     private static boolean isSharedPrefFileAllowed(File f) {
-        // Multidex support library prefs need to stay or else multidex extraction will occur
-        // needlessly.
-        if (f.getName().endsWith("multidex.version.xml")) {
-            return true;
-        }
-
         // WebView prefs need to stay because webview tests have no (good) way of hooking
         // SharedPreferences for instantiated WebViews.
         String[] allowlist = new String[] {
@@ -773,6 +726,7 @@
                 "AwComponentUpdateServicePreferences.xml",
                 "ComponentsProviderServicePreferences.xml",
                 "org.chromium.webengine.test.instrumentation_test_apk_preferences.xml",
+                "AwOriginVisitLoggerPrefs.xml",
         };
         for (String name : allowlist) {
             // SharedPreferences may also access a ".bak" backup file from a previous run. See
@@ -828,4 +782,29 @@
             throw new AssertionError(errorMsg);
         }
     }
+
+    /**
+     * Configures the required environment variable if the Clang coverage argument exists.
+     */
+    private void setClangCoverageEnvIfEnabled() {
+        String clangProfileFile =
+                InstrumentationRegistry.getArguments().getString(EXTRA_CLANG_COVERAGE_DEVICE_FILE);
+        if (clangProfileFile != null) {
+            try {
+                Os.setenv("LLVM_PROFILE_FILE", clangProfileFile, /*override*/ true);
+            } catch (Exception e) {
+                Log.w(TAG, "failed to set LLVM_PROFILE_FILE", e);
+            }
+        }
+    }
+
+    /**
+     * Invokes __llvm_profile_dump() to write the raw Clang coverage profile to the device.
+     * No-op if the required build flag is not set.
+     */
+    private void writeClangCoverageProfileIfEnabled() {
+        if (BuildConfig.WRITE_CLANG_PROFILING_DATA && LibraryLoader.getInstance().isInitialized()) {
+            ClangProfiler.writeClangProfilingProfile();
+        }
+    }
 }
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java
deleted file mode 100644
index 835b738..0000000
--- a/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2017 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.test;
-
-import android.content.Context;
-import android.content.ContextWrapper;
-import android.content.SharedPreferences;
-import android.content.pm.ApplicationInfo;
-import android.content.pm.PackageManager;
-
-import androidx.core.content.ContextCompat;
-
-import org.chromium.base.Log;
-import org.chromium.base.test.util.PackageManagerWrapper;
-import org.chromium.build.annotations.MainDex;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.Serializable;
-import java.lang.reflect.Field;
-import java.util.Arrays;
-import java.util.Comparator;
-
-/**
- *  Functionality common to the JUnit3 and JUnit4 runners.
- */
-@MainDex
-class BaseChromiumRunnerCommon {
-    private static final String TAG = "base_test";
-
-    /**
-     *  A ContextWrapper that allows multidex test APKs to extract secondary dexes into
-     *  the APK under test's data directory.
-     */
-    @MainDex
-    static class MultiDexContextWrapper extends ContextWrapper {
-        private final Context mAppContext;
-
-        MultiDexContextWrapper(Context instrContext, Context appContext) {
-            super(instrContext);
-            mAppContext = appContext;
-        }
-
-        @Override
-        public File getFilesDir() {
-            return mAppContext.getFilesDir();
-        }
-
-        @Override
-        public SharedPreferences getSharedPreferences(String name, int mode) {
-            // Prefix so as to not conflict with main app's multidex prefs file.
-            return mAppContext.getSharedPreferences("test-" + name, mode);
-        }
-
-        @Override
-        public PackageManager getPackageManager() {
-            return new PackageManagerWrapper(super.getPackageManager()) {
-                @Override
-                public ApplicationInfo getApplicationInfo(String packageName, int flags) {
-                    try {
-                        ApplicationInfo ai = super.getApplicationInfo(packageName, flags);
-                        if (packageName.equals(getPackageName())) {
-                            File dataDir = new File(
-                                    ContextCompat.getCodeCacheDir(mAppContext), "test-multidex");
-                            if (!dataDir.exists() && !dataDir.mkdirs()) {
-                                throw new IOException(String.format(
-                                        "Unable to create test multidex directory \"%s\"",
-                                        dataDir.getPath()));
-                            }
-                            ai.dataDir = dataDir.getPath();
-                        }
-                        return ai;
-                    } catch (Exception e) {
-                        Log.e(TAG, "Failed to get application info for %s", packageName, e);
-                    }
-                    return null;
-                }
-            };
-        }
-    }
-
-    /**
-     * Ensure all test dex entries precede app dex entries.
-     *
-     * @param cl ClassLoader to modify. Assumed to be a derivative of
-     *        {@link dalvik.system.BaseDexClassLoader}. If this isn't
-     *        the case, reordering will fail.
-     */
-    static void reorderDexPathElements(ClassLoader cl, Context context, Context targetContext) {
-        try {
-            Log.i(TAG,
-                    "Reordering dex files. If you're building a multidex test APK and see a "
-                            + "class resolving to an unexpected implementation, this may be why.");
-            Field pathListField = findField(cl, "pathList");
-            Object dexPathList = pathListField.get(cl);
-            Field dexElementsField = findField(dexPathList, "dexElements");
-            Object[] dexElementsList = (Object[]) dexElementsField.get(dexPathList);
-            Arrays.sort(dexElementsList,
-                    new DexListReorderingComparator(
-                            context.getPackageName(), targetContext.getPackageName()));
-            dexElementsField.set(dexPathList, dexElementsList);
-        } catch (Exception e) {
-            Log.e(TAG, "Failed to reorder dex elements for testing.", e);
-        }
-    }
-
-    /**
-     *  Comparator for sorting dex list entries.
-     *
-     *  Using this to sort a list of dex list entries will result in the following order:
-     *   - Strings that contain neither the test package nor the app package in lexicographical
-     *     order.
-     *   - Strings that contain the test package in lexicographical order.
-     *   - Strings that contain the app package but not the test package in lexicographical order.
-     */
-    private static class DexListReorderingComparator implements Comparator<Object>, Serializable {
-        private String mTestPackage;
-        private String mAppPackage;
-
-        public DexListReorderingComparator(String testPackage, String appPackage) {
-            mTestPackage = testPackage;
-            mAppPackage = appPackage;
-        }
-
-        @Override
-        public int compare(Object o1, Object o2) {
-            String s1 = o1.toString();
-            String s2 = o2.toString();
-            if (s1.contains(mTestPackage)) {
-                if (!s2.contains(mTestPackage)) {
-                    if (s2.contains(mAppPackage)) {
-                        return -1;
-                    } else {
-                        return 1;
-                    }
-                }
-            } else if (s1.contains(mAppPackage)) {
-                if (s2.contains(mTestPackage)) {
-                    return 1;
-                } else if (!s2.contains(mAppPackage)) {
-                    return 1;
-                }
-            } else if (s2.contains(mTestPackage) || s2.contains(mAppPackage)) {
-                return -1;
-            }
-            return s1.compareTo(s2);
-        }
-    }
-
-    private static Field findField(Object instance, String name) throws NoSuchFieldException {
-        for (Class<?> clazz = instance.getClass(); clazz != null; clazz = clazz.getSuperclass()) {
-            try {
-                Field f = clazz.getDeclaredField(name);
-                f.setAccessible(true);
-                return f;
-            } catch (NoSuchFieldException e) {
-            }
-        }
-        throw new NoSuchFieldException(
-                "Unable to find field " + name + " in " + instance.getClass());
-    }
-}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java b/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java
index a1978f3..901fcb0 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java
@@ -29,6 +29,7 @@
 import org.chromium.base.test.util.AndroidSdkLevelSkipCheck;
 import org.chromium.base.test.util.CommandLineFlags;
 import org.chromium.base.test.util.DisableIfSkipCheck;
+import org.chromium.base.test.util.EspressoIdleTimeoutRule;
 import org.chromium.base.test.util.RestrictionSkipCheck;
 import org.chromium.base.test.util.SkipCheck;
 
@@ -37,6 +38,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 /**
  *  A custom runner for JUnit4 tests that checks requirements to conditionally ignore tests.
@@ -238,7 +240,8 @@
     @CallSuper
     protected List<TestRule> getDefaultTestRules() {
         return Arrays.asList(new BaseJUnit4TestRule(), new MockitoErrorHandler(),
-                new UnitTestLifetimeAssertRule());
+                new UnitTestLifetimeAssertRule(),
+                new EspressoIdleTimeoutRule(20, TimeUnit.SECONDS));
     }
 
     /**
diff --git a/base/test/android/javatests/src/org/chromium/base/test/ClangProfiler.java b/base/test/android/javatests/src/org/chromium/base/test/ClangProfiler.java
new file mode 100644
index 0000000..9a8b638
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/ClangProfiler.java
@@ -0,0 +1,29 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
+
+/**
+ * Class containing static methods for Clang profiling.
+ */
+@JNINamespace("base")
+public class ClangProfiler {
+    private ClangProfiler() {}
+
+    /**
+     * Writes Clang profiling profile to the configured path (LLVM_PROFILE_FILE).
+     * No-op if use_clang_coverage = false when building.
+     */
+    public static void writeClangProfilingProfile() {
+        ClangProfilerJni.get().writeClangProfilingProfile();
+    }
+
+    @NativeMethods
+    interface Natives {
+        void writeClangProfilingProfile();
+    }
+}
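Editor's note: ClangProfiler above is a thin JNI bridge (via jni_zero's @NativeMethods) around __llvm_profile_dump(). A hedged usage sketch follows, mirroring the check the runner itself performs in writeClangCoverageProfileIfEnabled(); the class name CoverageDumpExample is hypothetical, and the call is a no-op unless the build enabled Clang coverage.

import org.chromium.base.library_loader.LibraryLoader;
import org.chromium.base.test.ClangProfiler;

// Hypothetical caller; the runner above does the equivalent when it tears down.
class CoverageDumpExample {
    static void dumpCoverageIfPossible() {
        // The native method is only callable once the native library is loaded.
        if (LibraryLoader.getInstance().isInitialized()) {
            ClangProfiler.writeClangProfilingProfile();
        }
    }
}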
diff --git a/base/test/android/javatests/src/org/chromium/base/test/ReachedCodeProfiler.java b/base/test/android/javatests/src/org/chromium/base/test/ReachedCodeProfiler.java
index 174c598..d7a7a07 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/ReachedCodeProfiler.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/ReachedCodeProfiler.java
@@ -4,8 +4,8 @@
 
 package org.chromium.base.test;
 
-import org.chromium.base.annotations.JNINamespace;
-import org.chromium.base.annotations.NativeMethods;
+import org.jni_zero.JNINamespace;
+import org.jni_zero.NativeMethods;
 
 /**
  * Class containing only static methods for querying the status of the reached code profiler.
diff --git a/base/test/android/javatests/src/org/chromium/base/test/task/ThreadPoolTestHelpers.java b/base/test/android/javatests/src/org/chromium/base/test/task/ThreadPoolTestHelpers.java
index 2d7675d..67c4e9e 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/task/ThreadPoolTestHelpers.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/task/ThreadPoolTestHelpers.java
@@ -3,7 +3,8 @@
 // found in the LICENSE file.
 
 package org.chromium.base.test.task;
-import org.chromium.base.annotations.NativeMethods;
+
+import org.jni_zero.NativeMethods;
 
 /** Helpers that allow base::ThreadPoolInstance to be initialized or shutdown for testing. */
 public class ThreadPoolTestHelpers {
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/CallbackCondition.java b/base/test/android/javatests/src/org/chromium/base/test/transit/CallbackCondition.java
new file mode 100644
index 0000000..07ecb37
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/CallbackCondition.java
@@ -0,0 +1,51 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import org.chromium.base.test.util.CallbackHelper;
+
+/** A {@link Condition} that checks if a single callback was received. */
+public class CallbackCondition extends Condition {
+    private final CallbackHelper mCallbackHelper;
+    private final String mDescription;
+    private int mStartingCount;
+
+    /**
+     * Use {@link #instrumentationThread(CallbackHelper, String)} or {@link #uiThread(CallbackHelper, String)}.
+     *
+     * @param callbackHelper the {@link CallbackHelper} to wait for.
+     * @param description the user-visible name for the Condition.
+     */
+    private CallbackCondition(
+            boolean runOnUiThread, CallbackHelper callbackHelper, String description) {
+        super(runOnUiThread);
+        mCallbackHelper = callbackHelper;
+        mDescription = description;
+    }
+
+    public static CallbackCondition instrumentationThread(
+            CallbackHelper callbackHelper, String description) {
+        return new CallbackCondition(/* runOnUiThread= */ false, callbackHelper, description);
+    }
+
+    public static CallbackCondition uiThread(CallbackHelper callbackHelper, String description) {
+        return new CallbackCondition(/* runOnUiThread= */ true, callbackHelper, description);
+    }
+
+    @Override
+    public String buildDescription() {
+        return mDescription;
+    }
+
+    @Override
+    public void onStartMonitoring() {
+        mStartingCount = mCallbackHelper.getCallCount();
+    }
+
+    @Override
+    public boolean check() {
+        return mCallbackHelper.getCallCount() > mStartingCount;
+    }
+}
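Editor's note: CallbackCondition above snapshots the CallbackHelper's call count in onStartMonitoring() and is fulfilled once the count has increased. A short hypothetical usage sketch follows; the helper and the "dialog shown" scenario are assumptions for illustration, not part of this change.

import org.chromium.base.test.transit.CallbackCondition;
import org.chromium.base.test.util.CallbackHelper;

class CallbackConditionExample {
    // Hypothetical helper that production code would notify when a dialog is shown.
    static final CallbackHelper sDialogShownHelper = new CallbackHelper();

    static CallbackCondition dialogShownCondition() {
        // Fulfilled once sDialogShownHelper.notifyCalled() has run at least once
        // after onStartMonitoring() captured the starting count.
        return CallbackCondition.instrumentationThread(sDialogShownHelper, "Dialog shown");
    }
}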
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/Condition.java b/base/test/android/javatests/src/org/chromium/base/test/transit/Condition.java
new file mode 100644
index 0000000..9dc9ae4
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/Condition.java
@@ -0,0 +1,71 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+/**
+ * A condition that needs to be fulfilled for a state transition to be considered done.
+ *
+ * <p>{@link ConditionWaiter} waits for multiple Conditions to be fulfilled. {@link
+ * ConditionChecker} performs one-time checks for whether multiple Conditions are fulfilled.
+ */
+public abstract class Condition {
+    private String mDescription;
+
+    private boolean mIsRunOnUiThread;
+
+    /**
+     * @param isRunOnUiThread true if the Condition should be checked on the UI Thread, false if it
+     *     should be checked on the Instrumentation Thread.
+     */
+    public Condition(boolean isRunOnUiThread) {
+        mIsRunOnUiThread = isRunOnUiThread;
+    }
+
+    /**
+     * Called on the UI thread or the instrumentation thread, depending on {@link #isRunOnUiThread()}.
+     *
+     * @return whether the condition has been fulfilled.
+     */
+    public abstract boolean check();
+
+    /**
+     * @return a short description to be printed as part of a list of conditions. Use {@link
+     *     #getDescription()} to get a description as it caches the description until {@link
+     *     #rebuildDescription()} invalidates it.
+     */
+    public abstract String buildDescription();
+
+    /**
+     * Hook run right before the condition starts being monitored. Used, for example, to get initial
+     * callback counts.
+     */
+    public void onStartMonitoring() {}
+
+    /**
+     * @return a short description to be printed as part of a list of conditions.
+     */
+    public String getDescription() {
+        if (mDescription == null) {
+            rebuildDescription();
+        }
+        return mDescription;
+    }
+
+    /**
+     * Invalidates the last description; the next time {@link #getDescription()} is called, it
+     * will get a new one from {@link #buildDescription()}.
+     */
+    protected void rebuildDescription() {
+        mDescription = buildDescription();
+    }
+
+    /**
+     * @return true if the check is intended to be run on the UI Thread, false if it should be run
+     *     on the instrumentation thread.
+     */
+    public boolean isRunOnUiThread() {
+        return mIsRunOnUiThread;
+    }
+}
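Editor's note: Condition above defines the contract that ConditionWaiter polls and ConditionChecker spot-checks: check() runs on the UI or instrumentation thread according to the constructor flag, and buildDescription() labels the condition in failure summaries. A minimal hypothetical subclass follows, only to illustrate the contract; the view-visibility scenario is an assumption.

import java.util.function.BooleanSupplier;

import org.chromium.base.test.transit.Condition;

/** Hypothetical condition fulfilled once some UI state (stubbed as a supplier) reports visible. */
class ViewVisibleCondition extends Condition {
    private final BooleanSupplier mIsVisible;

    ViewVisibleCondition(BooleanSupplier isVisible) {
        super(/* isRunOnUiThread= */ true); // UI state should be read on the UI thread.
        mIsVisible = isVisible;
    }

    @Override
    public boolean check() {
        return mIsVisible.getAsBoolean();
    }

    @Override
    public String buildDescription() {
        return "View is visible";
    }
}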
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionChecker.java b/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionChecker.java
new file mode 100644
index 0000000..df10cb4
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionChecker.java
@@ -0,0 +1,135 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import org.chromium.base.ThreadUtils;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** Spot checks multiple {@link Condition}s to assert preconditions are still valid. */
+public class ConditionChecker {
+
+    /** The fulfillment status of a {@link Condition} being checked once. */
+    private static class ConditionCheckStatus {
+
+        private final Condition mCondition;
+        private boolean mFulfilled;
+        private String mError;
+
+        private ConditionCheckStatus(Condition condition) {
+            mCondition = condition;
+        }
+
+        private boolean update() {
+            try {
+                boolean fulfilled;
+                if (mCondition.isRunOnUiThread()) {
+                    // TODO(crbug.com/1489445): Post multiple checks in parallel, the UI thread will
+                    // run them sequentially.
+                    fulfilled = ThreadUtils.runOnUiThreadBlocking(mCondition::check);
+                } else {
+                    fulfilled = mCondition.check();
+                }
+
+                if (fulfilled) {
+                    reportFulfilled();
+                    return false;
+                } else {
+                    reportUnfulfilled();
+                    return true;
+                }
+            } catch (Exception e) {
+                reportError(e);
+                return true;
+            }
+        }
+
+        private void reportFulfilled() {
+            mFulfilled = true;
+        }
+
+        private void reportUnfulfilled() {
+            mFulfilled = false;
+        }
+
+        private void reportError(Exception e) {
+            mError = e.getMessage();
+        }
+
+        private boolean isFulfilled() {
+            return mFulfilled;
+        }
+
+        private String getError() {
+            return mError;
+        }
+    }
+
+    /**
+     * Spot checks each of the {@link Condition}s.
+     *
+     * @param conditions the {@link Condition}s to check.
+     * @throws AssertionError if not all Conditions are fulfilled.
+     */
+    public static void check(List<Condition> conditions) {
+        boolean anyCriteriaMissing = false;
+        List<ConditionCheckStatus> checkStatuses = new ArrayList<>();
+        for (Condition condition : conditions) {
+            checkStatuses.add(new ConditionCheckStatus(condition));
+        }
+
+        for (ConditionCheckStatus status : checkStatuses) {
+            anyCriteriaMissing |= status.update();
+        }
+
+        if (anyCriteriaMissing) {
+            throw buildCheckConditionsException(checkStatuses);
+        }
+    }
+
+    private static AssertionError buildCheckConditionsException(
+            List<ConditionCheckStatus> checkStatuses) {
+        return new AssertionError(
+                "Preconditions not fulfilled:\n" + createCheckConditionsSummary(checkStatuses));
+    }
+
+    private static String createCheckConditionsSummary(List<ConditionCheckStatus> checkStatuses) {
+        StringBuilder detailsString = new StringBuilder();
+
+        int i = 1;
+        for (ConditionCheckStatus checkStatus : checkStatuses) {
+            String conditionDescription = checkStatus.mCondition.getDescription();
+
+            String error = checkStatus.getError();
+            String errorsString = null;
+            String statusString;
+            if (error != null) {
+                errorsString = String.format(" {error: %s}", error);
+                statusString = "[ERR ]";
+            } else {
+                if (checkStatus.isFulfilled()) {
+                    statusString = "[OK  ]";
+                } else {
+                    statusString = "[FAIL]";
+                }
+            }
+
+            detailsString
+                    .append("    [")
+                    .append(i)
+                    .append(" ")
+                    .append(statusString)
+                    .append(" ")
+                    .append(conditionDescription);
+            if (errorsString != null) {
+                detailsString.append(" ").append(errorsString);
+            }
+            detailsString.append('\n');
+            i++;
+        }
+        return detailsString.toString();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionWaiter.java b/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionWaiter.java
new file mode 100644
index 0000000..89d52d7
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionWaiter.java
@@ -0,0 +1,301 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import android.util.ArrayMap;
+import android.util.Pair;
+
+import androidx.annotation.IntDef;
+
+import org.chromium.base.Log;
+import org.chromium.base.ThreadUtils;
+import org.chromium.base.TimeUtils;
+import org.chromium.base.test.util.CriteriaHelper;
+import org.chromium.base.test.util.CriteriaNotSatisfiedException;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.util.List;
+import java.util.Map;
+
+/** Waits for multiple {@link ConditionWaitStatus}es, polling the {@link Condition}s in parallel. */
+public class ConditionWaiter {
+
+    /**
+     * The fulfillment status of a {@link Condition} being waited for.
+     *
+     * <p>Tracks the times at which the Condition was checked to provide information about how long
+     * it took to be fulfilled (or for how long it was checked until it timed out).
+     *
+     * <p>Tracks and aggregates errors thrown during the Condition checking for user-friendly
+     * printing.
+     */
+    static class ConditionWaitStatus {
+
+        private final Condition mCondition;
+        private final @ConditionOrigin int mOrigin;
+        private long mTimeStarted;
+        private long mTimeUnfulfilled;
+        private long mTimeFulfilled;
+        private ArrayMap<String, Integer> mErrors = new ArrayMap<>();
+
+        /**
+         * Constructor.
+         *
+         * @param condition the {@link Condition} that this will hold the status for.
+         * @param origin the origin of the |condition|.
+         */
+        ConditionWaitStatus(Condition condition, @ConditionOrigin int origin) {
+            mCondition = condition;
+            mOrigin = origin;
+        }
+
+        private void startTimer() {
+            mTimeStarted = getNow();
+            mTimeUnfulfilled = mTimeStarted;
+        }
+
+        private boolean update() {
+            try {
+                boolean fulfilled;
+                if (mCondition.isRunOnUiThread()) {
+                    // TODO(crbug.com/1489445): Post multiple checks in parallel, the UI thread will
+                    // run them sequentially.
+                    fulfilled = ThreadUtils.runOnUiThreadBlocking(mCondition::check);
+                } else {
+                    fulfilled = mCondition.check();
+                }
+
+                if (fulfilled) {
+                    reportFulfilledWait();
+                    return false;
+                } else {
+                    reportUnfulfilledWait();
+                    return true;
+                }
+            } catch (Exception e) {
+                reportError(e.getMessage());
+                return true;
+            }
+        }
+
+        /**
+         * Report that the Condition being waited on is not fulfilled at this time.
+         *
+         * @throws IllegalStateException when the Condition is unfulfilled but it had previously
+         *     been fulfilled.
+         */
+        private void reportUnfulfilledWait() throws IllegalStateException {
+            if (isFulfilled()) {
+                throw new IllegalStateException("Unfulfilled after already being fulfilled");
+            }
+
+            mTimeUnfulfilled = getNow();
+        }
+
+        /** Report that the Condition being waited on is fulfilled at this time. */
+        private void reportFulfilledWait() {
+            if (!isFulfilled()) {
+                // isFulfilled() will return true after setting a non-zero time.
+                mTimeFulfilled = getNow();
+            }
+        }
+
+        /**
+         * Report that an error happened when checking the Condition.
+         *
+         * @param reason a String that will be printed as the reason; errors with the exact same
+         *     reason are aggregated.
+         */
+        private void reportError(String reason) {
+            int beforeCount = mErrors.getOrDefault(reason, 0);
+            mErrors.put(reason, beforeCount + 1);
+        }
+
+        /**
+         * @return if the Condition is fulfilled.
+         */
+        private boolean isFulfilled() {
+            return mTimeFulfilled > 0;
+        }
+
+        /**
+         * @return how long the condition has been considered unfulfilled for.
+         *     <p>The Condition must be unfulfilled, or an assertion will be raised.
+         */
+        private long getTimeUnfulfilled() {
+            assert !isFulfilled();
+
+            return mTimeUnfulfilled - mTimeStarted;
+        }
+
+        /**
+         * @return how long the condition took to be fulfilled for the first time. The result is a
+         *     pair (lowerBound, upperBound), where the time it took is between these two numbers.
+         *     |lowerBound| is the last time at which the Condition was seen as unfulfilled and
+         *     |upperBound| is the first time at which the Condition was seen as fulfilled.
+         *     <p>The Condition must be fulfilled, or an assertion will be raised.
+         */
+        private Pair<Long, Long> getTimeToFulfill() {
+            assert isFulfilled();
+
+            long minTimeToFulfill = mTimeUnfulfilled - mTimeStarted;
+            long maxTimeToFulfill = mTimeFulfilled - mTimeStarted;
+            return Pair.create(minTimeToFulfill, maxTimeToFulfill);
+        }
+
+        /**
+         * @return an aggregation of the errors reported while checking a Condition or reporting its
+         *     status.
+         */
+        private Map<String, Integer> getErrors() {
+            return mErrors;
+        }
+
+        private static long getNow() {
+            long now = TimeUtils.currentTimeMillis();
+            assert now > 0;
+            return now;
+        }
+    }
+
+    /** The maximum time to wait for a Condition to become fulfilled. */
+    public static final long MAX_TIME_TO_POLL = 3000L;
+
+    /** The polling interval between checks of the Conditions. */
+    public static final long POLLING_INTERVAL = 50;
+
+    private static final String TAG = "Transit";
+
+    /**
+     * Blocks waiting for multiple {@link ConditionWaitStatus}es, polling the {@link Condition}s in
+     * parallel and reporting their status to the {@link ConditionWaitStatus}es.
+     *
+     * <p>The timeout is |MAX_TIME_TO_POLL|.
+     *
+     * <p>TODO(crbug.com/1489462): Make the timeout configurable per transition.
+     *
+     * @param conditionStatuses the {@link ConditionWaitStatus}es to wait for.
+     * @throws AssertionError if not all {@link Condition}s are fulfilled before timing out.
+     */
+    public static void waitFor(List<ConditionWaitStatus> conditionStatuses) {
+        if (conditionStatuses.isEmpty()) {
+            Log.i(TAG, "No conditions to fulfill.");
+        }
+
+        for (ConditionWaitStatus status : conditionStatuses) {
+            status.startTimer();
+        }
+
+        Runnable checker =
+                () -> {
+                    boolean anyCriteriaMissing = false;
+                    for (ConditionWaitStatus status : conditionStatuses) {
+                        anyCriteriaMissing |= status.update();
+                    }
+
+                    if (anyCriteriaMissing) {
+                        throw buildWaitConditionsException(conditionStatuses);
+                    } else {
+                        Log.i(
+                                TAG,
+                                "Conditions fulfilled:\n%s",
+                                createWaitConditionsSummary(conditionStatuses));
+                    }
+                };
+
+        CriteriaHelper.pollInstrumentationThread(checker, MAX_TIME_TO_POLL, POLLING_INTERVAL);
+    }
+
+    private static CriteriaNotSatisfiedException buildWaitConditionsException(
+            List<ConditionWaitStatus> conditionStatuses) {
+        return new CriteriaNotSatisfiedException(
+                "Did not meet all conditions:\n" + createWaitConditionsSummary(conditionStatuses));
+    }
+
+    private static String createWaitConditionsSummary(List<ConditionWaitStatus> conditionStatuses) {
+        StringBuilder detailsString = new StringBuilder();
+
+        int i = 1;
+        for (ConditionWaitStatus conditionStatus : conditionStatuses) {
+            String conditionDescription = conditionStatus.mCondition.getDescription();
+
+            String originString = "";
+            switch (conditionStatus.mOrigin) {
+                case ConditionOrigin.ENTER:
+                    originString = "[ENTER]";
+                    break;
+                case ConditionOrigin.EXIT:
+                    originString = "[EXIT ]";
+                    break;
+                case ConditionOrigin.TRANSITION:
+                    originString = "[TRSTN]";
+                    break;
+            }
+
+            Map<String, Integer> errors = conditionStatus.getErrors();
+            StringBuilder errorsString = new StringBuilder();
+            String statusString;
+            if (!errors.isEmpty()) {
+                errorsString.append(" {errors: ");
+                for (Map.Entry<String, Integer> e : errors.entrySet()) {
+                    String errorReason = e.getKey();
+                    Integer errorCount = e.getValue();
+                    errorsString.append(String.format("%s (%d errors);", errorReason, errorCount));
+                }
+                errorsString.append("}");
+                statusString = "[ERR ]";
+            } else if (conditionStatus.isFulfilled()) {
+                statusString = "[OK  ]";
+            } else {
+                statusString = "[FAIL]";
+            }
+
+            String fulfilledString;
+            if (conditionStatus.isFulfilled()) {
+                Pair<Long, Long> timeToFulfill = conditionStatus.getTimeToFulfill();
+                fulfilledString =
+                        String.format(
+                                "{fulfilled after %d~%d ms}",
+                                timeToFulfill.first, timeToFulfill.second);
+            } else {
+                fulfilledString =
+                        String.format(
+                                "{unfulfilled after %d ms}", conditionStatus.getTimeUnfulfilled());
+            }
+
+            detailsString
+                    .append("    [")
+                    .append(i)
+                    .append("] ")
+                    .append(originString)
+                    .append(" ")
+                    .append(statusString)
+                    .append(" ")
+                    .append(conditionDescription)
+                    .append(" ")
+                    .append(fulfilledString);
+            if (errorsString.length() > 0) {
+                detailsString.append(" ").append(errorsString);
+            }
+            detailsString.append('\n');
+            i++;
+        }
+        return detailsString.toString();
+    }
+
+    /** The origin of a {@link Condition} (enter, exit, transition). */
+    @IntDef({
+        ConditionWaiter.ConditionOrigin.ENTER,
+        ConditionWaiter.ConditionOrigin.EXIT,
+        ConditionWaiter.ConditionOrigin.TRANSITION
+    })
+    @Retention(RetentionPolicy.SOURCE)
+    @interface ConditionOrigin {
+        int ENTER = 0;
+        int EXIT = 1;
+        int TRANSITION = 2;
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionalState.java b/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionalState.java
new file mode 100644
index 0000000..22804a4
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/ConditionalState.java
@@ -0,0 +1,172 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import static org.junit.Assert.fail;
+
+import androidx.annotation.IntDef;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.util.List;
+
+/**
+ * Base class for states with conditions for entering and exiting them.
+ *
+ * <p>Conditions include the existence of {@link Elements}, e.g. Views.
+ *
+ * <pre>ConditionalStates can be in the following phases:
+ * - NEW: Inactive, just created. No transition has started.
+ * - TRANSITIONING_TO: A transition into the state has started, but enter conditions might not be
+ *     fulfilled yet.
+ * - ACTIVE: Active, declared elements should exist.
+ * - TRANSITIONING_FROM: A transition out of the state has started, but exit conditions are not
+ *     fulfilled yet.
+ * - FINISHED: Inactive, transition away is done.
+ * </pre>
+ *
+ * <p>The lifecycle of ConditionalStates is linear:
+ *
+ * <p>NEW > TRANSITIONING_TO > ACTIVE > TRANSITIONING_FROM > FINISHED
+ *
+ * <p>Once FINISHED, the ConditionalState does not change state anymore.
+ *
+ * <p>This is the base class for {@link TransitStation} and {@link StationFacility}.
+ */
+public abstract class ConditionalState {
+    @Phase private int mLifecyclePhase = Phase.NEW;
+    private Elements mElements;
+
+    /** Lifecycle phases of ConditionalState. */
+    @IntDef({
+        Phase.NEW,
+        Phase.TRANSITIONING_TO,
+        Phase.ACTIVE,
+        Phase.TRANSITIONING_FROM,
+        Phase.FINISHED
+    })
+    @Retention(RetentionPolicy.SOURCE)
+    public @interface Phase {
+        int NEW = 0;
+        int TRANSITIONING_TO = 1;
+        int ACTIVE = 2;
+        int TRANSITIONING_FROM = 3;
+        int FINISHED = 4;
+    }
+
+    /**
+     * Declare the {@link Elements} that define this ConditionalState, such as Views.
+     *
+     * <p>Transit-layer {@link TransitStation}s and {@link StationFacility}s should override this
+     * and use the |elements| param to declare which elements need to be waited on for the state to
+     * be considered active.
+     *
+     * @param elements use the #declare___() methods to describe the Elements that define the state.
+     */
+    public abstract void declareElements(Elements.Builder elements);
+
+    List<Condition> getEnterConditions() {
+        initElements();
+        return mElements.getEnterConditions();
+    }
+
+    List<Condition> getExitConditions() {
+        initElements();
+        return mElements.getExitConditions();
+    }
+
+    private void initElements() {
+        if (mElements == null) {
+            Elements.Builder builder = new Elements.Builder();
+            declareElements(builder);
+            mElements = builder.build(this);
+        }
+    }
+
+    void setStateTransitioningTo() {
+        assertInPhase(Phase.NEW);
+        mLifecyclePhase = Phase.TRANSITIONING_TO;
+        onStartMonitoringTransitionTo();
+        for (Condition condition : getEnterConditions()) {
+            condition.onStartMonitoring();
+        }
+    }
+
+    /** Hook to setup observers for the transition into the ConditionalState. */
+    protected void onStartMonitoringTransitionTo() {}
+
+    void setStateActive() {
+        assertInPhase(Phase.TRANSITIONING_TO);
+        mLifecyclePhase = Phase.ACTIVE;
+        onStopMonitoringTransitionTo();
+    }
+
+    /** Hook to cleanup observers for the transition into the ConditionalState. */
+    protected void onStopMonitoringTransitionTo() {}
+
+    void setStateTransitioningFrom() {
+        assertInPhase(Phase.ACTIVE);
+        mLifecyclePhase = Phase.TRANSITIONING_FROM;
+        onStartMonitoringTransitionFrom();
+        for (Condition condition : getExitConditions()) {
+            condition.onStartMonitoring();
+        }
+    }
+
+    /** Hook to setup observers for the transition from the ConditionalState. */
+    protected void onStartMonitoringTransitionFrom() {}
+
+    void setStateFinished() {
+        assertInPhase(Phase.TRANSITIONING_FROM);
+        mLifecyclePhase = Phase.FINISHED;
+        onStopMonitoringTransitionFrom();
+    }
+
+    /** Hook to cleanup observers for the transition from the ConditionalState. */
+    protected void onStopMonitoringTransitionFrom() {}
+
+    /**
+     * @return the lifecycle {@link Phase} this ConditionalState is in.
+     */
+    public @Phase int getPhase() {
+        return mLifecyclePhase;
+    }
+
+    /** Assert this ConditionalState is in an expected lifecycle {@link Phase}. */
+    public void assertInPhase(@Phase int expectedPhase) {
+        if (mLifecyclePhase != expectedPhase) {
+            fail(
+                    String.format(
+                            "%s should have been in %s, but was %s",
+                            this, phaseToString(expectedPhase), phaseToString(mLifecyclePhase)));
+        }
+    }
+
+    /** Check the enter Conditions are still fulfilled. */
+    public final void recheckEnterConditions() {
+        assertInPhase(Phase.ACTIVE);
+        ConditionChecker.check(getEnterConditions());
+    }
+
+    /**
+     * @return a String representation of a lifecycle {@link Phase}.
+     */
+    public static String phaseToString(@Phase int phase) {
+        switch (phase) {
+            case Phase.NEW:
+                return "Phase.NEW";
+            case Phase.TRANSITIONING_TO:
+                return "Phase.TRANSITIONING_TO";
+            case Phase.ACTIVE:
+                return "Phase.ACTIVE";
+            case Phase.TRANSITIONING_FROM:
+                return "Phase.TRANSITIONING_AWAY";
+            case Phase.FINISHED:
+                return "Phase.FINISHED";
+            default:
+                throw new IllegalArgumentException("No string representation for phase " + phase);
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/DIR_METADATA b/base/test/android/javatests/src/org/chromium/base/test/transit/DIR_METADATA
new file mode 100644
index 0000000..34592b3
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/DIR_METADATA
@@ -0,0 +1,4 @@
+monorail {
+  component: "Test>Android"
+}
+
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/Elements.java b/base/test/android/javatests/src/org/chromium/base/test/transit/Elements.java
new file mode 100644
index 0000000..6623128
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/Elements.java
@@ -0,0 +1,131 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import android.view.View;
+
+import org.hamcrest.Matcher;
+
+import org.chromium.base.test.transit.ViewConditions.DisplayedCondition;
+import org.chromium.base.test.transit.ViewConditions.DoesNotExistAnymoreCondition;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * The elements that define a {@link ConditionalState}.
+ *
+ * <pre>
+ * - An ACTIVE ConditionalState is considered to have these elements available.
+ * - The presence of each element is an enter condition for the ConditionalState.
+ * - The absence of each element is an exit condition for the ConditionalState (except for unowned
+ *     elements).
+ * </pre>
+ */
+public class Elements {
+    private final List<Condition> mEnterConditions;
+    private final List<Condition> mExitConditions;
+
+    /** Private constructor, instantiated by {@link Builder#build(ConditionalState)}. */
+    private Elements(List<Condition> enterConditions, List<Condition> exitConditions) {
+        mEnterConditions = enterConditions;
+        mExitConditions = exitConditions;
+    }
+
+    List<Condition> getEnterConditions() {
+        return mEnterConditions;
+    }
+
+    List<Condition> getExitConditions() {
+        return mExitConditions;
+    }
+
+    /**
+     * Builder for {@link Elements}.
+     *
+     * <p>Passed to {@link ConditionalState#declareElements(Builder)}, which must declare the
+     * ConditionalState's elements by calling the declare___() methods.
+     */
+    public static class Builder {
+        private ArrayList<ViewElement> mViewElements = new ArrayList<>();
+        private ArrayList<Condition> mOtherEnterConditions = new ArrayList<>();
+        private ArrayList<Condition> mOtherExitConditions = new ArrayList<>();
+
+        Builder() {}
+
+        /**
+         * Declare as an element a single view that matches |viewMatcher| which will be gone after
+         * the ConditionalState is FINISHED.
+         */
+        public Builder declareView(Matcher<View> viewMatcher) {
+            declareView(viewMatcher, /* owned= */ true);
+            return this;
+        }
+
+        /**
+         * Declare as an element a single view that matches |viewMatcher| which will not necessarily
+         * be gone after the ConditionalState is FINISHED.
+         */
+        public Builder declareUnownedView(Matcher<View> viewMatcher) {
+            declareView(viewMatcher, /* owned= */ false);
+            return this;
+        }
+
+        private Builder declareView(Matcher<View> viewMatcher, boolean owned) {
+            mViewElements.add(new ViewElement(viewMatcher, owned));
+            return this;
+        }
+
+        /**
+         * Declare as an element a generic enter Condition. It must remain true as long as the
+         * ConditionalState is ACTIVE.
+         */
+        public Builder declareEnterCondition(Condition condition) {
+            mOtherEnterConditions.add(condition);
+            return this;
+        }
+
+        /** Declare as an element a generic exit Condition. */
+        public Builder declareExitCondition(Condition condition) {
+            mOtherExitConditions.add(condition);
+            return this;
+        }
+
+        /**
+         * Instantiates the {@link Elements} of a given |conditionalState| after they were declared
+         * by calling the Builder's declare___() methods.
+         */
+        Elements build(ConditionalState conditionalState) {
+            ArrayList<Condition> enterConditions = new ArrayList<>();
+            ArrayList<Condition> exitConditions = new ArrayList<>();
+
+            for (ViewElement viewElement : mViewElements) {
+                DisplayedCondition displayedCondition =
+                        new DisplayedCondition(viewElement.mViewMatcher);
+                enterConditions.add(displayedCondition);
+                if (viewElement.mOwned) {
+                    exitConditions.add(
+                            new DoesNotExistAnymoreCondition(
+                                    viewElement.mViewMatcher, displayedCondition));
+                }
+            }
+
+            enterConditions.addAll(mOtherEnterConditions);
+            exitConditions.addAll(mOtherExitConditions);
+
+            return new Elements(enterConditions, exitConditions);
+        }
+    }
+
+    private static class ViewElement {
+        private final Matcher<View> mViewMatcher;
+        private final boolean mOwned;
+
+        public ViewElement(Matcher<View> viewMatcher, boolean owned) {
+            mViewMatcher = viewMatcher;
+            mOwned = owned;
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/FacilityCheckIn.java b/base/test/android/javatests/src/org/chromium/base/test/transit/FacilityCheckIn.java
new file mode 100644
index 0000000..954833f
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/FacilityCheckIn.java
@@ -0,0 +1,75 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Log;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A {@link Transition} into a {@link StationFacility}. */
+class FacilityCheckIn extends Transition {
+    private static final String TAG = "Transit";
+
+    private StationFacility mFacility;
+
+    /**
+     * Constructor. FacilityCheckIn is instantiated to enter a {@link StationFacility}.
+     *
+     * @param facility the {@link StationFacility} to enter.
+     * @param trigger the action that triggers the transition into the facility, e.g. clicking a
+     *     View.
+     */
+    FacilityCheckIn(StationFacility facility, @Nullable Trigger trigger) {
+        super(trigger);
+        mFacility = facility;
+    }
+
+    void enterSync() {
+        onBeforeTransition();
+        triggerTransition();
+        List<ConditionWaiter.ConditionWaitStatus> transitionConditions = createConditions();
+        waitUntilEntry(transitionConditions);
+        onAfterTransition();
+        PublicTransitConfig.maybePauseAfterTransition(mFacility);
+    }
+
+    private void onBeforeTransition() {
+        mFacility.setStateTransitioningTo();
+        Log.i(TAG, "Will enter %s", mFacility);
+    }
+
+    @Override
+    protected void triggerTransition() {
+        super.triggerTransition();
+        Log.i(TAG, "Triggered entry into %s", mFacility);
+    }
+
+    private List<ConditionWaiter.ConditionWaitStatus> createConditions() {
+        ArrayList<ConditionWaiter.ConditionWaitStatus> transitionConditions = new ArrayList<>();
+        for (Condition condition : mFacility.getEnterConditions()) {
+            transitionConditions.add(
+                    new ConditionWaiter.ConditionWaitStatus(
+                            condition, ConditionWaiter.ConditionOrigin.ENTER));
+        }
+        transitionConditions.addAll(createTransitionConditionStatuses());
+        return transitionConditions;
+    }
+
+    private void waitUntilEntry(List<ConditionWaiter.ConditionWaitStatus> transitionConditions) {
+        try {
+            ConditionWaiter.waitFor(transitionConditions);
+        } catch (AssertionError e) {
+            throw TravelException.newEnterFacilityException(mFacility, e);
+        }
+    }
+
+    private void onAfterTransition() {
+        mFacility.setStateActive();
+        Log.i(TAG, "Entered %s", mFacility);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/FacilityCheckOut.java b/base/test/android/javatests/src/org/chromium/base/test/transit/FacilityCheckOut.java
new file mode 100644
index 0000000..4853ee6
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/FacilityCheckOut.java
@@ -0,0 +1,75 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Log;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A {@link Transition} out of a {@link StationFacility}. */
+class FacilityCheckOut extends Transition {
+    private static final String TAG = "Transit";
+
+    private StationFacility mFacility;
+
+    /**
+     * Constructor. FacilityCheckOut is instantiated to leave a {@link StationFacility}.
+     *
+     * @param facility the {@link StationFacility} to leave.
+     * @param trigger the action that triggers the transition out of the facility, e.g. clicking a
+     *     View.
+     */
+    FacilityCheckOut(StationFacility facility, @Nullable Trigger trigger) {
+        super(trigger);
+        mFacility = facility;
+    }
+
+    void exitSync() {
+        onBeforeTransition();
+        triggerTransition();
+        List<ConditionWaiter.ConditionWaitStatus> transitionConditions = createConditions();
+        waitUntilExit(transitionConditions);
+        onAfterTransition();
+        PublicTransitConfig.maybePauseAfterTransition(mFacility);
+    }
+
+    private void onBeforeTransition() {
+        mFacility.setStateTransitioningFrom();
+        Log.i(TAG, "Will exit %s", mFacility);
+    }
+
+    @Override
+    protected void triggerTransition() {
+        super.triggerTransition();
+        Log.i(TAG, "Triggered exit from %s", mFacility);
+    }
+
+    private List<ConditionWaiter.ConditionWaitStatus> createConditions() {
+        ArrayList<ConditionWaiter.ConditionWaitStatus> transitionConditions = new ArrayList<>();
+        for (Condition condition : mFacility.getExitConditions()) {
+            transitionConditions.add(
+                    new ConditionWaiter.ConditionWaitStatus(
+                            condition, ConditionWaiter.ConditionOrigin.EXIT));
+        }
+        transitionConditions.addAll(createTransitionConditionStatuses());
+        return transitionConditions;
+    }
+
+    private void waitUntilExit(List<ConditionWaiter.ConditionWaitStatus> transitionConditions) {
+        try {
+            ConditionWaiter.waitFor(transitionConditions);
+        } catch (AssertionError e) {
+            throw TravelException.newExitFacilityException(mFacility, e);
+        }
+    }
+
+    private void onAfterTransition() {
+        mFacility.setStateFinished();
+        Log.i(TAG, "Exited %s", mFacility);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/InstrumentationThreadCondition.java b/base/test/android/javatests/src/org/chromium/base/test/transit/InstrumentationThreadCondition.java
new file mode 100644
index 0000000..e8f4bcb
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/InstrumentationThreadCondition.java
@@ -0,0 +1,12 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+/** A {@link Condition} that is checked in the instrumentation thread. */
+public abstract class InstrumentationThreadCondition extends Condition {
+    public InstrumentationThreadCondition() {
+        super(/* isRunOnUiThread= */ false);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/PublicTransitConfig.java b/base/test/android/javatests/src/org/chromium/base/test/transit/PublicTransitConfig.java
new file mode 100644
index 0000000..a27c2ef
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/PublicTransitConfig.java
@@ -0,0 +1,50 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import android.widget.Toast;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import org.chromium.base.Log;
+import org.chromium.base.ResettersForTesting;
+import org.chromium.base.ThreadUtils;
+
+/** Configuration for PublicTransit tests. */
+public class PublicTransitConfig {
+    private static final String TAG = "Transit";
+    private static long sTransitionPause;
+
+    /**
+     * Set a pause for all transitions for debugging.
+     *
+     * @param millis how long to pause for (1000 to 4000 ms is typical).
+     */
+    public static void setTransitionPauseForDebugging(long millis) {
+        sTransitionPause = millis;
+        ResettersForTesting.register(() -> sTransitionPause = 0);
+    }
+
+    static void maybePauseAfterTransition(ConditionalState state) {
+        long pauseMs = sTransitionPause;
+        if (pauseMs > 0) {
+            ThreadUtils.runOnUiThread(
+                    () -> {
+                        Toast.makeText(
+                                        InstrumentationRegistry.getInstrumentation()
+                                                .getTargetContext(),
+                                        state.toString(),
+                                        Toast.LENGTH_SHORT)
+                                .show();
+                    });
+            try {
+                Log.e(TAG, "Pause for sightseeing %s for %dms", state, pauseMs);
+                Thread.sleep(pauseMs);
+            } catch (InterruptedException e) {
+                Log.e(TAG, "Interrupted pause", e);
+            }
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/README.md b/base/test/android/javatests/src/org/chromium/base/test/transit/README.md
new file mode 100644
index 0000000..6c1ba8b
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/README.md
@@ -0,0 +1,317 @@
+# Public Transit
+
+Public Transit is a framework for instrumentation tests that models app states,
+and transitions between them.
+
+### Metaphor
+
+The metaphor for the framework is that a Public Transit Layer provides tests
+with public transit routes to navigate the app using shared code, as opposed to
+each test driving its private car (writing its own private code) to set up the
+test.
+
+A Public Transit test moves around the app by going from `TransitStation` to
+`TransitStation`, and the stations are connected by routes (transition methods).
+`TransitStations` are marked by `Elements`, which are recognizable features of
+the destination station (features such as Android Views), which the test takes
+as evidence that it has arrived and is ready to perform any test-specific
+operation, check, or further navigation.
+
+### Structure and layers
+
+Public Transit is structured as follows:
+
+|Layer|Contents|File names|Location|Width (how many files)|
+|-|-|-|-|-|
+|Test Layer|Instrumentation test classes|`*Test.java`|`//chrome/**/javatests`|wide|
+|Transit Layer|Concrete `TransitStations`, `StationFacilities`|`*Station.java`, `*Condition.java`, etc.|`//chrome/test/android/javatests`|wide|
+|Framework Layer|Public Transit classes|All classes with package `org.chromium.base.test.transit.*`|`//base/test`|narrow|
+
+This directory (//base/test/.../base/test/transit) contains the Framework Layer.
+
+
+## Framework Features
+
+
+### State awareness
+
+Public Transit is based on the concepts of `ConditionalStates`, `Conditions` and
+`Transitions`, which means:
+
+* Keeping track of the state the app is in, including transitions between
+  states.
+* Evaluating if transitions are done by checking `Conditions`.
+* Giving execution control to the Test Layer only while no transitions are
+  happening, so as to reduce flakiness.
+
+
+### Transition management
+
+A transition is considered done when:
+* All **enter Conditions** of a `ConditionalState` being entered are fulfilled
+  * When moving between `TransitStations` or entering a `StationFacility`
+* All **exit Conditions** of a `ConditionalState` being exited are fulfilled
+  * When moving between `TransitStations` or leaving a `StationFacility`
+* All extra **transition Conditions** specific to the transition are fulfilled
+  * Most transitions don't need to add extra special Conditions; the sketch
+    below shows how one can be added when necessary.
+
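+When a transition does need an extra Condition, the `Trigger` receives the
+`Transition` and can add it via `addCondition()`. A minimal sketch (the station,
+Condition and view matcher names below are hypothetical):
+
+```
+public DownloadsStation openDownloads() {
+    DownloadsStation downloads = new DownloadsStation();
+    Trip.goSync(
+            this,
+            downloads,
+            (transition) -> {
+                // Waited on in parallel with the stations' enter/exit Conditions.
+                transition.addCondition(new DownloadsToolbarShownCondition());
+                onView(DOWNLOADS_MENU_ITEM).perform(click());
+            });
+    return downloads;
+}
+```
+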
+
+### Better error messages
+
+If any conditions in a transition are not fulfilled within a timeout, the test
+fails and the status of each condition being waited on is printed out:
+
+```
+org.chromium.base.test.transit.TravelException: Did not complete transition from <S1: EntryPageStation> to <S2: NewTabPageStation>
+    [...]
+    at org.chromium.chrome.test.transit.BasePageStation.openNewIncognitoTabFromMenu(BasePageStation.java:82)
+    at org.chromium.chrome.browser.toolbar.top.TabSwitcherActionMenuPTTest.testClosingAllRegularTabs_DoNotFinishActivity(TabSwitcherActionMenuPTTest.java:94)
+    ... 44 trimmed
+Caused by: java.lang.AssertionError: org.chromium.base.test.util.CriteriaNotSatisfiedException: Did not meet all conditions:
+    [1] [ENTER] [OK  ] View: (with id: id/tab_switcher_button and is displayed on the screen to the user) {fulfilled after 0~701 ms}
+    [2] [ENTER] [OK  ] Receive tab opened callback {fulfilled after 0~701 ms}
+    [3] [ENTER] [OK  ] Receive tab selected callback {fulfilled after 0~701 ms}
+    [4] [ENTER] [OK  ] Tab loaded {fulfilled after 0~701 ms}
+    [5] [ENTER] [FAIL] Page interactable or hidden {unfulfilled after 3746 ms}
+    [6] [ENTER] [OK  ] Ntp loaded {fulfilled after 0~701 ms}
+```
+
+
+### Reuse of code between tests
+
+Instrumentation tests share code primarily through util functions and test
+rules, which are limited to certain areas of the code and not easily located.
+
+Public Transit has the goal of increasing code reuse between test classes that
+go through the same test setup and user flow by putting common code in the
+Transit Layer:
+
+* conditions to ensure certain states are reached
+* triggers for transitions
+* Espresso `ViewMatchers` for the same UI elements
+
+
+## Classes and concepts
+
+
+### TransitStations
+
+A **`TransitStation`** represents one of the app's "screens", that is, a full
+(or mostly full) window view. Only one `TransitStation` can be active at any
+time.
+
+For each screen in the app, a concrete implementation of `TransitStation` should
+be created in the Transit Layer, implementing:
+
+* **`declareElements()`** declaring the `Views` and other enter/exit conditions
+  that define this `TransitStation`.
+* **transition methods** to travel to other `TransitStations` or to enter
+  `StationFacilities`. These methods are synchronous and return a handle to the
+  entered `ConditionalState` only after the transition is done and the new
+  `ConditionalState` becomes `ACTIVE`.
+
+Example of a concrete `TransitStation`:
+
+```
+/** The tab switcher screen, with the tab grid and the tab management toolbar. */
+public class TabSwitcherStation extends TransitStation {
+    public static final Matcher<View> NEW_TAB_BUTTON = withId(R.id.new_tab_button);
+    public static final Matcher<View> INCOGNITO_TOGGLE_TABS = withId(R.id.incognito_toggle_tabs);
+
+    private final ChromeTabbedActivityTestRule mChromeTabbedActivityTestRule;
+
+    public TabSwitcherStation(ChromeTabbedActivityTestRule chromeTabbedActivityTestRule) {
+        mChromeTabbedActivityTestRule = chromeTabbedActivityTestRule;
+    }
+
+    @Override
+    public void declareElements(Elements.Builder elements) {
+        elements.declareView(NEW_TAB_BUTTON);
+        elements.declareView(INCOGNITO_TOGGLE_TABS);
+    }
+
+    public NewTabPageStation openNewTabFromButton() {
+        recheckEnterConditions();
+        NewTabPageStation newTab = new NewTabPageStation(mChromeTabbedActivityTestRule);
+        Trip.goSync(this, newTab, (e) -> onView(NEW_TAB_BUTTON).perform(click()));
+        return newTab;
+    }
+}
+```
+
+
+### StationFacilities
+
+A **`StationFacility`** represents things like pop-up menus, dialogs or messages
+that are scoped to one of the app's "screens".
+
+Multiple `StationFacilities` may be active at the same time, in addition to the
+active TransitStation that contains them.
+
+As with `TransitStations`, concrete, app-specific implementations of
+`StationFacility` should be created in the Transit Layer, overriding
+**`declareElements()`** and **transition methods**.
+
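+An example sketch of a concrete `StationFacility` (the `BasePageStation` type and
+the view matchers are hypothetical):
+
+```
+/** The app menu shown after pressing the menu button on a page. */
+public class AppMenuFacility extends StationFacility<BasePageStation> {
+    public static final Matcher<View> MENU_LIST = withId(R.id.app_menu_list);
+    public static final Matcher<View> NEW_INCOGNITO_TAB = withText("New Incognito tab");
+
+    public AppMenuFacility(BasePageStation station) {
+        super(station);
+    }
+
+    @Override
+    public void declareElements(Elements.Builder elements) {
+        elements.declareView(MENU_LIST);
+        elements.declareView(NEW_INCOGNITO_TAB);
+    }
+}
+```
+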
+
+### ConditionalState
+
+Both `TransitStation` and `StationFacility` are **`ConditionalStates`**, which
+means they declare enter and exit conditions as `Elements` and have a linear
+lifecycle:
+
+`NEW` -> `TRANSITIONING_TO` -> `ACTIVE` -> `TRANSITIONING_FROM` -> `FINISHED`
+
+Once `FINISHED`, a `ConditionalState` should not be navigated to anymore. If a
+test comes back to a previous screen, it should be represented by a new
+`TransitStation`.
+
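+For debugging, the current phase can be queried with `getPhase()`. A small
+sketch, using the hypothetical stations from the examples in this document:
+
+```
+BasePageStation page = mTransitEntryPoints.startOnBlankPage();
+assert page.getPhase() == ConditionalState.Phase.ACTIVE;
+
+TabSwitcherStation tabSwitcher = page.openTabSwitcher();
+// |page| was exited by the transition and must not be reused.
+assert page.getPhase() == ConditionalState.Phase.FINISHED;
+```
+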
+
+### Condition
+
+**`Conditions`** are checks performed to ensure a certain transition is
+finished.
+
+Common `Condition` subclasses are provided by the Framework Layer (e.g.
+`ViewConditions` and `CallbackCondition`), and app-specific Conditions should be
+implemented in the Transit Layer extending `UiThreadCondition` or
+`InstrumentationThreadCondition`.
+
+An example of app-specific condition:
+
+```
+class PageLoadedCondition extends UiThreadCondition {
+    private final ChromeTabbedActivityTestRule mChromeTabbedActivityTestRule;
+    private Tab mMatchedTab;
+
+    PageLoadedCondition(
+            ChromeTabbedActivityTestRule chromeTabbedActivityTestRule) {
+        mChromeTabbedActivityTestRule = chromeTabbedActivityTestRule;
+    }
+
+    @Override
+    public String buildDescription() {
+        return "Tab loaded";
+    }
+
+    @Override
+    public boolean check() {
+        Tab tab = mChromeTabbedActivityTestRule.getActivity().getActivityTab();
+        if (tab != null
+                && !tab.isLoading()
+                && !tab.getWebContents().shouldShowLoadingUI()) {
+            mMatchedTab = tab;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    public Tab getMatchedTab() {
+        return mMatchedTab;
+    }
+}
+```
+
+`Conditions` are split between `UiThreadConditions` and
+`InstrumentationThreadConditions`. The framework knows to run the check() of
+each condition on the right thread.
+
+`Conditions` can depend on each other. See below as an example
+`PageInteractableCondition`, which depends on a Tab matched by
+`PageLoadedCondition`:
+
+```
+/** Fulfilled when a page is interactable. */
+class PageInteractableCondition extends UiThreadCondition {
+    private final PageLoadedCondition mPageLoadedCondition;
+
+    PageInteractableCondition(PageLoadedCondition pageLoadedCondition) {
+        mPageLoadedCondition = pageLoadedCondition;
+    }
+
+
+    @Override
+    public String buildDescription() {
+        return "Page interactable";
+    }
+
+    @Override
+    public boolean check() {
+        Tab tab = mPageLoadedCondition.getMatchedTab();
+        return tab != null && tab.isUserInteractable();
+    }
+}
+```
+
+
+### Transitions
+
+From the point of view of the Test Layer, transition methods are blocking. When
+a `TransitStation` or `StationFacility` is returned by one of those methods, it
+is always `ACTIVE` and can be immediately acted upon without further waiting.
+
+Code in the Test Layer contains no explicit waits; the waits are in the
+Framework Layer.
+
+An example of Test Layer code:
+
+```
+@Test
+public void testOpenTabSwitcher() {
+    BasePageStation page = mTransitEntryPoints.startOnBlankPage();
+    AppMenuFacility appMenu = page.openAppMenu();
+    page = appMenu.openNewIncognitoTab();
+    TabSwitcherStation tabSwitcher = page.openTabSwitcher();
+}
+```
+
+Transitions between `TransitStations` are done by calling `Trip.goSync()`.
+
+Transitions into and out of `StationFacilities` are done by calling
+`StationFacility.enterSync()` or `StationFacility.exitSync()`. If the app moves
+to another `TransitStation`, any active `StationFacilities` have their exit
+conditions added to the transition conditions.
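+
+A transit-layer transition method into a facility typically wraps
+`StationFacility.enterSync()`. A sketch, placed in the hypothetical
+`BasePageStation` and reusing the `AppMenuFacility` sketch above (`MENU_BUTTON`
+is also a hypothetical matcher):
+
+```
+public AppMenuFacility openAppMenu() {
+    recheckEnterConditions();
+    AppMenuFacility menu = new AppMenuFacility(this);
+    return StationFacility.enterSync(menu, (t) -> onView(MENU_BUTTON).perform(click()));
+}
+```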
+
+
+## Workflow
+
+
+### App behavior changes
+
+Since the Transit Layer reflects what the app looks like and what it does,
+changes to the app's behavior - such as which screens exist, UI elements and the
+navigation graph - need to be reflected in the Transit Layer.
+
+
+### The Transit Layer cohesion
+
+The Transit Layer is a directed graph of `TransitStations`. Transit Layer
+EntryPoints classes provide the entry points into the graph.
+
+There should not be multiple `TransitStations` that represent the same state,
+but different variations of the same screen may be modeled as different
+`TransitStations`. The cohesion of this graph is important to maximize code
+reuse.
+
+
+### Partially Public Transit tests
+
+It is possible to write tests that start as a Public Transit test and use the
+Transit Layer to navigate to a certain point, then "hop off" the framework and
+continue navigating the app as a regular instrumentation test.
+
+While it is preferable to model all transitions in the Transit Layer, a test
+that uses Public Transit partially also realizes its benefits partially, and
+there should be no framework impediment to doing so.
+
+Metaphorically, if there is no public transit to an area, you ride it as close
+as possible and continue on foot.
+
+
+### Ownership of the Transit Layer
+
+The Chrome-specific `TransitStations`, `StationFacilities` and `Conditions` that
+comprise the Transit Layer should be owned by the same team responsible for the
+related production code.
+
+The exception is the core of the Transit Layer, for example `PageStation`, which
+is not owned by specific feature teams; it will be owned by Clank Code Health
+and Clank EngProd.
\ No newline at end of file
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/StationFacility.java b/base/test/android/javatests/src/org/chromium/base/test/transit/StationFacility.java
new file mode 100644
index 0000000..907d439
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/StationFacility.java
@@ -0,0 +1,81 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import org.chromium.base.test.transit.Transition.Trigger;
+
+/**
+ * StationFacility is a {@link ConditionalState} scoped to a single {@link TransitStation} instance.
+ *
+ * <p>This should be used for example for popup dialogs, menus, temporary messages. A transit-layer
+ * class should be derived from it and instantiated. It should expose facility-specific methods for
+ * the test-layer to use.
+ *
+ * <p>As a {@link ConditionalState}, it has a defined lifecycle and must declare {@link Elements}
+ * that determine its enter and exit {@link Condition}s.
+ *
+ * <p>Leaving the TransitStation causes this state to be left as well, and exit Conditions will be
+ * waited upon for the TransitStation transition to be complete.
+ *
+ * <p>Transitions into and out of a StationFacility while the TransitStation is ACTIVE should be
+ * done with {@link #enterSync(StationFacility, Trigger)} and {@link #exitSync(StationFacility,
+ * Trigger)}.
+ *
+ * @param <T> the type of TransitStation this is scoped to.
+ */
+public abstract class StationFacility<T extends TransitStation> extends ConditionalState {
+    protected final T mStation;
+    private final int mId;
+    private static int sLastFacilityId = 1000;
+
+    /**
+     * Constructor.
+     *
+     * <p>Instantiate a subclass, then call {@link #enterSync(StationFacility, Trigger)} to enter
+     * it.
+     *
+     * @param station the TransitStation this StationFacility is scoped to.
+     */
+    protected StationFacility(T station) {
+        mId = ++sLastFacilityId;
+        mStation = station;
+        mStation.registerFacility(this);
+    }
+
+    @Override
+    public String toString() {
+        return String.format("<S%d|F%d: %s>", mStation.getId(), mId, getClass().getSimpleName());
+    }
+
+    /**
+     * Starts a transition into the StationFacility, runs the transition |trigger| and blocks until
+     * the facility is considered ACTIVE (enter Conditions are fulfilled).
+     *
+     * @param facility the StationFacility to enter.
+     * @param trigger the trigger to start the transition (e.g. clicking a view).
+     * @return the StationFacility entered.
+     * @param <F> the type of StationFacility entered.
+     */
+    public static <F extends StationFacility> F enterSync(F facility, Trigger trigger) {
+        FacilityCheckIn checkIn = new FacilityCheckIn(facility, trigger);
+        checkIn.enterSync();
+        return facility;
+    }
+
+    /**
+     * Starts a transition out of the StationFacility, runs the transition |trigger| and blocks
+     * until the facility is considered FINISHED (exit Conditions are fulfilled).
+     *
+     * @param facility the StationFacility to exit.
+     * @param trigger the trigger to start the transition (e.g. clicking a view).
+     * @return the StationFacility exited.
+     * @param <F> the type of StationFacility exited.
+     */
+    public static <F extends StationFacility> F exitSync(F facility, Trigger trigger) {
+        FacilityCheckOut checkOut = new FacilityCheckOut(facility, trigger);
+        checkOut.exitSync();
+        return facility;
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/TransitAsserts.java b/base/test/android/javatests/src/org/chromium/base/test/transit/TransitAsserts.java
new file mode 100644
index 0000000..7121d9d
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/TransitAsserts.java
@@ -0,0 +1,13 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+/** Assertions specific to Public Transit. */
+public class TransitAsserts {
+    public static void assertFinalDestination(TransitStation station) {
+        // TODO(crbug.com/1489446): Keep track of past stations and check that the last active
+        // station was |station|.
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/TransitStation.java b/base/test/android/javatests/src/org/chromium/base/test/transit/TransitStation.java
new file mode 100644
index 0000000..c596133
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/TransitStation.java
@@ -0,0 +1,58 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import org.chromium.base.test.transit.Transition.Trigger;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A major {@link ConditionalState}, a "screen" the app can be in. Only one can be active at a time.
+ *
+ * <p>A transit-layer class should be derived from it and instantiated.
+ *
+ * <p>As a {@link ConditionalState}, it has a defined lifecycle and must declare {@link Elements}
+ * that determine its enter and exit {@link Condition}s.
+ *
+ * <p>Transitions should be done with {@link Trip#goSync(TransitStation, TransitStation, Trigger)}.
+ * The transit-layer derived class should expose screen-specific methods for the test-layer to use.
+ */
+public abstract class TransitStation extends ConditionalState {
+    private static final String TAG = "Transit";
+    private final int mId;
+    private static int sLastStationId;
+    private List<StationFacility> mFacilities = new ArrayList<>();
+
+    protected TransitStation() {
+        mId = ++sLastStationId;
+    }
+
+    List<Condition> getActiveFacilityExitConditions() {
+        List<Condition> conditions = new ArrayList<>();
+        for (StationFacility facility : mFacilities) {
+            if (facility.getPhase() == Phase.ACTIVE) {
+                conditions.addAll(facility.getExitConditions());
+            }
+        }
+        return conditions;
+    }
+
+    void registerFacility(StationFacility facility) {
+        mFacilities.add(facility);
+    }
+
+    @Override
+    public String toString() {
+        return String.format("<S%d: %s>", mId, getClass().getSimpleName());
+    }
+
+    /**
+     * @return the self-incrementing id for logging purposes.
+     */
+    public int getId() {
+        return mId;
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/Transition.java b/base/test/android/javatests/src/org/chromium/base/test/transit/Transition.java
new file mode 100644
index 0000000..f4bc7f2
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/Transition.java
@@ -0,0 +1,67 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import androidx.annotation.Nullable;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/** A transition into and/or out of {@link ConditionalState}s. */
+public class Transition {
+    /**
+     * A trigger that will be executed to start the transition after all Conditions are in place and
+     * states are set to TRANSITIONING_*.
+     */
+    public interface Trigger {
+        /**
+         * Code to trigger the transition, e.g. click a View.
+         *
+         * @param transition the Transition that will be triggered; Conditions can be added to it.
+         */
+        void triggerTransition(Transition transition);
+    }
+
+    @Nullable private final Trigger mTrigger;
+
+    @Nullable private List<Condition> mConditions;
+
+    Transition(@Nullable Trigger trigger) {
+        mTrigger = trigger;
+    }
+
+    /**
+     * Add a |condition| to the Transition that is not in the exit or enter conditions of the states
+     * involved. The condition will be waited on in parallel with the exit and enter conditions of
+     * the states.
+     */
+    public void addCondition(Condition condition) {
+        if (mConditions == null) {
+            mConditions = new ArrayList<>();
+        }
+        mConditions.add(condition);
+    }
+
+    protected void triggerTransition() {
+        if (mTrigger != null) {
+            mTrigger.triggerTransition(this);
+        }
+    }
+
+    protected List<ConditionWaiter.ConditionWaitStatus> createTransitionConditionStatuses() {
+        if (mConditions == null) {
+            return Collections.emptyList();
+        }
+
+        ArrayList<ConditionWaiter.ConditionWaitStatus> statuses = new ArrayList<>();
+        for (Condition condition : mConditions) {
+            statuses.add(
+                    new ConditionWaiter.ConditionWaitStatus(
+                            condition, ConditionWaiter.ConditionOrigin.TRANSITION));
+        }
+        return statuses;
+    }
+}
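A sketch of how a Trigger can attach an extra, transition-scoped Condition via addCondition() before performing the triggering action. ToastShownCondition and the view id are hypothetical placeholders, and Espresso's onView/withId/click are assumed to be statically imported.

    // Hypothetical trigger that registers an extra Condition before clicking.
    Transition.Trigger trigger =
            transition -> {
                // Waited on in parallel with the stations' enter and exit conditions.
                transition.addCondition(new ToastShownCondition()); // hypothetical Condition
                onView(withId(R.id.confirm_button)).perform(click());
            };
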
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/TravelException.java b/base/test/android/javatests/src/org/chromium/base/test/transit/TravelException.java
new file mode 100644
index 0000000..6ef1f76
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/TravelException.java
@@ -0,0 +1,35 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import androidx.annotation.Nullable;
+
+/**
+ * {@link RuntimeException}s thrown by Public Transit transitions; the message of the wrapping
+ * Exception gives context about when the underlying Exception happened.
+ */
+public class TravelException extends RuntimeException {
+    public TravelException(
+            @Nullable TransitStation fromStation, TransitStation toStation, Throwable cause) {
+        super(
+                "Did not complete transition from "
+                        + (fromStation != null ? fromStation.toString() : "<entry point>")
+                        + " to "
+                        + toStation,
+                cause);
+    }
+
+    public TravelException(String message, StationFacility facility, Throwable cause) {
+        super(message + " " + facility, cause);
+    }
+
+    static TravelException newEnterFacilityException(StationFacility facility, Throwable cause) {
+        return new TravelException("Did not enter", facility, cause);
+    }
+
+    static TravelException newExitFacilityException(StationFacility facility, Throwable cause) {
+        return new TravelException("Did not exit", facility, cause);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/Trip.java b/base/test/android/javatests/src/org/chromium/base/test/transit/Trip.java
new file mode 100644
index 0000000..2d12bb2
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/Trip.java
@@ -0,0 +1,114 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Log;
+
+import java.util.ArrayList;
+
+/**
+ * A {@link Transition} into a {@link TransitStation}, either from another TransitStation or as an
+ * entry point.
+ */
+public class Trip extends Transition {
+    private static final String TAG = "Transit";
+    private final int mId;
+
+    @Nullable private final TransitStation mOrigin;
+    private final TransitStation mDestination;
+
+    private static int sLastTripId;
+
+    private Trip(@Nullable TransitStation origin, TransitStation destination, Trigger trigger) {
+        super(trigger);
+        mOrigin = origin;
+        mDestination = destination;
+        mId = ++sLastTripId;
+    }
+
+    /**
+     * Starts a transition from a TransitStation to another (or from no TransitStation if at an
+     * entry point). Runs the transition |trigger|, and blocks until the destination TransitStation
+     * is considered ACTIVE (enter Conditions are fulfilled), the origin TransitStation is
+     * considered FINISHED (exit Conditions are fulfilled), and the Transition's conditions are
+     * fulfilled.
+     *
+     * @param origin the TransitStation to depart from, or null if at an entry point.
+     * @param destination the TransitStation to arrive at.
+     * @param trigger the trigger to start the transition (e.g. clicking a view).
+     * @return the TransitStation entered.
+     * @param <T> the type of TransitStation entered.
+     */
+    public static <T extends TransitStation> T goSync(
+            @Nullable TransitStation origin, T destination, Trigger trigger) {
+        Trip trip = new Trip(origin, destination, trigger);
+        trip.travelSync();
+        return destination;
+    }
+
+    private void travelSync() {
+        embark();
+        if (mOrigin != null) {
+            Log.i(TAG, "Trip %d: Embarked at %s towards %s", mId, mOrigin, mDestination);
+        } else {
+            Log.i(TAG, "Trip %d: Starting at entry point %s", mId, mDestination);
+        }
+
+        triggerTransition();
+        Log.i(TAG, "Trip %d: Triggered transition, waiting to arrive at %s", mId, mDestination);
+
+        waitUntilArrival();
+        Log.i(TAG, "Trip %d: Arrived at %s", mId, mDestination);
+
+        PublicTransitConfig.maybePauseAfterTransition(mDestination);
+    }
+
+    private void embark() {
+        if (mOrigin != null) {
+            mOrigin.setStateTransitioningFrom();
+        }
+        mDestination.setStateTransitioningTo();
+    }
+
+    private void waitUntilArrival() {
+        ArrayList<ConditionWaiter.ConditionWaitStatus> transitionConditions = new ArrayList<>();
+
+        if (mOrigin != null) {
+            for (Condition condition : mOrigin.getExitConditions()) {
+                transitionConditions.add(
+                        new ConditionWaiter.ConditionWaitStatus(
+                                condition, ConditionWaiter.ConditionOrigin.EXIT));
+            }
+            for (Condition condition : mOrigin.getActiveFacilityExitConditions()) {
+                transitionConditions.add(
+                        new ConditionWaiter.ConditionWaitStatus(
+                                condition, ConditionWaiter.ConditionOrigin.EXIT));
+            }
+        }
+
+        for (Condition condition : mDestination.getEnterConditions()) {
+            transitionConditions.add(
+                    new ConditionWaiter.ConditionWaitStatus(
+                            condition, ConditionWaiter.ConditionOrigin.ENTER));
+        }
+        transitionConditions.addAll(createTransitionConditionStatuses());
+
+        // Throws CriteriaNotSatisfiedException if any conditions aren't met within the timeout and
+        // prints the state of all conditions. The timeout can be reduced when explicitly looking
+        // for flakiness due to tight timeouts.
+        try {
+            ConditionWaiter.waitFor(transitionConditions);
+        } catch (AssertionError e) {
+            throw new TravelException(mOrigin, mDestination, e);
+        }
+
+        if (mOrigin != null) {
+            mOrigin.setStateFinished();
+        }
+        mDestination.setStateActive();
+    }
+}
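Two usage sketches for Trip#goSync, assuming hypothetical HomeStation/SettingsStation classes, a hypothetical activity rule, and statically imported Espresso helpers; they only illustrate the entry-point and station-to-station forms described in the Javadoc above.

    // Entry point: no origin station to exit from.
    HomeStation home =
            Trip.goSync(
                    null, new HomeStation(), transition -> mActivityTestRule.launchActivity(null));

    // Regular transition: blocks until HomeStation's exit and SettingsStation's enter
    // Conditions are fulfilled, then returns the ACTIVE destination.
    SettingsStation settings =
            Trip.goSync(
                    home,
                    new SettingsStation(),
                    transition -> onView(withId(R.id.settings_button)).perform(click()));
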
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/UiThreadCondition.java b/base/test/android/javatests/src/org/chromium/base/test/transit/UiThreadCondition.java
new file mode 100644
index 0000000..16cd269
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/UiThreadCondition.java
@@ -0,0 +1,12 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+/** A {@link Condition} that is checked in the UI thread. */
+public abstract class UiThreadCondition extends Condition {
+    public UiThreadCondition() {
+        super(/*shouldRunOnUiThread*/ true);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/transit/ViewConditions.java b/base/test/android/javatests/src/org/chromium/base/test/transit/ViewConditions.java
new file mode 100644
index 0000000..010f615
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/transit/ViewConditions.java
@@ -0,0 +1,199 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.transit;
+
+import static androidx.test.espresso.Espresso.onView;
+import static androidx.test.espresso.assertion.ViewAssertions.doesNotExist;
+import static androidx.test.espresso.matcher.ViewMatchers.isDisplayed;
+
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.any;
+import static org.hamcrest.CoreMatchers.is;
+
+import android.content.res.Resources;
+import android.view.View;
+
+import androidx.test.espresso.AmbiguousViewMatcherException;
+import androidx.test.espresso.NoMatchingRootException;
+import androidx.test.espresso.NoMatchingViewException;
+import androidx.test.espresso.UiController;
+import androidx.test.espresso.ViewAction;
+import androidx.test.espresso.ViewInteraction;
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import org.hamcrest.Matcher;
+import org.hamcrest.StringDescription;
+
+import java.util.ArrayList;
+import java.util.regex.Pattern;
+
+/** {@link Condition}s related to Android {@link View}s. */
+public class ViewConditions {
+    /** Fulfilled when a single matching View exists and is displayed. */
+    public static class DisplayedCondition extends ExistsCondition {
+        public DisplayedCondition(Matcher<View> matcher) {
+            super(allOf(matcher, isDisplayed()));
+        }
+    }
+
+    /** Fulfilled when a single matching View exists. */
+    public static class ExistsCondition extends InstrumentationThreadCondition {
+        private final Matcher<View> mMatcher;
+        private View mViewMatched;
+
+        public ExistsCondition(Matcher<View> matcher) {
+            super();
+            this.mMatcher = matcher;
+        }
+
+        @Override
+        public String buildDescription() {
+            return "View: " + ViewConditions.createMatcherDescription(mMatcher);
+        }
+
+        @Override
+        public boolean check() {
+            ViewInteraction viewInteraction = onView(mMatcher);
+            try {
+                viewInteraction.perform(
+                        new ViewAction() {
+                            @Override
+                            public Matcher<View> getConstraints() {
+                                return any(View.class);
+                            }
+
+                            @Override
+                            public String getDescription() {
+                                return "check exists and consistent";
+                            }
+
+                            @Override
+                            public void perform(UiController uiController, View view) {
+                                if (mViewMatched != null && mViewMatched != view) {
+                                    throw new IllegalStateException(
+                                            String.format(
+                                                    "Matched a different view, was %s, now %s",
+                                                    mViewMatched, view));
+                                }
+                                mViewMatched = view;
+                            }
+                        });
+                return true;
+            } catch (NoMatchingViewException
+                    | NoMatchingRootException
+                    | AmbiguousViewMatcherException e) {
+                if (mViewMatched != null) {
+                    throw new IllegalStateException(
+                            String.format(
+                                    "Had matched a view (%s), but now got %s",
+                                    mViewMatched, e.getClass().getSimpleName()),
+                            e);
+                }
+                return false;
+            }
+        }
+
+        public View getViewMatched() {
+            return mViewMatched;
+        }
+    }
+
+    /** Fulfilled when no matching Views exist. */
+    public static class DoesNotExistAnymoreCondition extends InstrumentationThreadCondition {
+        private final Matcher<View> mMatcher;
+        private Matcher<View> mStricterMatcher;
+        private final ExistsCondition mExistsCondition;
+
+        public DoesNotExistAnymoreCondition(
+                Matcher<View> matcher, ExistsCondition existsCondition) {
+            super();
+            mMatcher = matcher;
+            mExistsCondition = existsCondition;
+        }
+
+        @Override
+        public String buildDescription() {
+            if (mStricterMatcher != null) {
+                return "No more view: "
+                        + ViewConditions.createMatcherDescription(mMatcher)
+                        + " that exactly "
+                        + ViewConditions.createMatcherDescription(mStricterMatcher);
+            } else {
+                return "No more view: " + ViewConditions.createMatcherDescription(mMatcher);
+            }
+        }
+
+        @Override
+        public boolean check() {
+            Matcher<View> matcherToUse;
+            if (mStricterMatcher != null) {
+                matcherToUse = mStricterMatcher;
+            } else if (mExistsCondition.getViewMatched() != null) {
+                mStricterMatcher = is(mExistsCondition.getViewMatched());
+                rebuildDescription();
+                matcherToUse = mStricterMatcher;
+            } else {
+                matcherToUse = mMatcher;
+            }
+
+            try {
+                onView(matcherToUse).check(doesNotExist());
+                return true;
+            } catch (AssertionError e) {
+                return false;
+            }
+        }
+    }
+
+    private static String getResourceName(int resId) {
+        return InstrumentationRegistry.getInstrumentation()
+                .getContext()
+                .getResources()
+                .getResourceName(resId);
+    }
+
+    /** Generates a description for the matcher that replaces raw ids with resource names. */
+    private static String createMatcherDescription(Matcher<View> matcher) {
+        StringDescription d = new StringDescription();
+        matcher.describeTo(d);
+        String description = d.toString();
+        Pattern numberPattern = Pattern.compile("[0-9]+");
+        java.util.regex.Matcher numberMatcher = numberPattern.matcher(description);
+        ArrayList<Integer> starts = new ArrayList<>();
+        ArrayList<Integer> ends = new ArrayList<>();
+        ArrayList<String> resourceNames = new ArrayList<>();
+        while (numberMatcher.find()) {
+            int resourceId = Integer.parseInt(numberMatcher.group());
+            if (resourceId > 0xFFFFFF) {
+                // Build-time Android resources have ids > 0xFFFFFF
+                starts.add(numberMatcher.start());
+                ends.add(numberMatcher.end());
+                String resourceDescription = createResourceDescription(resourceId);
+                resourceNames.add(resourceDescription);
+            }
+            // Numbers that are not Android resource ids are left as-is; adding them to
+            // resourceNames would misalign it with the starts/ends lists.
+        }
+
+        if (starts.size() == 0) return description;
+
+        String newDescription = description.substring(0, starts.get(0));
+        for (int i = 0; i < starts.size(); i++) {
+            newDescription += resourceNames.get(i);
+            int nextStart = (i == starts.size() - 1) ? description.length() : starts.get(i + 1);
+            newDescription += description.substring(ends.get(i), nextStart);
+        }
+
+        return newDescription;
+    }
+
+    private static String createResourceDescription(int possibleResourceId) {
+        try {
+            return getResourceName(possibleResourceId);
+        } catch (Resources.NotFoundException e) {
+            return String.valueOf(possibleResourceId);
+        }
+    }
+}
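A sketch of how the two conditions above are typically paired: the ExistsCondition captured while the View is present is handed to DoesNotExistAnymoreCondition, which narrows its matcher to that exact View instance once it has been seen. The view id is hypothetical and Espresso's withId is assumed to be statically imported.

    // Enter condition: a single matching, displayed toolbar View.
    ViewConditions.ExistsCondition toolbarShown =
            new ViewConditions.DisplayedCondition(withId(R.id.toolbar));

    // Exit condition: once toolbarShown has matched a View, wait for that exact View to be gone.
    ViewConditions.DoesNotExistAnymoreCondition toolbarGone =
            new ViewConditions.DoesNotExistAnymoreCondition(withId(R.id.toolbar), toolbarShown);
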
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java b/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java
index 9fccb2c..e7c6648 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java
@@ -82,8 +82,6 @@
     private static final String DISABLE_FEATURES = "disable-features";
     private static final String ENABLE_FEATURES = "enable-features";
 
-    private static boolean sInitializedForTest;
-
     // These members are used to track CommandLine state modifications made by the class/test method
     // currently being run, to be undone when the class/test method finishes.
     private static Set<String> sClassFlagsToRemove;
@@ -124,12 +122,8 @@
      * trying to remove a flag set externally, i.e. by the command-line flags file, will not work.
      */
     public static void setUpClass(Class<?> clazz) {
-        // The command line may already have been initialized by Application-level init. We need to
-        // re-initialize it with test flags.
-        if (!sInitializedForTest) {
-            CommandLine.reset();
+        if (!CommandLine.isInitialized()) {
             CommandLineInitUtil.initCommandLine(getTestCmdLineFile());
-            sInitializedForTest = true;
         }
 
         Set<String> flags = new HashSet<>();
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/DumpThreadsOnFailureRule.java b/base/test/android/javatests/src/org/chromium/base/test/util/DumpThreadsOnFailureRule.java
new file mode 100644
index 0000000..fc19911
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/DumpThreadsOnFailureRule.java
@@ -0,0 +1,37 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
+
+import org.chromium.base.Log;
+
+import java.util.Map;
+
+/**
+ * A simple rule that dumps all threads if the test fails. Used for debugging tests where an
+ * unknown long-running task might be causing problems.
+ */
+public class DumpThreadsOnFailureRule extends TestWatcher {
+    private static final String TAG = "DTOFR";
+
+    @Override
+    protected void failed(Throwable e, Description description) {
+        super.failed(e, description);
+        logThreadDumps();
+    }
+
+    private void logThreadDumps() {
+        Map<Thread, StackTraceElement[]> threadDumps = Thread.getAllStackTraces();
+        for (Map.Entry<Thread, StackTraceElement[]> entry : threadDumps.entrySet()) {
+            Thread thread = entry.getKey();
+            Log.e(TAG, thread.getName() + ": " + thread.getState());
+            for (StackTraceElement stackTraceElement : entry.getValue()) {
+                Log.e(TAG, "\t" + stackTraceElement);
+            }
+        }
+    }
+}
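A minimal usage sketch; the test class name is hypothetical.

    public class FlakyHangingTest {
        // Dumps every thread's stack trace to the log when a test in this class fails.
        @Rule
        public DumpThreadsOnFailureRule mDumpThreadsOnFailureRule = new DumpThreadsOnFailureRule();
    }
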
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java b/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java
index ff03e4f..2b9bd6d 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java
@@ -11,14 +11,13 @@
 
 /**
  * This annotation is for enormous tests.
- * <p>
- * Examples of enormous tests are tests that depend on external web sites or
- * tests that are long running.
- * <p>
- * Such tests are likely NOT reliable enough to run on tree closing bots and
- * should only be run on FYI bots.
+ *
+ * <p>Examples of enormous tests are tests that depend on external web sites or tests that are long
+ * running.
+ *
+ * <p>Such tests are likely NOT reliable enough to run on tree closing bots and should only be run
+ * on FYI bots.
  */
 @Target(ElementType.METHOD)
 @Retention(RetentionPolicy.RUNTIME)
-public @interface EnormousTest {
-}
+public @interface EnormousTest {}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/EspressoIdleTimeoutRule.java b/base/test/android/javatests/src/org/chromium/base/test/util/EspressoIdleTimeoutRule.java
new file mode 100644
index 0000000..8916019
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/EspressoIdleTimeoutRule.java
@@ -0,0 +1,39 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import androidx.test.espresso.IdlingPolicies;
+
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Sets Espresso's master timeout policy. This helps reduce the time Espresso waits before failing
+ * test cases that hang. This results in more useful stacks and error messages than when the
+ * process is killed from the outside.
+ */
+public final class EspressoIdleTimeoutRule implements TestRule {
+    private final long mTimeout;
+    private final TimeUnit mUnit;
+
+    public EspressoIdleTimeoutRule(long timeout, TimeUnit unit) {
+        mTimeout = timeout;
+        mUnit = unit;
+    }
+
+    @Override
+    public Statement apply(Statement base, Description description) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                IdlingPolicies.setMasterPolicyTimeout(mTimeout, mUnit);
+                base.evaluate();
+            }
+        };
+    }
+}
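A usage sketch, assuming a 30-second budget is appropriate for the suite; the test class name and timeout value are illustrative only.

    public class MyEspressoTest {
        // Fail hanging Espresso idle waits after 30 seconds so the test dies with a useful
        // stack trace instead of being killed externally.
        @Rule
        public EspressoIdleTimeoutRule mIdleTimeoutRule =
                new EspressoIdleTimeoutRule(30, TimeUnit.SECONDS);
    }
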
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/Features.java b/base/test/android/javatests/src/org/chromium/base/test/util/Features.java
index 8fae956..fc42b5f 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/Features.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/Features.java
@@ -25,10 +25,10 @@
  *    &#64;Rule
  *    public TestRule mProcessor = new Features.JUnitProcessor();
  *
- *    &#64;Features.EnableFeatures(BaseFeatures.Foo)
+ *    &#64;EnableFeatures(BaseFeatures.Foo)
  *    public void testFoo() { ... }
  *
- *    &#64;Features.EnableFeatures(ContentFeatureList.Foo)
+ *    &#64;EnableFeatures(ContentFeatureList.Foo)
  *    public void testFoo() { ... }
  * }
  * </pre>
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/HistogramWatcher.java b/base/test/android/javatests/src/org/chromium/base/test/util/HistogramWatcher.java
index ac5bf6e..26b9bb4 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/HistogramWatcher.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/HistogramWatcher.java
@@ -49,8 +49,16 @@
  *
  * // Assert
  * histogramWatcher.assertExpected();
+ *
+ * Alternatively, Java's try-with-resources can be used to wrap the act block to make the assert
+ * implicit. This can be especially helpful when a test case needs to create multiple watchers,
+ * as the watcher variables are scoped separately and cannot be accidentally swapped.
+ *
+ * try (HistogramWatcher ignored = HistogramWatcher.newSingleRecordWatcher("Histogram1")) {
+ *     [code under test that is expected to record the histogram above]
+ * }
  */
-public class HistogramWatcher {
+public class HistogramWatcher implements AutoCloseable {
     /**
      * Create a new {@link HistogramWatcher.Builder} to instantiate {@link HistogramWatcher}.
      */
@@ -253,6 +261,16 @@
     }
 
     /**
+     * Implements {@link AutoCloseable}. Note that although the interface method declares a checked
+     * {@link Exception}, this override does not need to, which lets call sites that know they are
+     * handling a {@link HistogramWatcher} avoid catching or declaring one.
+     */
+    @Override
+    public void close() {
+        assertExpected();
+    }
+
+    /**
      * Assert that the watched histograms were recorded as expected.
      */
     public void assertExpected() {
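A self-contained sketch of the try-with-resources form described in the Javadoc above; the histogram name and the code under test are placeholders.

    // close() runs assertExpected() automatically when the block exits.
    try (HistogramWatcher ignored =
            HistogramWatcher.newSingleRecordWatcher("Hypothetical.Histogram")) {
        doSomethingThatRecordsTheHistogramOnce(); // placeholder for the code under test
    }
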
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java b/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java
index 72e7818..f1055b5 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java
@@ -11,16 +11,14 @@
 
 /**
  * This annotation is for integration tests.
- * <p>
- * Examples of integration tests are tests that rely on real instances of the
- * application's services and components (e.g. Search) to test the system as
- * a whole. These tests may use additional command-line flags to configure the
- * existing backends to use.
- * <p>
- * Such tests are likely NOT reliable enough to run on tree closing bots and
- * should only be run on FYI bots.
+ *
+ * <p>Examples of integration tests are tests that rely on real instances of the application's
+ * services and components (e.g. Search) to test the system as a whole. These tests may use
+ * additional command-line flags to configure the existing backends to use.
+ *
+ * <p>Such tests are likely NOT reliable enough to run on tree closing bots and should only be run
+ * on FYI bots.
  */
 @Target(ElementType.METHOD)
 @Retention(RetentionPolicy.RUNTIME)
-public @interface IntegrationTest {
-}
\ No newline at end of file
+public @interface IntegrationTest {}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/JniMocker.java b/base/test/android/javatests/src/org/chromium/base/test/util/JniMocker.java
index 6d17f44..e186a71 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/JniMocker.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/JniMocker.java
@@ -4,11 +4,10 @@
 
 package org.chromium.base.test.util;
 
+import org.jni_zero.JniStaticTestMocker;
+import org.jni_zero.NativeMethods;
 import org.junit.rules.ExternalResource;
 
-import org.chromium.base.JniStaticTestMocker;
-import org.chromium.base.annotations.NativeMethods;
-
 import java.util.ArrayList;
 
 /**
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java b/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java
index cc7f534..b8a8537 100644
--- a/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java
@@ -4,17 +4,15 @@
 
 package org.chromium.base.test.util;
 
+import org.jni_zero.CalledByNative;
 import org.junit.Assert;
 
 import org.chromium.base.PathUtils;
 import org.chromium.base.StrictModeContext;
-import org.chromium.base.annotations.CalledByNative;
-import org.chromium.build.annotations.MainDex;
 
 /**
  * Collection of URL utilities.
  */
-@MainDex
 public class UrlUtils {
     private static final String DATA_DIR = "chrome/test/data/";
 
diff --git a/base/test/android/javatests/src/stub/org/chromium/base/test/ClangProfiler.java b/base/test/android/javatests/src/stub/org/chromium/base/test/ClangProfiler.java
new file mode 100644
index 0000000..263fd21
--- /dev/null
+++ b/base/test/android/javatests/src/stub/org/chromium/base/test/ClangProfiler.java
@@ -0,0 +1,14 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+/**
+ * Placeholder class used when clang profiling is not enabled at build time.
+ */
+public class ClangProfiler {
+    private ClangProfiler() {}
+
+    public static void writeClangProfilingProfile() {}
+}
diff --git a/base/test/android/junit/src/org/chromium/base/task/test/ShadowPostTask.java b/base/test/android/junit/src/org/chromium/base/task/test/ShadowPostTask.java
index 56043be..ead8696 100644
--- a/base/test/android/junit/src/org/chromium/base/task/test/ShadowPostTask.java
+++ b/base/test/android/junit/src/org/chromium/base/task/test/ShadowPostTask.java
@@ -4,40 +4,49 @@
 
 package org.chromium.base.task.test;
 
-import org.robolectric.Robolectric;
 import org.robolectric.annotation.Implementation;
 import org.robolectric.annotation.Implements;
 import org.robolectric.annotation.Resetter;
+import org.robolectric.shadow.api.Shadow;
+import org.robolectric.util.ReflectionHelpers.ClassParameter;
 
+import org.chromium.base.ResettersForTesting;
 import org.chromium.base.task.PostTask;
 import org.chromium.base.task.TaskTraits;
 
-/**
- * Shadow implementation for {@link PostTask}.
- */
+/** Shadow implementation for {@link PostTask}. */
 @Implements(PostTask.class)
 public class ShadowPostTask {
-    private static TestImpl sTestImpl = new TestImpl();
-
-    /** Set implementation for tests. Don't forget to call {@link #reset} later. */
-    public static void setTestImpl(TestImpl testImpl) {
-        sTestImpl = testImpl;
+    @FunctionalInterface
+    public interface TestImpl {
+        void postDelayedTask(@TaskTraits int taskTraits, Runnable task, long delay);
     }
 
+    private static TestImpl sTestImpl;
+
+    /** Set implementation for tests. */
+    public static void setTestImpl(TestImpl testImpl) {
+        sTestImpl = testImpl;
+        ResettersForTesting.register(ShadowPostTask::reset);
+    }
+
+    /** Resets the {@link TestImpl} instance, undoing any shadowing. */
     @Resetter
     public static void reset() {
-        sTestImpl = new TestImpl();
+        sTestImpl = null;
     }
 
     @Implementation
     public static void postDelayedTask(@TaskTraits int taskTraits, Runnable task, long delay) {
-        sTestImpl.postDelayedTask(taskTraits, task, delay);
-    }
-
-    /** Default implementation for tests. Override methods or add new ones as necessary. */
-    public static class TestImpl {
-        public void postDelayedTask(@TaskTraits int taskTraits, Runnable task, long delay) {
-            Robolectric.getForegroundThreadScheduler().postDelayed(task, delay);
+        if (sTestImpl == null) {
+            // Use reflection to call into the real method being shadowed. This is equivalent to
+            // not having a shadow installed.
+            Shadow.directlyOn(PostTask.class, "postDelayedTask",
+                    ClassParameter.from(int.class, taskTraits),
+                    ClassParameter.from(Runnable.class, task),
+                    ClassParameter.from(long.class, delay));
+        } else {
+            sTestImpl.postDelayedTask(taskTraits, task, delay);
         }
     }
 }
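A sketch of a Robolectric test using the new functional-interface form of ShadowPostTask.TestImpl to capture posted tasks instead of running them; the test class and the captured-task handling are hypothetical.

    @RunWith(BaseRobolectricTestRunner.class)
    @Config(shadows = {ShadowPostTask.class})
    public class PostTaskCaptureTest {
        private final List<Runnable> mPostedTasks = new ArrayList<>();

        @Before
        public void setUp() {
            // Tasks are captured instead of executed; ResettersForTesting clears the impl
            // after the test.
            ShadowPostTask.setTestImpl((taskTraits, task, delay) -> mPostedTasks.add(task));
        }
    }
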
diff --git a/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRule.java b/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRule.java
index ceaa80e..eace5f2 100644
--- a/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRule.java
+++ b/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRule.java
@@ -24,8 +24,11 @@
 import org.chromium.base.task.PostTask;
 import org.chromium.base.test.BaseRobolectricTestRunner.HelperTestRunner;
 import org.chromium.base.test.util.CommandLineFlags;
+import org.chromium.build.NativeLibraries;
 
 import java.lang.reflect.Method;
+import java.util.Locale;
+import java.util.TimeZone;
 
 /**
  * The default Rule used by BaseRobolectricTestRunner. Include this directly when using
@@ -33,6 +36,9 @@
  * Use @Rule(order=-2) to ensure it runs before other rules.
  */
 public class BaseRobolectricTestRule implements TestRule {
+    private static final Locale ORIG_LOCALE = Locale.getDefault();
+    private static final TimeZone ORIG_TIMEZONE = TimeZone.getDefault();
+
     // Removes the API Level suffix. E.g. "testSomething[28]" -> "testSomething".
     private static String stripBrackets(String methodName) {
         int idx = methodName.indexOf('[');
@@ -65,8 +71,13 @@
 
     static void setUp(Method method) {
         UmaRecorderHolder.setUpNativeUmaRecorder(false);
-        LibraryLoader.getInstance().setLibraryProcessType(LibraryProcessType.PROCESS_BROWSER);
         ContextUtils.initApplicationContextForTests(ApplicationProvider.getApplicationContext());
+        LibraryLoader.getInstance().setLibraryProcessType(LibraryProcessType.PROCESS_BROWSER);
+        // Whether native is loaded is a global one-way switch, so load it automatically here so
+        // that it is always in the same state across tests.
+        if (NativeLibraries.LIBRARIES.length > 0) {
+            LibraryLoader.getInstance().ensureMainDexInitialized();
+        }
         ApplicationStatus.initialize(ApplicationProvider.getApplicationContext());
         UmaRecorderHolder.resetForTesting();
         CommandLineFlags.setUpClass(method.getDeclaringClass());
@@ -90,6 +101,8 @@
             ContextUtils.clearApplicationContextForTests();
             PathUtils.resetForTesting();
             ThreadUtils.clearUiThreadForTesting();
+            Locale.setDefault(ORIG_LOCALE);
+            TimeZone.setDefault(ORIG_TIMEZONE);
             // Run assertions only when the test has not already failed so as to not mask
             // failures. https://crbug.com/1466313
             if (testFailed) {
diff --git a/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java b/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java
index 88792fd..4fd8380 100644
--- a/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java
@@ -15,9 +15,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
-/**
- * Test SetUpStatement is working as intended with SetUpTestRule.
- */
+/** Test SetUpStatement is working as intended with SetUpTestRule. */
 @RunWith(BlockJUnit4ClassRunner.class)
 public class SetUpStatementTest {
     private Statement mBase;
@@ -26,24 +24,26 @@
 
     @Before
     public void setUp() {
-        mBase = new Statement() {
-            @Override
-            public void evaluate() {
-                mList.add(1);
-            }
-        };
+        mBase =
+                new Statement() {
+                    @Override
+                    public void evaluate() {
+                        mList.add(1);
+                    }
+                };
         mList = new ArrayList<>();
-        mRule = new SetUpTestRule<TestRule>() {
-            @Override
-            public void setUp() {
-                mList.add(0);
-            }
+        mRule =
+                new SetUpTestRule<TestRule>() {
+                    @Override
+                    public void setUp() {
+                        mList.add(0);
+                    }
 
-            @Override
-            public TestRule shouldSetUp(boolean toSetUp) {
-                return null;
-            }
-        };
+                    @Override
+                    public TestRule shouldSetUp(boolean toSetUp) {
+                        return null;
+                    }
+                };
     }
 
     @Test
diff --git a/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java b/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java
index b831435..93848c3 100644
--- a/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java
+++ b/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java
@@ -13,33 +13,20 @@
 /** Shadow class of {@link BuildInfo} */
 @Implements(BuildInfo.class)
 public class ShadowBuildInfo {
-    private static boolean sIsAtLeastT;
     private static boolean sTargetsAtLeastT;
 
     /** Resets the changes made to static state. */
     @Resetter
     public static void reset() {
-        sIsAtLeastT = false;
         sTargetsAtLeastT = false;
     }
 
-    /** Whether the current build is considered to be at least T. */
-    @Implementation
-    public static boolean isAtLeastT() {
-        return sIsAtLeastT;
-    }
-
     /** Whether the current build is targeting at least T. */
     @Implementation
     public static boolean targetsAtLeastT() {
         return sTargetsAtLeastT;
     }
 
-    /** Sets whether current Android version is at least T. */
-    public static void setIsAtLeastT(boolean isAtLeastT) {
-        sIsAtLeastT = isAtLeastT;
-    }
-
     /** Sets whether the current build is targeting at least T. */
     public static void setTargetsAtLeastT(boolean targetsAtLeastT) {
         sTargetsAtLeastT = targetsAtLeastT;
diff --git a/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java b/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
index 18633ae..86d2bb7 100644
--- a/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
@@ -36,13 +36,12 @@
     private static class ChildClass extends ParentClass {}
 
     private static class Groups {
-        // clang-format off
         @ParameterizedCommandLineFlags({
             @Switches({"c1", "c2"}),
             @Switches({"c3", "c4"}),
         })
         public void testA() {}
-        // clang-format on
+
         @ParameterizedCommandLineFlags
         public void testB() {}
     }
@@ -57,147 +56,132 @@
 
     @Test
     public void testGetTestMethodJSON_testA() throws Throwable {
-        Description desc = Description.createTestDescription(
-                ParentClass.class, "testA",
-                ParentClass.class.getMethod("testA").getAnnotations());
+        Description desc =
+                Description.createTestDescription(
+                        ParentClass.class,
+                        "testA",
+                        ParentClass.class.getMethod("testA").getAnnotations());
         JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'method': 'testA',",
-            " 'annotations': {}",
-            "}"
-        );
-        // clang-format on
+        String expectedJsonString = makeJSON("{", " 'method': 'testA',", " 'annotations': {}", "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 
     @Test
     public void testGetTestMethodJSON_testB() throws Throwable {
-        Description desc = Description.createTestDescription(
-                ParentClass.class, "testB",
-                ParentClass.class.getMethod("testB").getAnnotations());
+        Description desc =
+                Description.createTestDescription(
+                        ParentClass.class,
+                        "testB",
+                        ParentClass.class.getMethod("testB").getAnnotations());
         JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'method': 'testB',",
-            " 'annotations': {",
-            "  'CommandLineFlags$Add': {",
-            "   'value': ['world']",
-            "  }",
-            " }",
-            "}"
-        );
-        // clang-format on
+        String expectedJsonString =
+                makeJSON(
+                        "{",
+                        " 'method': 'testB',",
+                        " 'annotations': {",
+                        "  'CommandLineFlags$Add': {",
+                        "   'value': ['world']",
+                        "  }",
+                        " }",
+                        "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 
-
     @Test
     public void testGetTestMethodJSONForInheritedClass() throws Throwable {
-        Description desc = Description.createTestDescription(
-                ChildClass.class, "testB",
-                ChildClass.class.getMethod("testB").getAnnotations());
+        Description desc =
+                Description.createTestDescription(
+                        ChildClass.class,
+                        "testB",
+                        ChildClass.class.getMethod("testB").getAnnotations());
         JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'method': 'testB',",
-            " 'annotations': {",
-            "   'CommandLineFlags$Add': {",
-            "    'value': ['world']",
-            "   }",
-            "  }",
-            "}"
-        );
-        // clang-format on
+        String expectedJsonString =
+                makeJSON(
+                        "{",
+                        " 'method': 'testB',",
+                        " 'annotations': {",
+                        "   'CommandLineFlags$Add': {",
+                        "    'value': ['world']",
+                        "   }",
+                        "  }",
+                        "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 
     @Test
     public void testGetAnnotationJSONForParentClass() throws Throwable {
-        JSONObject json = TestListInstrumentationRunListener.getAnnotationJSON(
-                Arrays.asList(ParentClass.class.getAnnotations()));
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'CommandLineFlags$Add': {",
-            "  'value': ['hello']",
-            " }",
-            "}"
-        );
-        // clang-format on
+        JSONObject json =
+                TestListInstrumentationRunListener.getAnnotationJSON(
+                        Arrays.asList(ParentClass.class.getAnnotations()));
+        String expectedJsonString =
+                makeJSON("{", " 'CommandLineFlags$Add': {", "  'value': ['hello']", " }", "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 
     @Test
     public void testGetAnnotationJSONForChildClass() throws Throwable {
-        JSONObject json = TestListInstrumentationRunListener.getAnnotationJSON(
-                Arrays.asList(ChildClass.class.getAnnotations()));
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'CommandLineFlags$Add': {",
-            "  'value': ['hello']",
-            " },",
-            " 'Batch': {",
-            "  'value': 'foo'",
-            " }",
-            "}"
-        );
-        // clang-format on
+        JSONObject json =
+                TestListInstrumentationRunListener.getAnnotationJSON(
+                        Arrays.asList(ChildClass.class.getAnnotations()));
+        String expectedJsonString =
+                makeJSON(
+                        "{",
+                        " 'CommandLineFlags$Add': {",
+                        "  'value': ['hello']",
+                        " },",
+                        " 'Batch': {",
+                        "  'value': 'foo'",
+                        " }",
+                        "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 
     @Test
     public void testGetTestMethodJSONGroup_testA() throws Throwable {
-        Description desc = Description.createTestDescription(
-                Groups.class, "testA", Groups.class.getMethod("testA").getAnnotations());
+        Description desc =
+                Description.createTestDescription(
+                        Groups.class, "testA", Groups.class.getMethod("testA").getAnnotations());
         JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'method': 'testA',",
-            " 'annotations': {",
-            "  'ParameterizedCommandLineFlags': {",
-            "   'value': [",
-            "    {",
-            "     'ParameterizedCommandLineFlags$Switches': {",
-            "      'value': ['c1','c2']",
-            "     }",
-            "    },",
-            "    {",
-            "     'ParameterizedCommandLineFlags$Switches': {",
-            "      'value': ['c3','c4']",
-            "     }",
-            "    }",
-            "   ]",
-            "  }",
-            " }",
-            "}"
-        );
-        // clang-format on
+        String expectedJsonString =
+                makeJSON(
+                        "{",
+                        " 'method': 'testA',",
+                        " 'annotations': {",
+                        "  'ParameterizedCommandLineFlags': {",
+                        "   'value': [",
+                        "    {",
+                        "     'ParameterizedCommandLineFlags$Switches': {",
+                        "      'value': ['c1','c2']",
+                        "     }",
+                        "    },",
+                        "    {",
+                        "     'ParameterizedCommandLineFlags$Switches': {",
+                        "      'value': ['c3','c4']",
+                        "     }",
+                        "    }",
+                        "   ]",
+                        "  }",
+                        " }",
+                        "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 
     @Test
     public void testGetTestMethodJSONGroup_testB() throws Throwable {
-        Description desc = Description.createTestDescription(
-                Groups.class, "testB", Groups.class.getMethod("testB").getAnnotations());
+        Description desc =
+                Description.createTestDescription(
+                        Groups.class, "testB", Groups.class.getMethod("testB").getAnnotations());
         JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
-        // clang-format off
-        String expectedJsonString = makeJSON(
-            "{",
-            " 'method': 'testB',",
-            " 'annotations': {",
-            "  'ParameterizedCommandLineFlags': {",
-            "   'value': []",
-            "  }",
-            " }",
-            "}"
-        );
-        // clang-format on
+        String expectedJsonString =
+                makeJSON(
+                        "{",
+                        " 'method': 'testB',",
+                        " 'annotations': {",
+                        "  'ParameterizedCommandLineFlags': {",
+                        "   'value': []",
+                        "  }",
+                        " }",
+                        "}");
         Assert.assertEquals(expectedJsonString, json.toString());
     }
 }
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java
index 614014c..9a097a5 100644
--- a/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java
@@ -19,21 +19,21 @@
 import java.util.Arrays;
 import java.util.List;
 
-/**
- * Example test that uses ParameterizedRunner
- */
+/** Example test that uses ParameterizedRunner */
 @RunWith(ParameterizedRunner.class)
 @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
 public class ExampleParameterizedTest {
     @ClassParameter
     private static List<ParameterSet> sClassParams =
-            Arrays.asList(new ParameterSet().value("hello", "world").name("HelloWorld"),
+            Arrays.asList(
+                    new ParameterSet().value("hello", "world").name("HelloWorld"),
                     new ParameterSet().value("Xxxx", "Yyyy").name("XxxxYyyy"),
                     new ParameterSet().value("aa", "yy").name("AaYy"));
 
     public static class MethodParamsA implements ParameterProvider {
         private static List<ParameterSet> sMethodParamA =
-                Arrays.asList(new ParameterSet().value(1, 2).name("OneTwo"),
+                Arrays.asList(
+                        new ParameterSet().value(1, 2).name("OneTwo"),
                         new ParameterSet().value(2, 3).name("TwoThree"),
                         new ParameterSet().value(3, 4).name("ThreeFour"));
 
@@ -45,7 +45,8 @@
 
     public static class MethodParamsB implements ParameterProvider {
         private static List<ParameterSet> sMethodParamB =
-                Arrays.asList(new ParameterSet().value("a", "b").name("Ab"),
+                Arrays.asList(
+                        new ParameterSet().value("a", "b").name("Ab"),
                         new ParameterSet().value("b", "c").name("Bc"),
                         new ParameterSet().value("c", "d").name("Cd"),
                         new ParameterSet().value("d", "e").name("De"));
@@ -70,8 +71,7 @@
                 "A and B string length aren't equal", mStringA.length(), mStringB.length());
     }
 
-    @Rule
-    public MethodRule mMethodParamAnnotationProcessor = new MethodParamAnnotationRule();
+    @Rule public MethodRule mMethodParamAnnotationProcessor = new MethodParamAnnotationRule();
 
     private Integer mSum;
 
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
index c0e47c3..50f54f5 100644
--- a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
@@ -24,12 +24,13 @@
     private static Object createTest(TestClass testClass, ParameterSet classParameterSet)
             throws ParameterizedTestInstantiationException {
         return new ParameterizedRunnerDelegateCommon(
-                testClass, classParameterSet, Collections.emptyList())
+                        testClass, classParameterSet, Collections.emptyList())
                 .createTest();
     }
 
     static class BadTestClassWithMoreThanOneConstructor {
         public BadTestClassWithMoreThanOneConstructor() {}
+
         @SuppressWarnings("unused")
         public BadTestClassWithMoreThanOneConstructor(String argument) {}
     }
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java
index e35f334..bb1f46d 100644
--- a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java
@@ -21,23 +21,22 @@
 import java.util.List;
 import java.util.Map;
 
-/**
- * Test for org.chromium.base.test.params.ParameterizedRunnerDelegateFactory
- */
+/** Test for org.chromium.base.test.params.ParameterizedRunnerDelegateFactory */
 @RunWith(BlockJUnit4ClassRunner.class)
 public class ParameterizedRunnerDelegateFactoryTest {
     /**
-     * This RunnerDelegate calls `super.collectInitializationErrors()` and would
-     * cause BlockJUnit4ClassRunner to validate test classes.
+     * This RunnerDelegate calls `super.collectInitializationErrors()` and would cause
+     * BlockJUnit4ClassRunner to validate test classes.
      */
-    public static class BadExampleRunnerDelegate
-            extends BlockJUnit4ClassRunner implements ParameterizedRunnerDelegate {
+    public static class BadExampleRunnerDelegate extends BlockJUnit4ClassRunner
+            implements ParameterizedRunnerDelegate {
         public static class LalaTestClass {}
 
         private final List<FrameworkMethod> mParameterizedFrameworkMethodList;
 
-        BadExampleRunnerDelegate(Class<?> klass,
-                List<FrameworkMethod> parameterizedFrameworkMethods) throws InitializationError {
+        BadExampleRunnerDelegate(
+                Class<?> klass, List<FrameworkMethod> parameterizedFrameworkMethods)
+                throws InitializationError {
             super(klass);
             mParameterizedFrameworkMethodList = parameterizedFrameworkMethods;
         }
@@ -64,8 +63,7 @@
             public Iterable<ParameterSet> getParameters() {
                 return Arrays.asList(
                         new ParameterSet().value("a").name("testWithValue_a"),
-                        new ParameterSet().value("b").name("testWithValue_b")
-                );
+                        new ParameterSet().value("b").name("testWithValue_b"));
             }
         }
 
@@ -80,8 +78,7 @@
                 return Arrays.asList(
                         new ParameterSet().value(1).name("testWithValue_1"),
                         new ParameterSet().value(2).name("testWithValue_2"),
-                        new ParameterSet().value(3).name("testWithValue_3")
-                );
+                        new ParameterSet().value(3).name("testWithValue_3"));
             }
         }
 
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java
index 3faa41c..ec70085 100644
--- a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java
@@ -15,15 +15,12 @@
 import java.util.ArrayList;
 import java.util.List;
 
-/**
- * Test for org.chromium.base.test.params.ParameterizedRunner
- */
+/** Test for org.chromium.base.test.params.ParameterizedRunner */
 @RunWith(BlockJUnit4ClassRunner.class)
 public class ParameterizedRunnerTest {
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class BadTestClassWithMoreThanOneConstructor {
-        @ClassParameter
-        static List<ParameterSet> sClassParams = new ArrayList<>();
+        @ClassParameter static List<ParameterSet> sClassParams = new ArrayList<>();
 
         public BadTestClassWithMoreThanOneConstructor() {}
 
@@ -32,8 +29,7 @@
 
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class BadTestClassWithNonListParameters {
-        @ClassParameter
-        static String[] sMethodParamA = {"1", "2"};
+        @ClassParameter static String[] sMethodParamA = {"1", "2"};
 
         @Test
         public void test() {}
@@ -47,8 +43,7 @@
 
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class BadTestClassWithNonStaticParameterSetList {
-        @ClassParameter
-        public List<ParameterSet> mClassParams = new ArrayList<>();
+        @ClassParameter public List<ParameterSet> mClassParams = new ArrayList<>();
 
         @Test
         public void test() {}
@@ -56,11 +51,9 @@
 
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class BadTestClassWithMultipleClassParameter {
-        @ClassParameter
-        private static List<ParameterSet> sParamA = new ArrayList<>();
+        @ClassParameter private static List<ParameterSet> sParamA = new ArrayList<>();
 
-        @ClassParameter
-        private static List<ParameterSet> sParamB = new ArrayList<>();
+        @ClassParameter private static List<ParameterSet> sParamB = new ArrayList<>();
     }
 
     @Test(expected = ParameterizedRunner.IllegalParameterArgumentException.class)
@@ -91,7 +84,8 @@
     @Test(expected = IllegalArgumentException.class)
     @SuppressWarnings("ModifiedButNotUsed")
     public void testUnsupportedParameterType() throws Throwable {
-        class MyPair {};
+        class MyPair {}
+        ;
         List<ParameterSet> paramList = new ArrayList<>();
         paramList.add(new ParameterSet().value(new MyPair()));
     }
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java
index ed74752..efbc9a1 100644
--- a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java
@@ -21,18 +21,16 @@
 import java.util.LinkedList;
 import java.util.List;
 
-/**
- * Test for verify the names and test method Description works properly
- */
+/** Test for verify the names and test method Description works properly */
 @RunWith(BlockJUnit4ClassRunner.class)
 public class ParameterizedTestNameTest {
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class TestClassWithClassParameterAppendName {
         @ClassParameter
-        static List<ParameterSet> sAllName = Arrays.asList(
-                new ParameterSet().value("hello").name("Hello"),
-                new ParameterSet().value("world").name("World")
-        );
+        static List<ParameterSet> sAllName =
+                Arrays.asList(
+                        new ParameterSet().value("hello").name("Hello"),
+                        new ParameterSet().value("world").name("World"));
 
         public TestClassWithClassParameterAppendName(String a) {}
 
@@ -43,10 +41,8 @@
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class TestClassWithClassParameterDefaultName {
         @ClassParameter
-        static List<ParameterSet> sAllName = Arrays.asList(
-                new ParameterSet().value("hello"),
-                new ParameterSet().value("world")
-        );
+        static List<ParameterSet> sAllName =
+                Arrays.asList(new ParameterSet().value("hello"), new ParameterSet().value("world"));
 
         public TestClassWithClassParameterDefaultName(String a) {}
 
@@ -61,8 +57,7 @@
             public Iterable<ParameterSet> getParameters() {
                 return Arrays.asList(
                         new ParameterSet().value("hello").name("Hello"),
-                        new ParameterSet().value("world").name("World")
-                );
+                        new ParameterSet().value("world").name("World"));
             }
         }
 
@@ -70,9 +65,7 @@
             @Override
             public Iterable<ParameterSet> getParameters() {
                 return Arrays.asList(
-                        new ParameterSet().value("hello"),
-                        new ParameterSet().value("world")
-                );
+                        new ParameterSet().value("hello"), new ParameterSet().value("world"));
             }
         }
 
@@ -88,18 +81,17 @@
     @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
     public static class TestClassWithMixedParameter {
         @ClassParameter
-        static List<ParameterSet> sAllName = Arrays.asList(
-                new ParameterSet().value("hello").name("Hello"),
-                new ParameterSet().value("world").name("World")
-        );
+        static List<ParameterSet> sAllName =
+                Arrays.asList(
+                        new ParameterSet().value("hello").name("Hello"),
+                        new ParameterSet().value("world").name("World"));
 
         static class AppendNameParams implements ParameterProvider {
             @Override
             public Iterable<ParameterSet> getParameters() {
                 return Arrays.asList(
                         new ParameterSet().value("1").name("A"),
-                        new ParameterSet().value("2").name("B")
-                );
+                        new ParameterSet().value("2").name("B"));
             }
         }
 
@@ -115,8 +107,9 @@
 
     @Test
     public void testClassParameterAppendName() throws Throwable {
-        List<Runner> runners = ParameterizedRunner.createRunners(
-                new TestClass(TestClassWithClassParameterAppendName.class));
+        List<Runner> runners =
+                ParameterizedRunner.createRunners(
+                        new TestClass(TestClassWithClassParameterAppendName.class));
         List<String> expectedTestNames =
                 new LinkedList<String>(Arrays.asList("test__Hello", "test__World"));
         List<String> computedMethodNames = new ArrayList<>();
@@ -124,7 +117,8 @@
             BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
             for (FrameworkMethod method : castedRunner.computeTestMethods()) {
                 computedMethodNames.add(method.getName());
-                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                Assert.assertTrue(
+                        "This test name is not expected: " + method.getName(),
                         expectedTestNames.contains(method.getName()));
                 expectedTestNames.remove(method.getName());
                 method.getName();
@@ -140,40 +134,51 @@
 
     @Test
     public void testClassParameterDefaultName() throws Throwable {
-        List<Runner> runners = ParameterizedRunner.createRunners(
-                new TestClass(TestClassWithClassParameterDefaultName.class));
+        List<Runner> runners =
+                ParameterizedRunner.createRunners(
+                        new TestClass(TestClassWithClassParameterDefaultName.class));
         List<String> expectedTestNames = new LinkedList<String>(Arrays.asList("test", "test"));
         for (Runner r : runners) {
             @SuppressWarnings("unchecked")
             BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
             for (FrameworkMethod method : castedRunner.computeTestMethods()) {
-                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                Assert.assertTrue(
+                        "This test name is not expected: " + method.getName(),
                         expectedTestNames.contains(method.getName()));
                 expectedTestNames.remove(method.getName());
                 method.getName();
             }
         }
-        Assert.assertTrue("These expected names are not found: "
+        Assert.assertTrue(
+                "These expected names are not found: "
                         + Arrays.toString(expectedTestNames.toArray()),
                 expectedTestNames.isEmpty());
     }
 
     @Test
     public void testMethodParameter() throws Throwable {
-        List<Runner> runners = ParameterizedRunner.createRunners(
-                new TestClass(TestClassWithMethodParameter.class));
-        List<String> expectedTestNames = new LinkedList<String>(
-                Arrays.asList("test__Hello", "test__World", "testDefaultName", "testDefaultName"));
+        List<Runner> runners =
+                ParameterizedRunner.createRunners(
+                        new TestClass(TestClassWithMethodParameter.class));
+        List<String> expectedTestNames =
+                new LinkedList<String>(
+                        Arrays.asList(
+                                "test__Hello",
+                                "test__World",
+                                "testDefaultName",
+                                "testDefaultName"));
         for (Runner r : runners) {
             BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
             for (FrameworkMethod method : castedRunner.computeTestMethods()) {
-                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                Assert.assertTrue(
+                        "This test name is not expected: " + method.getName(),
                         expectedTestNames.contains(method.getName()));
                 expectedTestNames.remove(method.getName());
                 method.getName();
             }
         }
-        Assert.assertTrue("These expected names are not found: "
+        Assert.assertTrue(
+                "These expected names are not found: "
                         + Arrays.toString(expectedTestNames.toArray()),
                 expectedTestNames.isEmpty());
     }
@@ -183,18 +188,26 @@
         List<Runner> runners =
                 ParameterizedRunner.createRunners(new TestClass(TestClassWithMixedParameter.class));
         List<String> expectedTestNames =
-                new LinkedList<String>(Arrays.asList("testA__Hello_A", "testA__World_A",
-                        "testA__Hello_B", "testA__World_B", "test__Hello", "test__World"));
+                new LinkedList<String>(
+                        Arrays.asList(
+                                "testA__Hello_A",
+                                "testA__World_A",
+                                "testA__Hello_B",
+                                "testA__World_B",
+                                "test__Hello",
+                                "test__World"));
         for (Runner r : runners) {
             BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
             for (FrameworkMethod method : castedRunner.computeTestMethods()) {
-                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                Assert.assertTrue(
+                        "This test name is not expected: " + method.getName(),
                         expectedTestNames.contains(method.getName()));
                 expectedTestNames.remove(method.getName());
                 method.getName();
             }
         }
-        Assert.assertTrue("These expected names are not found: "
+        Assert.assertTrue(
+                "These expected names are not found: "
                         + Arrays.toString(expectedTestNames.toArray()),
                 expectedTestNames.isEmpty());
     }
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/AndroidSdkLevelSkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/AndroidSdkLevelSkipCheckTest.java
index ee85590..225eb0d 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/AndroidSdkLevelSkipCheckTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/AndroidSdkLevelSkipCheckTest.java
@@ -105,7 +105,8 @@
                 sSkipCheck.shouldSkip(new FrameworkMethod(testClass.getMethod(methodName))),
                 equalTo(shouldSkip));
         TestRunnerTestRule.TestLog runListener = mTestRunnerTestRule.runTest(testClass);
-        Assert.assertThat(Description.createTestDescription(testClass, methodName),
+        Assert.assertThat(
+                Description.createTestDescription(testClass, methodName),
                 isIn(shouldSkip ? runListener.skippedTests : runListener.runTests));
     }
 
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java b/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java
index 44d8c59..7f7a8ff 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java
@@ -41,10 +41,11 @@
     public void testGetTargetAnnotation_NotOnClassNorMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                createTestDescription(
-                        ClassWithoutTargetAnnotation.class, "methodWithoutAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        createTestDescription(
+                                ClassWithoutTargetAnnotation.class, "methodWithoutAnnotation"),
+                        TargetAnnotation.class);
         assertNull(retrievedAnnotation);
     }
 
@@ -52,9 +53,10 @@
     public void testGetTargetAnnotation_NotOnClassButOnMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithoutTargetAnnotation.class, "methodWithTargetAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(ClassWithoutTargetAnnotation.class, "methodWithTargetAnnotation"),
+                        TargetAnnotation.class);
         assertNotNull(retrievedAnnotation);
     }
 
@@ -62,9 +64,12 @@
     public void testGetTargetAnnotation_NotOnClassDifferentOneOnMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithoutTargetAnnotation.class, "methodWithAnnotatedAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(
+                                ClassWithoutTargetAnnotation.class,
+                                "methodWithAnnotatedAnnotation"),
+                        TargetAnnotation.class);
         assertNull(retrievedAnnotation);
     }
 
@@ -72,9 +77,10 @@
     public void testGetTargetAnnotation_OnClassButNotOnMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithAnnotation.class, "methodWithoutAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(ClassWithAnnotation.class, "methodWithoutAnnotation"),
+                        TargetAnnotation.class);
         assertNotNull(retrievedAnnotation);
         assertEquals(Location.Class, retrievedAnnotation.value());
     }
@@ -83,9 +89,10 @@
     public void testGetTargetAnnotation_OnClassAndMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithAnnotation.class, "methodWithTargetAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(ClassWithAnnotation.class, "methodWithTargetAnnotation"),
+                        TargetAnnotation.class);
         assertNotNull(retrievedAnnotation);
         assertEquals(Location.Method, retrievedAnnotation.value());
     }
@@ -95,8 +102,10 @@
     public void testGetTargetAnnotation_OnRuleButNotOnMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithRule.class, "methodWithoutAnnotation"), TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(ClassWithRule.class, "methodWithoutAnnotation"),
+                        TargetAnnotation.class);
         assertNotNull(retrievedAnnotation);
         assertEquals(Location.Rule, retrievedAnnotation.value());
     }
@@ -106,8 +115,10 @@
     public void testGetTargetAnnotation_OnRuleAndMethod() {
         TargetAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithRule.class, "methodWithTargetAnnotation"), TargetAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(ClassWithRule.class, "methodWithTargetAnnotation"),
+                        TargetAnnotation.class);
         assertNotNull(retrievedAnnotation);
         assertEquals(Location.Method, retrievedAnnotation.value());
     }
@@ -116,9 +127,12 @@
     public void testGetMetaAnnotation_Indirectly() {
         MetaAnnotation retrievedAnnotation;
 
-        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
-                getTest(ClassWithoutTargetAnnotation.class, "methodWithAnnotatedAnnotation"),
-                MetaAnnotation.class);
+        retrievedAnnotation =
+                AnnotationProcessingUtils.getAnnotation(
+                        getTest(
+                                ClassWithoutTargetAnnotation.class,
+                                "methodWithAnnotatedAnnotation"),
+                        MetaAnnotation.class);
         assertNotNull(retrievedAnnotation);
     }
 
@@ -126,9 +140,10 @@
     public void testGetAllTargetAnnotations() {
         List<TargetAnnotation> retrievedAnnotations;
 
-        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
-                getTest(ClassWithAnnotation.class, "methodWithTargetAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotations =
+                AnnotationProcessingUtils.getAnnotations(
+                        getTest(ClassWithAnnotation.class, "methodWithTargetAnnotation"),
+                        TargetAnnotation.class);
         assertEquals(2, retrievedAnnotations.size());
         assertEquals(Location.Class, retrievedAnnotations.get(0).value());
         assertEquals(Location.Method, retrievedAnnotations.get(1).value());
@@ -138,9 +153,10 @@
     public void testGetAllTargetAnnotations_OnParentClass() {
         List<TargetAnnotation> retrievedAnnotations;
 
-        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
-                getTest(DerivedClassWithoutAnnotation.class, "newMethodWithoutAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotations =
+                AnnotationProcessingUtils.getAnnotations(
+                        getTest(DerivedClassWithoutAnnotation.class, "newMethodWithoutAnnotation"),
+                        TargetAnnotation.class);
         assertEquals(1, retrievedAnnotations.size());
         assertEquals(Location.Class, retrievedAnnotations.get(0).value());
     }
@@ -149,9 +165,12 @@
     public void testGetAllTargetAnnotations_OnDerivedMethodAndParentClass() {
         List<TargetAnnotation> retrievedAnnotations;
 
-        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
-                getTest(DerivedClassWithoutAnnotation.class, "newMethodWithTargetAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotations =
+                AnnotationProcessingUtils.getAnnotations(
+                        getTest(
+                                DerivedClassWithoutAnnotation.class,
+                                "newMethodWithTargetAnnotation"),
+                        TargetAnnotation.class);
         assertEquals(2, retrievedAnnotations.size());
         assertEquals(Location.Class, retrievedAnnotations.get(0).value());
         assertEquals(Location.DerivedMethod, retrievedAnnotations.get(1).value());
@@ -161,9 +180,10 @@
     public void testGetAllTargetAnnotations_OnDerivedMethodAndParentClassAndMethod() {
         List<TargetAnnotation> retrievedAnnotations;
 
-        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
-                getTest(DerivedClassWithoutAnnotation.class, "methodWithTargetAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotations =
+                AnnotationProcessingUtils.getAnnotations(
+                        getTest(DerivedClassWithoutAnnotation.class, "methodWithTargetAnnotation"),
+                        TargetAnnotation.class);
         // We should not look at the base implementation of the method. Mostly it should not happen
         // in the context of tests.
         assertEquals(2, retrievedAnnotations.size());
@@ -175,9 +195,10 @@
     public void testGetAllTargetAnnotations_OnDerivedParentAndParentClass() {
         List<TargetAnnotation> retrievedAnnotations;
 
-        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
-                getTest(DerivedClassWithAnnotation.class, "methodWithoutAnnotation"),
-                TargetAnnotation.class);
+        retrievedAnnotations =
+                AnnotationProcessingUtils.getAnnotations(
+                        getTest(DerivedClassWithAnnotation.class, "methodWithoutAnnotation"),
+                        TargetAnnotation.class);
         assertEquals(2, retrievedAnnotations.size());
         assertEquals(Location.Class, retrievedAnnotations.get(0).value());
         assertEquals(Location.DerivedClass, retrievedAnnotations.get(1).value());
@@ -187,10 +208,12 @@
     public void testGetAllAnnotations() {
         List<Annotation> annotations;
 
-        AnnotationExtractor annotationExtractor = new AnnotationExtractor(
-                TargetAnnotation.class, MetaAnnotation.class, AnnotatedAnnotation.class);
-        annotations = annotationExtractor.getMatchingAnnotations(
-                getTest(DerivedClassWithAnnotation.class, "methodWithTwoAnnotations"));
+        AnnotationExtractor annotationExtractor =
+                new AnnotationExtractor(
+                        TargetAnnotation.class, MetaAnnotation.class, AnnotatedAnnotation.class);
+        annotations =
+                annotationExtractor.getMatchingAnnotations(
+                        getTest(DerivedClassWithAnnotation.class, "methodWithTwoAnnotations"));
         assertEquals(5, annotations.size());
 
         // Retrieved annotation order:
@@ -220,7 +243,8 @@
         List<Class<? extends Annotation>> testList =
                 Arrays.asList(Rule.class, Test.class, Override.class, Target.class, Rule.class);
         testList.sort(comparator);
-        assertThat("Unknown annotations should not be reordered and come before the known ones.",
+        assertThat(
+                "Unknown annotations should not be reordered and come before the known ones.",
                 testList,
                 contains(Rule.class, Test.class, Override.class, Rule.class, Target.class));
     }
@@ -256,7 +280,14 @@
     }
 
     // region Test Data: Annotations and dummy test classes
-    private enum Location { Unspecified, Class, Method, Rule, DerivedClass, DerivedMethod }
+    private enum Location {
+        Unspecified,
+        Class,
+        Method,
+        Rule,
+        DerivedClass,
+        DerivedMethod
+    }
 
     @Retention(RetentionPolicy.RUNTIME)
     @Target({ElementType.TYPE, ElementType.METHOD})
@@ -333,8 +364,7 @@
     }
 
     private static class ClassWithRule {
-        @Rule
-        Rule1 mRule = new Rule1();
+        @Rule Rule1 mRule = new Rule1();
 
         @Test
         public void methodWithoutAnnotation() {}
@@ -374,4 +404,4 @@
     }
 
     // endregion
-    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsNoClassAnnotationCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsNoClassAnnotationCheckTest.java
index d591ec8..0815c42 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsNoClassAnnotationCheckTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsNoClassAnnotationCheckTest.java
@@ -28,7 +28,8 @@
     @Test
     @CommandLineFlags.Add("some-switch")
     public void testAddSwitch_method() throws Throwable {
-        Assert.assertTrue("some-switch should be appended",
+        Assert.assertTrue(
+                "some-switch should be appended",
                 CommandLine.getInstance().hasSwitch("some-switch"));
     }
 
@@ -38,7 +39,8 @@
     public void testAddThenRemoveSwitch_method() throws Throwable {
         Assert.assertEquals(
                 "some-switch should be removed from the class level and added back, not ignored",
-                "method_value", CommandLine.getInstance().getSwitchValue("some-switch"));
+                "method_value",
+                CommandLine.getInstance().getSwitchValue("some-switch"));
     }
 
     @Test
@@ -47,6 +49,7 @@
     public void testRemoveThenAddSwitch_method() throws Throwable {
         Assert.assertEquals(
                 "some-switch should be removed from the class level and added back, not ignored",
-                "method_value", CommandLine.getInstance().getSwitchValue("some-switch"));
+                "method_value",
+                CommandLine.getInstance().getSwitchValue("some-switch"));
     }
 }
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsWithClassAnnotationCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsWithClassAnnotationCheckTest.java
index 4dd97a3..713a554 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsWithClassAnnotationCheckTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/CommandLineFlagsWithClassAnnotationCheckTest.java
@@ -22,14 +22,16 @@
 public class CommandLineFlagsWithClassAnnotationCheckTest {
     @Test
     public void testOnlyClassAnnotation() throws Throwable {
-        Assert.assertTrue("some-switch should be appended by the class",
+        Assert.assertTrue(
+                "some-switch should be appended by the class",
                 CommandLine.getInstance().hasSwitch("some-switch"));
     }
 
     @Test
     @CommandLineFlags.Remove("some-switch")
     public void testRemoveSwitch_method() throws Throwable {
-        Assert.assertTrue("CommandLine switches should be removed by the method",
+        Assert.assertTrue(
+                "CommandLine switches should be removed by the method",
                 CommandLine.getInstance().getSwitches().isEmpty());
     }
 }
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java b/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java
index ff0d4f4..a69d806 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java
@@ -22,68 +22,75 @@
 public class DisableIfTest {
     @Test
     public void testSdkIsLessThanAndIsLessThan() {
-        TestCase sdkIsLessThan = new TestCase("sdkIsLessThan") {
-            @DisableIf.Build(sdk_is_less_than = 30)
-            public void sdkIsLessThan() {}
-        };
+        TestCase sdkIsLessThan =
+                new TestCase("sdkIsLessThan") {
+                    @DisableIf.Build(sdk_is_less_than = 30)
+                    public void sdkIsLessThan() {}
+                };
         Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(sdkIsLessThan));
     }
 
     @Test
     public void testSdkIsLessThanButIsEqual() {
-        TestCase sdkIsEqual = new TestCase("sdkIsEqual") {
-            @DisableIf.Build(sdk_is_less_than = 29)
-            public void sdkIsEqual() {}
-        };
+        TestCase sdkIsEqual =
+                new TestCase("sdkIsEqual") {
+                    @DisableIf.Build(sdk_is_less_than = 29)
+                    public void sdkIsEqual() {}
+                };
         Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsEqual));
     }
 
     @Test
     public void testSdkIsLessThanButIsGreaterThan() {
-        TestCase sdkIsGreaterThan = new TestCase("sdkIsGreaterThan") {
-            @DisableIf.Build(sdk_is_less_than = 28)
-            public void sdkIsGreaterThan() {}
-        };
+        TestCase sdkIsGreaterThan =
+                new TestCase("sdkIsGreaterThan") {
+                    @DisableIf.Build(sdk_is_less_than = 28)
+                    public void sdkIsGreaterThan() {}
+                };
         Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsGreaterThan));
     }
 
     @Test
     public void testSdkIsGreaterThanButIsLessThan() {
-        TestCase sdkIsLessThan = new TestCase("sdkIsLessThan") {
-            @DisableIf.Build(sdk_is_greater_than = 30)
-            public void sdkIsLessThan() {}
-        };
+        TestCase sdkIsLessThan =
+                new TestCase("sdkIsLessThan") {
+                    @DisableIf.Build(sdk_is_greater_than = 30)
+                    public void sdkIsLessThan() {}
+                };
         Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsLessThan));
     }
 
     @Test
     public void testSdkIsGreaterThanButIsEqual() {
-        TestCase sdkIsEqual = new TestCase("sdkIsEqual") {
-            @DisableIf.Build(sdk_is_greater_than = 29)
-            public void sdkIsEqual() {}
-        };
+        TestCase sdkIsEqual =
+                new TestCase("sdkIsEqual") {
+                    @DisableIf.Build(sdk_is_greater_than = 29)
+                    public void sdkIsEqual() {}
+                };
         Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsEqual));
     }
 
     @Test
     public void testSdkIsGreaterThanAndIsGreaterThan() {
-        TestCase sdkIsGreaterThan = new TestCase("sdkIsGreaterThan") {
-            @DisableIf.Build(sdk_is_greater_than = 28)
-            public void sdkIsGreaterThan() {}
-        };
+        TestCase sdkIsGreaterThan =
+                new TestCase("sdkIsGreaterThan") {
+                    @DisableIf.Build(sdk_is_greater_than = 28)
+                    public void sdkIsGreaterThan() {}
+                };
         Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(sdkIsGreaterThan));
     }
 
     @Test
     public void testSupportedAbiIncludesAndCpuAbiMatches() {
-        TestCase supportedAbisCpuAbiMatch = new TestCase("supportedAbisCpuAbiMatch") {
-            @DisableIf.Build(supported_abis_includes = "foo")
-            public void supportedAbisCpuAbiMatch() {}
-        };
+        TestCase supportedAbisCpuAbiMatch =
+                new TestCase("supportedAbisCpuAbiMatch") {
+                    @DisableIf.Build(supported_abis_includes = "foo")
+                    public void supportedAbisCpuAbiMatch() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
-            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS",
-                    new String[] {"foo", "bar"});
+            ReflectionHelpers.setStaticField(
+                    Build.class, "SUPPORTED_ABIS", new String[] {"foo", "bar"});
             Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(supportedAbisCpuAbiMatch));
         } finally {
             ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
@@ -92,14 +99,15 @@
 
     @Test
     public void testSupportedAbiIncludesAndCpuAbi2Matches() {
-        TestCase supportedAbisCpuAbi2Match = new TestCase("supportedAbisCpuAbi2Match") {
-            @DisableIf.Build(supported_abis_includes = "bar")
-            public void supportedAbisCpuAbi2Match() {}
-        };
+        TestCase supportedAbisCpuAbi2Match =
+                new TestCase("supportedAbisCpuAbi2Match") {
+                    @DisableIf.Build(supported_abis_includes = "bar")
+                    public void supportedAbisCpuAbi2Match() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
-            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS",
-                    new String[] {"foo", "bar"});
+            ReflectionHelpers.setStaticField(
+                    Build.class, "SUPPORTED_ABIS", new String[] {"foo", "bar"});
             Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(supportedAbisCpuAbi2Match));
         } finally {
             ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
@@ -108,14 +116,15 @@
 
     @Test
     public void testSupportedAbiIncludesButNoMatch() {
-        TestCase supportedAbisNoMatch = new TestCase("supportedAbisNoMatch") {
-            @DisableIf.Build(supported_abis_includes = "baz")
-            public void supportedAbisNoMatch() {}
-        };
+        TestCase supportedAbisNoMatch =
+                new TestCase("supportedAbisNoMatch") {
+                    @DisableIf.Build(supported_abis_includes = "baz")
+                    public void supportedAbisNoMatch() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
-            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS",
-                    new String[] {"foo", "bar"});
+            ReflectionHelpers.setStaticField(
+                    Build.class, "SUPPORTED_ABIS", new String[] {"foo", "bar"});
             Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(supportedAbisNoMatch));
         } finally {
             ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
@@ -124,10 +133,11 @@
 
     @Test
     public void testHardwareIsMatches() {
-        TestCase hardwareIsMatches = new TestCase("hardwareIsMatches") {
-            @DisableIf.Build(hardware_is = "hammerhead")
-            public void hardwareIsMatches() {}
-        };
+        TestCase hardwareIsMatches =
+                new TestCase("hardwareIsMatches") {
+                    @DisableIf.Build(hardware_is = "hammerhead")
+                    public void hardwareIsMatches() {}
+                };
         String originalHardware = Build.HARDWARE;
         try {
             ReflectionHelpers.setStaticField(Build.class, "HARDWARE", "hammerhead");
@@ -139,10 +149,11 @@
 
     @Test
     public void testHardwareIsDoesntMatch() {
-        TestCase hardwareIsDoesntMatch = new TestCase("hardwareIsDoesntMatch") {
-            @DisableIf.Build(hardware_is = "hammerhead")
-            public void hardwareIsDoesntMatch() {}
-        };
+        TestCase hardwareIsDoesntMatch =
+                new TestCase("hardwareIsDoesntMatch") {
+                    @DisableIf.Build(hardware_is = "hammerhead")
+                    public void hardwareIsDoesntMatch() {}
+                };
         String originalHardware = Build.HARDWARE;
         try {
             ReflectionHelpers.setStaticField(Build.class, "HARDWARE", "mako");
@@ -164,6 +175,7 @@
         public DisableIfTestCase(String name) {
             super(name);
         }
+
         public void sampleTestMethod() {}
     }
 
@@ -193,10 +205,11 @@
 
     @Test
     public void testTwoConditionsBothMet() {
-        TestCase twoConditionsBothMet = new TestCase("twoConditionsBothMet") {
-            @DisableIf.Build(sdk_is_greater_than = 28, supported_abis_includes = "foo")
-            public void twoConditionsBothMet() {}
-        };
+        TestCase twoConditionsBothMet =
+                new TestCase("twoConditionsBothMet") {
+                    @DisableIf.Build(sdk_is_greater_than = 28, supported_abis_includes = "foo")
+                    public void twoConditionsBothMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -209,10 +222,11 @@
 
     @Test
     public void testTwoConditionsFirstMet() {
-        TestCase twoConditionsFirstMet = new TestCase("twoConditionsFirstMet") {
-            @DisableIf.Build(sdk_is_greater_than = 28, supported_abis_includes = "baz")
-            public void twoConditionsFirstMet() {}
-        };
+        TestCase twoConditionsFirstMet =
+                new TestCase("twoConditionsFirstMet") {
+                    @DisableIf.Build(sdk_is_greater_than = 28, supported_abis_includes = "baz")
+                    public void twoConditionsFirstMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -225,10 +239,11 @@
 
     @Test
     public void testTwoConditionsSecondMet() {
-        TestCase twoConditionsSecondMet = new TestCase("twoConditionsSecondMet") {
-            @DisableIf.Build(sdk_is_greater_than = 30, supported_abis_includes = "foo")
-            public void twoConditionsSecondMet() {}
-        };
+        TestCase twoConditionsSecondMet =
+                new TestCase("twoConditionsSecondMet") {
+                    @DisableIf.Build(sdk_is_greater_than = 30, supported_abis_includes = "foo")
+                    public void twoConditionsSecondMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -241,10 +256,11 @@
 
     @Test
     public void testTwoConditionsNeitherMet() {
-        TestCase twoConditionsNeitherMet = new TestCase("twoConditionsNeitherMet") {
-            @DisableIf.Build(sdk_is_greater_than = 30, supported_abis_includes = "baz")
-            public void twoConditionsNeitherMet() {}
-        };
+        TestCase twoConditionsNeitherMet =
+                new TestCase("twoConditionsNeitherMet") {
+                    @DisableIf.Build(sdk_is_greater_than = 30, supported_abis_includes = "baz")
+                    public void twoConditionsNeitherMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -257,11 +273,12 @@
 
     @Test
     public void testTwoAnnotationsBothMet() {
-        TestCase twoAnnotationsBothMet = new TestCase("twoAnnotationsBothMet") {
-            @DisableIf.Build(supported_abis_includes = "foo")
-            @DisableIf.Build(sdk_is_greater_than = 28)
-            public void twoAnnotationsBothMet() {}
-        };
+        TestCase twoAnnotationsBothMet =
+                new TestCase("twoAnnotationsBothMet") {
+                    @DisableIf.Build(supported_abis_includes = "foo")
+                    @DisableIf.Build(sdk_is_greater_than = 28)
+                    public void twoAnnotationsBothMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -274,11 +291,12 @@
 
     @Test
     public void testTwoAnnotationsFirstMet() {
-        TestCase twoAnnotationsFirstMet = new TestCase("twoAnnotationsFirstMet") {
-            @DisableIf.Build(supported_abis_includes = "foo")
-            @DisableIf.Build(sdk_is_greater_than = 30)
-            public void twoAnnotationsFirstMet() {}
-        };
+        TestCase twoAnnotationsFirstMet =
+                new TestCase("twoAnnotationsFirstMet") {
+                    @DisableIf.Build(supported_abis_includes = "foo")
+                    @DisableIf.Build(sdk_is_greater_than = 30)
+                    public void twoAnnotationsFirstMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -291,11 +309,12 @@
 
     @Test
     public void testTwoAnnotationsSecondMet() {
-        TestCase twoAnnotationsSecondMet = new TestCase("twoAnnotationsSecondMet") {
-            @DisableIf.Build(supported_abis_includes = "baz")
-            @DisableIf.Build(sdk_is_greater_than = 28)
-            public void twoAnnotationsSecondMet() {}
-        };
+        TestCase twoAnnotationsSecondMet =
+                new TestCase("twoAnnotationsSecondMet") {
+                    @DisableIf.Build(supported_abis_includes = "baz")
+                    @DisableIf.Build(sdk_is_greater_than = 28)
+                    public void twoAnnotationsSecondMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
@@ -308,11 +327,12 @@
 
     @Test
     public void testTwoAnnotationsNeitherMet() {
-        TestCase testTwoAnnotationsNeitherMet = new TestCase("testTwoAnnotationsNeitherMet") {
-            @DisableIf.Build(supported_abis_includes = "baz")
-            @DisableIf.Build(sdk_is_greater_than = 30)
-            public void testTwoAnnotationsNeitherMet() {}
-        };
+        TestCase testTwoAnnotationsNeitherMet =
+                new TestCase("testTwoAnnotationsNeitherMet") {
+                    @DisableIf.Build(supported_abis_includes = "baz")
+                    @DisableIf.Build(sdk_is_greater_than = 30)
+                    public void testTwoAnnotationsNeitherMet() {}
+                };
         String[] originalAbis = Build.SUPPORTED_ABIS;
         try {
             ReflectionHelpers.setStaticField(
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java
index c67917d..1677ca6 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java
@@ -28,6 +28,7 @@
         public TestRestrictionSkipCheck() {
             super(null);
         }
+
         @Override
         protected boolean restrictionApplies(String restriction) {
             return TextUtils.equals(restriction, TEST_RESTRICTION_APPLIES);
@@ -38,8 +39,12 @@
         public UnannotatedBaseClass(String name) {
             super(name);
         }
-        @Restriction({TEST_RESTRICTION_APPLIES}) public void restrictedMethod() {}
-        @Restriction({TEST_RESTRICTION_DOES_NOT_APPLY}) public void unrestrictedMethod() {}
+
+        @Restriction({TEST_RESTRICTION_APPLIES})
+        public void restrictedMethod() {}
+
+        @Restriction({TEST_RESTRICTION_DOES_NOT_APPLY})
+        public void unrestrictedMethod() {}
     }
 
     @Restriction({TEST_RESTRICTION_APPLIES})
@@ -47,6 +52,7 @@
         public RestrictedClass(String name) {
             super(name);
         }
+
         public void unannotatedMethod() {}
     }
 
@@ -55,16 +61,16 @@
         public UnrestrictedClass(String name) {
             super(name);
         }
+
         public void unannotatedMethod() {}
     }
 
-    @Restriction({
-            TEST_RESTRICTION_APPLIES,
-            TEST_RESTRICTION_DOES_NOT_APPLY})
+    @Restriction({TEST_RESTRICTION_APPLIES, TEST_RESTRICTION_DOES_NOT_APPLY})
     private static class MultipleRestrictionsRestrictedClass extends UnannotatedBaseClass {
         public MultipleRestrictionsRestrictedClass(String name) {
             super(name);
         }
+
         public void unannotatedMethod() {}
     }
 
@@ -72,6 +78,7 @@
         public ExtendsRestrictedClass(String name) {
             super(name);
         }
+
         @Override
         public void unannotatedMethod() {}
     }
@@ -80,50 +87,57 @@
         public ExtendsUnrestrictedClass(String name) {
             super(name);
         }
+
         @Override
         public void unannotatedMethod() {}
     }
 
     @Test
     public void testMethodRestricted() {
-        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
-                new UnannotatedBaseClass("restrictedMethod")));
+        Assert.assertTrue(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new UnannotatedBaseClass("restrictedMethod")));
     }
 
     @Test
     public void testMethodUnrestricted() {
-        Assert.assertFalse(new TestRestrictionSkipCheck().shouldSkip(
-                new UnannotatedBaseClass("unrestrictedMethod")));
+        Assert.assertFalse(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new UnannotatedBaseClass("unrestrictedMethod")));
     }
 
     @Test
     public void testClassRestricted() {
-        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
-                new RestrictedClass("unannotatedMethod")));
+        Assert.assertTrue(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new RestrictedClass("unannotatedMethod")));
     }
 
     @Test
     public void testClassUnrestricted() {
-        Assert.assertFalse(new TestRestrictionSkipCheck().shouldSkip(
-                new UnrestrictedClass("unannotatedMethod")));
+        Assert.assertFalse(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new UnrestrictedClass("unannotatedMethod")));
     }
 
     @Test
     public void testMultipleRestrictionsClassRestricted() {
-        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
-                new MultipleRestrictionsRestrictedClass("unannotatedMethod")));
+        Assert.assertTrue(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new MultipleRestrictionsRestrictedClass("unannotatedMethod")));
     }
 
     @Test
     public void testSuperclassRestricted() {
-        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
-                new ExtendsRestrictedClass("unannotatedMethod")));
+        Assert.assertTrue(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new ExtendsRestrictedClass("unannotatedMethod")));
     }
 
     @Test
     public void testSuperclassUnrestricted() {
-        Assert.assertFalse(new TestRestrictionSkipCheck().shouldSkip(
-                new ExtendsUnrestrictedClass("unannotatedMethod")));
+        Assert.assertFalse(
+                new TestRestrictionSkipCheck()
+                        .shouldSkip(new ExtendsUnrestrictedClass("unannotatedMethod")));
     }
 }
-
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java
index f61172f..c7a6b0e 100644
--- a/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java
@@ -41,7 +41,9 @@
     @TestAnnotation
     private class AnnotatedBaseClass {
         public void unannotatedMethod() {}
-        @TestAnnotation public void annotatedMethod() {}
+
+        @TestAnnotation
+        public void annotatedMethod() {}
     }
 
     private class ExtendsAnnotatedBaseClass extends AnnotatedBaseClass {
@@ -50,72 +52,74 @@
 
     private class UnannotatedBaseClass {
         public void unannotatedMethod() {}
-        @TestAnnotation public void annotatedMethod() {}
+
+        @TestAnnotation
+        public void annotatedMethod() {}
     }
 
     @Test
     public void getAnnotationsForClassNone() {
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                UnannotatedBaseClass.class, TestAnnotation.class);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(
+                        UnannotatedBaseClass.class, TestAnnotation.class);
         Assert.assertEquals(0, annotations.size());
     }
 
     @Test
     public void getAnnotationsForClassOnClass() {
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                AnnotatedBaseClass.class, TestAnnotation.class);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(
+                        AnnotatedBaseClass.class, TestAnnotation.class);
         Assert.assertEquals(1, annotations.size());
     }
 
     @Test
     public void getAnnotationsForClassOnSuperclass() {
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                ExtendsAnnotatedBaseClass.class, TestAnnotation.class);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(
+                        ExtendsAnnotatedBaseClass.class, TestAnnotation.class);
         Assert.assertEquals(1, annotations.size());
     }
 
     @Test
     public void getAnnotationsForMethodNone() throws NoSuchMethodException {
-        Method testMethod = UnannotatedBaseClass.class.getMethod("unannotatedMethod",
-                (Class[]) null);
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                testMethod, TestAnnotation.class);
+        Method testMethod =
+                UnannotatedBaseClass.class.getMethod("unannotatedMethod", (Class[]) null);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(testMethod, TestAnnotation.class);
         Assert.assertEquals(0, annotations.size());
     }
 
     @Test
     public void getAnnotationsForMethodOnMethod() throws NoSuchMethodException {
-        Method testMethod = UnannotatedBaseClass.class.getMethod("annotatedMethod",
-                (Class[]) null);
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                testMethod, TestAnnotation.class);
+        Method testMethod = UnannotatedBaseClass.class.getMethod("annotatedMethod", (Class[]) null);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(testMethod, TestAnnotation.class);
         Assert.assertEquals(1, annotations.size());
     }
 
     @Test
     public void getAnnotationsForMethodOnClass() throws NoSuchMethodException {
-        Method testMethod = AnnotatedBaseClass.class.getMethod("unannotatedMethod",
-                (Class[]) null);
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                testMethod, TestAnnotation.class);
+        Method testMethod = AnnotatedBaseClass.class.getMethod("unannotatedMethod", (Class[]) null);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(testMethod, TestAnnotation.class);
         Assert.assertEquals(1, annotations.size());
     }
 
     @Test
     public void getAnnotationsForMethodOnSuperclass() throws NoSuchMethodException {
-        Method testMethod = ExtendsAnnotatedBaseClass.class.getMethod("unannotatedMethod",
-                (Class[]) null);
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                testMethod, TestAnnotation.class);
+        Method testMethod =
+                ExtendsAnnotatedBaseClass.class.getMethod("unannotatedMethod", (Class[]) null);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(testMethod, TestAnnotation.class);
         Assert.assertEquals(1, annotations.size());
     }
 
     @Test
     public void getAnnotationsOverlapping() throws NoSuchMethodException {
-        Method testMethod = AnnotatedBaseClass.class.getMethod("annotatedMethod",
-                (Class[]) null);
-        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
-                testMethod, TestAnnotation.class);
+        Method testMethod = AnnotatedBaseClass.class.getMethod("annotatedMethod", (Class[]) null);
+        List<TestAnnotation> annotations =
+                TestableSkipCheck.getAnnotationsForTesting(testMethod, TestAnnotation.class);
         Assert.assertEquals(2, annotations.size());
     }
 }
diff --git a/base/test/clang_profiling_android.cc b/base/test/clang_profiling_android.cc
new file mode 100644
index 0000000..c2eebef
--- /dev/null
+++ b/base/test/clang_profiling_android.cc
@@ -0,0 +1,16 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/android/jni_android.h"
+#include "base/base_jni/ClangProfiler_jni.h"
+#include "base/test/clang_profiling.h"
+
+// Used in java tests when clang profiling is enabled.
+namespace base {
+
+static void JNI_ClangProfiler_WriteClangProfilingProfile(JNIEnv* env) {
+  WriteClangProfilingProfile();
+}
+
+}  // namespace base
diff --git a/base/test/gmock_callback_support.h b/base/test/gmock_callback_support.h
index fb99024..a00322c 100644
--- a/base/test/gmock_callback_support.h
+++ b/base/test/gmock_callback_support.h
@@ -44,7 +44,7 @@
 decltype(auto) RunImpl(Callback&& cb, Tuple&& tuple) {
   return RunImpl(std::forward<Callback>(cb), std::forward<Tuple>(tuple),
                  std::make_index_sequence<
-                     std::tuple_size<std::remove_reference_t<Tuple>>::value>());
+                     std::tuple_size_v<std::remove_reference_t<Tuple>>>());
 }
 
 // Invoked when the arguments to a OnceCallback are copy constructible. In this
@@ -52,7 +52,7 @@
 // copy, allowing it to be used multiple times.
 template <size_t I,
           typename Tuple,
-          std::enable_if_t<std::is_copy_constructible<Tuple>::value, int> = 0>
+          std::enable_if_t<std::is_copy_constructible_v<Tuple>, int> = 0>
 auto RunOnceCallbackImpl(Tuple&& tuple) {
   return
       [tuple = std::forward<Tuple>(tuple)](auto&&... args) -> decltype(auto) {
@@ -67,7 +67,7 @@
 // callback by move, allowing it to be only used once.
 template <size_t I,
           typename Tuple,
-          std::enable_if_t<!std::is_copy_constructible<Tuple>::value, int> = 0>
+          std::enable_if_t<!std::is_copy_constructible_v<Tuple>, int> = 0>
 auto RunOnceCallbackImpl(Tuple&& tuple) {
   // Mock actions need to be copyable, but `tuple` is not. Wrap it in a
   // `scoped_refptr` to allow it to be copied.
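
The hunks above replace trait `::value` lookups with the C++17 `_v` variable templates. As a minimal standalone sketch of the equivalence (illustrative only, not part of this patch or of Chromium's sources):

    #include <tuple>
    #include <type_traits>

    int main() {
      using Tuple = std::tuple<int, double>;
      // The _v form is shorthand for reading the trait's ::value member.
      static_assert(std::tuple_size<Tuple>::value == std::tuple_size_v<Tuple>);
      static_assert(std::is_copy_constructible<Tuple>::value ==
                    std::is_copy_constructible_v<Tuple>);
      return 0;
    }
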
diff --git a/base/test/gtest_xml_unittest_result_printer.cc b/base/test/gtest_xml_unittest_result_printer.cc
index 2d6b9f3..072efd1 100644
--- a/base/test/gtest_xml_unittest_result_printer.cc
+++ b/base/test/gtest_xml_unittest_result_printer.cc
@@ -8,11 +8,11 @@
 #include "base/check.h"
 #include "base/command_line.h"
 #include "base/files/file_util.h"
+#include "base/i18n/time_formatting.h"
 #include "base/strings/string_util.h"
 #include "base/test/test_switches.h"
 #include "base/threading/thread_checker.h"
 #include "base/time/time.h"
-#include "base/time/time_to_iso8601.h"
 
 namespace base {
 
@@ -145,21 +145,21 @@
   fprintf(output_file_.get(),
           "    <x-teststart name=\"%s\" classname=\"%s\" timestamp=\"%s\" />\n",
           test_info.name(), test_info.test_case_name(),
-          TimeToISO8601(Time::Now()).c_str());
+          TimeFormatAsIso8601(Time::Now()).c_str());
   fflush(output_file_);
 }
 
 void XmlUnitTestResultPrinter::OnTestEnd(const testing::TestInfo& test_info) {
-  fprintf(
-      output_file_.get(),
-      "    <testcase name=\"%s\" status=\"run\" time=\"%.3f\""
-      " classname=\"%s\" timestamp=\"%s\">\n",
-      test_info.name(),
-      static_cast<double>(test_info.result()->elapsed_time()) /
-          Time::kMillisecondsPerSecond,
-      test_info.test_case_name(),
-      TimeToISO8601(Time::FromJavaTime(test_info.result()->start_timestamp()))
-          .c_str());
+  fprintf(output_file_.get(),
+          "    <testcase name=\"%s\" status=\"run\" time=\"%.3f\""
+          " classname=\"%s\" timestamp=\"%s\">\n",
+          test_info.name(),
+          static_cast<double>(test_info.result()->elapsed_time()) /
+              Time::kMillisecondsPerSecond,
+          test_info.test_case_name(),
+          TimeFormatAsIso8601(Time::FromMillisecondsSinceUnixEpoch(
+                                  test_info.result()->start_timestamp()))
+              .c_str());
   if (test_info.result()->Failed()) {
     fprintf(output_file_.get(),
             "      <failure message=\"\" type=\"\"></failure>\n");
diff --git a/base/test/gtest_xml_util.cc b/base/test/gtest_xml_util.cc
index 6c8b950..e9f9df3 100644
--- a/base/test/gtest_xml_util.cc
+++ b/base/test/gtest_xml_util.cc
@@ -12,10 +12,21 @@
 #include "base/strings/string_number_conversions.h"
 #include "base/test/gtest_util.h"
 #include "base/test/launcher/test_launcher.h"
+#include "third_party/libxml/chromium/libxml_utils.h"
 #include "third_party/libxml/chromium/xml_reader.h"
 
 namespace base {
 
+namespace {
+
+// No-op error handler that replaces libxml's default, which writes to stderr.
+// The test launcher's worker threads speculatively parse results XML to detect
+// timeouts in the processes they manage, so logging parsing errors could be
+// noisy (e.g., crbug.com/1466897).
+void NullXmlErrorFunc(void* context, const char* message, ...) {}
+
+}  // namespace
+
 struct Link {
   // The name of the test case.
   std::string name;
@@ -54,6 +65,9 @@
   if (!ReadFileToString(output_file, &xml_contents))
     return false;
 
+  // Silence XML errors - otherwise they go to stderr.
+  ScopedXmlErrorFunc error_func(nullptr, &NullXmlErrorFunc);
+
   XmlReader xml_reader;
   if (!xml_reader.Load(xml_contents))
     return false;
diff --git a/base/test/ios/wait_util.h b/base/test/ios/wait_util.h
index bf47781..9179096 100644
--- a/base/test/ios/wait_util.h
+++ b/base/test/ios/wait_util.h
@@ -43,9 +43,8 @@
 
 // Returns true when condition() becomes true, otherwise returns false after
 // |timeout|. Repetitively runs the current NSRunLoop and the current
-// MessageLoop (if |run_message_loop| is true).
-// TODO(crbug.com/1462320): Investigate whether we can always run the
-// message loop.
+// MessageLoop (if |run_message_loop| is true). Passing |run_message_loop| true
+// only makes sense in unit tests.
 [[nodiscard]] bool WaitUntilConditionOrTimeout(TimeDelta timeout,
                                                bool run_message_loop,
                                                ConditionBlock condition);
diff --git a/base/test/launcher/test_launcher.cc b/base/test/launcher/test_launcher.cc
index 0ccf067..b8efae8 100644
--- a/base/test/launcher/test_launcher.cc
+++ b/base/test/launcher/test_launcher.cc
@@ -9,6 +9,7 @@
 #include <algorithm>
 #include <map>
 #include <random>
+#include <string_view>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
@@ -27,6 +28,7 @@
 #include "base/format_macros.h"
 #include "base/functional/bind.h"
 #include "base/hash/hash.h"
+#include "base/i18n/icu_util.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/logging.h"
@@ -66,7 +68,6 @@
 #include "build/build_config.h"
 #include "build/chromeos_buildflags.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/libxml/chromium/libxml_utils.h"
 
 #if BUILDFLAG(IS_POSIX)
 #include <fcntl.h>
@@ -75,7 +76,7 @@
 #endif
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/apple/scoped_nsautorelease_pool.h"
 #endif
 
 #if BUILDFLAG(IS_WIN)
@@ -833,10 +834,6 @@
   // |done|.
   void CleanupTask(base::ScopedTempDir task_temp_dir, bool done);
 
-  // No-op error function that replaces libxml's default, which writes to
-  // stderr.
-  static void NullXmlErrorFunc(void* context, const char* message, ...) {}
-
   ThreadChecker thread_checker_;
 
   const raw_ptr<TestLauncher> launcher_;
@@ -849,9 +846,6 @@
   // Protects member used concurrently by worker tasks.
   base::Lock lock_;
   std::vector<std::string> tests_to_run_ GUARDED_BY(lock_);
-  // Set the global libxml error context and function pointer for the lifetime
-  // of this test runner.
-  ScopedXmlErrorFunc xml_error_func_{nullptr, &NullXmlErrorFunc};
 
   base::WeakPtrFactory<TestRunner> weak_ptr_factory_{this};
 };
@@ -1493,6 +1487,25 @@
   return true;
 }
 
+bool TestLauncher::IsOnlyExactPositiveFilterFromFile(
+    const CommandLine* command_line) const {
+  if (command_line->HasSwitch(kGTestFilterFlag)) {
+    LOG(ERROR) << "Found " << kGTestFilterFlag;
+    return false;
+  }
+  if (!negative_test_filter_.empty()) {
+    LOG(ERROR) << "Found negative filters in the filter file.";
+    return false;
+  }
+  for (const auto& filter : positive_test_filter_) {
+    if (Contains(filter, '*')) {
+      LOG(ERROR) << "Found wildcard positive filters in the filter file.";
+      return false;
+    }
+  }
+  return true;
+}
+
 bool TestLauncher::Init(CommandLine* command_line) {
   // Initialize sharding. Command line takes precedence over legacy environment
   // variables.
@@ -1612,6 +1625,10 @@
   fprintf(stdout, "Using %zu parallel jobs.\n", parallel_jobs_);
   fflush(stdout);
 
+  if (!base::i18n::InitializeICU()) {
+    return false;
+  }
+
   CreateAndStartThreadPool(parallel_jobs_);
 
   std::vector<std::string> positive_file_filter;
@@ -1645,6 +1662,18 @@
     negative_test_filter_.clear();
   }
 
+  // If `kEnforceExactPositiveFilter` is set, only accept exact positive
+  // filters from the filter file.
+  enforce_exact_postive_filter_ =
+      command_line->HasSwitch(switches::kEnforceExactPositiveFilter);
+  if (enforce_exact_postive_filter_ &&
+      !IsOnlyExactPositiveFilterFromFile(command_line)) {
+    LOG(ERROR) << "With " << switches::kEnforceExactPositiveFilter
+               << ", only exact positive filters are accepted via "
+               << switches::kTestLauncherFilterFile;
+    return false;
+  }
+
   // Split --gtest_filter at '-', if there is one, to separate into
   // positive filter and negative filter portions.
   bool double_colon_supported = !command_line->HasSwitch(kGTestFilterFlag);
@@ -1973,14 +2002,23 @@
   }
 }
 
+bool TestLauncher::ShouldRunInCurrentShard(
+    std::string_view prefix_stripped_name) const {
+  CHECK(!StartsWith(prefix_stripped_name, kPreTestPrefix));
+  CHECK(!StartsWith(prefix_stripped_name, kDisabledTestPrefix));
+  return PersistentHash(prefix_stripped_name) % total_shards_ ==
+         static_cast<uint32_t>(shard_index_);
+}
+
 std::vector<std::string> TestLauncher::CollectTests() {
   std::vector<std::string> test_names;
   // To support RTS(regression test selection), which may have 100,000 or
   // more exact gtest filter, we first split filter into exact filter
   // and wildcards filter, then exact filter can match faster.
   std::vector<StringPiece> positive_wildcards_filter;
-  std::unordered_set<StringPiece, StringPieceHash> positive_exact_filter;
+  std::unordered_set<StringPiece> positive_exact_filter;
   positive_exact_filter.reserve(positive_test_filter_.size());
+  std::unordered_set<std::string> enforced_positive_tests;
   for (const std::string& filter : positive_test_filter_) {
     if (filter.find('*') != std::string::npos) {
       positive_wildcards_filter.push_back(filter);
@@ -1990,7 +2028,7 @@
   }
 
   std::vector<StringPiece> negative_wildcards_filter;
-  std::unordered_set<StringPiece, StringPieceHash> negative_exact_filter;
+  std::unordered_set<StringPiece> negative_exact_filter;
   negative_exact_filter.reserve(negative_test_filter_.size());
   for (const std::string& filter : negative_test_filter_) {
     if (filter.find('*') != std::string::npos) {
@@ -2011,6 +2049,9 @@
                        positive_exact_filter.end() ||
                    positive_exact_filter.find(prefix_stripped_name) !=
                        positive_exact_filter.end();
+      if (found && enforce_exact_postive_filter_) {
+        enforced_positive_tests.insert(prefix_stripped_name);
+      }
       if (!found) {
         for (const StringPiece& filter : positive_wildcards_filter) {
           if (MatchPattern(test_name, filter) ||
@@ -2044,8 +2085,7 @@
 
     // Tests with the name XYZ will cause tests with the name PRE_XYZ to run. We
     // should bucket all of these tests together.
-    if (PersistentHash(prefix_stripped_name) % total_shards_ !=
-        static_cast<uint32_t>(shard_index_)) {
+    if (!ShouldRunInCurrentShard(prefix_stripped_name)) {
       continue;
     }
 
@@ -2065,6 +2105,26 @@
     test_names.push_back(test_name);
   }
 
+  // If `kEnforceExactPositiveFilter` is set, all test cases listed in the
+  // exact positive filter for the current shard should exist in the
+  // `enforced_positive_tests`. Otherwise, print the missing cases and fail
+  // loudly.
+  if (enforce_exact_postive_filter_) {
+    bool found_exact_positive_filter_not_enforced = false;
+    for (const auto& filter : positive_exact_filter) {
+      if (!ShouldRunInCurrentShard(filter) ||
+          Contains(enforced_positive_tests, std::string(filter))) {
+        continue;
+      }
+      if (!found_exact_positive_filter_not_enforced) {
+        LOG(ERROR) << "Found exact positive filter not enforced:";
+        found_exact_positive_filter_not_enforced = true;
+      }
+      LOG(ERROR) << filter;
+    }
+    CHECK(!found_exact_positive_filter_not_enforced);
+  }
+
   return test_names;
 }
 
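
As a side note, here is a minimal sketch (not part of the diff) of the sharding rule that `ShouldRunInCurrentShard()` factors out above; the helper name `RunsOnShard` and the example test name are illustrative only:

    #include <string_view>

    #include "base/hash/hash.h"

    // A test is bucketed onto a shard by hashing its name with the PRE_ and
    // DISABLED_ prefixes stripped, so Foo and PRE_Foo land on the same shard.
    bool RunsOnShard(std::string_view prefix_stripped_name,
                     int total_shards,
                     int shard_index) {
      return base::PersistentHash(prefix_stripped_name) %
                 static_cast<uint32_t>(total_shards) ==
             static_cast<uint32_t>(shard_index);
    }

    // e.g. RunsOnShard("Test.firstTest", /*total_shards=*/2, /*shard_index=*/0)
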
diff --git a/base/test/launcher/test_launcher.h b/base/test/launcher/test_launcher.h
index 2598ac5..46bde67 100644
--- a/base/test/launcher/test_launcher.h
+++ b/base/test/launcher/test_launcher.h
@@ -11,6 +11,7 @@
 #include <memory>
 #include <set>
 #include <string>
+#include <string_view>
 #include <unordered_set>
 #include <vector>
 
@@ -245,6 +246,15 @@
 
   std::vector<std::string> CollectTests();
 
+  // Helper to tell if the test runs in the current shard.
+  // `prefix_stripped_name` is the test name excluding the DISABLED_ and
+  // PRE_ prefixes.
+  bool ShouldRunInCurrentShard(std::string_view prefix_stripped_name) const;
+
+  // Helper to check whether only exact positive filters are passed via
+  // a filter file.
+  bool IsOnlyExactPositiveFilterFromFile(const CommandLine* command_line) const;
+
   // Make sure we don't accidentally call the wrong methods e.g. on the worker
   // pool thread.  Should be the first member so that it's destroyed last: when
   // destroying other members, especially the worker pool, we may check the code
@@ -264,6 +274,9 @@
   std::vector<std::string> positive_test_filter_;
   std::vector<std::string> negative_test_filter_;
 
+  // Whether to enforce running all tests listed in the exact positive filter.
+  bool enforce_exact_postive_filter_;
+
   // Class to encapsulate gtest information.
   class TestInfo;
 
diff --git a/base/test/launcher/test_launcher_unittest.cc b/base/test/launcher/test_launcher_unittest.cc
index 2d9a399..1d58a3e 100644
--- a/base/test/launcher/test_launcher_unittest.cc
+++ b/base/test/launcher/test_launcher_unittest.cc
@@ -12,6 +12,7 @@
 #include "base/files/scoped_temp_dir.h"
 #include "base/functional/bind.h"
 #include "base/functional/callback_helpers.h"
+#include "base/i18n/time_formatting.h"
 #include "base/logging.h"
 #include "base/no_destructor.h"
 #include "base/process/launch.h"
@@ -25,7 +26,6 @@
 #include "base/test/scoped_logging_settings.h"
 #include "base/test/task_environment.h"
 #include "base/test/test_timeouts.h"
-#include "base/time/time_to_iso8601.h"
 #include "build/build_config.h"
 #include "build/chromeos_buildflags.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -550,6 +550,63 @@
   EXPECT_TRUE(test_launcher.Run(command_line.get()));
 }
 
+// Tests that TestLauncher enforces running tests in the exact positive filter.
+TEST_F(TestLauncherTest, EnforceRunTestsInExactPositiveFilter) {
+  AddMockedTests("Test", {"firstTest", "secondTest", "thirdTest"});
+  SetUpExpectCalls();
+
+  ASSERT_TRUE(dir.CreateUniqueTempDir());
+  FilePath path = dir.GetPath().AppendASCII("test.filter");
+  WriteFile(path, "Test.firstTest\nTest.thirdTest");
+  command_line->AppendSwitchPath("test-launcher-filter-file", path);
+  command_line->AppendSwitch("enforce-exact-positive-filter");
+  command_line->AppendSwitchASCII("test-launcher-total-shards", "2");
+  command_line->AppendSwitchASCII("test-launcher-shard-index", "0");
+
+  // Test.firstTest is in the exact positive filter, so expected to run.
+  // Test.thirdTest is launched in another shard.
+  std::vector<std::string> tests_names = {"Test.firstTest"};
+  EXPECT_CALL(test_launcher, LaunchChildGTestProcess(
+                                 _,
+                                 testing::ElementsAreArray(tests_names.cbegin(),
+                                                           tests_names.cend()),
+                                 _, _))
+      .WillOnce(::testing::DoAll(OnTestResult(&test_launcher, "Test.firstTest",
+                                              TestResult::TEST_SUCCESS)));
+  EXPECT_TRUE(test_launcher.Run(command_line.get()));
+}
+
+// Tests that TestLauncher fails if enforce-exact-positive-filter and
+// gtest_filter are both present.
+TEST_F(TestLauncherTest,
+       EnforceRunTestsInExactPositiveFailWithGtestFilterFlag) {
+  command_line->AppendSwitch("enforce-exact-positive-filter");
+  command_line->AppendSwitchASCII("gtest_filter", "Test.firstTest;-Test.*");
+  EXPECT_FALSE(test_launcher.Run(command_line.get()));
+}
+
+// Tests that TestLauncher fails if enforce-exact-positive-filter is set
+// with negative test filters.
+TEST_F(TestLauncherTest, EnforceRunTestsInExactPositiveFailWithNegativeFilter) {
+  command_line->AppendSwitch("enforce-exact-positive-filter");
+  ASSERT_TRUE(dir.CreateUniqueTempDir());
+  FilePath path = CreateFilterFile();
+  command_line->AppendSwitchPath("test-launcher-filter-file", path);
+  EXPECT_FALSE(test_launcher.Run(command_line.get()));
+}
+
+// Tests that TestLauncher fails if enforce-exact-positive-filter is set
+// with wildcard positive filters.
+TEST_F(TestLauncherTest,
+       EnforceRunTestsInExactPositiveFailWithWildcardPositiveFilter) {
+  command_line->AppendSwitch("enforce-exact-positive-filter");
+  ASSERT_TRUE(dir.CreateUniqueTempDir());
+  FilePath path = dir.GetPath().AppendASCII("test.filter");
+  WriteFile(path, "Test.*");
+  command_line->AppendSwitchPath("test-launcher-filter-file", path);
+  EXPECT_FALSE(test_launcher.Run(command_line.get()));
+}
+
 // Tests fail if they produce too much output.
 TEST_F(TestLauncherTest, ExcessiveOutput) {
   AddMockedTests("Test", {"firstTest"});
@@ -896,16 +953,19 @@
   ASSERT_TRUE(AppendToFile(
       result_file,
       StrCat({"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\" />\n",
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\" />\n",
               "    <testcase name=\"B\" status=\"run\" time=\"0.500\" "
               "classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\">\n", "    </testcase>\n",
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\">\n",
+              "    </testcase>\n",
               "    <x-teststart name=\"C\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now() + Milliseconds(500)).c_str(), "\" />\n",
+              TimeFormatAsIso8601(Time::Now() + Milliseconds(500)).c_str(),
+              "\" />\n",
               "    <testcase name=\"C\" status=\"run\" time=\"0.500\" "
               "classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now() + Milliseconds(500)).c_str(), "\">\n",
-              "    </testcase>\n", "  </testsuite>\n", "</testsuites>\n"})));
+              TimeFormatAsIso8601(Time::Now() + Milliseconds(500)).c_str(),
+              "\">\n", "    </testcase>\n", "  </testsuite>\n",
+              "</testsuites>\n"})));
 
   MockResultWatcher result_watcher(result_file, 2);
   EXPECT_CALL(result_watcher, WaitWithTimeout(_))
@@ -922,12 +982,15 @@
 // Verify that a result watcher repeatedly checks the file for a batch of slow
 // tests. Each test completes in 40s, which is just under the timeout of 45s.
 TEST_F(ResultWatcherTest, PollCompletesSlowly) {
+  SCOPED_TRACE(::testing::Message() << "Start ticks: " << TimeTicks::Now());
+
   ASSERT_TRUE(dir.CreateUniqueTempDir());
   FilePath result_file = CreateResultFile();
+  const Time start = Time::Now();
   ASSERT_TRUE(AppendToFile(
       result_file,
       StrCat({"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\" />\n"})));
+              TimeFormatAsIso8601(start).c_str(), "\" />\n"})));
 
   MockResultWatcher result_watcher(result_file, 10);
   size_t checks = 0;
@@ -943,7 +1006,8 @@
                       result_file,
                       StrCat({"    <testcase name=\"B\" status=\"run\" "
                               "time=\"40.000\" classname=\"A\" timestamp=\"",
-                              TimeToISO8601(Time::Now() - Seconds(45)).c_str(),
+                              TimeFormatAsIso8601(Time::Now() - Seconds(45))
+                                  .c_str(),
                               "\">\n", "    </testcase>\n"}));
                   checks++;
                   if (checks == 10) {
@@ -959,13 +1023,13 @@
                         result_file,
                         StrCat({"    <x-teststart name=\"B\" classname=\"A\" "
                                 "timestamp=\"",
-                                TimeToISO8601(Time::Now() - Seconds(5)).c_str(),
+                                TimeFormatAsIso8601(Time::Now() - Seconds(5))
+                                    .c_str(),
                                 "\" />\n"}));
                   }
                 }),
                 ReturnPointee(&done)));
 
-  Time start = Time::Now();
   ASSERT_TRUE(result_watcher.PollUntilDone(Seconds(45)));
   // The first check occurs 45s after the batch starts, so the sequence of
   // events looks like:
@@ -987,7 +1051,7 @@
   ASSERT_TRUE(AppendToFile(
       result_file,
       StrCat({"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\" />\n"})));
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\" />\n"})));
 
   MockResultWatcher result_watcher(result_file, 10);
   EXPECT_CALL(result_watcher, WaitWithTimeout(_))
@@ -1010,10 +1074,10 @@
   ASSERT_TRUE(AppendToFile(
       result_file,
       StrCat({"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\" />\n",
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\" />\n",
               "    <testcase name=\"B\" status=\"run\" time=\"40.000\" "
               "classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\">\n",
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\">\n",
               "      <summary>"})));
 
   MockResultWatcher result_watcher(result_file, 2);
@@ -1045,15 +1109,16 @@
   Time time_before_change = Time::Now() + Hours(1);
   ASSERT_TRUE(AppendToFile(
       result_file,
-      StrCat({"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(time_before_change).c_str(), "\" />\n",
-              "    <testcase name=\"B\" status=\"run\" time=\"0.500\" "
-              "classname=\"A\" timestamp=\"",
-              TimeToISO8601(time_before_change).c_str(), "\">\n",
-              "    </testcase>\n",
-              "    <x-teststart name=\"C\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(time_before_change + Milliseconds(500)).c_str(),
-              "\" />\n"})));
+      StrCat(
+          {"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
+           TimeFormatAsIso8601(time_before_change).c_str(), "\" />\n",
+           "    <testcase name=\"B\" status=\"run\" time=\"0.500\" "
+           "classname=\"A\" timestamp=\"",
+           TimeFormatAsIso8601(time_before_change).c_str(), "\">\n",
+           "    </testcase>\n",
+           "    <x-teststart name=\"C\" classname=\"A\" timestamp=\"",
+           TimeFormatAsIso8601(time_before_change + Milliseconds(500)).c_str(),
+           "\" />\n"})));
 
   MockResultWatcher result_watcher(result_file, 2);
   EXPECT_CALL(result_watcher, WaitWithTimeout(_))
@@ -1077,12 +1142,13 @@
   ASSERT_TRUE(AppendToFile(
       result_file,
       StrCat({"    <x-teststart name=\"B\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\" />\n",
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\" />\n",
               "    <testcase name=\"B\" status=\"run\" time=\"0.500\" "
               "classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now()).c_str(), "\">\n", "    </testcase>\n",
+              TimeFormatAsIso8601(Time::Now()).c_str(), "\">\n",
+              "    </testcase>\n",
               "    <x-teststart name=\"C\" classname=\"A\" timestamp=\"",
-              TimeToISO8601(Time::Now() + Milliseconds(500)).c_str(),
+              TimeFormatAsIso8601(Time::Now() + Milliseconds(500)).c_str(),
               "\" />\n"})));
   task_environment.AdvanceClock(Hours(1));
 
@@ -1217,6 +1283,67 @@
   EXPECT_GT(*test_results[2].timestamp, Time());
 }
 
+// TODO(crbug.com/1498237): Enable the test once GetAppOutputAndError
+// can collect stdout and stderr on Fuchsia.
+#if !BUILDFLAG(IS_FUCHSIA)
+TEST(ProcessGTestOutputTest, FoundTestCaseNotEnforced) {
+  ScopedTempDir dir;
+  ASSERT_TRUE(dir.CreateUniqueTempDir());
+  FilePath path = dir.GetPath().AppendASCII("test.filter");
+  WriteFile(path, "Test.firstTest\nTest.secondTest");
+  CommandLine command_line(CommandLine::ForCurrentProcess()->GetProgram());
+  command_line.AppendSwitchPath("test-launcher-filter-file", path);
+  command_line.AppendSwitch("enforce-exact-positive-filter");
+  std::string output;
+  // The test cases in the filter do not exist, hence the test launcher
+  // should fail and print their names.
+  EXPECT_FALSE(GetAppOutputAndError(command_line, &output));
+  // Banner should appear in the output.
+  const char kBanner[] = "Found exact positive filter not enforced:";
+  EXPECT_TRUE(Contains(output, kBanner));
+  std::vector<std::string> lines = base::SplitString(
+      output, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  std::unordered_set<std::string> tests_not_enforced;
+  bool banner_has_printed = false;
+  for (size_t i = 0; i < lines.size(); i++) {
+    if (Contains(lines[i], kBanner)) {
+      // The following two lines should contain the test cases that were not
+      // enforced, and the third line the check failure message.
+      EXPECT_LT(i + 3, lines.size());
+      // Banner should only appear once.
+      EXPECT_FALSE(banner_has_printed);
+      banner_has_printed = true;
+      continue;
+    }
+    if (banner_has_printed && tests_not_enforced.size() < 2) {
+      // Note: logging prefixes each line with date/time and file/line info
+      // ahead of the test name, e.g.:
+      // [1030/220237.425678:ERROR:test_launcher.cc(2123)] Test.secondTest
+      // [1030/220237.425682:ERROR:test_launcher.cc(2123)] Test.firstTest
+      std::vector<std::string> line_vec = base::SplitString(
+          lines[i], "]", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+      ASSERT_EQ(line_vec.size(), 2u);
+      tests_not_enforced.insert(line_vec[1]);
+      continue;
+    }
+    if (banner_has_printed && tests_not_enforced.size() == 2) {
+// Official builds discard the log messages from CHECK failures, hence
+// the test cannot catch the "Check failed" line.
+#if !defined(OFFICIAL_BUILD) || DCHECK_IS_ON()
+      EXPECT_TRUE(Contains(lines[i],
+                           "Check failed: "
+                           "!found_exact_positive_filter_not_enforced."));
+#endif  // !defined(OFFICIAL_BUILD) || DCHECK_IS_ON()
+      break;
+    }
+  }
+  // The printed test cases are not ordered, hence UnorderedElementsAre is
+  // needed for the comparison.
+  EXPECT_THAT(tests_not_enforced, testing::UnorderedElementsAre(
+                                      "Test.firstTest", "Test.secondTest"));
+}
+#endif  // !BUILDFLAG(IS_FUCHSIA)
+
 // TODO(crbug.com/1094369): Enable leaked-child checks on other platforms.
 #if BUILDFLAG(IS_FUCHSIA)
 
diff --git a/base/test/launcher/test_results_tracker.cc b/base/test/launcher/test_results_tracker.cc
index 63a0ac1..6306371 100644
--- a/base/test/launcher/test_results_tracker.cc
+++ b/base/test/launcher/test_results_tracker.cc
@@ -15,6 +15,7 @@
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/format_macros.h"
+#include "base/i18n/time_formatting.h"
 #include "base/json/json_writer.h"
 #include "base/json/string_escape.h"
 #include "base/logging.h"
@@ -25,8 +26,8 @@
 #include "base/test/launcher/test_launcher.h"
 #include "base/test/test_switches.h"
 #include "base/time/time.h"
-#include "base/time/time_to_iso8601.h"
 #include "base/values.h"
+#include "third_party/icu/source/i18n/unicode/timezone.h"
 
 namespace base {
 
@@ -38,18 +39,10 @@
 
 // Converts the given epoch time in milliseconds to a date string in the ISO
 // 8601 format, without the timezone information.
-// TODO(xyzzyz): Find a good place in Chromium to put it and refactor all uses
-// to point to it.
+// TODO(pkasting): Consider using `TimeFormatAsIso8601()`, possibly modified.
 std::string FormatTimeAsIso8601(Time time) {
-  Time::Exploded exploded;
-  time.UTCExplode(&exploded);
-  return StringPrintf("%04d-%02d-%02dT%02d:%02d:%02d",
-                      exploded.year,
-                      exploded.month,
-                      exploded.day_of_month,
-                      exploded.hour,
-                      exploded.minute,
-                      exploded.second);
+  return base::UnlocalizedTimeFormatWithPattern(time, "yyyy-MM-dd'T'HH:mm:ss",
+                                                icu::TimeZone::getGMT());
 }
 
 struct TestSuiteResultsAggregator {
@@ -471,11 +464,11 @@
         if (test_result.process_num)
           test_result_value.Set("process_num", *test_result.process_num);
         if (test_result.timestamp) {
-          // The timestamp is formatted using TimeToISO8601 instead of
-          // FormatTimeAsIso8601 here for a better accuracy that the former
-          // method would include a fraction of second (and the Z suffix).
-          test_result_value.Set("timestamp",
-                                TimeToISO8601(*test_result.timestamp).c_str());
+          // The timestamp is formatted using TimeFormatAsIso8601 instead of
+          // FormatTimeAsIso8601 here for a better accuracy, since the former
+          // method includes fractions of a second.
+          test_result_value.Set(
+              "timestamp", TimeFormatAsIso8601(*test_result.timestamp).c_str());
         }
 
         bool lossless_snippet = false;
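
For clarity, a small sketch (not part of the diff) contrasting the two ISO 8601 formats used above; the literal timestamps are examples only, assuming ICU is initialized:

    base::Time t;
    CHECK(base::Time::FromUTCString("2023-10-30 22:02:37", &t));
    // FormatTimeAsIso8601(t)       -> "2023-10-30T22:02:37"
    //   (UnlocalizedTimeFormatWithPattern; no fraction, no timezone suffix)
    // base::TimeFormatAsIso8601(t) -> e.g. "2023-10-30T22:02:37.000Z"
    //   (includes fractions of a second, hence preferred for "timestamp")
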
diff --git a/base/test/launcher/unit_test_launcher.cc b/base/test/launcher/unit_test_launcher.cc
index 54f6186..aec6fcd 100644
--- a/base/test/launcher/unit_test_launcher.cc
+++ b/base/test/launcher/unit_test_launcher.cc
@@ -8,6 +8,7 @@
 #include <memory>
 #include <utility>
 
+#include "base/base_paths.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
@@ -28,6 +29,7 @@
 #include "base/task/single_thread_task_runner.h"
 #include "base/test/allow_check_is_test_for_testing.h"
 #include "base/test/launcher/test_launcher.h"
+#include "base/test/scoped_block_tests_writing_to_special_dirs.h"
 #include "base/test/test_switches.h"
 #include "base/test/test_timeouts.h"
 #include "base/threading/thread_checker.h"
@@ -237,6 +239,22 @@
                                 std::move(gtest_init)));
   return RunTestsFromIOSApp();
 #else
+  ScopedBlockTestsWritingToSpecialDirs scoped_blocker(
+      {
+        // Please keep these in alphabetic order within each platform type.
+        base::DIR_SRC_TEST_DATA_ROOT, base::DIR_USER_DESKTOP,
+#if BUILDFLAG(IS_WIN)
+            base::DIR_COMMON_DESKTOP, base::DIR_START_MENU,
+            base::DIR_USER_STARTUP,
+
+#endif  // BUILDFLAG(IS_WIN)
+      },
+      ([](const base::FilePath& path) {
+        ADD_FAILURE()
+            << "Attempting to write file in dir " << path
+            << "Attempting to write a file in dir " << path
+            << ". Use ScopedPathOverride or another mechanism to avoid"
+               " writing to this directory.";
   return RunTestSuite(std::move(run_test_suite), parallel_jobs,
                       default_batch_limit, retry_limit, use_job_objects,
                       timeout_callback, std::move(gtest_init));
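
As a companion note (not part of the diff): a test that legitimately needs a path under one of the blocked keys can redirect that key to a scratch location instead of writing to the real directory, which is what the failure message above suggests; the key chosen below is just an example:

    base::ScopedTempDir temp_dir;
    ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    // Redirect DIR_USER_DESKTOP to the temp dir for this test's scope.
    base::ScopedPathOverride desktop_override(base::DIR_USER_DESKTOP,
                                              temp_dir.GetPath());
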
diff --git a/base/test/launcher/unit_test_launcher_ios.cc b/base/test/launcher/unit_test_launcher_ios.cc
index aa3ceb7..de7fbb6 100644
--- a/base/test/launcher/unit_test_launcher_ios.cc
+++ b/base/test/launcher/unit_test_launcher_ios.cc
@@ -4,11 +4,11 @@
 
 #include "base/test/launcher/unit_test_launcher.h"
 
+#include "base/apple/foundation_util.h"
 #include "base/command_line.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/mac/foundation_util.h"
 #include "base/test/allow_check_is_test_for_testing.h"
 #include "base/test/gtest_util.h"
 #include "base/test/test_support_ios.h"
@@ -57,14 +57,14 @@
         only_write_tests
             ? (command_line->GetSwitchValuePath(
                   switches::kTestLauncherListTests))
-            : mac::GetUserDocumentPath().Append("compiled_tests.json");
+            : apple::GetUserDocumentPath().Append("compiled_tests.json");
     int write_result = WriteCompiledInTestsToFileAndLog(list_path);
     if (only_write_tests) {
       return write_result;
     }
   } else if (command_line->HasSwitch(
                  switches::kTestLauncherPrintWritablePath)) {
-    fprintf(stdout, "%s", mac::GetUserLibraryPath().value().c_str());
+    fprintf(stdout, "%s", apple::GetUserLibraryPath().value().c_str());
     fflush(stdout);
     return 0;
   }
diff --git a/base/test/memory/dangling_ptr_instrumentation.cc b/base/test/memory/dangling_ptr_instrumentation.cc
index 0a4a789..87d3f72 100644
--- a/base/test/memory/dangling_ptr_instrumentation.cc
+++ b/base/test/memory/dangling_ptr_instrumentation.cc
@@ -6,7 +6,7 @@
 #include <cstdint>
 
 #include "base/allocator/partition_alloc_features.h"
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
 #include "base/check_op.h"
 #include "base/feature_list.h"
 #include "base/memory/raw_ptr.h"
diff --git a/base/test/memory/dangling_ptr_instrumentation.h b/base/test/memory/dangling_ptr_instrumentation.h
index 37fc563..bb4d155 100644
--- a/base/test/memory/dangling_ptr_instrumentation.h
+++ b/base/test/memory/dangling_ptr_instrumentation.h
@@ -7,7 +7,7 @@
 
 #include <cstdint>
 
-#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/dangling_raw_ptr_checks.h"
 #include "base/memory/raw_ptr.h"
 #include "base/strings/string_piece.h"
 #include "base/types/expected.h"
diff --git a/base/test/metrics/histogram_enum_reader.cc b/base/test/metrics/histogram_enum_reader.cc
index 0a8a6e1..16f9b86 100644
--- a/base/test/metrics/histogram_enum_reader.cc
+++ b/base/test/metrics/histogram_enum_reader.cc
@@ -85,7 +85,7 @@
 absl::optional<HistogramEnumEntryMap> ReadEnumFromEnumsXml(
     const std::string& enum_name) {
   FilePath src_root;
-  if (!PathService::Get(DIR_SOURCE_ROOT, &src_root)) {
+  if (!PathService::Get(DIR_SRC_TEST_DATA_ROOT, &src_root)) {
     ADD_FAILURE() << "Failed to get src root.";
     return absl::nullopt;
   }
diff --git a/base/test/mock_callback_unittest.cc b/base/test/mock_callback_unittest.cc
index a43e51a..d537706 100644
--- a/base/test/mock_callback_unittest.cc
+++ b/base/test/mock_callback_unittest.cc
@@ -57,23 +57,23 @@
 }
 
 TEST(MockCallbackTest, Typedefs) {
-  static_assert(std::is_same<MockCallback<RepeatingCallback<int()>>,
-                             MockRepeatingCallback<int()>>::value,
+  static_assert(std::is_same_v<MockCallback<RepeatingCallback<int()>>,
+                               MockRepeatingCallback<int()>>,
                 "Repeating typedef differs for zero args");
-  static_assert(std::is_same<MockCallback<RepeatingCallback<int(int, int)>>,
-                             MockRepeatingCallback<int(int, int)>>::value,
+  static_assert(std::is_same_v<MockCallback<RepeatingCallback<int(int, int)>>,
+                               MockRepeatingCallback<int(int, int)>>,
                 "Repeating typedef differs for multiple args");
-  static_assert(std::is_same<MockCallback<RepeatingCallback<void()>>,
-                             MockRepeatingClosure>::value,
+  static_assert(std::is_same_v<MockCallback<RepeatingCallback<void()>>,
+                               MockRepeatingClosure>,
                 "Repeating typedef differs for closure");
-  static_assert(std::is_same<MockCallback<OnceCallback<int()>>,
-                             MockOnceCallback<int()>>::value,
+  static_assert(std::is_same_v<MockCallback<OnceCallback<int()>>,
+                               MockOnceCallback<int()>>,
                 "Once typedef differs for zero args");
-  static_assert(std::is_same<MockCallback<OnceCallback<int(int, int)>>,
-                             MockOnceCallback<int(int, int)>>::value,
+  static_assert(std::is_same_v<MockCallback<OnceCallback<int(int, int)>>,
+                               MockOnceCallback<int(int, int)>>,
                 "Once typedef differs for multiple args");
-  static_assert(std::is_same<MockCallback<RepeatingCallback<void()>>,
-                             MockRepeatingClosure>::value,
+  static_assert(std::is_same_v<MockCallback<RepeatingCallback<void()>>,
+                               MockRepeatingClosure>,
                 "Once typedef differs for closure");
 }
 
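
For orientation, a typical use of the aliases verified above (not part of the diff); the expectation values are arbitrary:

    base::MockRepeatingCallback<int(int, int)> mock_cb;
    EXPECT_CALL(mock_cb, Run(2, 3)).WillOnce(testing::Return(5));
    // Get() returns a RepeatingCallback<int(int, int)> that invokes the mock.
    EXPECT_EQ(5, mock_cb.Get().Run(2, 3));
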
diff --git a/base/test/mock_chrome_application_mac.h b/base/test/mock_chrome_application_mac.h
index 6b329f8..c317390 100644
--- a/base/test/mock_chrome_application_mac.h
+++ b/base/test/mock_chrome_application_mac.h
@@ -10,7 +10,7 @@
 #import <AppKit/AppKit.h>
 
 #include "base/mac/scoped_sending_event.h"
-#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/message_pump_apple.h"
 
 // A basic implementation of CrAppProtocol and
 // CrAppControlProtocol. This can be used in tests that need an
diff --git a/base/test/mock_chrome_application_mac.mm b/base/test/mock_chrome_application_mac.mm
index 8ac3f67..cf2b0eb 100644
--- a/base/test/mock_chrome_application_mac.mm
+++ b/base/test/mock_chrome_application_mac.mm
@@ -14,8 +14,8 @@
   DCHECK([app conformsToProtocol:@protocol(CrAppControlProtocol)])
       << "Existing NSApp (class " << [[app className] UTF8String]
       << ") does not conform to required protocol.";
-  DCHECK(base::message_pump_mac::UsingCrApp())
-      << "message_pump_mac::Create() was called before "
+  DCHECK(base::message_pump_apple::UsingCrApp())
+      << "message_pump_apple::Create() was called before "
       << "+[MockCrApp sharedApplication]";
   return app;
 }
diff --git a/base/test/perf_test_suite.cc b/base/test/perf_test_suite.cc
index cd152c5..f809859 100644
--- a/base/test/perf_test_suite.cc
+++ b/base/test/perf_test_suite.cc
@@ -50,9 +50,9 @@
     RaiseProcessToHighPriority();
 }
 
-void PerfTestSuite::InitializeFromCommandLine(int argc, char** argv) {
+void PerfTestSuite::InitializeFromCommandLine(int* argc, char** argv) {
   TestSuite::InitializeFromCommandLine(argc, argv);
-  ::benchmark::Initialize(&argc, argv);
+  ::benchmark::Initialize(argc, argv);
 }
 
 int PerfTestSuite::RunAllTests() {
diff --git a/base/test/perf_test_suite.h b/base/test/perf_test_suite.h
index 50d7d0f..7ae3cbf 100644
--- a/base/test/perf_test_suite.h
+++ b/base/test/perf_test_suite.h
@@ -16,7 +16,7 @@
   // TODO(crbug.com/1404759): Port all perf tests to Google Benchmark and
   // eliminate gTest dependence from perf tests.
   void Initialize() override;
-  void InitializeFromCommandLine(int argc, char** argv) override;
+  void InitializeFromCommandLine(int* argc, char** argv) override;
   int RunAllTests() override;
   void Shutdown() override;
 };
diff --git a/base/test/protobuf_matchers.h b/base/test/protobuf_matchers.h
new file mode 100644
index 0000000..df9d909
--- /dev/null
+++ b/base/test/protobuf_matchers.h
@@ -0,0 +1,39 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_PROTOBUF_MATCHERS_H_
+#define BASE_TEST_PROTOBUF_MATCHERS_H_
+
+#include <string>
+
+#include "testing/gmock/include/gmock/gmock-matchers.h"
+
+namespace base::test {
+
+// Matcher that verifies two protobufs contain the same data.
+MATCHER_P(EqualsProto,
+          message,
+          "Match a proto Message equal to the matcher's argument.") {
+  std::string expected_serialized;
+  if (!message.SerializeToString(&expected_serialized)) {
+    *result_listener << "Expected proto fails to serialize";
+    return false;
+  }
+  std::string actual_serialized;
+  if (!arg.SerializeToString(&actual_serialized)) {
+    *result_listener << "Actual proto fails to serialize";
+    return false;
+  }
+  if (expected_serialized != actual_serialized) {
+    *result_listener << "Provided proto did not match the expected proto"
+                     << "\n Serialized Expected Proto: " << expected_serialized
+                     << "\n Serialized Provided Proto: " << actual_serialized;
+    return false;
+  }
+  return true;
+}
+
+}  // namespace base::test
+
+#endif  // BASE_TEST_PROTOBUF_MATCHERS_H_
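
A small usage sketch for the new matcher (not part of the diff), assuming a hypothetical proto message type `ExampleProto` with a string `name` field:

    ExampleProto expected;
    expected.set_name("foo");

    ExampleProto actual = BuildProtoUnderTest();  // Hypothetical helper.
    EXPECT_THAT(actual, base::test::EqualsProto(expected));
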
diff --git a/base/test/repeating_test_future_unittest.cc b/base/test/repeating_test_future_unittest.cc
index 7ecb777..9c5eced 100644
--- a/base/test/repeating_test_future_unittest.cc
+++ b/base/test/repeating_test_future_unittest.cc
@@ -124,12 +124,12 @@
   test::ScopedRunLoopTimeout timeout(FROM_HERE, Milliseconds(1));
 
   // `ScopedRunLoopTimeout` will automatically fail the test when a timeout
-  // happens, so we use EXPECT_FATAL_FAILURE to handle this failure.
-  // EXPECT_FATAL_FAILURE only works on static objects.
+  // happens, so we use EXPECT_NONFATAL_FAILURE to handle this failure.
+  // EXPECT_NONFATAL_FAILURE only works on static objects.
   static bool success;
   static RepeatingTestFuture<std::string> future;
 
-  EXPECT_FATAL_FAILURE({ success = future.Wait(); }, "timed out");
+  EXPECT_NONFATAL_FAILURE({ success = future.Wait(); }, "timed out");
 
   EXPECT_FALSE(success);
 }
diff --git a/base/test/run_until_unittest.cc b/base/test/run_until_unittest.cc
index e6eb201..5fc3510 100644
--- a/base/test/run_until_unittest.cc
+++ b/base/test/run_until_unittest.cc
@@ -112,8 +112,8 @@
   // EXPECT_FATAL_FAILURE only works on static objects.
   static bool success;
 
-  EXPECT_FATAL_FAILURE({ success = RunUntil([]() { return false; }); },
-                       "timed out");
+  EXPECT_NONFATAL_FAILURE({ success = RunUntil([]() { return false; }); },
+                          "timed out");
 
   EXPECT_FALSE(success);
 }
diff --git a/base/test/scoped_block_tests_writing_to_special_dirs.cc b/base/test/scoped_block_tests_writing_to_special_dirs.cc
new file mode 100644
index 0000000..df5803a
--- /dev/null
+++ b/base/test/scoped_block_tests_writing_to_special_dirs.cc
@@ -0,0 +1,29 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_block_tests_writing_to_special_dirs.h"
+
+#include <ostream>
+#include <utility>
+
+#include "base/check.h"
+
+namespace base {
+
+ScopedBlockTestsWritingToSpecialDirs::ScopedBlockTestsWritingToSpecialDirs(
+    std::vector<int> dirs_to_block,
+    FileWriteBlockedForTestingFunctionPtr failure_callback) {
+  CHECK(failure_callback) << "Can't use NULL failure callback";
+  auto& special_dir_write_blocker = BlockTestsWritingToSpecialDirs::Get();
+  CHECK(!special_dir_write_blocker.has_value())
+      << "ScopedBlockTestsWritingToSpecialDirs can't be nested.";
+
+  special_dir_write_blocker.emplace(std::move(dirs_to_block), failure_callback);
+}
+
+ScopedBlockTestsWritingToSpecialDirs::~ScopedBlockTestsWritingToSpecialDirs() {
+  BlockTestsWritingToSpecialDirs::Get().reset();
+}
+
+}  // namespace base
diff --git a/base/test/scoped_block_tests_writing_to_special_dirs.h b/base/test/scoped_block_tests_writing_to_special_dirs.h
new file mode 100644
index 0000000..3c613c5
--- /dev/null
+++ b/base/test/scoped_block_tests_writing_to_special_dirs.h
@@ -0,0 +1,33 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
+#define BASE_TEST_SCOPED_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
+
+#include <vector>
+
+#include "base/files/block_tests_writing_to_special_dirs.h"
+
+namespace base {
+
+// This is used by test harnesses to detect and prevent tests writing to
+// special directories, with help from `BlockTestsWritingToSpecialDirs`.
+class ScopedBlockTestsWritingToSpecialDirs {
+ public:
+  // `dirs_to_block` contains the PathService keys of the dirs to block.
+  ScopedBlockTestsWritingToSpecialDirs(
+      std::vector<int> dirs_to_block,
+      FileWriteBlockedForTestingFunctionPtr failure_callback);
+
+  ScopedBlockTestsWritingToSpecialDirs(
+      const ScopedBlockTestsWritingToSpecialDirs&) = delete;
+  ScopedBlockTestsWritingToSpecialDirs& operator=(
+      const ScopedBlockTestsWritingToSpecialDirs&) = delete;
+
+  ~ScopedBlockTestsWritingToSpecialDirs();
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
diff --git a/base/test/scoped_mock_clock_override_unittest.cc b/base/test/scoped_mock_clock_override_unittest.cc
index 8be7463..f068716 100644
--- a/base/test/scoped_mock_clock_override_unittest.cc
+++ b/base/test/scoped_mock_clock_override_unittest.cc
@@ -13,7 +13,13 @@
 
 namespace {
 
-TEST(ScopedMockClockOverrideTest, Time) {
+// Disabled on Android due to flakes; see https://crbug.com/1474884.
+#if BUILDFLAG(IS_ANDROID)
+#define MAYBE_Time DISABLED_Time
+#else
+#define MAYBE_Time Time
+#endif
+TEST(ScopedMockClockOverrideTest, MAYBE_Time) {
   // Choose a reference time that we know to be in the past but close to now.
   Time build_time = GetBuildTime();
 
diff --git a/base/test/scoped_path_override.cc b/base/test/scoped_path_override.cc
index 84b3f5c..8f73691 100644
--- a/base/test/scoped_path_override.cc
+++ b/base/test/scoped_path_override.cc
@@ -38,7 +38,7 @@
 }
 
 void ScopedPathOverride::SaveOriginal() {
-  if (PathService::IsOverriddenForTests(key_)) {
+  if (PathService::IsOverriddenForTesting(key_)) {
     original_override_ = PathService::CheckedGet(key_);
   }
 }
diff --git a/base/test/scoped_run_loop_timeout.cc b/base/test/scoped_run_loop_timeout.cc
index 1c0df98..aabf39a 100644
--- a/base/test/scoped_run_loop_timeout.cc
+++ b/base/test/scoped_run_loop_timeout.cc
@@ -19,6 +19,9 @@
 
 bool g_add_gtest_failure_on_timeout = false;
 
+std::unique_ptr<ScopedRunLoopTimeout::TimeoutCallback>
+    g_handle_timeout_for_testing = nullptr;
+
 std::string TimeoutMessage(const RepeatingCallback<std::string()>& get_log,
                            const Location& timeout_enabled_from_here) {
   std::string message = "RunLoop::Run() timed out. Timeout set at ";
@@ -41,7 +44,18 @@
     const Location& timeout_enabled_from_here,
     RepeatingCallback<std::string()> on_timeout_log,
     const Location& run_from_here) {
-  GTEST_FAIL_AT(run_from_here.file_name(), run_from_here.line_number())
+  // Add a non-fatal failure to the GTest result and cause the test to fail.
+  // A non-fatal failure is preferred over a fatal one because LUCI Analysis
+  // will select the fatal failure over the non-fatal one as the primary error
+  // message for the test. The RunLoop::Run() function is generally called by
+  // the test framework and generates similar error messages and stack traces,
+  // making it difficult to cluster the failures. Making the failure non-fatal
+  // will propagate the ASSERT fatal failures in the test body as the primary
+  // error message.
+  // Also note that the GTest fatal failure will not actually stop the test
+  // execution if not directly used in the test body. A non-fatal/fatal failure
+  // here makes no difference to the test running flow.
+  ADD_FAILURE_AT(run_from_here.file_name(), run_from_here.line_number())
       << TimeoutMessage(on_timeout_log, timeout_enabled_from_here);
 }
 
@@ -71,13 +85,34 @@
       timeout.has_value() ? timeout.value() : nested_timeout_->timeout;
   CHECK_GT(run_timeout_.timeout, TimeDelta());
 
-  run_timeout_.on_timeout = BindRepeating(
-      g_add_gtest_failure_on_timeout ? &TimeoutCallbackWithGtestFailure
-                                     : &StandardTimeoutCallback,
-      timeout_enabled_from_here, std::move(on_timeout_log));
+  run_timeout_.on_timeout =
+      BindRepeating(GetTimeoutCallback(), timeout_enabled_from_here,
+                    std::move(on_timeout_log));
+
   RunLoop::SetTimeoutForCurrentThread(&run_timeout_);
 }
 
+ScopedRunLoopTimeout::TimeoutCallback
+ScopedRunLoopTimeout::GetTimeoutCallback() {
+  // In case both g_handle_timeout_for_testing and
+  // g_add_gtest_failure_on_timeout are set, we chain the callbacks so that they
+  // both get called eventually. This avoids confusion on what exactly is
+  // happening, especially for tests that are not controlling the call to
+  // `SetAddGTestFailureOnTimeout` directly.
+  if (g_handle_timeout_for_testing) {
+    if (g_add_gtest_failure_on_timeout) {
+      return ForwardRepeatingCallbacks(
+          {BindRepeating(&TimeoutCallbackWithGtestFailure),
+           *g_handle_timeout_for_testing});
+    }
+    return *g_handle_timeout_for_testing;
+  } else if (g_add_gtest_failure_on_timeout) {
+    return BindRepeating(&TimeoutCallbackWithGtestFailure);
+  } else {
+    return BindRepeating(&StandardTimeoutCallback);
+  }
+}
+
 // static
 bool ScopedRunLoopTimeout::ExistsForCurrentThread() {
   return RunLoop::GetTimeoutForCurrentThread() != nullptr;
@@ -94,6 +129,12 @@
   return RunLoop::GetTimeoutForCurrentThread();
 }
 
+// static
+void ScopedRunLoopTimeout::SetTimeoutCallbackForTesting(
+    std::unique_ptr<ScopedRunLoopTimeout::TimeoutCallback> cb) {
+  g_handle_timeout_for_testing = std::move(cb);
+}
+
 ScopedDisableRunLoopTimeout::ScopedDisableRunLoopTimeout()
     : nested_timeout_(RunLoop::GetTimeoutForCurrentThread()) {
   RunLoop::SetTimeoutForCurrentThread(nullptr);
diff --git a/base/test/scoped_run_loop_timeout.h b/base/test/scoped_run_loop_timeout.h
index 229b5f5..2b36e2e 100644
--- a/base/test/scoped_run_loop_timeout.h
+++ b/base/test/scoped_run_loop_timeout.h
@@ -60,6 +60,19 @@
 
 class ScopedRunLoopTimeout {
  public:
+  // The callback invoked when a run loop times out. RunLoop's internal
+  // machinery calls it after quitting the run loop. Implementers might choose
+  // to log locations, crash the process, or dump a stack trace, depending on
+  // the desired behaviour for run loop timeouts.
+  // Invoking `on_timeout_log` (the log callback supplied when the
+  // ScopedRunLoopTimeout was created) returns a customized timeout message
+  // string. Invoking it is optional, as that also depends on the desired
+  // behaviour of this callback.
+  using TimeoutCallback = base::RepeatingCallback<void(
+      const Location& timeout_enabled_from_here,
+      RepeatingCallback<std::string()> on_timeout_log,
+      const Location& run_from_here)>;
+
   ScopedRunLoopTimeout(const Location& timeout_enabled_from_here,
                        TimeDelta timeout);
   ~ScopedRunLoopTimeout();
@@ -77,8 +90,24 @@
   // Returns true if there is a Run() timeout configured on the current thread.
   static bool ExistsForCurrentThread();
 
+  // Important note:
+  // The two following static methods alter the behaviour on run loop
+  // timeouts. If both methods are called (in either order), the behaviours
+  // are chained, which means that both callbacks will be invoked.
+  // If the custom callback handling is reset (`SetTimeoutCallbackForTesting`
+  // called with `nullptr`), the behaviour reverts to its previous state:
+  // if `SetAddGTestFailureOnTimeout` was called, the GTest timeout handling
+  // is invoked; otherwise, the default function is invoked.
+
+  // Add GTest timeout handler.
   static void SetAddGTestFailureOnTimeout();
 
+  // Add provided callback as timeout handler.
+  static void SetTimeoutCallbackForTesting(std::unique_ptr<TimeoutCallback> cb);
+
+ private:
+  TimeoutCallback GetTimeoutCallback();
+
  protected:
   FRIEND_TEST_ALL_PREFIXES(ScopedRunLoopRunTimeoutTest, TimesOut);
   FRIEND_TEST_ALL_PREFIXES(ScopedRunLoopRunTimeoutTest, RunTasksUntilTimeout);
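
An illustrative sketch of the chaining described above (not part of the diff): with both hooks installed, a timeout adds the GTest failure and then runs the custom callback; the logging body is only an example:

    base::test::ScopedRunLoopTimeout::SetAddGTestFailureOnTimeout();
    base::test::ScopedRunLoopTimeout::SetTimeoutCallbackForTesting(
        std::make_unique<base::test::ScopedRunLoopTimeout::TimeoutCallback>(
            base::BindRepeating(
                [](const base::Location& timeout_enabled_from_here,
                   base::RepeatingCallback<std::string()> on_timeout_log,
                   const base::Location& run_from_here) {
                  LOG(WARNING) << "RunLoop::Run() timed out at "
                               << run_from_here.ToString();
                })));
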
diff --git a/base/test/scoped_run_loop_timeout_unittest.cc b/base/test/scoped_run_loop_timeout_unittest.cc
index cadd0bc..4941564 100644
--- a/base/test/scoped_run_loop_timeout_unittest.cc
+++ b/base/test/scoped_run_loop_timeout_unittest.cc
@@ -38,9 +38,9 @@
   SequencedTaskRunner::GetCurrentDefault()->PostDelayedTask(
       FROM_HERE, MakeExpectedRunClosure(FROM_HERE), kArbitraryTimeout);
 
-  // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
   static RunLoop& static_loop = run_loop;
-  EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
 }
 
 TEST(ScopedRunLoopTimeoutTest, RunTasksUntilTimeout) {
@@ -56,9 +56,9 @@
   SequencedTaskRunner::GetCurrentDefault()->PostDelayedTask(
       FROM_HERE, MakeExpectedRunClosure(FROM_HERE), kArbitraryTimeout);
 
-  // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
   static RunLoop& static_loop = run_loop;
-  EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
 }
 
 TEST(ScopedRunLoopTimeoutTest, TimesOutWithInheritedTimeoutValue) {
@@ -87,9 +87,9 @@
 
   EXPECT_CALL(log_callback, Run).WillOnce(testing::Return(std::string()));
 
-  // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
   static RunLoop& static_loop = run_loop;
-  EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
 }
 
 TEST(ScopedRunLoopTimeoutTest, RunTasksUntilTimeoutWithInheritedTimeoutValue) {
@@ -111,9 +111,9 @@
 
   EXPECT_CALL(log_callback, Run).WillOnce(testing::Return(std::string()));
 
-  // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
   static RunLoop& static_loop = run_loop;
-  EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
 }
 
 namespace {
@@ -143,10 +143,10 @@
       location, kArbitraryTimeout,
       BindRepeating([]() -> std::string { return kErrorMessage; }));
 
-  // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
   static RunLoop& static_loop = run_loop;
-  EXPECT_FATAL_FAILURE(static_loop.Run(),
-                       GetExpectedTimeoutMessage(location, kErrorMessage));
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(),
+                          GetExpectedTimeoutMessage(location, kErrorMessage));
 }
 
 TEST(ScopedRunLoopTimeoutTest, OnTimeoutLogWithNestedTimeouts) {
@@ -162,10 +162,34 @@
       location, kArbitraryTimeout,
       BindRepeating([]() -> std::string { return kErrorMessage; }));
 
-  // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
   static RunLoop& static_loop = run_loop;
-  EXPECT_FATAL_FAILURE(static_loop.Run(),
-                       GetExpectedTimeoutMessage(location, kErrorMessage));
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(),
+                          GetExpectedTimeoutMessage(location, kErrorMessage));
+}
+
+TEST(ScopedRunLoopTimeoutTest, OverwriteTimeoutCallbackForTesting) {
+  TaskEnvironment task_environment;
+  RunLoop run_loop;
+
+  bool custom_handler_called = false;
+  ScopedRunLoopTimeout::TimeoutCallback cb = DoNothing();
+  ScopedRunLoopTimeout::SetTimeoutCallbackForTesting(
+      std::make_unique<ScopedRunLoopTimeout::TimeoutCallback>(
+          std::move(cb).Then(BindLambdaForTesting(
+              [&custom_handler_called]() { custom_handler_called = true; }))));
+  static constexpr auto kArbitraryTimeout = Milliseconds(1);
+  const auto location = FROM_HERE;
+  ScopedRunLoopTimeout run_timeout(
+      location, kArbitraryTimeout,
+      BindRepeating([]() -> std::string { return kErrorMessage; }));
+
+  // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
+  static RunLoop& static_loop = run_loop;
+  EXPECT_NONFATAL_FAILURE(static_loop.Run(),
+                          GetExpectedTimeoutMessage(location, kErrorMessage));
+
+  EXPECT_TRUE(custom_handler_called);
 }
 
 }  // namespace base::test
diff --git a/base/test/task_environment_unittest.cc b/base/test/task_environment_unittest.cc
index a8ee403..e340e33 100644
--- a/base/test/task_environment_unittest.cc
+++ b/base/test/task_environment_unittest.cc
@@ -993,13 +993,13 @@
     }
     static auto& static_on_timeout_cb = run_timeout->on_timeout;
 #if defined(__clang__) && defined(_MSC_VER)
-    EXPECT_FATAL_FAILURE(
+    EXPECT_NONFATAL_FAILURE(
         static_on_timeout_cb.Run(FROM_HERE),
         "RunLoop::Run() timed out. Timeout set at "
         // We don't test the line number but it would be present.
         "TaskEnvironment@base\\test\\task_environment.cc:");
 #else
-    EXPECT_FATAL_FAILURE(
+    EXPECT_NONFATAL_FAILURE(
         static_on_timeout_cb.Run(FROM_HERE),
         "RunLoop::Run() timed out. Timeout set at "
         // We don't test the line number but it would be present.
diff --git a/base/test/test_future.h b/base/test/test_future.h
index 2beea9e..e12c95a 100644
--- a/base/test/test_future.h
+++ b/base/test/test_future.h
@@ -6,17 +6,20 @@
 #define BASE_TEST_TEST_FUTURE_H_
 
 #include <memory>
-#include <string>
 #include <tuple>
 
+#include "base/auto_reset.h"
 #include "base/check.h"
 #include "base/functional/bind.h"
 #include "base/functional/callback_forward.h"
+#include "base/functional/callback_helpers.h"
 #include "base/memory/weak_ptr.h"
 #include "base/run_loop.h"
 #include "base/sequence_checker.h"
+#include "base/strings/to_string.h"
 #include "base/test/test_future_internal.h"
 #include "base/thread_annotations.h"
+#include "testing/gtest/include/gtest/gtest.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base::test {
@@ -168,7 +171,7 @@
  public:
   using TupleType = std::tuple<std::decay_t<Types>...>;
 
-  static_assert(std::tuple_size<TupleType>::value > 0,
+  static_assert(std::tuple_size_v<TupleType> > 0,
                 "Don't use TestFuture<> but use TestFuture<void> instead");
 
   TestFuture() = default;
@@ -193,7 +196,10 @@
       return true;
     }
 
-    run_loop_->Run();
+    // Wait for the value to arrive.
+    RunLoop loop;
+    AutoReset<RepeatingClosure> quit_loop(&ready_signal_, loop.QuitClosure());
+    loop.Run();
 
     return IsReady();
   }
@@ -206,7 +212,7 @@
 
   // Waits for the value to arrive, and returns the I-th value.
   //
-  // Will DCHECK if a timeout happens.
+  // Will CHECK if a timeout happens.
   //
   // Example usage:
   //
@@ -223,7 +229,7 @@
 
   // Waits for the value to arrive, and returns the value with the given type.
   //
-  // Will DCHECK if a timeout happens.
+  // Will CHECK if a timeout happens.
   //
   // Example usage:
   //
@@ -302,17 +308,19 @@
 
   // Sets the value of the future.
   // This will unblock any pending Wait() or Get() call.
-  // This can only be called once.
   void SetValue(Types... values) {
     DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
 
-    DCHECK(!values_.has_value())
-        << "Overwriting previously stored value of the TestFuture."
-           "If you expect this new value, be sure to first "
-           "consume the stored value by calling `Take()` or `Clear()`";
+    auto new_values = std::make_tuple(std::forward<Types>(values)...);
 
-    values_ = std::make_tuple(std::forward<Types>(values)...);
-    run_loop_->Quit();
+    EXPECT_FALSE(values_.has_value())
+        << "Received new value " << ToString(new_values)  //
+        << " before old value " << ToString(GetTuple())
+        << " was consumed through Take() or Clear().";
+
+    values_ = std::move(new_values);
+
+    ready_signal_.Run();
   }
 
   // Clears the future, allowing it to be reused and accept a new value.
@@ -330,7 +338,7 @@
 
   // Waits for the value to arrive, and returns a reference to it.
   //
-  // Will DCHECK if a timeout happens.
+  // Will CHECK if a timeout happens.
   template <typename T = TupleType, internal::EnableIfSingleValue<T> = true>
   [[nodiscard]] const auto& Get() {
     return std::get<0>(GetTuple());
@@ -338,7 +346,7 @@
 
   // Waits for the value to arrive, and returns it.
   //
-  // Will DCHECK if a timeout happens.
+  // Will CHECK if a timeout happens.
   template <typename T = TupleType, internal::EnableIfSingleValue<T> = true>
   [[nodiscard]] auto Take() {
     return std::get<0>(TakeTuple());
@@ -350,7 +358,7 @@
 
   // Waits for the values to arrive, and returns a tuple with the values.
   //
-  // Will DCHECK if a timeout happens.
+  // Will CHECK if a timeout happens.
   template <typename T = TupleType, internal::EnableIfMultiValue<T> = true>
   [[nodiscard]] const TupleType& Get() {
     return GetTuple();
@@ -358,7 +366,7 @@
 
   // Waits for the values to arrive, and moves a tuple with the values out.
   //
-  // Will DCHECK if a timeout happens.
+  // Will CHECK if a timeout happens.
   template <typename T = TupleType, internal::EnableIfMultiValue<T> = true>
   [[nodiscard]] TupleType Take() {
     return TakeTuple();
@@ -368,23 +376,22 @@
   [[nodiscard]] const TupleType& GetTuple() {
     DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
     bool success = Wait();
-    DCHECK(success) << "Waiting for value timed out.";
+    CHECK(success) << "Waiting for value timed out.";
     return values_.value();
   }
 
   [[nodiscard]] TupleType TakeTuple() {
     DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
     bool success = Wait();
-    DCHECK(success) << "Waiting for value timed out.";
+    CHECK(success) << "Waiting for value timed out.";
 
-    run_loop_ = std::make_unique<RunLoop>();
     return std::exchange(values_, {}).value();
   }
 
   SEQUENCE_CHECKER(sequence_checker_);
 
-  std::unique_ptr<RunLoop> run_loop_ GUARDED_BY_CONTEXT(sequence_checker_) =
-      std::make_unique<RunLoop>();
+  base::RepeatingClosure ready_signal_ GUARDED_BY_CONTEXT(sequence_checker_) =
+      base::DoNothing();
 
   absl::optional<TupleType> values_ GUARDED_BY_CONTEXT(sequence_checker_);
 
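
For orientation, a minimal usage sketch (not part of the diff) of the consume-before-reuse pattern that the new `EXPECT_FALSE` in `SetValue()` checks for; `StartAsyncWork` is a hypothetical async API assumed to reply with 1 and then 2:

    base::test::TestFuture<int> future;

    StartAsyncWork(future.GetCallback());  // Hypothetical async call.
    EXPECT_EQ(1, future.Get());            // Waits for the first value.

    future.Clear();                        // Consume before the next value.
    StartAsyncWork(future.GetCallback());
    EXPECT_EQ(2, future.Take());           // Take() also consumes the value.
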
diff --git a/base/test/test_future_unittest.cc b/base/test/test_future_unittest.cc
index 4da3a8a..f8991a9 100644
--- a/base/test/test_future_unittest.cc
+++ b/base/test/test_future_unittest.cc
@@ -99,7 +99,7 @@
   static bool success;
   static TestFuture<AnyType> future;
 
-  EXPECT_FATAL_FAILURE({ success = future.Wait(); }, "timed out");
+  EXPECT_NONFATAL_FAILURE({ success = future.Wait(); }, "timed out");
 
   EXPECT_FALSE(success);
 }
@@ -116,12 +116,12 @@
   EXPECT_EQ(expected_value, actual_value);
 }
 
-TEST_F(TestFutureTest, GetShouldDcheckIfTimeoutHappens) {
+TEST_F(TestFutureTest, GetShouldCheckIfTimeoutHappens) {
   ScopedRunLoopTimeout timeout(FROM_HERE, Milliseconds(1));
 
   TestFuture<AnyType> future;
 
-  EXPECT_DCHECK_DEATH_WITH((void)future.Get(), "timed out");
+  EXPECT_CHECK_DEATH_WITH(std::ignore = future.Get(), "timed out");
 }
 
 TEST_F(TestFutureTest, TakeShouldWorkWithMoveOnlyValue) {
@@ -135,12 +135,12 @@
   EXPECT_EQ(expected_data, actual_value.data);
 }
 
-TEST_F(TestFutureTest, TakeShouldDcheckIfTimeoutHappens) {
+TEST_F(TestFutureTest, TakeShouldCheckIfTimeoutHappens) {
   ScopedRunLoopTimeout timeout(FROM_HERE, Milliseconds(1));
 
   TestFuture<AnyType> future;
 
-  EXPECT_DCHECK_DEATH_WITH((void)future.Take(), "timed out");
+  EXPECT_CHECK_DEATH_WITH(std::ignore = future.Take(), "timed out");
 }
 
 TEST_F(TestFutureTest, IsReadyShouldBeTrueWhenValueIsSet) {
@@ -168,8 +168,7 @@
 
   future.SetValue(kAnyValue);
 
-  EXPECT_DCHECK_DEATH_WITH(future.SetValue(kOtherValue),
-                           "Overwriting previously stored value");
+  EXPECT_NONFATAL_FAILURE(future.SetValue(kOtherValue), "Received new value");
 }
 
 TEST_F(TestFutureTest, ShouldAllowReuseIfPreviousValueIsFirstConsumed) {
@@ -375,6 +374,30 @@
   EXPECT_EQ(future.Take(), std::make_tuple("second value", 2));
 }
 
+TEST_F(TestFutureTest, ShouldPrintCurrentValueIfItIsOverwritten) {
+  using UnprintableValue = MoveOnlyValue;
+
+  TestFuture<const char*, int, UnprintableValue> future;
+
+  future.SetValue("first-value", 1111, UnprintableValue());
+
+  EXPECT_NONFATAL_FAILURE(
+      future.SetValue("second-value", 2222, UnprintableValue()),
+      "old value <first-value, 1111, [4-byte object at 0x");
+}
+
+TEST_F(TestFutureTest, ShouldPrintNewValueIfItOverwritesOldValue) {
+  using UnprintableValue = MoveOnlyValue;
+
+  TestFuture<const char*, int, UnprintableValue> future;
+
+  future.SetValue("first-value", 1111, UnprintableValue());
+
+  EXPECT_NONFATAL_FAILURE(
+      future.SetValue("second-value", 2222, UnprintableValue()),
+      "new value <second-value, 2222, [4-byte object at 0x");
+}
+
 using TestFutureWithoutValuesTest = TestFutureTest;
 
 TEST_F(TestFutureWithoutValuesTest, IsReadyShouldBeTrueWhenSetValueIsInvoked) {
@@ -417,4 +440,20 @@
   EXPECT_TRUE(future.IsReady());
 }
 
+TEST(TestFutureWithoutSingleThreadTaskEnvironment,
+     CanCreateTestFutureBeforeTaskEnvironment) {
+  TestFuture<AnyType> future;
+
+  // If we get here, the test passes, since it means we can create a
+  // `TestFuture` without having a `TaskEnvironment`.
+}
+
+TEST(TestFutureWithoutSingleThreadTaskEnvironment,
+     WaitShouldDcheckWithoutTaskEnvironment) {
+  TestFuture<AnyType> future;
+
+  EXPECT_CHECK_DEATH_WITH((void)future.Wait(),
+                          "requires a single-threaded context");
+}
+
 }  // namespace base::test
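
As a companion to the reuse tests above, a short sketch of the consume-then-reuse pattern (illustrative only):

    // Take() consumes the stored value, after which the same future can
    // accept a new one without triggering the "Received new value" failure.
    base::test::TestFuture<int> future;
    future.SetValue(1);
    EXPECT_EQ(1, future.Take());
    future.SetValue(2);
    EXPECT_EQ(2, future.Take());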
diff --git a/base/test/test_reg_util_win.cc b/base/test/test_reg_util_win.cc
index 101a598..58b5450 100644
--- a/base/test/test_reg_util_win.cc
+++ b/base/test/test_reg_util_win.cc
@@ -12,6 +12,7 @@
 #include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
+#include "base/time/time_override.h"
 #include "base/uuid.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -21,6 +22,11 @@
 
 namespace {
 
+// Overriding HKLM is not permitted in some environments. This is controlled by
+// this bool and disallowed by calling
+// SetAllowHKLMRegistryOverrideForIntegrationTests(false).
+bool g_hklm_override_allowed = true;
+
 constexpr char16_t kTimestampDelimiter[] = u"$";
 constexpr wchar_t kTempTestKeyPath[] = L"Software\\Chromium\\TempTestKeys";
 
@@ -82,7 +88,15 @@
 }
 
 RegistryOverrideManager::RegistryOverrideManager()
-    : timestamp_(base::Time::Now()), test_key_root_(kTempTestKeyPath) {
+    : timestamp_(base::subtle::TimeNowIgnoringOverride()),
+      test_key_root_(kTempTestKeyPath) {
+  // Using |base::subtle::TimeNowIgnoringOverride()| instead of
+  // |base::Time::Now()| gives us the real current time instead of the mock
+  // time in 1970 when MOCK_TIME is enabled. This prevents test bugs where new
+  // instances of RegistryOverrideManager clean up redirected registry paths
+  // that have a timestamp from 1970, which would then cause the currently
+  // running tests to fail since their expected reg keys were deleted by the
+  // other test.
   DeleteStaleTestKeys(timestamp_, test_key_root_);
 }
 
@@ -101,6 +115,10 @@
 
 void RegistryOverrideManager::OverrideRegistry(HKEY override,
                                                std::wstring* override_path) {
+  CHECK(override != HKEY_LOCAL_MACHINE || g_hklm_override_allowed)
+      << "Use of RegistryOverrideManager to override HKLM is not permitted in "
+         "this environment.";
+
   std::wstring key_path = GenerateTempKeyPath(test_key_root_, timestamp_);
 
   base::win::RegKey temp_key;
@@ -114,8 +132,14 @@
     override_path->assign(key_path);
 }
 
+void RegistryOverrideManager::SetAllowHKLMRegistryOverrideForIntegrationTests(
+    bool allow) {
+  g_hklm_override_allowed = allow;
+}
+
 std::wstring GenerateTempKeyPath() {
-  return GenerateTempKeyPath(kTempTestKeyPath, base::Time::Now());
+  return GenerateTempKeyPath(kTempTestKeyPath,
+                             base::subtle::TimeNowIgnoringOverride());
 }
 
 }  // namespace registry_util
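
For context, a sketch of how these overrides are normally applied in a test (illustrative; the key path is a placeholder):

    // Redirect HKCU so the test starts from a clean registry; the temporary
    // key is timestamped and stale keys from crashed tests are purged.
    registry_util::RegistryOverrideManager override_manager;
    ASSERT_NO_FATAL_FAILURE(
        override_manager.OverrideRegistry(HKEY_CURRENT_USER));
    base::win::RegKey key(HKEY_CURRENT_USER, L"Software\\ExampleTest",
                          KEY_ALL_ACCESS);  // Hits the redirected hive.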
diff --git a/base/test/test_reg_util_win.h b/base/test/test_reg_util_win.h
index 91cdeab..8f47eaf 100644
--- a/base/test/test_reg_util_win.h
+++ b/base/test/test_reg_util_win.h
@@ -13,6 +13,10 @@
 #include "base/time/time.h"
 #include "base/win/registry.h"
 
+namespace content {
+class BrowserTestBase;
+}
+
 namespace registry_util {
 
 // Allows a test to easily override registry hives so that it can start from a
@@ -43,12 +47,16 @@
   // behavior.
   // Optional return of the registry override path.
   // Calls to these functions must be wrapped in ASSERT_NO_FATAL_FAILURE to
-  // ensure that tests do not proceeed in case of failure to override.
+  // ensure that tests do not proceed in case of failure to override.
+  // HKEY_LOCAL_MACHINE should not be overridden during initialization for
+  // tests that launch sandboxed processes, e.g. browser tests. It is safe to
+  // use from within a test fixture and in unit tests.
   void OverrideRegistry(HKEY override);
   void OverrideRegistry(HKEY override, std::wstring* override_path);
 
  private:
   friend class RegistryOverrideManagerTest;
+  friend class content::BrowserTestBase;
 
   // Keeps track of one override.
   class ScopedRegistryKeyOverride {
@@ -70,6 +78,10 @@
   RegistryOverrideManager(const base::Time& timestamp,
                           const std::wstring& test_key_root);
 
+  // Whether or not to allow using the RegistryOverrideManager for HKLM (e.g. in
+  // browser_tests).
+  static void SetAllowHKLMRegistryOverrideForIntegrationTests(bool allow);
+
   base::Time timestamp_;
   std::wstring guid_;
 
diff --git a/base/test/test_reg_util_win_unittest.cc b/base/test/test_reg_util_win_unittest.cc
index 54ffa72..be3f554 100644
--- a/base/test/test_reg_util_win_unittest.cc
+++ b/base/test/test_reg_util_win_unittest.cc
@@ -10,6 +10,7 @@
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
+#include "base/test/task_environment.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -101,7 +102,8 @@
 }
 
 TEST_F(RegistryOverrideManagerTest, DeleteStaleKeys) {
-  base::Time::Exploded kTestTimeExploded = {2013, 11, 1, 4, 0, 0, 0, 0};
+  static constexpr base::Time::Exploded kTestTimeExploded = {
+      .year = 2013, .month = 11, .day_of_week = 1, .day_of_month = 4};
   base::Time kTestTime;
   EXPECT_TRUE(base::Time::FromUTCExploded(kTestTimeExploded, &kTestTime));
 
@@ -130,4 +132,82 @@
   ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_future));
 }
 
+TEST_F(RegistryOverrideManagerTest, DoesNotUseMockTime) {
+  // This test targets scenarios where multiple tests using
+  // `RegistryOverrideManager` run at the same time: new instances of
+  // `RegistryOverrideManager` would clean up any redirected registry paths
+  // whose timestamps were generated (in 1970) by `base::Time::Now()` with
+  // MOCK_TIME enabled, which would then cause the currently running tests to
+  // fail since their expected reg keys were deleted by the other test.
+
+  // To fix this, `RegistryOverrideManager` now uses
+  // `base::subtle::TimeNowIgnoringOverride()` instead of `base::Time::Now()`,
+  // so the real current time is used instead of the mock time in 1970. This
+  // resolves the related `RegistryOverrideManager` test failures when using
+  // MOCK_TIME. This test ensures we fetch the real current time even when
+  // MOCK_TIME is enabled.
+
+  // Use mock time, which is based on 1970-01-03, to init the RegKey. The
+  // RegKey contains the registry information for the
+  // `RegistryOverrideManager`, including a timestamp that is used to delete
+  // stale keys left over from crashed tests.
+  base::test::TaskEnvironment test_task_env(
+      base::test::TaskEnvironment::TimeSource::MOCK_TIME);
+
+  const base::Time kTestTime = base::Time::Now();
+
+  std::wstring mock_time_path_stale =
+      FakeOverrideManagerPath(kTestTime - base::Days(5));
+  std::wstring mock_time_path_current =
+      FakeOverrideManagerPath(kTestTime - base::Minutes(1));
+
+  ASSERT_NO_FATAL_FAILURE(CreateKey(mock_time_path_stale));
+  ASSERT_NO_FATAL_FAILURE(CreateKey(mock_time_path_current));
+
+  // Use real time to init `path_real`.
+  std::wstring path_real = GenerateTempKeyPath();
+  ASSERT_NO_FATAL_FAILURE(CreateKey(path_real));
+
+  ASSERT_NO_FATAL_FAILURE(CreateManager(kTestTime));
+  manager_.reset();
+
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(mock_time_path_stale));
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(mock_time_path_current));
+  // `path_real` should exist as it was created using real time, not mock time
+  // in 1970.
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_real));
+
+  // Use real time from `base::subtle::TimeNowIgnoringOverride()` to init the
+  // following new set of keys.
+  const base::Time kTestTime_new = base::subtle::TimeNowIgnoringOverride();
+  std::wstring system_time_path_stale =
+      FakeOverrideManagerPath(kTestTime_new - base::Days(5));
+  std::wstring system_time_path_current =
+      FakeOverrideManagerPath(kTestTime_new - base::Minutes(1));
+
+  ASSERT_NO_FATAL_FAILURE(CreateKey(system_time_path_stale));
+  ASSERT_NO_FATAL_FAILURE(CreateKey(system_time_path_current));
+
+  ASSERT_NO_FATAL_FAILURE(CreateManager(kTestTime_new));
+  manager_.reset();
+
+  // Check old keys created with mock time.
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(mock_time_path_stale));
+  // Since the old keys were created using mock time in 1970, these keys are
+  // deleted.
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(mock_time_path_current));
+  // `path_real` should exist as it was created using real time, not mock time
+  // in 1970.
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_real));
+
+  // Create a new manager with real system time.
+  const base::Time kTestTime_latest = base::subtle::TimeNowIgnoringOverride();
+  ASSERT_NO_FATAL_FAILURE(CreateManager(kTestTime_latest));
+  manager_.reset();
+
+  // Check new keys created with current time
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(system_time_path_stale));
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(system_time_path_current));
+}
+
 }  // namespace registry_util
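
A compact sketch of the distinction the test above exercises (illustrative; per the comments above, the mocked clock starts around 1970):

    base::test::TaskEnvironment task_environment(
        base::test::TaskEnvironment::TimeSource::MOCK_TIME);
    // Under MOCK_TIME, Time::Now() returns the mocked clock while
    // TimeNowIgnoringOverride() still returns the real wall clock.
    const base::Time mocked = base::Time::Now();
    const base::Time real = base::subtle::TimeNowIgnoringOverride();
    EXPECT_LT(mocked, real);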
diff --git a/base/test/test_suite.cc b/base/test/test_suite.cc
index 057514f..ef7b2e5 100644
--- a/base/test/test_suite.cc
+++ b/base/test/test_suite.cc
@@ -11,8 +11,8 @@
 #include <string>
 #include <vector>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/tagging.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/tagging.h"
 #include "base/at_exit.h"
 #include "base/base_paths.h"
 #include "base/base_switches.h"
@@ -30,6 +30,7 @@
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/memory/raw_ptr.h"
+#include "base/metrics/statistics_recorder.h"
 #include "base/no_destructor.h"
 #include "base/path_service.h"
 #include "base/process/launch.h"
@@ -58,7 +59,7 @@
 #include "testing/multiprocess_func_list.h"
 
 #if BUILDFLAG(IS_APPLE)
-#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/apple/scoped_nsautorelease_pool.h"
 #endif  // BUILDFLAG(IS_APPLE)
 
 #if BUILDFLAG(IS_IOS)
@@ -117,7 +118,14 @@
 
 class ResetCommandLineBetweenTests : public testing::EmptyTestEventListener {
  public:
-  ResetCommandLineBetweenTests() : old_command_line_(CommandLine::NO_PROGRAM) {}
+  ResetCommandLineBetweenTests() : old_command_line_(CommandLine::NO_PROGRAM) {
+    // TODO(crbug.com/1123627): Remove this after A/B test is done.
+    // Work around a test-specific race condition with StatisticsRecorder lock
+    // initialization checking CommandLine by ensuring it's created here (when
+    // we start the test process), rather than in some arbitrary test. This
+    // prevents a race with OnTestEnd().
+    StatisticsRecorder::FindHistogram("Dummy");
+  }
 
   ResetCommandLineBetweenTests(const ResetCommandLineBetweenTests&) = delete;
   ResetCommandLineBetweenTests& operator=(const ResetCommandLineBetweenTests&) =
@@ -333,24 +341,24 @@
                          BindOnce(&TestSuite::Run, Unretained(&test_suite)));
 }
 
-TestSuite::TestSuite(int argc, char** argv) {
-  Construct(argc, argv);
+TestSuite::TestSuite(int argc, char** argv) : argc_(argc), argv_(argv) {
+  PreInitialize();
 }
 
 #if BUILDFLAG(IS_WIN)
-TestSuite::TestSuite(int argc, wchar_t** argv) {
-  std::vector<std::string> arg_strs;
-  arg_strs.reserve(argc);
-  std::vector<char*> arg_ptrs;
-  arg_ptrs.reserve(argc);
-  std::for_each(argv, argv + argc, [&](wchar_t* arg) {
-    arg_strs.push_back(WideToUTF8(arg));
-    arg_ptrs.push_back(arg_strs.back().data());
+TestSuite::TestSuite(int argc, wchar_t** argv) : argc_(argc) {
+  argv_as_strings_.reserve(argc);
+  argv_as_pointers_.reserve(argc + 1);
+  std::for_each(argv, argv + argc, [this](wchar_t* arg) {
+    argv_as_strings_.push_back(WideToUTF8(arg));
+    // Have to use .data() here to get a mutable pointer.
+    argv_as_pointers_.push_back(argv_as_strings_.back().data());
   });
   // `argv` is specified as containing `argc + 1` pointers, of which the last is
   // null.
-  arg_ptrs.push_back(nullptr);
-  Construct(argc, arg_ptrs.data());
+  argv_as_pointers_.push_back(nullptr);
+  argv_ = argv_as_pointers_.data();
+  PreInitialize();
 }
 #endif  // BUILDFLAG(IS_WIN)
 
@@ -363,7 +371,7 @@
 // Initialize().  See bug 6436.
 int TestSuite::Run() {
 #if BUILDFLAG(IS_APPLE)
-  mac::ScopedNSAutoreleasePool scoped_pool;
+  apple::ScopedNSAutoreleasePool scoped_pool;
 #endif
 
   std::string client_func =
@@ -490,6 +498,12 @@
 void TestSuite::Initialize() {
   DCHECK(!is_initialized_);
 
+  InitializeFromCommandLine(&argc_, argv_);
+
+  // Logging must be initialized before any thread has a chance to call logging
+  // functions.
+  InitializeLogging();
+
   // The AsanService causes ASAN errors to emit additional information. It is
   // helpful on its own. It is also required by ASAN BackupRefPtr when
   // reconfiguring PartitionAlloc below.
@@ -593,13 +607,13 @@
   is_initialized_ = true;
 }
 
-void TestSuite::InitializeFromCommandLine(int argc, char** argv) {
-  initialized_command_line_ = CommandLine::Init(argc, argv);
-  testing::InitGoogleTest(&argc, argv);
-  testing::InitGoogleMock(&argc, argv);
+void TestSuite::InitializeFromCommandLine(int* argc, char** argv) {
+  // CommandLine::Init() is called earlier from PreInitialize().
+  testing::InitGoogleTest(argc, argv);
+  testing::InitGoogleMock(argc, argv);
 
 #if BUILDFLAG(IS_IOS)
-  InitIOSArgs(argc, argv);
+  InitIOSArgs(*argc, argv);
 #endif
 }
 
@@ -612,14 +626,6 @@
   debug::StopProfiling();
 }
 
-void TestSuite::Construct(int argc, char** argv) {
-  PreInitialize();
-  InitializeFromCommandLine(argc, argv);
-  // Logging must be initialized before any thread has a chance to call logging
-  // functions.
-  InitializeLogging();
-}
-
 void TestSuite::PreInitialize() {
   DCHECK(!is_initialized_);
 
@@ -661,6 +667,10 @@
   at_exit_manager_ = std::make_unique<AtExitManager>();
 #endif
 
+  // This needs to be done during construction as some users of this class rely
+  // on the constructor to initialize the CommandLine.
+  initialized_command_line_ = CommandLine::Init(argc_, argv_);
+
   // Don't add additional code to this function.  Instead add it to
   // Initialize().  See bug 6436.
 }
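
For reference, the usual embedding of `TestSuite` in a test binary, matching the `LaunchUnitTests` call visible in the context above (a sketch; the file name is illustrative):

    // run_all_unittests.cc (sketch). The constructor now only pre-initializes
    // and records argc/argv; gtest/gmock and logging are set up later from
    // Initialize().
    int main(int argc, char** argv) {
      base::TestSuite test_suite(argc, argv);
      return base::LaunchUnitTests(
          argc, argv,
          base::BindOnce(&base::TestSuite::Run, base::Unretained(&test_suite)));
    }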
diff --git a/base/test/test_suite.h b/base/test/test_suite.h
index 7c31603..f1d64aa 100644
--- a/base/test/test_suite.h
+++ b/base/test/test_suite.h
@@ -22,6 +22,11 @@
 #include "base/test/trace_to_file.h"
 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
 
+#if BUILDFLAG(IS_WIN)
+#include <vector>
+#include "base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_exclusion.h"
+#endif
+
 namespace logging {
 class ScopedLogAssertHandler;
 }
@@ -76,7 +81,7 @@
   // Override these for custom test handling. Use these instead of putting
   // complex code in your constructor/destructor.
   virtual void Initialize();
-  virtual void InitializeFromCommandLine(int argc, char** argv);
+  virtual void InitializeFromCommandLine(int* argc, char** argv);
   virtual int RunAllTests();
   virtual void Shutdown();
 
@@ -85,11 +90,6 @@
   std::unique_ptr<base::AtExitManager> at_exit_manager_;
 
  private:
-  // Implementation of the constructor. Factored to a helper so that the
-  // Windows-specific constructor can delegate to it after doing some string
-  // conversion.
-  void Construct(int argc, char** argv);
-
   // Basic initialization for the test suite happens here.
   void PreInitialize();
 
@@ -99,15 +99,22 @@
   test::TraceToFile trace_to_file_;
 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
 
-  bool initialized_command_line_ = false;
-
   raw_ptr<XmlUnitTestResultPrinter, DanglingUntriaged> printer_ = nullptr;
 
   std::unique_ptr<logging::ScopedLogAssertHandler> assert_handler_;
 
+  bool initialized_command_line_ = false;
   bool check_for_leaked_globals_ = true;
   bool check_for_thread_and_process_priority_ = true;
   bool is_initialized_ = false;
+  int argc_;
+#if BUILDFLAG(IS_WIN)
+  // We need argv_as_pointers_.data() to have type char**, so we can't use
+  // raw_ptr here.
+  RAW_PTR_EXCLUSION std::vector<char*> argv_as_pointers_;
+  std::vector<std::string> argv_as_strings_;
+#endif
+  raw_ptr<char*> argv_;
 };
 
 }  // namespace base
diff --git a/base/test/test_support_ios.mm b/base/test/test_support_ios.mm
index 6f115c2..97be846 100644
--- a/base/test/test_support_ios.mm
+++ b/base/test/test_support_ios.mm
@@ -10,7 +10,7 @@
 #include "base/command_line.h"
 #include "base/debug/debugger.h"
 #include "base/message_loop/message_pump.h"
-#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/message_pump_apple.h"
 #import "base/test/ios/google_test_runner_delegate.h"
 #include "base/test/test_suite.h"
 #include "base/test/test_switches.h"
diff --git a/base/test/test_switches.cc b/base/test/test_switches.cc
index 7bca524..1b9a3c8 100644
--- a/base/test/test_switches.cc
+++ b/base/test/test_switches.cc
@@ -38,6 +38,15 @@
 // pattern per line).
 const char switches::kTestLauncherFilterFile[] = "test-launcher-filter-file";
 
+// Force running the test cases listed in the positive filter file by full
+// name (not wildcard). If set, only exact positive filters from the filter
+// file are allowed. Passing --gtest_filter, positive wildcard filters, or
+// negative filters will fail the test launcher.
+// If any listed test case is disabled or deleted in source files, the test
+// suite fails.
+const char switches::kEnforceExactPositiveFilter[] =
+    "enforce-exact-positive-filter";
+
 // Force running all requested tests and retries even if too many test errors
 // occur.
 const char switches::kTestLauncherForceRunBrokenTests[] =
diff --git a/base/test/test_switches.h b/base/test/test_switches.h
index 14a4cad..e3a57a9 100644
--- a/base/test/test_switches.h
+++ b/base/test/test_switches.h
@@ -11,6 +11,7 @@
 
 // All switches in alphabetical order. The switches should be documented
 // alongside the definition of their values in the .cc file.
+extern const char kEnforceExactPositiveFilter[];
 extern const char kHelpFlag[];
 extern const char kIsolatedScriptTestLauncherRetryLimit[];
 extern const char kRebaselinePixelTests[];
diff --git a/base/test/test_trace_processor.cc b/base/test/test_trace_processor.cc
index ca5be3f..fe06748 100644
--- a/base/test/test_trace_processor.cc
+++ b/base/test/test_trace_processor.cc
@@ -3,12 +3,54 @@
 // found in the LICENSE file.
 
 #include "base/test/test_trace_processor.h"
+#include "base/test/chrome_track_event.descriptor.h"
+#include "base/test/perfetto_sql_stdlib.h"
 #include "base/trace_event/trace_log.h"
+#include "third_party/perfetto/protos/perfetto/trace/extension_descriptor.pbzero.h"
 
 namespace base::test {
 
 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
 
+namespace {
+// Emitting the chrome_track_event.descriptor into the trace allows the trace
+// processor to parse the arguments during ingestion of the trace events.
+// This function emits the descriptor generated from
+// base/tracing/protos/chrome_track_event.proto so we can use TestTraceProcessor
+// to write tests based on new arguments/types added in the same patch.
+void EmitChromeTrackEventDescriptor() {
+  base::TrackEvent::Trace([&](base::TrackEvent::TraceContext ctx) {
+    protozero::MessageHandle<perfetto::protos::pbzero::TracePacket> handle =
+        ctx.NewTracePacket();
+    auto* extension_descriptor = handle->BeginNestedMessage<protozero::Message>(
+        perfetto::protos::pbzero::TracePacket::kExtensionDescriptorFieldNumber);
+    extension_descriptor->AppendBytes(
+        perfetto::protos::pbzero::ExtensionDescriptor::kExtensionSetFieldNumber,
+        perfetto::kChromeTrackEventDescriptor.data(),
+        perfetto::kChromeTrackEventDescriptor.size());
+    handle->Finalize();
+  });
+}
+
+std::string kChromeSqlModuleName = "chrome";
+
+// Returns a vector of pairs of strings consisting of
+// {include_key, sql_file_contents}. For example, the include key for
+// `chrome/scroll_jank/utils.sql` is `chrome.scroll_jank.utils`.
+// The output is used to override the Chrome SQL module in the trace processor.
+TestTraceProcessorImpl::PerfettoSQLModule GetChromeStdlib() {
+  std::vector<std::pair<std::string, std::string>> stdlib;
+  for (const auto& file_to_sql :
+       perfetto::trace_processor::chrome_stdlib::kFileToSql) {
+    std::string include_key;
+    base::ReplaceChars(file_to_sql.path, "/", ".", &include_key);
+    stdlib.emplace_back(kChromeSqlModuleName + "." + include_key,
+                        file_to_sql.sql);
+  }
+  return stdlib;
+}
+}  // namespace
+
 TraceConfig DefaultTraceConfig(const StringPiece& category_filter_string,
                                bool privacy_filtering) {
   TraceConfig trace_config;
@@ -50,7 +92,12 @@
   return trace_config;
 }
 
-TestTraceProcessor::TestTraceProcessor() = default;
+TestTraceProcessor::TestTraceProcessor() {
+  auto status = test_trace_processor_.OverrideSqlModule(kChromeSqlModuleName,
+                                                        GetChromeStdlib());
+  CHECK(status.ok());
+}
+
 TestTraceProcessor::~TestTraceProcessor() = default;
 
 void TestTraceProcessor::StartTrace(const StringPiece& category_filter_string,
@@ -84,6 +131,7 @@
 }
 
 absl::Status TestTraceProcessor::StopAndParseTrace() {
+  EmitChromeTrackEventDescriptor();
   base::TrackEvent::Flush();
   session_->StopBlocking();
   std::vector<char> trace = session_->ReadTraceBlocking();
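
A rough usage sketch for `TestTraceProcessor` (illustrative; only `StartTrace` and `StopAndParseTrace` appear in this patch, and the `RunQuery` accessor is an assumption):

    // With the descriptor and Chrome SQL stdlib injected automatically,
    // queries can rely on arguments/modules added in the same patch.
    base::test::TestTraceProcessor ttp;
    ttp.StartTrace("input,benchmark");
    // ... exercise the code under test so it emits trace events ...
    ASSERT_TRUE(ttp.StopAndParseTrace().ok());
    auto result = ttp.RunQuery("SELECT COUNT(*) FROM slice");  // Assumed API.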
diff --git a/base/test/test_trace_processor_impl.cc b/base/test/test_trace_processor_impl.cc
index 8245b33..a20e3d1 100644
--- a/base/test/test_trace_processor_impl.cc
+++ b/base/test/test_trace_processor_impl.cc
@@ -94,4 +94,12 @@
   return ParseTrace(std::move(data_copy), size);
 }
 
+absl::Status TestTraceProcessorImpl::OverrideSqlModule(
+    const std::string& module_name,
+    const TestTraceProcessorImpl::PerfettoSQLModule& module) {
+  auto status =
+      trace_processor_->RegisterSqlModule({module_name, module, true});
+  return status.ok() ? absl::OkStatus() : absl::UnknownError(status.message());
+}
+
 }  // namespace base::test
diff --git a/base/test/test_trace_processor_impl.h b/base/test/test_trace_processor_impl.h
index 40e79dc..770284d 100644
--- a/base/test/test_trace_processor_impl.h
+++ b/base/test/test_trace_processor_impl.h
@@ -51,20 +51,32 @@
 
 class TEST_TRACE_PROCESSOR_EXPORT TestTraceProcessorImpl {
  public:
+  // Note: All arguments must be received as refs/ptrs, because receiving them
+  // as moved copies on Windows causes them to be destroyed in
+  // TEST_TRACE_PROCESSOR_IMPL's DLL after having been allocated in the
+  // caller's DLL, which is not allowed.
+
   TestTraceProcessorImpl();
   ~TestTraceProcessorImpl();
 
   TestTraceProcessorImpl(TestTraceProcessorImpl&& other);
   TestTraceProcessorImpl& operator=(TestTraceProcessorImpl&& other);
 
-  absl::Status ParseTrace(std::unique_ptr<uint8_t[]> buf, size_t size);
   absl::Status ParseTrace(const std::vector<char>& raw_trace);
 
   // Runs the sql query on the parsed trace and returns the result as a
   // vector of strings.
   QueryResultOrError ExecuteQuery(const std::string& sql) const;
 
+  using PerfettoSQLModule = std::vector<std::pair<std::string, std::string>>;
+  // Overrides the PerfettoSQL module named |name| with |module|, which
+  // contains pairs of strings {include_key, sql_file_contents}.
+  absl::Status OverrideSqlModule(const std::string& name,
+                                 const PerfettoSQLModule& module);
+
  private:
+  absl::Status ParseTrace(std::unique_ptr<uint8_t[]> buf, size_t size);
+
   std::unique_ptr<perfetto::trace_processor::Config> config_;
   std::unique_ptr<perfetto::trace_processor::TraceProcessor> trace_processor_;
 };
diff --git a/base/test/values_test_util.cc b/base/test/values_test_util.cc
index 0e29b77..170d0a4 100644
--- a/base/test/values_test_util.cc
+++ b/base/test/values_test_util.cc
@@ -59,8 +59,9 @@
 }
 
 void ExpectStringValue(const std::string& expected_str, const Value& actual) {
-  EXPECT_EQ(Value::Type::STRING, actual.type());
-  EXPECT_EQ(expected_str, actual.GetString());
+  const std::string* maybe_string = actual.GetIfString();
+  ASSERT_TRUE(maybe_string);
+  EXPECT_EQ(expected_str, *maybe_string);
 }
 
 namespace test {
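
A small illustration of the behavioral change (a sketch; the value is arbitrary):

    // With GetIfString() plus ASSERT, a non-string Value now produces a test
    // failure instead of crashing inside Value::GetString().
    base::Value not_a_string(42);
    ExpectStringValue("42", not_a_string);  // Fails the assertion, no crash.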
diff --git a/base/third_party/icu/README.chromium b/base/third_party/icu/README.chromium
index b738af6..c514fc5 100644
--- a/base/third_party/icu/README.chromium
+++ b/base/third_party/icu/README.chromium
@@ -14,5 +14,5 @@
 ICU macros should ICU be in use on the system. For the same reason, the
 functions and types have been put in the "base_icu" namespace.
 
-Note that this license file is marked as NOT_SHIPPED, since a more complete
+Note that this license file is marked as not shipped, since a more complete
 ICU license is included from //third_party/icu/README.chromium
diff --git a/base/threading/hang_watcher.cc b/base/threading/hang_watcher.cc
index f55bcc8..af5b53a 100644
--- a/base/threading/hang_watcher.cc
+++ b/base/threading/hang_watcher.cc
@@ -164,6 +164,10 @@
              "EnableHangWatcher",
              FEATURE_ENABLED_BY_DEFAULT);
 
+BASE_FEATURE(kEnableHangWatcherInZygoteChildren,
+             "EnableHangWatcherInZygoteChildren",
+             FEATURE_ENABLED_BY_DEFAULT);
+
 // Browser process.
 constexpr base::FeatureParam<int> kIOThreadLogLevel{
     &kEnableHangWatcher, "io_thread_log_level",
@@ -318,7 +322,8 @@
 }
 
 // static
-void HangWatcher::InitializeOnMainThread(ProcessType process_type) {
+void HangWatcher::InitializeOnMainThread(ProcessType process_type,
+                                         bool is_zygote_child) {
   DCHECK(!g_use_hang_watcher);
   DCHECK(g_io_thread_log_level == LoggingLevel::kNone);
   DCHECK(g_main_thread_log_level == LoggingLevel::kNone);
@@ -326,6 +331,14 @@
 
   bool enable_hang_watcher = base::FeatureList::IsEnabled(kEnableHangWatcher);
 
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+  if (is_zygote_child) {
+    enable_hang_watcher =
+        enable_hang_watcher &&
+        base::FeatureList::IsEnabled(kEnableHangWatcherInZygoteChildren);
+  }
+#endif
+
   // Do not start HangWatcher in the GPU process until the issue related to
   // invalid magic signature in the GPU WatchDog is fixed
   // (https://crbug.com/1297760).
@@ -544,12 +557,14 @@
 
 void HangWatcher::Start() {
   thread_.Start();
+  thread_started_ = true;
 }
 
 void HangWatcher::Stop() {
   g_keep_monitoring.store(false, std::memory_order_relaxed);
   should_monitor_.Signal();
   thread_.Join();
+  thread_started_ = false;
 
   // In production HangWatcher is always leaked but during testing it's possibly
   // stopped and restarted using a new instance. This makes sure the next call
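
A sketch of the intended call sites for the new parameter (illustrative; the two calls below belong to different processes and would never both run in one process):

    // In a Linux/ChromeOS child process forked from the zygote, hang watching
    // is additionally gated on kEnableHangWatcherInZygoteChildren:
    base::HangWatcher::InitializeOnMainThread(
        base::HangWatcher::ProcessType::kUtilityProcess,
        /*is_zygote_child=*/true);
    // In the browser process the extra gate never applies:
    base::HangWatcher::InitializeOnMainThread(
        base::HangWatcher::ProcessType::kBrowserProcess,
        /*is_zygote_child=*/false);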
diff --git a/base/threading/hang_watcher.h b/base/threading/hang_watcher.h
index 470f627..85b8e71 100644
--- a/base/threading/hang_watcher.h
+++ b/base/threading/hang_watcher.h
@@ -150,7 +150,8 @@
 
   // Initializes HangWatcher. Must be called once on the main thread during
   // startup while single-threaded.
-  static void InitializeOnMainThread(ProcessType process_type);
+  static void InitializeOnMainThread(ProcessType process_type,
+                                     bool is_zygote_child);
 
   // Returns the values that were set through InitializeOnMainThread() to their
   // default value. Used for testing since in prod initialization should happen
@@ -244,6 +245,10 @@
   // Begin executing the monitoring loop on the HangWatcher thread.
   void Start();
 
+  // Returns true if Start() has been called and Stop() has not been called
+  // since.
+  bool IsStarted() const { return thread_started_; }
+
   // Returns the value of the crash key with the time since last system power
   // resume.
   std::string GetTimeSinceLastSystemPowerResumeCrashKeyValue() const;
@@ -377,6 +382,7 @@
       GUARDED_BY_CONTEXT(hang_watcher_thread_checker_);
 
   base::DelegateSimpleThread thread_;
+  bool thread_started_ = false;
 
   RepeatingClosure after_monitor_closure_for_testing_;
   RepeatingClosure on_hang_closure_for_testing_;
@@ -493,7 +499,7 @@
   using TimeTicksInternalRepresentation =
       std::invoke_result<decltype(&TimeTicks::ToInternalValue),
                          TimeTicks>::type;
-  static_assert(std::is_same<TimeTicksInternalRepresentation, int64_t>::value,
+  static_assert(std::is_same_v<TimeTicksInternalRepresentation, int64_t>,
                 "Bit manipulations made by HangWatchDeadline need to be"
                 "adapted if internal representation of TimeTicks changes.");
 
@@ -529,7 +535,7 @@
   // necessary to run the proper checks to insure correctness of the conversion
   // that has to go through int_64t. (See DeadlineFromBits()).
   using BitsType = uint64_t;
-  static_assert(std::is_same<std::underlying_type<Flag>::type, BitsType>::value,
+  static_assert(std::is_same_v<std::underlying_type<Flag>::type, BitsType>,
                 "Flag should have the same underlying type as bits_ to "
                 "simplify thinking about bit operations");
 
diff --git a/base/threading/hang_watcher_unittest.cc b/base/threading/hang_watcher_unittest.cc
index badffab..424d92f 100644
--- a/base/threading/hang_watcher_unittest.cc
+++ b/base/threading/hang_watcher_unittest.cc
@@ -11,6 +11,7 @@
 #include "base/functional/callback.h"
 #include "base/functional/callback_helpers.h"
 #include "base/memory/raw_ptr.h"
+#include "base/metrics/field_trial_params.h"
 #include "base/run_loop.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/synchronization/lock.h"
@@ -57,6 +58,51 @@
 constexpr uint64_t kOnesThenZeroes = 0xAAAAAAAAAAAAAAAAu;
 constexpr uint64_t kZeroesThenOnes = 0x5555555555555555u;
 
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+class HangWatcherEnabledInZygoteChildTest
+    : public testing::TestWithParam<std::tuple<bool, bool>> {
+ public:
+  HangWatcherEnabledInZygoteChildTest() {
+    std::vector<base::test::FeatureRefAndParams> enabled_features =
+        kFeatureAndParams;
+    std::vector<test::FeatureRef> disabled_features;
+    if (std::get<0>(GetParam())) {
+      enabled_features.push_back(test::FeatureRefAndParams(
+          base::kEnableHangWatcherInZygoteChildren, {}));
+    } else {
+      disabled_features.push_back(base::kEnableHangWatcherInZygoteChildren);
+    }
+    feature_list_.InitWithFeaturesAndParameters(enabled_features,
+                                                disabled_features);
+    HangWatcher::InitializeOnMainThread(
+        HangWatcher::ProcessType::kUtilityProcess,
+        /*is_zygote_child=*/std::get<1>(GetParam()));
+  }
+
+  void TearDown() override { HangWatcher::UnitializeOnMainThreadForTesting(); }
+
+  HangWatcherEnabledInZygoteChildTest(
+      const HangWatcherEnabledInZygoteChildTest& other) = delete;
+  HangWatcherEnabledInZygoteChildTest& operator=(
+      const HangWatcherEnabledInZygoteChildTest& other) = delete;
+
+ protected:
+  base::test::ScopedFeatureList feature_list_;
+};
+
+TEST_P(HangWatcherEnabledInZygoteChildTest, IsEnabled) {
+  // If the kEnableHangWatcherInZygoteChildren feature is disabled and
+  // InitializeOnMainThread is called with is_zygote_child==true, IsEnabled()
+  // should return false. It should return true in all other situations.
+  ASSERT_EQ(std::get<0>(GetParam()) || !std::get<1>(GetParam()),
+            HangWatcher::IsEnabled());
+}
+
+INSTANTIATE_TEST_SUITE_P(HangWatcherZygoteTest,
+                         HangWatcherEnabledInZygoteChildTest,
+                         testing::Combine(testing::Bool(), testing::Bool()));
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
 // Waits on provided WaitableEvent before executing and signals when done.
 class BlockingThread : public DelegateSimpleThread::Delegate {
  public:
@@ -117,8 +163,8 @@
 
   HangWatcherTest() {
     feature_list_.InitWithFeaturesAndParameters(kFeatureAndParams, {});
-    hang_watcher_.InitializeOnMainThread(
-        HangWatcher::ProcessType::kBrowserProcess);
+    HangWatcher::InitializeOnMainThread(
+        HangWatcher::ProcessType::kBrowserProcess, false);
 
     hang_watcher_.SetAfterMonitorClosureForTesting(base::BindRepeating(
         &WaitableEvent::Signal, base::Unretained(&monitor_event_)));
@@ -134,7 +180,7 @@
     hang_watcher_.Start();
   }
 
-  void TearDown() override { hang_watcher_.UnitializeOnMainThreadForTesting(); }
+  void TearDown() override { HangWatcher::UnitializeOnMainThreadForTesting(); }
 
   HangWatcherTest(const HangWatcherTest& other) = delete;
   HangWatcherTest& operator=(const HangWatcherTest& other) = delete;
@@ -516,15 +562,15 @@
  public:
   void SetUp() override {
     feature_list_.InitWithFeaturesAndParameters(kFeatureAndParams, {});
-    hang_watcher_.InitializeOnMainThread(
-        HangWatcher::ProcessType::kBrowserProcess);
+    HangWatcher::InitializeOnMainThread(
+        HangWatcher::ProcessType::kBrowserProcess, false);
 
     // The monitoring loop behavior is not verified in this test so we want to
     // trigger monitoring manually.
     hang_watcher_.SetMonitoringPeriodForTesting(kVeryLongDelta);
   }
 
-  void TearDown() override { hang_watcher_.UnitializeOnMainThreadForTesting(); }
+  void TearDown() override { HangWatcher::UnitializeOnMainThreadForTesting(); }
 
   HangWatcherSnapshotTest() = default;
   HangWatcherSnapshotTest(const HangWatcherSnapshotTest& other) = delete;
@@ -779,7 +825,7 @@
  public:
   HangWatcherPeriodicMonitoringTest() {
     hang_watcher_.InitializeOnMainThread(
-        HangWatcher::ProcessType::kBrowserProcess);
+        HangWatcher::ProcessType::kBrowserProcess, false);
 
     hang_watcher_.SetMonitoringPeriodForTesting(kMonitoringPeriod);
     hang_watcher_.SetOnHangClosureForTesting(base::BindRepeating(
@@ -935,8 +981,8 @@
  public:
   WatchHangsInScopeBlockingTest() {
     feature_list_.InitWithFeaturesAndParameters(kFeatureAndParams, {});
-    hang_watcher_.InitializeOnMainThread(
-        HangWatcher::ProcessType::kBrowserProcess);
+    HangWatcher::InitializeOnMainThread(
+        HangWatcher::ProcessType::kBrowserProcess, false);
 
     hang_watcher_.SetOnHangClosureForTesting(base::BindLambdaForTesting([&] {
       capture_started_.Signal();
@@ -964,7 +1010,7 @@
         HangWatcher::RegisterThread(base::HangWatcher::ThreadType::kMainThread);
   }
 
-  void TearDown() override { hang_watcher_.UnitializeOnMainThreadForTesting(); }
+  void TearDown() override { HangWatcher::UnitializeOnMainThreadForTesting(); }
 
   WatchHangsInScopeBlockingTest(const WatchHangsInScopeBlockingTest& other) =
       delete;
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
index 8871487..f0740c1 100644
--- a/base/threading/platform_thread.h
+++ b/base/threading/platform_thread.h
@@ -16,8 +16,11 @@
 
 #include "base/base_export.h"
 #include "base/message_loop/message_pump_type.h"
+#include "base/process/process_handle.h"
+#include "base/sequence_checker_impl.h"
 #include "base/threading/platform_thread_ref.h"
 #include "base/time/time.h"
+#include "base/types/strong_alias.h"
 #include "build/build_config.h"
 #include "build/chromeos_buildflags.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
@@ -124,9 +127,9 @@
 enum class ThreadPriorityForTest : int {
   kBackground,
   kUtility,
+  kResourceEfficient,
   kNormal,
-  // The priority obtained via ThreadType::kDisplayCritical (and potentially
-  // other ThreadTypes).
+  kCompositing,
   kDisplay,
   kRealtimeAudio,
   kMaxValue = kRealtimeAudio,
@@ -290,10 +293,12 @@
 
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
 class ThreadTypeDelegate;
+using IsViaIPC = base::StrongAlias<class IsViaIPCTag, bool>;
 
 class BASE_EXPORT PlatformThreadLinux : public PlatformThreadBase {
  public:
-  static constexpr struct sched_param kRealTimePrio = {8};
+  static constexpr struct sched_param kRealTimeAudioPrio = {8};
+  static constexpr struct sched_param kRealTimeDisplayPrio = {6};
 
   // Sets a delegate which handles thread type changes for this process. This
   // must be externally synchronized with any call to SetCurrentThreadType.
@@ -310,16 +315,22 @@
   // whole thread group's (i.e. process) priority.
   static void SetThreadType(PlatformThreadId process_id,
                             PlatformThreadId thread_id,
-                            ThreadType thread_type);
+                            ThreadType thread_type,
+                            IsViaIPC via_ipc);
 
   // For a given thread id and thread type, setup the cpuset and schedtune
   // CGroups for the thread.
   static void SetThreadCgroupsForThreadType(PlatformThreadId thread_id,
                                             ThreadType thread_type);
+
+  // Determine if thread_id is a background thread by looking up whether
+  // it is in the urgent or non-urgent cpuset
+  static bool IsThreadBackgroundedForTest(PlatformThreadId thread_id);
 };
 #endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
 
 #if BUILDFLAG(IS_CHROMEOS)
+
 class BASE_EXPORT PlatformThreadChromeOS : public PlatformThreadLinux {
  public:
   // Signals that the feature list has been initialized. Used for preventing
@@ -331,7 +342,30 @@
   // PlatformThreadLinux's SetThreadType() header comment for Linux details.
   static void SetThreadType(PlatformThreadId process_id,
                             PlatformThreadId thread_id,
-                            ThreadType thread_type);
+                            ThreadType thread_type,
+                            IsViaIPC via_ipc);
+
+  // Returns true if the feature for backgrounding of threads is enabled.
+  static bool IsThreadsBgFeatureEnabled();
+
+  // Returns true if the feature for setting display threads to RT is enabled.
+  static bool IsDisplayThreadsRtFeatureEnabled();
+
+  // Set a specific thread as backgrounded. This is called when the process
+  // moves to and from the background and changes have to be made to each of its
+  // thread's scheduling attributes.
+  static void SetThreadBackgrounded(ProcessId process_id,
+                                    PlatformThreadId thread_id,
+                                    bool backgrounded);
+
+  // Returns the thread type of a thread given its thread id.
+  static absl::optional<ThreadType> GetThreadTypeFromThreadId(
+      ProcessId process_id,
+      PlatformThreadId thread_id);
+
+  // Returns a SequenceChecker which should be used to verify that all
+  // cross-process priority changes are performed without races.
+  static SequenceCheckerImpl& GetCrossProcessThreadPrioritySequenceChecker();
 };
 #endif  // BUILDFLAG(IS_CHROMEOS)
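
A brief sketch of a caller adapting to the new `IsViaIPC` strong alias (illustrative; the process and thread ids are placeholders):

    // A priority change requested on behalf of another process (e.g. over IPC)
    // must now say so explicitly instead of passing a bare bool.
    base::PlatformThreadLinux::SetThreadType(
        /*process_id=*/target_process_id, /*thread_id=*/target_thread_id,
        base::ThreadType::kCompositing, base::IsViaIPC(true));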
 
diff --git a/base/threading/platform_thread_android.cc b/base/threading/platform_thread_android.cc
index 0d06099..667b6d2 100644
--- a/base/threading/platform_thread_android.cc
+++ b/base/threading/platform_thread_android.cc
@@ -28,10 +28,12 @@
 // result in heavy throttling and force the thread onto a little core on
 // big.LITTLE devices.
 const ThreadPriorityToNiceValuePairForTest
-    kThreadPriorityToNiceValueMapForTest[5] = {
+    kThreadPriorityToNiceValueMapForTest[7] = {
         {ThreadPriorityForTest::kRealtimeAudio, -16},
         {ThreadPriorityForTest::kDisplay, -4},
+        {ThreadPriorityForTest::kCompositing, -4},
         {ThreadPriorityForTest::kNormal, 0},
+        {ThreadPriorityForTest::kResourceEfficient, 0},
         {ThreadPriorityForTest::kUtility, 1},
         {ThreadPriorityForTest::kBackground, 10},
 };
diff --git a/base/threading/platform_thread_apple.mm b/base/threading/platform_thread_apple.mm
new file mode 100644
index 0000000..420b47f
--- /dev/null
+++ b/base/threading/platform_thread_apple.mm
@@ -0,0 +1,414 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#import <Foundation/Foundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/thread_policy.h>
+#include <mach/thread_switch.h>
+#include <stddef.h>
+#include <sys/resource.h>
+
+#include <algorithm>
+#include <atomic>
+
+#include "base/apple/foundation_util.h"
+#include "base/apple/mach_logging.h"
+#include "base/feature_list.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/threading_features.h"
+#include "build/blink_buildflags.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+NSString* const kThreadPriorityForTestKey = @"CrThreadPriorityForTestKey";
+NSString* const kRealtimePeriodNsKey = @"CrRealtimePeriodNsKey";
+}  // namespace
+
+// If Foundation is to be used on more than one thread, it must know that the
+// application is multithreaded.  Since it's possible to enter Foundation code
+// from threads created by pthread_create, Foundation won't necessarily
+// be aware that the application is multithreaded.  Spawning an NSThread is
+// enough to get Foundation to set up for multithreaded operation, so this is
+// done if necessary before pthread_create spawns any threads.
+//
+// https://developer.apple.com/documentation/foundation/nsthread/1410702-ismultithreaded
+void InitThreading() {
+  static BOOL multithreaded = [NSThread isMultiThreaded];
+  if (!multithreaded) {
+    // +[NSObject class] is idempotent.
+    @autoreleasepool {
+      [NSThread detachNewThreadSelector:@selector(class)
+                               toTarget:[NSObject class]
+                             withObject:nil];
+      multithreaded = YES;
+
+      DCHECK([NSThread isMultiThreaded]);
+    }
+  }
+}
+
+TimeDelta PlatformThreadBase::Delegate::GetRealtimePeriod() {
+  return TimeDelta();
+}
+
+// static
+void PlatformThreadBase::YieldCurrentThread() {
+  // Don't use sched_yield(), as it can lead to 10ms delays.
+  //
+  // This only depresses the thread priority for 1ms, which is more in line
+  // with what calling code likely wants. See this bug in webkit for context:
+  // https://bugs.webkit.org/show_bug.cgi?id=204871
+  mach_msg_timeout_t timeout_ms = 1;
+  thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, timeout_ms);
+}
+
+// static
+void PlatformThreadBase::SetName(const std::string& name) {
+  SetNameCommon(name);
+
+  // macOS does not expose the length limit of the name, so hardcode it.
+  const int kMaxNameLength = 63;
+  std::string shortened_name = name.substr(0, kMaxNameLength);
+  // pthread_setname() fails (harmlessly) in the sandbox, ignore when it does.
+  // See https://crbug.com/47058
+  pthread_setname_np(shortened_name.c_str());
+}
+
+// Whether optimized real-time thread config should be used for audio.
+BASE_FEATURE(kOptimizedRealtimeThreadingMac,
+             "OptimizedRealtimeThreadingMac",
+#if BUILDFLAG(IS_MAC)
+             FEATURE_ENABLED_BY_DEFAULT
+#else
+             FEATURE_DISABLED_BY_DEFAULT
+#endif
+);
+
+const Feature kUserInteractiveCompositingMac{"UserInteractiveCompositingMac",
+                                             FEATURE_DISABLED_BY_DEFAULT};
+
+namespace {
+
+bool IsOptimizedRealtimeThreadingMacEnabled() {
+  return FeatureList::IsEnabled(kOptimizedRealtimeThreadingMac);
+}
+
+}  // namespace
+
+// Fine-tuning optimized real-time thread config:
+// Whether or not the thread should be preemptible.
+const FeatureParam<bool> kOptimizedRealtimeThreadingMacPreemptible{
+    &kOptimizedRealtimeThreadingMac, "preemptible", true};
+// Portion of the time quantum the thread is expected to be busy, (0, 1].
+const FeatureParam<double> kOptimizedRealtimeThreadingMacBusy{
+    &kOptimizedRealtimeThreadingMac, "busy", 0.5};
+// Maximum portion of the time quantum the thread is expected to be busy,
+// (kOptimizedRealtimeThreadingMacBusy, 1].
+const FeatureParam<double> kOptimizedRealtimeThreadingMacBusyLimit{
+    &kOptimizedRealtimeThreadingMac, "busy_limit", 1.0};
+std::atomic<bool> g_user_interactive_compositing(
+    kUserInteractiveCompositingMac.default_state == FEATURE_ENABLED_BY_DEFAULT);
+
+namespace {
+
+struct TimeConstraints {
+  bool preemptible{kOptimizedRealtimeThreadingMacPreemptible.default_value};
+  double busy{kOptimizedRealtimeThreadingMacBusy.default_value};
+  double busy_limit{kOptimizedRealtimeThreadingMacBusyLimit.default_value};
+
+  static TimeConstraints ReadFromFeatureParams() {
+    double busy_limit = kOptimizedRealtimeThreadingMacBusyLimit.Get();
+    return TimeConstraints{
+        kOptimizedRealtimeThreadingMacPreemptible.Get(),
+        std::min(busy_limit, kOptimizedRealtimeThreadingMacBusy.Get()),
+        busy_limit};
+  }
+};
+
+// Use atomics to access FeatureList values when setting up a thread, since
+// there are cases when FeatureList initialization is not synchronized with
+// PlatformThread creation.
+std::atomic<bool> g_use_optimized_realtime_threading(
+    kOptimizedRealtimeThreadingMac.default_state == FEATURE_ENABLED_BY_DEFAULT);
+std::atomic<TimeConstraints> g_time_constraints;
+
+}  // namespace
+
+// static
+void PlatformThreadApple::InitFeaturesPostFieldTrial() {
+  // A DCHECK is triggered on FeatureList initialization if the state of a
+  // feature has been checked before. To avoid triggering this DCHECK in unit
+  // tests that call this before initializing the FeatureList, only check the
+  // state of the feature if the FeatureList is initialized.
+  if (FeatureList::GetInstance()) {
+    g_time_constraints.store(TimeConstraints::ReadFromFeatureParams());
+    g_use_optimized_realtime_threading.store(
+        IsOptimizedRealtimeThreadingMacEnabled());
+    g_user_interactive_compositing.store(
+        FeatureList::IsEnabled(kUserInteractiveCompositingMac));
+  }
+}
+
+// static
+void PlatformThreadApple::SetCurrentThreadRealtimePeriodValue(
+    TimeDelta realtime_period) {
+  if (g_use_optimized_realtime_threading.load()) {
+    NSThread.currentThread.threadDictionary[kRealtimePeriodNsKey] =
+        @(realtime_period.InNanoseconds());
+  }
+}
+
+namespace {
+
+TimeDelta GetCurrentThreadRealtimePeriod() {
+  NSNumber* period = apple::ObjCCast<NSNumber>(
+      NSThread.currentThread.threadDictionary[kRealtimePeriodNsKey]);
+
+  return period ? Nanoseconds(period.longLongValue) : TimeDelta();
+}
+
+// Calculates time constraints for THREAD_TIME_CONSTRAINT_POLICY.
+// |realtime_period| is used as a base if it's non-zero.
+// Otherwise we fall back to empirical values.
+thread_time_constraint_policy_data_t GetTimeConstraints(
+    TimeDelta realtime_period) {
+  thread_time_constraint_policy_data_t time_constraints;
+  mach_timebase_info_data_t tb_info;
+  mach_timebase_info(&tb_info);
+
+  if (!realtime_period.is_zero()) {
+    // Limit the lowest value to the 2.9 ms we used to have historically. The lower
+    // the period, the more CPU frequency may go up, and we don't want to risk
+    // worsening the thermal situation.
+    uint32_t abs_realtime_period = saturated_cast<uint32_t>(
+        std::max(realtime_period.InNanoseconds(), 2900000LL) *
+        (double(tb_info.denom) / tb_info.numer));
+    TimeConstraints config = g_time_constraints.load();
+    time_constraints.period = abs_realtime_period;
+    time_constraints.constraint = std::min(
+        abs_realtime_period, uint32_t(abs_realtime_period * config.busy_limit));
+    time_constraints.computation =
+        std::min(time_constraints.constraint,
+                 uint32_t(abs_realtime_period * config.busy));
+    time_constraints.preemptible = config.preemptible ? YES : NO;
+    return time_constraints;
+  }
+
+  // Empirical configuration.
+
+  // Define the guaranteed and max fraction of time for the audio thread.
+  // These "duty cycle" values can range from 0 to 1.  A value of 0.5
+  // means the scheduler would give half the time to the thread.
+  // These values have empirically been found to yield good behavior.
+  // Good means that audio performance is high and other threads won't starve.
+  const double kGuaranteedAudioDutyCycle = 0.75;
+  const double kMaxAudioDutyCycle = 0.85;
+
+  // Define constants determining how much time the audio thread can
+  // use in a given time quantum.  All times are in milliseconds.
+
+  // About 128 frames @44.1KHz
+  const double kTimeQuantum = 2.9;
+
+  // Time guaranteed each quantum.
+  const double kAudioTimeNeeded = kGuaranteedAudioDutyCycle * kTimeQuantum;
+
+  // Maximum time each quantum.
+  const double kMaxTimeAllowed = kMaxAudioDutyCycle * kTimeQuantum;
+
+  // Get the conversion factor from milliseconds to absolute time
+  // which is what the time-constraints call needs.
+  double ms_to_abs_time = double(tb_info.denom) / tb_info.numer * 1000000;
+
+  time_constraints.period = kTimeQuantum * ms_to_abs_time;
+  time_constraints.computation = kAudioTimeNeeded * ms_to_abs_time;
+  time_constraints.constraint = kMaxTimeAllowed * ms_to_abs_time;
+  time_constraints.preemptible = 0;
+  return time_constraints;
+}
+
+// Enables time-constraint policy and priority suitable for low-latency,
+// glitch-resistant audio.
+void SetPriorityRealtimeAudio(TimeDelta realtime_period) {
+  // Increase thread priority to real-time.
+
+  // Please note that the thread_policy_set() calls may fail in
+  // rare cases if the kernel decides the system is under heavy load
+  // and is unable to handle boosting the thread priority.
+  // In these cases we just return early and go on with life.
+
+  mach_port_t mach_thread_id =
+      pthread_mach_thread_np(PlatformThread::CurrentHandle().platform_handle());
+
+  // Make thread fixed priority.
+  thread_extended_policy_data_t policy;
+  policy.timeshare = 0;  // Set to 1 for a non-fixed thread.
+  kern_return_t result = thread_policy_set(
+      mach_thread_id, THREAD_EXTENDED_POLICY,
+      reinterpret_cast<thread_policy_t>(&policy), THREAD_EXTENDED_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_DVLOG(1, result) << "thread_policy_set";
+    return;
+  }
+
+  // Set to relatively high priority.
+  thread_precedence_policy_data_t precedence;
+  precedence.importance = 63;
+  result = thread_policy_set(mach_thread_id, THREAD_PRECEDENCE_POLICY,
+                             reinterpret_cast<thread_policy_t>(&precedence),
+                             THREAD_PRECEDENCE_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_DVLOG(1, result) << "thread_policy_set";
+    return;
+  }
+
+  // Most important, set real-time constraints.
+
+  thread_time_constraint_policy_data_t time_constraints =
+      GetTimeConstraints(realtime_period);
+
+  result =
+      thread_policy_set(mach_thread_id, THREAD_TIME_CONSTRAINT_POLICY,
+                        reinterpret_cast<thread_policy_t>(&time_constraints),
+                        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
+  MACH_DVLOG_IF(1, result != KERN_SUCCESS, result) << "thread_policy_set";
+  return;
+}
+
+}  // anonymous namespace
+
+// static
+bool PlatformThreadBase::CanChangeThreadType(ThreadType from, ThreadType to) {
+  return true;
+}
+
+namespace internal {
+
+void SetCurrentThreadTypeImpl(ThreadType thread_type,
+                              MessagePumpType pump_type_hint) {
+  // Changing the priority of the main thread causes performance
+  // regressions. https://crbug.com/601270
+  // TODO(https://crbug.com/1280764): Remove this check. kCompositing is the
+  // default on Mac, so this check is counter intuitive.
+  if ([[NSThread currentThread] isMainThread] &&
+      thread_type >= ThreadType::kCompositing) {
+    DCHECK(thread_type == ThreadType::kDefault ||
+           thread_type == ThreadType::kCompositing);
+    return;
+  }
+
+  ThreadPriorityForTest priority = ThreadPriorityForTest::kNormal;
+  switch (thread_type) {
+    case ThreadType::kBackground:
+      priority = ThreadPriorityForTest::kBackground;
+      pthread_set_qos_class_self_np(QOS_CLASS_BACKGROUND, 0);
+      break;
+    case ThreadType::kUtility:
+      priority = ThreadPriorityForTest::kUtility;
+      pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
+      break;
+    case ThreadType::kResourceEfficient:
+      priority = ThreadPriorityForTest::kUtility;
+      pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
+      break;
+    case ThreadType::kDefault:
+      priority = ThreadPriorityForTest::kNormal;
+      pthread_set_qos_class_self_np(QOS_CLASS_USER_INITIATED, 0);
+      break;
+    case ThreadType::kCompositing:
+      if (g_user_interactive_compositing.load(std::memory_order_relaxed)) {
+        priority = ThreadPriorityForTest::kDisplay;
+        pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
+      } else {
+        priority = ThreadPriorityForTest::kNormal;
+        pthread_set_qos_class_self_np(QOS_CLASS_USER_INITIATED, 0);
+      }
+      break;
+    case ThreadType::kDisplayCritical: {
+      priority = ThreadPriorityForTest::kDisplay;
+      pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
+      break;
+    }
+    case ThreadType::kRealtimeAudio:
+      priority = ThreadPriorityForTest::kRealtimeAudio;
+      SetPriorityRealtimeAudio(GetCurrentThreadRealtimePeriod());
+      DCHECK_EQ([NSThread.currentThread threadPriority], 1.0);
+      break;
+  }
+
+  NSThread.currentThread.threadDictionary[kThreadPriorityForTestKey] =
+      @(static_cast<int>(priority));
+}
+
+}  // namespace internal
+
+// static
+ThreadPriorityForTest PlatformThreadBase::GetCurrentThreadPriorityForTest() {
+  NSNumber* priority = base::apple::ObjCCast<NSNumber>(
+      NSThread.currentThread.threadDictionary[kThreadPriorityForTestKey]);
+
+  if (!priority) {
+    return ThreadPriorityForTest::kNormal;
+  }
+
+  ThreadPriorityForTest thread_priority =
+      static_cast<ThreadPriorityForTest>(priority.intValue);
+  DCHECK_GE(thread_priority, ThreadPriorityForTest::kBackground);
+  DCHECK_LE(thread_priority, ThreadPriorityForTest::kMaxValue);
+  return thread_priority;
+}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if BUILDFLAG(IS_IOS)
+#if BUILDFLAG(USE_BLINK)
+  // For iOS 512kB (the default) isn't sufficient, but using the code
+  // for macOS below will return 8MB. So just be a little more conservative
+  // and return 1MB for now.
+  return 1024 * 1024;
+#else
+  return 0;
+#endif
+#else
+  // The macOS default for a pthread stack size is 512kB.
+  // Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
+  // DEFAULT_STACK_SIZE for this purpose.
+  //
+  // 512kB isn't quite generous enough for some deeply recursive threads that
+  // otherwise request the default stack size by specifying 0. Here, adopt
+  // glibc's behavior as on Linux, which is to use the current stack size
+  // limit (ulimit -s) as the default stack size. See
+  // glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
+  // avoid setting the limit below the macOS default or the minimum usable
+  // stack size, these values are also considered. If any of these values
+  // can't be determined, or if stack size is unlimited (ulimit -s unlimited),
+  // stack_size is left at 0 to get the system default.
+  //
+  // macOS normally only applies ulimit -s to the main thread stack. On
+  // contemporary macOS and Linux systems alike, this value is generally 8MB
+  // or in that neighborhood.
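+  //
+  // For example (hypothetical but typical numbers): with `ulimit -s` at
+  // 8192 kB, the computation below yields max(512 kB, PTHREAD_STACK_MIN, 8 MB),
+  // i.e. 8 MB, rather than the 512 kB pthread default.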
+  size_t default_stack_size = 0;
+  struct rlimit stack_rlimit;
+  if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
+      getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
+      stack_rlimit.rlim_cur != RLIM_INFINITY) {
+    default_stack_size = std::max(
+        std::max(default_stack_size, static_cast<size_t>(PTHREAD_STACK_MIN)),
+        static_cast<size_t>(stack_rlimit.rlim_cur));
+  }
+  return default_stack_size;
+#endif
+}
+
+void TerminateOnThread() {}
+
+}  // namespace base
diff --git a/base/threading/platform_thread_cros.cc b/base/threading/platform_thread_cros.cc
index 28e6407..ec5ecc7 100644
--- a/base/threading/platform_thread_cros.cc
+++ b/base/threading/platform_thread_cros.cc
@@ -4,6 +4,8 @@
 // Description: ChromeOS specific Linux code layered on top of
 // base/threading/platform_thread_linux{,_base}.cc.
 
+#include "base/feature_list.h"
+#include "base/no_destructor.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/platform_thread_internal_posix.h"
 
@@ -12,6 +14,7 @@
 #include "base/files/file_util.h"
 #include "base/metrics/field_trial_params.h"
 #include "base/process/internal_linux.h"
+#include "base/process/process.h"
 #include "base/strings/stringprintf.h"
 
 #include <sys/resource.h>
@@ -22,10 +25,19 @@
              "SchedUtilHints",
              base::FEATURE_ENABLED_BY_DEFAULT);
 
+BASE_FEATURE(kSetThreadBgForBgProcess,
+             "SetThreadBgForBgProcess",
+             FEATURE_DISABLED_BY_DEFAULT);
+
+BASE_FEATURE(kSetRtForDisplayThreads,
+             "SetRtForDisplayThreads",
+             FEATURE_DISABLED_BY_DEFAULT);
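+
+// Both features are disabled by default; for local experiments they can be
+// turned on with the usual feature switch, e.g.
+// --enable-features=SetThreadBgForBgProcess,SetRtForDisplayThreads.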
 namespace {
 
 std::atomic<bool> g_use_sched_util(true);
 std::atomic<bool> g_scheduler_hints_adjusted(false);
+std::atomic<bool> g_threads_bg_enabled(false);
+std::atomic<bool> g_display_threads_rt(false);
 
 // When a device doesn't specify uclamp values via chrome switches,
 // default boosting for urgent tasks is hardcoded here as 20%.
@@ -200,10 +212,101 @@
   }
 }
 
+// Maps a nice value back to its ThreadType by scanning
+// kThreadTypeToNiceValueMap.
+absl::optional<ThreadType> GetThreadTypeForNiceValue(int nice_value) {
+  for (auto i : internal::kThreadTypeToNiceValueMap) {
+    if (nice_value == i.nice_value) {
+      return i.thread_type;
+    }
+  }
+  return absl::nullopt;
+}
+
+absl::optional<int> GetNiceValueForThreadId(PlatformThreadId thread_id) {
+  // Get the current nice value of the thread_id
+  errno = 0;
+  int nice_value = getpriority(PRIO_PROCESS, static_cast<id_t>(thread_id));
+  if (nice_value == -1 && errno != 0) {
+    // The thread may disappear for any reason so ignore ESRCH.
+    DPLOG_IF(ERROR, errno != ESRCH)
+        << "Failed to call getpriority for thread id " << thread_id
+        << ", performance may be effected.";
+    return absl::nullopt;
+  }
+  return nice_value;
+}
+
 } // namespace
 
+void SetThreadTypeOtherAttrs(ProcessId process_id,
+                             PlatformThreadId thread_id,
+                             ThreadType thread_type) {
+  // For cpuset and legacy schedtune interface
+  PlatformThreadLinux::SetThreadCgroupsForThreadType(thread_id, thread_type);
+
+  // For upstream uclamp interface. We try both legacy (schedtune, as done
+  // earlier) and upstream (uclamp) interfaces, and whichever succeeds wins.
+  SetThreadLatencySensitivity(process_id, thread_id, thread_type);
+}
+
+// Set or reset the RT priority of a thread based on its type
+// and whether the process it is in is backgrounded.
+// Setting an RT task to CFS retains the task's nice value.
+void SetThreadRTPrioFromType(ProcessId process_id,
+                             PlatformThreadId thread_id,
+                             ThreadType thread_type,
+                             bool proc_bg) {
+  struct sched_param prio;
+  int policy;
+
+  switch (thread_type) {
+    case ThreadType::kRealtimeAudio:
+      prio = PlatformThreadChromeOS::kRealTimeAudioPrio;
+      policy = SCHED_RR;
+      break;
+    case ThreadType::kCompositing:
+      [[fallthrough]];
+    case ThreadType::kDisplayCritical:
+      if (!PlatformThreadChromeOS::IsDisplayThreadsRtFeatureEnabled()) {
+        return;
+      }
+      if (proc_bg) {
+        // Per the manpage, this must be 0; otherwise the nice value could
+        // have been passed here. Note that even though the
+        // prio.sched_priority passed to the sched_setscheduler() syscall is
+        // 0, the old nice value (which holds the ThreadType of the thread) is
+        // retained.
+        prio.sched_priority = 0;
+        policy = SCHED_OTHER;
+      } else {
+        prio = PlatformThreadChromeOS::kRealTimeDisplayPrio;
+        policy = SCHED_RR;
+      }
+      break;
+    default:
+      return;
+  }
+
+  PlatformThreadId syscall_tid =
+      thread_id == PlatformThread::CurrentId() ? 0 : thread_id;
+  if (sched_setscheduler(syscall_tid, policy, &prio) != 0) {
+    DPLOG(ERROR) << "Failed to set policy/priority for thread " << thread_id;
+  }
+}
+
+void SetThreadNiceFromType(ProcessId process_id,
+                           PlatformThreadId thread_id,
+                           ThreadType thread_type) {
+  PlatformThreadId syscall_tid =
+      thread_id == PlatformThread::CurrentId() ? 0 : thread_id;
+  const int nice_setting = internal::ThreadTypeToNiceValue(thread_type);
+  if (setpriority(PRIO_PROCESS, static_cast<id_t>(syscall_tid), nice_setting)) {
+    DPLOG(ERROR) << "Failed to set nice value of thread " << thread_id << " to "
+                 << nice_setting;
+  }
+}
+
 void PlatformThreadChromeOS::InitFeaturesPostFieldTrial() {
   DCHECK(FeatureList::GetInstance());
+  g_threads_bg_enabled.store(FeatureList::IsEnabled(kSetThreadBgForBgProcess));
+  g_display_threads_rt.store(FeatureList::IsEnabled(kSetRtForDisplayThreads));
   if (!FeatureList::IsEnabled(kSchedUtilHints)) {
     g_use_sched_util.store(false);
     return;
@@ -237,36 +340,88 @@
 }
 
 // static
+bool PlatformThreadChromeOS::IsThreadsBgFeatureEnabled() {
+  return g_threads_bg_enabled.load();
+}
+
+bool PlatformThreadChromeOS::IsDisplayThreadsRtFeatureEnabled() {
+  return g_display_threads_rt.load();
+}
+
+// static
+absl::optional<ThreadType> PlatformThreadChromeOS::GetThreadTypeFromThreadId(
+    ProcessId process_id,
+    PlatformThreadId thread_id) {
+  // Get the current nice value of the thread_id.
+  absl::optional<int> nice_value = GetNiceValueForThreadId(thread_id);
+  if (!nice_value.has_value()) {
+    return absl::nullopt;
+  }
+  return GetThreadTypeForNiceValue(nice_value.value());
+}
+
+// static
 void PlatformThreadChromeOS::SetThreadType(ProcessId process_id,
                                            PlatformThreadId thread_id,
-                                           ThreadType thread_type) {
-  // TODO(b/262267726): Call PlatformThreadLinux::SetThreadType for common code.
-  PlatformThreadId syscall_tid = thread_id;
-  if (thread_id == PlatformThread::CurrentId()) {
-    syscall_tid = 0;
+                                           ThreadType thread_type,
+                                           IsViaIPC via_ipc) {
+  // TODO(b/262267726): Re-use common code with
+  // PlatformThreadLinux::SetThreadType().
+  //
+  // This should not be called concurrently with other functions that adjust
+  // the thread's scheduling attributes, such as SetThreadBackgrounded.
+  if (via_ipc) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(
+        PlatformThread::GetCrossProcessThreadPrioritySequenceChecker());
   }
 
-  // For legacy schedtune interface
-  PlatformThreadLinux::SetThreadCgroupsForThreadType(thread_id, thread_type);
-
-  // For upstream uclamp interface. We try both legacy (schedtune, as done
-  // earlier) and upstream (uclamp) interfaces, and whichever succeeds wins.
-  SetThreadLatencySensitivity(process_id, thread_id, thread_type);
-
-  if (thread_type == ThreadType::kRealtimeAudio) {
-    if (sched_setscheduler(syscall_tid, SCHED_RR, &kRealTimePrio) == 0) {
-      return;
-    }
-    // If failed to set to RT, fallback to setting nice value.
-    DVPLOG(1) << "Failed to set realtime priority for thread (" << thread_id
-              << ")";
+  auto proc = Process::Open(process_id);
+  bool backgrounded = false;
+  if (IsThreadsBgFeatureEnabled() &&
+      thread_type != ThreadType::kRealtimeAudio && proc.IsValid() &&
+      proc.GetPriority() == base::Process::Priority::kBestEffort) {
+    backgrounded = true;
   }
 
-  const int nice_setting = internal::ThreadTypeToNiceValue(thread_type);
-  if (setpriority(PRIO_PROCESS, static_cast<id_t>(syscall_tid), nice_setting)) {
-    DVPLOG(1) << "Failed to set nice value of thread (" << thread_id << ") to "
-              << nice_setting;
+  SetThreadTypeOtherAttrs(process_id, thread_id,
+                          backgrounded ? ThreadType::kBackground : thread_type);
+
+  SetThreadRTPrioFromType(process_id, thread_id, thread_type, backgrounded);
+  SetThreadNiceFromType(process_id, thread_id, thread_type);
+}
+
+void PlatformThreadChromeOS::SetThreadBackgrounded(ProcessId process_id,
+                                                   PlatformThreadId thread_id,
+                                                   bool backgrounded) {
+  // Get the current nice value of the thread_id.
+  absl::optional<int> nice_value = GetNiceValueForThreadId(thread_id);
+  if (!nice_value.has_value()) {
+    return;
   }
+
+  absl::optional<ThreadType> type =
+      GetThreadTypeForNiceValue(nice_value.value());
+  if (!type.has_value()) {
+    return;
+  }
+
+  // kRealtimeAudio threads are not backgrounded or foregrounded.
+  if (type == ThreadType::kRealtimeAudio) {
+    return;
+  }
+
+  SetThreadTypeOtherAttrs(
+      process_id, thread_id,
+      backgrounded ? ThreadType::kBackground : type.value());
+  SetThreadRTPrioFromType(process_id, thread_id, type.value(), backgrounded);
+}
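+
+// A minimal usage sketch (hypothetical caller; the real call sites live in the
+// process-priority plumbing): when a process toggles between foreground and
+// background, each of its thread ids can be re-tuned with, e.g.:
+//
+//   for (PlatformThreadId tid : ThreadIdsOf(pid)) {  // placeholder helper
+//     PlatformThreadChromeOS::SetThreadBackgrounded(pid, tid,
+//                                                   /*backgrounded=*/true);
+//   }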
+
+SequenceCheckerImpl&
+PlatformThreadChromeOS::GetCrossProcessThreadPrioritySequenceChecker() {
+  // In order to use a NoDestructor instance, use SequenceCheckerImpl instead of
+  // SequenceCheckerDoNothing because SequenceCheckerImpl is trivially
+  // destructible but SequenceCheckerDoNothing isn't.
+  static NoDestructor<SequenceCheckerImpl> instance;
+  return *instance;
 }
 
 }  // namespace base
diff --git a/base/threading/platform_thread_internal_posix.cc b/base/threading/platform_thread_internal_posix.cc
index b7d0bdf..f589d17 100644
--- a/base/threading/platform_thread_internal_posix.cc
+++ b/base/threading/platform_thread_internal_posix.cc
@@ -17,7 +17,7 @@
 
 namespace internal {
 
-int ThreadTypeToNiceValue(ThreadType thread_type) {
+BASE_EXPORT int ThreadTypeToNiceValue(ThreadType thread_type) {
   for (const auto& pair : kThreadTypeToNiceValueMap) {
     if (pair.thread_type == thread_type)
       return pair.nice_value;
diff --git a/base/threading/platform_thread_internal_posix.h b/base/threading/platform_thread_internal_posix.h
index 120abae..1f724c1 100644
--- a/base/threading/platform_thread_internal_posix.h
+++ b/base/threading/platform_thread_internal_posix.h
@@ -33,7 +33,7 @@
 // priority first), that is, in the order of increasing nice values (lowest nice
 // value first).
 extern const ThreadPriorityToNiceValuePairForTest
-    kThreadPriorityToNiceValueMapForTest[5];
+    kThreadPriorityToNiceValueMapForTest[7];
 
 // Returns the nice value matching |priority| based on the platform-specific
 // implementation of kThreadTypeToNiceValueMap.
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
index 0fbe17c..1b5384b 100644
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -4,6 +4,7 @@
 // Description: Linux specific functionality. Other Linux-derivatives layer on
 // top of this translation unit.
 
+#include "base/no_destructor.h"
 #include "base/threading/platform_thread.h"
 
 #include <errno.h>
@@ -103,27 +104,47 @@
 namespace internal {
 
 const ThreadPriorityToNiceValuePairForTest
-    kThreadPriorityToNiceValueMapForTest[5] = {
+    kThreadPriorityToNiceValueMapForTest[7] = {
         {ThreadPriorityForTest::kRealtimeAudio, -10},
         {ThreadPriorityForTest::kDisplay, -8},
+#if BUILDFLAG(IS_CHROMEOS)
+        {ThreadPriorityForTest::kCompositing, -8},
+#else
+        // TODO(1329208): Experiment with bringing IS_LINUX in line with
+        // IS_CHROMEOS.
+        {ThreadPriorityForTest::kCompositing, -1},
+#endif
         {ThreadPriorityForTest::kNormal, 0},
-        {ThreadPriorityForTest::kUtility, 1},
+        {ThreadPriorityForTest::kResourceEfficient, 1},
+        {ThreadPriorityForTest::kUtility, 2},
         {ThreadPriorityForTest::kBackground, 10},
 };
 
+// These nice values are shared with ChromeOS platform code
+// (platform_thread_cros.cc) and have to be unique, as ChromeOS has a unique
+// type -> nice value mapping. An exception is kCompositing and
+// kDisplayCritical, where aliasing is OK because they have the same scheduler
+// attributes (cpusets, latency_sensitive, etc.), including nice value.
+// The uniqueness of the nice value per type makes it possible to change and
+// restore the scheduling params of threads when their process toggles between
+// FG and BG.
 const ThreadTypeToNiceValuePair kThreadTypeToNiceValueMap[7] = {
-    {ThreadType::kBackground, 10},       {ThreadType::kUtility, 1},
-    {ThreadType::kResourceEfficient, 0}, {ThreadType::kDefault, 0},
+    {ThreadType::kBackground, 10},       {ThreadType::kUtility, 2},
+    {ThreadType::kResourceEfficient, 1}, {ThreadType::kDefault, 0},
 #if BUILDFLAG(IS_CHROMEOS)
     {ThreadType::kCompositing, -8},
 #else
     // TODO(1329208): Experiment with bringing IS_LINUX inline with IS_CHROMEOS.
-    {ThreadType::kCompositing, 0},
+    {ThreadType::kCompositing, -1},
 #endif
     {ThreadType::kDisplayCritical, -8},  {ThreadType::kRealtimeAudio, -10},
 };
 
 bool CanSetThreadTypeToRealtimeAudio() {
+  // Check if root
+  if (geteuid() == 0) {
+    return true;
+  }
+
   // A non-zero soft-limit on RLIMIT_RTPRIO is required to be allowed to invoke
   // pthread_setschedparam in SetCurrentThreadTypeForPlatform().
   struct rlimit rlim;
@@ -139,7 +160,7 @@
     return true;
   }
 
-  PlatformThread::SetThreadType(getpid(), tid, thread_type);
+  PlatformThread::SetThreadType(getpid(), tid, thread_type, IsViaIPC(false));
   return true;
 }
 
@@ -151,7 +172,7 @@
                             &maybe_realtime_prio) == 0 &&
       maybe_sched_rr == SCHED_RR &&
       maybe_realtime_prio.sched_priority ==
-          PlatformThreadLinux::kRealTimePrio.sched_priority) {
+          PlatformThreadLinux::kRealTimeAudioPrio.sched_priority) {
     return absl::make_optional(ThreadPriorityForTest::kRealtimeAudio);
   }
   return absl::nullopt;
@@ -159,6 +180,54 @@
 
 }  // namespace internal
 
+// Determine if thread_id is a background thread by looking up whether
+// it is in the urgent or non-urgent cpuset.
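+// The cgroup v1 "tasks" files read below list one thread id per line; the
+// lookup is a plain substring search for the decimal tid, which is adequate
+// for a test helper even though a tid embedded in a longer id could in
+// principle match.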
+bool PlatformThreadLinux::IsThreadBackgroundedForTest(
+    PlatformThreadId thread_id) {
+  FilePath cgroup_filepath(kCgroupDirectory);
+
+  FilePath urgent_cgroup_directory =
+      cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset"))
+          .Append(FILE_PATH_LITERAL("chrome"))
+          .Append(FILE_PATH_LITERAL("urgent"));
+  FilePath non_urgent_cgroup_directory =
+      cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset"))
+          .Append(FILE_PATH_LITERAL("chrome"))
+          .Append(FILE_PATH_LITERAL("non-urgent"));
+
+  // Silently ignore request if cgroup directory doesn't exist.
+  if (!DirectoryExists(urgent_cgroup_directory) ||
+      !DirectoryExists(non_urgent_cgroup_directory)) {
+    return false;
+  }
+
+  FilePath urgent_tasks_filepath =
+      urgent_cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
+  FilePath non_urgent_tasks_filepath =
+      non_urgent_cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
+
+  std::string tid = NumberToString(thread_id);
+  // Check if thread_id is in the urgent cpuset
+  std::string urgent_tasks;
+  if (!ReadFileToString(urgent_tasks_filepath, &urgent_tasks)) {
+    return false;
+  }
+  if (urgent_tasks.find(tid) != std::string::npos) {
+    return false;
+  }
+
+  // Check if thread_id is in the non-urgent cpuset
+  std::string non_urgent_tasks;
+  if (!ReadFileToString(non_urgent_tasks_filepath, &non_urgent_tasks)) {
+    return false;
+  }
+  if (non_urgent_tasks.find(tid) != std::string::npos) {
+    return true;
+  }
+
+  return false;
+}
+
 void PlatformThreadBase::SetName(const std::string& name) {
   SetNameCommon(name);
 
@@ -205,7 +274,8 @@
 // static
 void PlatformThreadLinux::SetThreadType(ProcessId process_id,
                                         PlatformThreadId thread_id,
-                                        ThreadType thread_type) {
+                                        ThreadType thread_type,
+                                        IsViaIPC via_ipc) {
   SetThreadCgroupsForThreadType(thread_id, thread_type);
 
   // Some scheduler syscalls require thread ID of 0 for current thread.
@@ -218,7 +288,7 @@
 
   if (thread_type == ThreadType::kRealtimeAudio) {
     if (sched_setscheduler(syscall_tid, SCHED_RR,
-                           &PlatformThreadLinux::kRealTimePrio) == 0) {
+                           &PlatformThreadLinux::kRealTimeAudioPrio) == 0) {
       return;
     }
     // If failed to set to RT, fallback to setpriority to set nice value.
diff --git a/base/threading/platform_thread_mac.mm b/base/threading/platform_thread_mac.mm
deleted file mode 100644
index a67ef0b..0000000
--- a/base/threading/platform_thread_mac.mm
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/threading/platform_thread.h"
-
-#import <Foundation/Foundation.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mach/thread_policy.h>
-#include <mach/thread_switch.h>
-#include <stddef.h>
-#include <sys/resource.h>
-
-#include <algorithm>
-#include <atomic>
-
-#include "base/feature_list.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/threading/thread_id_name_manager.h"
-#include "base/threading/threading_features.h"
-#include "build/blink_buildflags.h"
-#include "build/build_config.h"
-
-namespace base {
-
-namespace {
-NSString* const kThreadPriorityForTestKey = @"CrThreadPriorityForTestKey";
-NSString* const kRealtimePeriodNsKey = @"CrRealtimePeriodNsKey";
-}  // namespace
-
-// If Cocoa is to be used on more than one thread, it must know that the
-// application is multithreaded.  Since it's possible to enter Cocoa code
-// from threads created by pthread_thread_create, Cocoa won't necessarily
-// be aware that the application is multithreaded.  Spawning an NSThread is
-// enough to get Cocoa to set up for multithreaded operation, so this is done
-// if necessary before pthread_thread_create spawns any threads.
-//
-// http://developer.apple.com/documentation/Cocoa/Conceptual/Multithreading/CreatingThreads/chapter_4_section_4.html
-void InitThreading() {
-  static BOOL multithreaded = [NSThread isMultiThreaded];
-  if (!multithreaded) {
-    // +[NSObject class] is idempotent.
-    @autoreleasepool {
-      [NSThread detachNewThreadSelector:@selector(class)
-                               toTarget:[NSObject class]
-                             withObject:nil];
-      multithreaded = YES;
-
-      DCHECK([NSThread isMultiThreaded]);
-    }
-  }
-}
-
-TimeDelta PlatformThreadBase::Delegate::GetRealtimePeriod() {
-  return TimeDelta();
-}
-
-// static
-void PlatformThreadBase::YieldCurrentThread() {
-  // Don't use sched_yield(), as it can lead to 10ms delays.
-  //
-  // This only depresses the thread priority for 1ms, which is more in line
-  // with what calling code likely wants. See this bug in webkit for context:
-  // https://bugs.webkit.org/show_bug.cgi?id=204871
-  mach_msg_timeout_t timeout_ms = 1;
-  thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, timeout_ms);
-}
-
-// static
-void PlatformThreadBase::SetName(const std::string& name) {
-  SetNameCommon(name);
-
-  // macOS does not expose the length limit of the name, so hardcode it.
-  const int kMaxNameLength = 63;
-  std::string shortened_name = name.substr(0, kMaxNameLength);
-  // pthread_setname() fails (harmlessly) in the sandbox, ignore when it does.
-  // See http://crbug.com/47058
-  pthread_setname_np(shortened_name.c_str());
-}
-
-// Whether optimized realt-time thread config should be used for audio.
-BASE_FEATURE(kOptimizedRealtimeThreadingMac,
-             "OptimizedRealtimeThreadingMac",
-#if BUILDFLAG(IS_MAC)
-             FEATURE_ENABLED_BY_DEFAULT
-#else
-             FEATURE_DISABLED_BY_DEFAULT
-#endif
-);
-
-const Feature kUserInteractiveCompositingMac{"UserInteractiveCompositingMac",
-                                             FEATURE_DISABLED_BY_DEFAULT};
-
-namespace {
-
-bool IsOptimizedRealtimeThreadingMacEnabled() {
-  return FeatureList::IsEnabled(kOptimizedRealtimeThreadingMac);
-}
-
-}  // namespace
-
-// Fine-tuning optimized real-time thread config:
-// Whether or not the thread should be preemptible.
-const FeatureParam<bool> kOptimizedRealtimeThreadingMacPreemptible{
-    &kOptimizedRealtimeThreadingMac, "preemptible", true};
-// Portion of the time quantum the thread is expected to be busy, (0, 1].
-const FeatureParam<double> kOptimizedRealtimeThreadingMacBusy{
-    &kOptimizedRealtimeThreadingMac, "busy", 0.5};
-// Maximum portion of the time quantum the thread is expected to be busy,
-// (kOptimizedRealtimeThreadingMacBusy, 1].
-const FeatureParam<double> kOptimizedRealtimeThreadingMacBusyLimit{
-    &kOptimizedRealtimeThreadingMac, "busy_limit", 1.0};
-std::atomic<bool> g_user_interactive_compositing(
-    kUserInteractiveCompositingMac.default_state == FEATURE_ENABLED_BY_DEFAULT);
-
-namespace {
-
-struct TimeConstraints {
-  bool preemptible{kOptimizedRealtimeThreadingMacPreemptible.default_value};
-  double busy{kOptimizedRealtimeThreadingMacBusy.default_value};
-  double busy_limit{kOptimizedRealtimeThreadingMacBusyLimit.default_value};
-
-  static TimeConstraints ReadFromFeatureParams() {
-    double busy_limit = kOptimizedRealtimeThreadingMacBusyLimit.Get();
-    return TimeConstraints{
-        kOptimizedRealtimeThreadingMacPreemptible.Get(),
-        std::min(busy_limit, kOptimizedRealtimeThreadingMacBusy.Get()),
-        busy_limit};
-  }
-};
-
-// Use atomics to access FeatureList values when setting up a thread, since
-// there are cases when FeatureList initialization is not synchronized with
-// PlatformThread creation.
-std::atomic<bool> g_use_optimized_realtime_threading(
-    kOptimizedRealtimeThreadingMac.default_state == FEATURE_ENABLED_BY_DEFAULT);
-std::atomic<TimeConstraints> g_time_constraints;
-
-}  // namespace
-
-// static
-void PlatformThreadApple::InitFeaturesPostFieldTrial() {
-  // A DCHECK is triggered on FeatureList initialization if the state of a
-  // feature has been checked before. To avoid triggering this DCHECK in unit
-  // tests that call this before initializing the FeatureList, only check the
-  // state of the feature if the FeatureList is initialized.
-  if (FeatureList::GetInstance()) {
-    g_time_constraints.store(TimeConstraints::ReadFromFeatureParams());
-    g_use_optimized_realtime_threading.store(
-        IsOptimizedRealtimeThreadingMacEnabled());
-    g_user_interactive_compositing.store(
-        FeatureList::IsEnabled(kUserInteractiveCompositingMac));
-  }
-}
-
-// static
-void PlatformThreadApple::SetCurrentThreadRealtimePeriodValue(
-    TimeDelta realtime_period) {
-  if (g_use_optimized_realtime_threading.load()) {
-    NSThread.currentThread.threadDictionary[kRealtimePeriodNsKey] =
-        @(realtime_period.InNanoseconds());
-  }
-}
-
-namespace {
-
-TimeDelta GetCurrentThreadRealtimePeriod() {
-  NSNumber* period = mac::ObjCCast<NSNumber>(
-      NSThread.currentThread.threadDictionary[kRealtimePeriodNsKey]);
-
-  return period ? Nanoseconds(period.longLongValue) : TimeDelta();
-}
-
-// Calculates time constraints for THREAD_TIME_CONSTRAINT_POLICY.
-// |realtime_period| is used as a base if it's non-zero.
-// Otherwise we fall back to empirical values.
-thread_time_constraint_policy_data_t GetTimeConstraints(
-    TimeDelta realtime_period) {
-  thread_time_constraint_policy_data_t time_constraints;
-  mach_timebase_info_data_t tb_info;
-  mach_timebase_info(&tb_info);
-
-  if (!realtime_period.is_zero()) {
-    // Limit the lowest value to 2.9 ms we used to have historically. The lower
-    // the period, the more CPU frequency may go up, and we don't want to risk
-    // worsening the thermal situation.
-    uint32_t abs_realtime_period = saturated_cast<uint32_t>(
-        std::max(realtime_period.InNanoseconds(), 2900000LL) *
-        (double(tb_info.denom) / tb_info.numer));
-    TimeConstraints config = g_time_constraints.load();
-    time_constraints.period = abs_realtime_period;
-    time_constraints.constraint = std::min(
-        abs_realtime_period, uint32_t(abs_realtime_period * config.busy_limit));
-    time_constraints.computation =
-        std::min(time_constraints.constraint,
-                 uint32_t(abs_realtime_period * config.busy));
-    time_constraints.preemptible = config.preemptible ? YES : NO;
-    return time_constraints;
-  }
-
-  // Empirical configuration.
-
-  // Define the guaranteed and max fraction of time for the audio thread.
-  // These "duty cycle" values can range from 0 to 1.  A value of 0.5
-  // means the scheduler would give half the time to the thread.
-  // These values have empirically been found to yield good behavior.
-  // Good means that audio performance is high and other threads won't starve.
-  const double kGuaranteedAudioDutyCycle = 0.75;
-  const double kMaxAudioDutyCycle = 0.85;
-
-  // Define constants determining how much time the audio thread can
-  // use in a given time quantum.  All times are in milliseconds.
-
-  // About 128 frames @44.1KHz
-  const double kTimeQuantum = 2.9;
-
-  // Time guaranteed each quantum.
-  const double kAudioTimeNeeded = kGuaranteedAudioDutyCycle * kTimeQuantum;
-
-  // Maximum time each quantum.
-  const double kMaxTimeAllowed = kMaxAudioDutyCycle * kTimeQuantum;
-
-  // Get the conversion factor from milliseconds to absolute time
-  // which is what the time-constraints call needs.
-  double ms_to_abs_time = double(tb_info.denom) / tb_info.numer * 1000000;
-
-  time_constraints.period = kTimeQuantum * ms_to_abs_time;
-  time_constraints.computation = kAudioTimeNeeded * ms_to_abs_time;
-  time_constraints.constraint = kMaxTimeAllowed * ms_to_abs_time;
-  time_constraints.preemptible = 0;
-  return time_constraints;
-}
-
-// Enables time-constraint policy and priority suitable for low-latency,
-// glitch-resistant audio.
-void SetPriorityRealtimeAudio(TimeDelta realtime_period) {
-  // Increase thread priority to real-time.
-
-  // Please note that the thread_policy_set() calls may fail in
-  // rare cases if the kernel decides the system is under heavy load
-  // and is unable to handle boosting the thread priority.
-  // In these cases we just return early and go on with life.
-
-  mach_port_t mach_thread_id =
-      pthread_mach_thread_np(PlatformThread::CurrentHandle().platform_handle());
-
-  // Make thread fixed priority.
-  thread_extended_policy_data_t policy;
-  policy.timeshare = 0;  // Set to 1 for a non-fixed thread.
-  kern_return_t result = thread_policy_set(
-      mach_thread_id, THREAD_EXTENDED_POLICY,
-      reinterpret_cast<thread_policy_t>(&policy), THREAD_EXTENDED_POLICY_COUNT);
-  if (result != KERN_SUCCESS) {
-    MACH_DVLOG(1, result) << "thread_policy_set";
-    return;
-  }
-
-  // Set to relatively high priority.
-  thread_precedence_policy_data_t precedence;
-  precedence.importance = 63;
-  result = thread_policy_set(mach_thread_id, THREAD_PRECEDENCE_POLICY,
-                             reinterpret_cast<thread_policy_t>(&precedence),
-                             THREAD_PRECEDENCE_POLICY_COUNT);
-  if (result != KERN_SUCCESS) {
-    MACH_DVLOG(1, result) << "thread_policy_set";
-    return;
-  }
-
-  // Most important, set real-time constraints.
-
-  thread_time_constraint_policy_data_t time_constraints =
-      GetTimeConstraints(realtime_period);
-
-  result =
-      thread_policy_set(mach_thread_id, THREAD_TIME_CONSTRAINT_POLICY,
-                        reinterpret_cast<thread_policy_t>(&time_constraints),
-                        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
-  MACH_DVLOG_IF(1, result != KERN_SUCCESS, result) << "thread_policy_set";
-  return;
-}
-
-}  // anonymous namespace
-
-// static
-bool PlatformThreadBase::CanChangeThreadType(ThreadType from, ThreadType to) {
-  return true;
-}
-
-namespace internal {
-
-void SetCurrentThreadTypeImpl(ThreadType thread_type,
-                              MessagePumpType pump_type_hint) {
-  // Changing the priority of the main thread causes performance
-  // regressions. https://crbug.com/601270
-  // TODO(1280764): Remove this check. kCompositing is the default on Mac, so
-  // this check is counter intuitive.
-  if ([[NSThread currentThread] isMainThread] &&
-      thread_type >= ThreadType::kCompositing) {
-    DCHECK(thread_type == ThreadType::kDefault ||
-           thread_type == ThreadType::kCompositing);
-    return;
-  }
-
-  ThreadPriorityForTest priority = ThreadPriorityForTest::kNormal;
-  switch (thread_type) {
-    case ThreadType::kBackground:
-      priority = ThreadPriorityForTest::kBackground;
-      pthread_set_qos_class_self_np(QOS_CLASS_BACKGROUND, 0);
-      break;
-    case ThreadType::kUtility:
-      priority = ThreadPriorityForTest::kUtility;
-      pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
-      break;
-    case ThreadType::kResourceEfficient:
-      priority = ThreadPriorityForTest::kUtility;
-      pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
-      break;
-    case ThreadType::kDefault:
-      priority = ThreadPriorityForTest::kNormal;
-      pthread_set_qos_class_self_np(QOS_CLASS_USER_INITIATED, 0);
-      break;
-    case ThreadType::kCompositing:
-      if (g_user_interactive_compositing.load(std::memory_order_relaxed)) {
-        priority = ThreadPriorityForTest::kDisplay;
-        pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
-      } else {
-        priority = ThreadPriorityForTest::kNormal;
-        pthread_set_qos_class_self_np(QOS_CLASS_USER_INITIATED, 0);
-      }
-      break;
-    case ThreadType::kDisplayCritical: {
-      priority = ThreadPriorityForTest::kDisplay;
-      pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
-      break;
-    }
-    case ThreadType::kRealtimeAudio:
-      priority = ThreadPriorityForTest::kRealtimeAudio;
-      SetPriorityRealtimeAudio(GetCurrentThreadRealtimePeriod());
-      DCHECK_EQ([NSThread.currentThread threadPriority], 1.0);
-      break;
-  }
-
-  NSThread.currentThread.threadDictionary[kThreadPriorityForTestKey] =
-      @(static_cast<int>(priority));
-}
-
-}  // namespace internal
-
-// static
-ThreadPriorityForTest PlatformThreadBase::GetCurrentThreadPriorityForTest() {
-  NSNumber* priority = base::mac::ObjCCast<NSNumber>(
-      NSThread.currentThread.threadDictionary[kThreadPriorityForTestKey]);
-
-  if (!priority)
-    return ThreadPriorityForTest::kNormal;
-
-  ThreadPriorityForTest thread_priority =
-      static_cast<ThreadPriorityForTest>(priority.intValue);
-  DCHECK_GE(thread_priority, ThreadPriorityForTest::kBackground);
-  DCHECK_LE(thread_priority, ThreadPriorityForTest::kMaxValue);
-  return thread_priority;
-}
-
-size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
-#if BUILDFLAG(IS_IOS)
-#if BUILDFLAG(USE_BLINK)
-  // For iOS 512kB (the default) isn't sufficient, but using the code
-  // for macOS below will return 8MB. So just be a little more conservative
-  // and return 1MB for now.
-  return 1024 * 1024;
-#else
-  return 0;
-#endif
-#else
-  // The macOS default for a pthread stack size is 512kB.
-  // Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
-  // DEFAULT_STACK_SIZE for this purpose.
-  //
-  // 512kB isn't quite generous enough for some deeply recursive threads that
-  // otherwise request the default stack size by specifying 0. Here, adopt
-  // glibc's behavior as on Linux, which is to use the current stack size
-  // limit (ulimit -s) as the default stack size. See
-  // glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
-  // avoid setting the limit below the macOS default or the minimum usable
-  // stack size, these values are also considered. If any of these values
-  // can't be determined, or if stack size is unlimited (ulimit -s unlimited),
-  // stack_size is left at 0 to get the system default.
-  //
-  // macOS normally only applies ulimit -s to the main thread stack. On
-  // contemporary macOS and Linux systems alike, this value is generally 8MB
-  // or in that neighborhood.
-  size_t default_stack_size = 0;
-  struct rlimit stack_rlimit;
-  if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
-      getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
-      stack_rlimit.rlim_cur != RLIM_INFINITY) {
-    default_stack_size =
-        std::max(std::max(default_stack_size,
-                          static_cast<size_t>(PTHREAD_STACK_MIN)),
-                 static_cast<size_t>(stack_rlimit.rlim_cur));
-  }
-  return default_stack_size;
-#endif
-}
-
-void TerminateOnThread() {
-}
-
-}  // namespace base
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index cb37940..e5a7514 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -17,7 +17,7 @@
 #include <memory>
 #include <tuple>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/compiler_specific.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
@@ -44,8 +44,8 @@
 #endif
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
 #endif
 
 namespace base {
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 2cf7629..13fe7b7 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -425,8 +425,12 @@
 TEST(PlatformThreadTest, CanChangeThreadType) {
 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
   // On Ubuntu, RLIMIT_NICE and RLIMIT_RTPRIO are 0 by default, so we won't be
-  // able to increase priority to any level.
-  constexpr bool kCanIncreasePriority = false;
+  // able to increase priority to any level unless we are root (euid == 0).
+  const bool kCanIncreasePriority = (geteuid() == 0);
+
 #else
   constexpr bool kCanIncreasePriority = true;
 #endif
@@ -489,15 +493,22 @@
                                       ThreadPriorityForTest::kBackground);
   TestPriorityResultingFromThreadType(ThreadType::kUtility,
                                       ThreadPriorityForTest::kUtility);
+
 #if BUILDFLAG(IS_APPLE)
   TestPriorityResultingFromThreadType(ThreadType::kResourceEfficient,
                                       ThreadPriorityForTest::kUtility);
+#elif BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
+  TestPriorityResultingFromThreadType(
+      ThreadType::kResourceEfficient,
+      ThreadPriorityForTest::kResourceEfficient);
 #else
   TestPriorityResultingFromThreadType(ThreadType::kResourceEfficient,
                                       ThreadPriorityForTest::kNormal);
 #endif  // BUILDFLAG(IS_APPLE)
+
   TestPriorityResultingFromThreadType(ThreadType::kDefault,
                                       ThreadPriorityForTest::kNormal);
+
 #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
   TestPriorityResultingFromThreadType(ThreadType::kCompositing,
                                       ThreadPriorityForTest::kDisplay);
diff --git a/base/threading/platform_thread_win.cc b/base/threading/platform_thread_win.cc
index 83a1402..0247ed5 100644
--- a/base/threading/platform_thread_win.cc
+++ b/base/threading/platform_thread_win.cc
@@ -8,7 +8,7 @@
 
 #include <string>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/debug/alias.h"
 #include "base/debug/crash_logging.h"
 #include "base/debug/profiler.h"
@@ -32,8 +32,8 @@
 #include <windows.h>
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
-#include "base/allocator/partition_allocator/starscan/pcscan.h"
-#include "base/allocator/partition_allocator/starscan/stack/stack.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/pcscan.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.h"
 #endif
 
 namespace base {
@@ -45,6 +45,10 @@
              "AboveNormalCompositingBrowserWin",
              base::FEATURE_ENABLED_BY_DEFAULT);
 
+BASE_FEATURE(kBackgroundThreadNormalMemoryPriorityWin,
+             "BackgroundThreadNormalMemoryPriorityWin",
+             base::FEATURE_DISABLED_BY_DEFAULT);
+
 namespace {
 
 // Flag used to set thread priority to |THREAD_PRIORITY_LOWEST| for
@@ -53,6 +57,9 @@
 // Flag used to map Compositing ThreadType |THREAD_PRIORITY_ABOVE_NORMAL| on the
 // UI thread for |kAboveNormalCompositingBrowserWin| Feature.
 std::atomic<bool> g_above_normal_compositing_browser{true};
+// Flag used to set thread memory priority to |MEMORY_PRIORITY_NORMAL| on
+// background threads for the |kBackgroundThreadNormalMemoryPriorityWin|
+// Feature.
+std::atomic<bool> g_background_thread_normal_memory_priority_win{false};
 
 // These values are sometimes returned by ::GetThreadPriority().
 constexpr int kWinDisplayPriority1 = 5;
@@ -425,11 +432,23 @@
   }
   DCHECK_NE(desired_priority, THREAD_PRIORITY_ERROR_RETURN);
 
-  [[maybe_unused]] const BOOL success =
+  [[maybe_unused]] const BOOL cpu_priority_success =
       ::SetThreadPriority(thread_handle, desired_priority);
-  DPLOG_IF(ERROR, !success)
+  DPLOG_IF(ERROR, !cpu_priority_success)
       << "Failed to set thread priority to " << desired_priority;
 
+  if (g_background_thread_normal_memory_priority_win &&
+      desired_priority == THREAD_MODE_BACKGROUND_BEGIN) {
+    // Override the memory priority.
+    MEMORY_PRIORITY_INFORMATION memory_priority{.MemoryPriority =
+                                                    MEMORY_PRIORITY_NORMAL};
+    [[maybe_unused]] const BOOL memory_priority_success =
+        SetThreadInformation(thread_handle, ::ThreadMemoryPriority,
+                             &memory_priority, sizeof(memory_priority));
+    DPLOG_IF(ERROR, !memory_priority_success)
+        << "Set thread memory priority failed.";
+  }
+
   if (!g_use_thread_priority_lowest && thread_type == ThreadType::kBackground) {
     // In a background process, THREAD_MODE_BACKGROUND_BEGIN lowers the memory
     // and I/O priorities but not the CPU priority (kernel bug?). Use
@@ -446,16 +465,7 @@
 }
 
 void SetCurrentThreadQualityOfService(ThreadType thread_type) {
-  // QoS and power throttling were introduced in Win10 1709
-  if (win::GetVersion() < win::Version::WIN10_RS3) {
-    return;
-  }
-
-  static const auto set_thread_information_fn =
-      reinterpret_cast<decltype(&::SetThreadInformation)>(::GetProcAddress(
-          ::GetModuleHandle(L"kernel32.dll"), "SetThreadInformation"));
-  DCHECK(set_thread_information_fn);
-
+  // QoS and power throttling were introduced in Win10 1709.
   bool desire_ecoqos = false;
   switch (thread_type) {
     case ThreadType::kBackground:
@@ -478,10 +488,11 @@
       .StateMask =
           desire_ecoqos ? THREAD_POWER_THROTTLING_EXECUTION_SPEED : 0ul,
   };
-  [[maybe_unused]] const BOOL success = set_thread_information_fn(
+  [[maybe_unused]] const BOOL success = ::SetThreadInformation(
       ::GetCurrentThread(), ::ThreadPowerThrottling,
       &thread_power_throttling_state, sizeof(thread_power_throttling_state));
-  DPLOG_IF(ERROR, !success)
+  // Failure is expected on versions of Windows prior to RS3.
+  DPLOG_IF(ERROR, !success && win::GetVersion() >= win::Version::WIN10_RS3)
       << "Failed to set EcoQoS to " << std::boolalpha << desire_ecoqos;
 }
 
@@ -564,6 +575,9 @@
   g_above_normal_compositing_browser.store(
       FeatureList::IsEnabled(kAboveNormalCompositingBrowserWin),
       std::memory_order_relaxed);
+  g_background_thread_normal_memory_priority_win.store(
+      FeatureList::IsEnabled(kBackgroundThreadNormalMemoryPriorityWin),
+      std::memory_order_relaxed);
 }
 
 // static
diff --git a/base/threading/platform_thread_win_unittest.cc b/base/threading/platform_thread_win_unittest.cc
index 009d66e..c12ee17 100644
--- a/base/threading/platform_thread_win_unittest.cc
+++ b/base/threading/platform_thread_win_unittest.cc
@@ -9,6 +9,10 @@
 #include <array>
 
 #include "base/process/process.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/threading/platform_thread_win.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/threading_features.h"
 #include "base/win/windows_version.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -88,4 +92,53 @@
   internal::AssertMemoryPriority(thread_handle, MEMORY_PRIORITY_NORMAL);
 }
 
+namespace {
+class MemoryPriorityAssertingThreadDelegate
+    : public base::PlatformThread::Delegate {
+ public:
+  explicit MemoryPriorityAssertingThreadDelegate(LONG memory_priority)
+      : memory_priority_(memory_priority) {}
+
+  void ThreadMain() override {
+    PlatformThreadHandle::Handle thread_handle =
+        PlatformThread::CurrentHandle().platform_handle();
+    internal::AssertMemoryPriority(thread_handle, memory_priority_);
+  }
+
+  LONG memory_priority_;
+};
+}  // namespace
+
+// It has been observed (crbug.com/1489467) that memory priority is set to
+// very low on background threads. A possible mitigation is the
+// kBackgroundThreadNormalMemoryPriorityWin experiment, which sets memory
+// priority to NORMAL on background threads at creation. If this test fails,
+// the feature is broken, and it should be investigated whether pages are
+// still being allocated at pri-1 despite it, as shown in the linked bug.
+TEST(PlatformThreadWinTest, NormalPriorityFeatureForBackgroundThreads) {
+  base::test::ScopedFeatureList list;
+  list.InitAndEnableFeature(kBackgroundThreadNormalMemoryPriorityWin);
+  base::InitializePlatformThreadFeatures();
+
+  MemoryPriorityAssertingThreadDelegate delegate{MEMORY_PRIORITY_NORMAL};
+
+  PlatformThreadHandle handle;
+
+  CHECK(PlatformThread::CreateWithType(0, &delegate, &handle,
+                                       ThreadType::kBackground));
+  PlatformThread::Join(handle);
+}
+
+TEST(PlatformThreadWinTest, BackgroundThreadsSetLowMemoryPriority) {
+  base::InitializePlatformThreadFeatures();
+
+  MemoryPriorityAssertingThreadDelegate delegate{MEMORY_PRIORITY_VERY_LOW};
+
+  PlatformThreadHandle handle;
+
+  CHECK(PlatformThread::CreateWithType(0, &delegate, &handle,
+                                       ThreadType::kBackground));
+  PlatformThread::Join(handle);
+}
+
 }  // namespace base
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index fb72de3..f27e5ff 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -8,158 +8,85 @@
 
 #include "base/check_op.h"
 #include "base/debug/leak_annotations.h"
-#include "base/functional/bind.h"
-#include "base/memory/ref_counted.h"
 #include "base/task/sequenced_task_runner.h"
 #include "base/task/thread_pool/thread_pool_instance.h"
 
-namespace base {
+namespace base::internal {
 
-namespace {
+PostTaskAndReplyRelay::PostTaskAndReplyRelay(
+    const Location& from_here,
+    OnceClosure task,
+    OnceClosure reply,
+    scoped_refptr<SequencedTaskRunner> reply_task_runner)
+    : from_here_(from_here),
+      task_(std::move(task)),
+      reply_(std::move(reply)),
+      reply_task_runner_(std::move(reply_task_runner)) {}
 
-class PostTaskAndReplyRelay {
- public:
-  PostTaskAndReplyRelay(const Location& from_here,
-                        OnceClosure task,
-                        OnceClosure reply,
-                        scoped_refptr<SequencedTaskRunner> reply_task_runner)
-      : from_here_(from_here),
-        task_(std::move(task)),
-        reply_(std::move(reply)),
-        reply_task_runner_(std::move(reply_task_runner)) {}
-  PostTaskAndReplyRelay(PostTaskAndReplyRelay&&) = default;
+PostTaskAndReplyRelay::PostTaskAndReplyRelay(PostTaskAndReplyRelay&&) = default;
 
-  PostTaskAndReplyRelay(const PostTaskAndReplyRelay&) = delete;
-  PostTaskAndReplyRelay& operator=(const PostTaskAndReplyRelay&) = delete;
-
-  // It is important that |reply_| always be deleted on the origin sequence
-  // (|reply_task_runner_|) since its destructor can be affine to it. More
-  // sutbly, it is also important that |task_| be destroyed on the origin
-  // sequence when it fails to run. This is because |task_| can own state which
-  // is affine to |reply_task_runner_| and was intended to be handed to
-  // |reply_|, e.g. https://crbug.com/829122. Since |task_| already needs to
-  // support deletion on the origin sequence (since the initial PostTask can
-  // always fail), it's safer to delete it there when PostTask succeeds but
-  // |task_| is later prevented from running.
-  //
-  // PostTaskAndReplyRelay's move semantics along with logic in this destructor
-  // enforce the above semantics in all the following cases :
-  //  1) Posting |task_| fails right away on the origin sequence:
-  //    a) |reply_task_runner_| is null (i.e. during late shutdown);
-  //    b) |reply_task_runner_| is set.
-  //  2) ~PostTaskAndReplyRelay() runs on the destination sequence:
-  //    a) RunTaskAndPostReply() is cancelled before running;
-  //    b) RunTaskAndPostReply() is skipped on shutdown;
-  //    c) Posting RunReply() fails.
-  //  3) ~PostTaskAndReplyRelay() runs on the origin sequence:
-  //    a) RunReply() is cancelled before running;
-  //    b) RunReply() is skipped on shutdown;
-  //    c) The DeleteSoon() posted by (2) runs.
-  //  4) ~PostTaskAndReplyRelay() should no-op:
-  //    a) This relay was moved to another relay instance;
-  //    b) RunReply() ran and completed this relay's mandate.
-  ~PostTaskAndReplyRelay() {
-    // Case 1a and 4a:
-    if (!reply_task_runner_) {
-      DCHECK_EQ(task_.is_null(), reply_.is_null());
-      return;
-    }
-
-    // Case 4b:
-    if (!reply_) {
-      DCHECK(!task_);
-      return;
-    }
-
-    // Case 2:
-    if (!reply_task_runner_->RunsTasksInCurrentSequence()) {
-      DCHECK(reply_);
-      // Allow this task to be leaked on shutdown even if `reply_task_runner_`
-      // has the TaskShutdownBehaviour::BLOCK_SHUTDOWN trait. Without `fizzler`,
-      // such a task runner would DCHECK when posting to `reply_task_runner_`
-      // after shutdown. Ignore this DCHECK as the poster isn't in control when
-      // its Callback is destroyed late into shutdown. Ref. crbug.com/1375270.
-      base::ThreadPoolInstance::ScopedFizzleBlockShutdownTasks fizzler;
-
-      SequencedTaskRunner* reply_task_runner_raw = reply_task_runner_.get();
-      auto relay_to_delete =
-          std::make_unique<PostTaskAndReplyRelay>(std::move(*this));
-      // In case 2c, posting the DeleteSoon will also fail and |relay_to_delete|
-      // will be leaked. This only happens during shutdown and leaking is better
-      // than thread-unsafe execution.
-      ANNOTATE_LEAKING_OBJECT_PTR(relay_to_delete.get());
-      reply_task_runner_raw->DeleteSoon(from_here_, std::move(relay_to_delete));
-      return;
-    }
-
-    // Case 1b and 3: Any remaining state will be destroyed synchronously at the
-    // end of this scope.
+// It is important that `reply_` always be deleted on the origin sequence
+// (`reply_task_runner_`) since its destructor can be affine to it. More
+// subtly, it is also important that `task_` be destroyed on the origin
+// sequence when it fails to run. This is because `task_` can own state which
+// is affine to `reply_task_runner_` and was intended to be handed to
+// `reply_`, e.g. https://crbug.com/829122. Since `task_` already needs to
+// support deletion on the origin sequence (since the initial PostTask can
+// always fail), it's safer to delete it there when PostTask succeeds but
+// `task_` is later prevented from running.
+//
+// PostTaskAndReplyRelay's move semantics along with logic in this destructor
+// enforce the above semantics in all the following cases :
+//  1) Posting `task_` fails right away on the origin sequence:
+//    a) `reply_task_runner_` is null (i.e. during late shutdown);
+//    b) `reply_task_runner_` is set.
+//  2) ~PostTaskAndReplyRelay() runs on the destination sequence:
+//    a) RunTaskAndPostReply() is cancelled before running;
+//    b) RunTaskAndPostReply() is skipped on shutdown;
+//    c) Posting RunReply() fails.
+//  3) ~PostTaskAndReplyRelay() runs on the origin sequence:
+//    a) RunReply() is cancelled before running;
+//    b) RunReply() is skipped on shutdown;
+//    c) The DeleteSoon() posted by (2) runs.
+//  4) ~PostTaskAndReplyRelay() should no-op:
+//    a) This relay was moved to another relay instance;
+//    b) RunReply() ran and completed this relay's mandate.
+PostTaskAndReplyRelay::~PostTaskAndReplyRelay() {
+  // Case 1a and 4a:
+  if (!reply_task_runner_) {
+    DCHECK_EQ(task_.is_null(), reply_.is_null());
+    return;
   }
 
-  // No assignment operator because of const member.
-  PostTaskAndReplyRelay& operator=(PostTaskAndReplyRelay&&) = delete;
-
-  // Static function is used because it is not possible to bind a method call to
-  // a non-pointer type.
-  static void RunTaskAndPostReply(PostTaskAndReplyRelay relay) {
-    DCHECK(relay.task_);
-    std::move(relay.task_).Run();
-
-    // Keep a pointer to the reply TaskRunner for the PostTask() call before
-    // |relay| is moved into a callback.
-    SequencedTaskRunner* reply_task_runner_raw = relay.reply_task_runner_.get();
-
-    const Location from_here = relay.from_here_;
-    reply_task_runner_raw->PostTask(
-        from_here,
-        BindOnce(&PostTaskAndReplyRelay::RunReply, std::move(relay)));
+  // Case 4b:
+  if (!reply_) {
+    DCHECK(!task_);
+    return;
   }
 
- private:
-  // Static function is used because it is not possible to bind a method call to
-  // a non-pointer type.
-  static void RunReply(PostTaskAndReplyRelay relay) {
-    DCHECK(!relay.task_);
-    DCHECK(relay.reply_);
-    std::move(relay.reply_).Run();
+  // Case 2:
+  if (!reply_task_runner_->RunsTasksInCurrentSequence()) {
+    DCHECK(reply_);
+    // Allow this task to be leaked on shutdown even if `reply_task_runner_`
+    // has the TaskShutdownBehavior::BLOCK_SHUTDOWN trait. Without `fizzler`,
+    // such a task runner would DCHECK when posting to `reply_task_runner_`
+    // after shutdown. Ignore this DCHECK as the poster isn't in control when
+    // its Callback is destroyed late into shutdown. Ref. crbug.com/1375270.
+    base::ThreadPoolInstance::ScopedFizzleBlockShutdownTasks fizzler;
+
+    SequencedTaskRunner* reply_task_runner_raw = reply_task_runner_.get();
+    auto relay_to_delete =
+        std::make_unique<PostTaskAndReplyRelay>(std::move(*this));
+    // In case 2c, posting the DeleteSoon will also fail and `relay_to_delete`
+    // will be leaked. This only happens during shutdown and leaking is better
+    // than thread-unsafe execution.
+    ANNOTATE_LEAKING_OBJECT_PTR(relay_to_delete.get());
+    reply_task_runner_raw->DeleteSoon(from_here_, std::move(relay_to_delete));
+    return;
   }
 
-  const Location from_here_;
-  OnceClosure task_;
-  OnceClosure reply_;
-  // Not const to allow moving.
-  scoped_refptr<SequencedTaskRunner> reply_task_runner_;
-};
-
-}  // namespace
-
-namespace internal {
-
-bool PostTaskAndReplyImpl::PostTaskAndReply(const Location& from_here,
-                                            OnceClosure task,
-                                            OnceClosure reply) {
-  DCHECK(task) << from_here.ToString();
-  DCHECK(reply) << from_here.ToString();
-
-  const bool has_sequenced_context = SequencedTaskRunner::HasCurrentDefault();
-
-  const bool post_task_success = PostTask(
-      from_here, BindOnce(&PostTaskAndReplyRelay::RunTaskAndPostReply,
-                          PostTaskAndReplyRelay(
-                              from_here, std::move(task), std::move(reply),
-                              has_sequenced_context
-                                  ? SequencedTaskRunner::GetCurrentDefault()
-                                  : nullptr)));
-
-  // PostTaskAndReply() requires a SequencedTaskRunner::CurrentDefaultHandle to
-  // post the reply.  Having no SequencedTaskRunner::CurrentDefaultHandle is
-  // allowed when posting the task fails, to simplify calls during shutdown
-  // (https://crbug.com/922938).
-  CHECK(has_sequenced_context || !post_task_success);
-
-  return post_task_success;
+  // Case 1b and 3: Any remaining state will be destroyed synchronously at the
+  // end of this scope.
 }
 
-}  // namespace internal
-
-}  // namespace base
+}  // namespace base::internal
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index 9e3fda1..4f0af4e 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -2,46 +2,112 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// This file contains the implementation for TaskRunner::PostTaskAndReply.
+// This file defines a common helper encapsulating the tricky bits of
+// implementing PostTaskAndReply(). These tricky bits are specifically around
+// handling of the reply callback and ensuring it is deleted on the correct
+// sequence.
 
 #ifndef BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
 #define BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
 
+#include <utility>
+
 #include "base/base_export.h"
+#include "base/check_op.h"
+#include "base/functional/bind.h"
 #include "base/functional/callback.h"
 #include "base/location.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/task/sequenced_task_runner.h"
 
-namespace base {
-namespace internal {
+namespace base::internal {
 
-// Inherit from this in a class that implements PostTask to send a task to a
-// custom execution context.
-//
-// If you're looking for a concrete implementation of PostTaskAndReply, you
-// probably want a base::TaskRunner (typically obtained from
-// base/task/thread_pool.h).
-class BASE_EXPORT PostTaskAndReplyImpl {
+class BASE_EXPORT PostTaskAndReplyRelay {
  public:
-  virtual ~PostTaskAndReplyImpl() = default;
-
-  // Posts |task| by calling PostTask(). On completion, posts |reply| to the
-  // origin sequence. Can only be called when
-  // SequencedTaskRunner::HasCurrentDefault(). Each callback is deleted
-  // synchronously after running, or scheduled for asynchronous deletion on the
-  // origin sequence if it can't run (e.g. if a TaskRunner skips it on
-  // shutdown). See SequencedTaskRunner::DeleteSoon() for when objects scheduled
-  // for asynchronous deletion can be leaked. Note: All //base task posting APIs
-  // require callbacks to support deletion on the posting sequence if they can't
-  // be scheduled.
-  bool PostTaskAndReply(const Location& from_here,
+  PostTaskAndReplyRelay(const Location& from_here,
                         OnceClosure task,
-                        OnceClosure reply);
+                        OnceClosure reply,
+                        scoped_refptr<SequencedTaskRunner> reply_task_runner);
+
+  PostTaskAndReplyRelay(PostTaskAndReplyRelay&&);
+  // No assignment operator because of const member.
+  PostTaskAndReplyRelay& operator=(PostTaskAndReplyRelay&&) = delete;
+
+  PostTaskAndReplyRelay(const PostTaskAndReplyRelay&) = delete;
+  PostTaskAndReplyRelay& operator=(const PostTaskAndReplyRelay&) = delete;
+
+  ~PostTaskAndReplyRelay();
+
+  // Static function is used because it is not possible to bind a method call to
+  // a non-pointer type.
+  static void RunTaskAndPostReply(PostTaskAndReplyRelay relay) {
+    DCHECK(relay.task_);
+    std::move(relay.task_).Run();
+
+    // Keep a pointer to the reply TaskRunner for the PostTask() call before
+    // |relay| is moved into a callback.
+    SequencedTaskRunner* reply_task_runner_raw = relay.reply_task_runner_.get();
+
+    const Location from_here = relay.from_here_;
+    reply_task_runner_raw->PostTask(
+        from_here,
+        BindOnce(&PostTaskAndReplyRelay::RunReply, std::move(relay)));
+  }
 
  private:
-  virtual bool PostTask(const Location& from_here, OnceClosure task) = 0;
+  // Static function is used because it is not possible to bind a method call to
+  // a non-pointer type.
+  static void RunReply(PostTaskAndReplyRelay relay) {
+    DCHECK(!relay.task_);
+    DCHECK(relay.reply_);
+    std::move(relay.reply_).Run();
+  }
+
+  const Location from_here_;
+  OnceClosure task_;
+  OnceClosure reply_;
+  // Not const to allow moving.
+  scoped_refptr<SequencedTaskRunner> reply_task_runner_;
 };
 
-}  // namespace internal
-}  // namespace base
+// Precondition: `SequencedTaskRunner::HasCurrentDefault()` must be true.
+//
+// Posts `task` by calling `task_poster`. On completion, posts `reply` to the
+// origin sequence. Each callback is deleted synchronously after running, or
+// scheduled for asynchronous deletion on the origin sequence if it can't run
+// (e.g. if a TaskRunner skips it on shutdown). In this case, the callback may
+// leak: see `SequencedTaskRunner::DeleteSoon()` for details about when objects
+// scheduled for asynchronous deletion can be leaked.
+//
+// Note: All //base task posting APIs require callbacks to support deletion on
+// the posting sequence if they can't be scheduled.
+template <typename TaskPoster>
+bool PostTaskAndReplyImpl(TaskPoster task_poster,
+                          const Location& from_here,
+                          OnceClosure task,
+                          OnceClosure reply) {
+  DCHECK(task) << from_here.ToString();
+  DCHECK(reply) << from_here.ToString();
+
+  const bool has_sequenced_context = SequencedTaskRunner::HasCurrentDefault();
+
+  const bool post_task_success = task_poster(
+      from_here, BindOnce(&PostTaskAndReplyRelay::RunTaskAndPostReply,
+                          PostTaskAndReplyRelay(
+                              from_here, std::move(task), std::move(reply),
+                              has_sequenced_context
+                                  ? SequencedTaskRunner::GetCurrentDefault()
+                                  : nullptr)));
+
+  // PostTaskAndReply() requires a SequencedTaskRunner::CurrentDefaultHandle to
+  // post the reply.  Having no SequencedTaskRunner::CurrentDefaultHandle is
+  // allowed when posting the task fails, to simplify calls during shutdown
+  // (https://crbug.com/922938).
+  CHECK(has_sequenced_context || !post_task_success);
+
+  return post_task_success;
+}
+
+}  // namespace base::internal
 
 #endif  // BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
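Illustrative sketch (not part of the patch): the templated PostTaskAndReplyImpl()
above replaces the virtual-PostTask() base class, so a caller now passes any
invocable `task_poster` that forwards to its own posting primitive. A minimal
sketch, assuming a hypothetical `MyTaskRunner` class (in namespace base) with a
`PostTask(const Location&, OnceClosure)` member:

    bool MyTaskRunner::PostTaskAndReply(const Location& from_here,
                                        OnceClosure task,
                                        OnceClosure reply) {
      // The lambda plays the role previously filled by the virtual
      // PostTaskAndReplyImpl::PostTask() override.
      return internal::PostTaskAndReplyImpl(
          [this](const Location& location, OnceClosure client_task) {
            return PostTask(location, std::move(client_task));
          },
          from_here, std::move(task), std::move(reply));
    }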
diff --git a/base/threading/post_task_and_reply_impl_unittest.cc b/base/threading/post_task_and_reply_impl_unittest.cc
index e90ea9f..024694d 100644
--- a/base/threading/post_task_and_reply_impl_unittest.cc
+++ b/base/threading/post_task_and_reply_impl_unittest.cc
@@ -11,6 +11,7 @@
 #include "base/functional/callback_helpers.h"
 #include "base/memory/raw_ptr.h"
 #include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
 #include "base/task/sequenced_task_runner.h"
 #include "base/test/test_mock_time_task_runner.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -18,29 +19,14 @@
 
 using ::testing::_;
 
-namespace base {
-namespace internal {
+namespace base::internal {
 
 namespace {
 
-class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
- public:
-  explicit PostTaskAndReplyTaskRunner(TaskRunner* destination)
-      : destination_(destination) {}
-
- private:
-  bool PostTask(const Location& from_here, OnceClosure task) override {
-    return destination_->PostTask(from_here, std::move(task));
-  }
-
-  // Non-owning.
-  const raw_ptr<TaskRunner> destination_;
-};
-
 class ObjectToDelete : public RefCounted<ObjectToDelete> {
  public:
-  // |delete_flag| is set to true when this object is deleted
-  ObjectToDelete(bool* delete_flag) : delete_flag_(delete_flag) {
+  // `delete_flag` is set to true when this object is deleted
+  explicit ObjectToDelete(bool* delete_flag) : delete_flag_(delete_flag) {
     EXPECT_FALSE(*delete_flag_);
   }
 
@@ -63,6 +49,13 @@
 
   MOCK_METHOD1(Task, void(scoped_refptr<ObjectToDelete>));
   MOCK_METHOD1(Reply, void(scoped_refptr<ObjectToDelete>));
+
+  WeakPtr<MockObject> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }
+
+  void InvalidateWeakPtrs() { weak_factory_.InvalidateWeakPtrs(); }
+
+ private:
+  WeakPtrFactory<MockObject> weak_factory_{this};
 };
 
 class MockRunsTasksInCurrentSequenceTaskRunner : public TestMockTimeTaskRunner {
@@ -119,21 +112,31 @@
  protected:
   PostTaskAndReplyImplTest() = default;
 
-  bool PostTaskAndReplyToMockObject() {
-    return PostTaskAndReplyTaskRunner(post_runner_.get())
-        .PostTaskAndReply(
-            FROM_HERE,
-            BindOnce(&MockObject::Task, Unretained(&mock_object_),
-                     MakeRefCounted<ObjectToDelete>(&delete_task_flag_)),
-            BindOnce(&MockObject::Reply, Unretained(&mock_object_),
-                     MakeRefCounted<ObjectToDelete>(&delete_reply_flag_)));
+  bool PostTaskAndReplyToMockObject(bool task_uses_weak_ptr = false) {
+    OnceClosure task;
+    if (task_uses_weak_ptr) {
+      task = BindOnce(&MockObject::Task, mock_object_.GetWeakPtr(),
+                      MakeRefCounted<ObjectToDelete>(&delete_task_flag_));
+    } else {
+      task = BindOnce(&MockObject::Task, Unretained(&mock_object_),
+                      MakeRefCounted<ObjectToDelete>(&delete_task_flag_));
+    }
+
+    return PostTaskAndReplyImpl(
+        [this](const Location& location, OnceClosure task) {
+          return post_runner_->PostTask(location, std::move(task));
+        },
+        FROM_HERE, std::move(task),
+        BindOnce(&MockObject::Reply, Unretained(&mock_object_),
+                 MakeRefCounted<ObjectToDelete>(&delete_reply_flag_)));
   }
 
-  void ExpectPostTaskAndReplyToMockObjectSucceeds() {
+  void ExpectPostTaskAndReplyToMockObjectSucceeds(
+      bool task_uses_weak_ptr = false) {
     // Expect the post to succeed.
-    EXPECT_TRUE(PostTaskAndReplyToMockObject());
+    EXPECT_TRUE(PostTaskAndReplyToMockObject(task_uses_weak_ptr));
 
-    // Expect the first task to be posted to |post_runner_|.
+    // Expect the first task to be posted to `post_runner_`.
     EXPECT_TRUE(post_runner_->HasPendingTask());
     EXPECT_FALSE(reply_runner_->HasPendingTask());
     EXPECT_FALSE(delete_task_flag_);
@@ -162,7 +165,7 @@
   EXPECT_TRUE(delete_task_flag_);
   EXPECT_FALSE(delete_reply_flag_);
 
-  // Expect the reply to be posted to |reply_runner_|.
+  // Expect the reply to be posted to `reply_runner_`.
   EXPECT_FALSE(post_runner_->HasPendingTask());
   EXPECT_TRUE(reply_runner_->HasPendingTask());
 
@@ -173,7 +176,7 @@
   // The reply should have been deleted right after being run.
   EXPECT_TRUE(delete_reply_flag_);
 
-  // Expect no pending task in |post_runner_| and |reply_runner_|.
+  // Expect no pending task in `post_runner_` and `reply_runner_`.
   EXPECT_FALSE(post_runner_->HasPendingTask());
   EXPECT_FALSE(reply_runner_->HasPendingTask());
 }
@@ -181,15 +184,15 @@
 TEST_F(PostTaskAndReplyImplTest, TaskDoesNotRun) {
   ExpectPostTaskAndReplyToMockObjectSucceeds();
 
-  // Clear the |post_runner_|. Both callbacks should be scheduled for deletion
-  // on the |reply_runner_|.
+  // Clear the `post_runner_`. Both callbacks should be scheduled for deletion
+  // on the `reply_runner_`.
   post_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
   EXPECT_FALSE(post_runner_->HasPendingTask());
   EXPECT_TRUE(reply_runner_->HasPendingTask());
   EXPECT_FALSE(delete_task_flag_);
   EXPECT_FALSE(delete_reply_flag_);
 
-  // Run the |reply_runner_|. Both callbacks should be deleted.
+  // Run the `reply_runner_`. Both callbacks should be deleted.
   reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
   EXPECT_TRUE(delete_task_flag_);
   EXPECT_TRUE(delete_reply_flag_);
@@ -205,11 +208,11 @@
   EXPECT_TRUE(delete_task_flag_);
   EXPECT_FALSE(delete_reply_flag_);
 
-  // Expect the reply to be posted to |reply_runner_|.
+  // Expect the reply to be posted to `reply_runner_`.
   EXPECT_FALSE(post_runner_->HasPendingTask());
   EXPECT_TRUE(reply_runner_->HasPendingTask());
 
-  // Clear the |reply_runner_| queue without running tasks. The reply callback
+  // Clear the `reply_runner_` queue without running tasks. The reply callback
   // should be deleted.
   reply_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
   EXPECT_TRUE(delete_task_flag_);
@@ -231,5 +234,35 @@
   EXPECT_TRUE(delete_reply_flag_);
 }
 
-}  // namespace internal
-}  // namespace base
+// Demonstrate that even if a task is not run because a weak pointer is
+// invalidated, the reply still runs.
+TEST_F(PostTaskAndReplyImplTest, ReplyStillRunsAfterInvalidatedWeakPtrTask) {
+  ExpectPostTaskAndReplyToMockObjectSucceeds(/*task_uses_weak_ptr=*/true);
+
+  // The task will not run when the provided weak pointer is invalidated.
+  EXPECT_CALL(mock_object_, Task(_)).Times(0);
+  mock_object_.InvalidateWeakPtrs();
+  post_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+  testing::Mock::VerifyAndClear(&mock_object_);
+  // The task should have been deleted as part of dropping the run because of
+  // invalidated weak pointer.
+  EXPECT_TRUE(delete_task_flag_);
+  EXPECT_FALSE(delete_reply_flag_);
+
+  // Still expect a reply to be posted to `reply_runner_`.
+  EXPECT_FALSE(post_runner_->HasPendingTask());
+  EXPECT_TRUE(reply_runner_->HasPendingTask());
+
+  EXPECT_CALL(mock_object_, Reply(_)).Times(1);
+  reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+  testing::Mock::VerifyAndClear(&mock_object_);
+  EXPECT_TRUE(delete_task_flag_);
+  // The reply should have been deleted right after being run.
+  EXPECT_TRUE(delete_reply_flag_);
+
+  // Expect no pending task in `post_runner_` and `reply_runner_`.
+  EXPECT_FALSE(post_runner_->HasPendingTask());
+  EXPECT_FALSE(reply_runner_->HasPendingTask());
+}
+
+}  // namespace base::internal
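Illustrative caller-level sketch (not part of the patch) of the property the new
test pins down: a task bound to an invalidated WeakPtr is skipped, yet its reply
is still posted back to the originating sequence. `worker` is assumed to be a
scoped_refptr<SequencedTaskRunner> and `Controller` a hypothetical class with
void-returning methods and a WeakPtrFactory:

    worker->PostTaskAndReply(
        FROM_HERE,
        BindOnce(&Controller::DoWork, controller->GetWeakPtr()),
        BindOnce(&Controller::OnWorkDone, controller->GetWeakPtr()));
    // If the weak pointers are invalidated before DoWork runs, DoWork becomes
    // a no-op, but OnWorkDone is still posted to the current sequence (where
    // it is likewise a no-op if the WeakPtr is still invalid).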
diff --git a/base/threading/sequence_bound.h b/base/threading/sequence_bound.h
index cfaa04c..fb7b63d 100644
--- a/base/threading/sequence_bound.h
+++ b/base/threading/sequence_bound.h
@@ -648,7 +648,7 @@
 
   template <typename ReturnType>
   using AsyncCallWithBoundArgsBuilder = typename std::conditional<
-      std::is_void<ReturnType>::value,
+      std::is_void_v<ReturnType>,
       AsyncCallWithBoundArgsBuilderVoid,
       AsyncCallWithBoundArgsBuilderDefault<ReturnType>>::type;
 
diff --git a/base/threading/sequence_local_storage_map.cc b/base/threading/sequence_local_storage_map.cc
index 136f476..b6fc41a 100644
--- a/base/threading/sequence_local_storage_map.cc
+++ b/base/threading/sequence_local_storage_map.cc
@@ -40,60 +40,84 @@
   return current_sequence_local_storage != nullptr;
 }
 
-void* SequenceLocalStorageMap::Get(int slot_id) {
-  const auto it = sls_map_.find(slot_id);
-  if (it == sls_map_.end())
-    return nullptr;
-  return it->second.value();
+bool SequenceLocalStorageMap::Has(int slot_id) const {
+  return const_cast<SequenceLocalStorageMap*>(this)->Get(slot_id) != nullptr;
 }
 
-void SequenceLocalStorageMap::Set(
+void SequenceLocalStorageMap::Reset(int slot_id) {
+  sls_map_.erase(slot_id);
+}
+
+SequenceLocalStorageMap::Value* SequenceLocalStorageMap::Get(int slot_id) {
+  auto it = sls_map_.find(slot_id);
+  if (it != sls_map_.end()) {
+    return it->second.get();
+  }
+  return nullptr;
+}
+
+SequenceLocalStorageMap::Value* SequenceLocalStorageMap::Set(
     int slot_id,
     SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair) {
   auto it = sls_map_.find(slot_id);
 
   if (it == sls_map_.end())
-    sls_map_.emplace(slot_id, std::move(value_destructor_pair));
+    it = sls_map_.emplace(slot_id, std::move(value_destructor_pair)).first;
   else
     it->second = std::move(value_destructor_pair);
 
   // The maximum number of entries in the map is 256. This can be adjusted, but
   // will require reviewing the choice of data structure for the map.
   DCHECK_LE(sls_map_.size(), 256U);
+  return it->second.get();
 }
 
+SequenceLocalStorageMap::ValueDestructorPair::ValueDestructorPair()
+    : destructor_(nullptr) {}
+
 SequenceLocalStorageMap::ValueDestructorPair::ValueDestructorPair(
-    void* value,
+    ExternalValue value,
     DestructorFunc* destructor)
-    : value_(value), destructor_(destructor) {}
+    : value_{.external_value = std::move(value)}, destructor_(destructor) {}
+
+SequenceLocalStorageMap::ValueDestructorPair::ValueDestructorPair(
+    InlineValue value,
+    DestructorFunc* destructor)
+    : value_{.inline_value = std::move(value)}, destructor_(destructor) {}
 
 SequenceLocalStorageMap::ValueDestructorPair::~ValueDestructorPair() {
-  if (value_)
-    destructor_(value_);
+  if (destructor_) {
+    destructor_(&value_);
+  }
 }
 
 SequenceLocalStorageMap::ValueDestructorPair::ValueDestructorPair(
     ValueDestructorPair&& value_destructor_pair)
     : value_(value_destructor_pair.value_),
       destructor_(value_destructor_pair.destructor_) {
-  value_destructor_pair.value_ = nullptr;
+  value_destructor_pair.destructor_ = nullptr;
 }
 
 SequenceLocalStorageMap::ValueDestructorPair&
 SequenceLocalStorageMap::ValueDestructorPair::operator=(
     ValueDestructorPair&& value_destructor_pair) {
+  if (this == &value_destructor_pair) {
+    return *this;
+  }
   // Destroy |value_| before overwriting it with a new value.
-  if (value_)
-    destructor_(value_);
-
+  if (destructor_) {
+    destructor_(&value_);
+  }
   value_ = value_destructor_pair.value_;
-  destructor_ = value_destructor_pair.destructor_;
-
-  value_destructor_pair.value_ = nullptr;
+  destructor_ = std::exchange(value_destructor_pair.destructor_, nullptr);
 
   return *this;
 }
 
+SequenceLocalStorageMap::ValueDestructorPair::operator bool() const {
+  return destructor_ != nullptr;
+}
+
 ScopedSetSequenceLocalStorageMapForCurrentThread::
     ScopedSetSequenceLocalStorageMapForCurrentThread(
         SequenceLocalStorageMap* sequence_local_storage)
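With the value now stored in a union, `destructor_` rather than `value_` acts as
the "engaged" flag: a moved-from ValueDestructorPair holds a null destructor,
converts to false, and its own destruction is a no-op. A minimal sketch of that
convention (illustrative only, not part of the patch):

    using base::internal::SequenceLocalStorageMap;

    SequenceLocalStorageMap::ExternalValue value;
    value.emplace(new int(42));
    SequenceLocalStorageMap::ValueDestructorPair pair(
        std::move(value), SequenceLocalStorageMap::MakeExternalDestructor<
                              int, std::default_delete<int>>());

    SequenceLocalStorageMap::ValueDestructorPair other = std::move(pair);
    // `pair` is now disengaged: bool(pair) == false and it destroys nothing.
    // `other` owns the int and deletes it via the stored destructor function.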
diff --git a/base/threading/sequence_local_storage_map.h b/base/threading/sequence_local_storage_map.h
index ff225db..b58a992 100644
--- a/base/threading/sequence_local_storage_map.h
+++ b/base/threading/sequence_local_storage_map.h
@@ -9,6 +9,7 @@
 #include "base/base_export.h"
 #include "base/containers/flat_map.h"
 #include "base/memory/raw_ptr_exclusion.h"
+#include "third_party/abseil-cpp/absl/meta/type_traits.h"
 
 namespace base {
 namespace internal {
@@ -41,13 +42,93 @@
   // dereference SequenceLocalStorageSlots.
   static bool IsSetForCurrentThread();
 
-  // Holds a pointer to a value alongside a destructor for this pointer.
-  // Calls the destructor on the value upon destruction.
+  // A `Value` holds an `ExternalValue` or an `InlineValue`. `InlineValue` is
+  // most efficient, but can only be used with types whose size and alignment
+  // are no larger than a pointer's and that are trivially relocatable.
+  struct BASE_EXPORT ExternalValue {
+    // `value_` is not a raw_ptr<...> for performance reasons
+    // (based on analysis of sampling profiler data and tab_search:top100:2020).
+    RAW_PTR_EXCLUSION void* value;
+
+    template <class T>
+    void emplace(T* ptr) {
+      value = static_cast<void*>(ptr);
+    }
+
+    template <class T, class Deleter>
+    void Destroy() {
+      Deleter()(&value_as<T>());
+    }
+
+    template <typename T>
+    T& value_as() {
+      return *static_cast<T*>(value);
+    }
+
+    template <typename T>
+    const T& value_as() const {
+      return *static_cast<const T*>(value);
+    }
+  };
+
+  struct BASE_EXPORT alignas(sizeof(void*)) InlineValue {
+    // Holds a T if small.
+    char bytes[sizeof(void*)];
+
+    template <class T, class... Args>
+    void emplace(Args&&... args) {
+      static_assert(sizeof(T) <= sizeof(void*),
+                    "Type T is too big for storage inline.");
+      static_assert(absl::is_trivially_relocatable<T>(),
+                    "T doesn't qualify as trivially relocatable, which "
+                    "precludes it from storage inline.");
+      static_assert(std::alignment_of<T>::value <= sizeof(T),
+                    "Type T has alignment requirements that preclude its "
+                    "storage inline.");
+      new (&bytes) T(std::forward<Args>(args)...);
+    }
+
+    template <class T>
+    void Destroy() {
+      value_as<T>().~T();
+    }
+
+    template <typename T>
+    T& value_as() {
+      return *reinterpret_cast<T*>(bytes);
+    }
+
+    template <typename T>
+    const T& value_as() const {
+      return *reinterpret_cast<const T*>(bytes);
+    }
+  };
+
+  // There's no need for a tagged union (absl::variant) since the value
+  // type is implicitly determined by the type T being stored.
+  union Value {
+    ExternalValue external_value;
+    InlineValue inline_value;
+  };
+
+  using DestructorFunc = void(Value*);
+
+  template <class T, class Deleter>
+  static DestructorFunc* MakeExternalDestructor() {
+    return [](Value* value) { value->external_value.Destroy<T, Deleter>(); };
+  }
+  template <class T>
+  static DestructorFunc* MakeInlineDestructor() {
+    return [](Value* value) { value->inline_value.Destroy<T>(); };
+  }
+
+  // Holds a value alongside its destructor. Calls the destructor on the
+  // value upon destruction.
   class BASE_EXPORT ValueDestructorPair {
    public:
-    using DestructorFunc = void(void*);
-
-    ValueDestructorPair(void* value, DestructorFunc* destructor);
+    ValueDestructorPair();
+    ValueDestructorPair(ExternalValue value, DestructorFunc* destructor);
+    ValueDestructorPair(InlineValue value, DestructorFunc* destructor);
 
     ValueDestructorPair(const ValueDestructorPair&) = delete;
     ValueDestructorPair& operator=(const ValueDestructorPair&) = delete;
@@ -58,21 +139,35 @@
 
     ValueDestructorPair& operator=(ValueDestructorPair&& value_destructor_pair);
 
-    void* value() const { return value_; }
+    explicit operator bool() const;
+
+    Value* get() { return destructor_ != nullptr ? &value_ : nullptr; }
+    const Value* get() const {
+      return destructor_ != nullptr ? &value_ : nullptr;
+    }
+
+    Value* operator->() { return get(); }
+    const Value* operator->() const { return get(); }
 
    private:
-    // `value_` and `destructor_` are not a raw_ptr<...> for performance reasons
+    Value value_;
+    // `destructor_` is not a raw_ptr<...> for performance reasons
     // (based on analysis of sampling profiler data and tab_search:top100:2020).
-    RAW_PTR_EXCLUSION void* value_;
     RAW_PTR_EXCLUSION DestructorFunc* destructor_;
   };
 
+  // Returns true if a value is stored in |slot_id|.
+  bool Has(int slot_id) const;
+
+  // Resets the value stored in |slot_id|.
+  void Reset(int slot_id);
+
   // Returns the value stored in |slot_id| or nullptr if no value was stored.
-  void* Get(int slot_id);
+  Value* Get(int slot_id);
 
   // Stores |value_destructor_pair| in |slot_id|. Overwrites and destroys any
   // previously stored value.
-  void Set(int slot_id, ValueDestructorPair value_destructor_pair);
+  Value* Set(int slot_id, ValueDestructorPair value_destructor_pair);
 
  private:
   // Map from slot id to ValueDestructorPair.
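A short sketch of how the two storage flavors are populated, mirroring the
helpers in the unit test below rather than any new API in this header
(illustrative only; assumes code inside namespace base):

    // Inline: T must fit in a pointer and be trivially relocatable.
    internal::SequenceLocalStorageMap::InlineValue small;
    small.emplace<int>(5);
    internal::SequenceLocalStorageMap::ValueDestructorPair inline_pair(
        std::move(small),
        internal::SequenceLocalStorageMap::MakeInlineDestructor<int>());

    // External: works for any T; the map stores a pointer plus a type-erased
    // deleter.
    internal::SequenceLocalStorageMap::ExternalValue external;
    external.emplace(new std::string("sequence-local"));
    internal::SequenceLocalStorageMap::ValueDestructorPair external_pair(
        std::move(external),
        internal::SequenceLocalStorageMap::MakeExternalDestructor<
            std::string, std::default_delete<std::string>>());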
diff --git a/base/threading/sequence_local_storage_map_unittest.cc b/base/threading/sequence_local_storage_map_unittest.cc
index 4869427..cd9afe3 100644
--- a/base/threading/sequence_local_storage_map_unittest.cc
+++ b/base/threading/sequence_local_storage_map_unittest.cc
@@ -7,6 +7,7 @@
 #include <memory>
 #include <utility>
 
+#include "base/compiler_specific.h"
 #include "base/memory/raw_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -17,7 +18,7 @@
 
 constexpr int kSlotId = 1;
 
-class SetOnDestroy {
+class TRIVIAL_ABI SetOnDestroy {
  public:
   SetOnDestroy(bool* was_destroyed_ptr)
       : was_destroyed_ptr_(was_destroyed_ptr) {
@@ -28,24 +29,50 @@
   SetOnDestroy(const SetOnDestroy&) = delete;
   SetOnDestroy& operator=(const SetOnDestroy&) = delete;
 
+  SetOnDestroy(SetOnDestroy&& other) {
+    swap(was_destroyed_ptr_, other.was_destroyed_ptr_);
+  }
+  SetOnDestroy& operator=(SetOnDestroy&& other) {
+    swap(was_destroyed_ptr_, other.was_destroyed_ptr_);
+    return *this;
+  }
+
   ~SetOnDestroy() {
+    if (!was_destroyed_ptr_) {
+      return;
+    }
     DCHECK(!(*was_destroyed_ptr_));
     *was_destroyed_ptr_ = true;
   }
 
  private:
-  const raw_ptr<bool> was_destroyed_ptr_;
+  raw_ptr<bool> was_destroyed_ptr_;
 };
 
 template <typename T, typename... Args>
-SequenceLocalStorageMap::ValueDestructorPair CreateValueDestructorPair(
+SequenceLocalStorageMap::ValueDestructorPair CreateExternalValueDestructorPair(
     Args... args) {
-  T* value = new T(args...);
-  SequenceLocalStorageMap::ValueDestructorPair::DestructorFunc* destructor =
-      [](void* ptr) { std::default_delete<T>()(static_cast<T*>(ptr)); };
+  internal::SequenceLocalStorageMap::ExternalValue value;
+  value.emplace(new T(args...));
+  auto* destructor =
+      SequenceLocalStorageMap::MakeExternalDestructor<T,
+                                                      std::default_delete<T>>();
 
   SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair{
-      value, destructor};
+      std::move(value), destructor};
+
+  return value_destructor_pair;
+}
+
+template <typename T, typename... Args>
+SequenceLocalStorageMap::ValueDestructorPair CreateInlineValueDestructorPair(
+    Args... args) {
+  internal::SequenceLocalStorageMap::InlineValue value;
+  value.emplace<T>(args...);
+  auto* destructor = SequenceLocalStorageMap::MakeInlineDestructor<T>();
+
+  SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair{
+      std::move(value), destructor};
 
   return value_destructor_pair;
 }
@@ -54,22 +81,38 @@
 
 // Verify that setting a value in the SequenceLocalStorageMap, then getting
 // it will yield the same value.
-TEST(SequenceLocalStorageMapTest, SetGet) {
+TEST(SequenceLocalStorageMapTest, SetGetExternal) {
   SequenceLocalStorageMap sequence_local_storage_map;
   ScopedSetSequenceLocalStorageMapForCurrentThread
       scoped_sequence_local_storage_map(&sequence_local_storage_map);
 
   SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
-      CreateValueDestructorPair<int>(5);
+      CreateExternalValueDestructorPair<int>(5);
 
   sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
 
-  EXPECT_EQ(*static_cast<int*>(sequence_local_storage_map.Get(kSlotId)), 5);
+  EXPECT_EQ(
+      sequence_local_storage_map.Get(kSlotId)->external_value.value_as<int>(),
+      5);
+}
+
+TEST(SequenceLocalStorageMapTest, SetGetInline) {
+  SequenceLocalStorageMap sequence_local_storage_map;
+  ScopedSetSequenceLocalStorageMapForCurrentThread
+      scoped_sequence_local_storage_map(&sequence_local_storage_map);
+
+  SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
+      CreateInlineValueDestructorPair<int>(5);
+
+  sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
+
+  EXPECT_EQ(
+      sequence_local_storage_map.Get(kSlotId)->inline_value.value_as<int>(), 5);
 }
 
 // Verify that the destructor is called on a value stored in the
 // SequenceLocalStorageMap when SequenceLocalStorageMap is destroyed.
-TEST(SequenceLocalStorageMapTest, Destructor) {
+TEST(SequenceLocalStorageMapTest, DestructorExternal) {
   bool set_on_destruction = false;
 
   {
@@ -78,7 +121,7 @@
         scoped_sequence_local_storage_map(&sequence_local_storage_map);
 
     SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
-        CreateValueDestructorPair<SetOnDestroy>(&set_on_destruction);
+        CreateExternalValueDestructorPair<SetOnDestroy>(&set_on_destruction);
 
     sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
   }
@@ -88,7 +131,7 @@
 
 // Verify that overwriting a value already in the SequenceLocalStorageMap
 // calls value's destructor.
-TEST(SequenceLocalStorageMapTest, DestructorCalledOnSetOverwrite) {
+TEST(SequenceLocalStorageMapTest, DestructorCalledOnSetOverwriteExternal) {
   bool set_on_destruction = false;
   bool set_on_destruction2 = false;
   {
@@ -97,9 +140,9 @@
         scoped_sequence_local_storage_map(&sequence_local_storage_map);
 
     SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
-        CreateValueDestructorPair<SetOnDestroy>(&set_on_destruction);
+        CreateExternalValueDestructorPair<SetOnDestroy>(&set_on_destruction);
     SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair2 =
-        CreateValueDestructorPair<SetOnDestroy>(&set_on_destruction2);
+        CreateExternalValueDestructorPair<SetOnDestroy>(&set_on_destruction2);
 
     sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
 
@@ -116,5 +159,64 @@
   EXPECT_TRUE(set_on_destruction2);
 }
 
+#if defined(__clang__) && HAS_ATTRIBUTE(trivial_abi)
+#if !BUILDFLAG(IS_WIN)
+// Test disabled on Windows due to
+// https://github.com/llvm/llvm-project/issues/69394
+
+TEST(SequenceLocalStorageMapTest, DestructorInline) {
+  bool set_on_destruction = false;
+
+  {
+    SequenceLocalStorageMap sequence_local_storage_map;
+    ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_sequence_local_storage_map(&sequence_local_storage_map);
+
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
+        CreateInlineValueDestructorPair<SetOnDestroy>(&set_on_destruction);
+
+    sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
+  }
+
+  EXPECT_TRUE(set_on_destruction);
+}
+
+TEST(SequenceLocalStorageMapTest, DestructorCalledOnSetOverwriteInline) {
+  bool set_on_destruction = false;
+  bool set_on_destruction2 = false;
+  {
+    SequenceLocalStorageMap sequence_local_storage_map;
+    ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_sequence_local_storage_map(&sequence_local_storage_map);
+
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
+        CreateInlineValueDestructorPair<SetOnDestroy>(&set_on_destruction);
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair2 =
+        CreateInlineValueDestructorPair<SetOnDestroy>(&set_on_destruction2);
+
+    sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
+
+    ASSERT_FALSE(set_on_destruction);
+
+    // Overwrites the old value in the slot.
+    sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair2));
+
+    // Destructor should've been called for the old value in the slot, and not
+    // yet called for the new value.
+    EXPECT_TRUE(set_on_destruction);
+    EXPECT_FALSE(set_on_destruction2);
+  }
+  EXPECT_TRUE(set_on_destruction2);
+}
+
+#else  // !BUILDFLAG(IS_WIN)
+
+static_assert(!absl::is_trivially_relocatable<SetOnDestroy>(),
+              "A compiler change on Windows indicates the preprocessor "
+              "guarding the test above needs to be updated.");
+
+#endif  // !BUILDFLAG(IS_WIN)
+#endif  //  defined(__clang__) && HAS_ATTRIBUTE(trivial_abi)
+
 }  // namespace internal
 }  // namespace base
diff --git a/base/threading/sequence_local_storage_slot.h b/base/threading/sequence_local_storage_slot.h
index 12c04bb..a26a90c 100644
--- a/base/threading/sequence_local_storage_slot.h
+++ b/base/threading/sequence_local_storage_slot.h
@@ -6,11 +6,13 @@
 #define BASE_THREADING_SEQUENCE_LOCAL_STORAGE_SLOT_H_
 
 #include <memory>
+#include <type_traits>
 #include <utility>
 
 #include "base/base_export.h"
 #include "base/template_util.h"
 #include "base/threading/sequence_local_storage_map.h"
+#include "third_party/abseil-cpp/absl/meta/type_traits.h"
 
 namespace base {
 
@@ -24,7 +26,7 @@
 // Example usage:
 //
 // int& GetSequenceLocalStorage()
-//     static base::NoDestructor<SequenceLocalStorageSlot<int>> sls_value;
+//     static SequenceLocalStorageSlot<int> sls_value;
 //     return sls_value->GetOrCreateValue();
 // }
 //
@@ -50,37 +52,54 @@
 // ScopedSetSequenceLocalStorageMapForCurrentThread object.
 // Note: this is true on all ThreadPool workers and on threads bound to a
 // MessageLoop.
+// SequenceLocalStorageSlot is implemented by either the Generic or the Small
+// variant, depending on the stored type. SequenceLocalStorageSlot itself
+// doesn't support forward-declared types, so for those the Generic or Small
+// variant must be named explicitly.
+
+// Generic implementation for SequenceLocalStorageSlot.
 template <typename T, typename Deleter = std::default_delete<T>>
-class SequenceLocalStorageSlot {
+class GenericSequenceLocalStorageSlot {
  public:
-  SequenceLocalStorageSlot()
+  GenericSequenceLocalStorageSlot()
       : slot_id_(internal::GetNextSequenceLocalStorageSlotNumber()) {}
 
-  SequenceLocalStorageSlot(const SequenceLocalStorageSlot&) = delete;
-  SequenceLocalStorageSlot& operator=(const SequenceLocalStorageSlot&) = delete;
+  GenericSequenceLocalStorageSlot(const GenericSequenceLocalStorageSlot&) =
+      delete;
+  GenericSequenceLocalStorageSlot& operator=(
+      const GenericSequenceLocalStorageSlot&) = delete;
 
-  ~SequenceLocalStorageSlot() = default;
+  ~GenericSequenceLocalStorageSlot() = default;
 
-  operator bool() const { return GetValuePointer() != nullptr; }
+  explicit operator bool() const {
+    return internal::SequenceLocalStorageMap::GetForCurrentThread().Has(
+        slot_id_);
+  }
 
   // Default-constructs the value for the current sequence if not
   // already constructed. Then, returns the value.
   T& GetOrCreateValue() {
-    T* ptr = GetValuePointer();
-    if (!ptr)
-      ptr = emplace();
-    return *ptr;
+    auto* slot =
+        internal::SequenceLocalStorageMap::GetForCurrentThread().Get(slot_id_);
+    if (!slot) {
+      return emplace();
+    }
+    return slot->external_value.value_as<T>();
   }
 
   // Returns a pointer to the value for the current sequence. May be
   // nullptr if the value was not constructed on the current sequence.
   T* GetValuePointer() {
-    void* ptr =
+    auto* value =
         internal::SequenceLocalStorageMap::GetForCurrentThread().Get(slot_id_);
-    return static_cast<T*>(ptr);
+    if (value) {
+      return &value->external_value.value_as<T>();
+    }
+    return nullptr;
   }
   const T* GetValuePointer() const {
-    return const_cast<SequenceLocalStorageSlot*>(this)->GetValuePointer();
+    return const_cast<GenericSequenceLocalStorageSlot*>(this)
+        ->GetValuePointer();
   }
 
   T* operator->() { return GetValuePointer(); }
@@ -89,15 +108,17 @@
   T& operator*() { return *GetValuePointer(); }
   const T& operator*() const { return *GetValuePointer(); }
 
-  void reset() { Adopt(nullptr); }
+  void reset() {
+    internal::SequenceLocalStorageMap::GetForCurrentThread().Reset(slot_id_);
+  }
 
   // Constructs this slot's sequence-local value with |args...| and returns a
   // pointer to the created object.
   template <class... Args>
-  T* emplace(Args&&... args) {
+  T& emplace(Args&&... args) {
     T* value_ptr = new T(std::forward<Args>(args)...);
     Adopt(value_ptr);
-    return value_ptr;
+    return *value_ptr;
   }
 
  private:
@@ -110,11 +131,13 @@
     // ValueDestructorPair which is invoked when the value is overwritten by
     // another call to SequenceLocalStorageMap::Set or when the
     // SequenceLocalStorageMap is deleted.
-    internal::SequenceLocalStorageMap::ValueDestructorPair::DestructorFunc*
-        destructor = [](void* ptr) { Deleter()(static_cast<T*>(ptr)); };
-
+    internal::SequenceLocalStorageMap::ExternalValue value;
+    value.emplace(value_ptr);
     internal::SequenceLocalStorageMap::ValueDestructorPair
-        value_destructor_pair(value_ptr, destructor);
+        value_destructor_pair(
+            std::move(value),
+            internal::SequenceLocalStorageMap::MakeExternalDestructor<
+                T, Deleter>());
 
     internal::SequenceLocalStorageMap::GetForCurrentThread().Set(
         slot_id_, std::move(value_destructor_pair));
@@ -124,5 +147,94 @@
   const int slot_id_;
 };
 
+// Implementation for SequenceLocalStorageSlot optimized for small and trivial
+// objects.
+template <class T>
+class SmallSequenceLocalStorageSlot {
+ public:
+  SmallSequenceLocalStorageSlot()
+      : slot_id_(internal::GetNextSequenceLocalStorageSlotNumber()) {}
+
+  SmallSequenceLocalStorageSlot(const SmallSequenceLocalStorageSlot&) = delete;
+  SmallSequenceLocalStorageSlot& operator=(
+      const SmallSequenceLocalStorageSlot&) = delete;
+
+  ~SmallSequenceLocalStorageSlot() = default;
+
+  explicit operator bool() const {
+    return internal::SequenceLocalStorageMap::GetForCurrentThread().Has(
+        slot_id_);
+  }
+
+  // Default-constructs the value for the current sequence if not
+  // already constructed. Then, returns the value.
+  T& GetOrCreateValue() {
+    auto* slot =
+        internal::SequenceLocalStorageMap::GetForCurrentThread().Get(slot_id_);
+    if (!slot) {
+      return emplace();
+    }
+    return slot->inline_value.value_as<T>();
+  }
+
+  // Returns a pointer to the value for the current sequence. May be
+  // nullptr if the value was not constructed on the current sequence.
+  T* GetValuePointer() {
+    auto* slot =
+        internal::SequenceLocalStorageMap::GetForCurrentThread().Get(slot_id_);
+    if (!slot) {
+      return nullptr;
+    }
+    return &slot->inline_value.value_as<T>();
+  }
+  const T* GetValuePointer() const {
+    return const_cast<SmallSequenceLocalStorageSlot*>(this)->GetValuePointer();
+  }
+
+  T* operator->() { return GetValuePointer(); }
+  const T* operator->() const { return GetValuePointer(); }
+
+  T& operator*() { return *GetValuePointer(); }
+  const T& operator*() const { return *GetValuePointer(); }
+
+  void reset() {
+    internal::SequenceLocalStorageMap::GetForCurrentThread().Reset(slot_id_);
+  }
+
+  // Constructs this slot's sequence-local value with |args...| and returns a
+  // reference to the created object.
+  template <class... Args>
+  T& emplace(Args&&... args) {
+    internal::SequenceLocalStorageMap::InlineValue value;
+    value.emplace<T>(std::forward<Args>(args)...);
+    internal::SequenceLocalStorageMap::ValueDestructorPair
+        value_destructor_pair(
+            std::move(value),
+            internal::SequenceLocalStorageMap::MakeInlineDestructor<T>());
+
+    return internal::SequenceLocalStorageMap::GetForCurrentThread()
+        .Set(slot_id_, std::move(value_destructor_pair))
+        ->inline_value.value_as<T>();
+  }
+
+ private:
+  // |slot_id_| is used as a key in SequenceLocalStorageMap
+  const int slot_id_;
+};
+
+template <typename T,
+          typename Deleter = std::default_delete<T>,
+          bool IsSmall =
+              sizeof(T) <= sizeof(void*) && absl::is_trivially_relocatable<T>()>
+struct SequenceLocalStorageSlot;
+
+template <typename T, typename Deleter>
+struct SequenceLocalStorageSlot<T, Deleter, false>
+    : GenericSequenceLocalStorageSlot<T, Deleter> {};
+
+template <typename T>
+struct SequenceLocalStorageSlot<T, std::default_delete<T>, true>
+    : SmallSequenceLocalStorageSlot<T> {};
+
 }  // namespace base
 #endif  // BASE_THREADING_SEQUENCE_LOCAL_STORAGE_SLOT_H_
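A small sketch of how the trailing dispatch resolves; the static_asserts are
illustrative and not part of the patch:

    static_assert(
        std::is_base_of_v<base::SmallSequenceLocalStorageSlot<int>,
                          base::SequenceLocalStorageSlot<int>>,
        "int fits in a pointer and is trivially relocatable: stored inline");
    static_assert(
        std::is_base_of_v<base::GenericSequenceLocalStorageSlot<std::string>,
                          base::SequenceLocalStorageSlot<std::string>>,
        "std::string is larger than a pointer: heap-allocated");

Note that supplying a custom Deleter always selects the Generic variant, since
the Small specialization is declared only for std::default_delete<T>.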
diff --git a/base/threading/sequence_local_storage_slot_unittest.cc b/base/threading/sequence_local_storage_slot_unittest.cc
index 982c59c..9444003 100644
--- a/base/threading/sequence_local_storage_slot_unittest.cc
+++ b/base/threading/sequence_local_storage_slot_unittest.cc
@@ -14,6 +14,7 @@
 
 namespace {
 
+template <class T>
 class SequenceLocalStorageSlotTest : public testing::Test {
  public:
   SequenceLocalStorageSlotTest(const SequenceLocalStorageSlotTest&) = delete;
@@ -31,19 +32,34 @@
 
 }  // namespace
 
-// Verify that a value stored with Set() can be retrieved with Get().
-TEST_F(SequenceLocalStorageSlotTest, GetEmplace) {
-  SequenceLocalStorageSlot<int> slot;
+struct GenericSLS {
+  template <class T>
+  using Type = GenericSequenceLocalStorageSlot<T>;
+};
+
+struct SmallSLS {
+  template <class T>
+  using Type = SmallSequenceLocalStorageSlot<T>;
+};
+
+using StorageTypes = testing::Types<GenericSLS, SmallSLS>;
+TYPED_TEST_SUITE(SequenceLocalStorageSlotTest, StorageTypes);
+
+// Verify that a value stored with emplace() can be retrieved with operator*().
+TYPED_TEST(SequenceLocalStorageSlotTest, GetEmplace) {
+  using SLSType = typename TypeParam::template Type<int>;
+  SLSType slot;
   slot.emplace(5);
   EXPECT_EQ(*slot, 5);
 }
 
 // Verify that inserting an object in a SequenceLocalStorageSlot creates a copy
 // of that object independent of the original one.
-TEST_F(SequenceLocalStorageSlotTest, EmplaceObjectIsIndependent) {
+TYPED_TEST(SequenceLocalStorageSlotTest, EmplaceObjectIsIndependent) {
+  using SLSType = typename TypeParam::template Type<bool>;
   bool should_be_false = false;
 
-  SequenceLocalStorageSlot<bool> slot;
+  SLSType slot;
 
   slot.emplace(should_be_false);
 
@@ -54,12 +70,13 @@
   EXPECT_NE(should_be_false, *slot);
 }
 
-// Verify that multiple slots work and that calling Get after overwriting
+// Verify that multiple slots work and that calling emplace after overwriting
 // a value in a slot yields the new value.
-TEST_F(SequenceLocalStorageSlotTest, GetEmplaceMultipleSlots) {
-  SequenceLocalStorageSlot<int> slot1;
-  SequenceLocalStorageSlot<int> slot2;
-  SequenceLocalStorageSlot<int> slot3;
+TYPED_TEST(SequenceLocalStorageSlotTest, GetEmplaceMultipleSlots) {
+  using SLSType = typename TypeParam::template Type<int>;
+  SLSType slot1;
+  SLSType slot2;
+  SLSType slot3;
   EXPECT_FALSE(slot1);
   EXPECT_FALSE(slot2);
   EXPECT_FALSE(slot3);
@@ -86,31 +103,42 @@
 
 // Verify that changing the value returned by Get() changes the value
 // in sequence local storage.
-TEST_F(SequenceLocalStorageSlotTest, GetReferenceModifiable) {
-  SequenceLocalStorageSlot<bool> slot;
+TYPED_TEST(SequenceLocalStorageSlotTest, GetReferenceModifiable) {
+  using SLSType = typename TypeParam::template Type<bool>;
+  SLSType slot;
   slot.emplace(false);
   *slot = true;
   EXPECT_TRUE(*slot);
 }
 
 // Verify that a move-only type can be stored in sequence local storage.
-TEST_F(SequenceLocalStorageSlotTest, EmplaceGetWithMoveOnlyType) {
-  std::unique_ptr<int> int_unique_ptr = std::make_unique<int>(5);
+TYPED_TEST(SequenceLocalStorageSlotTest, EmplaceGetWithMoveOnlyType) {
+  struct MoveOnly {
+    MoveOnly() = default;
+    MoveOnly(const MoveOnly&) = delete;
+    MoveOnly& operator=(const MoveOnly&) = delete;
+    MoveOnly(MoveOnly&&) = default;
+    MoveOnly& operator=(MoveOnly&&) = default;
+    int x = 0x12345678;
+  };
+  using SLSType = typename TypeParam::template Type<MoveOnly>;
+  MoveOnly move_only;
 
-  SequenceLocalStorageSlot<std::unique_ptr<int>> slot;
-  slot.emplace(std::move(int_unique_ptr));
+  SLSType slot;
+  slot.emplace(std::move(move_only));
 
-  EXPECT_EQ(*slot->get(), 5);
+  EXPECT_EQ(slot->x, 0x12345678);
 }
 
 // Verify that a Get() without a previous Set() on a slot returns a
 // default-constructed value.
-TEST_F(SequenceLocalStorageSlotTest, GetWithoutSetDefaultConstructs) {
+TYPED_TEST(SequenceLocalStorageSlotTest, GetWithoutSetDefaultConstructs) {
   struct DefaultConstructable {
     int x = 0x12345678;
   };
+  using SLSType = typename TypeParam::template Type<DefaultConstructable>;
 
-  SequenceLocalStorageSlot<DefaultConstructable> slot;
+  SLSType slot;
 
   EXPECT_EQ(slot.GetOrCreateValue().x, 0x12345678);
 }
@@ -119,8 +147,9 @@
 // a POD-type returns a default-constructed value.
 // Note: this test could be flaky and give a false pass. If it's flaky, the test
 // might've "passed" because the memory for the slot happened to be zeroed.
-TEST_F(SequenceLocalStorageSlotTest, GetWithoutSetDefaultConstructsPOD) {
-  SequenceLocalStorageSlot<void*> slot;
+TYPED_TEST(SequenceLocalStorageSlotTest, GetWithoutSetDefaultConstructsPOD) {
+  using SLSType = typename TypeParam::template Type<void*>;
+  SLSType slot;
 
   EXPECT_EQ(slot.GetOrCreateValue(), nullptr);
 }
diff --git a/base/threading/simple_thread.cc b/base/threading/simple_thread.cc
index 579431f..d4954df 100644
--- a/base/threading/simple_thread.cc
+++ b/base/threading/simple_thread.cc
@@ -4,6 +4,7 @@
 
 #include "base/threading/simple_thread.h"
 
+#include <memory>
 #include <ostream>
 
 #include "base/check.h"
@@ -121,9 +122,9 @@
     std::string name(name_prefix_);
     name.push_back('/');
     name.append(NumberToString(i));
-    DelegateSimpleThread* thread = new DelegateSimpleThread(this, name);
+    auto thread = std::make_unique<DelegateSimpleThread>(this, name);
     thread->Start();
-    threads_.push_back(thread);
+    threads_.push_back(std::move(thread));
   }
 }
 
@@ -136,7 +137,6 @@
   // Join and destroy all the worker threads.
   for (size_t i = 0; i < num_threads_; ++i) {
     threads_[i]->Join();
-    delete threads_[i];
   }
   threads_.clear();
   DCHECK(delegates_.empty());
diff --git a/base/threading/simple_thread.h b/base/threading/simple_thread.h
index ac7bc01..07f5ca7 100644
--- a/base/threading/simple_thread.h
+++ b/base/threading/simple_thread.h
@@ -42,6 +42,7 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -223,7 +224,7 @@
  private:
   const std::string name_prefix_;
   size_t num_threads_;
-  std::vector<DelegateSimpleThread*> threads_;
+  std::vector<std::unique_ptr<DelegateSimpleThread>> threads_;
   base::queue<Delegate*> delegates_;
   base::Lock lock_;            // Locks delegates_
   WaitableEvent dry_;    // Not signaled when there is no work to do.
diff --git a/base/threading/thread_local_internal.h b/base/threading/thread_local_internal.h
index ed99410..5504813 100644
--- a/base/threading/thread_local_internal.h
+++ b/base/threading/thread_local_internal.h
@@ -30,7 +30,7 @@
  public:
   CheckedThreadLocalOwnedPointer() = default;
 
-  CheckedThreadLocalOwnedPointer<T>(const CheckedThreadLocalOwnedPointer<T>&) =
+  CheckedThreadLocalOwnedPointer(const CheckedThreadLocalOwnedPointer<T>&) =
       delete;
   CheckedThreadLocalOwnedPointer<T>& operator=(
       const CheckedThreadLocalOwnedPointer<T>&) = delete;
diff --git a/base/threading/thread_local_storage.cc b/base/threading/thread_local_storage.cc
index 457b986..8dc21cd 100644
--- a/base/threading/thread_local_storage.cc
+++ b/base/threading/thread_local_storage.cc
@@ -234,7 +234,7 @@
   // typical Chromium builds where the code is in a dynamic library. For the
   // static executable case, this is likely equivalent.
   static_assert(
-      std::is_same<PlatformThreadLocalStorage::TLSKey, pthread_key_t>::value,
+      std::is_same_v<PlatformThreadLocalStorage::TLSKey, pthread_key_t>,
       "The special-case below assumes that the platform TLS implementation is "
       "pthread.");
 
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index b88010e..2c6f5bf 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -130,27 +130,31 @@
 class ChromeNSSCryptoModuleDelegate;
 class DesktopNotificationBalloon;
 class FirefoxProfileLock;
+class GaiaConfig;
 class KeyStorageLinux;
 class NativeBackendKWallet;
 class NativeDesktopMediaList;
+class PartnerBookmarksReader;
 class Profile;
 class ProfileImpl;
-class StartupTabProviderImpl;
-class GaiaConfig;
-class WebEngineBrowserMainParts;
 class ScopedAllowBlockingForProfile;
+class StartupTabProviderImpl;
+class WebEngineBrowserMainParts;
 
 namespace base {
+class Environment;
 class File;
 class FilePath;
 }  // namespace base
 
-Profile* GetLastProfileMac();
 bool EnsureBrowserStateDirectoriesCreated(const base::FilePath&,
                                           const base::FilePath&,
                                           const base::FilePath&);
+Profile* GetLastProfileMac();
+bool HasWaylandDisplay(base::Environment* env);
 
 namespace android_webview {
+class AwBrowserContext;
 class AwFormDatabaseService;
 class CookieManager;
 class JsSandboxIsolate;
@@ -158,9 +162,9 @@
 class VizCompositorThreadRunnerWebView;
 }  // namespace android_webview
 namespace ash {
-class MojoUtils;
 class BrowserDataBackMigrator;
 class LoginEventRecorder;
+class MojoUtils;
 class StartupCustomizationDocument;
 class StartupUtils;
 bool CameraAppUIShouldEnableLocalOverride(const std::string&);
@@ -172,6 +176,7 @@
 class OutputDevice;
 }
 namespace blink {
+class AudioDestination;
 class DiskDataAllocator;
 class IdentifiabilityActiveSampler;
 class RTCVideoDecoderAdapter;
@@ -179,7 +184,6 @@
 class SourceStream;
 class VideoFrameResourceProvider;
 class WebRtcVideoFrameAdapter;
-class LegacyWebRtcVideoFrameAdapter;
 class VideoTrackRecorderImplContextProvider;
 class WorkerThread;
 namespace scheduler {
@@ -231,7 +235,6 @@
 class RenderProcessHost;
 class RenderWidgetHostViewMac;
 class RendererBlinkPlatformImpl;
-class RTCVideoDecoder;
 class SandboxHostLinux;
 class ScopedAllowWaitForDebugURL;
 class ServiceWorkerContextClient;
@@ -282,9 +285,6 @@
 namespace font_service::internal {
 class MappedFontFile;
 }
-namespace functions {
-class ExecScriptScopedAllowBaseSyncPrimitives;
-}
 namespace gl {
 struct GLImplementationParts;
 namespace init {
@@ -341,36 +341,6 @@
 }
 }  // namespace core
 }  // namespace mojo
-namespace printing {
-class LocalPrinterHandlerDefault;
-#if BUILDFLAG(IS_MAC)
-class PrintBackendServiceImpl;
-#endif
-class PrintBackendServiceManager;
-class PrinterQuery;
-}  // namespace printing
-namespace rlz_lib {
-class FinancialPing;
-}
-namespace storage {
-class ObfuscatedFileUtil;
-}
-namespace syncer {
-class GetLocalChangesRequest;
-class HttpBridge;
-}  // namespace syncer
-namespace ui {
-class DrmThreadProxy;
-class DrmDisplayHostManager;
-class SelectFileDialogLinux;
-class ScopedAllowBlockingForGbmSurface;
-}  // namespace ui
-namespace weblayer {
-class BrowserContextImpl;
-class ContentBrowserClientImpl;
-class ProfileImpl;
-class WebLayerPathProvider;
-}  // namespace weblayer
 namespace net {
 class GSSAPISharedLibrary;
 class MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
@@ -383,16 +353,21 @@
 class AddressTrackerLinux;
 }
 }  // namespace net
-
+namespace printing {
+class LocalPrinterHandlerDefault;
+#if BUILDFLAG(IS_MAC)
+class PrintBackendServiceImpl;
+#endif
+class PrintBackendServiceManager;
+class PrinterQuery;
+}  // namespace printing
 namespace proxy_resolver {
 class ScopedAllowThreadJoinForProxyResolverV8Tracing;
 }
-
 namespace remote_cocoa {
 class DroppedScreenShotCopierMac;
 class SelectFileDialogBridge;
 }  // namespace remote_cocoa
-
 namespace remoting {
 class AutoThread;
 class ScopedAllowBlockingForCrashReporting;
@@ -400,64 +375,73 @@
 namespace protocol {
 class ScopedAllowSyncPrimitivesForWebRtcDataStreamAdapter;
 class ScopedAllowSyncPrimitivesForWebRtcTransport;
+class ScopedAllowSyncPrimitivesForWebRtcVideoStream;
 class ScopedAllowThreadJoinForWebRtcTransport;
 }  // namespace protocol
 }  // namespace remoting
-
+namespace rlz_lib {
+class FinancialPing;
+}
 namespace service_manager {
 class ServiceProcessLauncher;
 }
-
 namespace shell_integration_linux {
 class LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
 }
-
+namespace storage {
+class ObfuscatedFileUtil;
+}
+namespace syncer {
+class GetLocalChangesRequest;
+class HttpBridge;
+}  // namespace syncer
 namespace tracing {
 class FuchsiaPerfettoProducerConnector;
 }
-
 namespace ui {
+class DrmThreadProxy;
+class DrmDisplayHostManager;
+class ScopedAllowBlockingForGbmSurface;
+class SelectFileDialogLinux;
 class WindowResizeHelperMac;
-}
-
+}  // namespace ui
 namespace updater {
 class SystemctlLauncherScopedAllowBaseSyncPrimitives;
 }
-
 namespace viz {
 class HostGpuMemoryBufferManager;
 class ClientGpuMemoryBufferManager;
 }  // namespace viz
-
 namespace vr {
 class VrShell;
 }
-
 namespace web {
 class WebMainLoop;
 }  // namespace web
-
-namespace webrtc {
-class DesktopConfigurationMonitor;
-}
+namespace weblayer {
+class BrowserContextImpl;
+class ContentBrowserClientImpl;
+class ProfileImpl;
+class WebLayerPathProvider;
+}  // namespace weblayer
+// NOTE: Please do not append entries here. Put them in the list above and keep
+// the list sorted.
 
 namespace base {
-class Environment;
-}
-
-bool HasWaylandDisplay(base::Environment* env);
-
-namespace base {
-
-namespace sequence_manager::internal {
-class TaskQueueImpl;
-}  // namespace sequence_manager::internal
 
 namespace android {
 class JavaHandlerThread;
 class ScopedAllowBlockingForImportantFileWriter;
 }  // namespace android
 
+namespace apple::internal {
+base::FilePath GetExecutablePath();
+}
+
+namespace debug {
+class StackTrace;
+}
+
 namespace internal {
 class GetAppOutputScopedAllowBaseSyncPrimitives;
 class JobTaskSource;
@@ -465,14 +449,14 @@
 bool ReadProcFile(const FilePath& file, std::string* buffer);
 }  // namespace internal
 
+namespace sequence_manager::internal {
+class TaskQueueImpl;
+}  // namespace sequence_manager::internal
+
 namespace subtle {
 class PlatformSharedMemoryRegion;
 }
 
-namespace debug {
-class StackTrace;
-}
-
 namespace win {
 class OSInfo;
 class ScopedAllowBlockingForUserAccountControl;
@@ -491,10 +475,6 @@
 class TestCustomDisallow;
 class Thread;
 
-namespace apple::internal {
-base::FilePath GetExecutablePath();
-}
-
 #if DCHECK_IS_ON()
 // NOT_TAIL_CALLED if dcheck-is-on so it's always evident who irrevocably
 // altered the allowance (dcheck-builds will provide the setter's stack on
@@ -586,6 +566,7 @@
   friend class ::ScopedAllowBlockingForProfile;
   friend class ::StartupTabProviderImpl;
   friend class ::WebEngineBrowserMainParts;
+  friend class android_webview::AwBrowserContext;
   friend class android_webview::ScopedAllowInitGLBindings;
   friend class ash::BrowserDataBackMigrator;
   friend class ash::LoginEventRecorder;
@@ -745,6 +726,7 @@
   // Allowed usage:
   // Sorted by class name (with namespace).
   friend class ::ChromeNSSCryptoModuleDelegate;
+  friend class ::PartnerBookmarksReader;
   friend class ::tracing::FuchsiaPerfettoProducerConnector;
   friend class android_webview::JsSandboxIsolate;
   friend class base::SimpleThread;
@@ -765,7 +747,6 @@
   friend class content::ServiceWorkerContextClient;
   friend class device::UsbContext;
   friend class enterprise_connectors::LinuxKeyRotationCommand;
-  friend class functions::ExecScriptScopedAllowBaseSyncPrimitives;
   friend class history_report::HistoryReportJniBridge;
   friend class internal::TaskTracker;
   friend class leveldb::port::CondVar;
@@ -783,7 +764,6 @@
   friend class syncer::HttpBridge;
   friend class syncer::GetLocalChangesRequest;
   friend class updater::SystemctlLauncherScopedAllowBaseSyncPrimitives;
-  friend class webrtc::DesktopConfigurationMonitor;
 
   // Usage that should be fixed:
   // Sorted by class name (with namespace).
@@ -835,7 +815,7 @@
   friend class base::StackSamplingProfiler;
   friend class base::internal::JobTaskSource;
   friend class base::sequence_manager::internal::TaskQueueImpl;
-  friend class blink::LegacyWebRtcVideoFrameAdapter;
+  friend class blink::AudioDestination;
   friend class blink::RTCVideoDecoderAdapter;
   friend class blink::RTCVideoEncoder;
   friend class blink::WebRtcVideoFrameAdapter;
@@ -847,7 +827,6 @@
   friend class content::EmergencyTraceFinalisationCoordinator;
   friend class content::InProcessUtilityThread;
   friend class content::RenderProcessHost;
-  friend class content::RTCVideoDecoder;
   friend class content::SandboxHostLinux;
   friend class content::ScopedAllowWaitForDebugURL;
   friend class content::SynchronousCompositor;
@@ -889,6 +868,8 @@
   friend class remoting::protocol::
       ScopedAllowSyncPrimitivesForWebRtcTransport;  // http://crbug.com/1198501
   friend class remoting::protocol::
+      ScopedAllowSyncPrimitivesForWebRtcVideoStream;  // http://b/304681143
+  friend class remoting::protocol::
       ScopedAllowThreadJoinForWebRtcTransport;  // http://crbug.com/660081
   // Not used in production yet, https://crbug.com/844078.
   friend class service_manager::ServiceProcessLauncher;
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index 53b4d50..e89ec64 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -10,6 +10,7 @@
 #include <utility>
 #include <vector>
 
+#include "base/dcheck_is_on.h"
 #include "base/debug/leak_annotations.h"
 #include "base/functional/bind.h"
 #include "base/logging.h"
@@ -30,16 +31,16 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
 
-using base::Thread;
 using ::testing::NotNull;
 
-typedef PlatformTest ThreadTest;
+using ThreadTest = PlatformTest;
 
+namespace base {
 namespace {
 
 void ToggleValue(bool* value) {
-  ANNOTATE_BENIGN_RACE(value, "Test-only data race on boolean "
-                       "in base/thread_unittest");
+  ANNOTATE_BENIGN_RACE(
+      value, "Test-only data race on boolean in base/thread_unittest");
   *value = !*value;
 }
 
@@ -57,7 +58,7 @@
   ~SleepInsideInitThread() override { Stop(); }
 
   void Init() override {
-    base::PlatformThread::Sleep(base::Milliseconds(500));
+    PlatformThread::Sleep(Milliseconds(500));
     init_called_ = true;
   }
   bool InitCalled() { return init_called_; }
@@ -80,7 +81,7 @@
   THREAD_NUM_EVENTS
 };
 
-typedef std::vector<ThreadEvent> EventList;
+using EventList = std::vector<ThreadEvent>;
 
 class CaptureToEventList : public Thread {
  public:
@@ -88,9 +89,7 @@
   // the order they occured in. |event_list| must remain valid for the
   // lifetime of this thread.
   explicit CaptureToEventList(EventList* event_list)
-      : Thread("none"),
-        event_list_(event_list) {
-  }
+      : Thread("none"), event_list_(event_list) {}
 
   CaptureToEventList(const CaptureToEventList&) = delete;
   CaptureToEventList& operator=(const CaptureToEventList&) = delete;
@@ -107,13 +106,11 @@
 
 // Observer that writes a value into |event_list| when a message loop has been
 // destroyed.
-class CapturingDestructionObserver
-    : public base::CurrentThread::DestructionObserver {
+class CapturingDestructionObserver : public CurrentThread::DestructionObserver {
  public:
   // |event_list| must remain valid throughout the observer's lifetime.
   explicit CapturingDestructionObserver(EventList* event_list)
-      : event_list_(event_list) {
-  }
+      : event_list_(event_list) {}
 
   CapturingDestructionObserver(const CapturingDestructionObserver&) = delete;
   CapturingDestructionObserver& operator=(const CapturingDestructionObserver&) =
@@ -130,16 +127,15 @@
 };
 
 // Task that adds a destruction observer to the current message loop.
-void RegisterDestructionObserver(
-    base::CurrentThread::DestructionObserver* observer) {
-  base::CurrentThread::Get()->AddDestructionObserver(observer);
+void RegisterDestructionObserver(CurrentThread::DestructionObserver* observer) {
+  CurrentThread::Get()->AddDestructionObserver(observer);
 }
 
 // Task that calls GetThreadId() of |thread|, stores the result into |id|, then
 // signal |event|.
-void ReturnThreadId(base::Thread* thread,
-                    base::PlatformThreadId* id,
-                    base::WaitableEvent* event) {
+void ReturnThreadId(Thread* thread,
+                    PlatformThreadId* id,
+                    WaitableEvent* event) {
   *id = thread->GetThreadId();
   event->Signal();
 }
@@ -147,33 +143,37 @@
 }  // namespace
 
 TEST_F(ThreadTest, StartWithOptions_StackSize) {
-  Thread a("StartWithStackSize");
-  // Ensure that the thread can work with only 12 kb and still process a
-  // message. At the same time, we should scale with the bitness of the system
-  // where 12 kb is definitely not enough.
-  // 12 kb = 3072 Slots on a 32-bit system, so we'll scale based off of that.
-  int multiplier = 1;
-  Thread::Options options;
-#if defined(ADDRESS_SANITIZER) || !defined(NDEBUG)
-  // ASan bloats the stack variables and overflows the 3072 slot stack. Some
-  // debug builds also grow the stack too much.
-  ++multiplier;
+  // Ensure that the thread can work with a small stack and still process a
+  // message. On a 32-bit system, a release build should be able to work with
+  // 12 KiB.
+  size_t num_slots = 12 * 1024 / 4;
+  size_t slot_size = sizeof(char*);
+  int additional_space = 0;
+#if !defined(NDEBUG)
+  // Some debug builds grow the stack too much.
+  num_slots *= 2;
+#endif
+#if defined(ADDRESS_SANITIZER)
+  // ASan bloats the stack variables.
+  slot_size *= 2;
 #endif
 #if defined(LEAK_SANITIZER) && BUILDFLAG(IS_MAC)
   // The first time an LSAN disable is fired on a thread, the LSAN Mac runtime
   // initializes a 56k object on the stack.
-  ++multiplier;
+  additional_space += 56 * 1024;
 #endif
-  options.stack_size = 3072 * sizeof(uintptr_t) * multiplier;
+
+  Thread a("StartWithStackSize");
+  Thread::Options options;
+  options.stack_size = num_slots * slot_size + additional_space;
   EXPECT_TRUE(a.StartWithOptions(std::move(options)));
   EXPECT_TRUE(a.task_runner());
   EXPECT_TRUE(a.IsRunning());
 
-  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
-                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
   a.task_runner()->PostTask(
-      FROM_HERE,
-      base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&event)));
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&event)));
   event.Wait();
 }
 
@@ -202,19 +202,17 @@
   EXPECT_TRUE(a->WaitUntilThreadStarted());
 
   // Make the thread block until |block_event| is signaled.
-  base::WaitableEvent block_event(
-      base::WaitableEvent::ResetPolicy::AUTOMATIC,
-      base::WaitableEvent::InitialState::NOT_SIGNALED);
-  a->task_runner()->PostTask(FROM_HERE,
-                             base::BindOnce(&base::WaitableEvent::Wait,
-                                            base::Unretained(&block_event)));
+  WaitableEvent block_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED);
+  a->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Wait, Unretained(&block_event)));
 
   a->StopSoon();
   EXPECT_TRUE(a->IsRunning());
 
   // Unblock the task and give a bit of extra time to unwind QuitWhenIdle().
   block_event.Signal();
-  base::PlatformThread::Sleep(base::Milliseconds(20));
+  PlatformThread::Sleep(Milliseconds(20));
 
   // The thread should now have stopped on its own.
   EXPECT_FALSE(a->IsRunning());
@@ -232,11 +230,10 @@
     // destroyed.  We do this by dispatching a sleep event before the
     // event that will toggle our sentinel value.
     a.task_runner()->PostTask(
-        FROM_HERE, base::BindOnce(static_cast<void (*)(base::TimeDelta)>(
-                                      &base::PlatformThread::Sleep),
-                                  base::Milliseconds(20)));
-    a.task_runner()->PostTask(FROM_HERE,
-                              base::BindOnce(&ToggleValue, &was_invoked));
+        FROM_HERE,
+        BindOnce(static_cast<void (*)(TimeDelta)>(&PlatformThread::Sleep),
+                 Milliseconds(20)));
+    a.task_runner()->PostTask(FROM_HERE, BindOnce(&ToggleValue, &was_invoked));
   }
   EXPECT_TRUE(was_invoked);
 }
@@ -260,7 +257,7 @@
 
   // Attempt to catch use-after-frees from the non-joinable thread in the
   // scope of this test if any.
-  base::PlatformThread::Sleep(base::Milliseconds(20));
+  PlatformThread::Sleep(Milliseconds(20));
 }
 
 TEST_F(ThreadTest, StopSoon) {
@@ -303,10 +300,10 @@
   EXPECT_DCHECK_DEATH_WITH(
       {
         // Stopping |a| on |b| isn't allowed.
-        b.task_runner()->PostTask(
-            FROM_HERE, base::BindOnce(&Thread::Stop, base::Unretained(&a)));
+        b.task_runner()->PostTask(FROM_HERE,
+                                  BindOnce(&Thread::Stop, Unretained(&a)));
         // Block here so the DCHECK on |b| always happens in this scope.
-        base::PlatformThread::Sleep(base::TimeDelta::Max());
+        PlatformThread::Sleep(TimeDelta::Max());
       },
       "owning_sequence_checker_.CalledOnValidSequence()");
 }
@@ -320,19 +317,19 @@
   Thread b("TakingOwnershipThread");
   b.Start();
 
-  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
-                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
 
   // a->DetachFromSequence() should allow |b| to use |a|'s Thread API.
   a->DetachFromSequence();
-  b.task_runner()->PostTask(
-      FROM_HERE, base::BindOnce(
-                     [](std::unique_ptr<Thread> thread_to_stop,
-                        base::WaitableEvent* event_to_signal) -> void {
-                       thread_to_stop->Stop();
-                       event_to_signal->Signal();
-                     },
-                     std::move(a), base::Unretained(&event)));
+  b.task_runner()->PostTask(FROM_HERE,
+                            BindOnce(
+                                [](std::unique_ptr<Thread> thread_to_stop,
+                                   WaitableEvent* event_to_signal) {
+                                  thread_to_stop->Stop();
+                                  event_to_signal->Signal();
+                                },
+                                std::move(a), Unretained(&event)));
 
   event.Wait();
 }
@@ -377,20 +374,18 @@
   EXPECT_TRUE(a->IsRunning());
 
   // Signaled when last task on |a| is processed.
-  base::WaitableEvent last_task_event(
-      base::WaitableEvent::ResetPolicy::AUTOMATIC,
-      base::WaitableEvent::InitialState::NOT_SIGNALED);
-  a->task_runner()->PostTask(
-      FROM_HERE, base::BindOnce(&base::WaitableEvent::Signal,
-                                base::Unretained(&last_task_event)));
+  WaitableEvent last_task_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  a->task_runner()->PostTask(FROM_HERE, BindOnce(&WaitableEvent::Signal,
+                                                 Unretained(&last_task_event)));
 
   // StopSoon() is non-blocking, Yield() to |a|, wait for last task to be
   // processed and a little more for QuitWhenIdle() to unwind before considering
   // the thread "stopped".
   a->StopSoon();
-  base::PlatformThread::YieldCurrentThread();
+  PlatformThread::YieldCurrentThread();
   last_task_event.Wait();
-  base::PlatformThread::Sleep(base::Milliseconds(20));
+  PlatformThread::Sleep(Milliseconds(20));
 
   // This test assumes that the above was sufficient to let the thread fully
   // stop.
@@ -414,24 +409,23 @@
   b.Start();
 
   // Post a task that calls GetThreadId() on the created thread.
-  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
-                            base::WaitableEvent::InitialState::NOT_SIGNALED);
-  base::PlatformThreadId id_from_new_thread;
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  PlatformThreadId id_from_new_thread;
   a.task_runner()->PostTask(
-      FROM_HERE,
-      base::BindOnce(ReturnThreadId, &a, &id_from_new_thread, &event));
+      FROM_HERE, BindOnce(ReturnThreadId, &a, &id_from_new_thread, &event));
 
   // Call GetThreadId() on the current thread before calling event.Wait() so
   // that this test can find a race issue with TSAN.
-  base::PlatformThreadId id_from_current_thread = a.GetThreadId();
+  PlatformThreadId id_from_current_thread = a.GetThreadId();
 
   // Check if GetThreadId() returns consistent value in both threads.
   event.Wait();
   EXPECT_EQ(id_from_current_thread, id_from_new_thread);
 
   // A started thread should have a valid ID.
-  EXPECT_NE(base::kInvalidThreadId, a.GetThreadId());
-  EXPECT_NE(base::kInvalidThreadId, b.GetThreadId());
+  EXPECT_NE(kInvalidThreadId, a.GetThreadId());
+  EXPECT_NE(kInvalidThreadId, b.GetThreadId());
 
   // Each thread should have a different thread ID.
   EXPECT_NE(a.GetThreadId(), b.GetThreadId());
@@ -439,11 +433,11 @@
 
 TEST_F(ThreadTest, ThreadIdWithRestart) {
   Thread a("ThreadIdWithRestart");
-  base::PlatformThreadId previous_id = base::kInvalidThreadId;
+  PlatformThreadId previous_id = kInvalidThreadId;
 
   for (size_t i = 0; i < 16; ++i) {
     EXPECT_TRUE(a.Start());
-    base::PlatformThreadId current_id = a.GetThreadId();
+    PlatformThreadId current_id = a.GetThreadId();
     EXPECT_NE(previous_id, current_id);
     previous_id = current_id;
     a.Stop();
@@ -477,10 +471,9 @@
 
     // Register an observer that writes into |captured_events| once the
     // thread's message loop is destroyed.
-    t.task_runner()->PostTask(
-        FROM_HERE,
-        base::BindOnce(&RegisterDestructionObserver,
-                       base::Unretained(&loop_destruction_observer)));
+    t.task_runner()->PostTask(FROM_HERE,
+                              BindOnce(&RegisterDestructionObserver,
+                                       Unretained(&loop_destruction_observer)));
 
     // Upon leaving this scope, the thread is deleted.
   }
@@ -516,20 +509,19 @@
   // Flushing a thread with no tasks shouldn't block.
   a.FlushForTesting();
 
-  constexpr base::TimeDelta kSleepPerTestTask = base::Milliseconds(50);
+  constexpr TimeDelta kSleepPerTestTask = Milliseconds(50);
   constexpr size_t kNumSleepTasks = 5;
 
-  const base::TimeTicks ticks_before_post = base::TimeTicks::Now();
+  const TimeTicks ticks_before_post = TimeTicks::Now();
 
   for (size_t i = 0; i < kNumSleepTasks; ++i) {
     a.task_runner()->PostTask(
-        FROM_HERE,
-        base::BindOnce(&base::PlatformThread::Sleep, kSleepPerTestTask));
+        FROM_HERE, BindOnce(&PlatformThread::Sleep, kSleepPerTestTask));
   }
 
   // All tasks should have executed, as reflected by the elapsed time.
   a.FlushForTesting();
-  EXPECT_GE(base::TimeTicks::Now() - ticks_before_post,
+  EXPECT_GE(TimeTicks::Now() - ticks_before_post,
             kNumSleepTasks * kSleepPerTestTask);
 
   a.Stop();
@@ -540,15 +532,14 @@
 
 namespace {
 
-using TaskQueue = base::sequence_manager::TaskQueue;
+using TaskQueue = sequence_manager::TaskQueue;
 
 class SequenceManagerThreadDelegate : public Thread::Delegate {
  public:
   SequenceManagerThreadDelegate()
-      : sequence_manager_(
-            base::sequence_manager::CreateUnboundSequenceManager()),
+      : sequence_manager_(sequence_manager::CreateUnboundSequenceManager()),
         task_queue_(sequence_manager_->CreateTaskQueue(
-            TaskQueue::Spec(base::sequence_manager::QueueName::DEFAULT_TQ))) {
+            TaskQueue::Spec(sequence_manager::QueueName::DEFAULT_TQ))) {
     sequence_manager_->SetDefaultTaskRunner(GetDefaultTaskRunner());
   }
 
@@ -560,17 +551,17 @@
 
   // Thread::Delegate:
 
-  scoped_refptr<base::SingleThreadTaskRunner> GetDefaultTaskRunner() override {
+  scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() override {
     return task_queue_->task_runner();
   }
 
   void BindToCurrentThread() override {
     sequence_manager_->BindToMessagePump(
-        base::MessagePump::Create(base::MessagePumpType::DEFAULT));
+        MessagePump::Create(MessagePumpType::DEFAULT));
   }
 
  private:
-  std::unique_ptr<base::sequence_manager::SequenceManager> sequence_manager_;
+  std::unique_ptr<sequence_manager::SequenceManager> sequence_manager_;
   TaskQueue::Handle task_queue_;
 };
 
@@ -578,17 +569,19 @@
 
 TEST_F(ThreadTest, ProvidedThreadDelegate) {
   Thread thread("ThreadDelegate");
-  base::Thread::Options options;
+  Thread::Options options;
   options.delegate = std::make_unique<SequenceManagerThreadDelegate>();
 
-  scoped_refptr<base::SingleThreadTaskRunner> task_runner =
+  scoped_refptr<SingleThreadTaskRunner> task_runner =
       options.delegate->GetDefaultTaskRunner();
   thread.StartWithOptions(std::move(options));
 
-  base::WaitableEvent event;
-  task_runner->PostTask(FROM_HERE, base::BindOnce(&base::WaitableEvent::Signal,
-                                                  base::Unretained(&event)));
+  WaitableEvent event;
+  task_runner->PostTask(FROM_HERE,
+                        BindOnce(&WaitableEvent::Signal, Unretained(&event)));
   event.Wait();
 
   thread.Stop();
 }
+
+}  // namespace base
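
For a concrete sense of the reworked stack-size math in StartWithOptions_StackSize above: on a 64-bit release build with no sanitizers, num_slots = 12 * 1024 / 4 = 3072 and slot_size = sizeof(char*) = 8, giving options.stack_size = 3072 * 8 = 24 KiB. A debug build doubles num_slots (48 KiB), ASan instead doubles slot_size (also 48 KiB), and the Mac LSan case adds a further 56 KiB of additional_space for the runtime's on-stack object.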
diff --git a/base/threading/threading_features.h b/base/threading/threading_features.h
index a682df4..2b89b23 100644
--- a/base/threading/threading_features.h
+++ b/base/threading/threading_features.h
@@ -28,9 +28,11 @@
 
 #if BUILDFLAG(IS_WIN)
 BASE_EXPORT BASE_DECLARE_FEATURE(kAboveNormalCompositingBrowserWin);
+BASE_EXPORT BASE_DECLARE_FEATURE(kBackgroundThreadNormalMemoryPriorityWin);
 #endif
 
 BASE_EXPORT BASE_DECLARE_FEATURE(kEnableHangWatcher);
+BASE_EXPORT BASE_DECLARE_FEATURE(kEnableHangWatcherInZygoteChildren);
 
 }  // namespace base
 
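
For context, features declared with BASE_DECLARE_FEATURE are defined in the matching .cc file and consulted at runtime through base::FeatureList. A minimal sketch of a call site for one of the features added above; the function name and body are illustrative only and not part of this diff:

    #include "base/feature_list.h"
    #include "base/threading/threading_features.h"

    // Hypothetical call site: gate hang-watcher startup in a forked child on
    // the newly declared feature.
    void MaybeStartHangWatcherInZygoteChild() {
      if (base::FeatureList::IsEnabled(base::kEnableHangWatcherInZygoteChildren)) {
        // Start the hang watcher here.
      }
    }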
diff --git a/base/time/buildflags/BUILD.gn b/base/time/buildflags/BUILD.gn
deleted file mode 100644
index 738a548..0000000
--- a/base/time/buildflags/BUILD.gn
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/buildflag_header.gni")
-import("buildflags.gni")
-
-# Generate a buildflag header for compile-time checking of mach absolute time
-# support in TimeTicks
-# TODO(crbug.com/1414153): this should be removed once there is a unified
-# approach to TimeTicks::Now on iOS.
-buildflag_header("buildflags") {
-  header = "buildflags.h"
-  flags = [ "ENABLE_MACH_ABSOLUTE_TIME_TICKS=$enable_mach_absolute_time_ticks" ]
-}
diff --git a/base/time/buildflags/buildflags.gni b/base/time/buildflags/buildflags.gni
deleted file mode 100644
index b72566b..0000000
--- a/base/time/buildflags/buildflags.gni
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/features.gni")
-
-# TODO(crbug.com/1414153): this should be removed once there is a unified
-# approach to TimeTicks::Now on iOS.
-declare_args() {
-  # use_blink currently assumes mach absolute time ticks (eg, to ensure that
-  # trace events cohere).
-  enable_mach_absolute_time_ticks = is_mac || use_blink
-}
diff --git a/base/time/time.cc b/base/time/time.cc
index 723ece8..a02505a 100644
--- a/base/time/time.cc
+++ b/base/time/time.cc
@@ -12,6 +12,7 @@
 #include <utility>
 
 #include "base/check.h"
+#include "base/format_macros.h"
 #include "base/strings/stringprintf.h"
 #include "base/third_party/nspr/prtime.h"
 #include "base/time/time_override.h"
@@ -22,12 +23,6 @@
 
 namespace {
 
-const char kWeekdayName[7][4] = {"Sun", "Mon", "Tue", "Wed",
-                                 "Thu", "Fri", "Sat"};
-
-const char kMonthName[12][4] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
-                                "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
-
 TimeTicks g_shared_time_ticks_at_unix_epoch;
 
 }  // namespace
@@ -193,15 +188,18 @@
 std::ostream& operator<<(std::ostream& os, Time time) {
   Time::Exploded exploded;
   time.UTCExplode(&exploded);
-  // Use StringPrintf because iostreams formatting is painful.
-  return os << StringPrintf("%04d-%02d-%02d %02d:%02d:%02d.%03d UTC",
-                            exploded.year,
-                            exploded.month,
-                            exploded.day_of_month,
-                            exploded.hour,
-                            exploded.minute,
-                            exploded.second,
-                            exploded.millisecond);
+  // Can't call `UnlocalizedTimeFormatWithPattern()`/`TimeFormatAsIso8601()`
+  // since `//base` can't depend on `//base:i18n`.
+  //
+  // TODO(pkasting): Consider whether `operator<<()` should move to
+  // `base/i18n/time_formatting.h` -- would let us implement in terms of
+  // existing time formatting, but might be confusing.
+  return os << StringPrintf("%04d-%02d-%02d %02d:%02d:%02d.%06" PRId64 " UTC",
+                            exploded.year, exploded.month,
+                            exploded.day_of_month, exploded.hour,
+                            exploded.minute, exploded.second,
+                            time.ToDeltaSinceWindowsEpoch().InMicroseconds() %
+                                Time::kMicrosecondsPerSecond);
 }
 
 // TimeTicks ------------------------------------------------------------------
@@ -309,13 +307,4 @@
   // clang-format on
 }
 
-std::string TimeFormatHTTP(base::Time time) {
-  base::Time::Exploded exploded;
-  time.UTCExplode(&exploded);
-  return base::StringPrintf(
-      "%s, %02d %s %04d %02d:%02d:%02d GMT", kWeekdayName[exploded.day_of_week],
-      exploded.day_of_month, kMonthName[exploded.month - 1], exploded.year,
-      exploded.hour, exploded.minute, exploded.second);
-}
-
 }  // namespace base
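
The operator<<() change above raises the printed precision from milliseconds to microseconds by appending the remainder of the delta since the Windows epoch. A rough usage sketch, with the expected output hand-derived from the format string (so treat the exact string as approximate):

    #include <sstream>
    #include "base/time/time.h"

    // The Unix epoch falls on an exact second boundary relative to the Windows
    // epoch, so the remainder modulo Time::kMicrosecondsPerSecond is 250 here.
    base::Time t =
        base::Time::UnixEpoch() + base::Seconds(1) + base::Microseconds(250);
    std::ostringstream os;
    os << t;  // expected roughly "1970-01-01 00:00:01.000250 UTC"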
diff --git a/base/time/time.h b/base/time/time.h
index 42d812b..c891e45 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -68,6 +68,7 @@
 #include <iosfwd>
 #include <limits>
 #include <ostream>
+#include <type_traits>
 
 #include "base/base_export.h"
 #include "base/check.h"
@@ -77,10 +78,6 @@
 #include "build/build_config.h"
 #include "build/chromeos_buildflags.h"
 
-#if BUILDFLAG(IS_APPLE)
-#include "base/time/buildflags/buildflags.h"
-#endif
-
 #if BUILDFLAG(IS_FUCHSIA)
 #include <zircon/types.h>
 #endif
@@ -123,6 +120,16 @@
 template <typename T>
 constexpr TimeDelta Microseconds(T n);
 
+namespace {
+
+// TODO: Replace usage of this with std::isnan() once Chromium uses C++23,
+// where that is constexpr.
+constexpr bool isnan(double d) {
+  return d != d;
+}
+
+}  // namespace
+
 // TimeDelta ------------------------------------------------------------------
 
 class BASE_EXPORT TimeDelta {
@@ -143,9 +150,7 @@
   static TimeDelta FromZxDuration(zx_duration_t nanos);
 #endif
 #if BUILDFLAG(IS_APPLE)
-#if BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
   static TimeDelta FromMachTime(uint64_t mach_time);
-#endif  // BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
 #endif  // BUILDFLAG(IS_APPLE)
 
   // Converts an integer value representing TimeDelta to a class. This is used
@@ -577,14 +582,20 @@
       (sizeof(time_t) == 4 ? 2037 : std::numeric_limits<int>::max());
 #endif
 
-  // Represents an exploded time that can be formatted nicely. This is kind of
-  // like the Win32 SYSTEMTIME structure or the Unix "struct tm" with a few
-  // additions and changes to prevent errors.
+  // Represents an exploded time. This is kind of like the Win32 SYSTEMTIME
+  // structure or the Unix "struct tm" with a few additions and changes to
+  // prevent errors.
+  //
   // This structure always represents dates in the Gregorian calendar and always
   // encodes day_of_week as Sunday==0, Monday==1, .., Saturday==6. This means
   // that base::Time::LocalExplode and base::Time::FromLocalExploded only
   // respect the current local time zone in the conversion and do *not* use a
   // calendar or day-of-week encoding from the current locale.
+  //
+  // NOTE: Generally, you should prefer the functions in
+  // base/i18n/time_formatting.h (in particular,
+  // `UnlocalizedTimeFormatWithPattern()`) over trying to create a formatted
+  // time string from this object.
   struct BASE_EXPORT Exploded {
     int year;          // Four digit year "2007"
     int month;         // 1-based month (values 1 = January, etc.)
@@ -655,13 +666,14 @@
   static constexpr Time FromTimeT(time_t tt);
   constexpr time_t ToTimeT() const;
 
-  // Converts time to/from a double which is the number of seconds since epoch
-  // (Jan 1, 1970).  Webkit uses this format to represent time.
-  // Because WebKit initializes double time value to 0 to indicate "not
-  // initialized", we map it to empty Time object that also means "not
-  // initialized".
-  static constexpr Time FromDoubleT(double dt);
-  constexpr double ToDoubleT() const;
+  // Converts time to/from a number of seconds since the Unix epoch (Jan 1,
+  // 1970).
+  //
+  // TODO(crbug.com/1495550): Add integral versions and use them.
+  // TODO(crbug.com/1495554): Add ...PreservingNull() versions; see comments in
+  // the implementation of FromSecondsSinceUnixEpoch().
+  static constexpr Time FromSecondsSinceUnixEpoch(double dt);
+  constexpr double InSecondsFSinceUnixEpoch() const;
 
 #if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
   // Converts the timespec structure to time. MacOS X 10.8.3 (and tentatively,
@@ -671,23 +683,35 @@
   static constexpr Time FromTimeSpec(const timespec& ts);
 #endif
 
-  // Converts to/from the Javascript convention for times, a number of
-  // milliseconds since the epoch:
-  // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date/getTime.
+  // Converts to/from a number of milliseconds since the Unix epoch.
+  // TODO(crbug.com/1495554): Add ...PreservingNull() versions; see comments in
+  // the implementation of FromMillisecondsSinceUnixEpoch().
+  static constexpr Time FromMillisecondsSinceUnixEpoch(int64_t dt);
+  static constexpr Time FromMillisecondsSinceUnixEpoch(double dt);
+  // Explicitly forward calls with smaller integral types to the int64_t
+  // version; otherwise such calls would need to manually cast their args to
+  // int64_t, since the compiler isn't sure whether to promote to int64_t or
+  // double.
+  template <typename T,
+            typename = std::enable_if_t<
+                std::is_integral_v<T> && !std::is_same_v<T, int64_t> &&
+                (sizeof(T) < sizeof(int64_t) ||
+                 (sizeof(T) == sizeof(int64_t) && std::is_signed_v<T>))>>
+  static constexpr Time FromMillisecondsSinceUnixEpoch(T ms_since_epoch) {
+    return FromMillisecondsSinceUnixEpoch(int64_t{ms_since_epoch});
+  }
+  constexpr int64_t InMillisecondsSinceUnixEpoch() const;
+  // Don't use InMillisecondsFSinceUnixEpoch() in new code, since it contains a
+  // subtle hack (only exactly 1601-01-01 00:00 UTC is represented as 1970-01-01
+  // 00:00 UTC), and that is not appropriate for general use. Try to use
+  // InMillisecondsFSinceUnixEpochIgnoringNull() unless you have a very good
+  // reason to use InMillisecondsFSinceUnixEpoch().
   //
-  // Don't use ToJsTime() in new code, since it contains a subtle hack (only
-  // exactly 1601-01-01 00:00 UTC is represented as 1970-01-01 00:00 UTC), and
-  // that is not appropriate for general use. Try to use ToJsTimeIgnoringNull()
-  // unless you have a very good reason to use ToJsTime().
-  static constexpr Time FromJsTime(double ms_since_epoch);
-  constexpr double ToJsTime() const;
-  constexpr double ToJsTimeIgnoringNull() const;
-
-  // Converts to/from Java convention for times, a number of milliseconds since
-  // the epoch. Because the Java format has less resolution, converting to Java
-  // time is a lossy operation.
-  static constexpr Time FromJavaTime(int64_t ms_since_epoch);
-  constexpr int64_t ToJavaTime() const;
+  // TODO(crbug.com/1495554): Rename the no-suffix version to
+  // "...PreservingNull()" and remove the suffix from the other version, to
+  // guide people to the preferable API.
+  constexpr double InMillisecondsFSinceUnixEpoch() const;
+  constexpr double InMillisecondsFSinceUnixEpochIgnoringNull() const;
 
 #if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
   static Time FromTimeVal(struct timeval t);
@@ -752,7 +776,7 @@
   // month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
   // FromLocalExploded respects the current time zone but does not attempt to
   // use the calendar or day-of-week encoding from the current locale - see the
-  // comments on base::Time::Exploded for more information.
+  // comments on Exploded for more information.
   [[nodiscard]] static bool FromUTCExploded(const Exploded& exploded,
                                             Time* time) {
     return FromExploded(false, exploded, time);
@@ -795,7 +819,7 @@
   // of the year 30828. Some platforms might convert over a wider input range.
   // LocalExplode respects the current time zone but does not attempt to use the
   // calendar or day-of-week encoding from the current locale - see the comments
-  // on base::Time::Exploded for more information.
+  // on Exploded for more information.
   void UTCExplode(Exploded* exploded) const { Explode(false, exploded); }
   void LocalExplode(Exploded* exploded) const { Explode(true, exploded); }
 
@@ -812,7 +836,7 @@
   // DEPRECATED - Do not use in new code. When deserializing from `base::Value`,
   // prefer the helpers from //base/json/values_util.h instead.
   // Otherwise, use `Time::FromDeltaSinceWindowsEpoch()` for `Time` and
-  // `TimeDelta::FromMicroseconds()` for `TimeDelta`. http://crbug.com/634507
+  // `Microseconds()` for `TimeDelta`. http://crbug.com/634507
   static constexpr Time FromInternalValue(int64_t us) { return Time(us); }
 
  private:
@@ -1084,14 +1108,20 @@
 }
 
 // static
-constexpr Time Time::FromDoubleT(double dt) {
-  // Preserve 0 so we can tell it doesn't exist.
-  return (dt == 0 || std::isnan(dt)) ? Time() : (UnixEpoch() + Seconds(dt));
+constexpr Time Time::FromSecondsSinceUnixEpoch(double dt) {
+  // Preserve 0.
+  //
+  // TODO(crbug.com/1495554): This is an unfortunate artifact of WebKit using 0
+  // to mean "no time". Add a "...PreservingNull()" version that does this,
+  // convert the minimum necessary set of callers to use it, and remove the zero
+  // check here.
+  return (dt == 0 || isnan(dt)) ? Time() : (UnixEpoch() + Seconds(dt));
 }
 
-constexpr double Time::ToDoubleT() const {
+constexpr double Time::InSecondsFSinceUnixEpoch() const {
+  // Preserve 0.
   if (is_null()) {
-    return 0;  // Preserve 0 so we can tell it doesn't exist.
+    return 0;
   }
   if (!is_inf()) {
     return (*this - UnixEpoch()).InSecondsF();
@@ -1103,38 +1133,29 @@
 #if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
 // static
 constexpr Time Time::FromTimeSpec(const timespec& ts) {
-  return FromDoubleT(ts.tv_sec +
-                     static_cast<double>(ts.tv_nsec) / kNanosecondsPerSecond);
+  return FromSecondsSinceUnixEpoch(ts.tv_sec + static_cast<double>(ts.tv_nsec) /
+                                                   kNanosecondsPerSecond);
 }
 #endif
 
 // static
-constexpr Time Time::FromJsTime(double ms_since_epoch) {
-  // The epoch is a valid time, so this constructor doesn't interpret 0 as the
-  // null time.
-  return UnixEpoch() + Milliseconds(ms_since_epoch);
+constexpr Time Time::FromMillisecondsSinceUnixEpoch(int64_t dt) {
+  // TODO(crbug.com/1495554): The lack of zero-preservation here doesn't match
+  // InMillisecondsSinceUnixEpoch(), which is dangerous since it means
+  // round-trips are not necessarily idempotent. Add "...PreservingNull()"
+  // versions that explicitly check for zeros, convert the minimum necessary set
+  // of callers to use them, and remove the null-check in
+  // InMillisecondsSinceUnixEpoch().
+  return UnixEpoch() + Milliseconds(dt);
 }
 
-constexpr double Time::ToJsTime() const {
-  // Preserve 0 so the invalid result doesn't depend on the platform.
-  return is_null() ? 0 : ToJsTimeIgnoringNull();
+// static
+constexpr Time Time::FromMillisecondsSinceUnixEpoch(double dt) {
+  return isnan(dt) ? Time() : (UnixEpoch() + Milliseconds(dt));
 }
 
-constexpr double Time::ToJsTimeIgnoringNull() const {
-  // Preserve max and min without offset to prevent over/underflow.
-  if (!is_inf()) {
-    return (*this - UnixEpoch()).InMillisecondsF();
-  }
-  return (us_ < 0) ? -std::numeric_limits<double>::infinity()
-                   : std::numeric_limits<double>::infinity();
-}
-
-constexpr Time Time::FromJavaTime(int64_t ms_since_epoch) {
-  return UnixEpoch() + Milliseconds(ms_since_epoch);
-}
-
-constexpr int64_t Time::ToJavaTime() const {
-  // Preserve 0 so the invalid result doesn't depend on the platform.
+constexpr int64_t Time::InMillisecondsSinceUnixEpoch() const {
+  // Preserve 0.
   if (is_null()) {
     return 0;
   }
@@ -1145,6 +1166,20 @@
                    : std::numeric_limits<int64_t>::max();
 }
 
+constexpr double Time::InMillisecondsFSinceUnixEpoch() const {
+  // Preserve 0.
+  return is_null() ? 0 : InMillisecondsFSinceUnixEpochIgnoringNull();
+}
+
+constexpr double Time::InMillisecondsFSinceUnixEpochIgnoringNull() const {
+  // Preserve max and min without offset to prevent over/underflow.
+  if (!is_inf()) {
+    return (*this - UnixEpoch()).InMillisecondsF();
+  }
+  return (us_ < 0) ? -std::numeric_limits<double>::infinity()
+                   : std::numeric_limits<double>::infinity();
+}
+
 // For logging use only.
 BASE_EXPORT std::ostream& operator<<(std::ostream& os, Time time);
 
@@ -1198,14 +1233,12 @@
 #endif
 
 #if BUILDFLAG(IS_APPLE)
-#if BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
   static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
 
   // Sets the current Mach timebase to `timebase`. Returns the old timebase.
   static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
       mach_timebase_info_data_t timebase);
 
-#endif  // BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
 #endif  // BUILDFLAG(IS_APPLE)
 
 #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS_ASH)
@@ -1383,10 +1416,6 @@
 // For logging use only.
 BASE_EXPORT std::ostream& operator<<(std::ostream& os, ThreadTicks time_ticks);
 
-// Returns a string representation of the given time in the IMF-fixdate format
-// defined by RFC 7231 (satisfying its HTTP-date format).
-BASE_EXPORT std::string TimeFormatHTTP(base::Time time);
-
 }  // namespace base
 
 #endif  // BASE_TIME_TIME_H_
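
To make the time.h rename above easier to scan, a minimal before/after sketch of converted call sites; the names come directly from the hunks, while the literal values are illustrative:

    base::Time a = base::Time::FromSecondsSinceUnixEpoch(1.5);         // was FromDoubleT(1.5)
    double s = a.InSecondsFSinceUnixEpoch();                           // was ToDoubleT()
    base::Time b = base::Time::FromMillisecondsSinceUnixEpoch(1500.0); // was FromJsTime(1500.0)
    double msf = b.InMillisecondsFSinceUnixEpoch();                    // was ToJsTime()
    base::Time c = base::Time::FromMillisecondsSinceUnixEpoch(int64_t{1500});  // was FromJavaTime(1500)
    int64_t ms = c.InMillisecondsSinceUnixEpoch();                     // was ToJavaTime()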
diff --git a/base/time/time_apple.mm b/base/time/time_apple.mm
new file mode 100644
index 0000000..073eab9
--- /dev/null
+++ b/base/time/time_apple.mm
@@ -0,0 +1,209 @@
+// Copyright 2012 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#import <Foundation/Foundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+
+#include "base/apple/mach_logging.h"
+#include "base/apple/scoped_cftyperef.h"
+#include "base/apple/scoped_mach_port.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+namespace {
+
+// Returns a pointer to the initialized Mach timebase info struct.
+mach_timebase_info_data_t* MachTimebaseInfo() {
+  static mach_timebase_info_data_t timebase_info = []() {
+    mach_timebase_info_data_t info;
+    kern_return_t kr = mach_timebase_info(&info);
+    MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
+    DCHECK(info.numer);
+    DCHECK(info.denom);
+    return info;
+  }();
+  return &timebase_info;
+}
+
+int64_t MachTimeToMicroseconds(uint64_t mach_time) {
+  // timebase_info gives us the conversion factor between absolute time tick
+  // units and nanoseconds.
+  mach_timebase_info_data_t* timebase_info = MachTimebaseInfo();
+
+  // Take the fast path when the conversion is 1:1. The result will for sure fit
+  // into an int64_t because we're going from nanoseconds to microseconds.
+  if (timebase_info->numer == timebase_info->denom) {
+    return static_cast<int64_t>(mach_time /
+                                base::Time::kNanosecondsPerMicrosecond);
+  }
+
+  uint64_t microseconds = 0;
+  const uint64_t divisor =
+      timebase_info->denom * base::Time::kNanosecondsPerMicrosecond;
+
+  // Microseconds is mach_time * timebase.numer /
+  // (timebase.denom * kNanosecondsPerMicrosecond). Divide first to reduce
+  // the chance of overflow. Also stash the remainder right now, a likely
+  // byproduct of the division.
+  microseconds = mach_time / divisor;
+  const uint64_t mach_time_remainder = mach_time % divisor;
+
+  // Now multiply, keeping an eye out for overflow.
+  CHECK(!__builtin_umulll_overflow(microseconds, timebase_info->numer,
+                                   &microseconds));
+
+  // By dividing first we lose precision. Regain it by adding back the
+  // microseconds from the remainder, with an eye out for overflow.
+  uint64_t least_significant_microseconds =
+      (mach_time_remainder * timebase_info->numer) / divisor;
+  CHECK(!__builtin_uaddll_overflow(microseconds, least_significant_microseconds,
+                                   &microseconds));
+
+  // Don't bother with the rollover handling that the Windows version does.
+  // The returned time in microseconds is enough for 292,277 years (starting
+  // from 2^63 because the returned int64_t is signed,
+  // 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
+  return base::checked_cast<int64_t>(microseconds);
+}
+
+// Returns monotonically growing number of ticks in microseconds since some
+// unspecified starting point.
+int64_t ComputeCurrentTicks() {
+  // mach_absolute_time is it when it comes to ticks on the Mac.  Other calls
+  // with less precision (such as TickCount) just call through to
+  // mach_absolute_time.
+  return MachTimeToMicroseconds(mach_absolute_time());
+}
+
+int64_t ComputeThreadTicks() {
+  struct timespec ts = {};
+  CHECK(clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0);
+  base::CheckedNumeric<int64_t> absolute_micros(ts.tv_sec);
+  absolute_micros *= base::Time::kMicrosecondsPerSecond;
+  absolute_micros += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+  return absolute_micros.ValueOrDie();
+}
+
+}  // namespace
+
+namespace base {
+
+// The Time routines in this file use Mach and CoreFoundation APIs, since the
+// POSIX definition of time_t in macOS wraps around after 2038--and
+// there are already cookie expiration dates, etc., past that time out in
+// the field.  Using CFDate prevents that problem, and using mach_absolute_time
+// for TimeTicks gives us nice high-resolution interval timing.
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  return Time::FromCFAbsoluteTime(CFAbsoluteTimeGetCurrent());
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// static
+Time Time::FromCFAbsoluteTime(CFAbsoluteTime t) {
+  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+                "CFAbsoluteTime must have an infinity value");
+  if (t == 0) {
+    return Time();  // Consider 0 as a null Time.
+  }
+  return (t == std::numeric_limits<CFAbsoluteTime>::infinity())
+             ? Max()
+             : (UnixEpoch() +
+                Seconds(double{t + kCFAbsoluteTimeIntervalSince1970}));
+}
+
+CFAbsoluteTime Time::ToCFAbsoluteTime() const {
+  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+                "CFAbsoluteTime must have an infinity value");
+  if (is_null()) {
+    return 0;  // Consider 0 as a null Time.
+  }
+  return is_max() ? std::numeric_limits<CFAbsoluteTime>::infinity()
+                  : (CFAbsoluteTime{(*this - UnixEpoch()).InSecondsF()} -
+                     kCFAbsoluteTimeIntervalSince1970);
+}
+
+// static
+Time Time::FromNSDate(NSDate* date) {
+  DCHECK(date);
+  return FromCFAbsoluteTime(date.timeIntervalSinceReferenceDate);
+}
+
+NSDate* Time::ToNSDate() const {
+  return [NSDate dateWithTimeIntervalSinceReferenceDate:ToCFAbsoluteTime()];
+}
+
+// TimeDelta ------------------------------------------------------------------
+
+// static
+TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
+  return Microseconds(MachTimeToMicroseconds(mach_time));
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return TimeTicks() + Microseconds(ComputeCurrentTicks());
+}
+}  // namespace subtle
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+// static
+TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
+  return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
+}
+
+// static
+mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
+    mach_timebase_info_data_t timebase) {
+  mach_timebase_info_data_t orig_timebase = *MachTimebaseInfo();
+
+  *MachTimebaseInfo() = timebase;
+
+  return orig_timebase;
+}
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::MAC_MACH_ABSOLUTE_TIME;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  return ThreadTicks() + Microseconds(ComputeThreadTicks());
+}
+}  // namespace subtle
+
+}  // namespace base
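
A standalone sketch of the divide-first arithmetic used by MachTimeToMicroseconds() in the new time_apple.mm, using the sample M1 timebase {numer = 125, denom = 3} from the unit tests; the overflow CHECKs are omitted to keep the sketch short:

    #include <cstdint>

    // Divide first, multiply second, then add back the microseconds lost to
    // the remainder -- the same ordering as the function above.
    uint64_t MachTicksToMicrosSketch(uint64_t mach_time,
                                     uint32_t numer,
                                     uint32_t denom) {
      const uint64_t divisor = uint64_t{denom} * 1000;  // nanoseconds per microsecond
      uint64_t micros = (mach_time / divisor) * numer;
      micros += (mach_time % divisor) * numer / divisor;
      return micros;
    }

    // MachTicksToMicrosSketch(92738127000, 125, 3) == 3864088625, i.e. the
    // 92738127000 ticks from the M1 unit test scale to roughly 3864 seconds.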
diff --git a/base/time/time_apple_unittest.mm b/base/time/time_apple_unittest.mm
new file mode 100644
index 0000000..e87bc25
--- /dev/null
+++ b/base/time/time_apple_unittest.mm
@@ -0,0 +1,178 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class ScopedTimebase {
+ public:
+  explicit ScopedTimebase(mach_timebase_info_data_t timebase) {
+    orig_timebase_ = base::TimeTicks::SetMachTimebaseInfoForTesting(timebase);
+  }
+
+  ScopedTimebase(const ScopedTimebase&) = delete;
+
+  ScopedTimebase& operator=(const ScopedTimebase&) = delete;
+
+  ~ScopedTimebase() {
+    base::TimeTicks::SetMachTimebaseInfoForTesting(orig_timebase_);
+  }
+
+ private:
+  mach_timebase_info_data_t orig_timebase_;
+};
+
+mach_timebase_info_data_t kIntelTimebase = {1, 1};
+
+// A sample (not definitive) timebase for M1.
+mach_timebase_info_data_t kM1Timebase = {125, 3};
+
+}  // namespace
+
+namespace base {
+namespace {
+
+base::Time NoonOnDate(int year, int month, int day) {
+  const base::Time::Exploded exploded = {
+      .year = year, .month = month, .day_of_month = day, .hour = 12};
+  base::Time imploded;
+  CHECK(base::Time::FromUTCExploded(exploded, &imploded));
+  return imploded;
+}
+
+void CheckRoundTrip(int y, int m, int d) {
+  base::Time original = NoonOnDate(y, m, d);
+  base::Time roundtrip = Time::FromNSDate(original.ToNSDate());
+  EXPECT_EQ(original, roundtrip);
+}
+
+TEST(TimeMacTest, RoundTripNSDate) {
+  CheckRoundTrip(1911, 12, 14);
+  CheckRoundTrip(1924, 9, 28);
+  CheckRoundTrip(1926, 5, 12);
+  CheckRoundTrip(1969, 7, 24);
+}
+
+TEST(TimeMacTest, MachTimeToMicrosecondsIntelTimebase) {
+  ScopedTimebase timebase(kIntelTimebase);
+
+  // Perform the conversion.
+  uint64_t kArbitraryTicks = 59090101000;
+  TimeDelta result = TimeDelta::FromMachTime(kArbitraryTicks);
+
+  // With Intel the output should be the input.
+  EXPECT_EQ(Nanoseconds(kArbitraryTicks), result);
+}
+
+TEST(TimeMacTest, MachTimeToMicrosecondsM1Timebase) {
+  ScopedTimebase timebase(kM1Timebase);
+
+  // Use a tick count that's divisible by 3.
+  const uint64_t kArbitraryTicks = 92738127000;
+  TimeDelta result = TimeDelta::FromMachTime(kArbitraryTicks);
+
+  const uint64_t kExpectedResult =
+      kArbitraryTicks * kM1Timebase.numer / kM1Timebase.denom;
+  EXPECT_EQ(Nanoseconds(kExpectedResult), result);
+}
+
+// Tests MachTimeToMicroseconds when
+// mach_timebase_info_data_t.numer and mach_timebase_info_data_t.denom
+// are equal.
+TEST(TimeMacTest, MachTimeToMicrosecondsEqualTimebaseMembers) {
+  // These members would produce overflow but don't because
+  // MachTimeToMicroseconds should skip the timebase conversion
+  // when they're equal.
+  ScopedTimebase timebase({UINT_MAX, UINT_MAX});
+
+  uint64_t kArbitraryTicks = 175920053729;
+  TimeDelta result = TimeDelta::FromMachTime(kArbitraryTicks);
+
+  // With a unity timebase the output should be the input.
+  EXPECT_EQ(Nanoseconds(kArbitraryTicks), result);
+}
+
+TEST(TimeMacTest, MachTimeToMicrosecondsOverflowDetection) {
+  const uint32_t kArbitraryNumer = 1234567;
+  ScopedTimebase timebase({kArbitraryNumer, 1});
+
+  // Expect an overflow.
+  EXPECT_CHECK_DEATH(
+      TimeDelta::FromMachTime(std::numeric_limits<uint64_t>::max()));
+}
+
+// Tests that there's no overflow in MachTimeToMicroseconds even with
+// std::numeric_limits<uint64_t>::max() ticks on Intel.
+TEST(TimeMacTest, MachTimeToMicrosecondsNoOverflowIntel) {
+  ScopedTimebase timebase(kIntelTimebase);
+
+  // The incoming Mach time ticks are on the order of nanoseconds while the
+  // return result is microseconds. Even though we're passing in the largest
+  // tick count the result should be orders of magnitude smaller. On Intel the
+  // mapping from ticks to nanoseconds is 1:1 so we wouldn't ever expect an
+  // overflow when applying the timebase conversion.
+  TimeDelta::FromMachTime(std::numeric_limits<uint64_t>::max());
+}
+
+// Tests that there's no overflow in MachTimeToMicroseconds even with
+// std::numeric_limits<uint64_t>::max() ticks on M1.
+TEST(TimeMacTest, MachTimeToMicrosecondsNoOverflowM1) {
+  ScopedTimebase timebase(kM1Timebase);
+
+  // The incoming Mach time ticks are on the order of nanoseconds while the
+  // return result is microseconds. Even though we're passing in the largest
+  // tick count the result should be orders of magnitude smaller. Expect that
+  // FromMachTime(), when applying the timebase conversion, is smart enough to
+  // not multiply first and generate an overflow.
+  TimeDelta::FromMachTime(std::numeric_limits<uint64_t>::max());
+}
+
+// Tests that there's no underflow in MachTimeToMicroseconds on Intel.
+TEST(TimeMacTest, MachTimeToMicrosecondsNoUnderflowIntel) {
+  ScopedTimebase timebase(kIntelTimebase);
+
+  // On Intel the timebase conversion is 1:1, so min ticks is one microsecond
+  // worth of nanoseconds.
+  const uint64_t kMinimumTicks = base::Time::kNanosecondsPerMicrosecond;
+  const uint64_t kOneMicrosecond = 1;
+  EXPECT_EQ(kOneMicrosecond,
+            TimeDelta::FromMachTime(kMinimumTicks).InMicroseconds() * 1UL);
+
+  // If we have even one fewer tick (i.e. not enough ticks to constitute a full
+  // microsecond) the integer rounding should result in 0 microseconds.
+  const uint64_t kZeroMicroseconds = 0;
+  EXPECT_EQ(kZeroMicroseconds,
+            TimeDelta::FromMachTime(kMinimumTicks - 1).InMicroseconds() * 1UL);
+}
+
+// Tests that there's no underflow in MachTimeToMicroseconds for M1.
+TEST(TimeMacTest, MachTimeToMicrosecondsNoUnderflowM1) {
+  ScopedTimebase timebase(kM1Timebase);
+
+  // Microseconds is mach_time multiplied by kM1Timebase.numer /
+  // (kM1Timebase.denom * base::Time::kNanosecondsPerMicrosecond). Inverting
+  // that should be the minimum number of ticks to get a single microsecond in
+  // return. If we get zero it means an underflow in the conversion. For example
+  // if FromMachTime() first divides mach_time by kM1Timebase.denom *
+  // base::Time::kNanosecondsPerMicrosecond we'll get zero back.
+  const uint64_t kMinimumTicks =
+      (kM1Timebase.denom * base::Time::kNanosecondsPerMicrosecond) /
+      kM1Timebase.numer;
+  const uint64_t kOneMicrosecond = 1;
+  EXPECT_EQ(kOneMicrosecond,
+            TimeDelta::FromMachTime(kMinimumTicks).InMicroseconds() * 1UL);
+
+  // If we have even one fewer tick (i.e. not enough ticks to constitute a full
+  // microsecond) the integer rounding should result in 0 microseconds.
+  const uint64_t kZeroMicroseconds = 0;
+  EXPECT_EQ(kZeroMicroseconds,
+            TimeDelta::FromMachTime(kMinimumTicks - 1).InMicroseconds() * 1UL);
+}
+
+}  // namespace
+}  // namespace base
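
As a quick sanity check of MachTimeToMicrosecondsNoUnderflowM1 above, with the sample timebase {125, 3}: kMinimumTicks = (3 * 1000) / 125 = 24 ticks, and 24 * 125 / 3 = 1000 ns, exactly one microsecond; 23 ticks give about 958 ns, which truncates to 0 microseconds as the test expects.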
diff --git a/base/time/time_exploded_ios.cc b/base/time/time_exploded_ios.cc
index 3ac6343..1e04e9f 100644
--- a/base/time/time_exploded_ios.cc
+++ b/base/time/time_exploded_ios.cc
@@ -8,7 +8,7 @@
 #include <CoreFoundation/CFCalendar.h>
 #include <CoreFoundation/CFTimeZone.h>
 
-#include "base/mac/scoped_cftyperef.h"
+#include "base/apple/scoped_cftyperef.h"
 #include "base/numerics/safe_conversions.h"
 
 #if __LP64__
@@ -26,11 +26,11 @@
 
 // static
 bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
-  base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
+  ScopedCFTypeRef<CFTimeZoneRef> time_zone(
       is_local
           ? CFTimeZoneCopySystem()
           : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
-  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+  ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
       kCFAllocatorDefault, kCFGregorianCalendar));
   CFCalendarSetTimeZone(gregorian, time_zone);
   CFAbsoluteTime absolute_time;
@@ -83,11 +83,11 @@
                             kMicrosecondsPerSecond) -
                            kCFAbsoluteTimeIntervalSince1970;
 
-  base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
+  ScopedCFTypeRef<CFTimeZoneRef> time_zone(
       is_local
           ? CFTimeZoneCopySystem()
           : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
-  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+  ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
       kCFAllocatorDefault, kCFGregorianCalendar));
   CFCalendarSetTimeZone(gregorian, time_zone);
   int second, day_of_week;
diff --git a/base/time/time_mac.mm b/base/time/time_mac.mm
deleted file mode 100644
index 02d3598..0000000
--- a/base/time/time_mac.mm
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2012 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/time/time.h"
-
-#import <Foundation/Foundation.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/sysctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-
-#include "base/logging.h"
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/mac/scoped_mach_port.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/time/time_override.h"
-#include "build/build_config.h"
-
-#if !BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-#include <errno.h>
-#include <time.h>
-#include "base/ios/ios_util.h"
-#endif  // !BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-
-namespace {
-
-#if BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-// Returns a pointer to the initialized Mach timebase info struct.
-mach_timebase_info_data_t* MachTimebaseInfo() {
-  static mach_timebase_info_data_t timebase_info = []() {
-    mach_timebase_info_data_t info;
-    kern_return_t kr = mach_timebase_info(&info);
-    MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
-    DCHECK(info.numer);
-    DCHECK(info.denom);
-    return info;
-  }();
-  return &timebase_info;
-}
-
-int64_t MachTimeToMicroseconds(uint64_t mach_time) {
-  // timebase_info gives us the conversion factor between absolute time tick
-  // units and nanoseconds.
-  mach_timebase_info_data_t* timebase_info = MachTimebaseInfo();
-
-  // Take the fast path when the conversion is 1:1. The result will for sure fit
-  // into an int_64 because we're going from nanoseconds to microseconds.
-  if (timebase_info->numer == timebase_info->denom) {
-    return static_cast<int64_t>(mach_time /
-                                base::Time::kNanosecondsPerMicrosecond);
-  }
-
-  uint64_t microseconds = 0;
-  const uint64_t divisor =
-      timebase_info->denom * base::Time::kNanosecondsPerMicrosecond;
-
-  // Microseconds is mach_time * timebase.numer /
-  // (timebase.denom * kNanosecondsPerMicrosecond). Divide first to reduce
-  // the chance of overflow. Also stash the remainder right now, a likely
-  // byproduct of the division.
-  microseconds = mach_time / divisor;
-  const uint64_t mach_time_remainder = mach_time % divisor;
-
-  // Now multiply, keeping an eye out for overflow.
-  CHECK(!__builtin_umulll_overflow(microseconds, timebase_info->numer,
-                                   &microseconds));
-
-  // By dividing first we lose precision. Regain it by adding back the
-  // microseconds from the remainder, with an eye out for overflow.
-  uint64_t least_significant_microseconds =
-      (mach_time_remainder * timebase_info->numer) / divisor;
-  CHECK(!__builtin_uaddll_overflow(microseconds, least_significant_microseconds,
-                                   &microseconds));
-
-  // Don't bother with the rollover handling that the Windows version does.
-  // The returned time in microseconds is enough for 292,277 years (starting
-  // from 2^63 because the returned int64_t is signed,
-  // 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
-  return base::checked_cast<int64_t>(microseconds);
-}
-#endif  // BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-
-// Returns monotonically growing number of ticks in microseconds since some
-// unspecified starting point.
-int64_t ComputeCurrentTicks() {
-#if !BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-  struct timespec tp;
-  // clock_gettime() returns 0 on success and -1 on failure. Failure can only
-  // happen because of bad arguments (unsupported clock type or timespec pointer
-  // out of accessible address space). Here it is known that neither can happen
-  // since the timespec parameter is stack allocated right above and
-  // `CLOCK_MONOTONIC` is supported on all versions of iOS that Chrome is
-  // supported on.
-  int res = clock_gettime(CLOCK_MONOTONIC, &tp);
-  DCHECK_EQ(res, 0) << "Failed clock_gettime, errno: " << errno;
-
-  return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
-#else
-  // mach_absolute_time is it when it comes to ticks on the Mac.  Other calls
-  // with less precision (such as TickCount) just call through to
-  // mach_absolute_time.
-  return MachTimeToMicroseconds(mach_absolute_time());
-#endif  // !BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-}
-
-int64_t ComputeThreadTicks() {
-  // The pthreads library keeps a cached reference to the thread port, which
-  // does not have to be released like mach_thread_self() does.
-  mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
-  if (thread_port == MACH_PORT_NULL) {
-    DLOG(ERROR) << "Failed to get pthread_mach_thread_np()";
-    return 0;
-  }
-
-  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
-  thread_basic_info_data_t thread_info_data;
-
-  kern_return_t kr = thread_info(
-      thread_port, THREAD_BASIC_INFO,
-      reinterpret_cast<thread_info_t>(&thread_info_data), &thread_info_count);
-  MACH_DCHECK(kr == KERN_SUCCESS, kr) << "thread_info";
-
-  base::CheckedNumeric<int64_t> absolute_micros(
-      thread_info_data.user_time.seconds +
-      thread_info_data.system_time.seconds);
-  absolute_micros *= base::Time::kMicrosecondsPerSecond;
-  absolute_micros += (thread_info_data.user_time.microseconds +
-                      thread_info_data.system_time.microseconds);
-  return absolute_micros.ValueOrDie();
-}
-
-}  // namespace
-
-namespace base {
-
-// The Time routines in this file use Mach and CoreFoundation APIs, since the
-// POSIX definition of time_t in macOS wraps around after 2038--and
-// there are already cookie expiration dates, etc., past that time out in
-// the field.  Using CFDate prevents that problem, and using mach_absolute_time
-// for TimeTicks gives us nice high-resolution interval timing.
-
-// Time -----------------------------------------------------------------------
-
-namespace subtle {
-Time TimeNowIgnoringOverride() {
-  return Time::FromCFAbsoluteTime(CFAbsoluteTimeGetCurrent());
-}
-
-Time TimeNowFromSystemTimeIgnoringOverride() {
-  // Just use TimeNowIgnoringOverride() because it returns the system time.
-  return TimeNowIgnoringOverride();
-}
-}  // namespace subtle
-
-// static
-Time Time::FromCFAbsoluteTime(CFAbsoluteTime t) {
-  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
-                "CFAbsoluteTime must have an infinity value");
-  if (t == 0)
-    return Time();  // Consider 0 as a null Time.
-  return (t == std::numeric_limits<CFAbsoluteTime>::infinity())
-             ? Max()
-             : (UnixEpoch() +
-                Seconds(double{t + kCFAbsoluteTimeIntervalSince1970}));
-}
-
-CFAbsoluteTime Time::ToCFAbsoluteTime() const {
-  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
-                "CFAbsoluteTime must have an infinity value");
-  if (is_null())
-    return 0;  // Consider 0 as a null Time.
-  return is_max() ? std::numeric_limits<CFAbsoluteTime>::infinity()
-                  : (CFAbsoluteTime{(*this - UnixEpoch()).InSecondsF()} -
-                     kCFAbsoluteTimeIntervalSince1970);
-}
-
-// static
-Time Time::FromNSDate(NSDate* date) {
-  DCHECK(date);
-  return FromCFAbsoluteTime(date.timeIntervalSinceReferenceDate);
-}
-
-NSDate* Time::ToNSDate() const {
-  return [NSDate dateWithTimeIntervalSinceReferenceDate:ToCFAbsoluteTime()];
-}
-
-// TimeDelta ------------------------------------------------------------------
-
-#if BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-// static
-TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
-  return Microseconds(MachTimeToMicroseconds(mach_time));
-}
-#endif  // BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-
-// TimeTicks ------------------------------------------------------------------
-
-namespace subtle {
-TimeTicks TimeTicksNowIgnoringOverride() {
-  return TimeTicks() + Microseconds(ComputeCurrentTicks());
-}
-}  // namespace subtle
-
-// static
-bool TimeTicks::IsHighResolution() {
-  return true;
-}
-
-// static
-bool TimeTicks::IsConsistentAcrossProcesses() {
-  return true;
-}
-
-#if BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-// static
-TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
-  return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
-}
-
-// static
-mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
-    mach_timebase_info_data_t timebase) {
-  mach_timebase_info_data_t orig_timebase = *MachTimebaseInfo();
-
-  *MachTimebaseInfo() = timebase;
-
-  return orig_timebase;
-}
-
-#endif  // BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-
-// static
-TimeTicks::Clock TimeTicks::GetClock() {
-#if !BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-  return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
-#else
-  return Clock::MAC_MACH_ABSOLUTE_TIME;
-#endif  // !BUILDFLAG(ENABLE_MACH_ABSOLUTE_TIME_TICKS)
-}
-
-// ThreadTicks ----------------------------------------------------------------
-
-namespace subtle {
-ThreadTicks ThreadTicksNowIgnoringOverride() {
-  return ThreadTicks() + Microseconds(ComputeThreadTicks());
-}
-}  // namespace subtle
-
-}  // namespace base
diff --git a/base/time/time_mac_unittest.mm b/base/time/time_mac_unittest.mm
deleted file mode 100644
index 6285324..0000000
--- a/base/time/time_mac_unittest.mm
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2021 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/gtest_util.h"
-#include "base/time/time.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-class ScopedTimebase {
- public:
-  ScopedTimebase(mach_timebase_info_data_t timebase) {
-    orig_timebase_ = base::TimeTicks::SetMachTimebaseInfoForTesting(timebase);
-  }
-
-  ScopedTimebase(const ScopedTimebase&) = delete;
-
-  ScopedTimebase& operator=(const ScopedTimebase&) = delete;
-
-  ~ScopedTimebase() {
-    base::TimeTicks::SetMachTimebaseInfoForTesting(orig_timebase_);
-  }
-
- private:
-  mach_timebase_info_data_t orig_timebase_;
-};
-
-mach_timebase_info_data_t kIntelTimebase = {1, 1};
-
-// A sample (not definitive) timebase for M1.
-mach_timebase_info_data_t kM1Timebase = {125, 3};
-
-}  // namespace
-
-namespace base {
-namespace {
-
-base::Time NoonOnDate(int year, int month, int day) {
-  base::Time::Exploded exploded;
-  exploded.year = year;
-  exploded.month = month;
-  exploded.day_of_week = 0;  // Not correct, but FromExploded permits it
-  exploded.day_of_month = day;
-  exploded.hour = 12;
-  exploded.minute = 0;
-  exploded.second = 0;
-  exploded.millisecond = 0;
-  base::Time imploded;
-  CHECK(base::Time::FromUTCExploded(exploded, &imploded));
-  return imploded;
-}
-
-void CheckRoundTrip(int y, int m, int d) {
-  base::Time original = NoonOnDate(y, m, d);
-  base::Time roundtrip = Time::FromNSDate(original.ToNSDate());
-  EXPECT_EQ(original, roundtrip);
-}
-
-TEST(TimeMacTest, RoundTripNSDate) {
-  CheckRoundTrip(1911, 12, 14);
-  CheckRoundTrip(1924, 9, 28);
-  CheckRoundTrip(1926, 5, 12);
-  CheckRoundTrip(1969, 7, 24);
-}
-
-TEST(TimeMacTest, MachTimeToMicrosecondsIntelTimebase) {
-  ScopedTimebase timebase(kIntelTimebase);
-
-  // Perform the conversion.
-  uint64_t kArbitraryTicks = 59090101000;
-  TimeDelta result = TimeDelta::FromMachTime(kArbitraryTicks);
-
-  // With Intel the output should be the input.
-  EXPECT_EQ(Nanoseconds(kArbitraryTicks), result);
-}
-
-TEST(TimeMacTest, MachTimeToMicrosecondsM1Timebase) {
-  ScopedTimebase timebase(kM1Timebase);
-
-  // Use a tick count that's divisible by 3.
-  const uint64_t kArbitraryTicks = 92738127000;
-  TimeDelta result = TimeDelta::FromMachTime(kArbitraryTicks);
-
-  const uint64_t kExpectedResult =
-      kArbitraryTicks * kM1Timebase.numer / kM1Timebase.denom;
-  EXPECT_EQ(Nanoseconds(kExpectedResult), result);
-}
-
-// Tests MachTimeToMicroseconds when
-// mach_timebase_info_data_t.numer and mach_timebase_info_data_t.denom
-// are equal.
-TEST(TimeMacTest, MachTimeToMicrosecondsEqualTimebaseMembers) {
-  // These members would produce overflow but don't because
-  // MachTimeToMicroseconds should skip the timebase conversion
-  // when they're equal.
-  ScopedTimebase timebase({UINT_MAX, UINT_MAX});
-
-  uint64_t kArbitraryTicks = 175920053729;
-  TimeDelta result = TimeDelta::FromMachTime(kArbitraryTicks);
-
-  // With a unity timebase the output should be the input.
-  EXPECT_EQ(Nanoseconds(kArbitraryTicks), result);
-}
-
-TEST(TimeMacTest, MachTimeToMicrosecondsOverflowDetection) {
-  const uint32_t kArbitraryNumer = 1234567;
-  ScopedTimebase timebase({kArbitraryNumer, 1});
-
-  // Expect an overflow.
-  EXPECT_CHECK_DEATH(
-      TimeDelta::FromMachTime(std::numeric_limits<uint64_t>::max()));
-}
-
-// Tests that there's no overflow in MachTimeToMicroseconds even with
-// std::numeric_limits<uint64_t>::max() ticks on Intel.
-TEST(TimeMacTest, MachTimeToMicrosecondsNoOverflowIntel) {
-  ScopedTimebase timebase(kIntelTimebase);
-
-  // The incoming Mach time ticks are on the order of nanoseconds while the
-  // return result is microseconds. Even though we're passing in the largest
-  // tick count the result should be orders of magnitude smaller. On Intel the
-  // mapping from ticks to nanoseconds is 1:1 so we wouldn't ever expect an
-  // overflow when applying the timebase conversion.
-  TimeDelta::FromMachTime(std::numeric_limits<uint64_t>::max());
-}
-
-// Tests that there's no overflow in MachTimeToMicroseconds even with
-// std::numeric_limits<uint64_t>::max() ticks on M1.
-TEST(TimeMacTest, MachTimeToMicrosecondsNoOverflowM1) {
-  ScopedTimebase timebase(kM1Timebase);
-
-  // The incoming Mach time ticks are on the order of nanoseconds while the
-  // return result is microseconds. Even though we're passing in the largest
-  // tick count the result should be orders of magnitude smaller. Expect that
-  // FromMachTime(), when applying the timebase conversion, is smart enough to
-  // not multiply first and generate an overflow.
-  TimeDelta::FromMachTime(std::numeric_limits<uint64_t>::max());
-}
-
-// Tests that there's no underflow in MachTimeToMicroseconds on Intel.
-TEST(TimeMacTest, MachTimeToMicrosecondsNoUnderflowIntel) {
-  ScopedTimebase timebase(kIntelTimebase);
-
-  // On Intel the timebase conversion is 1:1, so min ticks is one microsecond
-  // worth of nanoseconds.
-  const uint64_t kMinimumTicks = base::Time::kNanosecondsPerMicrosecond;
-  const uint64_t kOneMicrosecond = 1;
-  EXPECT_EQ(kOneMicrosecond,
-            TimeDelta::FromMachTime(kMinimumTicks).InMicroseconds() * 1UL);
-
-  // If we have even one fewer tick (i.e. not enough ticks to constitute a full
-  // microsecond) the integer rounding should result in 0 microseconds.
-  const uint64_t kZeroMicroseconds = 0;
-  EXPECT_EQ(kZeroMicroseconds,
-            TimeDelta::FromMachTime(kMinimumTicks - 1).InMicroseconds() * 1UL);
-}
-
-// Tests that there's no underflow in MachTimeToMicroseconds for M1.
-TEST(TimeMacTest, MachTimeToMicrosecondsNoUnderflowM1) {
-  ScopedTimebase timebase(kM1Timebase);
-
-  // Microseconds is mach_time multiplied by kM1Timebase.numer /
-  // (kM1Timebase.denom * base::Time::kNanosecondsPerMicrosecond). Inverting
-  // that should be the minimum number of ticks to get a single microsecond in
-  // return. If we get zero it means an underflow in the conversion. For example
-  // if FromMachTime() first divides mach_time by kM1Timebase.denom *
-  // base::Time::kNanosecondsPerMicrosecond we'll get zero back.
-  const uint64_t kMinimumTicks =
-      (kM1Timebase.denom * base::Time::kNanosecondsPerMicrosecond) /
-      kM1Timebase.numer;
-  const uint64_t kOneMicrosecond = 1;
-  EXPECT_EQ(kOneMicrosecond,
-            TimeDelta::FromMachTime(kMinimumTicks).InMicroseconds() * 1UL);
-
-  // If we have even one fewer tick (i.e. not enough ticks to constitute a full
-  // microsecond) the integer rounding should result in 0 microseconds.
-  const uint64_t kZeroMicroseconds = 0;
-  EXPECT_EQ(kZeroMicroseconds,
-            TimeDelta::FromMachTime(kMinimumTicks - 1).InMicroseconds() * 1UL);
-}
-
-}  // namespace
-}  // namespace base
diff --git a/base/time/time_to_iso8601.cc b/base/time/time_to_iso8601.cc
deleted file mode 100644
index 10ca65c..0000000
--- a/base/time/time_to_iso8601.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/time/time_to_iso8601.h"
-
-#include "base/check.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-
-namespace base {
-
-std::string TimeToISO8601(const Time& t) {
-  Time::Exploded exploded;
-  t.UTCExplode(&exploded);
-  return StringPrintf("%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", exploded.year,
-                      exploded.month, exploded.day_of_month, exploded.hour,
-                      exploded.minute, exploded.second, exploded.millisecond);
-}
-
-}  // namespace base
diff --git a/base/time/time_to_iso8601.h b/base/time/time_to_iso8601.h
deleted file mode 100644
index cac5754..0000000
--- a/base/time/time_to_iso8601.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TIME_TIME_TO_ISO8601_H_
-#define BASE_TIME_TIME_TO_ISO8601_H_
-
-#include <string>
-
-#include "base/base_export.h"
-
-namespace base {
-
-class Time;
-
-BASE_EXPORT std::string TimeToISO8601(const base::Time& t);
-
-}  // namespace base
-
-#endif  // BASE_TIME_TIME_TO_ISO8601_H_
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index 4b32855..61d8560 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -14,6 +14,8 @@
 #include "base/check_op.h"
 #include "base/compiler_specific.h"
 #include "base/environment.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/to_string.h"
 #include "base/test/gtest_util.h"
 #include "base/threading/platform_thread.h"
 #include "base/time/time_override.h"
@@ -60,7 +62,8 @@
   int32_t raw_offset = 0;
   int32_t dst_offset = 0;
   UErrorCode ec = U_ZERO_ERROR;
-  tz->getOffset(time.ToDoubleT(), false, raw_offset, dst_offset, ec);
+  tz->getOffset(time.InSecondsFSinceUnixEpoch(), false, raw_offset, dst_offset,
+                ec);
   if (!U_SUCCESS(ec)) {
     return {};
   }
@@ -243,7 +246,7 @@
 // Test conversion to/from time_t.
 TEST_F(TimeTest, TimeT) {
   EXPECT_EQ(10, Time().FromTimeT(10).ToTimeT());
-  EXPECT_EQ(10.0, Time().FromTimeT(10).ToDoubleT());
+  EXPECT_EQ(10.0, Time().FromTimeT(10).InSecondsFSinceUnixEpoch());
 
   // Conversions of 0 should stay 0.
   EXPECT_EQ(0, Time().ToTimeT());
@@ -325,19 +328,19 @@
 
 // Test conversions to/from javascript time.
 TEST_F(TimeTest, JsTime) {
-  Time epoch = Time::FromJsTime(0.0);
+  Time epoch = Time::FromMillisecondsSinceUnixEpoch(0.0);
   EXPECT_EQ(epoch, Time::UnixEpoch());
-  Time t = Time::FromJsTime(700000.3);
-  EXPECT_EQ(700.0003, t.ToDoubleT());
-  t = Time::FromDoubleT(800.73);
-  EXPECT_EQ(800730.0, t.ToJsTime());
+  Time t = Time::FromMillisecondsSinceUnixEpoch(700000.3);
+  EXPECT_EQ(700.0003, t.InSecondsFSinceUnixEpoch());
+  t = Time::FromSecondsSinceUnixEpoch(800.73);
+  EXPECT_EQ(800730.0, t.InMillisecondsFSinceUnixEpoch());
 
-  // 1601-01-01 isn't round-trip with ToJsTime().
+  // 1601-01-01 isn't round-trip with InMillisecondsFSinceUnixEpoch().
   const double kWindowsEpoch = -11644473600000.0;
-  Time time = Time::FromJsTime(kWindowsEpoch);
+  Time time = Time::FromMillisecondsSinceUnixEpoch(kWindowsEpoch);
   EXPECT_TRUE(time.is_null());
-  EXPECT_NE(kWindowsEpoch, time.ToJsTime());
-  EXPECT_EQ(kWindowsEpoch, time.ToJsTimeIgnoringNull());
+  EXPECT_NE(kWindowsEpoch, time.InMillisecondsFSinceUnixEpoch());
+  EXPECT_EQ(kWindowsEpoch, time.InMillisecondsFSinceUnixEpochIgnoringNull());
 }
 
 #if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
@@ -366,7 +369,7 @@
   Time zero_time(Time::FromTimeT(0));
   EXPECT_EQ(0, zero_time.ToTimeT());
 
-  EXPECT_EQ(0.0, zero_time.ToDoubleT());
+  EXPECT_EQ(0.0, zero_time.InSecondsFSinceUnixEpoch());
 }
 
 // Note that this test does not check whether the implementation correctly
@@ -603,6 +606,12 @@
   EXPECT_EQ(parsed_time, comparison_time_local_);
 }
 
+TEST_F(TimeTest, ParseTimeTest11) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("2007-10-15 12:45:00", &parsed_time));
+  EXPECT_EQ(parsed_time, comparison_time_local_);
+}
+
 // Test some of edge cases around epoch, etc.
 TEST_F(TimeTest, ParseTimeTestEpoch0) {
   Time parsed_time;
@@ -866,13 +875,17 @@
   static_assert(std::numeric_limits<int64_t>::max() == kMax.ToInternalValue(),
                 "");
 
-  Time t = Time::FromDoubleT(std::numeric_limits<double>::infinity());
+  Time t =
+      Time::FromSecondsSinceUnixEpoch(std::numeric_limits<double>::infinity());
   EXPECT_TRUE(t.is_max());
-  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToDoubleT());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            t.InSecondsFSinceUnixEpoch());
 
-  t = Time::FromJsTime(std::numeric_limits<double>::infinity());
+  t = Time::FromMillisecondsSinceUnixEpoch(
+      std::numeric_limits<double>::infinity());
   EXPECT_TRUE(t.is_max());
-  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToJsTime());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            t.InMillisecondsFSinceUnixEpoch());
 
   t = Time::FromTimeT(std::numeric_limits<time_t>::max());
   EXPECT_TRUE(t.is_max());
@@ -1272,7 +1285,13 @@
 // static
 Time TimeOverride::now_time_;
 
-TEST_F(TimeTest, NowOverride) {
+// Disabled on Android due to flakes; see https://crbug.com/1474884.
+#if BUILDFLAG(IS_ANDROID)
+#define MAYBE_NowOverride DISABLED_NowOverride
+#else
+#define MAYBE_NowOverride NowOverride
+#endif
+TEST_F(TimeTest, MAYBE_NowOverride) {
   TimeOverride::now_time_ = Time::UnixEpoch();
 
   // Choose a reference time that we know to be in the past but close to now.
@@ -1325,12 +1344,6 @@
 
 #undef MAYBE_NowOverride
 
-TEST_F(TimeTest, TimeFormatHTTP) {
-  base::Time time;
-  ASSERT_TRUE(base::Time::FromString("1994-11-06T08:49:37Z", &time));
-  EXPECT_EQ("Sun, 06 Nov 1994 08:49:37 GMT", TimeFormatHTTP(time));
-}
-
 #if BUILDFLAG(IS_FUCHSIA)
 TEST(ZxTimeTest, ToFromConversions) {
   Time unix_epoch = Time::UnixEpoch();
@@ -1865,15 +1878,6 @@
   EXPECT_EQ(base::ClampRound(Hertz(60).ToHz()), 60);
 }
 
-// We could define this separately for Time, TimeTicks and TimeDelta but the
-// definitions would be identical anyway.
-template <class Any>
-std::string AnyToString(Any any) {
-  std::ostringstream oss;
-  oss << any;
-  return oss.str();
-}
-
 TEST(TimeDelta, Magnitude) {
   constexpr int64_t zero = 0;
   static_assert(Microseconds(zero) == Microseconds(zero).magnitude());
@@ -2445,17 +2449,17 @@
 
 TEST(TimeDeltaLogging, EmptyIsZero) {
   constexpr TimeDelta kZero;
-  EXPECT_EQ("0 s", AnyToString(kZero));
+  EXPECT_EQ("0 s", ToString(kZero));
 }
 
 TEST(TimeDeltaLogging, FiveHundredMs) {
   constexpr TimeDelta kFiveHundredMs = Milliseconds(500);
-  EXPECT_EQ("0.5 s", AnyToString(kFiveHundredMs));
+  EXPECT_EQ("0.5 s", ToString(kFiveHundredMs));
 }
 
 TEST(TimeDeltaLogging, MinusTenSeconds) {
   constexpr TimeDelta kMinusTenSeconds = Seconds(-10);
-  EXPECT_EQ("-10 s", AnyToString(kMinusTenSeconds));
+  EXPECT_EQ("-10 s", ToString(kMinusTenSeconds));
 }
 
 TEST(TimeDeltaLogging, DoesNotMessUpFormattingFlags) {
@@ -2478,7 +2482,31 @@
 TEST(TimeLogging, ChromeBirthdate) {
   Time birthdate;
   ASSERT_TRUE(Time::FromString("Tue, 02 Sep 2008 09:42:18 GMT", &birthdate));
-  EXPECT_EQ("2008-09-02 09:42:18.000 UTC", AnyToString(birthdate));
+  EXPECT_EQ("2008-09-02 09:42:18.000000 UTC", ToString(birthdate));
+}
+
+TEST(TimeLogging, Microseconds) {
+  // Some Time with a non-zero number of microseconds.
+  Time now = Time::Now();
+  if (now.ToDeltaSinceWindowsEpoch().InMicroseconds() %
+          Time::kMicrosecondsPerMillisecond ==
+      0) {
+    now += Microseconds(1);
+  }
+
+  // Crudely parse the microseconds portion out of the stringified Time. Use
+  // find() and ASSERTs to try to give an accurate test result, without
+  // crashing, even if the logging format changes in the future (e.g. someone
+  // removes microseconds, adds nanoseconds, changes the timezone format, etc.).
+  const std::string now_str = ToString(now);
+  ASSERT_GT(now_str.length(), 6u);
+  const size_t period = now_str.find('.');
+  ASSERT_LT(period, now_str.length() - 6);
+  int microseconds = 0;
+  EXPECT_TRUE(StringToInt(now_str.substr(period + 4, 3), &microseconds));
+
+  // The stringified microseconds should also be nonzero.
+  EXPECT_NE(0, microseconds);
 }
 
 TEST(TimeLogging, DoesNotMessUpFormattingFlags) {
@@ -2500,13 +2528,12 @@
 
 TEST(TimeTicksLogging, ZeroTime) {
   TimeTicks zero;
-  EXPECT_EQ("0 bogo-microseconds", AnyToString(zero));
+  EXPECT_EQ("0 bogo-microseconds", ToString(zero));
 }
 
 TEST(TimeTicksLogging, FortyYearsLater) {
   TimeTicks forty_years_later = TimeTicks() + Days(365.25 * 40);
-  EXPECT_EQ("1262304000000000 bogo-microseconds",
-            AnyToString(forty_years_later));
+  EXPECT_EQ("1262304000000000 bogo-microseconds", ToString(forty_years_later));
 }
 
 TEST(TimeTicksLogging, DoesNotMessUpFormattingFlags) {
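For reference, a minimal sketch (not part of the patch) of the renamed //base
time-conversion helpers exercised by the updated tests above. It assumes only
the names visible in this diff and the Chromium //base headers they come from.

// Sketch only: the FromDoubleT()/ToDoubleT() and FromJsTime()/ToJsTime() call
// sites above now use the *SinceUnixEpoch spellings.
#include <string>

#include "base/strings/to_string.h"
#include "base/time/time.h"

void TimeConversionSketch() {
  // Formerly Time::FromDoubleT() / Time::ToDoubleT().
  base::Time t = base::Time::FromSecondsSinceUnixEpoch(800.73);
  double seconds = t.InSecondsFSinceUnixEpoch();  // 800.73

  // Formerly Time::FromJsTime() / Time::ToJsTime().
  base::Time js = base::Time::FromMillisecondsSinceUnixEpoch(700000.3);
  double millis = js.InMillisecondsFSinceUnixEpoch();  // 700000.3

  // Formerly streamed through a local AnyToString() helper; base::ToString()
  // yields the same "0.5 s" / "... UTC" strings the tests check.
  std::string delta_str = base::ToString(base::Milliseconds(500));
  std::string time_str = base::ToString(t);
  (void)seconds; (void)millis; (void)delta_str; (void)time_str;
}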
diff --git a/base/timer/elapsed_timer.cc b/base/timer/elapsed_timer.cc
index e45ace7..4c71b90 100644
--- a/base/timer/elapsed_timer.cc
+++ b/base/timer/elapsed_timer.cc
@@ -12,18 +12,19 @@
 bool g_mock_elapsed_timers_for_test = false;
 }  // namespace
 
-ElapsedTimer::ElapsedTimer() : begin_(TimeTicks::Now()) {}
+ElapsedTimer::ElapsedTimer() : start_time_(TimeTicks::Now()) {}
 
-ElapsedTimer::ElapsedTimer(ElapsedTimer&& other) : begin_(other.begin_) {}
+ElapsedTimer::ElapsedTimer(ElapsedTimer&& other)
+    : start_time_(other.start_time_) {}
 
 void ElapsedTimer::operator=(ElapsedTimer&& other) {
-  begin_ = other.begin_;
+  start_time_ = other.start_time_;
 }
 
 TimeDelta ElapsedTimer::Elapsed() const {
   if (g_mock_elapsed_timers_for_test)
     return ScopedMockElapsedTimersForTest::kMockElapsedTime;
-  return TimeTicks::Now() - begin_;
+  return TimeTicks::Now() - start_time_;
 }
 
 ElapsedThreadTimer::ElapsedThreadTimer()
diff --git a/base/timer/elapsed_timer.h b/base/timer/elapsed_timer.h
index f5f4a42..1f2ecf6 100644
--- a/base/timer/elapsed_timer.h
+++ b/base/timer/elapsed_timer.h
@@ -26,10 +26,10 @@
   TimeDelta Elapsed() const;
 
   // Returns the timestamp of the creation of this timer.
-  TimeTicks Begin() const { return begin_; }
+  TimeTicks start_time() const { return start_time_; }
 
  private:
-  TimeTicks begin_;
+  TimeTicks start_time_;
 };
 
 // A simple wrapper around ThreadTicks::Now().
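A minimal usage sketch (not part of the patch) reflecting the accessor rename
above; the member function names are exactly those introduced in this diff,
and the surrounding code is illustrative only.

#include "base/time/time.h"
#include "base/timer/elapsed_timer.h"

void ElapsedTimerSketch() {
  base::ElapsedTimer timer;                      // Records TimeTicks::Now().
  // ... do some work ...
  base::TimeDelta elapsed = timer.Elapsed();     // Time since construction.
  base::TimeTicks started = timer.start_time();  // Formerly timer.Begin().
  (void)elapsed;
  (void)started;
}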
diff --git a/base/token.cc b/base/token.cc
index 1ac4d20..27e77d5 100644
--- a/base/token.cc
+++ b/base/token.cc
@@ -7,8 +7,10 @@
 #include <inttypes.h>
 
 #include "base/check.h"
+#include "base/hash/hash.h"
 #include "base/pickle.h"
 #include "base/rand_util.h"
+#include "base/strings/string_piece.h"
 #include "base/strings/stringprintf.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
@@ -73,4 +75,8 @@
   return Token(high, low);
 }
 
+size_t TokenHash::operator()(const Token& token) const {
+  return HashInts64(token.high(), token.low());
+}
+
 }  // namespace base
diff --git a/base/token.h b/base/token.h
index 098ea5b..e67532b 100644
--- a/base/token.h
+++ b/base/token.h
@@ -12,7 +12,7 @@
 
 #include "base/base_export.h"
 #include "base/containers/span.h"
-#include "base/hash/hash.h"
+#include "base/strings/string_piece_forward.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
@@ -80,10 +80,8 @@
 };
 
 // For use in std::unordered_map.
-struct TokenHash {
-  size_t operator()(const base::Token& token) const {
-    return base::HashInts64(token.high(), token.low());
-  }
+struct BASE_EXPORT TokenHash {
+  size_t operator()(const Token& token) const;
 };
 
 class Pickle;
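A minimal sketch (not part of the patch) of the std::unordered_map usage the
comment above refers to; behavior is unchanged by moving TokenHash's definition
out of line, and the Token values here are made up for illustration.

#include <string>
#include <unordered_map>

#include "base/token.h"

void TokenHashSketch() {
  std::unordered_map<base::Token, std::string, base::TokenHash> names;
  names[base::Token(1u, 2u)] = "example";  // Hashed via base::HashInts64().
}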
diff --git a/base/trace_event/address_space_dump_provider.cc b/base/trace_event/address_space_dump_provider.cc
index 3d591c3..25eb48d 100644
--- a/base/trace_event/address_space_dump_provider.cc
+++ b/base/trace_event/address_space_dump_provider.cc
@@ -4,9 +4,9 @@
 
 #include "base/trace_event/address_space_dump_provider.h"
 
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
 #include "base/no_destructor.h"
 #include "base/trace_event/memory_allocator_dump.h"
 #include "base/trace_event/process_memory_dump.h"
diff --git a/base/trace_event/address_space_dump_provider.h b/base/trace_event/address_space_dump_provider.h
index d159e1f..8fb9899 100644
--- a/base/trace_event/address_space_dump_provider.h
+++ b/base/trace_event/address_space_dump_provider.h
@@ -5,7 +5,7 @@
 #ifndef BASE_TRACE_EVENT_ADDRESS_SPACE_DUMP_PROVIDER_H_
 #define BASE_TRACE_EVENT_ADDRESS_SPACE_DUMP_PROVIDER_H_
 
-#include "base/allocator/partition_allocator/address_space_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/address_space_stats.h"
 #include "base/base_export.h"
 #include "base/memory/raw_ptr.h"
 #include "base/trace_event/memory_dump_provider.h"
diff --git a/base/trace_event/builtin_categories.h b/base/trace_event/builtin_categories.h
index 1ddaeaf..c4ed2b8 100644
--- a/base/trace_event/builtin_categories.h
+++ b/base/trace_event/builtin_categories.h
@@ -32,6 +32,7 @@
   /* The rest of the list is in alphabetical order */                    \
   X("accessibility")                                                     \
   X("AccountFetcherService")                                             \
+  X("android.ui.jank")                                                   \
   X("android_webview")                                                   \
   X("android_webview.timeline")                                          \
   /* Actions on Google Hardware, used in Google-internal code. */        \
@@ -69,6 +70,7 @@
   X("compositor")                                                        \
   X("content")                                                           \
   X("content_capture")                                                   \
+  X("interactions")                                                      \
   X("delegated_ink_trails")                                              \
   X("device")                                                            \
   X("devtools")                                                          \
@@ -94,6 +96,7 @@
   X("gpu.angle")                                                         \
   X("gpu.angle.texture_metrics")                                         \
   X("gpu.capture")                                                       \
+  X("graphics.pipeline")                                                 \
   X("headless")                                                          \
   /* Traces for //components/history. */                                 \
   X("history")                                                           \
@@ -233,6 +236,7 @@
   X(TRACE_DISABLED_BY_DEFAULT("gpu.debug"))                              \
   X(TRACE_DISABLED_BY_DEFAULT("gpu.decoder"))                            \
   X(TRACE_DISABLED_BY_DEFAULT("gpu.device"))                             \
+  X(TRACE_DISABLED_BY_DEFAULT("gpu.graphite.dawn"))                      \
   X(TRACE_DISABLED_BY_DEFAULT("gpu.service"))                            \
   X(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"))                         \
   X(TRACE_DISABLED_BY_DEFAULT("histogram_samples"))                      \
@@ -279,6 +283,7 @@
   X(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"))                       \
   X(TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan"))                       \
   X(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture"))                \
+  X(TRACE_DISABLED_BY_DEFAULT("display.framedisplayed"))                 \
   X(TRACE_DISABLED_BY_DEFAULT("viz.gpu_composite_time"))                 \
   X(TRACE_DISABLED_BY_DEFAULT("viz.debug.overlay_planes"))               \
   X(TRACE_DISABLED_BY_DEFAULT("viz.hit_testing_flow"))                   \
@@ -296,6 +301,7 @@
 
 #define INTERNAL_TRACE_LIST_BUILTIN_CATEGORY_GROUPS(X)                        \
   X("android_webview,toplevel")                                               \
+  X("android_webview.timeline,android.ui.jank")                               \
   X("base,toplevel")                                                          \
   X("benchmark,drm")                                                          \
   X("benchmark,latencyInfo,rail")                                             \
@@ -304,6 +310,7 @@
   X("benchmark,rail")                                                         \
   X("benchmark,uma")                                                          \
   X("benchmark,viz")                                                          \
+  X("benchmark,viz," TRACE_DISABLED_BY_DEFAULT("display.framedisplayed"))     \
   X("blink,benchmark")                                                        \
   X("blink,benchmark,rail," TRACE_DISABLED_BY_DEFAULT("blink.debug.layout"))  \
   X("blink,blink.resource")                                                   \
@@ -333,6 +340,7 @@
   X("gpu,benchmark")                                                          \
   X("gpu,benchmark,android_webview")                                          \
   X("gpu,benchmark,webview")                                                  \
+  X("gpu,login")                                                              \
   X("gpu,startup")                                                            \
   X("gpu,toplevel.flow")                                                      \
   X("gpu.angle,startup")                                                      \
@@ -349,6 +357,7 @@
   X("ipc,security")                                                           \
   X("ipc,toplevel")                                                           \
   X("Java,devtools," TRACE_DISABLED_BY_DEFAULT("devtools.timeline"))          \
+  X("loading,interactions")                                                   \
   X("loading,rail")                                                           \
   X("loading,rail,devtools.timeline")                                         \
   X("media,gpu")                                                              \
@@ -372,6 +381,7 @@
   X("v8,devtools.timeline")                                                   \
   X("v8,devtools.timeline," TRACE_DISABLED_BY_DEFAULT("v8.compile"))          \
   X("viz,benchmark")                                                          \
+  X("viz,benchmark,graphics.pipeline")                                        \
   X("WebCore,benchmark,rail")                                                 \
   X(TRACE_DISABLED_BY_DEFAULT("cc.debug") "," TRACE_DISABLED_BY_DEFAULT(      \
       "viz.quads") "," TRACE_DISABLED_BY_DEFAULT("devtools.timeline.layers")) \
diff --git a/base/trace_event/category_registry.cc b/base/trace_event/category_registry.cc
index 66b640b..e9fc890 100644
--- a/base/trace_event/category_registry.cc
+++ b/base/trace_event/category_registry.cc
@@ -20,7 +20,7 @@
 namespace {
 
 // |categories_| might end up creating dynamic initializers if not POD.
-static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
+static_assert(std::is_pod_v<TraceCategory>, "TraceCategory must be POD");
 
 }  // namespace
 
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index f2ee4d3..c1bb2e2 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -195,7 +195,7 @@
 // use_perfetto_client_library GN arg. If that flag is disabled, we fall back to
 // the legacy implementation in the latter half of this file (and
 // trace_event.h).
-// TODO(skyostil): Remove the legacy macro implementation.
+// TODO(skyostil, crbug.com/1006541): Remove the legacy macro implementation.
 
 // Normally we'd use BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) for this, but
 // because v8 includes trace_event_common.h directly (in non-Perfetto mode), we
diff --git a/base/trace_event/heap_profiler.h b/base/trace_event/heap_profiler.h
index 556ecca..07d0e21 100644
--- a/base/trace_event/heap_profiler.h
+++ b/base/trace_event/heap_profiler.h
@@ -45,7 +45,7 @@
       : context_(task_context) {
     using base::trace_event::AllocationContextTracker;
     if (UNLIKELY(AllocationContextTracker::capture_mode() !=
-                 AllocationContextTracker::CaptureMode::DISABLED)) {
+                 AllocationContextTracker::CaptureMode::kDisabled)) {
       AllocationContextTracker::GetInstanceForCurrentThread()
           ->PushCurrentTaskContext(context_);
     }
@@ -54,7 +54,7 @@
   inline ~HeapProfilerScopedTaskExecutionTracker() {
     using base::trace_event::AllocationContextTracker;
     if (UNLIKELY(AllocationContextTracker::capture_mode() !=
-                 AllocationContextTracker::CaptureMode::DISABLED)) {
+                 AllocationContextTracker::CaptureMode::kDisabled)) {
       AllocationContextTracker::GetInstanceForCurrentThread()
           ->PopCurrentTaskContext(context_);
     }
diff --git a/base/trace_event/heap_profiler_allocation_context.cc b/base/trace_event/heap_profiler_allocation_context.cc
index 3d3d2b8..377929f 100644
--- a/base/trace_event/heap_profiler_allocation_context.cc
+++ b/base/trace_event/heap_profiler_allocation_context.cc
@@ -7,6 +7,7 @@
 #include <algorithm>
 #include <cstring>
 
+#include "base/containers/span.h"
 #include "base/hash/hash.h"
 
 namespace base {
@@ -67,7 +68,8 @@
   for (size_t i = 0; i != backtrace.frame_count; ++i) {
     values[i] = backtrace.frames[i].value;
   }
-  return base::PersistentHash(values, backtrace.frame_count * sizeof(*values));
+  auto values_span = base::make_span(values).first(backtrace.frame_count);
+  return base::PersistentHash(base::as_bytes(values_span));
 }
 
 size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
diff --git a/base/trace_event/heap_profiler_allocation_context.h b/base/trace_event/heap_profiler_allocation_context.h
index 17c32a2..4121c65 100644
--- a/base/trace_event/heap_profiler_allocation_context.h
+++ b/base/trace_event/heap_profiler_allocation_context.h
@@ -31,15 +31,15 @@
 // pointer from process' address space.
 struct BASE_EXPORT StackFrame {
   enum class Type {
-    THREAD_NAME,        // const char* thread name
-    PROGRAM_COUNTER,    // as returned by stack tracing (e.g. by StackTrace)
+    kThreadName,      // const char* thread name
+    kProgramCounter,  // as returned by stack tracing (e.g. by StackTrace)
   };
 
   static StackFrame FromThreadName(const char* name) {
-    return {Type::THREAD_NAME, name};
+    return {Type::kThreadName, name};
   }
   static StackFrame FromProgramCounter(const void* pc) {
-    return {Type::PROGRAM_COUNTER, pc};
+    return {Type::kProgramCounter, pc};
   }
 
   Type type;
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index fb55bda..c8b97bd 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -17,7 +17,7 @@
 
 std::atomic<AllocationContextTracker::CaptureMode>
     AllocationContextTracker::capture_mode_{
-        AllocationContextTracker::CaptureMode::DISABLED};
+        AllocationContextTracker::CaptureMode::kDisabled};
 
 namespace {
 
@@ -64,7 +64,7 @@
 
 // static
 void AllocationContextTracker::SetCurrentThreadName(const char* name) {
-  if (name && capture_mode() != CaptureMode::DISABLED) {
+  if (name && capture_mode() != CaptureMode::kDisabled) {
     GetInstanceForCurrentThread()->thread_name_ = name;
   }
 }
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index da4f747..490fbee 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -23,12 +23,12 @@
 class BASE_EXPORT AllocationContextTracker {
  public:
   enum class CaptureMode : int32_t {
-    DISABLED,      // Don't capture anything
-    NATIVE_STACK,  // Backtrace has full native backtraces from stack unwinding
+    kDisabled,     // Don't capture anything
+    kNativeStack,  // Backtrace has full native backtraces from stack unwinding
   };
 
   // Globally sets capturing mode.
-  // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
+  // TODO(primiano): How to guard against *Stack -> kDisabled -> *Stack?
   static void SetCaptureMode(CaptureMode mode);
 
   // Returns global capturing mode.
@@ -36,13 +36,15 @@
     // A little lag after heap profiling is enabled or disabled is fine, it is
     // more important that the check is as cheap as possible when capturing is
     // not enabled, so do not issue a memory barrier in the fast path.
-    if (capture_mode_.load(std::memory_order_relaxed) == CaptureMode::DISABLED)
-      return CaptureMode::DISABLED;
+    if (capture_mode_.load(std::memory_order_relaxed) ==
+        CaptureMode::kDisabled) {
+      return CaptureMode::kDisabled;
+    }
 
     // In the slow path, an acquire load is required to pair with the release
     // store in |SetCaptureMode|. This is to ensure that the TLS slot for
     // the thread-local allocation context tracker has been initialized if
-    // |capture_mode| returns something other than DISABLED.
+    // |capture_mode| returns something other than kDisabled.
     return capture_mode_.load(std::memory_order_acquire);
   }
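A standalone sketch (not part of the patch) of the memory-ordering pattern the
comment above describes: a relaxed load on the disabled fast path, paired with
release/acquire ordering once capturing is enabled. The names are illustrative
and not the real AllocationContextTracker members.

#include <atomic>
#include <cstdint>

enum class Mode : int32_t { kDisabled, kEnabled };
std::atomic<Mode> g_mode{Mode::kDisabled};

void Enable() {
  // Publish state set up before enabling (e.g. the TLS tracker slot).
  g_mode.store(Mode::kEnabled, std::memory_order_release);
}

Mode CurrentMode() {
  // Fast path: no barrier while disabled; a little lag is acceptable.
  if (g_mode.load(std::memory_order_relaxed) == Mode::kDisabled)
    return Mode::kDisabled;
  // Slow path: acquire pairs with the release store in Enable(), so anything
  // published before enabling is visible to callers that observe kEnabled.
  return g_mode.load(std::memory_order_acquire);
}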
 
diff --git a/base/trace_event/interned_args_helper.cc b/base/trace_event/interned_args_helper.cc
index fb7ec34..2e92d90 100644
--- a/base/trace_event/interned_args_helper.cc
+++ b/base/trace_event/interned_args_helper.cc
@@ -14,6 +14,23 @@
 namespace base {
 namespace trace_event {
 
+namespace {
+
+const void* const kModuleCacheForTracingKey = &kModuleCacheForTracingKey;
+
+class ModuleCacheForTracing : public perfetto::TrackEventTlsStateUserData {
+ public:
+  ModuleCacheForTracing() = default;
+  ~ModuleCacheForTracing() override = default;
+
+  base::ModuleCache& GetModuleCache() { return module_cache_; }
+
+ private:
+  base::ModuleCache module_cache_;
+};
+
+}  // namespace
+
 //  static
 void InternedSourceLocation::Add(
     perfetto::protos::pbzero::InternedData* interned_data,
@@ -99,8 +116,20 @@
     perfetto::EventContext* ctx,
     uintptr_t address) {
   auto* index_for_field = GetOrCreateIndexForField(ctx->GetIncrementalState());
+#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
+  ModuleCacheForTracing* module_cache = static_cast<ModuleCacheForTracing*>(
+      ctx->GetTlsUserData(kModuleCacheForTracingKey));
+  if (!module_cache) {
+    auto new_module_cache = std::make_unique<ModuleCacheForTracing>();
+    module_cache = new_module_cache.get();
+    ctx->SetTlsUserData(kModuleCacheForTracingKey, std::move(new_module_cache));
+  }
+  const base::ModuleCache::Module* module =
+      module_cache->GetModuleCache().GetModuleForAddress(address);
+#else
   const base::ModuleCache::Module* module =
       index_for_field->module_cache_.GetModuleForAddress(address);
+#endif
   if (!module) {
     return absl::nullopt;
   }
diff --git a/base/trace_event/interned_args_helper.h b/base/trace_event/interned_args_helper.h
index 154436d..09941fe 100644
--- a/base/trace_event/interned_args_helper.h
+++ b/base/trace_event/interned_args_helper.h
@@ -13,6 +13,7 @@
 #include "base/location.h"
 #include "base/profiler/module_cache.h"
 #include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 #include "third_party/perfetto/include/perfetto/tracing/track_event_interned_data_index.h"
 #include "third_party/perfetto/protos/perfetto/trace/interned_data/interned_data.pbzero.h"
@@ -174,6 +175,10 @@
                   size_t iid,
                   const UnsymbolizedSourceLocation& location);
 
+// We use thread-local storage for the module cache when the Perfetto client
+// library is in use, since it is more efficient. Writing equally optimized
+// caching for non-client-library users is not worthwhile; everyone will convert.
+#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  private:
   // This implies that a module cache lifetime = incremental state.
   // We don't want unlimited lifetime because it keeps modules pinned in
@@ -181,6 +186,7 @@
   // TODO(b/237055179): Consider tying module cache to DataSource instead so
   // that the cache is not unnecessarily cleared on incremental state change.
   base::ModuleCache module_cache_;
+#endif
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/java_heap_dump_provider_android_unittest.cc b/base/trace_event/java_heap_dump_provider_android_unittest.cc
index d9c147f..30c46f1 100644
--- a/base/trace_event/java_heap_dump_provider_android_unittest.cc
+++ b/base/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -12,7 +12,7 @@
 
 TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
   auto* jhdp = JavaHeapDumpProvider::GetInstance();
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};
   std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(dump_args));
 
   jhdp->OnMemoryDump(dump_args, pmd.get());
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 6a8088b..b573535 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -10,10 +10,10 @@
 
 #include "base/allocator/allocator_extension.h"
 #include "base/allocator/buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_bucket_lookup.h"
-#include "base/allocator/partition_allocator/shim/nonscannable_allocator.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_bucket_lookup.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/nonscannable_allocator.h"
 #include "base/debug/profiler.h"
 #include "base/format_macros.h"
 #include "base/metrics/histogram_functions.h"
@@ -37,11 +37,11 @@
 #endif
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
 #endif
 
 #if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h"
 #endif
 
 namespace base {
@@ -86,7 +86,7 @@
                         size_t* allocated_objects_size,
                         size_t* allocated_objects_count) {
   // This is too expensive on Windows, crbug.com/780735.
-  if (level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+  if (level_of_detail == MemoryDumpLevelOfDetail::kDetailed) {
     WinHeapInfo main_heap_info = {};
     WinHeapMemoryDumpImpl(&main_heap_info);
     *total_virtual_size +=
@@ -122,7 +122,7 @@
                                size_t* cumulative_brp_quarantined_count) {
   MemoryDumpPartitionStatsDumper partition_stats_dumper("malloc", pmd,
                                                         level_of_detail);
-  bool is_light_dump = level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
+  bool is_light_dump = level_of_detail == MemoryDumpLevelOfDetail::kBackground;
 
   auto* allocator = allocator_shim::internal::PartitionAllocMalloc::Allocator();
   allocator->DumpStats("allocator", is_light_dump, &partition_stats_dumper);
@@ -461,9 +461,6 @@
 }
 
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-// static
-const char* MemoryDumpPartitionStatsDumper::kPartitionsDumpName = "partitions";
-
 std::string GetPartitionDumpName(const char* root_name,
                                  const char* partition_name) {
   return base::StringPrintf("%s/%s/%s", root_name,
@@ -477,7 +474,7 @@
     MemoryDumpLevelOfDetail level_of_detail)
     : root_name_(root_name),
       memory_dump_(memory_dump),
-      detailed_(level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {}
+      detailed_(level_of_detail != MemoryDumpLevelOfDetail::kBackground) {}
 
 void MemoryDumpPartitionStatsDumper::PartitionDumpTotals(
     const char* partition_name,
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index cd5c12a..abc2f94 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -6,7 +6,7 @@
 #define BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
 
 #include "base/allocator/buildflags.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/base_export.h"
 #include "base/memory/raw_ptr.h"
 #include "base/memory/singleton.h"
@@ -21,7 +21,7 @@
 #endif
 
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-#include "base/allocator/partition_allocator/partition_stats.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_stats.h"
 #endif
 
 namespace base {
@@ -83,7 +83,7 @@
                                  ProcessMemoryDump* memory_dump,
                                  MemoryDumpLevelOfDetail level_of_detail);
 
-  static const char* kPartitionsDumpName;
+  static constexpr char kPartitionsDumpName[] = "partitions";
 
   // PartitionStatsDumper implementation.
   void PartitionDumpTotals(
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index 2dcc8a7..9cb3a1c 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -56,7 +56,7 @@
                                     const char* units,
                                     const std::string& value) {
   // String attributes are disabled in background mode.
-  if (level_of_detail_ == MemoryDumpLevelOfDetail::BACKGROUND) {
+  if (level_of_detail_ == MemoryDumpLevelOfDetail::kBackground) {
     NOTREACHED();
     return;
   }
@@ -64,7 +64,6 @@
 }
 
 void MemoryAllocatorDump::AsValueInto(TracedValue* value) const {
-  std::string string_conversion_buffer;
   value->BeginDictionaryWithCopiedName(absolute_name_);
   value->SetString("guid", guid_.ToString());
   value->BeginDictionary("attrs");
@@ -73,11 +72,9 @@
     value->BeginDictionaryWithCopiedName(entry.name);
     switch (entry.entry_type) {
       case Entry::kUint64:
-        SStringPrintf(&string_conversion_buffer, "%" PRIx64,
-                      entry.value_uint64);
         value->SetString("type", kTypeScalar);
         value->SetString("units", entry.units);
-        value->SetString("value", string_conversion_buffer);
+        value->SetString("value", StringPrintf("%" PRIx64, entry.value_uint64));
         break;
       case Entry::kString:
         value->SetString("type", kTypeString);
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index 1491ac2..28df3fd 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -75,13 +75,13 @@
 
 TEST(MemoryAllocatorDumpTest, GuidGeneration) {
   std::unique_ptr<MemoryAllocatorDump> mad(new MemoryAllocatorDump(
-      "foo", MemoryDumpLevelOfDetail::FIRST, MemoryAllocatorDumpGuid(0x42u)));
+      "foo", MemoryDumpLevelOfDetail::kFirst, MemoryAllocatorDumpGuid(0x42u)));
   ASSERT_EQ("42", mad->guid().ToString());
 }
 
 TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
   FakeMemoryAllocatorDumpProvider fmadp;
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};
   ProcessMemoryDump pmd(dump_args);
 
   fmadp.OnMemoryDump(dump_args, &pmd);
@@ -120,7 +120,7 @@
 }
 
 TEST(MemoryAllocatorDumpTest, GetSize) {
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};
   ProcessMemoryDump pmd(dump_args);
   MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
   dump->AddScalar(MemoryAllocatorDump::kNameSize,
@@ -130,7 +130,7 @@
 }
 
 TEST(MemoryAllocatorDumpTest, ReadValues) {
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};
   ProcessMemoryDump pmd(dump_args);
   MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
   dump->AddScalar("one", "byte", 1);
@@ -154,7 +154,7 @@
     !BUILDFLAG(IS_FUCHSIA)
 TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
   FakeMemoryAllocatorDumpProvider fmadp;
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};
   ProcessMemoryDump pmd(dump_args);
   pmd.CreateAllocatorDump("foo_allocator");
   pmd.CreateAllocatorDump("bar_allocator/heap");
@@ -164,7 +164,7 @@
 }
 
 TEST(MemoryAllocatorDumpTest, ForbidStringsInBackgroundModeDeathTest) {
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kBackground};
   ProcessMemoryDump pmd(dump_args);
   MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("malloc");
   ASSERT_DEATH(dump->AddString("foo", "bar", "baz"), "");
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index ffaf95d..08b30bf 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -12,7 +12,7 @@
 #include <tuple>
 #include <utility>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/debug/alias.h"
@@ -314,7 +314,7 @@
   if (TraceLog::GetInstance()
           ->GetCurrentTraceConfig()
           .IsArgumentFilterEnabled()) {
-    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+    CHECK_EQ(MemoryDumpLevelOfDetail::kBackground, args.level_of_detail);
   }
 
   std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
@@ -363,7 +363,7 @@
     // If we are in background mode, we should invoke only the allowed
     // providers. Ignore other providers and continue.
     if (pmd_async_state->req_args.level_of_detail ==
-            MemoryDumpLevelOfDetail::BACKGROUND &&
+            MemoryDumpLevelOfDetail::kBackground &&
         !mdpinfo->allowed_in_background_mode) {
       pmd_async_state->pending_dump_providers.pop_back();
       continue;
@@ -507,11 +507,11 @@
 
   MemoryDumpScheduler::Config periodic_config;
   for (const auto& trigger : memory_dump_config.triggers) {
-    if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
+    if (trigger.trigger_type == MemoryDumpType::kPeriodicInterval) {
       if (periodic_config.triggers.empty()) {
         periodic_config.callback =
             BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
-                          MemoryDumpType::PERIODIC_INTERVAL);
+                          MemoryDumpType::kPeriodicInterval);
       }
       periodic_config.triggers.push_back(
           {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 348d21f..baf50b3 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -46,19 +46,19 @@
 
 // GTest matchers for MemoryDumpRequestArgs arguments.
 MATCHER(IsDetailedDump, "") {
-  return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
+  return arg.level_of_detail == MemoryDumpLevelOfDetail::kDetailed;
 }
 
 MATCHER(IsLightDump, "") {
-  return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
+  return arg.level_of_detail == MemoryDumpLevelOfDetail::kLight;
 }
 
 MATCHER(IsDeterministicDump, "") {
-  return arg.determinism == MemoryDumpDeterminism::FORCE_GC;
+  return arg.determinism == MemoryDumpDeterminism::kForceGc;
 }
 
 MATCHER(IsNotDeterministicDump, "") {
-  return arg.determinism == MemoryDumpDeterminism::NONE;
+  return arg.determinism == MemoryDumpDeterminism::kNone;
 }
 
 namespace {
@@ -287,9 +287,9 @@
   EnableForTracing();
   EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3);
   for (int i = 0; i < 3; ++i) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
   }
   DisableTracing();
 
@@ -300,9 +300,9 @@
   EnableForTracing();
   EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
   for (int i = 0; i < 3; ++i) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
   }
   DisableTracing();
 }
@@ -315,9 +315,9 @@
   RegisterDumpProvider(&mdp, SingleThreadTaskRunner::GetCurrentDefault());
   EnableForTracing();
   EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
   mdm_->UnregisterDumpProvider(&mdp);
 
@@ -326,9 +326,9 @@
   RegisterDumpProvider(&mdp, SingleThreadTaskRunner::GetCurrentDefault());
   EnableForTracing();
   EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::LIGHT,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kLight,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
   mdm_->UnregisterDumpProvider(&mdp);
 }
@@ -341,9 +341,9 @@
   RegisterDumpProvider(&mdp, SingleThreadTaskRunner::GetCurrentDefault());
   EnableForTracing();
   EXPECT_CALL(mdp, OnMemoryDump(IsDeterministicDump(), _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::FORCE_GC));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kForceGc));
   DisableTracing();
   mdm_->UnregisterDumpProvider(&mdp);
 
@@ -352,9 +352,9 @@
   RegisterDumpProvider(&mdp, SingleThreadTaskRunner::GetCurrentDefault());
   EnableForTracing();
   EXPECT_CALL(mdp, OnMemoryDump(IsNotDeterministicDump(), _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::LIGHT,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kLight,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
   mdm_->UnregisterDumpProvider(&mdp);
 }
@@ -369,9 +369,9 @@
   EnableForTracing();
   EXPECT_CALL(mdp1, OnMemoryDump(_, _));
   EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
 
   // Invert: enable mdp2 and disable mdp1.
@@ -380,9 +380,9 @@
   EnableForTracing();
   EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
   EXPECT_CALL(mdp2, OnMemoryDump(_, _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
 
   // Enable both mdp1 and mdp2.
@@ -390,9 +390,9 @@
   EnableForTracing();
   EXPECT_CALL(mdp1, OnMemoryDump(_, _));
   EXPECT_CALL(mdp2, OnMemoryDump(_, _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
 }
 
@@ -412,9 +412,9 @@
   {
     EXPECT_CALL(mdp, OnMemoryDump(_, _));
     EnableForTracing();
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
     DisableTracing();
   }
 
@@ -423,9 +423,9 @@
   {
     EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
     EnableForTracing();
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
     DisableTracing();
   }
 
@@ -435,9 +435,9 @@
   {
     EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
     EnableForTracing();
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
     DisableTracing();
   }
 
@@ -448,9 +448,9 @@
   {
     EXPECT_CALL(mdp, OnMemoryDump(_, _));
     EnableForTracing();
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
     DisableTracing();
   }
 }
@@ -487,9 +487,9 @@
   EnableForTracing();
 
   while (!threads.empty()) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
 
     // Unregister a MDP and destroy one thread at each iteration to check the
     // live unregistration logic. The unregistration needs to happen on the same
@@ -534,16 +534,16 @@
   EnableForTracing();
 
   task_runner1->set_enabled(false);
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
   EXPECT_EQ(1u, task_runner2->no_of_post_tasks());
 
   task_runner1->set_enabled(true);
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
   EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
   DisableTracing();
@@ -573,9 +573,9 @@
 
   const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
   for (int i = 0; i < kNumDumps; i++) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
   }
 
   DisableTracing();
@@ -605,9 +605,9 @@
   EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(Between(2, 3));
 
   for (int i = 0; i < 4; i++) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
   }
 
   DisableTracing();
@@ -639,9 +639,9 @@
   EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(Between(1, 2));
 
   for (int i = 0; i < 4; i++) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
   }
 
   DisableTracing();
@@ -688,9 +688,9 @@
   }
 
   EnableForTracing();
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   ASSERT_EQ(1, on_memory_dump_call_count);
 
   DisableTracing();
@@ -736,9 +736,9 @@
   }
 
   EnableForTracing();
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
   ASSERT_EQ(1, on_memory_dump_call_count);
 
   DisableTracing();
@@ -750,9 +750,9 @@
   MockMemoryDumpProvider mdp;
   RegisterDumpProvider(&mdp, nullptr);
   EXPECT_CALL(mdp, OnMemoryDump(_, _));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
 }
 
 TEST_F(MemoryDumpManagerTest, BackgroundAllowlisting) {
@@ -766,9 +766,9 @@
   EnableForTracing();
 
   EXPECT_CALL(backgroundMdp, OnMemoryDump(_, _)).Times(1);
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::SUMMARY_ONLY,
-                                        MemoryDumpLevelOfDetail::BACKGROUND,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kSummaryOnly,
+                                        MemoryDumpLevelOfDetail::kBackground,
+                                        MemoryDumpDeterminism::kNone));
   DisableTracing();
 }
 
@@ -829,9 +829,9 @@
 
   EnableForTracing();
   for (int i = 0; i < 2; ++i) {
-    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                          MemoryDumpLevelOfDetail::DETAILED,
-                                          MemoryDumpDeterminism::NONE));
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                          MemoryDumpLevelOfDetail::kDetailed,
+                                          MemoryDumpDeterminism::kNone));
   }
   DisableTracing();
 }
@@ -883,15 +883,15 @@
   }
   stopped_thread->Stop();
 
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::DETAILED,
-                                        MemoryDumpDeterminism::NONE));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                                        MemoryDumpLevelOfDetail::BACKGROUND,
-                                        MemoryDumpDeterminism::NONE));
-  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::SUMMARY_ONLY,
-                                        MemoryDumpLevelOfDetail::BACKGROUND,
-                                        MemoryDumpDeterminism::NONE));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kDetailed,
+                                        MemoryDumpDeterminism::kNone));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kExplicitlyTriggered,
+                                        MemoryDumpLevelOfDetail::kBackground,
+                                        MemoryDumpDeterminism::kNone));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::kSummaryOnly,
+                                        MemoryDumpLevelOfDetail::kBackground,
+                                        MemoryDumpDeterminism::kNone));
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index 658478f..0d7ea3e 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -12,11 +12,11 @@
 // static
 const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
   switch (dump_type) {
-    case MemoryDumpType::PERIODIC_INTERVAL:
+    case MemoryDumpType::kPeriodicInterval:
       return "periodic_interval";
-    case MemoryDumpType::EXPLICITLY_TRIGGERED:
+    case MemoryDumpType::kExplicitlyTriggered:
       return "explicitly_triggered";
-    case MemoryDumpType::SUMMARY_ONLY:
+    case MemoryDumpType::kSummaryOnly:
       return "summary_only";
   }
   NOTREACHED();
@@ -24,24 +24,25 @@
 }
 
 MemoryDumpType StringToMemoryDumpType(const std::string& str) {
-  if (str == "periodic_interval")
-    return MemoryDumpType::PERIODIC_INTERVAL;
+  if (str == "periodic_interval") {
+    return MemoryDumpType::kPeriodicInterval;
+  }
   if (str == "explicitly_triggered")
-    return MemoryDumpType::EXPLICITLY_TRIGGERED;
+    return MemoryDumpType::kExplicitlyTriggered;
   if (str == "summary_only")
-    return MemoryDumpType::SUMMARY_ONLY;
+    return MemoryDumpType::kSummaryOnly;
   NOTREACHED();
-  return MemoryDumpType::LAST;
+  return MemoryDumpType::kLast;
 }
 
 const char* MemoryDumpLevelOfDetailToString(
     const MemoryDumpLevelOfDetail& level_of_detail) {
   switch (level_of_detail) {
-    case MemoryDumpLevelOfDetail::BACKGROUND:
+    case MemoryDumpLevelOfDetail::kBackground:
       return "background";
-    case MemoryDumpLevelOfDetail::LIGHT:
+    case MemoryDumpLevelOfDetail::kLight:
       return "light";
-    case MemoryDumpLevelOfDetail::DETAILED:
+    case MemoryDumpLevelOfDetail::kDetailed:
       return "detailed";
   }
   NOTREACHED();
@@ -51,13 +52,13 @@
 MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
     const std::string& str) {
   if (str == "background")
-    return MemoryDumpLevelOfDetail::BACKGROUND;
+    return MemoryDumpLevelOfDetail::kBackground;
   if (str == "light")
-    return MemoryDumpLevelOfDetail::LIGHT;
+    return MemoryDumpLevelOfDetail::kLight;
   if (str == "detailed")
-    return MemoryDumpLevelOfDetail::DETAILED;
+    return MemoryDumpLevelOfDetail::kDetailed;
   NOTREACHED();
-  return MemoryDumpLevelOfDetail::LAST;
+  return MemoryDumpLevelOfDetail::kLast;
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 4920535..e29cca4 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -27,41 +27,41 @@
 // consistent with memory_instrumentation.mojo and
 // memory_instrumentation_struct_traits.{h,cc}
 enum class MemoryDumpType {
-  PERIODIC_INTERVAL,     // Dumping memory at periodic intervals.
-  EXPLICITLY_TRIGGERED,  // Non maskable dump request.
-  SUMMARY_ONLY,          // Calculate just the summary & don't add to the trace.
-  LAST = SUMMARY_ONLY
+  kPeriodicInterval,     // Dumping memory at periodic intervals.
+  kExplicitlyTriggered,  // Non maskable dump request.
+  kSummaryOnly,          // Calculate just the summary & don't add to the trace.
+  kLast = kSummaryOnly
 };
 
 // Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
 // Keep this consistent with memory_instrumentation.mojo and
 // memory_instrumentation_struct_traits.{h,cc}
 enum class MemoryDumpLevelOfDetail : uint32_t {
-  FIRST,
+  kFirst,
 
   // For background tracing mode. The dump time is quick, and typically just the
   // totals are expected. Suballocations need not be specified. Dump name must
   // contain only pre-defined strings and string arguments cannot be added.
-  BACKGROUND = FIRST,
+  kBackground = kFirst,
 
   // For the levels below, MemoryDumpProvider instances must guarantee that the
   // total size reported in the root node is consistent. Only the granularity of
   // the child MemoryAllocatorDump(s) differs with the levels.
 
   // Few entries, typically a fixed number, per dump.
-  LIGHT,
+  kLight,
 
   // Unrestricted amount of entries per dump.
-  DETAILED,
+  kDetailed,
 
-  LAST = DETAILED
+  kLast = kDetailed
 };
 
 // Tells the MemoryDumpProvider(s) if they should try to make the result
 // more deterministic by forcing garbage collection.
 // Keep this consistent with memory_instrumentation.mojo and
 // memory_instrumentation_struct_traits.{h,cc}
-enum class MemoryDumpDeterminism : uint32_t { NONE, FORCE_GC };
+enum class MemoryDumpDeterminism : uint32_t { kNone, kForceGc };
 
 // Keep this consistent with memory_instrumentation.mojo and
 // memory_instrumentation_struct_traits.{h,cc}
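For reference, a minimal sketch (not part of this patch) of how the renamed enumerators round-trip through the string helpers updated in memory_dump_request_args.cc above; it assumes those helpers are declared in base::trace_event in this header, as their definitions indicate.

#include "base/trace_event/memory_dump_request_args.h"

// The serialized names ("explicitly_triggered", "detailed", ...) are
// unchanged, so existing trace configs keep parsing; only the C++ spelling
// moves to the k-prefixed constant style.
base::trace_event::MemoryDumpType RoundTripDumpType() {
  using base::trace_event::MemoryDumpType;
  const char* name = base::trace_event::MemoryDumpTypeToString(
      MemoryDumpType::kExplicitlyTriggered);
  // Returns MemoryDumpType::kExplicitlyTriggered.
  return base::trace_event::StringToMemoryDumpType(name);
}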
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
index 80d0519..47de58d 100644
--- a/base/trace_event/memory_dump_scheduler.cc
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -51,13 +51,13 @@
   for (const Config::Trigger& trigger : config.triggers) {
     DCHECK_GT(trigger.period_ms, 0u);
     switch (trigger.level_of_detail) {
-      case MemoryDumpLevelOfDetail::BACKGROUND:
+      case MemoryDumpLevelOfDetail::kBackground:
         break;
-      case MemoryDumpLevelOfDetail::LIGHT:
+      case MemoryDumpLevelOfDetail::kLight:
         DCHECK_EQ(0u, light_dump_period_ms);
         light_dump_period_ms = trigger.period_ms;
         break;
-      case MemoryDumpLevelOfDetail::DETAILED:
+      case MemoryDumpLevelOfDetail::kDetailed:
         DCHECK_EQ(0u, heavy_dump_period_ms);
         heavy_dump_period_ms = trigger.period_ms;
         break;
@@ -94,11 +94,12 @@
   if (period_ms_ == 0 || generation_ != expected_generation)
     return;
 
-  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+  MemoryDumpLevelOfDetail level_of_detail =
+      MemoryDumpLevelOfDetail::kBackground;
   if (light_dump_rate_ > 0 && tick_count_ % light_dump_rate_ == 0)
-    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+    level_of_detail = MemoryDumpLevelOfDetail::kLight;
   if (heavy_dump_rate_ > 0 && tick_count_ % heavy_dump_rate_ == 0)
-    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+    level_of_detail = MemoryDumpLevelOfDetail::kDetailed;
   tick_count_++;
 
   callback_.Run(level_of_detail);
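A standalone sketch (simplified, not the actual MemoryDumpScheduler code) of the per-tick selection above: every tick starts at kBackground, is upgraded to kLight when the light rate divides the tick count, and to kDetailed when the heavy rate does, which is why a tick matching both rates ends up kDetailed.

#include <cstdint>

#include "base/trace_event/memory_dump_request_args.h"

// Illustrative helper; the real scheduler derives the light/heavy rates from
// the configured trigger periods before ticking.
base::trace_event::MemoryDumpLevelOfDetail LevelForTick(uint32_t tick_count,
                                                        uint32_t light_rate,
                                                        uint32_t heavy_rate) {
  auto level = base::trace_event::MemoryDumpLevelOfDetail::kBackground;
  if (light_rate > 0 && tick_count % light_rate == 0) {
    level = base::trace_event::MemoryDumpLevelOfDetail::kLight;
  }
  if (heavy_rate > 0 && tick_count % heavy_rate == 0) {
    level = base::trace_event::MemoryDumpLevelOfDetail::kDetailed;
  }
  return level;
}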
diff --git a/base/trace_event/memory_dump_scheduler_unittest.cc b/base/trace_event/memory_dump_scheduler_unittest.cc
index 6436513..34567ec 100644
--- a/base/trace_event/memory_dump_scheduler_unittest.cc
+++ b/base/trace_event/memory_dump_scheduler_unittest.cc
@@ -48,7 +48,7 @@
 
 TEST_F(MemoryDumpSchedulerTest, SingleTrigger) {
   const uint32_t kPeriodMs = 1;
-  const auto kLevelOfDetail = MemoryDumpLevelOfDetail::DETAILED;
+  const auto kLevelOfDetail = MemoryDumpLevelOfDetail::kDetailed;
   const uint32_t kTicks = 5;
   MemoryDumpScheduler::Config config;
   config.triggers.push_back({kLevelOfDetail, kPeriodMs});
@@ -84,8 +84,8 @@
   const uint32_t kPeriodLightMs = 3;
   const uint32_t kPeriodDetailedMs = 9;
   MemoryDumpScheduler::Config config;
-  const MemoryDumpLevelOfDetail kLight = MemoryDumpLevelOfDetail::LIGHT;
-  const MemoryDumpLevelOfDetail kDetailed = MemoryDumpLevelOfDetail::DETAILED;
+  const MemoryDumpLevelOfDetail kLight = MemoryDumpLevelOfDetail::kLight;
+  const MemoryDumpLevelOfDetail kDetailed = MemoryDumpLevelOfDetail::kDetailed;
   config.triggers.push_back({kLight, kPeriodLightMs});
   config.triggers.push_back({kDetailed, kPeriodDetailedMs});
   config.callback =
@@ -126,22 +126,22 @@
   const uint32_t kDetailedTicks = 10;
 
   MemoryDumpScheduler::Config light_config;
-  light_config.triggers.push_back({MemoryDumpLevelOfDetail::LIGHT, kPeriodMs});
+  light_config.triggers.push_back({MemoryDumpLevelOfDetail::kLight, kPeriodMs});
   light_config.callback =
       BindRepeating(&CallbackWrapper::OnTick, Unretained(&on_tick_));
 
   MemoryDumpScheduler::Config detailed_config;
   detailed_config.triggers.push_back(
-      {MemoryDumpLevelOfDetail::DETAILED, kPeriodMs});
+      {MemoryDumpLevelOfDetail::kDetailed, kPeriodMs});
   detailed_config.callback =
       BindRepeating(&CallbackWrapper::OnTick, Unretained(&on_tick_));
 
   testing::InSequence sequence;
-  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::LIGHT))
+  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::kLight))
       .Times(AtMost(kQuickIterations));
-  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::DETAILED))
+  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::kDetailed))
       .Times(kDetailedTicks - 1);
-  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::DETAILED))
+  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::kDetailed))
       .WillRepeatedly(
           Invoke([this](MemoryDumpLevelOfDetail) { this->evt_.Signal(); }));
 
@@ -165,7 +165,7 @@
   const uint32_t kPeriodMs = 1;
   const uint32_t kTicks = 3;
   MemoryDumpScheduler::Config config;
-  config.triggers.push_back({MemoryDumpLevelOfDetail::DETAILED, kPeriodMs});
+  config.triggers.push_back({MemoryDumpLevelOfDetail::kDetailed, kPeriodMs});
   config.callback =
       BindRepeating(&CallbackWrapper::OnTick, Unretained(&on_tick_));
 
diff --git a/base/trace_event/memory_infra_background_allowlist.cc b/base/trace_event/memory_infra_background_allowlist.cc
index 5fa2ec0..f970711 100644
--- a/base/trace_event/memory_infra_background_allowlist.cc
+++ b/base/trace_event/memory_infra_background_allowlist.cc
@@ -8,7 +8,7 @@
 
 #include <string>
 
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h"
 #include "base/containers/fixed_flat_set.h"
 #include "base/strings/string_util.h"
 #include "build/build_config.h"
@@ -139,10 +139,11 @@
         "discardable/madv_free_allocated",
         "discardable/child_0x?",
         "extensions/functions",
-        "extensions/value_store/Extensions.Database.Open.Settings/0x?",
+        "extensions/value_store/Extensions.Database.Open.OriginManagedConfiguration/0x?",
         "extensions/value_store/Extensions.Database.Open.Rules/0x?",
-        "extensions/value_store/Extensions.Database.Open.State/0x?",
         "extensions/value_store/Extensions.Database.Open.Scripts/0x?",
+        "extensions/value_store/Extensions.Database.Open.Settings/0x?",
+        "extensions/value_store/Extensions.Database.Open.State/0x?",
         "extensions/value_store/Extensions.Database.Open.WebAppsLockScreen/0x?",
         "extensions/value_store/Extensions.Database.Open/0x?",
         "extensions/value_store/Extensions.Database.Restore/0x?",
@@ -251,6 +252,8 @@
         "v8/main/heap/read_only_space",
         "v8/main/heap/shared_large_object_space",
         "v8/main/heap/shared_space",
+        "v8/main/heap/trusted_space",
+        "v8/main/heap/trusted_large_object_space",
         "v8/main/malloc",
         "v8/main/zapped_for_debug",
         "v8/utility/code_stats",
@@ -267,6 +270,8 @@
         "v8/utility/heap/read_only_space",
         "v8/utility/heap/shared_large_object_space",
         "v8/utility/heap/shared_space",
+        "v8/utility/heap/trusted_space",
+        "v8/utility/heap/trusted_large_object_space",
         "v8/utility/malloc",
         "v8/utility/zapped_for_debug",
         "v8/workers/code_stats/isolate_0x?",
@@ -283,6 +288,8 @@
         "v8/workers/heap/read_only_space/isolate_0x?",
         "v8/workers/heap/shared_large_object_space/isolate_0x?",
         "v8/workers/heap/shared_space/isolate_0x?",
+        "v8/workers/heap/trusted_space/isolate_0x?",
+        "v8/workers/heap/trusted_large_object_space/isolate_0x?",
         "v8/workers/malloc/isolate_0x?",
         "v8/workers/zapped_for_debug/isolate_0x?",
         "site_storage/index_db/db_0x?",
diff --git a/base/trace_event/memory_usage_estimator.h b/base/trace_event/memory_usage_estimator.h
index a0fef11..1f1fa06 100644
--- a/base/trace_event/memory_usage_estimator.h
+++ b/base/trace_event/memory_usage_estimator.h
@@ -76,6 +76,12 @@
 // The approach is simple: first call EstimateMemoryUsage() on all members,
 // then recursively fix compilation errors that are caused by types not
 // implementing EstimateMemoryUsage().
+//
+// Note that in the above example, the memory estimates for `id_` and `success_` are
+// intentionally omitted. This is because these members do not allocate any _dynamic_ memory.
+// If, for example, `MyClass` is declared as a heap-allocated `unique_ptr` member in some parent
+// class, then `EstimateMemoryUsage` on the `unique_ptr` will automatically take into account
+// `sizeof(MyClass)`.
 
 namespace base {
 namespace trace_event {
@@ -206,11 +212,10 @@
 // EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
 // achieve this don't work on MSVC.
 template <class T>
-struct HasEMU<
-    T,
-    typename std::enable_if<std::is_same<
-        size_t,
-        decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
+struct HasEMU<T,
+              std::enable_if_t<std::is_same_v<size_t,
+                                              decltype(EstimateMemoryUsage(
+                                                  std::declval<const T&>()))>>>
     : std::true_type {};
 
 // EMUCaller<T> does three things:
@@ -229,7 +234,7 @@
 struct EMUCaller {
   // std::is_same<> below makes static_assert depend on T, in order to
   // prevent it from asserting regardless instantiation.
-  static_assert(std::is_same<T, std::false_type>::value,
+  static_assert(std::is_same_v<T, std::false_type>,
                 "Neither global function 'size_t EstimateMemoryUsage(T)' "
                 "nor member function 'size_t T::EstimateMemoryUsage() const' "
                 "is defined for the type.");
@@ -238,7 +243,7 @@
 };
 
 template <class T>
-struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
+struct EMUCaller<T, std::enable_if_t<HasEMU<T>::value>> {
   static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
 };
 
@@ -249,7 +254,7 @@
 struct IsComplexIteratorForContainer<
     Container,
     I,
-    std::enable_if_t<!std::is_pointer<I>::value &&
+    std::enable_if_t<!std::is_pointer_v<I> &&
                      base::internal::is_iterator<I>::value>> {
   using value_type = typename std::iterator_traits<I>::value_type;
   using container_type = Container<value_type>;
@@ -259,11 +264,10 @@
   //
   // The downside is - value is not of type bool.
   enum : bool {
-    value =
-        std::is_same<typename container_type::iterator, I>::value ||
-        std::is_same<typename container_type::const_iterator, I>::value ||
-        std::is_same<typename container_type::reverse_iterator, I>::value ||
-        std::is_same<typename container_type::const_reverse_iterator, I>::value,
+    value = std::is_same_v<typename container_type::iterator, I> ||
+            std::is_same_v<typename container_type::const_iterator, I> ||
+            std::is_same_v<typename container_type::reverse_iterator, I> ||
+            std::is_same_v<typename container_type::const_reverse_iterator, I>,
   };
 };
 
@@ -298,7 +302,7 @@
 // However variable template does.
 template <typename T>
 constexpr bool IsKnownNonAllocatingType_v =
-    std::is_trivially_destructible<T>::value ||
+    std::is_trivially_destructible_v<T> ||
     IsStandardContainerComplexIterator<T>();
 
 template <class T>
@@ -330,9 +334,8 @@
 template <class T>
 auto EstimateMemoryUsage(const T& object)
     -> decltype(object.EstimateMemoryUsage()) {
-  static_assert(
-      std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
-      "'T::EstimateMemoryUsage() const' must return size_t.");
+  static_assert(std::is_same_v<decltype(object.EstimateMemoryUsage()), size_t>,
+                "'T::EstimateMemoryUsage() const' must return size_t.");
   return object.EstimateMemoryUsage();
 }
 
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index dbec544..d797e7c 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -198,9 +198,9 @@
       mapped_size + static_cast<size_t>(static_cast<uint8_t*>(start_address) -
                                         aligned_start_address);
 
-#if BUILDFLAG(IS_MAC)
-  // On macOS, use mach_vm_region instead of mincore for performance
-  // (crbug.com/742042).
+#if BUILDFLAG(IS_APPLE)
+  // On macOS and iOS, use mach_vm_region|vm_region_64 instead of mincore for
+  // performance (crbug.com/742042).
   mach_vm_size_t dummy_size = 0;
   mach_vm_address_t address =
       reinterpret_cast<mach_vm_address_t>(aligned_start_address);
@@ -216,7 +216,7 @@
   size_t resident_pages =
       info.private_pages_resident + info.shared_pages_resident;
 
-  // On macOS, measurements for private memory footprint overcount by
+  // On macOS and iOS, measurements for private memory footprint overcount by
   // faulted pages in anonymous shared memory. To discount for this, we touch
   // all the resident pages in anonymous shared memory here, thus making them
   // faulted as well. This relies on two assumptions:
@@ -289,7 +289,7 @@
     std::unique_ptr<MemoryAllocatorDump> mad) {
   // In background mode return the black hole dump, if invalid dump name is
   // given.
-  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::kBackground &&
       !IsMemoryAllocatorDumpNameInAllowlist(mad->absolute_name())) {
     return GetBlackHoleMad(mad->absolute_name());
   }
@@ -532,8 +532,9 @@
 void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                          const std::string& target_node_name) {
   // Do not create new dumps for suballocations in background mode.
-  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::kBackground) {
     return;
+  }
 
   std::string child_mad_name = target_node_name + "/__" + source.ToString();
   MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
index 3ad2dca..2a68c72 100644
--- a/base/trace_event/process_memory_dump_unittest.cc
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -33,7 +33,7 @@
 
 namespace {
 
-const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::kDetailed};
 const char* const kTestDumpNameAllowlist[] = {
     "Allowlisted/TestName", "Allowlisted/TestName_0x?",
     "Allowlisted/0x?/TestName", "Allowlisted/0x?", nullptr};
@@ -71,7 +71,7 @@
 
   EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
   EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
-  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+  EXPECT_EQ(MemoryDumpLevelOfDetail::kDetailed,
             pmd2.dump_args().level_of_detail);
   EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());
 
@@ -87,14 +87,14 @@
   pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));
 
-  ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::BACKGROUND});
+  ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::kBackground});
   pmd2.CreateAllocatorDump("malloc");
 
   pmd2 = std::move(pmd1);
   EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
   EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
   EXPECT_EQ(0u, pmd2.allocator_dumps().count("mad3"));
-  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+  EXPECT_EQ(MemoryDumpLevelOfDetail::kDetailed,
             pmd2.dump_args().level_of_detail);
   EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());
 
@@ -379,7 +379,7 @@
 }
 
 TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
-  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::kBackground};
   std::unique_ptr<ProcessMemoryDump> pmd(
       new ProcessMemoryDump(background_args));
   ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
@@ -443,7 +443,7 @@
 }
 
 TEST(ProcessMemoryDumpTest, GuidsTest) {
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};
 
   const auto process_token_one = UnguessableToken::Create();
   const auto process_token_two = UnguessableToken::Create();
diff --git a/base/trace_event/trace_arguments.cc b/base/trace_event/trace_arguments.cc
index ad202ec..e0081df 100644
--- a/base/trace_event/trace_arguments.cc
+++ b/base/trace_event/trace_arguments.cc
@@ -16,6 +16,7 @@
 #include "base/json/string_escape.h"
 #include "base/memory/raw_ptr.h"
 #include "base/notreached.h"
+#include "base/strings/strcat.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
@@ -164,7 +165,7 @@
 }
 
 static_assert(
-    std::is_pod<TraceValue>::value,
+    std::is_pod_v<TraceValue>,
     "TraceValue must be plain-old-data type for performance reasons!");
 
 void TraceValue::AppendAsJSON(unsigned char type, std::string* out) const {
@@ -196,10 +197,10 @@
       // So as not to lose bits from a 64-bit pointer, output as a hex string.
       // For consistency, do the same for non-JSON strings, but without the
       // surrounding quotes.
-      const char* format_string = as_json ? "\"0x%" PRIx64 "\"" : "0x%" PRIx64;
-      StringAppendF(
-          out, format_string,
+      const std::string value = StringPrintf(
+          "0x%" PRIx64,
           static_cast<uint64_t>(reinterpret_cast<uintptr_t>(this->as_pointer)));
+      *out += as_json ? StrCat({"\"", value, "\""}) : value;
     } break;
     case TRACE_VALUE_TYPE_STRING:
     case TRACE_VALUE_TYPE_COPY_STRING:
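A minimal standalone sketch of the reworked pointer formatting above, which replaces the runtime-selected format string with StringPrintf plus StrCat; the helper name here is illustrative only.

#include <cinttypes>
#include <cstdint>
#include <string>

#include "base/strings/strcat.h"
#include "base/strings/stringprintf.h"

// Hex-formats a pointer and, for JSON output, wraps it in quotes, matching
// TraceValue::AppendAsJSON above.
std::string FormatPointerForTrace(const void* pointer, bool as_json) {
  const std::string value = base::StringPrintf(
      "0x%" PRIx64,
      static_cast<uint64_t>(reinterpret_cast<uintptr_t>(pointer)));
  return as_json ? base::StrCat({"\"", value, "\""}) : value;
}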
diff --git a/base/trace_event/trace_arguments.h b/base/trace_event/trace_arguments.h
index fb614a8..5d000e8 100644
--- a/base/trace_event/trace_arguments.h
+++ b/base/trace_event/trace_arguments.h
@@ -338,18 +338,18 @@
   struct TypeFor;
 
   template <typename T>
-  struct TypeFor<T,
-                 typename std::enable_if<HasHelperSupport<
-                     typename InnerType<T>::type>::value>::type> {
+  struct TypeFor<
+      T,
+      std::enable_if_t<HasHelperSupport<typename InnerType<T>::type>::value>> {
     using ValueType = typename InnerType<T>::type;
     static const unsigned char value = Helper<ValueType>::kType;
   };
   template <typename T>
-  struct TypeFor<T,
-                 typename std::enable_if<
-                     !HasHelperSupport<typename InnerType<T>::type>::value &&
-                     perfetto::internal::has_traced_value_support<
-                         typename InnerType<T>::type>::value>::type> {
+  struct TypeFor<
+      T,
+      std::enable_if_t<!HasHelperSupport<typename InnerType<T>::type>::value &&
+                       perfetto::internal::has_traced_value_support<
+                           typename InnerType<T>::type>::value>> {
     static const unsigned char value = TRACE_VALUE_TYPE_PROTO;
   };
 
@@ -380,18 +380,16 @@
   //
   // NOTE: For ConvertableToTraceFormat values, see the notes above.
   template <class T>
-  typename std::enable_if<
-      HasHelperSupport<typename InnerType<T>::type>::value>::type
-  Init(T&& value) {
+  std::enable_if_t<HasHelperSupport<typename InnerType<T>::type>::value> Init(
+      T&& value) {
     using ValueType = typename InnerType<T>::type;
     Helper<ValueType>::SetValue(this, std::forward<T>(value));
   }
 
   template <class T>
-  typename std::enable_if<
-      !HasHelperSupport<typename InnerType<T>::type>::value &&
-      perfetto::internal::has_traced_value_support<
-          typename InnerType<T>::type>::value>::type
+  std::enable_if_t<!HasHelperSupport<typename InnerType<T>::type>::value &&
+                   perfetto::internal::has_traced_value_support<
+                       typename InnerType<T>::type>::value>
   Init(T&& value) {
     as_proto = new protozero::HeapBuffered<
         perfetto::protos::pbzero::DebugAnnotation>();
@@ -403,12 +401,10 @@
 
 // TraceValue::Helper for integers and enums.
 template <typename T>
-struct TraceValue::Helper<
-    T,
-    typename std::enable_if<std::is_integral<T>::value ||
-                            std::is_enum<T>::value>::type> {
+struct TraceValue::
+    Helper<T, std::enable_if_t<std::is_integral_v<T> || std::is_enum_v<T>>> {
   static constexpr unsigned char kType =
-      std::is_signed<T>::value ? TRACE_VALUE_TYPE_INT : TRACE_VALUE_TYPE_UINT;
+      std::is_signed_v<T> ? TRACE_VALUE_TYPE_INT : TRACE_VALUE_TYPE_UINT;
   static inline void SetValue(TraceValue* v, T value) {
     v->as_uint = static_cast<unsigned long long>(value);
   }
@@ -416,8 +412,7 @@
 
 // TraceValue::Helper for floating-point types
 template <typename T>
-struct TraceValue::
-    Helper<T, typename std::enable_if<std::is_floating_point<T>::value>::type> {
+struct TraceValue::Helper<T, std::enable_if_t<std::is_floating_point_v<T>>> {
   static constexpr unsigned char kType = TRACE_VALUE_TYPE_DOUBLE;
   static inline void SetValue(TraceValue* v, T value) { v->as_double = value; }
 };
@@ -470,10 +465,10 @@
 // IMPORTANT: This takes an std::unique_ptr<CONVERTABLE_TYPE> value, and takes
 // ownership of the pointed object!
 template <typename CONVERTABLE_TYPE>
-struct TraceValue::Helper<std::unique_ptr<CONVERTABLE_TYPE>,
-                          typename std::enable_if<std::is_convertible<
-                              CONVERTABLE_TYPE*,
-                              ConvertableToTraceFormat*>::value>::type> {
+struct TraceValue::Helper<
+    std::unique_ptr<CONVERTABLE_TYPE>,
+    std::enable_if_t<
+        std::is_convertible_v<CONVERTABLE_TYPE*, ConvertableToTraceFormat*>>> {
   static constexpr unsigned char kType = TRACE_VALUE_TYPE_CONVERTABLE;
   static inline void SetValue(TraceValue* v,
                               std::unique_ptr<CONVERTABLE_TYPE> value) {
@@ -486,9 +481,9 @@
 template <typename T>
 struct TraceValue::Helper<
     T,
-    typename std::enable_if<std::is_same<T, base::Time>::value ||
-                            std::is_same<T, base::TimeTicks>::value ||
-                            std::is_same<T, base::ThreadTicks>::value>::type> {
+    std::enable_if_t<std::is_same_v<T, base::Time> ||
+                     std::is_same_v<T, base::TimeTicks> ||
+                     std::is_same_v<T, base::ThreadTicks>>> {
   static constexpr unsigned char kType = TRACE_VALUE_TYPE_INT;
   static inline void SetValue(TraceValue* v, const T& value) {
     v->as_int = value.ToInternalValue();
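The changes in this header (and in several files above) are a mechanical move from the C++11 trait spellings to the _t/_v aliases; a generic before/after sketch using a made-up HasFoo trait:

#include <type_traits>
#include <utility>

// Hypothetical detection trait, for illustration only.
template <typename T, typename = void>
struct HasFoo : std::false_type {};
template <typename T>
struct HasFoo<T, std::void_t<decltype(std::declval<const T&>().Foo())>>
    : std::true_type {};

// Old spelling (the form this patch removes):
//   template <class T>
//   typename std::enable_if<HasFoo<T>::value>::type CallFoo(const T& value);
//
// New spelling (the form this patch introduces):
template <class T>
std::enable_if_t<HasFoo<T>::value> CallFoo(const T& value) {
  value.Foo();
}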
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 068795b..b0e5a66 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -87,8 +87,8 @@
 
 std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
   std::set<MemoryDumpLevelOfDetail> all_modes;
-  for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
-       mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+  for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::kFirst);
+       mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::kLast); mode++) {
     all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
   }
   return all_modes;
@@ -576,7 +576,7 @@
         // If "min_time_between_dumps_ms" param was not given, then the trace
         // config uses old format where only periodic dumps are supported.
         interval = trigger_dict.FindInt(kPeriodicIntervalLegacyParam);
-        dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
+        dump_config.trigger_type = MemoryDumpType::kPeriodicInterval;
       } else {
         const std::string* trigger_type_str =
             trigger_dict.FindString(kTriggerTypeParam);
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 63fa4b2..f965863 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -665,12 +665,12 @@
 
   EXPECT_EQ(200u,
             tc1.memory_dump_config().triggers[0].min_time_between_dumps_ms);
-  EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
+  EXPECT_EQ(MemoryDumpLevelOfDetail::kLight,
             tc1.memory_dump_config().triggers[0].level_of_detail);
 
   EXPECT_EQ(2000u,
             tc1.memory_dump_config().triggers[1].min_time_between_dumps_ms);
-  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+  EXPECT_EQ(MemoryDumpLevelOfDetail::kDetailed,
             tc1.memory_dump_config().triggers[1].level_of_detail);
   EXPECT_EQ(
       2048u,
@@ -684,7 +684,7 @@
   EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
   ASSERT_EQ(1u, tc3.memory_dump_config().triggers.size());
   EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
-  EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+  EXPECT_EQ(MemoryDumpLevelOfDetail::kBackground,
             tc3.memory_dump_config().triggers[0].level_of_detail);
 }
 
diff --git a/base/trace_event/trace_conversion_helper.h b/base/trace_event/trace_conversion_helper.h
index 9d9c0d1..da31403 100644
--- a/base/trace_event/trace_conversion_helper.h
+++ b/base/trace_event/trace_conversion_helper.h
@@ -103,23 +103,23 @@
 // is also |std::is_integral|, so we need to test |bool| before testing for
 // integral.
 template <typename T>
-typename std::enable_if<std::is_same<T, bool>::value>::type
-SetTracedValueArgHelper(base::internal::priority_tag<6>,
-                        TracedValue* traced_value,
-                        const char* name,
-                        const T& value) {
+std::enable_if_t<std::is_same_v<T, bool>> SetTracedValueArgHelper(
+    base::internal::priority_tag<6>,
+    TracedValue* traced_value,
+    const char* name,
+    const T& value) {
   traced_value->SetBoolean(name, value);
 }
 
-// std::is_integral<bool>::value == true
+// std::is_integral_v<bool> == true
 // This needs to be considered only when T is not bool (has higher
 // base::internal::priority_tag).
 template <typename T>
-typename std::enable_if<std::is_integral<T>::value>::type
-SetTracedValueArgHelper(base::internal::priority_tag<5>,
-                        TracedValue* traced_value,
-                        const char* name,
-                        const T& value) {
+std::enable_if_t<std::is_integral_v<T>> SetTracedValueArgHelper(
+    base::internal::priority_tag<5>,
+    TracedValue* traced_value,
+    const char* name,
+    const T& value) {
   // Avoid loss of precision.
   if (sizeof(int) < sizeof(value)) {
     // TODO(crbug.com/1111787): Add 64-bit support to TracedValue.
@@ -131,31 +131,31 @@
 
 // Any floating point type is converted to double.
 template <typename T>
-typename std::enable_if<std::is_floating_point<T>::value>::type
-SetTracedValueArgHelper(base::internal::priority_tag<4>,
-                        TracedValue* traced_value,
-                        const char* name,
-                        const T& value) {
+std::enable_if_t<std::is_floating_point_v<T>> SetTracedValueArgHelper(
+    base::internal::priority_tag<4>,
+    TracedValue* traced_value,
+    const char* name,
+    const T& value) {
   traced_value->SetDouble(name, static_cast<double>(value));
 }
 
 // |void*| is traced natively.
 template <typename T>
-typename std::enable_if<std::is_same<T, void*>::value>::type
-SetTracedValueArgHelper(base::internal::priority_tag<3>,
-                        TracedValue* traced_value,
-                        const char* name,
-                        const T& value) {
+std::enable_if_t<std::is_same_v<T, void*>> SetTracedValueArgHelper(
+    base::internal::priority_tag<3>,
+    TracedValue* traced_value,
+    const char* name,
+    const T& value) {
   traced_value->SetPointer(name, value);
 }
 
 // |const char*| is traced natively.
 template <typename T>
-typename std::enable_if<std::is_same<T, const char*>::value>::type
-SetTracedValueArgHelper(base::internal::priority_tag<2>,
-                        TracedValue* traced_value,
-                        const char* name,
-                        const T& value) {
+std::enable_if_t<std::is_same_v<T, const char*>> SetTracedValueArgHelper(
+    base::internal::priority_tag<2>,
+    TracedValue* traced_value,
+    const char* name,
+    const T& value) {
   traced_value->SetString(name, value);
 }
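The overloads above are ordered with base::internal::priority_tag so that, for example, bool is handled before the generic integral overload. Its exact definition is not shown in this diff, so the sketch below uses a local stand-in with the same shape: a deeper tag converts to every shallower one, and the highest-priority viable overload wins.

#include <cstddef>
#include <iostream>
#include <type_traits>

// Local stand-in for base::internal::priority_tag (illustration only).
template <std::size_t N>
struct priority_tag : priority_tag<N - 1> {};
template <>
struct priority_tag<0> {};

template <typename T>
std::enable_if_t<std::is_same_v<T, bool>> Describe(priority_tag<2>, T) {
  std::cout << "bool\n";
}
template <typename T>
std::enable_if_t<std::is_integral_v<T>> Describe(priority_tag<1>, T) {
  std::cout << "integral\n";
}
template <typename T>
void Describe(priority_tag<0>, T) {
  std::cout << "fallback\n";
}

int main() {
  Describe(priority_tag<2>{}, true);  // "bool": bool is also integral, but the
                                      // deeper tag is a better match.
  Describe(priority_tag<2>{}, 42);    // "integral"
  Describe(priority_tag<2>{}, 3.5);   // "fallback"
}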
 
diff --git a/base/trace_event/trace_event_etw_export_win.cc b/base/trace_event/trace_event_etw_export_win.cc
index a7863e1..3113e96 100644
--- a/base/trace_event/trace_event_etw_export_win.cc
+++ b/base/trace_event/trace_event_etw_export_win.cc
@@ -141,42 +141,16 @@
 // These must be kept as the last two entries in the above array.
 constexpr uint8_t kOtherEventsGroupNameIndex = 46;
 constexpr uint8_t kDisabledOtherEventsGroupNameIndex = 47;
+constexpr uint64_t kCategoryKeywordMask = ~0xFFFF000000000000;
 
 // Max number of available keyword bits.
 constexpr size_t kMaxNumberOfGroupNames = 48;
-uint64_t g_callback_match_any_keyword = 0;
-
-static void __stdcall EtwEnableCallback(LPCGUID SourceId,
-                                        ULONG ControlCode,
-                                        UCHAR Level,
-                                        ULONGLONG MatchAnyKeyword,
-                                        ULONGLONG MatchAllKeyword,
-                                        PEVENT_FILTER_DESCRIPTOR FilterData,
-                                        PVOID CallbackContext) {
-  // This callback is called in the context of an ETW OS thread to
-  // inform the process of the global state of the level and keyword
-  // across all sessions for this provider. We need to update the
-  // local keywords so we log the corresponding events. Protect the
-  // upper 16 bits reserved by winmeta.xml as they should not be used
-  // but older logging code and tools incorrectly used them.
-  g_callback_match_any_keyword = MatchAnyKeyword;
-  g_callback_match_any_keyword &= ~0xFFFF000000000000;
-
-  DVLOG(1) << "ETW Keyword"
-           << " Bits enabled in global context: " << std::hex << MatchAnyKeyword
-           << " Bits enabled in our code: " << std::hex
-           << g_callback_match_any_keyword;
-
-  base::trace_event::TraceEventETWExport::OnETWEnableUpdate();
-}
 
 }  // namespace
 
 namespace base {
 namespace trace_event {
 
-bool TraceEventETWExport::is_registration_complete_ = false;
-
 TraceEventETWExport::TraceEventETWExport() {
   // Construct the ETW provider. If construction fails then the event logging
   // calls will fail. We're passing a callback function as part of registration.
@@ -191,9 +165,11 @@
       0x45B6,
       {0xA0, 0x9F, 0x30, 0xE3, 0x27, 0x15, 0xF4, 0x2D}};
 
-  etw_provider_ = std::make_unique<TlmProvider>("Google.Chrome", Chrome_GUID,
-                                                &EtwEnableCallback);
-  TraceEventETWExport::is_registration_complete_ = true;
+  etw_provider_ = std::make_unique<TlmProvider>(
+      "Google.Chrome", Chrome_GUID,
+      base::BindRepeating(&TraceEventETWExport::OnETWEnableUpdate,
+                          base::Unretained(this)));
+  is_registration_complete_ = true;
 
   // Make sure to initialize the map with all the group names. Subsequent
   // modifications will be made by the background thread and only affect the
@@ -277,6 +253,7 @@
                                    const unsigned char* category_group_enabled,
                                    const char* name,
                                    unsigned long long id,
+                                   TimeTicks timestamp,
                                    const TraceArguments* args) {
   // We bail early in case exporting is disabled or no consumer is listening.
   auto* instance = GetInstance();
@@ -376,20 +353,24 @@
     }
   }
 
+  int64_t timestamp_ms = (timestamp - TimeTicks()).InMilliseconds();
   // Log the event and include the info needed to decode it via TraceLogging
   if (num_args == 0) {
     instance->etw_provider_->WriteEvent(
         name, TlmEventDescriptor(0, keyword),
-        TlmMbcsStringField("Phase", phase_string));
+        TlmMbcsStringField("Phase", phase_string),
+        TlmInt64Field("Timestamp", timestamp_ms));
   } else if (num_args == 1) {
     instance->etw_provider_->WriteEvent(
         name, TlmEventDescriptor(0, keyword),
         TlmMbcsStringField("Phase", phase_string),
+        TlmInt64Field("Timestamp", timestamp_ms),
         TlmMbcsStringField((args->names()[0]), (arg_values_string[0].c_str())));
   } else if (num_args == 2) {
     instance->etw_provider_->WriteEvent(
         name, TlmEventDescriptor(0, keyword),
         TlmMbcsStringField("Phase", phase_string),
+        TlmInt64Field("Timestamp", timestamp_ms),
         TlmMbcsStringField((args->names()[0]), (arg_values_string[0].c_str())),
         TlmMbcsStringField((args->names()[1]), (arg_values_string[1].c_str())));
   } else {
@@ -438,15 +419,17 @@
 }
 
 bool TraceEventETWExport::UpdateEnabledCategories() {
-  if (etw_match_any_keyword_ == g_callback_match_any_keyword)
+  if (etw_match_any_keyword_ ==
+      (etw_provider_->keyword_any() & kCategoryKeywordMask)) {
     return false;
+  }
 
-  // If the global keyword has changed, update each category. The global
+  // If keyword_any() has changed, update each category. The global
   // context is set by UIforETW (or other ETW trace recording tools)
   // using the ETW infrastructure. When the global context changes the
   // callback will be called to set the updated keyword bits in each
-  // browser process that has registered their ETW provider.
-  etw_match_any_keyword_ = g_callback_match_any_keyword;
+  // process that has registered its ETW provider.
+  etw_match_any_keyword_ = etw_provider_->keyword_any() & kCategoryKeywordMask;
   for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++) {
     if (etw_match_any_keyword_ & (1ULL << i)) {
       categories_status_[kFilteredEventGroupNames[i]] = true;
@@ -486,16 +469,14 @@
   }
 }
 
-// static
-void TraceEventETWExport::OnETWEnableUpdate() {
+void TraceEventETWExport::OnETWEnableUpdate(
+    TlmProvider::EventControlCode enabled) {
   // During construction, if tracing is already enabled, we'll get
   // a callback synchronously on the same thread. Calling GetInstance
   // in that case will hang since we're in the process of creating the
   // singleton.
   if (is_registration_complete_) {
-    auto* instance = GetInstance();
-    if (instance)
-      instance->UpdateEnabledCategories();
+    UpdateEnabledCategories();
   }
 }
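A small worked example of the keyword masking introduced above: the upper 16 keyword bits are reserved by winmeta.xml, so they are stripped before individual bits are mapped to category groups. The helper below is illustrative, not Chromium API.

#include <cstddef>
#include <cstdint>

// Mirrors kCategoryKeywordMask above: clear the 16 reserved high bits.
constexpr uint64_t kCategoryKeywordMask = ~0xFFFF000000000000ULL;

constexpr bool IsGroupEnabled(uint64_t match_any_keyword, size_t group_index) {
  const uint64_t category_bits = match_any_keyword & kCategoryKeywordMask;
  return (category_bits & (1ULL << group_index)) != 0;
}

static_assert(IsGroupEnabled(0x2ULL, 1), "bit 1 enables group 1");
static_assert(!IsGroupEnabled(1ULL << 48, 48),
              "reserved high bits are masked off");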
 
diff --git a/base/trace_event/trace_event_etw_export_win.h b/base/trace_event/trace_event_etw_export_win.h
index f0f8070..f7d5481 100644
--- a/base/trace_event/trace_event_etw_export_win.h
+++ b/base/trace_event/trace_event_etw_export_win.h
@@ -50,6 +50,7 @@
                        const unsigned char* category_group_enabled,
                        const char* name,
                        unsigned long long id,
+                       TimeTicks timestamp,
                        const TraceArguments* args);
 
   // Exports an ETW event that marks the end of a complete event.
@@ -59,15 +60,15 @@
   // Returns true if any category in the group is enabled.
   static bool IsCategoryGroupEnabled(StringPiece category_group_name);
 
-  // Called from the ETW EnableCallback when the state of the provider or
-  // keywords has changed.
-  static void OnETWEnableUpdate();
-
  private:
   // Ensure only the provider can construct us.
   friend struct StaticMemorySingletonTraits<TraceEventETWExport>;
   TraceEventETWExport();
 
+  // Called from the ETW EnableCallback when the state of the provider or
+  // keywords has changed.
+  void OnETWEnableUpdate(TlmProvider::EventControlCode enabled);
+
   // Updates the list of enabled categories by consulting the ETW keyword.
   // Returns true if there was a change, false otherwise.
   bool UpdateEnabledCategories();
@@ -77,7 +78,7 @@
   // Returns true if the category is enabled.
   bool IsCategoryEnabled(StringPiece category_name) const;
 
-  static bool is_registration_complete_;
+  bool is_registration_complete_ = false;
 
   // The keywords that were enabled last time the callback was made.
   uint64_t etw_match_any_keyword_ = 0;
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 87cf11a..dd3b5db 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -597,7 +597,7 @@
 struct TraceLog::RegisteredAsyncObserver {
   explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
       : observer(observer),
-        task_runner(SingleThreadTaskRunner::GetCurrentDefault()) {}
+        task_runner(SequencedTaskRunner::GetCurrentDefault()) {}
   ~RegisteredAsyncObserver() = default;
 
   WeakPtr<AsyncEnabledStateObserver> observer;
@@ -971,6 +971,9 @@
   g_perfetto_initialized_by_tracelog = true;
   perfetto::TracingInitArgs init_args;
   init_args.backends = perfetto::BackendType::kInProcessBackend;
+  init_args.shmem_batch_commits_duration_ms = 1000;
+  init_args.shmem_size_hint_kb = 4 * 1024;
+  init_args.shmem_direct_patching_enabled = true;
   init_args.disallow_merging_with_system_tracks = true;
   perfetto::Tracing::Initialize(init_args);
   TrackEvent::Register();
@@ -1561,6 +1564,7 @@
     const char* name,
     uint64_t id,
     PlatformThreadId thread_id,
+    const TimeTicks timestamp,
     TraceArguments* args) {
   if (!*category_group_enabled)
     return false;
@@ -1614,7 +1618,7 @@
     // ETW export expects non-null event names.
     name = name ? name : "";
     TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
-                                  args);
+                                  timestamp, args);
   }
 #endif  // BUILDFLAG(IS_WIN)
   return true;
@@ -1726,7 +1730,7 @@
     unsigned int flags) NO_THREAD_SAFETY_ANALYSIS {
   TraceEventHandle handle = {0, 0, 0};
   if (!ShouldAddAfterUpdatingState(phase, category_group_enabled, name, id,
-                                   thread_id, args)) {
+                                   thread_id, timestamp, args)) {
     return handle;
   }
   DCHECK(!timestamp.is_null());
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index f68955c..bfa4e08 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -158,9 +158,10 @@
 
   // Asynchronous enabled state listeners. When tracing is enabled or disabled,
   // for each observer, a task for invoking its appropriate callback is posted
-  // to the thread from which AddAsyncEnabledStateObserver() was called. This
-  // allows the observer to be safely destroyed, provided that it happens on the
-  // same thread that invoked AddAsyncEnabledStateObserver().
+  // to the `SequencedTaskRunner` from which AddAsyncEnabledStateObserver() was
+  // called. This allows the observer to be safely destroyed, provided that it
+  // happens on the same `SequencedTaskRunner` that invoked
+  // AddAsyncEnabledStateObserver().
   class BASE_EXPORT AsyncEnabledStateObserver {
    public:
     virtual ~AsyncEnabledStateObserver() = default;
@@ -279,6 +280,7 @@
                                    const char* name,
                                    uint64_t id,
                                    PlatformThreadId thread_id,
+                                   const TimeTicks timestamp,
                                    TraceArguments* args);
   TraceEventHandle AddTraceEvent(char phase,
                                  const unsigned char* category_group_enabled,
diff --git a/base/trace_event/trace_logging_minimal_win.cc b/base/trace_event/trace_logging_minimal_win.cc
index 5fb2a8c..d7284bc 100644
--- a/base/trace_event/trace_logging_minimal_win.cc
+++ b/base/trace_event/trace_logging_minimal_win.cc
@@ -10,16 +10,18 @@
 #include "base/logging.h"
 #include "base/numerics/checked_math.h"
 
+TlmProvider::TlmProvider() noexcept = default;
+
 TlmProvider::~TlmProvider() {
   Unregister();
 }
 
 TlmProvider::TlmProvider(const char* provider_name,
                          const GUID& provider_guid,
-                         PENABLECALLBACK enable_callback,
-                         void* enable_callback_context) noexcept {
-  ULONG status = Register(provider_name, provider_guid, enable_callback,
-                          enable_callback_context);
+                         base::RepeatingCallback<void(EventControlCode)>
+                             on_updated_callback) noexcept {
+  ULONG status =
+      Register(provider_name, provider_guid, std::move(on_updated_callback));
   LOG_IF(ERROR, status != ERROR_SUCCESS) << "Provider registration failure";
 }
 
@@ -53,8 +55,8 @@
 
 ULONG TlmProvider::Register(const char* provider_name,
                             const GUID& provider_guid,
-                            PENABLECALLBACK enable_callback,
-                            void* enable_callback_context) noexcept {
+                            base::RepeatingCallback<void(EventControlCode)>
+                                on_updated_callback) noexcept {
   // Calling Register when already registered is a fatal error.
   CHECK_EQ(reg_handle_, 0ULL);
 
@@ -72,8 +74,7 @@
   // Fill in MetadataSize field at offset 0.
   *reinterpret_cast<uint16_t*>(provider_metadata_) = provider_metadata_size_;
 
-  enable_callback_ = enable_callback;
-  enable_callback_context_ = enable_callback_context;
+  on_updated_callback_ = std::move(on_updated_callback);
   ULONG status =
       EventRegister(&provider_guid, StaticEnableCallback, this, &reg_handle_);
   if (status != ERROR_SUCCESS)
@@ -112,23 +113,23 @@
   if (!callback_context)
     return;
 
-  TlmProvider* pProvider = static_cast<TlmProvider*>(callback_context);
+  TlmProvider* provider = static_cast<TlmProvider*>(callback_context);
   switch (is_enabled) {
     case EVENT_CONTROL_CODE_DISABLE_PROVIDER:
-      pProvider->level_plus1_ = 0;
+      provider->level_plus1_ = 0;
       break;
     case EVENT_CONTROL_CODE_ENABLE_PROVIDER:
-      pProvider->level_plus1_ =
+      provider->level_plus1_ =
           level != 0 ? static_cast<unsigned>(level) + 1u : 256u;
-      pProvider->keyword_any_ = match_any_keyword;
-      pProvider->keyword_all_ = match_all_keyword;
       break;
   }
+  provider->keyword_any_ = match_any_keyword;
+  provider->keyword_all_ = match_all_keyword;
 
-  if (pProvider->enable_callback_) {
-    pProvider->enable_callback_(source_id, is_enabled, level, match_any_keyword,
-                                match_all_keyword, filter_data,
-                                pProvider->enable_callback_context_);
+  if (provider->on_updated_callback_ &&
+      is_enabled <= static_cast<size_t>(EventControlCode::kHighest)) {
+    provider->on_updated_callback_.Run(
+        static_cast<EventControlCode>(is_enabled));
   }
 }
 
@@ -227,6 +228,18 @@
          ((keyword & keyword_any_) && (keyword & keyword_all_) == keyword_all_);
 }
 
+TlmInt64Field::TlmInt64Field(const char* name, const int64_t value) noexcept
+    : TlmFieldBase(name), value_(value) {
+  DCHECK_NE(Name(), nullptr);
+}
+int64_t TlmInt64Field::Value() const noexcept {
+  return value_;
+}
+void TlmInt64Field::FillEventDescriptor(
+    EVENT_DATA_DESCRIPTOR* descriptors) const noexcept {
+  EventDataDescCreate(&descriptors[0], (void*)&value_, sizeof(value_));
+}
+
 TlmMbcsStringField::TlmMbcsStringField(const char* name,
                                        const char* value) noexcept
     : TlmFieldBase(name), value_(value) {
diff --git a/base/trace_event/trace_logging_minimal_win.h b/base/trace_event/trace_logging_minimal_win.h
index 346e687..3a42a10 100644
--- a/base/trace_event/trace_logging_minimal_win.h
+++ b/base/trace_event/trace_logging_minimal_win.h
@@ -27,6 +27,7 @@
 #include <windows.h>
 // Evntprov.h must come after windows.h.
 #include <evntprov.h>
+#include <cstdint>
 // TODO([email protected]) Update headers and use defined constants instead
 // of magic numbers after crbug.com/1089996 is resolved.
 
@@ -107,23 +108,31 @@
  *     my_provider.Unregister();
  */
 
+#include "base/functional/callback.h"
 #include "base/memory/raw_ptr.h"
 
 class TlmProvider {
  public:
+  enum class EventControlCode {
+    kDisableProvider = 0,
+    kEnableProvider = 1,
+    kCaptureState = 2,
+    kHighest = kCaptureState
+  };
+
   // Initialize a provider in the unregistered state.
   // Note that WriteEvent and Unregister operations on an unregistered
   // provider are safe no-ops.
-  constexpr TlmProvider() noexcept = default;
+  TlmProvider() noexcept;
 
   // Initializes a provider and attempts to register it.
   // If there is an error, provider will be left unregistered.
   // Note that WriteEvent and Unregister operations on an unregistered
   // provider are safe no-ops.
-  TlmProvider(const char* provider_name,
-              const GUID& provider_guid,
-              PENABLECALLBACK enable_callback = nullptr,
-              void* enable_callback_context = nullptr) noexcept;
+  TlmProvider(
+      const char* provider_name,
+      const GUID& provider_guid,
+      base::RepeatingCallback<void(EventControlCode)> on_updated) noexcept;
 
   // If provider is registered, unregisters provider.
   ~TlmProvider();
@@ -145,10 +154,10 @@
   // Calling Register on an already-registered provider is a fatal error.
   // Not thread safe - caller must ensure serialization between calls to
   // Register() and calls to Unregister().
-  ULONG Register(const char* provider_name,
-                 const GUID& provider_guid,
-                 PENABLECALLBACK enable_callback = nullptr,
-                 void* enable_callback_context = nullptr) noexcept;
+  ULONG Register(
+      const char* provider_name,
+      const GUID& provider_guid,
+      base::RepeatingCallback<void(EventControlCode)> on_updated) noexcept;
 
   // Returns true if any active trace listeners are interested in any events
   // from this provider.
@@ -169,6 +178,8 @@
   // Equivalent to IsEnabled(event_descriptor.level, event_descriptor.keyword).
   bool IsEnabled(const EVENT_DESCRIPTOR& event_descriptor) const noexcept;
 
+  uint64_t keyword_any() const { return keyword_any_; }
+
   // If any active trace listeners are interested in events from this provider
   // with the specified level and keyword, packs the data into an event and
   // sends it to ETW. Returns Win32 error code or 0 for success.
@@ -282,8 +293,7 @@
   uint64_t keyword_any_ = 0;
   uint64_t keyword_all_ = 0;
   uint64_t reg_handle_ = 0;
-  PENABLECALLBACK enable_callback_ = nullptr;
-  raw_ptr<void> enable_callback_context_ = nullptr;
+  base::RepeatingCallback<void(EventControlCode)> on_updated_callback_;
   char provider_metadata_[kMaxProviderMetadataSize] = {};
 };
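A minimal sketch (not part of this change) of how a provider might now be registered with the callback-based API above; the provider name, GUID, and callback body are hypothetical, and base::BindRepeating is assumed to be available via base/functional/bind.h:

    TlmProvider provider;
    provider.Register(
        "MyEtwProvider", kMyProviderGuid,  // kMyProviderGuid: a GUID defined elsewhere.
        base::BindRepeating([](TlmProvider::EventControlCode code) {
          if (code == TlmProvider::EventControlCode::kEnableProvider) {
            // A trace session started listening to this provider.
          }
        }));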
 
@@ -343,6 +353,21 @@
   const char* value_;
 };
 
+// Class that represents an event field containing a 64 bit signed integer.
+class TlmInt64Field
+    : public TlmFieldBase<1, 9>  // 1 data descriptor, Type = _TlgInINT64
+{
+ public:
+  // name is a utf-8 nul-terminated string.
+  // value is a 64-bit signed integer.
+  TlmInt64Field(const char* name, const int64_t value) noexcept;
+  int64_t Value() const noexcept;
+  void FillEventDescriptor(EVENT_DATA_DESCRIPTOR* descriptors) const noexcept;
+
+ private:
+  const int64_t value_;
+};
+
 // Helper for creating event descriptors for use with WriteEvent.
 constexpr EVENT_DESCRIPTOR TlmEventDescriptor(uint8_t level,
                                               uint64_t keyword) noexcept {
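A hedged usage sketch for the new int64 field type, assuming WriteEvent accepts an event name, an EVENT_DESCRIPTOR, and a list of fields the same way it does for the existing string field types; the field name, value, level, and keyword below are illustrative only:

    TlmInt64Field latency_field("latency_us", 1234);
    const EVENT_DESCRIPTOR descriptor = TlmEventDescriptor(/*level=*/5, /*keyword=*/0x1);
    provider.WriteEvent("LatencySample", descriptor, latency_field);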
diff --git a/base/trace_event/traced_value_support.h b/base/trace_event/traced_value_support.h
index 6e9c106..b797eb3 100644
--- a/base/trace_event/traced_value_support.h
+++ b/base/trace_event/traced_value_support.h
@@ -215,14 +215,6 @@
 
 // base::StringPiece support.
 template <>
-struct TraceFormatTraits<::base::StringPiece> {
-  static void WriteIntoTrace(perfetto::TracedValue context,
-                             ::base::StringPiece value) {
-    return std::move(context).WriteString(value.data(), value.length());
-  }
-};
-
-template <>
 struct TraceFormatTraits<::base::StringPiece16> {
   static void WriteIntoTrace(perfetto::TracedValue context,
                              ::base::StringPiece16 value) {
diff --git a/base/trace_event/typed_macros_internal.cc b/base/trace_event/typed_macros_internal.cc
index 2fbda56..cf759d1 100644
--- a/base/trace_event/typed_macros_internal.cc
+++ b/base/trace_event/typed_macros_internal.cc
@@ -140,12 +140,6 @@
   auto phase_and_id_for_trace_log =
       GetPhaseAndIdForTraceLog(explicit_track, track_uuid, phase);
 
-  if (!trace_log->ShouldAddAfterUpdatingState(
-          phase_and_id_for_trace_log.first, category_group_enabled, name.value,
-          phase_and_id_for_trace_log.second, thread_id, nullptr)) {
-    return base::trace_event::TrackEventHandle();
-  }
-
   unsigned int flags = TRACE_EVENT_FLAG_NONE;
   if (ts.is_null()) {
     ts = TRACE_TIME_TICKS_NOW();
@@ -153,6 +147,12 @@
     flags |= TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP;
   }
 
+  if (!trace_log->ShouldAddAfterUpdatingState(
+          phase_and_id_for_trace_log.first, category_group_enabled, name.value,
+          phase_and_id_for_trace_log.second, thread_id, ts, nullptr)) {
+    return base::trace_event::TrackEventHandle();
+  }
+
   if (phase == TRACE_EVENT_PHASE_INSTANT && !explicit_track) {
     flags |= TRACE_EVENT_SCOPE_THREAD;
   }
diff --git a/base/trace_event/typed_macros_internal.h b/base/trace_event/typed_macros_internal.h
index a208dd7..a046404 100644
--- a/base/trace_event/typed_macros_internal.h
+++ b/base/trace_event/typed_macros_internal.h
@@ -147,8 +147,8 @@
 
 template <typename TrackType,
           typename... Args,
-          typename TrackTypeCheck = typename std::enable_if<
-              std::is_convertible<TrackType, perfetto::Track>::value>::type>
+          typename TrackTypeCheck = std::enable_if_t<
+              std::is_convertible_v<TrackType, perfetto::Track>>>
 inline void AddTypedTraceEvent(char phase,
                                const unsigned char* category_group_enabled,
                                perfetto::StaticString name,
@@ -162,8 +162,8 @@
 
 template <typename TrackType,
           typename... Args,
-          typename TrackTypeCheck = typename std::enable_if<
-              std::is_convertible<TrackType, perfetto::Track>::value>::type>
+          typename TrackTypeCheck = std::enable_if_t<
+              std::is_convertible_v<TrackType, perfetto::Track>>>
 inline void AddTypedTraceEvent(char phase,
                                const unsigned char* category_group_enabled,
                                perfetto::StaticString name,
diff --git a/base/tracing/BUILD.gn b/base/tracing/BUILD.gn
new file mode 100644
index 0000000..624cc28
--- /dev/null
+++ b/base/tracing/BUILD.gn
@@ -0,0 +1,81 @@
+# Copyright 2023 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/util/generate_wrapper.gni")
+import("//build_overrides/build.gni")
+
+if (enable_base_tracing) {
+  generate_wrapper("perfetto_diff_tests") {
+    testonly = true
+
+    _protos_path = "$root_gen_dir/third_party/perfetto/protos"
+    _trace_descriptor = "$_protos_path/perfetto/trace/trace.descriptor"
+    _rebased_trace_descriptor = rebase_path(_trace_descriptor, root_build_dir)
+    _test_extensions_descriptor =
+        "$_protos_path/perfetto/trace/test_extensions.descriptor"
+    _rebased_test_extensions_descriptor =
+        rebase_path(_test_extensions_descriptor, root_build_dir)
+    _metrics_descriptor = "$_protos_path/perfetto/metrics/metrics.descriptor"
+    _rebased_metrics_descriptor =
+        rebase_path(_metrics_descriptor, root_build_dir)
+    _all_chrome_metrics_descriptor =
+        "$_protos_path/perfetto/metrics/chrome/all_chrome_metrics.descriptor"
+    _rebased_all_chrome_metrics_descriptor =
+        rebase_path(_all_chrome_metrics_descriptor, root_build_dir)
+    _chrome_track_event_descriptor =
+        "$root_gen_dir/base/tracing/protos/chrome_track_event.descriptor"
+    _rebased_chrome_track_event_descriptor =
+        rebase_path(_chrome_track_event_descriptor, root_build_dir)
+
+    _perfetto_script =
+        rebase_path("//third_party/perfetto/tools/diff_test_trace_processor.py",
+                    root_build_dir)
+    _chrome_stdlib = rebase_path("//base/tracing/stdlib/chrome", root_build_dir)
+    _test_dir = rebase_path("//base/tracing", root_build_dir)
+
+    executable = "//base/tracing/test/run_perfetto_diff_tests.py"
+    wrapper_script = "$root_build_dir/bin/run_perfetto_diff_tests"
+
+    executable_args = [
+      "--trace-processor-shell",
+      "@WrappedPath(./trace_processor_shell)",
+      "--chrome-stdlib",
+      "@WrappedPath($_chrome_stdlib)",
+      "--test-dir",
+      "@WrappedPath($_test_dir)",
+      "--trace-descriptor",
+      "@WrappedPath($_rebased_trace_descriptor)",
+      "--test-extensions-descriptor",
+      "@WrappedPath($_rebased_test_extensions_descriptor)",
+      "--metrics-descriptor",
+      "@WrappedPath($_rebased_metrics_descriptor)",
+      "--all-chrome-metrics-descriptor",
+      "@WrappedPath($_rebased_all_chrome_metrics_descriptor)",
+      "--chrome-track-event-descriptor",
+      "@WrappedPath($_rebased_chrome_track_event_descriptor)",
+      "--script",
+      "@WrappedPath($_perfetto_script)",
+    ]
+
+    data_deps = [
+      "//base/tracing/protos:chrome_track_event",
+      "//third_party/perfetto/protos/perfetto/metrics:descriptor",
+      "//third_party/perfetto/protos/perfetto/metrics/chrome:descriptor",
+      "//third_party/perfetto/protos/perfetto/trace:descriptor",
+      "//third_party/perfetto/protos/perfetto/trace:test_extensions_descriptor",
+      "//third_party/perfetto/src/trace_processor:trace_processor_shell",
+    ]
+
+    data = [
+      "//third_party/perfetto/tools/diff_test_trace_processor.py",
+      "//third_party/perfetto/python/generators/diff_tests/",
+      "//base/tracing/",
+      _trace_descriptor,
+      _test_extensions_descriptor,
+      _metrics_descriptor,
+      _all_chrome_metrics_descriptor,
+      _chrome_track_event_descriptor,
+    ]
+  }
+}
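For reference, the wrapper produced by this target is built and run as described by the presubmit message later in this change (the output directory name is illustrative):

    autoninja -C out/Default perfetto_diff_tests
    out/Default/bin/run_perfetto_diff_tests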
diff --git a/base/tracing/PRESUBMIT.py b/base/tracing/PRESUBMIT.py
index 84a9c88..f8171e6 100644
--- a/base/tracing/PRESUBMIT.py
+++ b/base/tracing/PRESUBMIT.py
@@ -2,13 +2,14 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import subprocess
-
 PRESUBMIT_VERSION = '2.0.0'
 
 def CheckSqlModules(input_api, output_api):
   stdlib_dir = input_api.PresubmitLocalPath()
-  chromium_src_dir = input_api.os_path.join(stdlib_dir, '..', '..')
+  chromium_src_dir = input_api.os_path.abspath(
+    input_api.os_path.join(stdlib_dir, '..', '..'))
+  perfetto_src_dir = input_api.os_path.join(
+    chromium_src_dir, 'third_party', 'perfetto')
   tool = input_api.os_path.join(
     chromium_src_dir,
     'third_party', 'perfetto', 'tools', 'check_sql_modules.py')
@@ -16,10 +17,42 @@
     input_api.python3_executable,
     tool,
     '--stdlib-sources',
-    './stdlib/chrome'
+    input_api.os_path.join(
+      perfetto_src_dir, '..', '..', 'base', 'tracing', 'stdlib', 'chrome')
     ]
-  if subprocess.call(cmd):
-    # TODO(b/283962174): Add presubmit failure when TP stdlib migration
-    # is complete.
+  test_cmd = input_api.Command(
+    name='check_sql_modules',
+    cmd=cmd,
+    kwargs={},
+    message=output_api.PresubmitNotifyResult)
+  return input_api.RunTests([test_cmd])
+
+_STDLIB_PATHS = (
+  r"^base/tracing/stdlib/",
+  r"^base/tracing/test/",
+  r"^base/tracing/protos/"
+)
+
+def CheckPerfettoTestsTag(input_api, output_api):
+  """Checks that commits to the trace processor chrome stdlib or the
+  Perfetto diff tests contain a PERFETTO_TESTS tag in their commit
+  message."""
+  def FileFilter(affected_file):
+    return input_api.FilterSourceFile(affected_file,
+                                      files_to_check=_STDLIB_PATHS)
+
+  # Only consider changes to chrome stdlib or tests paths
+  if not any(input_api.AffectedFiles(file_filter=FileFilter)):
     return []
-  return []
+
+  if input_api.change.PERFETTO_TESTS:
+    return []
+
+  message = (
+    'Must provide PERFETTO_TESTS='
+    '`autoninja -C out/Default perfetto_diff_tests && '
+    'out/Default/bin/run_perfetto_diff_tests` line in CL description.'
+    '\nPlease ensure the Perfetto diff tests pass before submitting.'
+  )
+  return [output_api.PresubmitNotifyResult(message)]
diff --git a/base/tracing/README.md b/base/tracing/README.md
index fac9f22..1231441 100644
--- a/base/tracing/README.md
+++ b/base/tracing/README.md
@@ -22,5 +22,5 @@
 [Typed trace events in
 //base](https://docs.google.com/document/d/1UQ4Ez7B-TeowijOUuMXuoWj1amZcQ7E2abt3s4jaAEY/edit#).
 
-Note: The integration with Perfetto's SDK is work-in-progress behind the gn flag
-"use_perfetto_client_library".
\ No newline at end of file
+Note: The integration with Perfetto's SDK is work-in-progress (https://crbug.com/1006541) behind
+the gn flag "use_perfetto_client_library".
diff --git a/base/tracing/protos/chrome_track_event.proto b/base/tracing/protos/chrome_track_event.proto
index 29aad1c..6642302 100644
--- a/base/tracing/protos/chrome_track_event.proto
+++ b/base/tracing/protos/chrome_track_event.proto
@@ -977,6 +977,7 @@
     BLOCKED_TAB_SWITCHER_MODE = 10;
     BLOCKED_COMPOSITOR_IN_MOTION = 11;
     BLOCKED_NTP_Y_TRANSLATION = 12;
+    BLOCKED_FULLSCREEN = 13;
   }
   enum AllowCaptureReason {
     ALLOWED_UNKNOWN = 0;
@@ -1327,6 +1328,26 @@
   optional uint32 frame_size_bytes = 5;
 };
 
+// Debug information for system layer of audio rendering on ChromeOS.
+message CrasUnified {
+  // The total duration of silence due to missing samples during the stream.
+  optional int64 underrun_duration_us = 1;
+  // Previous underrun duration, used for calculating the length of silence
+  // since the last callback.
+  optional int64 last_underrun_duration_us = 2;
+  // Difference in total underrun duration since the last callback. Logged only
+  // when positive, which is when a glitch occurs.
+  optional int64 underrun_glitch_duration_us = 3;
+  // OS playout latency reported by cras.
+  optional int64 latency_us = 4;
+  // The number of frames that the stream requests from the audio source.
+  optional int32 requested_frames = 5;
+  // The number of frames that the source provided.
+  optional uint32 filled_frames = 6;
+  // Sample rate of the stream.
+  optional int32 sample_rate = 7;
+};
+
 message ChromeUnguessableToken {
   optional uint64 low_token = 1;
   optional uint64 high_token = 2;
@@ -1356,6 +1377,10 @@
     STEP_SEND_BEGIN_MAIN_FRAME = 8;
     STEP_SUBMIT_COMPOSITOR_FRAME = 9;
     STEP_SURFACE_AGGREGATION = 10;
+    STEP_SEND_BUFFER_SWAP = 11;
+    STEP_BUFFER_SWAP_POST_SUBMIT = 12;
+    STEP_FINISH_BUFFER_SWAP = 13;
+    STEP_SWAP_BUFFERS_ACK = 14;
   }
   enum FrameSkippedReason {
     SKIPPED_REASON_UNKNOWN = 0;
@@ -1372,9 +1397,66 @@
   optional FrameSkippedReason frame_skipped_reason = 6;
 };
 
+message LibunwindstackUnwinder {
+  // The enum is a copy of the ErrorCode enum inside third_party/libunwindstack;
+  // ideally it should be kept in sync with that.
+  enum ErrorCode {
+    ERROR_NONE = 0;            // No error.
+    ERROR_MEMORY_INVALID = 1;  // Memory read failed.
+    ERROR_UNWIND_INFO = 2;     // Unable to use unwind information to unwind.
+    ERROR_UNSUPPORTED = 3;     // Encountered unsupported feature.
+    ERROR_INVALID_MAP = 4;     // Unwind in an invalid map.
+    ERROR_MAX_FRAMES_EXCEEDED = 5;  // The number of frames exceed the total
+                                    // allowed.
+    ERROR_REPEATED_FRAME = 6;  // The last frame has the same pc/sp as the next.
+    ERROR_INVALID_ELF = 7;     // Unwind in an invalid elf.
+    ERROR_THREAD_DOES_NOT_EXIST = 8;  // Attempt to unwind a local thread that
+                                      // does not exist.
+    ERROR_THREAD_TIMEOUT = 9;  // Timeout trying to unwind a local thread.
+    ERROR_SYSTEM_CALL = 10;    // System call failed while unwinding.
+    ERROR_BAD_ARCH = 11;       // Arch invalid (none, or mismatched).
+    ERROR_MAPS_PARSE = 12;     // Failed to parse maps data.
+    ERROR_INVALID_PARAMETER_LIBUNWINDSTACK =
+        13;                  // Invalid parameter passed to function.
+    ERROR_PTRACE_CALL = 14;  // Ptrace call failed while unwinding.
+  }
+  optional ErrorCode error_code = 1;
+  optional int32 num_frames = 2;
+};
+
+message ScrollPredictorMetrics {
+  message EventFrameValue {
+    optional int64 event_trace_id = 1;
+    // The fractional pixels (can be fractional after the predictor adjusts in
+    // resampling of input) that the page was scrolled by this frame.
+    optional float delta_value_pixels = 2;
+  };
+  // Data from the previous, current, and next frame used to determine the
+  // values below as according to the metric doc:
+  // http://doc/1Y0u0Tq5eUZff75nYUzQVw6JxmbZAW9m64pJidmnGWsY.
+  optional EventFrameValue prev_event_frame_value = 1;
+  optional EventFrameValue cur_event_frame_value = 2;
+  optional EventFrameValue next_event_frame_value = 3;
+  // This is the amount of delta processed in this frame that was above the
+  // janky threshold (as defined by
+  // http://doc/1Y0u0Tq5eUZff75nYUzQVw6JxmbZAW9m64pJidmnGWsY)
+  optional float janky_value_pixels = 4;
+  // True if we are also missing frames (so multiple frames are being presented
+  // at once).
+  optional bool has_missed_vsyncs = 5;
+  // True if we're moving less than the slow scroll threshold as defined by the
+  // doc above.
+  optional bool is_slow_scroll = 6;
+};
+
+message PageLoad {
+  optional int64 navigation_id = 1;
+  optional string url = 2;
+}
+
 message ChromeTrackEvent {
   // Extension range for Chrome: 1000-1999
-  // Next ID: 1053
+  // Next ID: 1057
   extend TrackEvent {
     optional ChromeAppState chrome_app_state = 1000;
 
@@ -1486,5 +1568,13 @@
     optional LinuxPulseOutput linux_pulse_output = 1051;
 
     optional ChromeGraphicsPipeline chrome_graphics_pipeline = 1052;
+
+    optional CrasUnified chromeos_cras_unified = 1053;
+
+    optional LibunwindstackUnwinder libunwindstack_unwinder = 1054;
+
+    optional ScrollPredictorMetrics scroll_predictor_metrics = 1055;
+
+    optional PageLoad page_load = 1056;
   }
 }
diff --git a/base/tracing/stdlib/chrome/BUILD.gn b/base/tracing/stdlib/chrome/BUILD.gn
new file mode 100644
index 0000000..7e54d5b
--- /dev/null
+++ b/base/tracing/stdlib/chrome/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2023 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# List the SQL files in the perfetto_sql_files.gni so the list can be
+# used in Chromium targets as well.
+import("perfetto_sql_files.gni")
+
+# This file is rolled to Perfetto so this relative path is intended to
+# work in the Perfetto repository but does not make sense here.
+import("../../../../../gn/perfetto_sql.gni")
+perfetto_sql_source_set("chrome_sql") {
+  deps = [ "scroll_jank" ]
+  sources = chrome_stdlib_sql_files
+}
diff --git a/base/tracing/stdlib/chrome/README b/base/tracing/stdlib/chrome/README
new file mode 100644
index 0000000..1d70202
--- /dev/null
+++ b/base/tracing/stdlib/chrome/README
@@ -0,0 +1,5 @@
+# PerfettoSQL Chrome Standard Library
+
+The [PerfettoSQL Standard Library](https://perfetto.dev/docs/analysis/stdlib-docs) contains commonly used SQL tables, views, functions and macros to make it easier for users to query traces. The Chrome Standard Library contains those metrics that are specific to Chrome.
+
+The source of truth of the Perfetto SQL Chrome stdlib has been moved from Perfetto to the Chromium repository to make it easier to develop new metrics and add tests for them in a single Chromium CL.
\ No newline at end of file
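For context, a module from this library is pulled into a trace processor query with an INCLUDE statement, as the diff tests in this change do; the module and column names below mirror chrome_scrolls.sql:

    INCLUDE PERFETTO MODULE chrome.chrome_scrolls;
    SELECT id, ts, dur, gesture_scroll_begin_ts, gesture_scroll_end_ts
    FROM chrome_scrolls
    ORDER BY ts;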
diff --git a/base/tracing/stdlib/chrome/chrome_scroll_janks.sql b/base/tracing/stdlib/chrome/chrome_scroll_janks.sql
deleted file mode 100644
index e53e8d9..0000000
--- a/base/tracing/stdlib/chrome/chrome_scroll_janks.sql
+++ /dev/null
@@ -1,109 +0,0 @@
--- Copyright 2023 The Chromium Authors
--- Use of this source code is governed by a BSD-style license that can be
--- found in the LICENSE file.
-
--- TODO(b/286187288): Move this dependency to stdlib.
-SELECT RUN_METRIC('chrome/chrome_scroll_jank_v3.sql');
-SELECT IMPORT('common.slices');
-
--- Selects EventLatency slices that correspond with janks in a scroll. This is
--- based on the V3 version of scroll jank metrics.
---
--- @column id INT                     The slice id.
--- @column ts INT                     The start timestamp of the slice.
--- @column dur INT                    The duration of the slice.
--- @column track_id INT               The track_id for the slice.
--- @column name STRING                The name of the slice (EventLatency).
--- @column cause_of_jank STRING       The stage of EventLatency that the caused
---                                    the jank.
--- @column sub_cause_of_jank STRING   The stage of cause_of_jank that caused the
---                                    jank.
--- @column delayed_frame_count INT    How many vsyncs this frame missed its
---                                    deadline by.
--- @column frame_jank_ts INT          The start timestamp where frame
---                                    frame presentation was delayed.
--- @column frame_jank_dur INT         The duration in ms of the delay in frame
---                                    presentation.
-CREATE TABLE chrome_janky_event_latencies_v3 AS
-SELECT
-    s.id,
-    s.ts,
-    s.dur,
-    s.track_id,
-    s.name,
-    e.cause_of_jank,
-    e.sub_cause_of_jank,
-    CAST((e.delay_since_last_frame/e.vsync_interval) AS INT) AS delayed_frame_count,
-    CAST(s.ts + s.dur - ((e.delay_since_last_frame - e.vsync_interval) * 1e6) AS INT) AS frame_jank_ts,
-    CAST((e.delay_since_last_frame - e.vsync_interval) * 1e6 AS INT) AS frame_jank_dur
-FROM slice s
-JOIN chrome_janky_frames e
-  ON s.id = e. event_latency_id;
-
--- Frame presentation interval is the delta between when the frame was supposed
--- to be presented and when it was actually presented.
---
--- @column id INT                     Unique id.
--- @column ts INT                     The start timestamp of the slice.
--- @column dur INT                    The duration of the slice.
--- @column delayed_frame_count INT    How many vsyncs this frame missed its
---                                    deadline by.
--- @column event_latency_id STRING    The id of the associated event latency in
---                                    the slice table.
-CREATE VIEW chrome_janky_frame_presentation_intervals AS
-SELECT
-    ROW_NUMBER() OVER(ORDER BY frame_jank_ts) AS id,
-    frame_jank_ts AS ts,
-    frame_jank_dur AS dur,
-    delayed_frame_count,
-    id AS event_latency_id
-FROM chrome_janky_event_latencies_v3;
-
--- Defines slices for all of janky scrolling intervals in a trace.
---
--- @column id            The unique identifier of the janky interval.
--- @column ts            The start timestamp of the janky interval.
--- @column dur           The duration of the janky interval.
-CREATE TABLE chrome_scroll_jank_intervals_v3 AS
--- Sub-table to retrieve all janky slice timestamps. Ordering calculations are
--- based on timestamps rather than durations.
-WITH janky_latencies AS (
-  SELECT
-    s.frame_jank_ts AS start_ts,
-    s.frame_jank_ts + s.frame_jank_dur AS end_ts
-  FROM chrome_janky_event_latencies_v3 s),
--- Determine the local maximum timestamp for janks thus far; this will allow
--- us to coalesce all earlier events up to the maximum.
-ordered_jank_end_ts AS (
-  SELECT
-    *,
-    MAX(end_ts) OVER (
-      ORDER BY start_ts ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
-      AS max_end_ts_so_far
-  FROM janky_latencies),
--- Determine the local minimum timestamp for janks thus far; this will allow
--- us to coalesce all later events up to the nearest local maximum.
-range_starts AS (
-  SELECT
-    *,
-    CASE
-      -- This is a two-pass calculation to calculate the first event in the
-      -- group. An event is considered the first event in a group if all events
-      -- which started before it also finished the current one started.
-      WHEN start_ts <= 1 + LAG(max_end_ts_so_far) OVER (ORDER BY start_ts) THEN 0
-      ELSE 1
-    END AS range_start
-  FROM ordered_jank_end_ts),
--- Assign an id to allow coalescing of individual slices.
-range_groups AS (
-  SELECT
-    *,
-    SUM(range_start) OVER (ORDER BY start_ts) AS range_group
-  FROM range_starts)
--- Coalesce all slices within an interval.
-SELECT
-  range_group AS id,
-  MIN(start_ts) AS ts,
-  MAX(end_ts) - MIN(start_ts) AS dur
-FROM range_groups
-GROUP BY range_group;
diff --git a/base/tracing/stdlib/chrome/chrome_scrolls.sql b/base/tracing/stdlib/chrome/chrome_scrolls.sql
index 5c01a20..b40aed3 100644
--- a/base/tracing/stdlib/chrome/chrome_scrolls.sql
+++ b/base/tracing/stdlib/chrome/chrome_scrolls.sql
@@ -5,9 +5,15 @@
 -- Defines slices for all of the individual scrolls in a trace based on the
 -- LatencyInfo-based scroll definition.
 --
--- @column id            The unique identifier of the scroll.
--- @column ts            The start timestamp of the scroll.
--- @column dur           The duration of the scroll.
+-- @column id                          The unique identifier of the scroll.
+-- @column ts                          The start timestamp of the scroll.
+-- @column dur                         The duration of the scroll.
+-- @column gesture_scroll_begin_ts     The earliest timestamp of the
+--                                     InputLatency::GestureScrollBegin for the
+--                                     corresponding scroll id.
+-- @column gesture_scroll_end_ts       The earliest timestamp of the
+--                                     InputLatency::GestureScrollEnd for the
+--                                     corresponding scroll id.
 --
 -- NOTE: this view of top level scrolls is based on the LatencyInfo definition
 -- of a scroll, which differs subtly from the definition based on
@@ -16,7 +22,7 @@
 -- WebView instances. Currently gesture_scroll_id unique within an instance, but
 -- is not unique across multiple instances. Switching to an EventLatency based
 -- definition of scrolls should resolve this.
-CREATE TABLE chrome_scrolls AS
+CREATE PERFETTO TABLE chrome_scrolls AS
 WITH all_scrolls AS (
   SELECT
     name,
@@ -30,14 +36,14 @@
 scroll_starts AS (
   SELECT
     scroll_id,
-    MIN(ts) AS scroll_start_ts
+    MIN(ts) AS gesture_scroll_begin_ts
   FROM all_scrolls
   WHERE name = 'InputLatency::GestureScrollBegin'
   GROUP BY scroll_id
 ), scroll_ends AS (
   SELECT
     scroll_id,
-    MIN(ts) AS scroll_end_ts
+    MIN(ts) AS gesture_scroll_end_ts
   FROM all_scrolls
   WHERE name = 'InputLatency::GestureScrollEnd'
   GROUP BY scroll_id
@@ -46,8 +52,8 @@
   sa.scroll_id AS id,
   MIN(ts) AS ts,
   CAST(MAX(ts + dur) - MIN(ts) AS INT) AS dur,
-  ss.scroll_start_ts AS scroll_start_ts,
-  se.scroll_end_ts AS scroll_end_ts
+  ss.gesture_scroll_begin_ts AS gesture_scroll_begin_ts,
+  se.gesture_scroll_end_ts AS gesture_scroll_end_ts
 FROM all_scrolls sa
   LEFT JOIN scroll_starts ss ON
     sa.scroll_id = ss.scroll_id
diff --git a/base/tracing/stdlib/chrome/cpu_powerups.sql b/base/tracing/stdlib/chrome/cpu_powerups.sql
index 46183f3..f21ad51 100644
--- a/base/tracing/stdlib/chrome/cpu_powerups.sql
+++ b/base/tracing/stdlib/chrome/cpu_powerups.sql
@@ -100,7 +100,7 @@
 -- @column sched_id    Id for the sched_slice table.
 -- @column utid        Unique id for the thread that ran within the slice.
 -- @column previous_power_state   The CPU's power state before this slice.
-CREATE TABLE chrome_cpu_power_first_sched_slice_after_powerup AS
+CREATE PERFETTO TABLE chrome_cpu_power_first_sched_slice_after_powerup AS
   SELECT
     ts,
     dur,
diff --git a/base/tracing/stdlib/chrome/metadata.sql b/base/tracing/stdlib/chrome/metadata.sql
index 0f40530..a8aa202 100644
--- a/base/tracing/stdlib/chrome/metadata.sql
+++ b/base/tracing/stdlib/chrome/metadata.sql
@@ -5,7 +5,7 @@
 -- Returns hardware class of the device, often use to find device brand
 -- and model.
 -- @ret STRING Hardware class name.
-CREATE PERFETTO FUNCTION CHROME_HARDWARE_CLASS()
+CREATE PERFETTO FUNCTION chrome_hardware_class()
 RETURNS STRING AS
 SELECT
   str_value
diff --git a/base/tracing/stdlib/chrome/perfetto_sql_files.gni b/base/tracing/stdlib/chrome/perfetto_sql_files.gni
new file mode 100644
index 0000000..bebd821
--- /dev/null
+++ b/base/tracing/stdlib/chrome/perfetto_sql_files.gni
@@ -0,0 +1,19 @@
+# Copyright 2023 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# List of files in //base/tracing/stdlib/chrome and subdirectories.
+chrome_stdlib_sql_files = [
+  "chrome_scrolls.sql",
+  "cpu_powerups.sql",
+  "histograms.sql",
+  "metadata.sql",
+  "speedometer.sql",
+  "tasks.sql",
+  "vsync_intervals.sql",
+  "scroll_jank/scroll_jank_intervals.sql",
+  "scroll_jank/scroll_jank_v3_cause.sql",
+  "scroll_jank/scroll_jank_v3.sql",
+  "scroll_jank/scroll_offsets.sql",
+  "scroll_jank/utils.sql",
+]
diff --git a/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_intervals.sql b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_intervals.sql
new file mode 100644
index 0000000..1a44f73
--- /dev/null
+++ b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_intervals.sql
@@ -0,0 +1,160 @@
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+
+INCLUDE PERFETTO MODULE chrome.chrome_scrolls;
+INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
+INCLUDE PERFETTO MODULE common.slices;
+
+-- Selects EventLatency slices that correspond with janks in a scroll. This is
+-- based on the V3 version of scroll jank metrics.
+--
+-- @column id INT                     The slice id.
+-- @column ts INT                     The start timestamp of the slice.
+-- @column dur INT                    The duration of the slice.
+-- @column track_id INT               The track_id for the slice.
+-- @column name STRING                The name of the slice (EventLatency).
+-- @column cause_of_jank STRING       The stage of EventLatency that caused
+--                                    the jank.
+-- @column sub_cause_of_jank STRING   The stage of cause_of_jank that caused the
+--                                    jank.
+-- @column delayed_frame_count INT    How many vsyncs this frame missed its
+--                                    deadline by.
+-- @column frame_jank_ts INT          The start timestamp where frame
+--                                    presentation was delayed.
+-- @column frame_jank_dur INT         The duration in ms of the delay in frame
+--                                    presentation.
+CREATE PERFETTO TABLE chrome_janky_event_latencies_v3 AS
+SELECT
+  s.id,
+  s.ts,
+  s.dur,
+  s.track_id,
+  s.name,
+  e.cause_of_jank,
+  e.sub_cause_of_jank,
+  CAST((e.delay_since_last_frame/e.vsync_interval) - 1 AS INT) AS delayed_frame_count,
+  CAST(s.ts + s.dur - ((e.delay_since_last_frame - e.vsync_interval) * 1e6) AS INT) AS frame_jank_ts,
+  CAST((e.delay_since_last_frame - e.vsync_interval) * 1e6 AS INT) AS frame_jank_dur
+FROM slice s
+JOIN chrome_janky_frames e
+  ON s.id = e.event_latency_id;
+
+-- Frame presentation interval is the delta between when the frame was supposed
+-- to be presented and when it was actually presented.
+--
+-- @column id INT                     Unique id.
+-- @column ts INT                     The start timestamp of the slice.
+-- @column dur INT                    The duration of the slice.
+-- @column delayed_frame_count INT    How many vsyncs this frame missed its
+--                                    deadline by.
+-- @column cause_of_jank STRING       The stage of EventLatency that caused
+--                                    the jank.
+-- @column sub_cause_of_jank STRING   The stage of cause_of_jank that caused the
+--                                    jank.
+-- @column event_latency_id STRING    The id of the associated event latency in
+--                                    the slice table.
+CREATE VIEW chrome_janky_frame_presentation_intervals AS
+SELECT
+  ROW_NUMBER() OVER(ORDER BY frame_jank_ts) AS id,
+  frame_jank_ts AS ts,
+  frame_jank_dur AS dur,
+  delayed_frame_count,
+  cause_of_jank,
+  sub_cause_of_jank,
+  id AS event_latency_id
+FROM chrome_janky_event_latencies_v3;
+
+-- Scroll jank frame presentation stats for individual scrolls.
+--
+-- @column scroll_id INT              Id of the individual scroll.
+-- @column missed_vsyncs INT          The number of missed vsyncs in the scroll.
+-- @column frame_count INT            The number of frames in the scroll.
+-- @column presented_frame_count INT  The number of presented frames in the scroll.
+-- @column janky_frame_count INT      The number of janky frames in the scroll.
+-- @column janky_frame_percent FLOAT  The % of frames that janked in the scroll.
+CREATE VIEW chrome_scroll_stats AS
+WITH vsyncs AS (
+  SELECT
+    COUNT() AS presented_vsync_count,
+    scroll.id AS scroll_id
+  FROM chrome_unique_frame_presentation_ts frame
+  JOIN chrome_scrolls scroll
+    ON frame.presentation_timestamp >= scroll.ts
+    AND frame.presentation_timestamp <= scroll.ts + scroll.dur
+  GROUP BY scroll_id),
+missed_vsyncs AS (
+  SELECT
+    CAST(SUM((delay_since_last_frame / vsync_interval) - 1) AS INT)  AS total_missed_vsyncs,
+    scroll_id
+  FROM chrome_janky_frames
+  GROUP BY scroll_id),
+frame_stats AS (
+  SELECT
+    scroll_id,
+    num_frames AS presented_frame_count,
+    IFNULL(num_janky_frames, 0) AS janky_frame_count,
+    ROUND(IFNULL(scroll_jank_percentage, 0), 2) AS janky_frame_percent
+  FROM chrome_frames_per_scroll
+)
+SELECT
+  vsyncs.scroll_id,
+  presented_vsync_count + IFNULL(total_missed_vsyncs, 0) AS frame_count,
+  total_missed_vsyncs AS missed_vsyncs,
+  presented_frame_count,
+  janky_frame_count,
+  janky_frame_percent
+FROM vsyncs
+LEFT JOIN missed_vsyncs
+  USING (scroll_id)
+LEFT JOIN frame_stats
+  USING (scroll_id);
+
+-- Defines slices for all of janky scrolling intervals in a trace.
+--
+-- @column id            The unique identifier of the janky interval.
+-- @column ts            The start timestamp of the janky interval.
+-- @column dur           The duration of the janky interval.
+CREATE PERFETTO TABLE chrome_scroll_jank_intervals_v3 AS
+-- Sub-table to retrieve all janky slice timestamps. Ordering calculations are
+-- based on timestamps rather than durations.
+WITH janky_latencies AS (
+  SELECT
+    s.frame_jank_ts AS start_ts,
+    s.frame_jank_ts + s.frame_jank_dur AS end_ts
+  FROM chrome_janky_event_latencies_v3 s),
+-- Determine the local maximum timestamp for janks thus far; this will allow
+-- us to coalesce all earlier events up to the maximum.
+ordered_jank_end_ts AS (
+  SELECT
+    *,
+    MAX(end_ts) OVER (
+      ORDER BY start_ts ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
+      AS max_end_ts_so_far
+  FROM janky_latencies),
+-- Determine the local minimum timestamp for janks thus far; this will allow
+-- us to coalesce all later events up to the nearest local maximum.
+range_starts AS (
+  SELECT
+    *,
+    CASE
+      -- This is a two-pass calculation to calculate the first event in the
+      -- group. An event is considered the first event in a group if all events
+      -- which started before it also finished before the current one started.
+      WHEN start_ts <= 1 + LAG(max_end_ts_so_far) OVER (ORDER BY start_ts) THEN 0
+      ELSE 1
+    END AS range_start
+  FROM ordered_jank_end_ts),
+-- Assign an id to allow coalescing of individual slices.
+range_groups AS (
+  SELECT
+    *,
+    SUM(range_start) OVER (ORDER BY start_ts) AS range_group
+  FROM range_starts)
+-- Coalesce all slices within an interval.
+SELECT
+  range_group AS id,
+  MIN(start_ts) AS ts,
+  MAX(end_ts) - MIN(start_ts) AS dur
+FROM range_groups
+GROUP BY range_group;
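A hedged usage sketch of the per-scroll stats view defined above; the module name is inferred from this file's path, and the column names come from the view's documentation:

    INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_intervals;
    SELECT scroll_id, frame_count, missed_vsyncs, janky_frame_count, janky_frame_percent
    FROM chrome_scroll_stats
    ORDER BY janky_frame_percent DESC;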
diff --git a/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql
new file mode 100644
index 0000000..4116e63
--- /dev/null
+++ b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql
@@ -0,0 +1,446 @@
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+
+INCLUDE PERFETTO MODULE common.slices;
+
+-- Hardware info is useful when using sql metrics for analysis
+-- in BTP.
+INCLUDE PERFETTO MODULE chrome.metadata;
+INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3_cause;
+
+-- Grabs all gesture updates with respective scroll ids and start/end
+-- timestamps, regardless of being coalesced.
+--
+-- @column ts                       The start timestamp of the scroll.
+-- @column dur                      The duration of the scroll.
+-- @column id                       Slice id for the scroll.
+-- @column scroll_update_id         The id of the scroll update event.
+-- @column scroll_id                The id of the scroll.
+-- @column is_coalesced             Whether this input event was coalesced.
+CREATE PERFETTO TABLE chrome_gesture_scroll_updates AS
+SELECT
+  ts,
+  dur,
+  id,
+  -- TODO(b/250089570) Add trace_id to EventLatency and update this script to use it.
+  EXTRACT_ARG(arg_set_id, 'chrome_latency_info.trace_id') AS scroll_update_id,
+  EXTRACT_ARG(arg_set_id, 'chrome_latency_info.gesture_scroll_id') AS scroll_id,
+  EXTRACT_ARG(arg_set_id, 'chrome_latency_info.is_coalesced') AS is_coalesced
+FROM slice
+WHERE name = "InputLatency::GestureScrollUpdate" AND dur != -1;
+
+CREATE PERFETTO TABLE internal_non_coalesced_gesture_scrolls AS
+SELECT
+  id,
+  ts,
+  dur,
+  scroll_update_id,
+  scroll_id
+FROM  chrome_gesture_scroll_updates
+WHERE is_coalesced = false
+ORDER BY ts ASC;
+
+-- Scroll updates, corresponding to all input events that were converted to a
+-- presented scroll update.
+--
+-- @column id                       Minimum slice id for input presented in this
+--                                  frame, the non coalesced input.
+-- @column ts                       The start timestamp for producing the frame.
+-- @column dur                      The duration between producing and
+--                                  presenting the frame.
+-- @column last_coalesced_input_ts  The timestamp of the last input that arrived
+--                                  and got coalesced into the frame.
+-- @column scroll_update_id         The id of the scroll update event, a unique
+--                                  identifier to the gesture.
+-- @column scroll_id                The id of the ongoing scroll.
+CREATE PERFETTO TABLE chrome_presented_gesture_scrolls AS
+WITH
+scroll_updates_with_coalesce_info as MATERIALIZED (
+  SELECT
+    id,
+    ts,
+    -- For each scroll update, find the latest non-coalesced update which
+    -- happened before it. For coalesced scroll updates, this will be the
+    -- presented scroll update they have been coalesced into.
+    (
+      SELECT id
+      FROM internal_non_coalesced_gesture_scrolls non_coalesced
+      WHERE non_coalesced.ts <= scroll_update.ts
+      ORDER BY ts DESC
+      LIMIT 1
+     ) as coalesced_to_scroll_update_slice_id
+  FROM chrome_gesture_scroll_updates scroll_update
+  ORDER BY coalesced_to_scroll_update_slice_id, ts
+)
+SELECT
+  id,
+  ts,
+  dur,
+  -- Find the latest input that was coalesced into this scroll update.
+  (
+    SELECT coalesce_info.ts
+    FROM scroll_updates_with_coalesce_info coalesce_info
+    WHERE
+      coalesce_info.coalesced_to_scroll_update_slice_id =
+        internal_non_coalesced_gesture_scrolls.id
+    ORDER BY ts DESC
+    LIMIT 1
+  ) as last_coalesced_input_ts,
+  scroll_update_id,
+  scroll_id
+FROM internal_non_coalesced_gesture_scrolls;
+
+-- Associate every trace_id with its perceived delta_y on the screen after
+-- prediction.
+--
+-- @column scroll_update_id         The id of the scroll update event.
+-- @column delta_y                  The perceived delta_y on the screen post
+--                                  prediction.
+CREATE PERFETTO TABLE chrome_scroll_updates_with_deltas AS
+SELECT
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.trace_id') AS scroll_update_id,
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.provided_to_compositor_delta_y') AS delta_y
+FROM slice
+WHERE name = "InputHandlerProxy::HandleGestureScrollUpdate_Result";
+
+-- Extract event latency timestamps, to later use them for joining
+-- with gesture scroll updates, as event latencies don't have trace
+-- ids associated with them.
+--
+-- @column ts                           Start timestamp for the EventLatency.
+-- @column event_latency_id             Slice id of the EventLatency.
+-- @column dur                          Duration of the EventLatency.
+-- @column input_latency_end_ts         End timestamp for input aka the
+--                                      timestamp of the LatchToSwapEnd
+--                                      substage.
+-- @column presentation_timestamp       Frame presentation timestamp aka the
+--                                      timestamp of the
+--                                      SwapEndToPresentationCompositorFrame
+--                                      substage.
+-- @column event_type                   EventLatency event type.
+CREATE PERFETTO TABLE chrome_gesture_scroll_event_latencies AS
+SELECT
+  slice.ts,
+  slice.id AS event_latency_id,
+  slice.dur AS dur,
+  descendant_slice_end(slice.id, "LatchToSwapEnd") AS input_latency_end_ts,
+  descendant_slice_end(slice.id, "SwapEndToPresentationCompositorFrame") AS presentation_timestamp,
+  EXTRACT_ARG(arg_set_id, 'event_latency.event_type') AS event_type
+FROM slice
+WHERE name = "EventLatency"
+      AND event_type in (
+          "GESTURE_SCROLL_UPDATE",
+          "FIRST_GESTURE_SCROLL_UPDATE",
+          "INERTIAL_GESTURE_SCROLL_UPDATE")
+      AND has_descendant_slice_with_name(slice.id, "SwapEndToPresentationCompositorFrame");
+
+-- Join presented gesture scrolls with their respective event
+-- latencies based on |LatchToSwapEnd| timestamp, as it's the
+-- end timestamp for both the gesture scroll update slice and
+-- the LatchToSwapEnd slice.
+--
+-- @column id                           ID of the frame.
+-- @column ts                           Start timestamp of the frame.
+-- @column last_coalesced_input_ts      The timestamp of the last coalesced
+--                                      input.
+-- @column scroll_id                    ID of the associated scroll.
+-- @column scroll_update_id             ID of the associated scroll update.
+-- @column event_latency_id             ID of the associated EventLatency.
+-- @column dur                          Duration of the associated EventLatency.
+-- @column presentation_timestamp       Frame presentation timestamp.
+CREATE PERFETTO TABLE chrome_full_frame_view AS
+SELECT
+  frames.id,
+  frames.ts,
+  frames.last_coalesced_input_ts,
+  frames.scroll_id,
+  frames.scroll_update_id,
+  events.event_latency_id,
+  events.dur,
+  events.presentation_timestamp
+FROM chrome_presented_gesture_scrolls frames
+JOIN chrome_gesture_scroll_event_latencies events
+  ON frames.ts = events.ts
+  AND events.input_latency_end_ts = (frames.ts + frames.dur);
+
+-- Join deltas with EventLatency data.
+--
+-- @column id                           ID of the frame.
+-- @column ts                           Start timestamp of the frame.
+-- @column scroll_id                    ID of the associated scroll.
+-- @column scroll_update_id             ID of the associated scroll update.
+-- @column last_coalesced_input_ts      The timestamp of the last coalesced
+--                                      input.
+-- @column delta_y                      The perceived delta_y on the screen post
+--                                      prediction.
+-- @column event_latency_id             ID of the associated EventLatency.
+-- @column dur                          Duration of the associated EventLatency.
+-- @column presentation_timestamp       Frame presentation timestamp.
+CREATE PERFETTO TABLE chrome_full_frame_delta_view AS
+SELECT
+  frames.id,
+  frames.ts,
+  frames.scroll_id,
+  frames.scroll_update_id,
+  frames.last_coalesced_input_ts,
+  deltas.delta_y,
+  frames.event_latency_id,
+  frames.dur,
+  frames.presentation_timestamp
+FROM chrome_full_frame_view frames
+LEFT JOIN chrome_scroll_updates_with_deltas deltas
+  ON deltas.scroll_update_id = frames.scroll_update_id;
+
+-- Group all gestures presented at the same timestamp together in
+-- a single row.
+--
+-- @column id                           ID of the frame.
+-- @column max_start_ts                 The timestamp of the last coalesced
+--                                      input.
+-- @column min_start_ts                 The earliest frame start timestamp.
+-- @column scroll_id                    ID of the associated scroll.
+-- @column scroll_update_id             ID of the associated scroll update.
+-- @column encapsulated_scroll_ids      All scroll updates associated with the
+--                                      frame presentation timestamp.
+-- @column total_delta                  Sum of all perceived delta_y values at
+--                                      the frame presentation timestamp.
+-- @column segregated_delta_y           Lists all of the perceived delta_y
+--                                      values at the frame presentation
+--                                      timestamp.
+-- @column event_latency_id             ID of the associated EventLatency.
+-- @column dur                          Maximum duration of the associated
+--                                      EventLatency.
+-- @column presentation_timestamp       Frame presentation timestamp.
+CREATE VIEW chrome_merged_frame_view AS
+SELECT
+  id,
+  MAX(last_coalesced_input_ts) AS max_start_ts,
+  MIN(ts) AS min_start_ts,
+  scroll_id,
+  scroll_update_id,
+  GROUP_CONCAT(scroll_update_id,',') AS encapsulated_scroll_ids,
+  SUM(delta_y) AS total_delta,
+  GROUP_CONCAT(delta_y, ',') AS segregated_delta_y,
+  event_latency_id,
+  MAX(dur) AS dur,
+  presentation_timestamp
+FROM chrome_full_frame_delta_view
+GROUP BY presentation_timestamp
+ORDER BY presentation_timestamp;
+
+-- View contains all Chrome presented frames during gesture updates,
+-- along with the delay since the last presented frame, which should
+-- usually equal |VSYNC_INTERVAL| if no jank is present.
+--
+-- @column id                      gesture scroll slice id.
+-- @column min_start_ts            OS timestamp of the first touch move arrival
+--                                 within a frame.
+-- @column max_start_ts            OS timestamp of the last touch move arrival
+--                                 within a frame.
+-- @column scroll_id               The scroll which the touch belongs to.
+-- @column encapsulated_scroll_ids Trace ids of all frames presented at this
+--                                 vsync.
+-- @column total_delta             Summation of all delta_y of all gesture
+--                                 scrolls in this frame.
+-- @column segregated_delta_y      All delta_y values of all gesture scrolls,
+--                                 comma separated; summing those gives
+--                                 |total_delta|.
+-- @column event_latency_id        Event latency id of the presented frame.
+-- @column dur                     Duration of the EventLatency.
+-- @column presentation_timestamp  Timestamp at which the frame was shown on the
+--                                 screen.
+-- @column delay_since_last_frame  Time elapsed since the previous frame was
+--                                 presented, usually equals |VSYNC| if no frame
+--                                 drops happened.
+-- @column delay_since_last_input  Difference in OS timestamps of inputs in the
+--                                 current and the previous frame.
+-- @column prev_event_latency_id   The event latency id that will be used as a
+--                                 reference to determine the jank cause.
+CREATE VIEW chrome_frame_info_with_delay AS
+SELECT
+  *,
+  (presentation_timestamp -
+  LAG(presentation_timestamp, 1, presentation_timestamp)
+  OVER (PARTITION BY scroll_id ORDER BY presentation_timestamp)) / 1e6 AS delay_since_last_frame,
+  (min_start_ts -
+  LAG(max_start_ts, 1, min_start_ts)
+  OVER (PARTITION BY scroll_id ORDER BY min_start_ts)) / 1e6 AS delay_since_last_input,
+  LAG(event_latency_id, 1, -1) OVER (PARTITION BY scroll_id ORDER BY min_start_ts) AS prev_event_latency_id
+FROM chrome_merged_frame_view;
+
+-- Calculate |VSYNC_INTERVAL| as the lowest delay between frames larger than
+-- zero.
+-- TODO(b/286222128): Emit this data from Chrome instead of calculating it.
+--
+-- @column vsync_interval           The lowest delay between frames larger than
+--                                  zero.
+CREATE VIEW chrome_vsyncs AS
+SELECT
+  MIN(delay_since_last_frame) AS vsync_interval
+FROM chrome_frame_info_with_delay
+WHERE delay_since_last_frame > 0;
+
+-- Filter the frame view only to frames that had missed vsyncs.
+--
+-- @column delay_since_last_frame Time elapsed since the previous frame was
+--                                presented, will be more than |VSYNC| in this
+--                                view.
+-- @column event_latency_id       Event latency id of the presented frame.
+-- @column vsync_interval         Vsync interval at the time of recording the
+--                                trace.
+-- @column hardware_class         Device brand and model.
+-- @column scroll_id              The scroll corresponding to this frame.
+-- @column prev_event_latency_id  The event latency id that will be used as a
+--                                reference to determine the jank cause.
+CREATE VIEW chrome_janky_frames_no_cause AS
+SELECT
+  delay_since_last_frame,
+  event_latency_id,
+  (SELECT vsync_interval FROM chrome_vsyncs) AS vsync_interval,
+  chrome_hardware_class() AS hardware_class,
+  scroll_id,
+  prev_event_latency_id
+FROM chrome_frame_info_with_delay
+WHERE delay_since_last_frame > (select vsync_interval + vsync_interval / 2 from chrome_vsyncs)
+      AND delay_since_last_input < (select vsync_interval + vsync_interval / 2 from chrome_vsyncs);
+
+-- Janky frame information including the jank cause.
+-- @column delay_since_last_frame Time elapsed since the previous frame was
+--                                presented, will be more than |VSYNC| in this
+--                                view.
+-- @column event_latency_id       Event latency id of the presented frame.
+-- @column vsync_interval         Vsync interval at the time of recording the
+--                                trace.
+-- @column hardware_class         Device brand and model.
+-- @column scroll_id              The scroll corresponding to this frame.
+-- @column prev_event_latency_id  The event latency id that will be used as a
+--                                reference to determine the jank cause.
+-- @column cause_id               Id of the slice corresponding to the offending stage.
+CREATE VIEW chrome_janky_frames_no_subcause AS
+SELECT
+  *,
+  get_v3_jank_cause_id(event_latency_id, prev_event_latency_id) AS cause_id
+FROM chrome_janky_frames_no_cause;
+
+-- Finds all causes of jank for all janky frames, and a cause of sub jank
+-- if the cause of jank was GPU related.
+--
+-- @column cause_of_jank          The reason the Vsync was missed.
+-- @column sub_cause_of_jank      Further breakdown if the root cause was GPU
+--                                related.
+-- @column delay_since_last_frame Time elapsed since the previous frame was
+--                                presented, will be more than |VSYNC| in this
+--                                view.
+-- @column event_latency_id       Event latency id of the presented frame.
+-- @column vsync_interval         Vsync interval at the time of recording the
+--                                trace.
+-- @column hardware_class         Device brand and model.
+-- @column scroll_id              The scroll corresponding to this frame.
+CREATE VIEW chrome_janky_frames AS
+SELECT
+  slice_name_from_id(cause_id) AS cause_of_jank,
+  slice_name_from_id(
+    -- Getting sub-cause
+    get_v3_jank_cause_id(
+      -- Here the cause itself is the parent.
+      cause_id,
+      -- Get the previous cause id as a child to the previous |EventLatency|.
+     (SELECT
+      id
+      FROM slice
+      WHERE name = slice_name_from_id(cause_id)
+        AND parent_id = prev_event_latency_id)
+    )) AS sub_cause_of_jank,
+  delay_since_last_frame,
+  event_latency_id,
+  vsync_interval,
+  hardware_class,
+  scroll_id
+FROM chrome_janky_frames_no_subcause;
+
+-- Counting all unique frame presentation timestamps.
+--
+-- @column presentation_timestamp     The unique frame presentation timestamp.
+CREATE VIEW chrome_unique_frame_presentation_ts AS
+SELECT DISTINCT
+presentation_timestamp
+FROM chrome_gesture_scroll_event_latencies;
+
+-- Dividing missed frames over total frames to get janky frame percentage.
+-- This represents the v3 scroll jank metrics.
+-- Reflects Event.Jank.DelayedFramesPercentage UMA metric.
+--
+-- @column delayed_frame_percentage       The percent of missed frames relative
+--                                        to total frames - aka the percent of
+--                                        janky frames.
+CREATE VIEW chrome_janky_frames_percentage AS
+SELECT
+(SELECT
+  COUNT()
+ FROM chrome_janky_frames) * 1.0
+/ (SELECT
+    COUNT()
+  FROM chrome_unique_frame_presentation_ts) * 100 AS delayed_frame_percentage;
+
+-- Number of frames and janky frames per scroll.
+--
+-- @column scroll_id                  The ID of the scroll.
+-- @column num_frames                 The number of frames in the scroll.
+-- @column num_janky_frames           The number of delayed/janky frames.
+-- @column scroll_jank_percentage     The percentage of janky frames relative to
+--                                    total frames.
+CREATE VIEW chrome_frames_per_scroll AS
+WITH
+  frames AS (
+    SELECT scroll_id, COUNT(*) AS num_frames
+    FROM
+      chrome_frame_info_with_delay
+    GROUP BY scroll_id
+  ),
+  janky_frames AS (
+    SELECT scroll_id, COUNT(*) AS num_janky_frames
+    FROM
+      chrome_janky_frames
+    GROUP BY scroll_id
+  )
+SELECT
+  frames.scroll_id AS scroll_id,
+  frames.num_frames AS num_frames,
+  janky_frames.num_janky_frames AS num_janky_frames,
+  100.0 * janky_frames.num_janky_frames / frames.num_frames
+    AS scroll_jank_percentage
+FROM frames
+LEFT JOIN janky_frames
+  ON frames.scroll_id = janky_frames.scroll_id;
+
+-- Scroll jank causes per scroll.
+--
+-- @column scroll_id                   The ID of the scroll.
+-- @column max_delay_since_last_frame  The maximum time a frame was delayed
+--                                     after the presentation of the previous
+--                                     frame.
+-- @column vsync_interval              The expected vsync interval.
+-- @column scroll_jank_causes          A proto amalgamation of each scroll
+--                                     jank cause including cause name, sub
+--                                     cause and the duration of the delay
+--                                     since the previous frame was presented.
+CREATE VIEW chrome_causes_per_scroll AS
+SELECT
+  scroll_id,
+  MAX(1.0 * delay_since_last_frame / vsync_interval)
+    AS max_delay_since_last_frame,
+  -- MAX does not matter, since `vsync_interval` is computed as the
+  -- same value for a single trace.
+  MAX(vsync_interval) AS vsync_interval,
+  RepeatedField(
+    ChromeScrollJankV3_Scroll_ScrollJankCause(
+      'cause',
+      cause_of_jank,
+      'sub_cause',
+      sub_cause_of_jank,
+      'delay_since_last_frame',
+      1.0 * delay_since_last_frame / vsync_interval))
+    AS scroll_jank_causes
+FROM
+  chrome_janky_frames
+GROUP BY scroll_id;
\ No newline at end of file
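As a hedged example of what the definitions above imply: with a computed vsync_interval of roughly 16.7 ms, a frame counts as janky once delay_since_last_frame exceeds about 25 ms (1.5x the interval). A minimal query over the resulting table, using the module name already used in the includes above:

    INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
    SELECT cause_of_jank, COUNT(*) AS janky_frames
    FROM chrome_janky_frames
    GROUP BY cause_of_jank
    ORDER BY janky_frames DESC;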
diff --git a/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3_cause.sql b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3_cause.sql
new file mode 100644
index 0000000..9e07128
--- /dev/null
+++ b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3_cause.sql
@@ -0,0 +1,55 @@
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+
+-- Helper functions for scroll_jank_v3 metric computation.
+
+INCLUDE PERFETTO MODULE common.slices;
+
+
+-- Given two slice ids A and B, find the maximum difference between the
+-- durations of their direct children with matching names. For example, if
+-- slice A has children named (X, Y, Z) with durations of (10, 10, 5) and
+-- slice B has children named (X, Y) with durations of (9, 9), the function
+-- returns the slice id of A's child named Z, as no matching slice named Z was
+-- found under B, making 5 - 0 = 5 the maximum delta between the slices' direct
+-- children.
+--
+-- @arg janky_slice_id LONG The slice id of the parent slice whose children we
+--                          want to search for the jank cause.
+-- @arg prev_slice_id  LONG The slice id of the parent slice that serves as the
+--                          reference in comparison to |janky_slice_id|.
+-- @ret breakdown_id   LONG The slice id of the breakdown that has the maximum
+--                          duration delta.
+CREATE PERFETTO FUNCTION get_v3_jank_cause_id(
+  janky_slice_id LONG,
+  prev_slice_id LONG
+)
+RETURNS LONG AS
+WITH
+  current_breakdowns AS (
+    SELECT
+      *
+    FROM direct_children_slice($janky_slice_id)
+  ),
+  prev_breakdowns AS (
+    SELECT
+      *
+    FROM direct_children_slice($prev_slice_id)
+  ),
+  joint_breakdowns AS (
+    SELECT
+      cur.id AS breakdown_id,
+      (cur.dur - COALESCE(prev.dur, 0)) AS breakdown_delta
+    FROM current_breakdowns cur
+    LEFT JOIN prev_breakdowns prev ON
+      cur.name = prev.name
+  ),
+  max_breakdown AS (
+    SELECT
+      MAX(breakdown_delta) AS breakdown_delta,
+      breakdown_id
+    FROM joint_breakdowns
+  )
+  SELECT
+    breakdown_id
+  FROM max_breakdown;
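A minimal usage sketch for the function above; the two slice ids are hypothetical and would normally come from adjacent EventLatency slices, as in scroll_jank_v3.sql:

    SELECT get_v3_jank_cause_id(1234, 1200) AS cause_id;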
diff --git a/base/tracing/stdlib/chrome/scroll_jank/scroll_offsets.sql b/base/tracing/stdlib/chrome/scroll_jank/scroll_offsets.sql
new file mode 100644
index 0000000..80fba04
--- /dev/null
+++ b/base/tracing/stdlib/chrome/scroll_jank/scroll_offsets.sql
@@ -0,0 +1,210 @@
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+
+-- This file creates two public views:
+--     - chrome_scroll_input_offsets and
+--     - chrome_presented_scroll_offsets
+--
+-- These views store the pixel deltas and offsets for (respectively) all chrome
+-- scroll inputs (coalesced and not coalesced), and for chrome presented frames
+-- (not coalesced), along with the associated timestamps and ids.
+--
+-- Raw deltas are recorded as changes in pixel positions along the y-axis of a
+-- screen, and are scaled to the viewport size. The corresponding trace event
+-- for this is TranslateAndScaleWebInputEvent. These are the deltas for all
+-- chrome scroll inputs.
+--
+-- For presented frames, the delta is calculated from the visual offset,
+-- recorded once the input has been processed, in the
+-- InputHandlerProxy::HandleGestureScrollUpdate_Result event. These values are
+-- also scaled to the screen size.
+--
+-- Offsets are calculated by summing all of the deltas, ordered by timestamp.
+-- For a given input/frame, the offset is the sum of its corresponding delta and
+-- all previous deltas.
+--
+-- All values required for calculating deltas and offsets are recorded at
+-- various stages of input processing, and are unified by a single
+-- scroll_update_id value, recorded as scroll_deltas.trace_id in each event.
+
+INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
+
+-- Non-coalesced scroll update events and their timestamps.
+CREATE VIEW internal_non_coalesced_scrolls AS
+SELECT
+  scroll_update_id,
+  ts
+FROM chrome_gesture_scroll_updates
+WHERE is_coalesced = False;
+
+-- All (coalesced and non-coalesced) vertical scrolling deltas and their
+-- associated scroll ids. Delta values are recorded after being scaled to the
+-- device's screen size in the TranslateAndScaleWebInputEvent trace event. In
+-- this trace event, the deltas recorded represent the true (read "original")
+-- values that the Browser receives from Android, and the only processing is
+-- scaling and translation.
+CREATE PERFETTO TABLE internal_scroll_deltas AS
+SELECT
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.trace_id') AS scroll_update_id,
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.original_delta_y') AS delta_y,
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.original_delta_y') IS NOT NULL AS is_coalesced
+FROM slice
+WHERE name = "TranslateAndScaleWebInputEvent";
+
+-- Associate the raw (original) deltas (internal_scroll_deltas) with the
+-- corresponding non-coalesced scroll updates (internal_non_coalesced_scrolls)
+-- to get the timestamp of the event each delta belongs to. This allows delta
+-- recordings to be ordered and tracked over time.
+CREATE VIEW internal_non_coalesced_deltas AS
+SELECT
+  scroll_update_id,
+  ts,
+  delta_y
+FROM internal_non_coalesced_scrolls
+INNER JOIN internal_scroll_deltas
+  USING (scroll_update_id);
+
+-- Select information for scroll update events that have been coalesced,
+-- including the timestamp and the specific event (scroll update id) each was
+-- coalesced into. Delta recordings need to be associated with the timestamp
+-- of the scroll update they were coalesced into.
+CREATE PERFETTO TABLE internal_scroll_update_coalesce_info AS
+SELECT
+  ts,
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.coalesced_to_trace_id') AS coalesced_to_scroll_update_id,
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.trace_id') AS scroll_update_id
+FROM slice
+WHERE name = "WebCoalescedInputEvent::CoalesceWith" AND
+  coalesced_to_scroll_update_id IS NOT NULL;
+
+-- Associate the raw (original) deltas (internal_scroll_deltas) with the
+-- corresponding coalesced scroll updates (internal_scroll_update_coalesce_info)
+-- to get the timestamp of the event those deltas were coalesced into. This
+-- allows us to get the scaled coordinates for all of the input events
+-- (original input coordinates can't be used due to scaling).
+CREATE VIEW internal_coalesced_deltas AS
+SELECT
+  internal_scroll_update_coalesce_info.coalesced_to_scroll_update_id AS scroll_update_id,
+  ts,
+  internal_scroll_deltas.delta_y AS delta_y,
+  TRUE AS is_coalesced
+FROM internal_scroll_update_coalesce_info
+LEFT JOIN internal_scroll_deltas
+  USING (scroll_update_id);
+
+-- All of the presented frame scroll update ids.
+-- @column id                        Slice id of the presented frame.
+-- @column scroll_update_id          A scroll update id that was included in the
+--                                   presented frame. There may be zero, one, or
+--                                   more.
+CREATE VIEW chrome_deltas_presented_frame_scroll_update_ids AS
+SELECT
+  args.int_value AS scroll_update_id,
+  slice.id
+FROM args
+LEFT JOIN slice
+  USING (arg_set_id)
+WHERE slice.name = 'PresentedFrameInformation'
+AND args.flat_key GLOB 'scroll_deltas.trace_ids_in_gpu_frame*';
+
+-- When every GestureScrollUpdate event is processed, the offset set by the
+-- compositor is recorded. This offset is scaled to the device screen size, and
+-- can be used to calculate deltas.
+CREATE VIEW internal_presented_frame_offsets AS
+SELECT
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.trace_id') AS scroll_update_id,
+  EXTRACT_ARG(arg_set_id, 'scroll_deltas.visual_offset_y') AS visual_offset_y
+FROM slice
+WHERE name = 'InputHandlerProxy::HandleGestureScrollUpdate_Result';
+
+-- The raw coordinates and pixel offsets for all input events which were part of
+-- a scroll. This includes input events that were converted to scroll events
+-- which were presented (internal_non_coalesced_scrolls) and scroll events which
+-- were coalesced (internal_coalesced_deltas).
+--
+-- @column scroll_update_id          Trace Id associated with the scroll.
+-- @column ts                        Timestamp of the scroll input event.
+-- @column delta_y                   The delta in raw coordinates between this
+--                                   scroll update event and the previous.
+-- @column offset_y                  The pixel offset of this scroll update
+--                                   event compared to the previous one.
+CREATE PERFETTO TABLE chrome_scroll_input_offsets AS
+-- First collect all coalesced and non-coalesced deltas so that the offsets
+-- can be calculated from them in order of timestamp.
+WITH all_deltas AS (
+  SELECT
+    scroll_update_id,
+    ts,
+    delta_y
+  FROM internal_non_coalesced_deltas
+  WHERE delta_y IS NOT NULL
+  UNION
+  SELECT
+    scroll_update_id,
+    ts,
+    delta_y
+  FROM internal_coalesced_deltas
+  WHERE delta_y IS NOT NULL
+  ORDER BY scroll_update_id, ts)
+SELECT
+  scroll_update_id,
+  ts,
+  delta_y,
+  SUM(IFNULL(delta_y, 0)) OVER (
+    ORDER BY scroll_update_id, ts
+    ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS offset_y
+FROM all_deltas;
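+
+-- Worked example (illustrative only, not part of this module): with deltas of
+-- (5, -3, 4) ordered by timestamp, the window SUM above yields offsets of
+-- (5, 2, 6); each offset is the running sum of all deltas up to that row.
+-- The trajectory can then be inspected with a query such as:
+--
+--   SELECT ts, delta_y, offset_y
+--   FROM chrome_scroll_input_offsets
+--   ORDER BY ts;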
+
+-- Calculate the total visual offset for all presented frames (non-coalesced
+-- scroll updates) that have raw deltas recorded. These visual offsets
+-- correspond with the inverse of the deltas for the presented frame.
+CREATE VIEW internal_preprocessed_presented_frame_offsets AS
+SELECT
+  internal_non_coalesced_scrolls.scroll_update_id,
+  internal_non_coalesced_scrolls.ts,
+  chrome_deltas_presented_frame_scroll_update_ids.id,
+  internal_presented_frame_offsets.visual_offset_y -
+    LAG(internal_presented_frame_offsets.visual_offset_y)
+    OVER (ORDER BY internal_non_coalesced_scrolls.ts) AS presented_frame_visual_offset_y
+FROM internal_non_coalesced_scrolls
+LEFT JOIN internal_scroll_deltas
+  USING (scroll_update_id)
+LEFT JOIN chrome_deltas_presented_frame_scroll_update_ids
+  USING (scroll_update_id)
+LEFT JOIN internal_presented_frame_offsets
+  USING (scroll_update_id)
+WHERE internal_scroll_deltas.delta_y IS NOT NULL;
+
+-- The scrolling offsets for the actual (applied) scroll events. These are not
+-- necessarily inclusive of all user scroll events, but rather only those
+-- scroll events that were actually processed.
+--
+-- @column scroll_update_id          Trace Id associated with the scroll.
+-- @column ts                        Presentation timestamp.
+-- @column delta_y                   The delta in coordinates as processed by
+--                                   Chrome between this scroll update event and
+--                                   the previous.
+-- @column offset_y                  The pixel offset of this scroll update (the
+--                                   presented frame) compared to the previous
+--                                   one.
+CREATE PERFETTO TABLE chrome_presented_scroll_offsets AS
+WITH all_deltas AS (
+  SELECT
+    scroll_update_id,
+    id,
+    MAX(ts) AS ts,
+    SUM(presented_frame_visual_offset_y) * -1 AS delta_y
+  FROM internal_preprocessed_presented_frame_offsets
+  GROUP BY id
+  ORDER BY ts)
+SELECT
+  scroll_update_id,
+  ts,
+  delta_y,
+  SUM(IFNULL(delta_y, 0)) OVER (
+    ORDER BY scroll_update_id, ts
+    ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS offset_y
+FROM all_deltas;
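+
+-- Illustrative usage only (not part of this module): a sketch comparing input
+-- and presented offsets for the same scroll updates.
+--
+--   SELECT
+--     inp.scroll_update_id,
+--     inp.offset_y AS input_offset_y,
+--     pres.offset_y AS presented_offset_y
+--   FROM chrome_scroll_input_offsets inp
+--   JOIN chrome_presented_scroll_offsets pres
+--     USING (scroll_update_id);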
diff --git a/base/tracing/stdlib/chrome/scroll_jank/utils.sql b/base/tracing/stdlib/chrome/scroll_jank/utils.sql
new file mode 100644
index 0000000..b307cdd
--- /dev/null
+++ b/base/tracing/stdlib/chrome/scroll_jank/utils.sql
@@ -0,0 +1,105 @@
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+--
+-- These are helper functions used in computing jank metrics.
+
+-- This function takes the timestamps of two consecutive frames and determines
+-- if a frame is janky, i.e. delayed by more than 0.5 of a frame. A small
+-- tolerance is added so that the comparison does not misclassify ratios that
+-- are precisely 0.5 but fall a little above or below the exact value due to
+-- the inherent inaccuracy of floating-point operations. The value 1e-9 has
+-- been chosen as follows: the ratio has nanoseconds in the numerator and the
+-- VSync interval in the denominator. Assuming a refresh rate of more than
+-- 1 FPS (and therefore a VSync interval of less than a second), this ratio
+-- increases in increments of more than the minimal value in the numerator
+-- (1 ns) divided by the maximum value in the denominator, giving 1e-9.
+
+-- Function: takes the scroll ids of two frames to verify they belong to the
+-- same scroll, checks that the neighbour frame's timestamp occurred within
+-- the current gesture's begin/end timestamps, and computes whether the
+-- current frame was janky.
+CREATE PERFETTO FUNCTION is_janky_frame(cur_gesture_id LONG,
+                                      neighbour_gesture_id LONG,
+                                      neighbour_ts LONG,
+                                      cur_gesture_begin_ts LONG,
+                                      cur_gesture_end_ts LONG,
+                                      cur_frame_exact FLOAT,
+                                      neighbour_frame_exact FLOAT)
+-- Returns true if the frame was janky, false otherwise
+RETURNS BOOL AS
+SELECT
+    CASE WHEN
+      $cur_gesture_id != $neighbour_gesture_id OR
+      $neighbour_ts IS NULL OR
+      $neighbour_ts < $cur_gesture_begin_ts OR
+      $neighbour_ts > $cur_gesture_end_ts THEN
+        FALSE ELSE
+        $cur_frame_exact > $neighbour_frame_exact + 0.5 + 1e-9
+    END;
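+
+-- Worked example (illustrative only, not part of this module): with both
+-- frames in the same gesture and the neighbour timestamp inside the gesture
+-- bounds, a frame with cur_frame_exact = 2.1 and neighbour_frame_exact = 1.5
+-- is janky, since 2.1 > 1.5 + 0.5 + 1e-9. The timestamps below are
+-- placeholders.
+--
+--   SELECT is_janky_frame(1, 1, 100, 0, 1000, 2.1, 1.5);  -- TRUE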
+
+-- Function: takes cur_frame_exact, prev_frame_exact and next_frame_exact and
+-- returns the jank budget of the current frame.
+--
+-- The jank budget is the minimum amount (in frames) by which the frame
+-- duration needs to be reduced for it to no longer be considered janky.
+--
+-- Returns the jank budget as a fraction of the vsync interval (e.g. 0.75).
+CREATE PERFETTO FUNCTION jank_budget(
+  cur_frame_exact FLOAT,
+  prev_frame_exact FLOAT,
+  next_frame_exact FLOAT
+)
+RETURNS FLOAT AS
+-- We determine the difference between the frame count of the current frame
+-- and each of its consecutive frames by subtracting the frame_exact values.
+-- We null check because the neighbour frame count can be null for the first
+-- and last frames.
+--
+-- A frame is considered janky if the difference in frame count with an
+-- adjacent frame is greater than 0.5 (half a vsync), so the frame duration
+-- needs to be reduced by at least that difference minus 0.5 for it to no
+-- longer be janky. We subtract 1e-9 because we want to output the minimum
+-- amount required.
+SELECT
+  COALESCE(
+    -- Could be null if next or previous is null.
+    MAX(
+      ($cur_frame_exact - $prev_frame_exact),
+      ($cur_frame_exact - $next_frame_exact)
+    ),
+    -- If one of them is null output the first non-null.
+    ($cur_frame_exact - $prev_frame_exact),
+    ($cur_frame_exact - $next_frame_exact)
+    -- Otherwise return null
+  ) - 0.5 - 1e-9;
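+
+-- Worked example (illustrative only, not part of this module): for
+-- cur_frame_exact = 2.0, prev_frame_exact = 1.0 and next_frame_exact = 1.2,
+-- the maximum difference is MAX(1.0, 0.8) = 1.0, so the budget is
+-- 1.0 - 0.5 - 1e-9, i.e. just under half a vsync.
+--
+--   SELECT jank_budget(2.0, 1.0, 1.2);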
+
+-- Extract mojo information for the long-task-tracking scenario for specific
+-- names. For example, LongTaskTracker slices may have associated IPC
+-- metadata, or InterestingTask slices for input may have associated IPC to
+-- determine whether the task is fling/etc.
+--
+-- @arg name STRING            The name of the slice.
+-- @column interface_name      Name of the interface of the IPC call.
+-- @column ipc_hash            Hash of the IPC call.
+-- @column message_type        Message type (e.g. reply).
+-- @column id                  The slice ID.
+CREATE PERFETTO FUNCTION chrome_select_long_task_slices(name STRING)
+RETURNS TABLE(
+  interface_name STRING,
+  ipc_hash INT,
+  message_type STRING,
+  id INT
+) AS
+SELECT
+  EXTRACT_ARG(s.arg_set_id, "chrome_mojo_event_info.mojo_interface_tag") AS interface_name,
+  EXTRACT_ARG(arg_set_id, "chrome_mojo_event_info.ipc_hash") AS ipc_hash,
+  CASE
+    WHEN EXTRACT_ARG(arg_set_id, "chrome_mojo_event_info.is_reply") THEN "reply"
+    ELSE "message"
+  END AS message_type,
+  s.id
+FROM slice s
+WHERE
+  category GLOB "*scheduler.long_tasks*"
+  AND name = $name;
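+
+-- Illustrative usage only (not part of this module): a sketch of querying the
+-- table function above; 'LongTaskTracker' is an assumed slice name taken from
+-- the description above.
+--
+--   SELECT interface_name, ipc_hash, message_type, id
+--   FROM chrome_select_long_task_slices('LongTaskTracker');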
diff --git a/base/tracing/stdlib/chrome/tasks.sql b/base/tracing/stdlib/chrome/tasks.sql
index 877f8b9..766a9e5 100644
--- a/base/tracing/stdlib/chrome/tasks.sql
+++ b/base/tracing/stdlib/chrome/tasks.sql
@@ -2,13 +2,13 @@
 -- Use of this source code is governed by a BSD-style license that can be
 -- found in the LICENSE file.
 
-SELECT IMPORT("common.slices");
+INCLUDE PERFETTO MODULE common.slices;
 
 -- Returns the mojo ipc hash for a given task, looking it up from the
 -- argument of descendant ScopedSetIpcHash slice.
 -- This is relevant only for the older Chrome traces, where mojo IPC
 -- hash was reported in a separate ScopedSetIpcHash slice.
-CREATE PERFETTO FUNCTION INTERNAL_EXTRACT_MOJO_IPC_HASH(slice_id INT)
+CREATE PERFETTO FUNCTION internal_extract_mojo_ipc_hash(slice_id INT)
 RETURNS INT AS
 SELECT EXTRACT_ARG(arg_set_id, "chrome_mojo_event_info.ipc_hash")
 FROM descendant_slice($slice_id)
@@ -18,7 +18,7 @@
 
 -- Returns the frame type (main frame vs subframe) for key navigation tasks
 -- which capture the associated RenderFrameHost in an argument.
-CREATE PERFETTO FUNCTION INTERNAL_EXTRACT_FRAME_TYPE(slice_id INT)
+CREATE PERFETTO FUNCTION internal_extract_frame_type(slice_id INT)
 RETURNS INT AS
 SELECT EXTRACT_ARG(arg_set_id, "render_frame_host.frame_type")
 FROM descendant_slice($slice_id)
@@ -31,7 +31,8 @@
 LIMIT 1;
 
 -- Human-readable aliases for a few key navigation tasks.
-CREATE PERFETTO FUNCTION INTERNAL_HUMAN_READABLE_NAVIGATION_TASK_NAME(task_name STRING)
+CREATE PERFETTO FUNCTION internal_human_readable_navigation_task_name(
+  task_name STRING)
 RETURNS STRING AS
 SELECT
   CASE
@@ -46,19 +47,19 @@
   END;
 
 -- Takes a task name and formats it correctly for scheduler tasks.
-CREATE PERFETTO FUNCTION INTERNAL_FORMAT_SCHEDULER_TASK_NAME(task_name STRING)
+CREATE PERFETTO FUNCTION internal_format_scheduler_task_name(task_name STRING)
 RETURNS STRING AS
 SELECT printf("RunTask(posted_from=%s)", $task_name);
 
 -- Takes the category and determines whether it is "Java" only, as opposed to
 -- "toplevel,Java".
-CREATE PERFETTO FUNCTION INTERNAL_JAVA_NOT_TOP_LEVEL_CATEGORY(category STRING)
+CREATE PERFETTO FUNCTION internal_java_not_top_level_category(category STRING)
 RETURNS BOOL AS
 SELECT $category GLOB "*Java*" AND $category not GLOB "*toplevel*";
 
 -- Takes the category and determines whether is any valid
 -- toplevel category or combination of categories.
-CREATE PERFETTO FUNCTION INTERNAL_ANY_TOP_LEVEL_CATEGORY(category STRING)
+CREATE PERFETTO FUNCTION internal_any_top_level_category(category STRING)
 RETURNS BOOL AS
 SELECT $category IN ("toplevel", "toplevel,viz", "toplevel,Java");
 
@@ -67,10 +68,10 @@
 -- a "child" table with more information about the task (e.g. posted_from for
 -- scheduler tasks). Currently this is not the case and needs a cleanup.
 -- Also we should align this with how table inheritance should work for
--- `create perfetto table`.
+-- `CREATE PERFETTO TABLE`.
 
 -- Get task type for a given task kind.
-CREATE PERFETTO FUNCTION INTERNAL_GET_JAVA_VIEWS_TASK_TYPE(kind STRING)
+CREATE PERFETTO FUNCTION internal_get_java_views_task_type(kind STRING)
 RETURNS STRING AS
 SELECT
   CASE $kind
@@ -118,7 +119,7 @@
 old_associated_mojo_slices AS (
   SELECT
     name AS interface_name,
-    INTERNAL_EXTRACT_MOJO_IPC_HASH(id) AS ipc_hash,
+    internal_extract_mojo_ipc_hash(id) AS ipc_hash,
     "message" AS message_type,
     id
   FROM slice
@@ -133,7 +134,7 @@
       EXTRACT_ARG(arg_set_id, "chrome_mojo_event_info.watcher_notify_interface_tag"),
       EXTRACT_ARG(arg_set_id, "chrome_mojo_event_info.mojo_interface_tag")
     ) AS interface_name,
-    INTERNAL_EXTRACT_MOJO_IPC_HASH(id) AS ipc_hash,
+    internal_extract_mojo_ipc_hash(id) AS ipc_hash,
     "message" AS message_type,
     id
   FROM slice
@@ -189,7 +190,7 @@
     -- with either category = "toplevel" or category = "toplevel,Java".
     -- Also filter out the zero duration slices as an attempt to reduce noise as
     -- "Java" category contains misc events (as it's hard to add new categories).
-    WHERE INTERNAL_JAVA_NOT_TOP_LEVEL_CATEGORY(category) AND dur > 0
+    WHERE internal_java_not_top_level_category(category) AND dur > 0
   ),
   -- We filter out generic slices from various UI frameworks which don't tell us much about
   -- what exactly this view is doing.
@@ -228,11 +229,11 @@
   s1.*,
   -- While the parent slices are too generic to be used by themselves,
   -- they can provide some useful metadata.
-  HAS_PARENT_SLICE_WITH_NAME(
+  has_parent_slice_with_name(
     s1.id,
     "ViewResourceAdapter:captureWithSoftwareDraw"
   ) AS is_software_screenshot,
-  HAS_PARENT_SLICE_WITH_NAME(
+  has_parent_slice_with_name(
     s1.id,
     "ViewResourceAdapter:captureWithHardwareDraw"
   ) AS is_hardware_screenshot
@@ -276,7 +277,7 @@
 WHERE name GLOB "Looper.dispatch: android.view.Choreographer$FrameHandler*";
 
 -- Extract task's posted_from information from task's arguments.
-CREATE PERFETTO FUNCTION INTERNAL_GET_POSTED_FROM(arg_set_id INT)
+CREATE PERFETTO FUNCTION internal_get_posted_from(arg_set_id INT)
 RETURNS STRING AS
 WITH posted_from as (
   SELECT
@@ -298,22 +299,20 @@
 -- @column kind          The type of Java slice.
 -- @column ts            The timestamp of the slice.
 -- @column name          The name of the slice.
-SELECT CREATE_VIEW_FUNCTION(
-  'INTERNAL_SELECT_BEGIN_MAIN_FRAME_JAVA_SLICES(name STRING)',
-  'id INT, kind STRING, ts LONG, dur LONG, name STRING',
-  'SELECT
-      id,
-      "SingleThreadProxy::BeginMainFrame" AS kind,
-      ts,
-      dur,
-      name
-    FROM slice
-    WHERE
-      (name = $name
-        AND INTERNAL_GET_POSTED_FROM(arg_set_id) =
-            "cc/trees/single_thread_proxy.cc:ScheduledActionSendBeginMainFrame")
-  '
-);
+CREATE PERFETTO FUNCTION internal_select_begin_main_frame_java_slices(
+  name STRING)
+RETURNS TABLE(id INT, kind STRING, ts LONG, dur LONG, name STRING) AS
+SELECT
+  id,
+  "SingleThreadProxy::BeginMainFrame" AS kind,
+  ts,
+  dur,
+  name
+FROM slice
+WHERE
+  (name = $name
+    AND internal_get_posted_from(arg_set_id) =
+        "cc/trees/single_thread_proxy.cc:ScheduledActionSendBeginMainFrame");
 
 -- A list of Chrome tasks which were performing operations with Java views,
 -- together with the names of the these views.
@@ -377,8 +376,8 @@
 SELECT
   task.id,
   "chrome_scheduler_tasks" as type,
-  INTERNAL_FORMAT_SCHEDULER_TASK_NAME(
-    INTERNAL_GET_POSTED_FROM(slice.arg_set_id)) as name,
+  internal_format_scheduler_task_name(
+    internal_get_posted_from(slice.arg_set_id)) as name,
   slice.ts,
   slice.dur,
   thread.utid,
@@ -392,7 +391,7 @@
   slice.arg_set_id,
   slice.thread_ts,
   slice.thread_dur,
-  INTERNAL_GET_POSTED_FROM(slice.arg_set_id) as posted_from
+  internal_get_posted_from(slice.arg_set_id) as posted_from
 FROM internal_chrome_scheduler_tasks task
 JOIN slice using (id)
 JOIN thread_track ON slice.track_id = thread_track.id
@@ -402,7 +401,7 @@
 
 -- Select the slice that might be the descendant mojo slice for the given task
 -- slice if it exists.
-CREATE PERFETTO FUNCTION INTERNAL_GET_DESCENDANT_MOJO_SLICE_CANDIDATE(
+CREATE PERFETTO FUNCTION internal_get_descendant_mojo_slice_candidate(
   slice_id INT
 )
 RETURNS INT AS
@@ -424,17 +423,15 @@
 ORDER by depth, ts
 LIMIT 1;
 
-SELECT CREATE_VIEW_FUNCTION('INTERNAL_DESCENDANT_MOJO_SLICE(slice_id INT)',
-  'task_name STRING',
-  '
-  SELECT
-    printf("%s %s (hash=%d)",
-      mojo.interface_name, mojo.message_type, mojo.ipc_hash) AS task_name
-  FROM slice task
-  JOIN internal_chrome_mojo_slices mojo
-    ON mojo.id = INTERNAL_GET_DESCENDANT_MOJO_SLICE_CANDIDATE($slice_id)
-  WHERE task.id = $slice_id
-  ');
+CREATE PERFETTO FUNCTION internal_descendant_mojo_slice(slice_id INT)
+RETURNS TABLE(task_name STRING) AS
+SELECT
+  printf("%s %s (hash=%d)",
+    mojo.interface_name, mojo.message_type, mojo.ipc_hash) AS task_name
+FROM slice task
+JOIN internal_chrome_mojo_slices mojo
+  ON mojo.id = internal_get_descendant_mojo_slice_candidate($slice_id)
+WHERE task.id = $slice_id;
 
 -- A list of "Chrome tasks": top-level execution units (e.g. scheduler tasks /
 -- IPCs / system callbacks) run by Chrome. For a given thread, the tasks
@@ -452,7 +449,7 @@
 non_embedded_toplevel_slices AS (
   SELECT * FROM slice
   WHERE
-    INTERNAL_ANY_TOP_LEVEL_CATEGORY(category)
+    internal_any_top_level_category(category)
     AND (SELECT count() FROM ancestor_slice(slice.id) anc
       WHERE anc.category GLOB "*toplevel*" or anc.category GLOB "*toplevel.viz*") = 0
 ),
@@ -468,7 +465,7 @@
     "java" as task_type
   FROM slice s
   WHERE
-    INTERNAL_JAVA_NOT_TOP_LEVEL_CATEGORY(category)
+    internal_java_not_top_level_category(category)
     AND (SELECT count()
       FROM ancestor_slice(s.id) s2
       WHERE s2.category GLOB "*toplevel*" OR s2.category GLOB "*Java*") = 0
@@ -478,7 +475,7 @@
   SELECT
     id,
     printf('%s(java_views=%s)', kind, java_views) AS task_name,
-    INTERNAL_GET_JAVA_VIEWS_TASK_TYPE(kind) AS task_type
+    internal_get_java_views_task_type(kind) AS task_type
   FROM internal_chrome_slices_with_java_views
 ),
 scheduler_tasks AS (
@@ -513,8 +510,8 @@
   WITH tasks_with_readable_names AS (
     SELECT
       id,
-      INTERNAL_HUMAN_READABLE_NAVIGATION_TASK_NAME(task_name) as readable_name,
-      IFNULL(INTERNAL_EXTRACT_FRAME_TYPE(id), 'unknown frame type') as frame_type
+      internal_human_readable_navigation_task_name(task_name) as readable_name,
+      IFNULL(internal_extract_frame_type(id), 'unknown frame type') as frame_type
     FROM
       scheduler_tasks_with_mojo
   )
diff --git a/base/tracing/stdlib/chrome/vsync_intervals.sql b/base/tracing/stdlib/chrome/vsync_intervals.sql
new file mode 100644
index 0000000..bf8c628
--- /dev/null
+++ b/base/tracing/stdlib/chrome/vsync_intervals.sql
@@ -0,0 +1,48 @@
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+
+DROP TABLE IF EXISTS chrome_vsync_intervals;
+
+-- A simple table that checks the time between VSyncs (this can be used to
+-- determine if we're refreshing at 90 FPS or 60 FPS).
+--
+-- Note: In traces without the "Java" category there will be no VSync
+--       TraceEvents and this table will be empty.
+CREATE PERFETTO TABLE chrome_vsync_intervals AS
+SELECT
+  slice_id,
+  ts,
+  dur,
+  track_id,
+  LEAD(ts) OVER(PARTITION BY track_id ORDER BY ts) - ts AS time_to_next_vsync
+FROM slice
+WHERE name = "VSync"
+ORDER BY track_id, ts;
+
+-- Function: compute the average VSync interval of the gesture (ideally this
+-- would be either 60 FPS or 90 FPS for the whole gesture, but that isn't
+-- always the case) on the given time segment.
+-- If the trace doesn't contain the VSync TraceEvent we fall back on assuming
+-- 60 FPS (this is the 1e+9 / 60 in the COALESCE, which corresponds to
+-- roughly 16.7 ms or 60 FPS).
+--
+-- begin_ts: segment start time
+-- end_ts: segment end time
+CREATE PERFETTO FUNCTION calculate_avg_vsync_interval(
+  begin_ts LONG,
+  end_ts LONG
+)
+-- Returns: the average VSync interval on this time segment,
+-- or 1e+9 / 60 (~16.7 ms) if the trace doesn't contain the VSync TraceEvent.
+RETURNS FLOAT AS
+SELECT
+  COALESCE((
+    SELECT
+      CAST(AVG(time_to_next_vsync) AS FLOAT)
+    FROM chrome_vsync_intervals in_query
+    WHERE
+      time_to_next_vsync IS NOT NULL AND
+      in_query.ts > $begin_ts AND
+      in_query.ts < $end_ts
+  ), 1e+9 / 60);
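+
+-- Illustrative usage only (not part of this module): computing the average
+-- VSync interval over a placeholder time segment (timestamps in nanoseconds).
+--
+--   SELECT calculate_avg_vsync_interval(1000000000, 2000000000);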
diff --git a/base/tracing/test/README b/base/tracing/test/README
new file mode 100644
index 0000000..b5cd7f5
--- /dev/null
+++ b/base/tracing/test/README
@@ -0,0 +1,15 @@
+# PerfettoSQL Chrome Standard Library tests
+
+This directory contains the [Perfetto Diff Tests](https://perfetto.dev/docs/analysis/trace-processor#diff-tests) to test changes to the Chrome standard library.
+
+The diff tests themselves are in `./trace_processor/diff_tests/chrome`. The `./data` directory contains the Perfetto traces that are used by the diff tests.
+
+## Running Diff Tests
+
+Currently, the diff tests only run on Linux. You can build and run the diff tests with the following commands:
+
+```
+$ gn gen --args='' out/Linux
+$ autoninja -C out/Linux perfetto_diff_tests
+$ out/Linux/bin/run_perfetto_diff_tests
+```
\ No newline at end of file
diff --git a/base/tracing/test/data/async-trace-1.json b/base/tracing/test/data/async-trace-1.json
new file mode 100644
index 0000000..bc2ab13
--- /dev/null
+++ b/base/tracing/test/data/async-trace-1.json
@@ -0,0 +1,3 @@
+{"traceEvents":[{"name":"B","cat":"Dart","tid":19719,"pid":42599,"ts":724095664621,"ph":"b","id":"2","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B1","cat":"Dart","tid":19719,"pid":42599,"ts":724095686164,"ph":"b","id":"2","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B2","cat":"Dart","tid":19719,"pid":42599,"ts":724095718355,"ph":"b","id":"2","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B2","cat":"Dart","tid":19719,"pid":42599,"ts":724095770250,"ph":"e","id":"2","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B1","cat":"Dart","tid":19719,"pid":42599,"ts":724095822549,"ph":"e","id":"2","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B","cat":"Dart","tid":19719,"pid":42599,"ts":724095923987,"ph":"e","id":"2","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C","cat":"Dart","tid":19719,"pid":42599,"ts":724095976046,"ph":"b","id":"3","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C1","cat":"Dart","tid":19719,"pid":42599,"ts":724096028033,"ph":"b","id":"3","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C2","cat":"Dart","tid":19719,"pid":42599,"ts":724096079618,"ph":"b","id":"3","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C2","cat":"Dart","tid":19719,"pid":42599,"ts":724096131862,"ph":"e","id":"3","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C1","cat":"Dart","tid":19719,"pid":42599,"ts":724096233365,"ph":"e","id":"3","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C","cat":"Dart","tid":19719,"pid":42599,"ts":724096288521,"ph":"e","id":"3","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},
+    {"name":"Task1","cat":"Dart","tid":19719,"pid":42599,"ts":724097290236,"ph":"b","id":"b","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"First instant event for Task1.","cat":"Dart","tid":19719,"pid":42599,"ts":724097290483,"ph":"n","id":"b","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"Second instant event for Task1.","cat":"Dart","tid":19719,"pid":42599,"ts":724097290488,"ph":"n","id":"b","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"Third instant event for Task1.","cat":"Dart","tid":19719,"pid":42599,"ts":724097290490,"ph":"n","id":"b","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"Task1","cat":"Dart","tid":19719,"pid":42599,"ts":724097290495,"ph":"e","id":"b","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},
+    {"name":"B","cat":"Dart","tid":19719,"pid":42599,"ts":724097443236,"ph":"b","id":"d","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B1","cat":"Dart","tid":19719,"pid":42599,"ts":724097464678,"ph":"b","id":"d","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B2","cat":"Dart","tid":19719,"pid":42599,"ts":724097496801,"ph":"b","id":"d","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B2","cat":"Dart","tid":19719,"pid":42599,"ts":724097548118,"ph":"e","id":"d","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B1","cat":"Dart","tid":19719,"pid":42599,"ts":724097599467,"ph":"e","id":"d","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"B","cat":"Dart","tid":19719,"pid":42599,"ts":724097701394,"ph":"e","id":"d","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C","cat":"Dart","tid":19719,"pid":42599,"ts":724097752875,"ph":"b","id":"e","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C1","cat":"Dart","tid":19719,"pid":42599,"ts":724097805130,"ph":"b","id":"e","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C2","cat":"Dart","tid":19719,"pid":42599,"ts":724097856349,"ph":"b","id":"e","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C2","cat":"Dart","tid":19719,"pid":42599,"ts":724097907581,"ph":"e","id":"e","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C1","cat":"Dart","tid":19719,"pid":42599,"ts":724098009737,"ph":"e","id":"e","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}},{"name":"C","cat":"Dart","tid":19719,"pid":42599,"ts":724098061723,"ph":"e","id":"e","args":{"isolateId":"isolates/3975583187945063","isolateGroupId":"isolateGroups/9173736828428533122"}}]}
\ No newline at end of file
diff --git a/base/tracing/test/data/async-trace-1.json.sha256 b/base/tracing/test/data/async-trace-1.json.sha256
new file mode 100644
index 0000000..64b4b08
--- /dev/null
+++ b/base/tracing/test/data/async-trace-1.json.sha256
@@ -0,0 +1 @@
+758ad1ffc85d002ec1168b40bc3733793799d4ff6719341ef4f92d469c6e4d01
\ No newline at end of file
diff --git a/base/tracing/test/data/async-trace-2.json b/base/tracing/test/data/async-trace-2.json
new file mode 100644
index 0000000..d37dc7c
--- /dev/null
+++ b/base/tracing/test/data/async-trace-2.json
@@ -0,0 +1 @@
+{"traceEvents":[{"name":"Frame Request Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736067070938,"ph":"b","id":"e","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736067081389,"ph":"b","id":"1f","args":{}},{"name":"Frame Request Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736067082544,"ph":"e","id":"e","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736067082545,"ph":"e","id":"1f","args":{}},{"name":"PipelineItem","cat":"Embedder","tid":42499,"pid":70727,"ts":736067082548,"ph":"b","id":"e","args":{}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736067082548,"ph":"b","id":"e","args":{}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736067082649,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736067082667,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736067082691,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":42499,"pid":70727,"ts":736067093313,"ph":"b","id":"4","args":{"channel":"flutter/platform","isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":259,"pid":70727,"ts":736067093396,"ph":"e","id":"4","args":{}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":259,"pid":70727,"ts":736067093400,"ph":"b","id":"4","args":{"channel":"flutter/platform"}},{"name":"SceneDisplayLag","cat":"Embedder","tid":40963,"pid":70727,"ts":736067114722,"ph":"b","id":"20","args":{"frame_target_time":"736067098055957","current_frame_target_time":"736067114722623","vsync_transitions_missed":"1"}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736067138945,"ph":"e","id":"e","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736067145038,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":42499,"pid":70727,"ts":736067145840,"ph":"e","id":"4","args":{}},{"name":"SceneDisplayLag","cat":"Embedder","tid":40963,"pid":70727,"ts":736067154801,"ph":"e","id":"20","args":{"frame_target_time":"736067098055957","current_frame_target_time":"736067114722623","vsync_transitions_missed":"1"}},{"name":"PipelineItem","cat":"Embedder","tid":40963,"pid":70727,"ts":736067154937,"ph":"e","id":"e","args":{}},{"name":"Frame Request Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736067655788,"ph":"b","id":"f","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736067664723,"ph":"b","id":"22","args":{}},{"name":"Frame Request 
Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736067665837,"ph":"e","id":"f","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736067665837,"ph":"e","id":"22","args":{}},{"name":"PipelineItem","cat":"Embedder","tid":42499,"pid":70727,"ts":736067665840,"ph":"b","id":"f","args":{}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736067665840,"ph":"b","id":"f","args":{}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736067665950,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736067665979,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736067666014,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":42499,"pid":70727,"ts":736067669713,"ph":"b","id":"5","args":{"channel":"flutter/platform","isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":259,"pid":70727,"ts":736067669778,"ph":"e","id":"5","args":{}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":259,"pid":70727,"ts":736067669781,"ph":"b","id":"5","args":{"channel":"flutter/platform"}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736067683293,"ph":"e","id":"f","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"SceneDisplayLag","cat":"Embedder","tid":40963,"pid":70727,"ts":736067684739,"ph":"b","id":"23","args":{"frame_target_time":"736067681390650","current_frame_target_time":"736067698057316","vsync_transitions_missed":"1"}},{"name":"PipelineItem","cat":"Embedder","tid":40963,"pid":70727,"ts":736067684801,"ph":"e","id":"f","args":{}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736067685464,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":42499,"pid":70727,"ts":736067687583,"ph":"e","id":"5","args":{}},{"name":"SceneDisplayLag","cat":"Embedder","tid":40963,"pid":70727,"ts":736067698057,"ph":"e","id":"23","args":{"frame_target_time":"736067681390650","current_frame_target_time":"736067698057316","vsync_transitions_missed":"1"}},{"name":"Frame Request Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736068209875,"ph":"b","id":"10","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736068214722,"ph":"b","id":"25","args":{}},{"name":"Frame Request 
Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736068215269,"ph":"e","id":"10","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736068215269,"ph":"e","id":"25","args":{}},{"name":"PipelineItem","cat":"Embedder","tid":42499,"pid":70727,"ts":736068215273,"ph":"b","id":"10","args":{}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736068215274,"ph":"b","id":"10","args":{}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736068215382,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736068215403,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736068215427,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":42499,"pid":70727,"ts":736068217799,"ph":"b","id":"6","args":{"channel":"flutter/platform","isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":259,"pid":70727,"ts":736068217862,"ph":"e","id":"6","args":{}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":259,"pid":70727,"ts":736068217864,"ph":"b","id":"6","args":{"channel":"flutter/platform"}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736068229674,"ph":"e","id":"10","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736068231029,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":42499,"pid":70727,"ts":736068231704,"ph":"e","id":"6","args":{}},{"name":"SceneDisplayLag","cat":"Embedder","tid":40963,"pid":70727,"ts":736068243108,"ph":"b","id":"27","args":{"frame_target_time":"736068231389431","current_frame_target_time":"736068248056097","vsync_transitions_missed":"1"}},{"name":"PipelineItem","cat":"Embedder","tid":40963,"pid":70727,"ts":736068243174,"ph":"e","id":"10","args":{}},{"name":"SceneDisplayLag","cat":"Embedder","tid":40963,"pid":70727,"ts":736068248056,"ph":"e","id":"27","args":{"frame_target_time":"736068231389431","current_frame_target_time":"736068248056097","vsync_transitions_missed":"1"}},{"name":"Frame Request Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736068656515,"ph":"b","id":"11","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736068664722,"ph":"b","id":"29","args":{}},{"name":"Frame Request 
Pending","cat":"Embedder","tid":42499,"pid":70727,"ts":736068664849,"ph":"e","id":"11","args":{}},{"name":"VsyncSchedulingOverhead","cat":"Embedder","tid":42499,"pid":70727,"ts":736068664849,"ph":"e","id":"29","args":{}},{"name":"PipelineItem","cat":"Embedder","tid":42499,"pid":70727,"ts":736068664853,"ph":"b","id":"11","args":{}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736068664853,"ph":"b","id":"11","args":{}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736068664951,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736068664968,"ph":"b","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"Animate","cat":"Dart","tid":42499,"pid":70727,"ts":736068664991,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":42499,"pid":70727,"ts":736068666701,"ph":"b","id":"7","args":{"channel":"flutter/platform","isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleHandler","cat":"Embedder","tid":259,"pid":70727,"ts":736068666761,"ph":"e","id":"7","args":{}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":259,"pid":70727,"ts":736068666764,"ph":"b","id":"7","args":{"channel":"flutter/platform"}},{"name":"PipelineProduce","cat":"Embedder","tid":42499,"pid":70727,"ts":736068677493,"ph":"e","id":"11","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PipelineItem","cat":"Embedder","tid":40963,"pid":70727,"ts":736068678760,"ph":"e","id":"11","args":{}},{"name":"Frame","cat":"Dart","tid":42499,"pid":70727,"ts":736068679329,"ph":"e","id":"0","args":{"isolateId":"isolates/3135891960345487","isolateGroupId":"isolateGroups/12625356878627803287"}},{"name":"PlatformChannel ScheduleResult","cat":"Embedder","tid":42499,"pid":70727,"ts":736068680104,"ph":"e","id":"7","args":{}}],"performance":{"selectedFrameId":36,"flutterFrames":[{"number":30,"startTime":736067082545,"elapsed":73412,"build":56397,"raster":15682,"vsyncOverhead":1156},{"number":32,"startTime":736067665837,"elapsed":20016,"build":17453,"raster":1262,"vsyncOverhead":1114},{"number":34,"startTime":736068215269,"elapsed":28386,"build":14403,"raster":13313,"vsyncOverhead":547},{"number":36,"startTime":736068664849,"elapsed":14001,"build":12641,"raster":885,"vsyncOverhead":127}]}}
\ No newline at end of file
diff --git a/base/tracing/test/data/async-trace-2.json.sha256 b/base/tracing/test/data/async-trace-2.json.sha256
new file mode 100644
index 0000000..a3ba889
--- /dev/null
+++ b/base/tracing/test/data/async-trace-2.json.sha256
@@ -0,0 +1 @@
+2da7fa65cce487d602e2ccfb333c9ecab298ef6e45b745c95240cebed150b577
\ No newline at end of file
diff --git a/base/tracing/test/data/chrome_android_systrace.pftrace b/base/tracing/test/data/chrome_android_systrace.pftrace
new file mode 100644
index 0000000..a36a9b7
--- /dev/null
+++ b/base/tracing/test/data/chrome_android_systrace.pftrace
Binary files differ
diff --git a/base/tracing/test/data/chrome_android_systrace.pftrace.sha256 b/base/tracing/test/data/chrome_android_systrace.pftrace.sha256
new file mode 100644
index 0000000..63f5b7d
--- /dev/null
+++ b/base/tracing/test/data/chrome_android_systrace.pftrace.sha256
@@ -0,0 +1 @@
+a96005e5ee9059b89347573f7a7ca53097244fb3532956e0a62897ad77be2ddf
\ No newline at end of file
diff --git a/base/tracing/test/data/chrome_input_with_frame_view.pftrace b/base/tracing/test/data/chrome_input_with_frame_view.pftrace
new file mode 100644
index 0000000..53adcc5
--- /dev/null
+++ b/base/tracing/test/data/chrome_input_with_frame_view.pftrace
Binary files differ
diff --git a/base/tracing/test/data/chrome_input_with_frame_view.pftrace.sha256 b/base/tracing/test/data/chrome_input_with_frame_view.pftrace.sha256
new file mode 100644
index 0000000..ea5a606
--- /dev/null
+++ b/base/tracing/test/data/chrome_input_with_frame_view.pftrace.sha256
@@ -0,0 +1 @@
+1e4e1b7098c3c1b900d31fa6d6791e7b022e85ecebbb560123ce7139b3f82231
\ No newline at end of file
diff --git a/base/tracing/test/data/chrome_memory_snapshot.pftrace b/base/tracing/test/data/chrome_memory_snapshot.pftrace
new file mode 100644
index 0000000..0851d4d
--- /dev/null
+++ b/base/tracing/test/data/chrome_memory_snapshot.pftrace
Binary files differ
diff --git a/base/tracing/test/data/chrome_memory_snapshot.pftrace.sha256 b/base/tracing/test/data/chrome_memory_snapshot.pftrace.sha256
new file mode 100644
index 0000000..c55016e
--- /dev/null
+++ b/base/tracing/test/data/chrome_memory_snapshot.pftrace.sha256
@@ -0,0 +1 @@
+4a06b393bf14147b25296797756a4185abf31510aca2db22ddb3c5dbd21123e4
\ No newline at end of file
diff --git a/base/tracing/test/data/chrome_rendering_desktop.pftrace b/base/tracing/test/data/chrome_rendering_desktop.pftrace
new file mode 100644
index 0000000..b70e96b
--- /dev/null
+++ b/base/tracing/test/data/chrome_rendering_desktop.pftrace
Binary files differ
diff --git a/base/tracing/test/data/chrome_rendering_desktop.pftrace.sha256 b/base/tracing/test/data/chrome_rendering_desktop.pftrace.sha256
new file mode 100644
index 0000000..28c7589
--- /dev/null
+++ b/base/tracing/test/data/chrome_rendering_desktop.pftrace.sha256
@@ -0,0 +1 @@
+f61971e42ea0ce0f6da71c87a0ab19da0e13deca0fa90c6bdc98782af01ae702
\ No newline at end of file
diff --git a/base/tracing/test/data/chrome_touch_gesture_scroll.pftrace b/base/tracing/test/data/chrome_touch_gesture_scroll.pftrace
new file mode 100644
index 0000000..928f3ee
--- /dev/null
+++ b/base/tracing/test/data/chrome_touch_gesture_scroll.pftrace
Binary files differ
diff --git a/base/tracing/test/data/chrome_touch_gesture_scroll.pftrace.sha256 b/base/tracing/test/data/chrome_touch_gesture_scroll.pftrace.sha256
new file mode 100644
index 0000000..9d04d5d
--- /dev/null
+++ b/base/tracing/test/data/chrome_touch_gesture_scroll.pftrace.sha256
@@ -0,0 +1 @@
+2fe40090c41ebeb5dc6ce0bea5bc9aef4d7f4cf7fd625209641e0c5ea2210fb7
\ No newline at end of file
diff --git a/base/tracing/test/data/event_latency_with_args.perfetto-trace b/base/tracing/test/data/event_latency_with_args.perfetto-trace
new file mode 100644
index 0000000..9f837c5
--- /dev/null
+++ b/base/tracing/test/data/event_latency_with_args.perfetto-trace
Binary files differ
diff --git a/base/tracing/test/data/event_latency_with_args.perfetto-trace.sha256 b/base/tracing/test/data/event_latency_with_args.perfetto-trace.sha256
new file mode 100644
index 0000000..6b5afdc
--- /dev/null
+++ b/base/tracing/test/data/event_latency_with_args.perfetto-trace.sha256
@@ -0,0 +1 @@
+c3d28fa97b1fcb515cacccdcddcda8981f5f2c6d20df682c59bcfdd8762954c2
\ No newline at end of file
diff --git a/base/tracing/test/data/fling_with_input_delay.pftrace b/base/tracing/test/data/fling_with_input_delay.pftrace
new file mode 100644
index 0000000..f8d04e8
--- /dev/null
+++ b/base/tracing/test/data/fling_with_input_delay.pftrace
Binary files differ
diff --git a/base/tracing/test/data/fling_with_input_delay.pftrace.sha256 b/base/tracing/test/data/fling_with_input_delay.pftrace.sha256
new file mode 100644
index 0000000..c8ea1a2
--- /dev/null
+++ b/base/tracing/test/data/fling_with_input_delay.pftrace.sha256
@@ -0,0 +1 @@
+8c968b20e71481475a429399b4366bd796c527293218fe80789f9ed6ab9db5b4
\ No newline at end of file
diff --git a/base/tracing/test/data/long_task_tracking_trace b/base/tracing/test/data/long_task_tracking_trace
new file mode 100644
index 0000000..52e4a2d
--- /dev/null
+++ b/base/tracing/test/data/long_task_tracking_trace
Binary files differ
diff --git a/base/tracing/test/data/long_task_tracking_trace.sha256 b/base/tracing/test/data/long_task_tracking_trace.sha256
new file mode 100644
index 0000000..6651eee
--- /dev/null
+++ b/base/tracing/test/data/long_task_tracking_trace.sha256
@@ -0,0 +1 @@
+dbcb9f6baa0e89ea519b93aaee373e37a4b4e453a9d0f4ad8fef0279f25c33f2
\ No newline at end of file
diff --git a/base/tracing/test/data/scroll_offsets.pftrace b/base/tracing/test/data/scroll_offsets.pftrace
new file mode 100644
index 0000000..0863550
--- /dev/null
+++ b/base/tracing/test/data/scroll_offsets.pftrace
Binary files differ
diff --git a/base/tracing/test/data/scroll_offsets.pftrace.sha256 b/base/tracing/test/data/scroll_offsets.pftrace.sha256
new file mode 100644
index 0000000..5f6cbee
--- /dev/null
+++ b/base/tracing/test/data/scroll_offsets.pftrace.sha256
@@ -0,0 +1 @@
+62101edb5204fec8bea30124f65d4e49bda0808d7b036e95f89445aaad6d8d98
\ No newline at end of file
diff --git a/base/tracing/test/data/scrolling_with_blocked_nonblocked_frames.pftrace b/base/tracing/test/data/scrolling_with_blocked_nonblocked_frames.pftrace
new file mode 100644
index 0000000..8d9bb04
--- /dev/null
+++ b/base/tracing/test/data/scrolling_with_blocked_nonblocked_frames.pftrace
Binary files differ
diff --git a/base/tracing/test/data/scrolling_with_blocked_nonblocked_frames.pftrace.sha256 b/base/tracing/test/data/scrolling_with_blocked_nonblocked_frames.pftrace.sha256
new file mode 100644
index 0000000..b2459c7
--- /dev/null
+++ b/base/tracing/test/data/scrolling_with_blocked_nonblocked_frames.pftrace.sha256
@@ -0,0 +1 @@
+25f78adb06ccbff3bc0d5a80993370c22eadc6685d66b005f1a35268752f847f
\ No newline at end of file
diff --git a/base/tracing/test/run_perfetto_diff_tests.py b/base/tracing/test/run_perfetto_diff_tests.py
new file mode 100755
index 0000000..6bbd894
--- /dev/null
+++ b/base/tracing/test/run_perfetto_diff_tests.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env vpython3
+# Copyright 2023 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A wrapper script for //third_party/perfetto/diff_test_trace_processor.py.
+
+import argparse
+import subprocess
+import sys
+import os
+import time
+import json
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--trace-descriptor', type=str, required=True)
+  parser.add_argument('--test-extensions-descriptor', type=str, required=True)
+  parser.add_argument('--metrics-descriptor', type=str, required=True)
+  parser.add_argument(
+    '--all-chrome-metrics-descriptor', type=str, required=True)
+  parser.add_argument(
+    '--chrome-track-event-descriptor', type=str, required=True)
+  parser.add_argument(
+      '--chrome-stdlib', type=str, required=True)
+  parser.add_argument('--test-dir', type=str, required=True)
+  parser.add_argument(
+      '--trace-processor-shell', type=str, required=True)
+  parser.add_argument("--name-filter", type=str, required=False)
+  parser.add_argument("--script", type=str, required=True)
+  args, _ = parser.parse_known_args()
+
+  cmd = [
+    "vpython3", args.script,
+    "--trace-descriptor", args.trace_descriptor,
+    "--test-extensions", args.test_extensions_descriptor,
+    "--metrics-descriptor", args.metrics_descriptor,
+                            args.all_chrome_metrics_descriptor,
+    "--chrome-track-event-descriptor", args.chrome_track_event_descriptor,
+    "--override-sql-module", os.path.abspath(args.chrome_stdlib),
+    "--test-dir", args.test_dir,
+    # TODO(b/301093584): This test fails with Chrome's trace_processor_shell
+    # most likely due to Chromium using a different version of sqlite.
+    # This name filter will be removed when fixed.
+    "--name-filter",
+    "(?=^((?!ChromeScrollJank:frame_times_metric).)*$)(?={})"
+      .format(args.name_filter),
+    args.trace_processor_shell,
+  ]
+
+  test_start_time = time.time()
+  completed_process = subprocess.run(cmd, capture_output=True)
+
+  sys.stderr.buffer.write(completed_process.stderr)
+  return completed_process.returncode
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/actual_power_by_combined_rail_mode.py b/base/tracing/test/trace_processor/diff_tests/chrome/actual_power_by_combined_rail_mode.py
old mode 100644
new mode 100755
index 1ade1c2..ecb3bf4
--- a/base/tracing/test/trace_processor/diff_tests/chrome/actual_power_by_combined_rail_mode.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/actual_power_by_combined_rail_mode.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_check.py b/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_check.py
old mode 100644
new mode 100755
index 1d38ac8..077a7f5
--- a/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_check.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_check.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_helper.py b/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_helper.py
old mode 100644
new mode 100755
index 831f43f..06a6993
--- a/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_helper.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/chrome_scroll_helper.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/chrome_speedometer_test.sql b/base/tracing/test/trace_processor/diff_tests/chrome/chrome_speedometer_test.sql
index 374d72e..c5e8787 100644
--- a/base/tracing/test/trace_processor/diff_tests/chrome/chrome_speedometer_test.sql
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/chrome_speedometer_test.sql
@@ -1,4 +1,8 @@
-SELECT IMPORT('chrome.speedometer');
+-- Copyright 2023 The Chromium Authors
+-- Use of this source code is governed by a BSD-style license that can be
+-- found in the LICENSE file.
+
+INCLUDE PERFETTO MODULE chrome.speedometer;
 
 SELECT
   iteration,
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/combined_rail_modes.py b/base/tracing/test/trace_processor/diff_tests/chrome/combined_rail_modes.py
old mode 100644
new mode 100755
index 6a659a3..f6c4ee5
--- a/base/tracing/test/trace_processor/diff_tests/chrome/combined_rail_modes.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/combined_rail_modes.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/cpu_time_by_combined_rail_mode.py b/base/tracing/test/trace_processor/diff_tests/chrome/cpu_time_by_combined_rail_mode.py
old mode 100644
new mode 100755
index f06992e..50fe7c4
--- a/base/tracing/test/trace_processor/diff_tests/chrome/cpu_time_by_combined_rail_mode.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/cpu_time_by_combined_rail_mode.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/estimated_power_by_combined_rail_mode.py b/base/tracing/test/trace_processor/diff_tests/chrome/estimated_power_by_combined_rail_mode.py
old mode 100644
new mode 100755
index af2891d..cddba03
--- a/base/tracing/test/trace_processor/diff_tests/chrome/estimated_power_by_combined_rail_mode.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/estimated_power_by_combined_rail_mode.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes.py b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes.py
old mode 100644
new mode 100755
index 0343759..42de8ff
--- a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_extra_long.py b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_extra_long.py
old mode 100644
new mode 100755
index aabae72..f56fbcd
--- a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_extra_long.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_extra_long.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_long.py b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_long.py
old mode 100644
new mode 100755
index f23157c..7ec07a6
--- a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_long.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_long.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_no_vsyncs.py b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_no_vsyncs.py
old mode 100644
new mode 100755
index d377546..8a9be67
--- a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_no_vsyncs.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_no_vsyncs.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_with_input.py b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_with_input.py
old mode 100644
new mode 100755
index 8852c34..e1e6b99
--- a/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_with_input.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/modified_rail_modes_with_input.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_gpu_check.py b/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_gpu_check.py
old mode 100644
new mode 100755
index f73a202..9defcce
--- a/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_gpu_check.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_gpu_check.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_mojo_simple_watcher.py b/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_mojo_simple_watcher.py
old mode 100644
new mode 100755
index 3e66861..3919694
--- a/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_mojo_simple_watcher.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_mojo_simple_watcher.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_v3.out b/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_v3.out
index e539208..f3d91cd 100644
--- a/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_v3.out
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/scroll_jank_v3.out
@@ -1,4 +1,4 @@
 "cause_of_jank","sub_cause_of_jank","delay_since_last_frame","vsync_interval"
-"[NULL]","[NULL]",33.462000,16.368000
+"RendererCompositorQueueingDelay","[NULL]",33.462000,16.368000
 "RendererCompositorFinishedToBeginImplFrame","[NULL]",100.274000,16.368000
 "RendererCompositorQueueingDelay","[NULL]",33.404000,16.368000
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests.py
old mode 100644
new mode 100755
index dcf61ba..784af81
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -240,7 +241,7 @@
         trace=DataPath(
             'chrome_page_load_all_categories_not_extended.pftrace.gz'),
         query="""
-        SELECT IMPORT('chrome.tasks');
+        INCLUDE PERFETTO MODULE chrome.tasks;
 
         SELECT full_name as name, task_type, count() AS count
         FROM chrome_tasks
@@ -255,7 +256,7 @@
     return DiffTestBlueprint(
         trace=DataPath('top_level_java_choreographer_slices'),
         query="""
-        SELECT IMPORT('chrome.tasks');
+        INCLUDE PERFETTO MODULE chrome.tasks;
 
         SELECT
           full_name,
@@ -469,7 +470,7 @@
     return DiffTestBlueprint(
         trace=DataPath('chrome_custom_navigation_trace.gz'),
         query="""
-        SELECT IMPORT('chrome.tasks');
+        INCLUDE PERFETTO MODULE chrome.tasks;
 
         SELECT full_name, task_type, count() AS count
         FROM chrome_tasks
@@ -494,7 +495,7 @@
     return DiffTestBlueprint(
         trace=DataPath('chrome_5672_histograms.pftrace.gz'),
         query="""
-        SELECT IMPORT('chrome.histograms');
+        INCLUDE PERFETTO MODULE chrome.histograms;
 
         SELECT
           name,
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests_args.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests_args.py
old mode 100644
new mode 100755
index 14e24df..29b268f
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests_args.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests_args.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests_memory_snapshots.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests_memory_snapshots.py
old mode 100644
new mode 100755
index 809cb29..8337a11
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests_memory_snapshots.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests_memory_snapshots.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests_processes.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests_processes.py
old mode 100644
new mode 100755
index f7374a5..2bbf29f
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests_processes.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests_processes.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -137,7 +138,7 @@
     return DiffTestBlueprint(
         trace=DataPath('chrome_scroll_without_vsync.pftrace'),
         query="""
-        SELECT upid, pid, reliable_from
+        SELECT pid, reliable_from
         FROM
           experimental_missing_chrome_processes
         JOIN
@@ -146,7 +147,7 @@
         ORDER BY upid;
         """,
         out=Csv("""
-        "upid","pid","reliable_from"
+        "pid","reliable_from"
         """))
 
   def test_chrome_missing_processes(self):
@@ -190,7 +191,7 @@
         }
         """),
         query="""
-        SELECT upid, pid, reliable_from
+        SELECT pid, reliable_from
         FROM
           experimental_missing_chrome_processes
         JOIN
@@ -199,9 +200,9 @@
         ORDER BY upid;
         """,
         out=Csv("""
-        "upid","pid","reliable_from"
-        2,100,1000000000
-        3,1000,"[NULL]"
+        "pid","reliable_from"
+        100,1000000000
+        1000,"[NULL]"
         """))
 
   def test_chrome_missing_processes_args(self):
@@ -245,7 +246,7 @@
         }
         """),
         query="""
-        SELECT arg_set_id, key, int_value
+        SELECT slice.name, key, int_value
         FROM
           slice
         JOIN
@@ -254,10 +255,10 @@
         ORDER BY arg_set_id, key;
         """,
         out=Csv("""
-        "arg_set_id","key","int_value"
-        2,"chrome_active_processes.pid[0]",10
-        2,"chrome_active_processes.pid[1]",100
-        2,"chrome_active_processes.pid[2]",1000
+        "name","key","int_value"
+        "ActiveProcesses","chrome_active_processes.pid[0]",10
+        "ActiveProcesses","chrome_active_processes.pid[1]",100
+        "ActiveProcesses","chrome_active_processes.pid[2]",1000
         """))
 
   def test_chrome_missing_processes_2(self):
@@ -301,7 +302,7 @@
         }
         """),
         query="""
-        SELECT upid, pid, reliable_from
+        SELECT pid, reliable_from
         FROM
           experimental_missing_chrome_processes
         JOIN
@@ -310,9 +311,9 @@
         ORDER BY upid;
         """,
         out=Csv("""
-        "upid","pid","reliable_from"
-        2,100,1000000000
-        3,1000,"[NULL]"
+        "pid","reliable_from"
+        100,1000000000
+        1000,"[NULL]"
         """))
 
   def test_chrome_missing_processes_extension_args(self):
@@ -356,7 +357,7 @@
         }
         """),
         query="""
-        SELECT arg_set_id, key, int_value
+        SELECT slice.name, key, int_value
         FROM
           slice
         JOIN
@@ -365,8 +366,8 @@
         ORDER BY arg_set_id, key;
         """,
         out=Csv("""
-        "arg_set_id","key","int_value"
-        2,"active_processes.pid[0]",10
-        2,"active_processes.pid[1]",100
-        2,"active_processes.pid[2]",1000
+        "name","key","int_value"
+        "ActiveProcesses","active_processes.pid[0]",10
+        "ActiveProcesses","active_processes.pid[1]",100
+        "ActiveProcesses","active_processes.pid[2]",1000
         """))
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests_rail_modes.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests_rail_modes.py
old mode 100644
new mode 100755
index b49d346..672fa6e
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests_rail_modes.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests_rail_modes.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests_scroll_jank.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests_scroll_jank.py
old mode 100644
new mode 100755
index 0fd7dcd..fb9c924
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests_scroll_jank.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests_scroll_jank.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -33,32 +34,11 @@
         """,
         out=Path('scroll_jank.out'))
 
-  def test_event_latency_to_breakdowns(self):
-    return DiffTestBlueprint(
-        trace=DataPath('event_latency_with_args.perfetto-trace'),
-        query="""
-        SELECT RUN_METRIC('chrome/event_latency_to_breakdowns.sql');
-
-        SELECT
-          event_latency_ts,
-          event_latency_dur,
-          event_type,
-          GenerationToRendererCompositorNs,
-          GenerationToBrowserMainNs,
-          BrowserMainToRendererCompositorNs,
-          RendererCompositorQueueingDelayNs,
-          unknown_stages_seen
-        FROM event_latency_to_breakdowns
-        ORDER BY event_latency_id
-        LIMIT 30;
-        """,
-        out=Path('event_latency_to_breakdowns.out'))
-
   def test_chrome_frames_with_missed_vsyncs(self):
     return DiffTestBlueprint(
         trace=DataPath('chrome_input_with_frame_view.pftrace'),
         query="""
-        SELECT RUN_METRIC('chrome/chrome_scroll_jank_v3.sql');
+        INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
 
         SELECT
           cause_of_jank,
@@ -73,7 +53,7 @@
     return DiffTestBlueprint(
         trace=DataPath('chrome_input_with_frame_view.pftrace'),
         query="""
-        SELECT RUN_METRIC('chrome/chrome_scroll_jank_v3.sql');
+        INCLUDE PERFETTO MODULE chrome.scroll_jank.scroll_jank_v3;
 
         SELECT
           delayed_frame_percentage
@@ -81,53 +61,6 @@
         """,
         out=Path('scroll_jank_v3_percentage.out'))
 
-  def test_event_latency_scroll_jank(self):
-    return DiffTestBlueprint(
-        trace=DataPath('event_latency_with_args.perfetto-trace'),
-        query="""
-        SELECT RUN_METRIC('chrome/event_latency_scroll_jank.sql');
-
-        SELECT
-          jank,
-          next_jank,
-          prev_jank,
-          gesture_begin_ts,
-          gesture_end_ts,
-          ts,
-          dur,
-          event_type,
-          next_ts,
-          next_dur,
-          prev_ts,
-          prev_dur
-        FROM scroll_event_latency_jank
-        ORDER BY jank DESC
-        LIMIT 10;
-        """,
-        out=Path('event_latency_scroll_jank.out'))
-
-  def test_event_latency_scroll_jank_cause(self):
-    return DiffTestBlueprint(
-        trace=DataPath('event_latency_with_args.perfetto-trace'),
-        query="""
-        SELECT RUN_METRIC('chrome/event_latency_scroll_jank_cause.sql');
-
-        SELECT
-          dur,
-          ts,
-          event_type,
-          next_jank,
-          prev_jank,
-          next_delta_dur_ns,
-          prev_delta_dur_ns,
-          cause_of_jank,
-          max_delta_dur_ns,
-          sub_cause_of_jank
-        FROM event_latency_scroll_jank_cause
-        ORDER by ts;
-        """,
-        out=Path('event_latency_scroll_jank_cause.out'))
-
   def test_scroll_flow_event(self):
     return DiffTestBlueprint(
         trace=DataPath('chrome_scroll_without_vsync.pftrace'),
@@ -158,8 +91,8 @@
 
         SELECT
           -- Each trace_id (in our example trace not true in general) has 8
-          -- steps. There are 139 scrolls. So we expect 1112 rows in total 72 of
-          -- which are janky.
+          -- steps. There are 139 scrolls. So we expect 1112 rows in total 72
+          -- of which are janky.
           (
             SELECT
               COUNT(*)
@@ -184,35 +117,6 @@
         """,
         out=Path('scroll_flow_event_general_validation.out'))
 
-  def test_scroll_jank_cause(self):
-    return DiffTestBlueprint(
-        trace=DataPath('chrome_scroll_without_vsync.pftrace'),
-        query="""
-        SELECT RUN_METRIC('chrome/scroll_jank_cause.sql');
-
-        SELECT
-          COUNT(*) AS total,
-          SUM(jank) AS total_jank,
-          SUM(explained_jank + unexplained_jank)
-          AS sum_explained_and_unexplained,
-          SUM(
-            CASE WHEN explained_jank THEN
-              unexplained_jank
-              ELSE
-                CASE WHEN jank AND NOT unexplained_jank THEN
-                  1
-                  ELSE
-                    0
-                END
-            END
-          ) AS error_rows
-        FROM scroll_jank_cause;
-        """,
-        out=Csv("""
-        "total","total_jank","sum_explained_and_unexplained","error_rows"
-        139,7,7,0
-        """))
-
   def test_scroll_flow_event_queuing_delay(self):
     return DiffTestBlueprint(
         trace=DataPath('chrome_scroll_without_vsync.pftrace'),
@@ -391,7 +295,7 @@
         trace=DataPath('long_task_tracking_trace'),
         query="""
         SELECT
-          RUN_METRIC('chrome/chrome_long_tasks_delaying_input_processing.sql');
+        RUN_METRIC('chrome/chrome_long_tasks_delaying_input_processing.sql');
 
         SELECT
           full_name,
@@ -543,19 +447,19 @@
     return DiffTestBlueprint(
         trace=Path('chrome_scroll_check.py'),
         query="""
-        SELECT IMPORT('chrome.chrome_scrolls');
+        INCLUDE PERFETTO MODULE chrome.chrome_scrolls;
 
         SELECT
           id,
           ts,
           dur,
-          scroll_start_ts,
-          scroll_end_ts
+          gesture_scroll_begin_ts,
+          gesture_scroll_end_ts
         FROM chrome_scrolls
         ORDER by id;
         """,
         out=Csv("""
-        "id","ts","dur","scroll_start_ts","scroll_end_ts"
+        "id","ts","dur","gesture_scroll_begin_ts","gesture_scroll_end_ts"
         5678,0,55000000,0,45000000
         5679,60000000,40000000,60000000,90000000
         5680,80000000,30000000,80000000,100000000
@@ -566,7 +470,7 @@
     return DiffTestBlueprint(
         trace=Path('chrome_scroll_check.py'),
         query="""
-        SELECT IMPORT('chrome.chrome_scrolls');
+        INCLUDE PERFETTO MODULE chrome.chrome_scrolls;
 
         SELECT
           id,
@@ -582,76 +486,6 @@
         3,120000000,70000000
         """))
 
-  def test_chrome_scroll_jank_v2_with_sub_cause(self):
-    return DiffTestBlueprint(
-        trace=DataPath('event_latency_with_args.perfetto-trace'),
-        query=Metric('chrome_scroll_jank_v2'),
-        out=TextProto(r"""
-        [perfetto.protos.chrome_scroll_jank_v2] {
-          scroll_processing_ms: 12374.56
-          scroll_jank_processing_ms: 154.217
-          scroll_jank_percentage: 1.2462422906349802
-          num_scroll_janks: 4
-          scroll_jank_causes_and_durations {
-            cause: "SubmitCompositorFrameToPresentationCompositorFrame"
-            sub_cause: "BufferReadyToLatch"
-            duration_ms: 39.44
-          }
-          scroll_jank_causes_and_durations {
-            cause: "SubmitCompositorFrameToPresentationCompositorFrame"
-            sub_cause: "BufferReadyToLatch"
-            duration_ms: 35.485
-          }
-          scroll_jank_causes_and_durations {
-            cause: "SubmitCompositorFrameToPresentationCompositorFrame"
-            sub_cause: "BufferReadyToLatch"
-            duration_ms: 43.838
-          }
-          scroll_jank_causes_and_durations {
-            cause: "SubmitCompositorFrameToPresentationCompositorFrame"
-            sub_cause: "StartDrawToSwapStart"
-            duration_ms: 35.454
-          }
-        }
-        """))
-
-  def test_chrome_scroll_jank_v2_without_sub_cause(self):
-    return DiffTestBlueprint(
-        trace=DataPath('chrome_input_with_frame_view.pftrace'),
-        query=Metric('chrome_scroll_jank_v2'),
-        out=TextProto(r"""
-        [perfetto.protos.chrome_scroll_jank_v2] {
-          scroll_processing_ms: 14434.053
-          scroll_jank_processing_ms: 550.359
-          scroll_jank_percentage: 3.8129207368159173
-          num_scroll_janks: 6
-          scroll_jank_causes_and_durations {
-            cause: "BrowserMainToRendererCompositor"
-            duration_ms: 60.05
-          }
-          scroll_jank_causes_and_durations {
-            cause: "RendererCompositorFinishedToBeginImplFrame"
-            duration_ms: 131.289
-          }
-          scroll_jank_causes_and_durations {
-            cause: "RendererCompositorFinishedToBeginImplFrame"
-            duration_ms: 115.174
-          }
-          scroll_jank_causes_and_durations {
-            cause: "RendererCompositorFinishedToBeginImplFrame"
-            duration_ms: 99.18
-          }
-          scroll_jank_causes_and_durations {
-            cause: "RendererCompositorFinishedToBeginImplFrame"
-            duration_ms: 83.038
-          }
-          scroll_jank_causes_and_durations {
-            cause: "RendererCompositorFinishedToBeginImplFrame"
-            duration_ms: 61.628
-          }
-        }
-        """))
-
   def test_chrome_scroll_jank_v3(self):
     return DiffTestBlueprint(
         trace=DataPath('chrome_input_with_frame_view.pftrace'),
@@ -668,6 +502,7 @@
             scroll_jank_percentage: 1.9047619047619047
             max_delay_since_last_frame: 6.126221896383187
             scroll_jank_causes {
+              cause: "RendererCompositorQueueingDelay"
               delay_since_last_frame: 2.044354838709678
             }
             scroll_jank_causes {
@@ -687,3 +522,51 @@
           }
         }
         """))
+
+  def test_chrome_scroll_input_offsets(self):
+    return DiffTestBlueprint(
+        trace=DataPath('scroll_offsets.pftrace'),
+        query="""
+        SELECT IMPORT('chrome.scroll_jank.scroll_offsets');
+
+        SELECT
+          scroll_update_id,
+          ts,
+          delta_y,
+          offset_y
+        FROM chrome_scroll_input_offsets
+        ORDER by ts
+        LIMIT 5;
+        """,
+        out=Csv("""
+        "scroll_update_id","ts","delta_y","offset_y"
+        1983,4687296612739,-36.999939,-36.999939
+        1983,4687307175845,-39.000092,-76.000031
+        1987,4687313206739,-35.999969,-112.000000
+        1987,4687323152462,-35.000000,-147.000000
+        1991,4687329240739,-28.999969,-175.999969
+        """))
+
+  def test_chrome_presented_scroll_offsets(self):
+    return DiffTestBlueprint(
+        trace=DataPath('scroll_offsets.pftrace'),
+        query="""
+        SELECT IMPORT('chrome.scroll_jank.scroll_offsets');
+
+        SELECT
+          scroll_update_id,
+          ts,
+          delta_y,
+          offset_y
+        FROM chrome_presented_scroll_offsets
+        ORDER by ts
+        LIMIT 5;
+        """,
+        out=Csv("""
+        "scroll_update_id","ts","delta_y","offset_y"
+        1983,4687296612739,"[NULL]",0
+        1987,4687313206739,-50,-50
+        1991,4687329240739,-50,-100
+        1993,4687336155739,-81,-181
+        1996,4687346164739,-66,-247
+        """))
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/tests_touch_gesture.py b/base/tracing/test/trace_processor/diff_tests/chrome/tests_touch_gesture.py
old mode 100644
new mode 100755
index 2091c1f..47e1944
--- a/base/tracing/test/trace_processor/diff_tests/chrome/tests_touch_gesture.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/tests_touch_gesture.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/chrome/touch_jank.py b/base/tracing/test/trace_processor/diff_tests/chrome/touch_jank.py
old mode 100644
new mode 100755
index f991939..76b1edd
--- a/base/tracing/test/trace_processor/diff_tests/chrome/touch_jank.py
+++ b/base/tracing/test/trace_processor/diff_tests/chrome/touch_jank.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
diff --git a/base/tracing/test/trace_processor/diff_tests/include_index.py b/base/tracing/test/trace_processor/diff_tests/include_index.py
old mode 100644
new mode 100755
index aa9a993..fdd8290
--- a/base/tracing/test/trace_processor/diff_tests/include_index.py
+++ b/base/tracing/test/trace_processor/diff_tests/include_index.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2023 The Chromium Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -6,8 +7,21 @@
 
 from python.generators.diff_tests import testing
 from chrome.tests import Chrome
+from chrome.tests_scroll_jank import ChromeScrollJank
+from chrome.tests_args import ChromeArgs
+from chrome.tests_memory_snapshots import ChromeMemorySnapshots
+from chrome.tests_processes import ChromeProcesses
+from chrome.tests_rail_modes import ChromeRailModes
+from chrome.tests_touch_gesture import ChromeTouchGesture
 
 def fetch_all_diff_tests(index_path: str) -> List['testing.TestCase']:
   return [
-      *Chrome(index_path, 'chrome', 'Chrome').fetch()
+      *ChromeScrollJank(index_path, 'chrome', 'ChromeScrollJank').fetch(),
+      *Chrome(index_path, 'chrome', 'Chrome').fetch(),
+      *ChromeArgs(index_path, 'chrome', 'ChromeArgs').fetch(),
+      *ChromeMemorySnapshots(index_path, 'chrome', 'ChromeMemorySnapshots')
+              .fetch(),
+      *ChromeProcesses(index_path, 'chrome', 'ChromeProcesses').fetch(),
+      *ChromeRailModes(index_path, 'chrome', 'ChromeRailModes').fetch(),
+      *ChromeTouchGesture(index_path, 'chrome', 'ChromeTouchGesture').fetch(),
       ]
diff --git a/base/tracing/test/trace_processor/diff_tests/track_event/track_event_counters.textproto b/base/tracing/test/trace_processor/diff_tests/track_event/track_event_counters.textproto
new file mode 100644
index 0000000..6fe3b59
--- /dev/null
+++ b/base/tracing/test/trace_processor/diff_tests/track_event/track_event_counters.textproto
@@ -0,0 +1,335 @@
+# Sequence 1 defaults to track for "t1" and extra_counter_values for "c1".
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 0
+  incremental_state_cleared: true
+  track_descriptor {
+    uuid: 1
+    parent_uuid: 3
+    thread {
+      pid: 5
+      tid: 1
+      thread_name: "t1"
+    }
+  }
+  trace_packet_defaults {
+    track_event_defaults {
+      track_uuid: 1
+      extra_counter_track_uuids: 10  # Counter "c1", defined below.
+    }
+  }
+}
+
+# Process track for the thread.
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 0
+  track_descriptor {
+    uuid: 3
+    process {
+      pid: 5
+      process_name: "Browser"
+    }
+    chrome_process {}
+  }
+}
+
+# Counter track "c1", a thread-scoped counter for "t1".
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 0
+  track_descriptor {
+    uuid: 10
+    parent_uuid: 1
+    counter {
+      type: 1                # COUNTER_THREAD_TIME_NS.
+      unit_multiplier: 1000  # provided in us.
+      is_incremental: true   # use delta encoding.
+    }
+  }
+}
+
+# Sequence 2 has no defaults. Define a new global counter "MySizeCounter".
+packet {
+  trusted_packet_sequence_id: 2
+  timestamp: 0
+  incremental_state_cleared: true
+  track_descriptor {
+    uuid: 11
+    name: "MySizeCounter"
+    counter {
+      unit: 3  # UNIT_SIZE_BYTES.
+    }
+  }
+}
+
+# Should appear on default track "t1" with extra_counter_values for "c1".
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 1000
+  track_event {
+    categories: "cat"
+    name: "event1_on_t1"
+    type: 1                     # TYPE_SLICE_BEGIN.
+    extra_counter_values: 1000  # First value, so effectively absolute.
+  }
+}
+
+# End for event above.
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 1100
+  track_event {
+    type: 2                   # TYPE_SLICE_END.
+    extra_counter_values: 10  # Absolute: 1010.
+  }
+}
+
+# Resetting incremental state on sequence 1 will restart counter at 0.
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 2000
+  incremental_state_cleared: true
+  track_descriptor {
+    uuid: 1
+    parent_uuid: 3
+    thread {
+      pid: 5
+      tid: 1
+      thread_name: "t1"
+    }
+  }
+  trace_packet_defaults {
+    track_event_defaults {
+      track_uuid: 1
+      extra_counter_track_uuids: 10  # Counter "c1", defined below.
+    }
+  }
+}
+
+# Reemit process track for the thread.
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 2000
+  track_descriptor {
+    uuid: 3
+    process {
+      pid: 5
+      process_name: "Browser"
+    }
+  }
+}
+
+# Reemit counter descriptor, too.
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 2000
+  track_descriptor {
+    uuid: 10
+    parent_uuid: 1
+    counter {
+      type: 1                # COUNTER_THREAD_TIME_NS.
+      unit_multiplier: 1000  # provided in us.
+      is_incremental: true   # use delta encoding.
+    }
+  }
+}
+
+# Should appear on default track "t1" with extra_counter_values for "c1".
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 2000
+  track_event {
+    categories: "cat"
+    name: "event2_on_t1"
+    type: 1                     # TYPE_SLICE_BEGIN.
+    extra_counter_values: 2000  # First value after reset, so absolute.
+  }
+}
+
+# Nested event that happens to be emitted at the same timestamp but with a
+# different thread time value.
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 2000
+  track_event {
+    categories: "cat"
+    name: "event3_on_t1"
+    type: 1                     # TYPE_SLICE_BEGIN.
+    extra_counter_values: 10    # Absolute: 2010
+  }
+}
+
+# End for event above.
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 2200
+  track_event {
+    type: 2                   # TYPE_SLICE_END.
+    extra_counter_values: 10  # Absolute: 2020.
+  }
+}
+
+# End event for "event2_on_t1".
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 2200
+  track_event {
+    type: 2                   # TYPE_SLICE_END.
+    extra_counter_values: 10  # Absolute: 2030.
+  }
+}
+
+# Counter type event for "MySizeCounter" on sequence 1.
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 3000
+  track_event {
+    track_uuid: 11       # "MySizeCounter".
+    type: 4              # TYPE_COUNTER.
+    counter_value: 1024  # Absolute.
+  }
+}
+
+# Counter type event for "MySizeCounter" on sequence 2.
+packet {
+  trusted_packet_sequence_id: 2
+  timestamp: 3100
+  track_event {
+    track_uuid: 11       # "MySizeCounter".
+    type: 4              # TYPE_COUNTER.
+    counter_value: 2048  # Absolute.
+  }
+}
+
+# Override the default extra_counter_values.
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 4000
+  track_event {
+    categories: "cat"
+    name: "event4_on_t1"
+    type: 3                        # TYPE_INSTANT.
+    extra_counter_track_uuids: 10  # "c1".
+    extra_counter_track_uuids: 11  # "MySizeCounter".
+    extra_counter_values: 10       # Absolute: 2040.
+    extra_counter_values: 1024     # Absolute: 1024.
+  }
+}
+
+# Sequence 3 defaults to track for "t4" and uses legacy thread time and
+# instruction count.
+packet {
+  trusted_packet_sequence_id: 3
+  timestamp: 0
+  incremental_state_cleared: true
+  track_descriptor {
+    uuid: 4
+    parent_uuid: 3
+    thread {
+      pid: 5
+      tid: 4
+      thread_name: "t4"
+    }
+  }
+  trace_packet_defaults {
+    track_event_defaults {
+      track_uuid: 4
+    }
+  }
+}
+
+packet {
+  trusted_packet_sequence_id: 3
+  timestamp: 4000
+  track_event {
+    categories: "cat"
+    name: "event1_on_t3"
+    type: 1                        # TYPE_SLICE_BEGIN.
+    thread_time_absolute_us: 10
+    thread_instruction_count_absolute: 20
+  }
+}
+
+packet {
+  trusted_packet_sequence_id: 3
+  timestamp: 4100
+  track_event {
+    categories: "cat"
+    name: "event1_on_t3"
+    type: 2                        # TYPE_SLICE_END.
+    thread_time_absolute_us: 15
+    thread_instruction_count_absolute: 25
+  }
+}
+
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 4200
+  incremental_state_cleared: true
+  track_descriptor {
+    uuid: 12
+    name: "MyDoubleCounter"
+    counter {
+    }
+  }
+}
+
+# Floating point counter value.
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 4200
+  track_event {
+    track_uuid: 12                   # "MyDoubleCounter".
+    type: 4                          # TYPE_COUNTER.
+    double_counter_value: 3.1415926  # Floating point.
+  }
+}
+
+# Floating point extra counter value.
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 4300
+  track_event {
+    categories: "cat"
+    name: "float_counter_on_t1"
+    type: 3                               # TYPE_INSTANT.
+    extra_double_counter_track_uuids: 12  # "MyDoubleCounter".
+    extra_double_counter_values: 0.5
+  }
+}
+
+# Floating point extra counter value with sequence defaults (used together with
+# an integer counter).
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 4400
+  trace_packet_defaults {
+    track_event_defaults {
+      track_uuid: 1
+      extra_counter_track_uuids: 11         # "MySizeCounter"
+      extra_double_counter_track_uuids: 12  # "MyDoubleCounter"
+    }
+  }
+}
+
+packet {
+  trusted_packet_sequence_id: 1
+  sequence_flags: 2  # SEQ_NEEDS_INCREMENTAL_STATE
+  timestamp: 4500
+  track_event {
+    categories: "cat"
+    name: "float_counter_on_t1"
+    type: 3                        # TYPE_INSTANT.
+    extra_counter_values: 4096
+    extra_double_counter_values: 2.71828
+  }
+}
\ No newline at end of file
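For reference, a minimal sketch of the delta decoding the comments in the new textproto describe: on a given sequence, each value for an is_incremental counter adds onto the previous absolute value, and the running total restarts at 0 whenever incremental_state_cleared is set. The struct and values below are illustrative only (the reset is folded into the packet that follows it); this is not the trace processor's actual implementation.

// Illustrative sketch: resolving delta-encoded values for counter "c1" above
// into the absolute values noted in the packet comments.
#include <cstdint>
#include <iostream>
#include <vector>

struct CounterPacket {
  bool incremental_state_cleared;  // Restarts the running total when true.
  int64_t value;                   // Delta for an is_incremental counter.
};

int main() {
  // Values taken from the "c1" packets above; the incremental-state reset is
  // attached to the first value emitted after it.
  std::vector<CounterPacket> packets = {
      {true, 1000}, {false, 10}, {true, 2000},
      {false, 10},  {false, 10}, {false, 10}};
  int64_t absolute = 0;
  for (const CounterPacket& p : packets) {
    if (p.incremental_state_cleared) absolute = 0;  // Counter restarts at 0.
    absolute += p.value;  // Delta encoding: each value adds onto the total.
    std::cout << absolute << "\n";  // 1000, 1010, 2000, 2010, 2020, 2030.
  }
  return 0;
}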
diff --git a/base/traits_bag.h b/base/traits_bag.h
index b8f9d9d..cbf9d95 100644
--- a/base/traits_bag.h
+++ b/base/traits_bag.h
@@ -111,7 +111,7 @@
 template <class TraitFilterType,
           class ArgType,
           class CheckArgumentIsCompatible = std::enable_if_t<
-              std::is_constructible<TraitFilterType, ArgType>::value>>
+              std::is_constructible_v<TraitFilterType, ArgType>>>
 constexpr TraitFilterType GetTraitFromArg(CallFirstTag, ArgType arg) {
   return TraitFilterType(arg);
 }
@@ -127,8 +127,8 @@
 // disambiguation tag.
 template <class TraitFilterType,
           class... ArgTypes,
-          class TestCompatibleArgument = std::enable_if_t<any_of(
-              {std::is_constructible<TraitFilterType, ArgTypes>::value...})>>
+          class TestCompatibleArgument = std::enable_if_t<
+              any_of({std::is_constructible_v<TraitFilterType, ArgTypes>...})>>
 constexpr TraitFilterType GetTraitFromArgListImpl(CallFirstTag,
                                                   ArgTypes... args) {
   return std::get<TraitFilterType>(std::make_tuple(
@@ -138,7 +138,7 @@
 template <class TraitFilterType, class... ArgTypes>
 constexpr TraitFilterType GetTraitFromArgListImpl(CallSecondTag,
                                                   ArgTypes... args) {
-  static_assert(std::is_constructible<TraitFilterType>::value,
+  static_assert(std::is_constructible_v<TraitFilterType>,
                 "The traits bag is missing a required trait.");
   return TraitFilterType();
 }
@@ -151,8 +151,7 @@
 constexpr typename TraitFilterType::ValueType GetTraitFromArgList(
     ArgTypes... args) {
   static_assert(
-      count({std::is_constructible<TraitFilterType, ArgTypes>::value...},
-            true) <= 1,
+      count({std::is_constructible_v<TraitFilterType, ArgTypes>...}, true) <= 1,
       "The traits bag contains multiple traits of the same type.");
   return GetTraitFromArgListImpl<TraitFilterType>(CallFirstTag(), args...);
 }
@@ -239,9 +238,8 @@
 // Helper to make checking for the presence of a trait more readable.
 template <typename Trait, typename... Args>
 struct HasTrait : ParameterPack<Args...>::template HasType<Trait> {
-  static_assert(
-      count({std::is_constructible<Trait, Args>::value...}, true) <= 1,
-      "The traits bag contains multiple traits of the same type.");
+  static_assert(count({std::is_constructible_v<Trait, Args>...}, true) <= 1,
+                "The traits bag contains multiple traits of the same type.");
 };
 
 // If you need a template vararg constructor to delegate to a private
diff --git a/base/traits_bag_unittest.cc b/base/traits_bag_unittest.cc
index 4d141fe..b4b11c2 100644
--- a/base/traits_bag_unittest.cc
+++ b/base/traits_bag_unittest.cc
@@ -180,28 +180,25 @@
 
 TEST(TraitsBagTest, Filtering) {
   using Predicate = Exclude<ExampleTrait, EnumTraitA>;
-  static_assert(
-      std::is_same<ExampleTrait2,
-                   decltype(Predicate::Filter(ExampleTrait2{}))>::value,
-      "ExampleTrait2 should not be filtered");
+  static_assert(std::is_same_v<ExampleTrait2,
+                               decltype(Predicate::Filter(ExampleTrait2{}))>,
+                "ExampleTrait2 should not be filtered");
 
   static_assert(
-      std::is_same<EmptyTrait,
-                   decltype(Predicate::Filter(ExampleTrait{}))>::value,
+      std::is_same_v<EmptyTrait, decltype(Predicate::Filter(ExampleTrait{}))>,
       "ExampleTrait should be filtered");
 
-  static_assert(std::is_same<EmptyTrait,
-                             decltype(Predicate::Filter(EnumTraitA::A))>::value,
-                "EnumTraitA should be filtered");
+  static_assert(
+      std::is_same_v<EmptyTrait, decltype(Predicate::Filter(EnumTraitA::A))>,
+      "EnumTraitA should be filtered");
 
   static_assert(
-      std::is_same<EnumTraitB,
-                   decltype(Predicate::Filter(EnumTraitB::TWO))>::value,
+      std::is_same_v<EnumTraitB, decltype(Predicate::Filter(EnumTraitB::TWO))>,
       "EnumTraitB should not be filtered");
 
-  static_assert(std::is_same<EmptyTrait,
-                             decltype(Predicate::Filter(EmptyTrait{}))>::value,
-                "EmptyTrait should not be filtered");
+  static_assert(
+      std::is_same_v<EmptyTrait, decltype(Predicate::Filter(EmptyTrait{}))>,
+      "EmptyTrait should not be filtered");
 }
 
 TEST(TraitsBagTest, FilteredTestTraits) {
diff --git a/base/tuple.h b/base/tuple.h
index 525f4f7..44d992b 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -56,7 +56,7 @@
 inline void DispatchToMethod(const ObjT& obj,
                              Method method,
                              Tuple&& args) {
-  constexpr size_t size = std::tuple_size<std::decay_t<Tuple>>::value;
+  constexpr size_t size = std::tuple_size_v<std::decay_t<Tuple>>;
   DispatchToMethodImpl(obj, method, std::forward<Tuple>(args),
                        std::make_index_sequence<size>());
 }
@@ -72,7 +72,7 @@
 
 template <typename Function, typename Tuple>
 inline void DispatchToFunction(Function function, Tuple&& args) {
-  constexpr size_t size = std::tuple_size<std::decay_t<Tuple>>::value;
+  constexpr size_t size = std::tuple_size_v<std::decay_t<Tuple>>;
   DispatchToFunctionImpl(function, std::forward<Tuple>(args),
                          std::make_index_sequence<size>());
 }
@@ -100,8 +100,8 @@
                              Method method,
                              InTuple&& in,
                              OutTuple* out) {
-  constexpr size_t in_size = std::tuple_size<std::decay_t<InTuple>>::value;
-  constexpr size_t out_size = std::tuple_size<OutTuple>::value;
+  constexpr size_t in_size = std::tuple_size_v<std::decay_t<InTuple>>;
+  constexpr size_t out_size = std::tuple_size_v<OutTuple>;
   DispatchToMethodImpl(obj, method, std::forward<InTuple>(in), out,
                        std::make_index_sequence<in_size>(),
                        std::make_index_sequence<out_size>());
diff --git a/base/types/DEPS b/base/types/DEPS
new file mode 100644
index 0000000..2d16b79
--- /dev/null
+++ b/base/types/DEPS
@@ -0,0 +1,6 @@
+specific_include_rules = {
+  # Provides the canonical access point for this type
+  "fixed_array.h": [
+    "+third_party/abseil-cpp/absl/container/fixed_array.h",
+  ],
+}
diff --git a/base/types/cxx23_to_underlying_unittest.cc b/base/types/cxx23_to_underlying_unittest.cc
index 1cb1970..0c03950 100644
--- a/base/types/cxx23_to_underlying_unittest.cc
+++ b/base/types/cxx23_to_underlying_unittest.cc
@@ -18,15 +18,15 @@
     kTwo = 2,
   };
 
-  static_assert(std::is_same<decltype(to_underlying(kOne)), int>::value, "");
-  static_assert(std::is_same<decltype(to_underlying(kTwo)), int>::value, "");
+  static_assert(std::is_same_v<decltype(to_underlying(kOne)), int>, "");
+  static_assert(std::is_same_v<decltype(to_underlying(kTwo)), int>, "");
   static_assert(to_underlying(kOne) == 1, "");
   static_assert(to_underlying(kTwo) == 2, "");
 
-  static_assert(
-      std::is_same<decltype(to_underlying(ScopedEnum::kOne)), char>::value, "");
-  static_assert(
-      std::is_same<decltype(to_underlying(ScopedEnum::kTwo)), char>::value, "");
+  static_assert(std::is_same_v<decltype(to_underlying(ScopedEnum::kOne)), char>,
+                "");
+  static_assert(std::is_same_v<decltype(to_underlying(ScopedEnum::kTwo)), char>,
+                "");
   static_assert(to_underlying(ScopedEnum::kOne) == 1, "");
   static_assert(to_underlying(ScopedEnum::kTwo) == 2, "");
 }
diff --git a/base/types/expected_unittest.cc b/base/types/expected_unittest.cc
index f21bcd0..1f8ebeb 100644
--- a/base/types/expected_unittest.cc
+++ b/base/types/expected_unittest.cc
@@ -871,7 +871,7 @@
   EXPECT_NE(unexpected(123), ExInt(123));
 }
 
-TEST(ExpectedTest, DeathTests) {
+TEST(ExpectedDeathTest, UseAfterMove) {
   using ExpectedInt = expected<int, int>;
   using ExpectedDouble = expected<double, double>;
 
diff --git a/base/types/fixed_array.h b/base/types/fixed_array.h
new file mode 100644
index 0000000..1ca3956
--- /dev/null
+++ b/base/types/fixed_array.h
@@ -0,0 +1,50 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TYPES_FIXED_ARRAY_H_
+#define BASE_TYPES_FIXED_ARRAY_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <type_traits>
+
+#include "third_party/abseil-cpp/absl/container/fixed_array.h"
+
+namespace base {
+
+// `FixedArray` provides `absl::FixedArray` in Chromium, but when `T` is
+// trivially-default-constructible, forces the no-default-value constructor to
+// initialize the elements to `T()`, instead of leaving them uninitialized. This
+// makes `base::FixedArray` behave like `std::vector` instead of `std::array`
+// and avoids the risk of UB.
+
+// Trivially-default-constructible case: no-value constructor should init
+template <typename T,
+          size_t N = absl::kFixedArrayUseDefault,
+          typename A = std::allocator<T>,
+          typename = void>
+class FixedArray : public absl::FixedArray<T, N, A> {
+ public:
+  using absl::FixedArray<T, N, A>::FixedArray;
+  explicit FixedArray(absl::FixedArray<T, N, A>::size_type n,
+                      const absl::FixedArray<T, N, A>::allocator_type& a =
+                          typename absl::FixedArray<T, N, A>::allocator_type())
+      : FixedArray(n, T(), a) {}
+};
+
+// Non-trivially-default-constructible case: Pass through all constructors
+template <typename T, size_t N, typename A>
+struct FixedArray<
+    T,
+    N,
+    A,
+    std::enable_if_t<!std::is_trivially_default_constructible_v<T>>>
+    : public absl::FixedArray<T, N, A> {
+  using absl::FixedArray<T, N, A>::FixedArray;
+};
+
+}  // namespace base
+
+#endif  // BASE_TYPES_FIXED_ARRAY_H_
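A short usage sketch of the behavior the header comment above describes (illustrative only; the real coverage is the unit test in the next file): elements of a trivially-default-constructible T are value-initialized, while other element types pass straight through to absl::FixedArray.

#include "base/types/fixed_array.h"

#include <string>

void Example() {
  // Trivially default-constructible element type: elements are initialized to
  // T() (here 0), instead of being left uninitialized as absl::FixedArray
  // would leave them.
  base::FixedArray<int> ints(4);   // ints[0] == 0, ..., ints[3] == 0.

  // Non-trivially-default-constructible element type: behaves exactly like
  // absl::FixedArray and default-constructs each element.
  base::FixedArray<std::string> strings(4);  // All four strings are empty.
}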
diff --git a/base/types/fixed_array_unittest.cc b/base/types/fixed_array_unittest.cc
new file mode 100644
index 0000000..4d0b5f2
--- /dev/null
+++ b/base/types/fixed_array_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/types/fixed_array.h"
+
+#include <stddef.h>
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(FixedArrayTest, TriviallyDefaultConstructibleInitializes) {
+  using T = int;
+  static_assert(std::is_trivially_default_constructible_v<T>);
+  using Array = FixedArray<T, 1>;
+
+  // First try an array on the stack.
+  Array stack_array(1);
+  // This read and the one below are UB if `FixedArray` does not initialize the
+  // elements; even if the compiler happens to zero the memory anyway, the test
+  // should still fail under MemorySanitizer.
+  EXPECT_EQ(0, stack_array[0]);
+
+  // Now try an array placement-newed into reused stack storage that we've
+  // purposefully filled with a non-zero bit pattern, in hopes of increasing
+  // the chance of catching incorrect behavior.
+  constexpr size_t kSize = sizeof(Array);
+  alignas(Array) char storage[kSize];
+  std::memset(storage, 0xAA, kSize);
+  Array* placement_new_array = new (storage) Array(1);
+  EXPECT_EQ(0, (*placement_new_array)[0]);
+  placement_new_array->~Array();
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/types/id_type.h b/base/types/id_type.h
index e179f09..63e7c6c 100644
--- a/base/types/id_type.h
+++ b/base/types/id_type.h
@@ -8,6 +8,7 @@
 #include <cstdint>
 #include <type_traits>
 
+#include "base/ranges/algorithm.h"
 #include "base/types/strong_alias.h"
 
 namespace base {
@@ -32,7 +33,7 @@
 //   IdType32<T> / IdTypeU32<T>: Signed / unsigned 32-bit IDs
 //   IdType64<T> / IdTypeU64<T>: Signed / unsigned 64-bit IDs
 //   IdType<>: For when you need a different underlying type or
-//             a default/null value other than zero.
+//             default/null values other than zero.
 //
 // IdType32<Foo> behaves just like an int32_t in the following aspects:
 // - it can be used as a key in std::map;
@@ -48,25 +49,37 @@
 // - it restricts the set of available operations (e.g. no multiplication);
 // - it default-constructs to a null value and allows checking against the null
 //   value via is_null method.
+// - optionally it may have additional values that are all considered null.
 template <typename TypeMarker,
           typename WrappedType,
           WrappedType kInvalidValue,
-          WrappedType kFirstGeneratedId = kInvalidValue + 1>
+          WrappedType kFirstGeneratedId = kInvalidValue + 1,
+          WrappedType... kExtraInvalidValues>
 class IdType : public StrongAlias<TypeMarker, WrappedType> {
  public:
-  static_assert(
-      std::is_unsigned<WrappedType>::value || kInvalidValue <= 0,
-      "If signed, the invalid value should be negative or equal to zero to "
-      "avoid overflow issues.");
+  static constexpr WrappedType kAllInvalidValues[] = {kInvalidValue,
+                                                      kExtraInvalidValues...};
 
-  static_assert(kFirstGeneratedId != kInvalidValue,
+  static_assert(std::is_unsigned_v<WrappedType> ||
+                    base::ranges::all_of(kAllInvalidValues,
+                                         [](WrappedType v) { return v <= 0; }),
+                "If signed, invalid values should be negative or equal to zero "
+                "to avoid overflow issues.");
+
+  static_assert(base::ranges::all_of(kAllInvalidValues,
+                                     [](WrappedType v) {
+                                       return kFirstGeneratedId != v;
+                                     }),
                 "The first generated ID cannot be invalid.");
 
-  static_assert(std::is_unsigned<WrappedType>::value ||
-                    kFirstGeneratedId > kInvalidValue,
-                "If signed, the first generated ID must be greater than the "
-                "invalid value so that the monotonically increasing "
-                "GenerateNextId method will never return the invalid value.");
+  static_assert(std::is_unsigned_v<WrappedType> ||
+                    base::ranges::all_of(kAllInvalidValues,
+                                         [](WrappedType v) {
+                                           return kFirstGeneratedId > v;
+                                         }),
+                "If signed, the first generated ID must be greater than all "
+                "invalid values so that the monotonically increasing "
+                "GenerateNextId method will never return an invalid value.");
 
   using StrongAlias<TypeMarker, WrappedType>::StrongAlias;
 
@@ -91,7 +104,12 @@
   constexpr IdType()
       : StrongAlias<TypeMarker, WrappedType>::StrongAlias(kInvalidValue) {}
 
-  constexpr bool is_null() const { return this->value() == kInvalidValue; }
+  constexpr bool is_null() const {
+    return base::ranges::any_of(kAllInvalidValues, [this](WrappedType value) {
+      return this->value() == value;
+    });
+  }
+
   constexpr explicit operator bool() const { return !is_null(); }
 
   // TODO(mpawlowski) Replace these with constructor/value() getter. The
diff --git a/base/types/id_type_unittest.cc b/base/types/id_type_unittest.cc
index 7be350b..636d280 100644
--- a/base/types/id_type_unittest.cc
+++ b/base/types/id_type_unittest.cc
@@ -14,16 +14,30 @@
 class Foo;
 using FooId = IdType<Foo, int, 0>;
 
+// A type that uses both 0 and -1 as invalid values.
+using MultipleInvalidId = IdType<class MultipleInvalid, int, 0, 1, -1>;
+
 }  // namespace
 
 TEST(IdType, DefaultValueIsInvalid) {
   FooId foo_id;
   EXPECT_TRUE(foo_id.is_null());
+
+  MultipleInvalidId multi_id;
+  EXPECT_TRUE(multi_id.is_null());
 }
 
 TEST(IdType, NormalValueIsValid) {
   FooId foo_id = FooId::FromUnsafeValue(123);
   EXPECT_FALSE(foo_id.is_null());
+
+  MultipleInvalidId multi_id = MultipleInvalidId::FromUnsafeValue(123);
+  EXPECT_FALSE(multi_id.is_null());
+}
+
+TEST(IdType, ExtraInvalidValue) {
+  MultipleInvalidId multi_id = MultipleInvalidId::FromUnsafeValue(-1);
+  EXPECT_TRUE(multi_id.is_null());
 }
 
 TEST(IdType, Generator) {
@@ -73,13 +87,23 @@
   static_assert(kZero.GetUnsafeValue() == 0, "");
   static_assert(kOne.GetUnsafeValue() == 1, "");
 
+  static constexpr MultipleInvalidId kMultiZero;
+  static constexpr auto kMultiNegative = MultipleInvalidId::FromUnsafeValue(-1);
+  static constexpr auto kMultiOne = MultipleInvalidId::FromUnsafeValue(1);
+
   // Test is_null().
   static_assert(kZero.is_null(), "");
   static_assert(!kOne.is_null(), "");
+  static_assert(kMultiZero.is_null(), "");
+  static_assert(kMultiNegative.is_null(), "");
+  static_assert(!kMultiOne.is_null(), "");
 
   // Test operator bool.
   static_assert(!kZero, "");
   static_assert(kOne, "");
+  static_assert(!kMultiZero, "");
+  static_assert(!kMultiNegative, "");
+  static_assert(kMultiOne, "");
 }
 
 class IdTypeSpecificValueTest : public ::testing::TestWithParam<int> {
diff --git a/base/types/optional_ref.h b/base/types/optional_ref.h
index 1d69f35..ce26f8a 100644
--- a/base/types/optional_ref.h
+++ b/base/types/optional_ref.h
@@ -6,6 +6,7 @@
 #define BASE_TYPES_OPTIONAL_REF_H_
 
 #include <memory>
+#include <optional>
 #include <type_traits>
 
 #include "base/check.h"
@@ -200,6 +201,16 @@
 template <typename T>
 optional_ref(T*) -> optional_ref<T>;
 
+template <typename T>
+constexpr bool operator==(std::nullopt_t, optional_ref<T> x) {
+  return !x.has_value();
+}
+
+template <typename T>
+constexpr bool operator==(optional_ref<T> x, std::nullopt_t) {
+  return !x.has_value();
+}
+
 }  // namespace base
 
 #endif  // BASE_TYPES_OPTIONAL_REF_H_
diff --git a/base/types/optional_ref_unittest.cc b/base/types/optional_ref_unittest.cc
index 2cfd7c1..07ef2c7 100644
--- a/base/types/optional_ref_unittest.cc
+++ b/base/types/optional_ref_unittest.cc
@@ -5,6 +5,7 @@
 #include "base/types/optional_ref.h"
 
 #include <cstddef>
+#include <optional>
 #include <type_traits>
 #include <utility>
 
@@ -379,6 +380,21 @@
   EXPECT_EQ(6, o2);
 }
 
+TEST(OptionalRefTest, EqualityComparisonWithNullOpt) {
+  {
+    optional_ref<int> r;
+    EXPECT_EQ(r, std::nullopt);
+    EXPECT_EQ(std::nullopt, r);
+  }
+
+  {
+    int value = 5;
+    optional_ref<int> r(value);
+    EXPECT_NE(r, std::nullopt);
+    EXPECT_NE(std::nullopt, r);
+  }
+}
+
 TEST(OptionalRefDeathTest, ArrowOnEmpty) {
   [](optional_ref<const TestClass> r) {
     EXPECT_CHECK_DEATH(r->ConstMethod());
diff --git a/base/types/optional_unittest.cc b/base/types/optional_unittest.cc
index f890129..6aac2c7 100644
--- a/base/types/optional_unittest.cc
+++ b/base/types/optional_unittest.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include <memory>
+#include <optional>
 #include <set>
 #include <string>
 #include <type_traits>
@@ -12,8 +13,6 @@
 #include "base/test/gtest_util.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
-#include "third_party/abseil-cpp/absl/utility/utility.h"
 
 using ::testing::ElementsAre;
 
@@ -21,7 +20,7 @@
 
 namespace {
 
-// Object used to test complex object with absl::optional<T> in addition of the
+// Object used to test complex object with std::optional<T> in addition of the
 // move semantics.
 class TestObject {
  public:
@@ -205,34 +204,34 @@
 
 }  // anonymous namespace
 
-static_assert(std::is_trivially_destructible<absl::optional<int>>::value,
+static_assert(std::is_trivially_destructible_v<std::optional<int>>,
               "OptionalIsTriviallyDestructible");
 
-static_assert(!std::is_trivially_destructible<
-                  absl::optional<NonTriviallyDestructible>>::value,
-              "OptionalIsTriviallyDestructible");
+static_assert(
+    !std::is_trivially_destructible_v<std::optional<NonTriviallyDestructible>>,
+    "OptionalIsTriviallyDestructible");
 
 TEST(OptionalTest, DefaultConstructor) {
   {
-    constexpr absl::optional<float> o;
+    constexpr std::optional<float> o;
     EXPECT_FALSE(o);
   }
 
   {
-    absl::optional<std::string> o;
+    std::optional<std::string> o;
     EXPECT_FALSE(o);
   }
 
   {
-    absl::optional<TestObject> o;
+    std::optional<TestObject> o;
     EXPECT_FALSE(o);
   }
 }
 
 TEST(OptionalTest, CopyConstructor) {
   {
-    constexpr absl::optional<float> first(0.1f);
-    constexpr absl::optional<float> other(first);
+    constexpr std::optional<float> first(0.1f);
+    constexpr std::optional<float> other(first);
 
     EXPECT_TRUE(other);
     EXPECT_EQ(other.value(), 0.1f);
@@ -240,8 +239,8 @@
   }
 
   {
-    absl::optional<std::string> first("foo");
-    absl::optional<std::string> other(first);
+    std::optional<std::string> first("foo");
+    std::optional<std::string> other(first);
 
     EXPECT_TRUE(other);
     EXPECT_EQ(other.value(), "foo");
@@ -249,8 +248,8 @@
   }
 
   {
-    const absl::optional<std::string> first("foo");
-    absl::optional<std::string> other(first);
+    const std::optional<std::string> first("foo");
+    std::optional<std::string> other(first);
 
     EXPECT_TRUE(other);
     EXPECT_EQ(other.value(), "foo");
@@ -258,8 +257,8 @@
   }
 
   {
-    absl::optional<TestObject> first(TestObject(3, 0.1));
-    absl::optional<TestObject> other(first);
+    std::optional<TestObject> first(TestObject(3, 0.1));
+    std::optional<TestObject> other(first);
 
     EXPECT_TRUE(!!other);
     EXPECT_TRUE(other.value() == TestObject(3, 0.1));
@@ -270,7 +269,7 @@
 TEST(OptionalTest, ValueConstructor) {
   {
     constexpr float value = 0.1f;
-    constexpr absl::optional<float> o(value);
+    constexpr std::optional<float> o(value);
 
     EXPECT_TRUE(o);
     EXPECT_EQ(value, o.value());
@@ -278,7 +277,7 @@
 
   {
     std::string value("foo");
-    absl::optional<std::string> o(value);
+    std::optional<std::string> o(value);
 
     EXPECT_TRUE(o);
     EXPECT_EQ(value, o.value());
@@ -286,7 +285,7 @@
 
   {
     TestObject value(3, 0.1);
-    absl::optional<TestObject> o(value);
+    std::optional<TestObject> o(value);
 
     EXPECT_TRUE(o);
     EXPECT_EQ(TestObject::State::COPY_CONSTRUCTED, o->state());
@@ -295,9 +294,10 @@
 }
 
 TEST(OptionalTest, MoveConstructor) {
+  // NOLINTBEGIN(bugprone-use-after-move)
   {
-    constexpr absl::optional<float> first(0.1f);
-    constexpr absl::optional<float> second(std::move(first));
+    constexpr std::optional<float> first(0.1f);
+    constexpr std::optional<float> second(std::move(first));
 
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(second.value(), 0.1f);
@@ -306,8 +306,8 @@
   }
 
   {
-    absl::optional<std::string> first("foo");
-    absl::optional<std::string> second(std::move(first));
+    std::optional<std::string> first("foo");
+    std::optional<std::string> second(std::move(first));
 
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ("foo", second.value());
@@ -316,8 +316,8 @@
   }
 
   {
-    absl::optional<TestObject> first(TestObject(3, 0.1));
-    absl::optional<TestObject> second(std::move(first));
+    std::optional<TestObject> first(TestObject(3, 0.1));
+    std::optional<TestObject> second(std::move(first));
 
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
@@ -330,8 +330,8 @@
   // Even if copy constructor is deleted, move constructor needs to work.
   // Note that it couldn't be constexpr.
   {
-    absl::optional<DeletedCopy> first(absl::in_place, 42);
-    absl::optional<DeletedCopy> second(std::move(first));
+    std::optional<DeletedCopy> first(std::in_place, 42);
+    std::optional<DeletedCopy> second(std::move(first));
 
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(42, second->foo());
@@ -340,8 +340,8 @@
   }
 
   {
-    absl::optional<DeletedMove> first(absl::in_place, 42);
-    absl::optional<DeletedMove> second(std::move(first));
+    std::optional<DeletedMove> first(std::in_place, 42);
+    std::optional<DeletedMove> second(std::move(first));
 
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(42, second->foo());
@@ -350,9 +350,9 @@
   }
 
   {
-    absl::optional<NonTriviallyDestructibleDeletedCopyConstructor> first(
-        absl::in_place, 42);
-    absl::optional<NonTriviallyDestructibleDeletedCopyConstructor> second(
+    std::optional<NonTriviallyDestructibleDeletedCopyConstructor> first(
+        std::in_place, 42);
+    std::optional<NonTriviallyDestructibleDeletedCopyConstructor> second(
         std::move(first));
 
     EXPECT_TRUE(second.has_value());
@@ -360,12 +360,13 @@
 
     EXPECT_TRUE(first.has_value());
   }
+  // NOLINTEND(bugprone-use-after-move)
 }
 
 TEST(OptionalTest, MoveValueConstructor) {
   {
     constexpr float value = 0.1f;
-    constexpr absl::optional<float> o(std::move(value));
+    constexpr std::optional<float> o(std::move(value));
 
     EXPECT_TRUE(o);
     EXPECT_EQ(0.1f, o.value());
@@ -373,7 +374,7 @@
 
   {
     float value = 0.1f;
-    absl::optional<float> o(std::move(value));
+    std::optional<float> o(std::move(value));
 
     EXPECT_TRUE(o);
     EXPECT_EQ(0.1f, o.value());
@@ -381,7 +382,7 @@
 
   {
     std::string value("foo");
-    absl::optional<std::string> o(std::move(value));
+    std::optional<std::string> o(std::move(value));
 
     EXPECT_TRUE(o);
     EXPECT_EQ("foo", o.value());
@@ -389,7 +390,7 @@
 
   {
     TestObject value(3, 0.1);
-    absl::optional<TestObject> o(std::move(value));
+    std::optional<TestObject> o(std::move(value));
 
     EXPECT_TRUE(o);
     EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, o->state());
@@ -399,26 +400,26 @@
 
 TEST(OptionalTest, ConvertingCopyConstructor) {
   {
-    absl::optional<int> first(1);
-    absl::optional<double> second(first);
+    std::optional<int> first(1);
+    std::optional<double> second(first);
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(1.0, second.value());
   }
 
   // Make sure the constructor is not marked explicit for the convertible case.
-  { [[maybe_unused]] absl::optional<int> o(1); }
+  { [[maybe_unused]] std::optional<int> o(1); }
 }
 
 TEST(OptionalTest, ConvertingMoveConstructor) {
   {
-    absl::optional<int> first(1);
-    absl::optional<double> second(std::move(first));
+    std::optional<int> first(1);
+    std::optional<double> second(std::move(first));
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(1.0, second.value());
   }
 
   // Make sure the constructor is not marked explicit for the convertible case.
-  { [[maybe_unused]] absl::optional<int> o(1); }
+  { [[maybe_unused]] std::optional<int> o(1); }
 
   {
     class Test1 {
@@ -443,8 +444,8 @@
       double bar_;
     };
 
-    absl::optional<Test1> first(absl::in_place, 42);
-    absl::optional<Test2> second(std::move(first));
+    std::optional<Test1> first(std::in_place, 42);
+    std::optional<Test2> second(std::move(first));
     EXPECT_TRUE(second.has_value());
     EXPECT_EQ(42.0, second->bar());
   }
@@ -452,25 +453,25 @@
 
 TEST(OptionalTest, ConstructorForwardArguments) {
   {
-    constexpr absl::optional<float> a(absl::in_place, 0.1f);
+    constexpr std::optional<float> a(std::in_place, 0.1f);
     EXPECT_TRUE(a);
     EXPECT_EQ(0.1f, a.value());
   }
 
   {
-    absl::optional<float> a(absl::in_place, 0.1f);
+    std::optional<float> a(std::in_place, 0.1f);
     EXPECT_TRUE(a);
     EXPECT_EQ(0.1f, a.value());
   }
 
   {
-    absl::optional<std::string> a(absl::in_place, "foo");
+    std::optional<std::string> a(std::in_place, "foo");
     EXPECT_TRUE(a);
     EXPECT_EQ("foo", a.value());
   }
 
   {
-    absl::optional<TestObject> a(absl::in_place, 0, 0.1);
+    std::optional<TestObject> a(std::in_place, 0, 0.1);
     EXPECT_TRUE(!!a);
     EXPECT_TRUE(TestObject(0, 0.1) == a.value());
   }
@@ -478,15 +479,15 @@
 
 TEST(OptionalTest, ConstructorForwardInitListAndArguments) {
   {
-    absl::optional<std::vector<int>> opt(absl::in_place, {3, 1});
+    std::optional<std::vector<int>> opt(std::in_place, {3, 1});
     EXPECT_TRUE(opt);
     EXPECT_THAT(*opt, ElementsAre(3, 1));
     EXPECT_EQ(2u, opt->size());
   }
 
   {
-    absl::optional<std::vector<int>> opt(absl::in_place, {3, 1},
-                                         std::allocator<int>());
+    std::optional<std::vector<int>> opt(std::in_place, {3, 1},
+                                        std::allocator<int>());
     EXPECT_TRUE(opt);
     EXPECT_THAT(*opt, ElementsAre(3, 1));
     EXPECT_EQ(2u, opt->size());
@@ -495,7 +496,7 @@
 
 TEST(OptionalTest, ForwardConstructor) {
   {
-    absl::optional<double> a(1);
+    std::optional<double> a(1);
     EXPECT_TRUE(a.has_value());
     EXPECT_EQ(1.0, a.value());
   }
@@ -508,16 +509,16 @@
       bool c;
     };
 
-    absl::optional<TestData> a({1, 2.0, true});
+    std::optional<TestData> a({1, 2.0, true});
     EXPECT_TRUE(a.has_value());
     EXPECT_EQ(1, a->a);
     EXPECT_EQ(2.0, a->b);
     EXPECT_TRUE(a->c);
   }
 
-  // If T has a constructor with a param absl::optional<U>, and another ctor
-  // with a param U, then T(absl::optional<U>) should be used for
-  // absl::optional<T>(absl::optional<U>) constructor.
+  // If T has a constructor with a param std::optional<U>, and another ctor
+  // with a param U, then T(std::optional<U>) should be used for
+  // std::optional<T>(std::optional<U>) constructor.
   {
     enum class ParamType {
       DEFAULT_CONSTRUCTED,
@@ -532,8 +533,8 @@
       Test(const Test& param) : param_type(ParamType::COPY_CONSTRUCTED) {}
       Test(Test&& param) : param_type(ParamType::MOVE_CONSTRUCTED) {}
       explicit Test(int param) : param_type(ParamType::INT) {}
-      explicit Test(absl::in_place_t param) : param_type(ParamType::IN_PLACE) {}
-      explicit Test(absl::optional<int> param)
+      explicit Test(std::in_place_t param) : param_type(ParamType::IN_PLACE) {}
+      explicit Test(std::optional<int> param)
           : param_type(ParamType::OPTIONAL_INT) {}
 
       ParamType param_type;
@@ -541,20 +542,20 @@
 
     // Overload resolution with copy-conversion constructor.
     {
-      const absl::optional<int> arg(absl::in_place, 1);
-      absl::optional<Test> testee(arg);
+      const std::optional<int> arg(std::in_place, 1);
+      std::optional<Test> testee(arg);
       EXPECT_EQ(ParamType::OPTIONAL_INT, testee->param_type);
     }
 
     // Overload resolution with move conversion constructor.
     {
-      absl::optional<Test> testee(absl::optional<int>(absl::in_place, 1));
+      std::optional<Test> testee(std::optional<int>(std::in_place, 1));
       EXPECT_EQ(ParamType::OPTIONAL_INT, testee->param_type);
     }
 
     // Default constructor should be used.
     {
-      absl::optional<Test> testee(absl::in_place);
+      std::optional<Test> testee(std::in_place);
       EXPECT_EQ(ParamType::DEFAULT_CONSTRUCTED, testee->param_type);
     }
   }
@@ -564,63 +565,63 @@
       Test(int a) {}  // NOLINT(runtime/explicit)
     };
     // If T is convertible from U, it is not marked as explicit.
-    static_assert(std::is_convertible<int, Test>::value,
+    static_assert(std::is_convertible_v<int, Test>,
                   "Int should be convertible to Test.");
-    ([](absl::optional<Test> param) {})(1);
+    ([](std::optional<Test> param) {})(1);
   }
 }
 
 TEST(OptionalTest, NulloptConstructor) {
-  constexpr absl::optional<int> a(absl::nullopt);
+  constexpr std::optional<int> a(std::nullopt);
   EXPECT_FALSE(a);
 }
 
 TEST(OptionalTest, AssignValue) {
   {
-    absl::optional<float> a;
+    std::optional<float> a;
     EXPECT_FALSE(a);
     a = 0.1f;
     EXPECT_TRUE(a);
 
-    absl::optional<float> b(0.1f);
+    std::optional<float> b(0.1f);
     EXPECT_TRUE(a == b);
   }
 
   {
-    absl::optional<std::string> a;
+    std::optional<std::string> a;
     EXPECT_FALSE(a);
     a = std::string("foo");
     EXPECT_TRUE(a);
 
-    absl::optional<std::string> b(std::string("foo"));
+    std::optional<std::string> b(std::string("foo"));
     EXPECT_EQ(a, b);
   }
 
   {
-    absl::optional<TestObject> a;
+    std::optional<TestObject> a;
     EXPECT_FALSE(!!a);
     a = TestObject(3, 0.1);
     EXPECT_TRUE(!!a);
 
-    absl::optional<TestObject> b(TestObject(3, 0.1));
+    std::optional<TestObject> b(TestObject(3, 0.1));
     EXPECT_TRUE(a == b);
   }
 
   {
-    absl::optional<TestObject> a = TestObject(4, 1.0);
+    std::optional<TestObject> a = TestObject(4, 1.0);
     EXPECT_TRUE(!!a);
     a = TestObject(3, 0.1);
     EXPECT_TRUE(!!a);
 
-    absl::optional<TestObject> b(TestObject(3, 0.1));
+    std::optional<TestObject> b(TestObject(3, 0.1));
     EXPECT_TRUE(a == b);
   }
 }
 
 TEST(OptionalTest, AssignObject) {
   {
-    absl::optional<float> a;
-    absl::optional<float> b(0.1f);
+    std::optional<float> a;
+    std::optional<float> b(0.1f);
     a = b;
 
     EXPECT_TRUE(a);
@@ -629,8 +630,8 @@
   }
 
   {
-    absl::optional<std::string> a;
-    absl::optional<std::string> b("foo");
+    std::optional<std::string> a;
+    std::optional<std::string> b("foo");
     a = b;
 
     EXPECT_TRUE(a);
@@ -639,8 +640,8 @@
   }
 
   {
-    absl::optional<TestObject> a;
-    absl::optional<TestObject> b(TestObject(3, 0.1));
+    std::optional<TestObject> a;
+    std::optional<TestObject> b(TestObject(3, 0.1));
     a = b;
 
     EXPECT_TRUE(!!a);
@@ -649,8 +650,8 @@
   }
 
   {
-    absl::optional<TestObject> a(TestObject(4, 1.0));
-    absl::optional<TestObject> b(TestObject(3, 0.1));
+    std::optional<TestObject> a(TestObject(4, 1.0));
+    std::optional<TestObject> b(TestObject(3, 0.1));
     a = b;
 
     EXPECT_TRUE(!!a);
@@ -659,8 +660,8 @@
   }
 
   {
-    absl::optional<DeletedMove> a(absl::in_place, 42);
-    absl::optional<DeletedMove> b;
+    std::optional<DeletedMove> a(std::in_place, 42);
+    std::optional<DeletedMove> b;
     b = a;
 
     EXPECT_TRUE(!!a);
@@ -669,8 +670,8 @@
   }
 
   {
-    absl::optional<DeletedMove> a(absl::in_place, 42);
-    absl::optional<DeletedMove> b(absl::in_place, 1);
+    std::optional<DeletedMove> a(std::in_place, 42);
+    std::optional<DeletedMove> b(std::in_place, 1);
     b = a;
 
     EXPECT_TRUE(!!a);
@@ -680,8 +681,8 @@
 
   // Converting assignment.
   {
-    absl::optional<int> a(absl::in_place, 1);
-    absl::optional<double> b;
+    std::optional<int> a(std::in_place, 1);
+    std::optional<double> b;
     b = a;
 
     EXPECT_TRUE(!!a);
@@ -691,8 +692,8 @@
   }
 
   {
-    absl::optional<int> a(absl::in_place, 42);
-    absl::optional<double> b(absl::in_place, 1);
+    std::optional<int> a(std::in_place, 42);
+    std::optional<double> b(std::in_place, 1);
     b = a;
 
     EXPECT_TRUE(!!a);
@@ -702,8 +703,8 @@
   }
 
   {
-    absl::optional<int> a;
-    absl::optional<double> b(absl::in_place, 1);
+    std::optional<int> a;
+    std::optional<double> b(std::in_place, 1);
     b = a;
     EXPECT_FALSE(!!a);
     EXPECT_FALSE(!!b);
@@ -711,9 +712,10 @@
 }
 
 TEST(OptionalTest, AssignObject_rvalue) {
+  // NOLINTBEGIN(bugprone-use-after-move)
   {
-    absl::optional<float> a;
-    absl::optional<float> b(0.1f);
+    std::optional<float> a;
+    std::optional<float> b(0.1f);
     a = std::move(b);
 
     EXPECT_TRUE(a);
@@ -722,8 +724,8 @@
   }
 
   {
-    absl::optional<std::string> a;
-    absl::optional<std::string> b("foo");
+    std::optional<std::string> a;
+    std::optional<std::string> b("foo");
     a = std::move(b);
 
     EXPECT_TRUE(a);
@@ -732,8 +734,8 @@
   }
 
   {
-    absl::optional<TestObject> a;
-    absl::optional<TestObject> b(TestObject(3, 0.1));
+    std::optional<TestObject> a;
+    std::optional<TestObject> b(TestObject(3, 0.1));
     a = std::move(b);
 
     EXPECT_TRUE(!!a);
@@ -745,8 +747,8 @@
   }
 
   {
-    absl::optional<TestObject> a(TestObject(4, 1.0));
-    absl::optional<TestObject> b(TestObject(3, 0.1));
+    std::optional<TestObject> a(TestObject(4, 1.0));
+    std::optional<TestObject> b(TestObject(3, 0.1));
     a = std::move(b);
 
     EXPECT_TRUE(!!a);
@@ -758,8 +760,8 @@
   }
 
   {
-    absl::optional<DeletedMove> a(absl::in_place, 42);
-    absl::optional<DeletedMove> b;
+    std::optional<DeletedMove> a(std::in_place, 42);
+    std::optional<DeletedMove> b;
     b = std::move(a);
 
     EXPECT_TRUE(!!a);
@@ -768,8 +770,8 @@
   }
 
   {
-    absl::optional<DeletedMove> a(absl::in_place, 42);
-    absl::optional<DeletedMove> b(absl::in_place, 1);
+    std::optional<DeletedMove> a(std::in_place, 42);
+    std::optional<DeletedMove> b(std::in_place, 1);
     b = std::move(a);
 
     EXPECT_TRUE(!!a);
@@ -779,8 +781,8 @@
 
   // Converting assignment.
   {
-    absl::optional<int> a(absl::in_place, 1);
-    absl::optional<double> b;
+    std::optional<int> a(std::in_place, 1);
+    std::optional<double> b;
     b = std::move(a);
 
     EXPECT_TRUE(!!a);
@@ -789,8 +791,8 @@
   }
 
   {
-    absl::optional<int> a(absl::in_place, 42);
-    absl::optional<double> b(absl::in_place, 1);
+    std::optional<int> a(std::in_place, 42);
+    std::optional<double> b(std::in_place, 1);
     b = std::move(a);
 
     EXPECT_TRUE(!!a);
@@ -799,37 +801,38 @@
   }
 
   {
-    absl::optional<int> a;
-    absl::optional<double> b(absl::in_place, 1);
+    std::optional<int> a;
+    std::optional<double> b(std::in_place, 1);
     b = std::move(a);
 
     EXPECT_FALSE(!!a);
     EXPECT_FALSE(!!b);
   }
+  // NOLINTEND(bugprone-use-after-move)
 }
 
 TEST(OptionalTest, AssignNull) {
   {
-    absl::optional<float> a(0.1f);
-    absl::optional<float> b(0.2f);
-    a = absl::nullopt;
-    b = absl::nullopt;
+    std::optional<float> a(0.1f);
+    std::optional<float> b(0.2f);
+    a = std::nullopt;
+    b = std::nullopt;
     EXPECT_EQ(a, b);
   }
 
   {
-    absl::optional<std::string> a("foo");
-    absl::optional<std::string> b("bar");
-    a = absl::nullopt;
-    b = absl::nullopt;
+    std::optional<std::string> a("foo");
+    std::optional<std::string> b("bar");
+    a = std::nullopt;
+    b = std::nullopt;
     EXPECT_EQ(a, b);
   }
 
   {
-    absl::optional<TestObject> a(TestObject(3, 0.1));
-    absl::optional<TestObject> b(TestObject(4, 1.0));
-    a = absl::nullopt;
-    b = absl::nullopt;
+    std::optional<TestObject> a(TestObject(3, 0.1));
+    std::optional<TestObject> b(TestObject(4, 1.0));
+    a = std::nullopt;
+    b = std::nullopt;
     EXPECT_TRUE(a == b);
   }
 }
@@ -843,7 +846,7 @@
     State state = State::CONSTRUCTED;
   };
 
-  // Here, absl::optional<Test2> can be assigned from absl::optional<Test1>.  In
+  // Here, std::optional<Test2> can be assigned from std::optional<Test1>.  In
   // case of a move, the Test1 instance is marked as MOVED.
   struct Test2 {
     enum class State {
@@ -874,8 +877,8 @@
   };
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test2> b;
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test2> b;
 
     b = a;
     EXPECT_TRUE(!!a);
@@ -885,8 +888,8 @@
   }
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test2> b(absl::in_place);
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test2> b(std::in_place);
 
     b = a;
     EXPECT_TRUE(!!a);
@@ -896,8 +899,8 @@
   }
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test2> b;
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test2> b;
 
     b = std::move(a);
     EXPECT_TRUE(!!a);
@@ -907,8 +910,8 @@
   }
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test2> b(absl::in_place);
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test2> b(std::in_place);
 
     b = std::move(a);
     EXPECT_TRUE(!!a);
@@ -918,10 +921,10 @@
   }
 
   // Similar to Test2, but Test3 also has copy/move ctor and assign operators
-  // from absl::optional<Test1>, too. In this case, for a = b where a is
-  // absl::optional<Test3> and b is absl::optional<Test1>,
-  // absl::optional<T>::operator=(U&&) where U is absl::optional<Test1> should
-  // be used rather than absl::optional<T>::operator=(absl::optional<U>&&) where
+  // from std::optional<Test1>, too. In this case, for a = b where a is
+  // std::optional<Test3> and b is std::optional<Test1>,
+  // std::optional<T>::operator=(U&&) where U is std::optional<Test1> should
+  // be used rather than std::optional<T>::operator=(std::optional<U>&&) where
   // U is Test1.
   struct Test3 {
     enum class State {
@@ -942,9 +945,9 @@
     explicit Test3(Test1&& test1) : state(State::MOVE_CONSTRUCTED_FROM_TEST1) {
       test1.state = Test1::State::MOVED;
     }
-    explicit Test3(const absl::optional<Test1>& test1)
+    explicit Test3(const std::optional<Test1>& test1)
         : state(State::COPY_CONSTRUCTED_FROM_OPTIONAL_TEST1) {}
-    explicit Test3(absl::optional<Test1>&& test1)
+    explicit Test3(std::optional<Test1>&& test1)
         : state(State::MOVE_CONSTRUCTED_FROM_OPTIONAL_TEST1) {
       // In the following scenarios, the given |test1| should always have a value.
       DCHECK(test1.has_value());
@@ -959,11 +962,11 @@
       test1.state = Test1::State::MOVED;
       return *this;
     }
-    Test3& operator=(const absl::optional<Test1>& test1) {
+    Test3& operator=(const std::optional<Test1>& test1) {
       state = State::COPY_ASSIGNED_FROM_OPTIONAL_TEST1;
       return *this;
     }
-    Test3& operator=(absl::optional<Test1>&& test1) {
+    Test3& operator=(std::optional<Test1>&& test1) {
       state = State::MOVE_ASSIGNED_FROM_OPTIONAL_TEST1;
       // In the following scenarios, the given |test1| should always have a value.
       DCHECK(test1.has_value());
@@ -975,8 +978,8 @@
   };
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test3> b;
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test3> b;
 
     b = a;
     EXPECT_TRUE(!!a);
@@ -986,8 +989,8 @@
   }
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test3> b(absl::in_place);
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test3> b(std::in_place);
 
     b = a;
     EXPECT_TRUE(!!a);
@@ -997,8 +1000,8 @@
   }
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test3> b;
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test3> b;
 
     b = std::move(a);
     EXPECT_TRUE(!!a);
@@ -1008,8 +1011,8 @@
   }
 
   {
-    absl::optional<Test1> a(absl::in_place);
-    absl::optional<Test3> b(absl::in_place);
+    std::optional<Test1> a(std::in_place);
+    std::optional<Test3> b(std::in_place);
 
     b = std::move(a);
     EXPECT_TRUE(!!a);
@@ -1021,89 +1024,89 @@
 
 TEST(OptionalTest, OperatorStar) {
   {
-    absl::optional<float> a(0.1f);
+    std::optional<float> a(0.1f);
     EXPECT_EQ(a.value(), *a);
   }
 
   {
-    absl::optional<std::string> a("foo");
+    std::optional<std::string> a("foo");
     EXPECT_EQ(a.value(), *a);
   }
 
   {
-    absl::optional<TestObject> a(TestObject(3, 0.1));
+    std::optional<TestObject> a(TestObject(3, 0.1));
     EXPECT_EQ(a.value(), *a);
   }
 }
 
 TEST(OptionalTest, OperatorStar_rvalue) {
-  EXPECT_EQ(0.1f, *absl::optional<float>(0.1f));
-  EXPECT_EQ(std::string("foo"), *absl::optional<std::string>("foo"));
+  EXPECT_EQ(0.1f, *std::optional<float>(0.1f));
+  EXPECT_EQ(std::string("foo"), *std::optional<std::string>("foo"));
   EXPECT_TRUE(TestObject(3, 0.1) ==
-              *absl::optional<TestObject>(TestObject(3, 0.1)));
+              *std::optional<TestObject>(TestObject(3, 0.1)));
 }
 
 TEST(OptionalTest, OperatorArrow) {
-  absl::optional<TestObject> a(TestObject(3, 0.1));
+  std::optional<TestObject> a(TestObject(3, 0.1));
   EXPECT_EQ(a->foo(), 3);
 }
 
 TEST(OptionalTest, Value_rvalue) {
-  EXPECT_EQ(0.1f, absl::optional<float>(0.1f).value());
-  EXPECT_EQ(std::string("foo"), absl::optional<std::string>("foo").value());
+  EXPECT_EQ(0.1f, std::optional<float>(0.1f).value());
+  EXPECT_EQ(std::string("foo"), std::optional<std::string>("foo").value());
   EXPECT_TRUE(TestObject(3, 0.1) ==
-              absl::optional<TestObject>(TestObject(3, 0.1)).value());
+              std::optional<TestObject>(TestObject(3, 0.1)).value());
 }
 
 TEST(OptionalTest, ValueOr) {
   {
-    absl::optional<float> a;
+    std::optional<float> a;
     EXPECT_EQ(0.0f, a.value_or(0.0f));
 
     a = 0.1f;
     EXPECT_EQ(0.1f, a.value_or(0.0f));
 
-    a = absl::nullopt;
+    a = std::nullopt;
     EXPECT_EQ(0.0f, a.value_or(0.0f));
   }
 
   // value_or() can be constexpr.
   {
-    constexpr absl::optional<int> a(absl::in_place, 1);
+    constexpr std::optional<int> a(std::in_place, 1);
     constexpr int value = a.value_or(10);
     EXPECT_EQ(1, value);
   }
   {
-    constexpr absl::optional<int> a;
+    constexpr std::optional<int> a;
     constexpr int value = a.value_or(10);
     EXPECT_EQ(10, value);
   }
 
   {
-    absl::optional<std::string> a;
+    std::optional<std::string> a;
     EXPECT_EQ("bar", a.value_or("bar"));
 
     a = std::string("foo");
     EXPECT_EQ(std::string("foo"), a.value_or("bar"));
 
-    a = absl::nullopt;
+    a = std::nullopt;
     EXPECT_EQ(std::string("bar"), a.value_or("bar"));
   }
 
   {
-    absl::optional<TestObject> a;
+    std::optional<TestObject> a;
     EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
 
     a = TestObject(3, 0.1);
     EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(3, 0.1));
 
-    a = absl::nullopt;
+    a = std::nullopt;
     EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
   }
 }
 
 TEST(OptionalTest, Swap_bothNoValue) {
-  absl::optional<TestObject> a, b;
+  std::optional<TestObject> a, b;
   a.swap(b);
 
   EXPECT_FALSE(a);
@@ -1113,8 +1116,8 @@
 }
 
 TEST(OptionalTest, Swap_inHasValue) {
-  absl::optional<TestObject> a(TestObject(1, 0.3));
-  absl::optional<TestObject> b;
+  std::optional<TestObject> a(TestObject(1, 0.3));
+  std::optional<TestObject> b;
   a.swap(b);
 
   EXPECT_FALSE(a);
@@ -1125,8 +1128,8 @@
 }
 
 TEST(OptionalTest, Swap_outHasValue) {
-  absl::optional<TestObject> a;
-  absl::optional<TestObject> b(TestObject(1, 0.3));
+  std::optional<TestObject> a;
+  std::optional<TestObject> b(TestObject(1, 0.3));
   a.swap(b);
 
   EXPECT_TRUE(!!a);
@@ -1136,8 +1139,8 @@
 }
 
 TEST(OptionalTest, Swap_bothValue) {
-  absl::optional<TestObject> a(TestObject(0, 0.1));
-  absl::optional<TestObject> b(TestObject(1, 0.3));
+  std::optional<TestObject> a(TestObject(0, 0.1));
+  std::optional<TestObject> b(TestObject(1, 0.3));
   a.swap(b);
 
   EXPECT_TRUE(!!a);
@@ -1150,7 +1153,7 @@
 
 TEST(OptionalTest, Emplace) {
   {
-    absl::optional<float> a(0.1f);
+    std::optional<float> a(0.1f);
     EXPECT_EQ(0.3f, a.emplace(0.3f));
 
     EXPECT_TRUE(a);
@@ -1158,7 +1161,7 @@
   }
 
   {
-    absl::optional<std::string> a("foo");
+    std::optional<std::string> a("foo");
     EXPECT_EQ("bar", a.emplace("bar"));
 
     EXPECT_TRUE(a);
@@ -1166,7 +1169,7 @@
   }
 
   {
-    absl::optional<TestObject> a(TestObject(0, 0.1));
+    std::optional<TestObject> a(TestObject(0, 0.1));
     EXPECT_EQ(TestObject(1, 0.2), a.emplace(TestObject(1, 0.2)));
 
     EXPECT_TRUE(!!a);
@@ -1174,18 +1177,18 @@
   }
 
   {
-    absl::optional<std::vector<int>> a;
+    std::optional<std::vector<int>> a;
     auto& ref = a.emplace({2, 3});
-    static_assert(std::is_same<std::vector<int>&, decltype(ref)>::value, "");
+    static_assert(std::is_same_v<std::vector<int>&, decltype(ref)>, "");
     EXPECT_TRUE(a);
     EXPECT_THAT(*a, ElementsAre(2, 3));
     EXPECT_EQ(&ref, &*a);
   }
 
   {
-    absl::optional<std::vector<int>> a;
+    std::optional<std::vector<int>> a;
     auto& ref = a.emplace({4, 5}, std::allocator<int>());
-    static_assert(std::is_same<std::vector<int>&, decltype(ref)>::value, "");
+    static_assert(std::is_same_v<std::vector<int>&, decltype(ref)>, "");
     EXPECT_TRUE(a);
     EXPECT_THAT(*a, ElementsAre(4, 5));
     EXPECT_EQ(&ref, &*a);
@@ -1193,86 +1196,86 @@
 }
 
 TEST(OptionalTest, Equals_TwoEmpty) {
-  absl::optional<int> a;
-  absl::optional<int> b;
+  std::optional<int> a;
+  std::optional<int> b;
 
   EXPECT_TRUE(a == b);
 }
 
 TEST(OptionalTest, Equals_TwoEquals) {
-  absl::optional<int> a(1);
-  absl::optional<int> b(1);
+  std::optional<int> a(1);
+  std::optional<int> b(1);
 
   EXPECT_TRUE(a == b);
 }
 
 TEST(OptionalTest, Equals_OneEmpty) {
-  absl::optional<int> a;
-  absl::optional<int> b(1);
+  std::optional<int> a;
+  std::optional<int> b(1);
 
   EXPECT_FALSE(a == b);
 }
 
 TEST(OptionalTest, Equals_TwoDifferent) {
-  absl::optional<int> a(0);
-  absl::optional<int> b(1);
+  std::optional<int> a(0);
+  std::optional<int> b(1);
 
   EXPECT_FALSE(a == b);
 }
 
 TEST(OptionalTest, Equals_DifferentType) {
-  absl::optional<int> a(0);
-  absl::optional<double> b(0);
+  std::optional<int> a(0);
+  std::optional<double> b(0);
 
   EXPECT_TRUE(a == b);
 }
 
 TEST(OptionalTest, Equals_Value) {
-  absl::optional<int> a(0);
-  absl::optional<int> b;
+  std::optional<int> a(0);
+  std::optional<int> b;
 
   EXPECT_TRUE(a == 0);
   EXPECT_FALSE(b == 0);
 }
 
 TEST(OptionalTest, NotEquals_TwoEmpty) {
-  absl::optional<int> a;
-  absl::optional<int> b;
+  std::optional<int> a;
+  std::optional<int> b;
 
   EXPECT_FALSE(a != b);
 }
 
 TEST(OptionalTest, NotEquals_TwoEquals) {
-  absl::optional<int> a(1);
-  absl::optional<int> b(1);
+  std::optional<int> a(1);
+  std::optional<int> b(1);
 
   EXPECT_FALSE(a != b);
 }
 
 TEST(OptionalTest, NotEquals_OneEmpty) {
-  absl::optional<int> a;
-  absl::optional<int> b(1);
+  std::optional<int> a;
+  std::optional<int> b(1);
 
   EXPECT_TRUE(a != b);
 }
 
 TEST(OptionalTest, NotEquals_TwoDifferent) {
-  absl::optional<int> a(0);
-  absl::optional<int> b(1);
+  std::optional<int> a(0);
+  std::optional<int> b(1);
 
   EXPECT_TRUE(a != b);
 }
 
 TEST(OptionalTest, NotEquals_DifferentType) {
-  absl::optional<int> a(0);
-  absl::optional<double> b(0.0);
+  std::optional<int> a(0);
+  std::optional<double> b(0.0);
 
   EXPECT_FALSE(a != b);
 }
 
 TEST(OptionalTest, NotEquals_Value) {
-  absl::optional<int> a(0);
-  absl::optional<int> b;
+  std::optional<int> a(0);
+  std::optional<int> b;
 
   EXPECT_TRUE(a != 1);
   EXPECT_FALSE(a == 1);
@@ -1282,703 +1285,705 @@
 }
 
 TEST(OptionalTest, Less_LeftEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r(1);
+  std::optional<int> l;
+  std::optional<int> r(1);
 
   EXPECT_TRUE(l < r);
 }
 
 TEST(OptionalTest, Less_RightEmpty) {
-  absl::optional<int> l(1);
-  absl::optional<int> r;
+  std::optional<int> l(1);
+  std::optional<int> r;
 
   EXPECT_FALSE(l < r);
 }
 
 TEST(OptionalTest, Less_BothEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r;
+  std::optional<int> l;
+  std::optional<int> r;
 
   EXPECT_FALSE(l < r);
 }
 
 TEST(OptionalTest, Less_BothValues) {
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(2);
+    std::optional<int> l(1);
+    std::optional<int> r(2);
 
     EXPECT_TRUE(l < r);
   }
   {
-    absl::optional<int> l(2);
-    absl::optional<int> r(1);
+    std::optional<int> l(2);
+    std::optional<int> r(1);
 
     EXPECT_FALSE(l < r);
   }
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(1);
+    std::optional<int> l(1);
+    std::optional<int> r(1);
 
     EXPECT_FALSE(l < r);
   }
 }
 
 TEST(OptionalTest, Less_DifferentType) {
-  absl::optional<int> l(1);
-  absl::optional<double> r(2.0);
+  std::optional<int> l(1);
+  std::optional<double> r(2.0);
 
   EXPECT_TRUE(l < r);
 }
 
 TEST(OptionalTest, LessEq_LeftEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r(1);
+  std::optional<int> l;
+  std::optional<int> r(1);
 
   EXPECT_TRUE(l <= r);
 }
 
 TEST(OptionalTest, LessEq_RightEmpty) {
-  absl::optional<int> l(1);
-  absl::optional<int> r;
+  std::optional<int> l(1);
+  std::optional<int> r;
 
   EXPECT_FALSE(l <= r);
 }
 
 TEST(OptionalTest, LessEq_BothEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r;
+  std::optional<int> l;
+  std::optional<int> r;
 
   EXPECT_TRUE(l <= r);
 }
 
 TEST(OptionalTest, LessEq_BothValues) {
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(2);
+    std::optional<int> l(1);
+    std::optional<int> r(2);
 
     EXPECT_TRUE(l <= r);
   }
   {
-    absl::optional<int> l(2);
-    absl::optional<int> r(1);
+    std::optional<int> l(2);
+    std::optional<int> r(1);
 
     EXPECT_FALSE(l <= r);
   }
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(1);
+    std::optional<int> l(1);
+    std::optional<int> r(1);
 
     EXPECT_TRUE(l <= r);
   }
 }
 
 TEST(OptionalTest, LessEq_DifferentType) {
-  absl::optional<int> l(1);
-  absl::optional<double> r(2.0);
+  std::optional<int> l(1);
+  std::optional<double> r(2.0);
 
   EXPECT_TRUE(l <= r);
 }
 
 TEST(OptionalTest, Greater_BothEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r;
+  std::optional<int> l;
+  std::optional<int> r;
 
   EXPECT_FALSE(l > r);
 }
 
 TEST(OptionalTest, Greater_LeftEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r(1);
+  std::optional<int> l;
+  std::optional<int> r(1);
 
   EXPECT_FALSE(l > r);
 }
 
 TEST(OptionalTest, Greater_RightEmpty) {
-  absl::optional<int> l(1);
-  absl::optional<int> r;
+  std::optional<int> l(1);
+  std::optional<int> r;
 
   EXPECT_TRUE(l > r);
 }
 
 TEST(OptionalTest, Greater_BothValue) {
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(2);
+    std::optional<int> l(1);
+    std::optional<int> r(2);
 
     EXPECT_FALSE(l > r);
   }
   {
-    absl::optional<int> l(2);
-    absl::optional<int> r(1);
+    std::optional<int> l(2);
+    std::optional<int> r(1);
 
     EXPECT_TRUE(l > r);
   }
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(1);
+    std::optional<int> l(1);
+    std::optional<int> r(1);
 
     EXPECT_FALSE(l > r);
   }
 }
 
 TEST(OptionalTest, Greater_DifferentType) {
-  absl::optional<int> l(1);
-  absl::optional<double> r(2.0);
+  std::optional<int> l(1);
+  std::optional<double> r(2.0);
 
   EXPECT_FALSE(l > r);
 }
 
 TEST(OptionalTest, GreaterEq_BothEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r;
+  std::optional<int> l;
+  std::optional<int> r;
 
   EXPECT_TRUE(l >= r);
 }
 
 TEST(OptionalTest, GreaterEq_LeftEmpty) {
-  absl::optional<int> l;
-  absl::optional<int> r(1);
+  std::optional<int> l;
+  std::optional<int> r(1);
 
   EXPECT_FALSE(l >= r);
 }
 
 TEST(OptionalTest, GreaterEq_RightEmpty) {
-  absl::optional<int> l(1);
-  absl::optional<int> r;
+  std::optional<int> l(1);
+  std::optional<int> r;
 
   EXPECT_TRUE(l >= r);
 }
 
 TEST(OptionalTest, GreaterEq_BothValue) {
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(2);
+    std::optional<int> l(1);
+    std::optional<int> r(2);
 
     EXPECT_FALSE(l >= r);
   }
   {
-    absl::optional<int> l(2);
-    absl::optional<int> r(1);
+    std::optional<int> l(2);
+    std::optional<int> r(1);
 
     EXPECT_TRUE(l >= r);
   }
   {
-    absl::optional<int> l(1);
-    absl::optional<int> r(1);
+    std::optional<int> l(1);
+    std::optional<int> r(1);
 
     EXPECT_TRUE(l >= r);
   }
 }
 
 TEST(OptionalTest, GreaterEq_DifferentType) {
-  absl::optional<int> l(1);
-  absl::optional<double> r(2.0);
+  std::optional<int> l(1);
+  std::optional<double> r(2.0);
 
   EXPECT_FALSE(l >= r);
 }
 
 TEST(OptionalTest, OptNullEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_TRUE(opt == absl::nullopt);
+    std::optional<int> opt;
+    EXPECT_TRUE(opt == std::nullopt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_FALSE(opt == absl::nullopt);
+    std::optional<int> opt(1);
+    EXPECT_FALSE(opt == std::nullopt);
   }
 }
 
 TEST(OptionalTest, NullOptEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_TRUE(absl::nullopt == opt);
+    std::optional<int> opt;
+    EXPECT_TRUE(std::nullopt == opt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_FALSE(absl::nullopt == opt);
+    std::optional<int> opt(1);
+    EXPECT_FALSE(std::nullopt == opt);
   }
 }
 
 TEST(OptionalTest, OptNullNotEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_FALSE(opt != absl::nullopt);
+    std::optional<int> opt;
+    EXPECT_FALSE(opt != std::nullopt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_TRUE(opt != absl::nullopt);
+    std::optional<int> opt(1);
+    EXPECT_TRUE(opt != std::nullopt);
   }
 }
 
 TEST(OptionalTest, NullOptNotEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_FALSE(absl::nullopt != opt);
+    std::optional<int> opt;
+    EXPECT_FALSE(std::nullopt != opt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_TRUE(absl::nullopt != opt);
+    std::optional<int> opt(1);
+    EXPECT_TRUE(std::nullopt != opt);
   }
 }
 
 TEST(OptionalTest, OptNullLower) {
   {
-    absl::optional<int> opt;
-    EXPECT_FALSE(opt < absl::nullopt);
+    std::optional<int> opt;
+    EXPECT_FALSE(opt < std::nullopt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_FALSE(opt < absl::nullopt);
+    std::optional<int> opt(1);
+    EXPECT_FALSE(opt < std::nullopt);
   }
 }
 
 TEST(OptionalTest, NullOptLower) {
   {
-    absl::optional<int> opt;
-    EXPECT_FALSE(absl::nullopt < opt);
+    std::optional<int> opt;
+    EXPECT_FALSE(std::nullopt < opt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_TRUE(absl::nullopt < opt);
+    std::optional<int> opt(1);
+    EXPECT_TRUE(std::nullopt < opt);
   }
 }
 
 TEST(OptionalTest, OptNullLowerEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_TRUE(opt <= absl::nullopt);
+    std::optional<int> opt;
+    EXPECT_TRUE(opt <= std::nullopt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_FALSE(opt <= absl::nullopt);
+    std::optional<int> opt(1);
+    EXPECT_FALSE(opt <= std::nullopt);
   }
 }
 
 TEST(OptionalTest, NullOptLowerEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_TRUE(absl::nullopt <= opt);
+    std::optional<int> opt;
+    EXPECT_TRUE(std::nullopt <= opt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_TRUE(absl::nullopt <= opt);
+    std::optional<int> opt(1);
+    EXPECT_TRUE(std::nullopt <= opt);
   }
 }
 
 TEST(OptionalTest, OptNullGreater) {
   {
-    absl::optional<int> opt;
-    EXPECT_FALSE(opt > absl::nullopt);
+    std::optional<int> opt;
+    EXPECT_FALSE(opt > std::nullopt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_TRUE(opt > absl::nullopt);
+    std::optional<int> opt(1);
+    EXPECT_TRUE(opt > std::nullopt);
   }
 }
 
 TEST(OptionalTest, NullOptGreater) {
   {
-    absl::optional<int> opt;
-    EXPECT_FALSE(absl::nullopt > opt);
+    std::optional<int> opt;
+    EXPECT_FALSE(std::nullopt > opt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_FALSE(absl::nullopt > opt);
+    std::optional<int> opt(1);
+    EXPECT_FALSE(std::nullopt > opt);
   }
 }
 
 TEST(OptionalTest, OptNullGreaterEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_TRUE(opt >= absl::nullopt);
+    std::optional<int> opt;
+    EXPECT_TRUE(opt >= std::nullopt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_TRUE(opt >= absl::nullopt);
+    std::optional<int> opt(1);
+    EXPECT_TRUE(opt >= std::nullopt);
   }
 }
 
 TEST(OptionalTest, NullOptGreaterEq) {
   {
-    absl::optional<int> opt;
-    EXPECT_TRUE(absl::nullopt >= opt);
+    std::optional<int> opt;
+    EXPECT_TRUE(std::nullopt >= opt);
   }
   {
-    absl::optional<int> opt(1);
-    EXPECT_FALSE(absl::nullopt >= opt);
+    std::optional<int> opt(1);
+    EXPECT_FALSE(std::nullopt >= opt);
   }
 }
 
 TEST(OptionalTest, ValueEq_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_FALSE(opt == 1);
 }
 
 TEST(OptionalTest, ValueEq_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_FALSE(opt == 1);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_TRUE(opt == 1);
   }
 }
 
 TEST(OptionalTest, ValueEq_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(opt == 0.0);
 }
 
 TEST(OptionalTest, EqValue_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_FALSE(1 == opt);
 }
 
 TEST(OptionalTest, EqValue_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_FALSE(1 == opt);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_TRUE(1 == opt);
   }
 }
 
 TEST(OptionalTest, EqValue_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(0.0 == opt);
 }
 
 TEST(OptionalTest, ValueNotEq_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_TRUE(opt != 1);
 }
 
 TEST(OptionalTest, ValueNotEq_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_TRUE(opt != 1);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_FALSE(opt != 1);
   }
 }
 
 TEST(OptionalTest, ValueNotEq_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_FALSE(opt != 0.0);
 }
 
 TEST(OptionalTest, NotEqValue_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_TRUE(1 != opt);
 }
 
 TEST(OptionalTest, NotEqValue_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_TRUE(1 != opt);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_FALSE(1 != opt);
   }
 }
 
 TEST(OptionalTest, NotEqValue_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_FALSE(0.0 != opt);
 }
 
 TEST(OptionalTest, ValueLess_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_TRUE(opt < 1);
 }
 
 TEST(OptionalTest, ValueLess_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_TRUE(opt < 1);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_FALSE(opt < 1);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_FALSE(opt < 1);
   }
 }
 
 TEST(OptionalTest, ValueLess_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(opt < 1.0);
 }
 
 TEST(OptionalTest, LessValue_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_FALSE(1 < opt);
 }
 
 TEST(OptionalTest, LessValue_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_FALSE(1 < opt);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_FALSE(1 < opt);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_TRUE(1 < opt);
   }
 }
 
 TEST(OptionalTest, LessValue_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_FALSE(0.0 < opt);
 }
 
 TEST(OptionalTest, ValueLessEq_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_TRUE(opt <= 1);
 }
 
 TEST(OptionalTest, ValueLessEq_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_TRUE(opt <= 1);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_TRUE(opt <= 1);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_FALSE(opt <= 1);
   }
 }
 
 TEST(OptionalTest, ValueLessEq_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(opt <= 0.0);
 }
 
 TEST(OptionalTest, LessEqValue_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_FALSE(1 <= opt);
 }
 
 TEST(OptionalTest, LessEqValue_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_FALSE(1 <= opt);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_TRUE(1 <= opt);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_TRUE(1 <= opt);
   }
 }
 
 TEST(OptionalTest, LessEqValue_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(0.0 <= opt);
 }
 
 TEST(OptionalTest, ValueGreater_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_FALSE(opt > 1);
 }
 
 TEST(OptionalTest, ValueGreater_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_FALSE(opt > 1);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_FALSE(opt > 1);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_TRUE(opt > 1);
   }
 }
 
 TEST(OptionalTest, ValueGreater_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_FALSE(opt > 0.0);
 }
 
 TEST(OptionalTest, GreaterValue_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_TRUE(1 > opt);
 }
 
 TEST(OptionalTest, GreaterValue_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_TRUE(1 > opt);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_FALSE(1 > opt);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_FALSE(1 > opt);
   }
 }
 
 TEST(OptionalTest, GreaterValue_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_FALSE(0.0 > opt);
 }
 
 TEST(OptionalTest, ValueGreaterEq_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_FALSE(opt >= 1);
 }
 
 TEST(OptionalTest, ValueGreaterEq_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_FALSE(opt >= 1);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_TRUE(opt >= 1);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_TRUE(opt >= 1);
   }
 }
 
 TEST(OptionalTest, ValueGreaterEq_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(opt >= 0.0);
 }
 
 TEST(OptionalTest, GreaterEqValue_Empty) {
-  absl::optional<int> opt;
+  std::optional<int> opt;
   EXPECT_TRUE(1 >= opt);
 }
 
 TEST(OptionalTest, GreaterEqValue_NotEmpty) {
   {
-    absl::optional<int> opt(0);
+    std::optional<int> opt(0);
     EXPECT_TRUE(1 >= opt);
   }
   {
-    absl::optional<int> opt(1);
+    std::optional<int> opt(1);
     EXPECT_TRUE(1 >= opt);
   }
   {
-    absl::optional<int> opt(2);
+    std::optional<int> opt(2);
     EXPECT_FALSE(1 >= opt);
   }
 }
 
 TEST(OptionalTest, GreaterEqValue_DifferentType) {
-  absl::optional<int> opt(0);
+  std::optional<int> opt(0);
   EXPECT_TRUE(0.0 >= opt);
 }
 
 TEST(OptionalTest, NotEquals) {
   {
-    absl::optional<float> a(0.1f);
-    absl::optional<float> b(0.2f);
+    std::optional<float> a(0.1f);
+    std::optional<float> b(0.2f);
     EXPECT_NE(a, b);
   }
 
   {
-    absl::optional<std::string> a("foo");
-    absl::optional<std::string> b("bar");
+    std::optional<std::string> a("foo");
+    std::optional<std::string> b("bar");
     EXPECT_NE(a, b);
   }
 
   {
-    absl::optional<int> a(1);
-    absl::optional<double> b(2);
+    std::optional<int> a(1);
+    std::optional<double> b(2);
     EXPECT_NE(a, b);
   }
 
   {
-    absl::optional<TestObject> a(TestObject(3, 0.1));
-    absl::optional<TestObject> b(TestObject(4, 1.0));
+    std::optional<TestObject> a(TestObject(3, 0.1));
+    std::optional<TestObject> b(TestObject(4, 1.0));
     EXPECT_TRUE(a != b);
   }
 }
 
 TEST(OptionalTest, NotEqualsNull) {
   {
-    absl::optional<float> a(0.1f);
-    absl::optional<float> b(0.1f);
-    b = absl::nullopt;
+    std::optional<float> a(0.1f);
+    std::optional<float> b(0.1f);
+    b = std::nullopt;
     EXPECT_NE(a, b);
   }
 
   {
-    absl::optional<std::string> a("foo");
-    absl::optional<std::string> b("foo");
-    b = absl::nullopt;
+    std::optional<std::string> a("foo");
+    std::optional<std::string> b("foo");
+    b = std::nullopt;
     EXPECT_NE(a, b);
   }
 
   {
-    absl::optional<TestObject> a(TestObject(3, 0.1));
-    absl::optional<TestObject> b(TestObject(3, 0.1));
-    b = absl::nullopt;
+    std::optional<TestObject> a(TestObject(3, 0.1));
+    std::optional<TestObject> b(TestObject(3, 0.1));
+    b = std::nullopt;
     EXPECT_TRUE(a != b);
   }
 }
 
 TEST(OptionalTest, MakeOptional) {
   {
-    absl::optional<float> o = absl::make_optional(32.f);
+    std::optional<float> o = std::make_optional(32.f);
     EXPECT_TRUE(o);
     EXPECT_EQ(32.f, *o);
 
     float value = 3.f;
-    o = absl::make_optional(std::move(value));
+    o = std::make_optional(std::move(value));
     EXPECT_TRUE(o);
     EXPECT_EQ(3.f, *o);
   }
 
   {
-    absl::optional<std::string> o = absl::make_optional(std::string("foo"));
+    std::optional<std::string> o = std::make_optional(std::string("foo"));
     EXPECT_TRUE(o);
     EXPECT_EQ("foo", *o);
 
     std::string value = "bar";
-    o = absl::make_optional(std::move(value));
+    o = std::make_optional(std::move(value));
     EXPECT_TRUE(o);
     EXPECT_EQ(std::string("bar"), *o);
   }
 
   {
-    absl::optional<TestObject> o = absl::make_optional(TestObject(3, 0.1));
+    // NOLINTBEGIN(bugprone-use-after-move)
+    std::optional<TestObject> o = std::make_optional(TestObject(3, 0.1));
     EXPECT_TRUE(!!o);
     EXPECT_TRUE(TestObject(3, 0.1) == *o);
 
     TestObject value = TestObject(0, 0.42);
-    o = absl::make_optional(std::move(value));
+    o = std::make_optional(std::move(value));
     EXPECT_TRUE(!!o);
     EXPECT_TRUE(TestObject(0, 0.42) == *o);
     EXPECT_EQ(TestObject::State::MOVED_FROM, value.state());
     EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, o->state());
 
     EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
-              absl::make_optional(std::move(value))->state());
+              std::make_optional(std::move(value))->state());
+    // NOLINTEND(bugprone-use-after-move)
   }
 
   {
@@ -1990,7 +1995,7 @@
       bool c;
     };
 
-    absl::optional<Test> o = absl::make_optional<Test>(1, 2.0, true);
+    std::optional<Test> o = std::make_optional<Test>(1, 2.0, true);
     EXPECT_TRUE(!!o);
     EXPECT_EQ(1, o->a);
     EXPECT_EQ(2.0, o->b);
@@ -1998,18 +2003,18 @@
   }
 
   {
-    auto str1 = absl::make_optional<std::string>({'1', '2', '3'});
+    auto str1 = std::make_optional<std::string>({'1', '2', '3'});
     EXPECT_EQ("123", *str1);
 
-    auto str2 = absl::make_optional<std::string>({'a', 'b', 'c'},
-                                                 std::allocator<char>());
+    auto str2 = std::make_optional<std::string>({'a', 'b', 'c'},
+                                                std::allocator<char>());
     EXPECT_EQ("abc", *str2);
   }
 }
 
 TEST(OptionalTest, NonMemberSwap_bothNoValue) {
-  absl::optional<TestObject> a, b;
-  absl::swap(a, b);
+  std::optional<TestObject> a, b;
+  std::swap(a, b);
 
   EXPECT_FALSE(!!a);
   EXPECT_FALSE(!!b);
@@ -2018,9 +2023,9 @@
 }
 
 TEST(OptionalTest, NonMemberSwap_inHasValue) {
-  absl::optional<TestObject> a(TestObject(1, 0.3));
-  absl::optional<TestObject> b;
-  absl::swap(a, b);
+  std::optional<TestObject> a(TestObject(1, 0.3));
+  std::optional<TestObject> b;
+  std::swap(a, b);
 
   EXPECT_FALSE(!!a);
   EXPECT_TRUE(!!b);
@@ -2029,9 +2034,9 @@
 }
 
 TEST(OptionalTest, NonMemberSwap_outHasValue) {
-  absl::optional<TestObject> a;
-  absl::optional<TestObject> b(TestObject(1, 0.3));
-  absl::swap(a, b);
+  std::optional<TestObject> a;
+  std::optional<TestObject> b(TestObject(1, 0.3));
+  std::swap(a, b);
 
   EXPECT_TRUE(!!a);
   EXPECT_FALSE(!!b);
@@ -2040,9 +2045,9 @@
 }
 
 TEST(OptionalTest, NonMemberSwap_bothValue) {
-  absl::optional<TestObject> a(TestObject(0, 0.1));
-  absl::optional<TestObject> b(TestObject(1, 0.3));
-  absl::swap(a, b);
+  std::optional<TestObject> a(TestObject(0, 0.1));
+  std::optional<TestObject> b(TestObject(1, 0.3));
+  std::swap(a, b);
 
   EXPECT_TRUE(!!a);
   EXPECT_TRUE(!!b);
@@ -2055,57 +2060,57 @@
 TEST(OptionalTest, Hash_OptionalReflectsInternal) {
   {
     std::hash<int> int_hash;
-    std::hash<absl::optional<int>> opt_int_hash;
+    std::hash<std::optional<int>> opt_int_hash;
 
-    EXPECT_EQ(int_hash(1), opt_int_hash(absl::optional<int>(1)));
+    EXPECT_EQ(int_hash(1), opt_int_hash(std::optional<int>(1)));
   }
 
   {
     std::hash<std::string> str_hash;
-    std::hash<absl::optional<std::string>> opt_str_hash;
+    std::hash<std::optional<std::string>> opt_str_hash;
 
     EXPECT_EQ(str_hash(std::string("foobar")),
-              opt_str_hash(absl::optional<std::string>(std::string("foobar"))));
+              opt_str_hash(std::optional<std::string>(std::string("foobar"))));
   }
 }
 
 TEST(OptionalTest, Hash_NullOptEqualsNullOpt) {
-  std::hash<absl::optional<int>> opt_int_hash;
-  std::hash<absl::optional<std::string>> opt_str_hash;
+  std::hash<std::optional<int>> opt_int_hash;
+  std::hash<std::optional<std::string>> opt_str_hash;
 
-  EXPECT_EQ(opt_str_hash(absl::optional<std::string>()),
-            opt_int_hash(absl::optional<int>()));
+  EXPECT_EQ(opt_str_hash(std::optional<std::string>()),
+            opt_int_hash(std::optional<int>()));
 }
 
 TEST(OptionalTest, Hash_UseInSet) {
-  std::set<absl::optional<int>> setOptInt;
+  std::set<std::optional<int>> setOptInt;
 
   EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
 
-  setOptInt.insert(absl::optional<int>(3));
+  setOptInt.insert(std::optional<int>(3));
   EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
   EXPECT_NE(setOptInt.end(), setOptInt.find(3));
 }
 
 TEST(OptionalTest, HasValue) {
-  absl::optional<int> a;
+  std::optional<int> a;
   EXPECT_FALSE(a.has_value());
 
   a = 42;
   EXPECT_TRUE(a.has_value());
 
-  a = absl::nullopt;
+  a = std::nullopt;
   EXPECT_FALSE(a.has_value());
 
   a = 0;
   EXPECT_TRUE(a.has_value());
 
-  a = absl::optional<int>();
+  a = std::optional<int>();
   EXPECT_FALSE(a.has_value());
 }
 
 TEST(OptionalTest, Reset_int) {
-  absl::optional<int> a(0);
+  std::optional<int> a(0);
   EXPECT_TRUE(a.has_value());
   EXPECT_EQ(0, a.value());
 
@@ -2115,7 +2120,7 @@
 }
 
 TEST(OptionalTest, Reset_Object) {
-  absl::optional<TestObject> a(TestObject(0, 0.1));
+  std::optional<TestObject> a(TestObject(0, 0.1));
   EXPECT_TRUE(a.has_value());
   EXPECT_EQ(TestObject(0, 0.1), a.value());
 
@@ -2125,7 +2130,7 @@
 }
 
 TEST(OptionalTest, Reset_NoOp) {
-  absl::optional<int> a;
+  std::optional<int> a;
   EXPECT_FALSE(a.has_value());
 
   a.reset();
@@ -2133,7 +2138,7 @@
 }
 
 TEST(OptionalTest, AssignFromRValue) {
-  absl::optional<TestObject> a;
+  std::optional<TestObject> a;
   EXPECT_FALSE(a.has_value());
 
   TestObject obj;
@@ -2143,16 +2148,16 @@
 }
 
 TEST(OptionalTest, DontCallDefaultCtor) {
-  absl::optional<DeletedDefaultConstructor> a;
+  std::optional<DeletedDefaultConstructor> a;
   EXPECT_FALSE(a.has_value());
 
-  a = absl::make_optional<DeletedDefaultConstructor>(42);
+  a = std::make_optional<DeletedDefaultConstructor>(42);
   EXPECT_TRUE(a.has_value());
   EXPECT_EQ(42, a->foo());
 }
 
 TEST(OptionalTest, DontCallNewMemberFunction) {
-  absl::optional<DeleteNewOperators> a;
+  std::optional<DeleteNewOperators> a;
   EXPECT_FALSE(a.has_value());
 
   a = DeleteNewOperators();
@@ -2166,7 +2171,7 @@
   };
 
   {
-    const absl::optional<C> const_optional;
+    const std::optional<C> const_optional;
     EXPECT_DEATH_IF_SUPPORTED(const_optional.value(), "");
     EXPECT_DEATH_IF_SUPPORTED(const_optional->Method(), "");
     EXPECT_DEATH_IF_SUPPORTED(*const_optional, "");
@@ -2174,7 +2179,7 @@
   }
 
   {
-    absl::optional<C> non_const_optional;
+    std::optional<C> non_const_optional;
     EXPECT_DEATH_IF_SUPPORTED(non_const_optional.value(), "");
     EXPECT_DEATH_IF_SUPPORTED(non_const_optional->Method(), "");
     EXPECT_DEATH_IF_SUPPORTED(*non_const_optional, "");
@@ -2212,51 +2217,51 @@
   };
 
   static_assert(
-      noexcept(absl::optional<int>(std::declval<absl::optional<int>>())),
+      noexcept(std::optional<int>(std::declval<std::optional<int>>())),
       "move constructor for noexcept move-constructible T must be noexcept "
       "(trivial copy, trivial move)");
   static_assert(
-      !noexcept(absl::optional<Test1>(std::declval<absl::optional<Test1>>())),
+      !noexcept(std::optional<Test1>(std::declval<std::optional<Test1>>())),
       "move constructor for non-noexcept move-constructible T must not be "
       "noexcept (trivial copy)");
   static_assert(
-      noexcept(absl::optional<Test2>(std::declval<absl::optional<Test2>>())),
+      noexcept(std::optional<Test2>(std::declval<std::optional<Test2>>())),
       "move constructor for noexcept move-constructible T must be noexcept "
       "(non-trivial copy, trivial move)");
   static_assert(
-      noexcept(absl::optional<Test3>(std::declval<absl::optional<Test3>>())),
+      noexcept(std::optional<Test3>(std::declval<std::optional<Test3>>())),
       "move constructor for noexcept move-constructible T must be noexcept "
       "(trivial copy, non-trivial move)");
   static_assert(
-      noexcept(absl::optional<Test4>(std::declval<absl::optional<Test4>>())),
+      noexcept(std::optional<Test4>(std::declval<std::optional<Test4>>())),
       "move constructor for noexcept move-constructible T must be noexcept "
       "(non-trivial copy, non-trivial move)");
   static_assert(
-      !noexcept(absl::optional<Test5>(std::declval<absl::optional<Test5>>())),
+      !noexcept(std::optional<Test5>(std::declval<std::optional<Test5>>())),
       "move constructor for non-noexcept move-constructible T must not be "
       "noexcept (non-trivial copy)");
 
-  static_assert(noexcept(std::declval<absl::optional<int>>() =
-                             std::declval<absl::optional<int>>()),
+  static_assert(noexcept(std::declval<std::optional<int>>() =
+                             std::declval<std::optional<int>>()),
                 "move assign for noexcept move-constructible/move-assignable T "
                 "must be noexcept");
   static_assert(
-      !noexcept(std::declval<absl::optional<Test1>>() =
-                    std::declval<absl::optional<Test1>>()),
+      !noexcept(std::declval<std::optional<Test1>>() =
+                    std::declval<std::optional<Test1>>()),
       "move assign for non-noexcept move-constructible T must not be noexcept");
   static_assert(
-      !noexcept(std::declval<absl::optional<Test2>>() =
-                    std::declval<absl::optional<Test2>>()),
+      !noexcept(std::declval<std::optional<Test2>>() =
+                    std::declval<std::optional<Test2>>()),
       "move assign for non-noexcept move-assignable T must not be noexcept");
 }
 
 TEST(OptionalTest, OverrideAddressOf) {
   // Objects with an overloaded address-of should not trigger the overload for
   // arrow or copy assignment.
-  static_assert(std::is_trivially_destructible<
-                    TriviallyDestructibleOverloadAddressOf>::value,
-                "Trivially...AddressOf must be trivially destructible.");
-  absl::optional<TriviallyDestructibleOverloadAddressOf> optional;
+  static_assert(
+      std::is_trivially_destructible_v<TriviallyDestructibleOverloadAddressOf>,
+      "Trivially...AddressOf must be trivially destructible.");
+  std::optional<TriviallyDestructibleOverloadAddressOf> optional;
   TriviallyDestructibleOverloadAddressOf n;
   optional = n;
 
@@ -2268,10 +2273,10 @@
   const auto& const_optional = optional;
   const_optional->const_method();
 
-  static_assert(!std::is_trivially_destructible<
-                    NonTriviallyDestructibleOverloadAddressOf>::value,
+  static_assert(!std::is_trivially_destructible_v<
+                    NonTriviallyDestructibleOverloadAddressOf>,
                 "NotTrivially...AddressOf must not be trivially destructible.");
-  absl::optional<NonTriviallyDestructibleOverloadAddressOf> nontrivial_optional;
+  std::optional<NonTriviallyDestructibleOverloadAddressOf> nontrivial_optional;
   NonTriviallyDestructibleOverloadAddressOf n1;
   nontrivial_optional = n1;
 }
diff --git a/base/types/strong_alias_unittest.cc b/base/types/strong_alias_unittest.cc
index 328d345..9f8f516 100644
--- a/base/types/strong_alias_unittest.cc
+++ b/base/types/strong_alias_unittest.cc
@@ -67,9 +67,10 @@
   // Const value getter.
   const FooAlias const_alias(GetExampleValue<TypeParam>(1));
   EXPECT_EQ(GetExampleValue<TypeParam>(1), const_alias.value());
-  static_assert(std::is_const<typename std::remove_reference<decltype(
-                    const_alias.value())>::type>::value,
-                "Reference returned by const value getter should be const.");
+  static_assert(
+      std::is_const_v<
+          typename std::remove_reference<decltype(const_alias.value())>::type>,
+      "Reference returned by const value getter should be const.");
 }
 
 TYPED_TEST(StrongAliasTest, ExplicitConversionToUnderlyingValue) {
@@ -103,7 +104,7 @@
 
   // Check that FooAlias is nothrow move constructible. This matters for
   // performance when used in std::vectors.
-  static_assert(std::is_nothrow_move_constructible<FooAlias>::value,
+  static_assert(std::is_nothrow_move_constructible_v<FooAlias>,
                 "Error: Alias is not nothow move constructible");
 }
 
@@ -186,35 +187,35 @@
 
 TYPED_TEST(StrongAliasTest, IsDefaultConstructible) {
   using FooAlias = StrongAlias<class FooTag, TypeParam>;
-  static_assert(std::is_default_constructible<FooAlias>::value,
+  static_assert(std::is_default_constructible_v<FooAlias>,
                 "Should be possible to default-construct a StrongAlias.");
   static_assert(
-      std::is_trivially_default_constructible<FooAlias>::value ==
-          std::is_trivially_default_constructible<TypeParam>::value,
+      std::is_trivially_default_constructible_v<FooAlias> ==
+          std::is_trivially_default_constructible_v<TypeParam>,
       "Should be possible to trivially default-construct a StrongAlias iff the "
       "underlying type is trivially default constructible.");
 }
 
 TEST(StrongAliasTest, TrivialTypeAliasIsStandardLayout) {
   using FooAlias = StrongAlias<class FooTag, int>;
-  static_assert(std::is_standard_layout<FooAlias>::value,
+  static_assert(std::is_standard_layout_v<FooAlias>,
                 "int-based alias should have standard layout. ");
-  static_assert(std::is_trivially_copyable<FooAlias>::value,
+  static_assert(std::is_trivially_copyable_v<FooAlias>,
                 "int-based alias should be trivially copyable. ");
 }
 
 TYPED_TEST(StrongAliasTest, CannotBeCreatedFromDifferentAlias) {
   using FooAlias = StrongAlias<class FooTag, TypeParam>;
   using BarAlias = StrongAlias<class BarTag, TypeParam>;
-  static_assert(!std::is_constructible<FooAlias, BarAlias>::value,
+  static_assert(!std::is_constructible_v<FooAlias, BarAlias>,
                 "Should be impossible to construct FooAlias from a BarAlias.");
-  static_assert(!std::is_convertible<BarAlias, FooAlias>::value,
+  static_assert(!std::is_convertible_v<BarAlias, FooAlias>,
                 "Should be impossible to convert a BarAlias into FooAlias.");
 }
 
 TYPED_TEST(StrongAliasTest, CannotBeImplicitlyConverterToUnderlyingValue) {
   using FooAlias = StrongAlias<class FooTag, TypeParam>;
-  static_assert(!std::is_convertible<FooAlias, TypeParam>::value,
+  static_assert(!std::is_convertible_v<FooAlias, TypeParam>,
                 "Should be impossible to implicitly convert a StrongAlias into "
                 "an underlying type.");
 }
diff --git a/base/unguessable_token.h b/base/unguessable_token.h
index 11e65c7..8dd7705 100644
--- a/base/unguessable_token.h
+++ b/base/unguessable_token.h
@@ -13,7 +13,6 @@
 #include "base/base_export.h"
 #include "base/check.h"
 #include "base/containers/span.h"
-#include "base/hash/hash.h"
 #include "base/token.h"
 
 namespace base {
diff --git a/base/unguessable_token_unittest.cc b/base/unguessable_token_unittest.cc
index 1064a35..7028571 100644
--- a/base/unguessable_token_unittest.cc
+++ b/base/unguessable_token_unittest.cc
@@ -8,6 +8,7 @@
 #include <sstream>
 #include <type_traits>
 
+#include "base/hash/hash.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
diff --git a/base/uuid.cc b/base/uuid.cc
index 979f265..6125d5d 100644
--- a/base/uuid.cc
+++ b/base/uuid.cc
@@ -9,6 +9,7 @@
 
 #include <ostream>
 
+#include "base/hash/hash.h"
 #include "base/rand_util.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
@@ -183,4 +184,10 @@
   return out << uuid.AsLowercaseString();
 }
 
+size_t UuidHash::operator()(const Uuid& uuid) const {
+  // TODO(crbug.com/1026195): Avoid converting to string to take the hash when
+  // the internal type is migrated to a non-string type.
+  return FastHash(uuid.AsLowercaseString());
+}
+
 }  // namespace base
diff --git a/base/uuid.h b/base/uuid.h
index f5b1752..e73a06a 100644
--- a/base/uuid.h
+++ b/base/uuid.h
@@ -12,7 +12,6 @@
 
 #include "base/base_export.h"
 #include "base/containers/span.h"
-#include "base/hash/hash.h"
 #include "base/strings/string_piece.h"
 #include "base/types/pass_key.h"
 #include "build/build_config.h"
@@ -107,11 +106,7 @@
 // For runtime usage only. Do not store the result of this hash, as it may
 // change in future Chromium revisions.
 struct BASE_EXPORT UuidHash {
-  size_t operator()(const Uuid& uuid) const {
-    // TODO(crbug.com/1026195): Avoid converting to string to take the hash when
-    // the internal type is migrated to a non-string type.
-    return FastHash(uuid.AsLowercaseString());
-  }
+  size_t operator()(const Uuid& uuid) const;
 };
 
 // Stream operator so Uuid objects can be used in logging statements.
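 
The two uuid hunks above move the body of UuidHash::operator() out of the header and into uuid.cc, so uuid.h no longer needs base/hash/hash.h. A minimal usage sketch of the functor, assuming only the declarations shown here (the map and counter below are hypothetical):

#include <unordered_map>

#include "base/uuid.h"

// Hypothetical example: keying an unordered_map by Uuid via UuidHash. Per the
// comment above, the hash value is for runtime use only and must not be
// persisted.
std::unordered_map<base::Uuid, int, base::UuidHash> g_counts;

void RecordUse(const base::Uuid& id) {
  ++g_counts[id];  // UuidHash::operator() now lives in uuid.cc.
}
 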
diff --git a/base/values.cc b/base/values.cc
index af08d13..8b3317a 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -14,6 +14,7 @@
 #include "base/check_op.h"
 #include "base/containers/checked_iterators.h"
 #include "base/containers/cxx20_erase_vector.h"
+#include "base/containers/map_util.h"
 #include "base/cxx20_to_address.h"
 #include "base/json/json_writer.h"
 #include "base/logging.h"
@@ -420,14 +421,11 @@
 
 const Value* Value::Dict::Find(StringPiece key) const {
   DCHECK(IsStringUTF8AllowingNoncharacters(key));
-
-  auto it = storage_.find(key);
-  return it != storage_.end() ? it->second.get() : nullptr;
+  return FindPtrOrNull(storage_, key);
 }
 
 Value* Value::Dict::Find(StringPiece key) {
-  auto it = storage_.find(key);
-  return it != storage_.end() ? it->second.get() : nullptr;
+  return FindPtrOrNull(storage_, key);
 }
 
 absl::optional<bool> Value::Dict::FindBool(StringPiece key) const {
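 
Both Dict::Find overloads now delegate to FindPtrOrNull from the newly added base/containers/map_util.h. Its exact signature is not shown in this CL; the sketch below only illustrates the assumed behavior: look up the key in a map whose mapped type is a smart pointer and return the raw pointer, or nullptr on a miss.

// Sketch only; the real base::FindPtrOrNull may differ in naming,
// constraints, and return-type handling.
template <typename Map, typename Key>
auto FindPtrOrNullSketch(const Map& map, const Key& key)
    -> decltype(map.begin()->second.get()) {
  auto it = map.find(key);
  return it != map.end() ? it->second.get() : nullptr;
}
 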
diff --git a/base/values.h b/base/values.h
index 465a7f4..9826ea9 100644
--- a/base/values.h
+++ b/base/values.h
@@ -400,7 +400,7 @@
     Value* Set(StringPiece key, Value&& value) &;
     Value* Set(StringPiece key, bool value) &;
     template <typename T>
-    Value* Set(StringPiece, const T*) = delete;
+    Value* Set(StringPiece, const T*) & = delete;
     Value* Set(StringPiece key, int value) &;
     Value* Set(StringPiece key, double value) &;
     Value* Set(StringPiece key, StringPiece value) &;
@@ -704,7 +704,7 @@
     void Append(Value&& value) &;
     void Append(bool value) &;
     template <typename T>
-    void Append(const T*) = delete;
+    void Append(const T*) & = delete;
     void Append(int value) &;
     void Append(double value) &;
     void Append(StringPiece value) &;
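 
The deleted pointer overloads of Set() and Append() gain the same `&` ref-qualifier as the setters around them. C++ forbids mixing ref-qualified and unqualified declarations that share a parameter list, so once `&&`-qualified chaining overloads exist (they are not shown in this hunk, but the rvalue cases in the new no-compile test below imply them), the deleted catch-alls must be qualified on each form as well. A standalone sketch of the pattern, with hypothetical names and trivial bodies:

#include <utility>

struct DictSketch {
  DictSketch& Set(const char*, bool) & { return *this; }            // lvalue form
  DictSketch&& Set(const char*, bool) && { return std::move(*this); }  // rvalue form
  // The deleted catch-alls stop T* from silently decaying to bool; one per
  // ref-qualified form.
  template <typename T>
  DictSketch& Set(const char*, const T*) & = delete;
  template <typename T>
  DictSketch&& Set(const char*, const T*) && = delete;
};

void Demo(DictSketch& d, int* p) {
  d.Set("flag", true);  // OK: bool overload.
  // d.Set("flag", p);  // Would not compile: selects the deleted overload.
}
 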
diff --git a/base/values_nocompile.nc b/base/values_nocompile.nc
new file mode 100644
index 0000000..1b79ec2
--- /dev/null
+++ b/base/values_nocompile.nc
@@ -0,0 +1,115 @@
+// Copyright 2013 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/values.h"
+
+#include <stdint.h>
+
+namespace base {
+
+// Trying to construct a Value from a pointer should not work or implicitly
+// convert to bool.
+void DisallowValueConstructionFromPointers() {
+  int* ptr = nullptr;
+
+  {
+    Value v(ptr);  // expected-error {{call to deleted constructor of 'Value'}}
+  }
+
+  {
+    Value::Dict dict;
+    dict.Set("moo", ptr);  // expected-error {{call to deleted member function 'Set'}}
+    dict.SetByDottedPath("moo.moo", ptr);  // expected-error {{call to deleted member function 'SetByDottedPath'}}
+
+    Value::Dict().Set("moo", ptr);  // expected-error {{call to deleted member function 'Set'}}
+    Value::Dict().SetByDottedPath("moo", ptr);  // expected-error {{call to deleted member function 'SetByDottedPath'}}
+  }
+
+  {
+    Value::List list;
+    list.Append(ptr);  // expected-error {{call to deleted member function 'Append'}}
+
+    Value::List().Append(ptr);  // expected-error {{call to deleted member function 'Append'}}
+  }
+}
+
+// Value (largely) follows the semantics of JSON, which does not support 64-bit
+// integers. Constructing a Value from a 64-bit integer should not work.
+
+void DisallowValueConstructionFromInt64() {
+  int64_t big_int = 0;
+
+  {
+    Value v(big_int);  // expected-error {{call to constructor of 'Value' is ambiguous}}
+  }
+
+  {
+    Value::Dict dict;
+    dict.Set("あいうえお", big_int);  // expected-error {{call to member function 'Set' is ambiguous}}
+    dict.SetByDottedPath("あいうえお", big_int);  // expected-error {{call to member function 'SetByDottedPath' is ambiguous}}
+
+    Value::Dict().Set("あいうえお", big_int);  // expected-error {{call to member function 'Set' is ambiguous}}
+    Value::Dict().SetByDottedPath("あいうえお", big_int);  // expected-error {{call to member function 'SetByDottedPath' is ambiguous}}
+  }
+
+  {
+    Value::List list;
+    list.Append(big_int);  // expected-error {{call to member function 'Append' is ambiguous}}
+
+    Value::List().Append(big_int);  // expected-error {{call to member function 'Append' is ambiguous}}
+  }
+}
+
+void TakesValueView(ValueView v) {}
+
+// Trying to construct a ValueView from a pointer should not work or implicitly
+// convert to bool.
+void DisallowValueViewConstructionFromPointers() {
+  int* ptr = nullptr;
+
+  ValueView v(ptr);  // expected-error {{call to deleted constructor of 'ValueView'}}
+  TakesValueView(ptr);  // expected-error {{conversion function from 'int *' to 'ValueView' invokes a deleted function}}
+}
+
+// Verify that obvious ways of unsafely constructing a ValueView from a
+// temporary are disallowed.
+void DisallowValueViewConstructionFromTemporaryString() {
+  [[maybe_unused]] ValueView v = std::string();  // expected-error {{object backing the pointer will be destroyed at the end of the full-expression}}
+  // Not an error here since the lifetime of the temporary lasts until the end
+  // of the full expression, i.e. until TakesValueView() returns.
+  TakesValueView(std::string());
+}
+
+void DisallowValueViewConstructionFromTemporaryBlob() {
+  [[maybe_unused]] ValueView v = Value::BlobStorage();  // expected-error {{object backing the pointer will be destroyed at the end of the full-expression}}
+  // Not an error here since the lifetime of the temporary lasts until the end
+  // of the full expression, i.e. until TakesValueView() returns.
+  TakesValueView(Value::BlobStorage());
+}
+
+void DisallowValueViewConstructionFromTemporaryDict() {
+  [[maybe_unused]] ValueView v = Value::Dict();  // expected-error {{object backing the pointer will be destroyed at the end of the full-expression}}
+  // Not an error here since the lifetime of the temporary lasts until the end
+  // of the full expression, i.e. until TakesValueView() returns.
+  TakesValueView(Value::Dict());
+}
+
+void DisallowValueViewConstructionFromTemporaryList() {
+  [[maybe_unused]] ValueView v = Value::List();  // expected-error {{object backing the pointer will be destroyed at the end of the full-expression}}
+  // Not an error here since the lifetime of the temporary lasts until the end
+  // of the full expression, i.e. until TakesValueView() returns.
+  TakesValueView(Value::List());
+}
+
+void DisallowValueViewConstructionFromTemporaryValue() {
+  [[maybe_unused]] ValueView v = Value();  // expected-error {{object backing the pointer will be destroyed at the end of the full-expression}}
+  // Not an error here since the lifetime of the temporary lasts until the end
+  // of the full expression, i.e. until TakesValueView() returns.
+  TakesValueView(Value());
+}
+
+}  // namespace base
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 5d2aa32..af4c9c2 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -69,16 +69,15 @@
 #endif
 
 TEST(ValuesTest, TestNothrow) {
-  static_assert(std::is_nothrow_move_constructible<Value>::value,
+  static_assert(std::is_nothrow_move_constructible_v<Value>,
                 "IsNothrowMoveConstructible");
-  static_assert(std::is_nothrow_default_constructible<Value>::value,
+  static_assert(std::is_nothrow_default_constructible_v<Value>,
                 "IsNothrowDefaultConstructible");
-  static_assert(std::is_nothrow_constructible<Value, std::string&&>::value,
+  static_assert(std::is_nothrow_constructible_v<Value, std::string&&>,
                 "IsNothrowMoveConstructibleFromString");
-  static_assert(
-      std::is_nothrow_constructible<Value, Value::BlobStorage&&>::value,
-      "IsNothrowMoveConstructibleFromBlob");
-  static_assert(std::is_nothrow_move_assignable<Value>::value,
+  static_assert(std::is_nothrow_constructible_v<Value, Value::BlobStorage&&>,
+                "IsNothrowMoveConstructibleFromBlob");
+  static_assert(std::is_nothrow_move_assignable_v<Value>,
                 "IsNothrowMoveAssignable");
 }
 
@@ -106,15 +105,15 @@
 }
 
 TEST(ValuesTest, ConstructFromPtrs) {
-  static_assert(!std::is_constructible<Value, int*>::value, "");
-  static_assert(!std::is_constructible<Value, const int*>::value, "");
-  static_assert(!std::is_constructible<Value, wchar_t*>::value, "");
-  static_assert(!std::is_constructible<Value, const wchar_t*>::value, "");
+  static_assert(!std::is_constructible_v<Value, int*>, "");
+  static_assert(!std::is_constructible_v<Value, const int*>, "");
+  static_assert(!std::is_constructible_v<Value, wchar_t*>, "");
+  static_assert(!std::is_constructible_v<Value, const wchar_t*>, "");
 
-  static_assert(std::is_constructible<Value, char*>::value, "");
-  static_assert(std::is_constructible<Value, const char*>::value, "");
-  static_assert(std::is_constructible<Value, char16_t*>::value, "");
-  static_assert(std::is_constructible<Value, const char16_t*>::value, "");
+  static_assert(std::is_constructible_v<Value, char*>, "");
+  static_assert(std::is_constructible_v<Value, const char*>, "");
+  static_assert(std::is_constructible_v<Value, char16_t*>, "");
+  static_assert(std::is_constructible_v<Value, const char16_t*>, "");
 }
 
 TEST(ValuesTest, ConstructInt) {
diff --git a/base/values_unittest.nc b/base/values_unittest.nc
deleted file mode 100644
index dbb12fd..0000000
--- a/base/values_unittest.nc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2013 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/values.h"
-
-#include <stdint.h>
-
-namespace base {
-
-void G(ValueView v) {}
-
-#if defined(NCTEST_VALUE_CTOR_PTR_DOES_NOT_CONVERT_TO_BOOL)  // [r"fatal error: call to deleted constructor"]
-
-void F() {
-  int* ptr = nullptr;
-  Value v(ptr);
-}
-
-#elif defined(NCTEST_DICT_SET_PTR_DOES_NOT_CONVERT_TO_BOOL)  // [r"fatal error: call to deleted member function"]
-
-void F() {
-  int* ptr = nullptr;
-  Value::Dict dict;
-  dict.Set("moo", ptr);
-}
-
-#elif defined(NCTEST_DICT_SETBYDOTTEDPATH_PTR_DOES_NOT_CONVERT_TO_BOOL)  // [r"fatal error: call to deleted member function"]
-
-void F() {
-  int* ptr = nullptr;
-  Value::Dict dict;
-  dict.SetByDottedPath("moo.moo", ptr);
-}
-
-#elif defined(NCTEST_LIST_APPEND_PTR_DOES_NOT_CONVERT_TO_BOOL)  // [r"fatal error: call to deleted member function"]
-
-void F() {
-  int* ptr = nullptr;
-  Value::List list;
-  list.Append(ptr);
-}
-
-#elif defined(NCTEST_VALUE_CTOR_INT64_T)  // [r"fatal error: ambiguous conversion for functional-style cast from 'int64_t' \(aka '.+?'\) to 'Value'"]
-
-Value F(int64_t value) {
-  return Value(value);
-}
-
-#elif defined(NCTEST_SET_INT64_T)  // [r"fatal error: call to member function 'Set' is ambiguous"]
-
-Value::Dict F(int64_t value) {
-  Value::Dict dict;
-  dict.Set("あいうえお", value);
-  return dict;
-}
-
-#elif defined(NCTEST_SETBYDOTTEDPATH_INT64_T)  // [r"fatal error: call to member function 'SetByDottedPath' is ambiguous"]
-
-Value::Dict F(int64_t value) {
-  Value::Dict dict;
-  dict.SetByDottedPath("あいうえお", value);
-  return dict;
-}
-
-#elif defined(NCTEST_LIST_APPEND_INT64_T)  // [r"fatal error: call to member function 'Append' is ambiguous"]
-
-Value::List F(int64_t value) {
-  Value::List list;
-  list.Append(value);
-  return list;
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_CONST_NON_CHAR_POINTER)  // [r"fatal error: conversion function from 'const int \*' to 'ValueView' invokes a deleted function"]
-
-void F() {
-  const int* ptr = nullptr;
-  ValueView v = ptr;
-  G(v);
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_NON_CHAR_POINTER)  // [r"fatal error: conversion function from 'int \*' to 'ValueView' invokes a deleted function"]
-
-void F() {
-  int* ptr = nullptr;
-  ValueView v = ptr;
-  G(v);
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_STRING_TEMPORARY)  // [r"fatal error: object backing the pointer will be destroyed at the end of the full-expression"]
-
-void F() {
-  ValueView v = std::string();
-  G(v);
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_BLOB_TEMPORARY)  // [r"fatal error: object backing the pointer will be destroyed at the end of the full-expression"]
-
-void F() {
-  ValueView v = Value::BlobStorage();
-  G(v);
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_DICT_TEMPORARY)  // [r"fatal error: object backing the pointer will be destroyed at the end of the full-expression"]
-
-void F() {
-  ValueView v = Value::Dict();
-  G(v);
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_LIST_TEMPORARY)  // [r"fatal error: object backing the pointer will be destroyed at the end of the full-expression"]
-
-void F() {
-  ValueView v = Value::List();
-  G(v);
-}
-
-#elif defined(NCTEST_VALUEVIEW_FROM_VALUE_TEMPORARY)  // [r"fatal error: object backing the pointer will be destroyed at the end of the full-expression"]
-
-void F() {
-  ValueView v = Value();
-  G(v);
-}
-
-#endif
-
-}  // namespace base
diff --git a/base/version.h b/base/version.h
index 2740219..f0cb75d 100644
--- a/base/version.h
+++ b/base/version.h
@@ -27,7 +27,7 @@
   Version(const Version& other);
 
   // Initializes from a decimal dotted version number, like "0.1.1".
-  // Each component is limited to a uint16_t. Call IsValid() to learn
+  // Each component is limited to a uint32_t. Call IsValid() to learn
   // the outcome.
   explicit Version(StringPiece version_str);
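 
The corrected comment above reflects that Version components are parsed as 32-bit, not 16-bit, values, and that parse failures surface only through IsValid(). A brief hedged sketch; the input strings are arbitrary examples:

#include "base/check.h"
#include "base/version.h"

void VersionSketch() {
  base::Version ok("1.2.65536");  // 65536 exceeds uint16_t but fits uint32_t.
  base::Version bad("1.2.x");     // Not a decimal dotted version number.
  CHECK(ok.IsValid());
  CHECK(!bad.IsValid());
}
 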
 
diff --git a/base/win/access_control_list_unittest.cc b/base/win/access_control_list_unittest.cc
index 53332ea..3369b32 100644
--- a/base/win/access_control_list_unittest.cc
+++ b/base/win/access_control_list_unittest.cc
@@ -12,7 +12,7 @@
 #include <vector>
 
 #include "base/check.h"
-#include "base/strings/stringprintf.h"
+#include "base/strings/string_number_conversions_win.h"
 #include "base/win/scoped_localalloc.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -213,8 +213,8 @@
   EXPECT_EQ(ConvertAclToSddl(acl), kEvent);
   // ACL has a maximum capacity of 2^16-1 bytes or 2^16-1 ACEs. Force a fail.
   while (ace_list.size() < 0x10000) {
-    auto sid = Sid::FromSddlString(
-        base::StringPrintf(L"S-1-5-1234-%zu", ace_list.size()).c_str());
+    auto sid =
+        Sid::FromSddlString(L"S-1-5-1234-" + NumberToWString(ace_list.size()));
     ASSERT_TRUE(sid);
     ace_list.emplace_back(*sid, SecurityAccessMode::kGrant, GENERIC_ALL, 0);
   }
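 
This test (and several others in this CL) replaces wide StringPrintf formatting with concatenation plus base::NumberToWString from base/strings/string_number_conversions_win.h. A minimal sketch of the pattern; the SID prefix and RID are arbitrary test values:

#include <string>

#include "base/strings/string_number_conversions_win.h"

// Builds an SDDL-style SID string without printf-style wide formatting.
std::wstring MakeTestSidString(size_t rid) {
  return L"S-1-5-1234-" + base::NumberToWString(rid);
}
 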
diff --git a/base/win/access_token.cc b/base/win/access_token.cc
index a46080c..c648851 100644
--- a/base/win/access_token.cc
+++ b/base/win/access_token.cc
@@ -11,6 +11,7 @@
 
 #include "base/numerics/checked_math.h"
 #include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
 
 namespace base::win {
 
@@ -230,9 +231,10 @@
   luid.LowPart = luid_.LowPart;
   luid.HighPart = luid_.HighPart;
   DWORD size = std::size(name);
-  if (!::LookupPrivilegeName(nullptr, &luid, name, &size))
-    return base::StringPrintf(L"%08X-%08X", luid.HighPart, luid.LowPart);
-  return name;
+  return ::LookupPrivilegeName(nullptr, &luid, name, &size)
+             ? name
+             : ASCIIToWide(
+                   StringPrintf("%08lX-%08lX", luid.HighPart, luid.LowPart));
 }
 
 bool AccessToken::Privilege::IsEnabled() const {
diff --git a/base/win/event_trace_consumer_unittest.cc b/base/win/event_trace_consumer_unittest.cc
index 2f1f92c..1909dff 100644
--- a/base/win/event_trace_consumer_unittest.cc
+++ b/base/win/event_trace_consumer_unittest.cc
@@ -16,8 +16,8 @@
 #include "base/files/scoped_temp_dir.h"
 #include "base/logging.h"
 #include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions_win.h"
 #include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
 #include "base/win/event_trace_controller.h"
 #include "base/win/event_trace_provider.h"
 #include "base/win/scoped_handle.h"
@@ -81,7 +81,7 @@
 class EtwTraceConsumerBaseTest : public testing::Test {
  public:
   EtwTraceConsumerBaseTest()
-      : session_name_(StringPrintf(L"TestSession-%d", GetCurrentProcId())) {}
+      : session_name_(L"TestSession-" + NumberToWString(GetCurrentProcId())) {}
 
   void SetUp() override {
     // Cleanup any potentially dangling sessions.
diff --git a/base/win/event_trace_controller_unittest.cc b/base/win/event_trace_controller_unittest.cc
index 57467fc..d4ee08f 100644
--- a/base/win/event_trace_controller_unittest.cc
+++ b/base/win/event_trace_controller_unittest.cc
@@ -13,8 +13,8 @@
 #include "base/files/scoped_temp_dir.h"
 #include "base/logging.h"
 #include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions_win.h"
 #include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
 #include "base/system/sys_info.h"
 #include "base/win/event_trace_controller.h"
 #include "base/win/event_trace_provider.h"
@@ -105,7 +105,7 @@
 class EtwTraceControllerTest : public testing::Test {
  public:
   EtwTraceControllerTest()
-      : session_name_(StringPrintf(L"TestSession-%d", GetCurrentProcId())) {}
+      : session_name_(L"TestSession-" + NumberToWString(GetCurrentProcId())) {}
 
   void SetUp() override {
     EtwTraceProperties ignore;
diff --git a/base/win/message_window.cc b/base/win/message_window.cc
index 1f6f6cc..8c79b7e 100644
--- a/base/win/message_window.cc
+++ b/base/win/message_window.cc
@@ -6,12 +6,23 @@
 
 #include <windows.h>
 
+#include <map>
 #include <utility>
 
+#include "base/check.h"
+#include "base/check_op.h"
+#include "base/compiler_specific.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/raw_ref.h"
+#include "base/no_destructor.h"
 #include "base/strings/string_util.h"
+#include "base/thread_annotations.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_local.h"
 #include "base/win/current_module.h"
+#include "base/win/resource_exhaustion.h"
 #include "base/win/wrapped_window_proc.h"
 
 // To avoid conflicts with the macro from the Windows SDK...
@@ -19,6 +30,55 @@
 
 const wchar_t kMessageWindowClassName[] = L"Chrome_MessageWindow";
 
+namespace {
+
+// This class can be accessed from multiple threads; this is handled by each
+// thread having a different instance.
+class MessageWindowMap {
+ public:
+  static MessageWindowMap& GetInstanceForCurrentThread() {
+    static base::NoDestructor<base::ThreadLocalOwnedPointer<MessageWindowMap>>
+        instance;
+    if (!instance->Get()) {
+      instance->Set(base::WrapUnique(new MessageWindowMap));
+    }
+    return *(instance->Get());
+  }
+
+  MessageWindowMap(const MessageWindowMap&) = delete;
+  MessageWindowMap& operator=(const MessageWindowMap&) = delete;
+
+  // Each key should only be inserted once.
+  void Insert(HWND hwnd, base::win::MessageWindow& message_window) {
+    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+    CHECK(map_.emplace(hwnd, message_window).second);
+  }
+
+  // Erase should only be called on an existing key.
+  void Erase(HWND hwnd) {
+    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+    // Check that exactly one element is erased from the map.
+    CHECK_EQ(map_.erase(hwnd), 1u);
+  }
+
+  // Will return nullptr if `hwnd` is not in the map.
+  base::win::MessageWindow* Get(HWND hwnd) const {
+    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+    if (auto search = map_.find(hwnd); search != map_.end()) {
+      return &(search->second.get());
+    }
+    return nullptr;
+  }
+
+ private:
+  MessageWindowMap() = default;
+  THREAD_CHECKER(thread_checker_);
+  std::map<HWND, const raw_ref<base::win::MessageWindow>> map_
+      GUARDED_BY_CONTEXT(thread_checker_);
+};
+
+}  // namespace
+
 namespace base {
 namespace win {
 
@@ -62,6 +122,7 @@
   if (atom_ == 0) {
     PLOG(ERROR)
         << "Failed to register the window class for a message-only window";
+    OnResourceExhausted();
   }
 }
 
@@ -127,45 +188,41 @@
                                            UINT message,
                                            WPARAM wparam,
                                            LPARAM lparam) {
-  MessageWindow* self =
-      reinterpret_cast<MessageWindow*>(GetWindowLongPtr(hwnd, GWLP_USERDATA));
+  // This can be called from different threads for different windows;
+  // each thread has its own MessageWindowMap instance.
+  auto& message_window_map = MessageWindowMap::GetInstanceForCurrentThread();
+  MessageWindow* self = message_window_map.Get(hwnd);
 
-  switch (message) {
-    // Set up the self before handling WM_CREATE.
-    case WM_CREATE: {
-      CREATESTRUCT* cs = reinterpret_cast<CREATESTRUCT*>(lparam);
-      self = reinterpret_cast<MessageWindow*>(cs->lpCreateParams);
+  // CreateWindow will send a WM_CREATE message during window creation.
+  if (UNLIKELY(!self && message == WM_CREATE)) {
+    CREATESTRUCT* const cs = reinterpret_cast<CREATESTRUCT*>(lparam);
+    self = reinterpret_cast<MessageWindow*>(cs->lpCreateParams);
 
-      // Make |hwnd| available to the message handler. At this point the control
-      // hasn't returned from CreateWindow() yet.
-      self->window_ = hwnd;
+    // Tell the MessageWindow instance the HWND that CreateWindow has produced.
+    self->window_ = hwnd;
 
-      // Store pointer to the self to the window's user data.
-      SetLastError(ERROR_SUCCESS);
-      LONG_PTR result = SetWindowLongPtr(hwnd, GWLP_USERDATA,
-                                         reinterpret_cast<LONG_PTR>(self));
-      CHECK(result != 0 || GetLastError() == ERROR_SUCCESS);
-      break;
-    }
-
-    // Clear the pointer to stop calling the self once WM_DESTROY is
-    // received.
-    case WM_DESTROY: {
-      SetLastError(ERROR_SUCCESS);
-      LONG_PTR result = SetWindowLongPtr(hwnd, GWLP_USERDATA, NULL);
-      CHECK(result != 0 || GetLastError() == ERROR_SUCCESS);
-      break;
-    }
+    // Associate the MessageWindow instance with the HWND in the map.
+    message_window_map.Insert(hwnd, *self);
   }
 
-  // Handle the message.
-  if (self) {
-    LRESULT message_result;
-    if (self->message_callback_.Run(message, wparam, lparam, &message_result))
-      return message_result;
+  if (UNLIKELY(!self)) {
+    return DefWindowProc(hwnd, message, wparam, lparam);
   }
 
-  return DefWindowProc(hwnd, message, wparam, lparam);
+  LRESULT message_result = {};
+  if (!self->message_callback_.Run(message, wparam, lparam, &message_result)) {
+    message_result = DefWindowProc(hwnd, message, wparam, lparam);
+  }
+
+  if (UNLIKELY(message == WM_DESTROY)) {
+    // Tell the MessageWindow instance that it no longer has an HWND.
+    self->window_ = nullptr;
+
+    // Remove this HWND's MessageWindow from the map since it is going away.
+    message_window_map.Erase(hwnd);
+  }
+
+  return message_result;
 }
 
 }  // namespace win
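 
The rewritten window procedure above stops stashing the MessageWindow pointer in GWLP_USERDATA and instead keeps a per-thread HWND-to-MessageWindow map. The per-thread lazy singleton it relies on follows a common base pattern; a generic sketch, assuming only the base headers this file already includes:

#include "base/memory/ptr_util.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"

// One lazily created instance of T per thread, never destroyed at shutdown;
// this mirrors MessageWindowMap::GetInstanceForCurrentThread() above.
template <typename T>
T& GetPerThreadInstanceSketch() {
  static base::NoDestructor<base::ThreadLocalOwnedPointer<T>> slot;
  if (!slot->Get()) {
    slot->Set(base::WrapUnique(new T));
  }
  return *slot->Get();
}
 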
diff --git a/base/win/post_async_results.h b/base/win/post_async_results.h
index c44a74d..b5476fa 100644
--- a/base/win/post_async_results.h
+++ b/base/win/post_async_results.h
@@ -59,7 +59,7 @@
 // information.
 template <typename T>
 using AsyncResultsT = std::conditional_t<
-    std::is_convertible<AsyncAbiT<T>, IUnknown*>::value,
+    std::is_convertible_v<AsyncAbiT<T>, IUnknown*>,
     Microsoft::WRL::ComPtr<std::remove_pointer_t<AsyncAbiT<T>>>,
     AsyncAbiT<T>>;
 
diff --git a/base/win/registry.cc b/base/win/registry.cc
index 9e67b35..7b9dcb9 100644
--- a/base/win/registry.cc
+++ b/base/win/registry.cc
@@ -4,6 +4,7 @@
 
 #include "base/win/registry.h"
 
+#include <ntstatus.h>
 #include <stddef.h>
 
 #include <algorithm>
@@ -23,6 +24,8 @@
 #include "base/win/scoped_handle.h"
 #include "base/win/shlwapi.h"
 
+extern "C" NTSTATUS WINAPI NtDeleteKey(IN HANDLE KeyHandle);
+
 namespace base::win {
 
 namespace {
@@ -192,17 +195,7 @@
 }
 
 LONG RegKey::Open(HKEY rootkey, const wchar_t* subkey, REGSAM access) {
-  DCHECK(rootkey && subkey && access);
-  HKEY subhkey = nullptr;
-
-  LONG result = RegOpenKeyEx(rootkey, subkey, 0, access, &subhkey);
-  if (result == ERROR_SUCCESS) {
-    Close();
-    key_ = subhkey;
-    wow64access_ = access & kWow64AccessMask;
-  }
-
-  return result;
+  return Open(rootkey, subkey, /*options=*/0, access);
 }
 
 LONG RegKey::OpenKey(const wchar_t* relative_key_name, REGSAM access) {
@@ -264,20 +257,15 @@
          ERROR_SUCCESS;
 }
 
-DWORD RegKey::GetValueCount() const {
+base::expected<DWORD, LONG> RegKey::GetValueCount() const {
   DWORD count = 0;
   LONG result =
       RegQueryInfoKey(key_, nullptr, nullptr, nullptr, nullptr, nullptr,
                       nullptr, &count, nullptr, nullptr, nullptr, nullptr);
-  return (result == ERROR_SUCCESS) ? count : 0;
-}
-
-FILETIME RegKey::GetLastWriteTime() const {
-  FILETIME last_write_time;
-  LONG result = RegQueryInfoKey(key_, nullptr, nullptr, nullptr, nullptr,
-                                nullptr, nullptr, nullptr, nullptr, nullptr,
-                                nullptr, &last_write_time);
-  return (result == ERROR_SUCCESS) ? last_write_time : FILETIME{};
+  if (result == ERROR_SUCCESS) {
+    return base::ok(count);
+  }
+  return base::unexpected(result);
 }
 
 LONG RegKey::GetValueNameAt(DWORD index, std::wstring* name) const {
@@ -292,51 +280,34 @@
   return r;
 }
 
-LONG RegKey::DeleteKey(const wchar_t* name) {
+LONG RegKey::DeleteKey(const wchar_t* name, RecursiveDelete recursive) {
   DCHECK(name);
 
+  if (!Valid()) {
+    return ERROR_INVALID_HANDLE;
+  }
+
   // Verify the key exists before attempting delete to replicate previous
   // behavior.
-  // `RegOpenKeyEx()` will return an error if `key_` is invalid.
-  HKEY subkey = nullptr;
-  LONG result =
-      RegOpenKeyEx(key_, name, 0, READ_CONTROL | wow64access_, &subkey);
-  if (result != ERROR_SUCCESS) {
-    return result;
-  }
-  RegCloseKey(subkey);
-
-  return RegDelRecurse(key_, name, wow64access_);
-}
-
-LONG RegKey::DeleteEmptyKey(const wchar_t* name) {
-  DCHECK(name);
-
-  // `RegOpenKeyEx()` will return an error if `key_` is invalid.
-  HKEY target_key = nullptr;
-  LONG result =
-      RegOpenKeyEx(key_, name, 0, KEY_READ | wow64access_, &target_key);
-
+  RegKey target_key;
+  LONG result = target_key.Open(key_, name, REG_OPTION_OPEN_LINK,
+                                wow64access_ | KEY_QUERY_VALUE | DELETE);
   if (result != ERROR_SUCCESS) {
     return result;
   }
 
-  DWORD count = 0;
-  result =
-      RegQueryInfoKey(target_key, nullptr, nullptr, nullptr, nullptr, nullptr,
-                      nullptr, &count, nullptr, nullptr, nullptr, nullptr);
-
-  RegCloseKey(target_key);
-
-  if (result != ERROR_SUCCESS) {
-    return result;
+  if (recursive.value()) {
+    target_key.Close();
+    return RegDelRecurse(key_, name, wow64access_);
   }
 
-  if (count == 0) {
-    return RegDeleteKeyEx(key_, name, wow64access_, 0);
+  // Next, try to delete the key if it is a symbolic link.
+  if (auto deleted_link = target_key.DeleteIfLink(); deleted_link.has_value()) {
+    return deleted_link.value();
   }
 
-  return ERROR_DIR_NOT_EMPTY;
+  // It's not a symbolic link, so try to delete it without recursing.
+  return ::RegDeleteKeyEx(target_key.key_, L"", wow64access_, 0);
 }
 
 LONG RegKey::DeleteValue(const wchar_t* value_name) {
@@ -490,61 +461,104 @@
   return true;
 }
 
+LONG RegKey::Open(HKEY rootkey,
+                  const wchar_t* subkey,
+                  DWORD options,
+                  REGSAM access) {
+  DCHECK(options == 0 || options == REG_OPTION_OPEN_LINK) << options;
+  DCHECK(rootkey && subkey && access);
+  HKEY subhkey = nullptr;
+
+  LONG result = RegOpenKeyEx(rootkey, subkey, options, access, &subhkey);
+  if (result == ERROR_SUCCESS) {
+    Close();
+    key_ = subhkey;
+    wow64access_ = access & kWow64AccessMask;
+  }
+
+  return result;
+}
+
+expected<bool, LONG> RegKey::IsLink() const {
+  DWORD value_type = 0;
+  LONG result = ::RegQueryValueEx(key_, L"SymbolicLinkValue",
+                                  /*lpReserved=*/nullptr, &value_type,
+                                  /*lpData=*/nullptr, /*lpcbData=*/nullptr);
+  if (result == ERROR_FILE_NOT_FOUND) {
+    return ok(false);
+  }
+  if (result == ERROR_SUCCESS) {
+    return ok(value_type == REG_LINK);
+  }
+  return unexpected(result);
+}
+
+absl::optional<LONG> RegKey::DeleteIfLink() {
+  if (auto is_link = IsLink(); !is_link.has_value()) {
+    return is_link.error();  // Failed to determine if a link.
+  } else if (is_link.value() == false) {
+    return absl::nullopt;  // Not a link.
+  }
+
+  const NTSTATUS delete_result = ::NtDeleteKey(key_);
+  if (delete_result == STATUS_SUCCESS) {
+    return ERROR_SUCCESS;
+  }
+  using RtlNtStatusToDosErrorFunction = ULONG(WINAPI*)(NTSTATUS);
+  static const RtlNtStatusToDosErrorFunction rtl_nt_status_to_dos_error =
+      reinterpret_cast<RtlNtStatusToDosErrorFunction>(::GetProcAddress(
+          ::GetModuleHandle(L"ntdll.dll"), "RtlNtStatusToDosError"));
+  // The most common cause of failure is the presence of subkeys, which is
+  // reported as `STATUS_CANNOT_DELETE` and maps to `ERROR_ACCESS_DENIED`.
+  return rtl_nt_status_to_dos_error
+             ? static_cast<LONG>(rtl_nt_status_to_dos_error(delete_result))
+             : ERROR_ACCESS_DENIED;
+}
+
 // static
 LONG RegKey::RegDelRecurse(HKEY root_key, const wchar_t* name, REGSAM access) {
-  // First, see if the key can be deleted without having to recurse.
-  LONG result = RegDeleteKeyEx(root_key, name, access, 0);
+  // First, open the key; taking care not to traverse symbolic links.
+  RegKey target_key;
+  LONG result = target_key.Open(
+      root_key, name, REG_OPTION_OPEN_LINK,
+      access | KEY_ENUMERATE_SUB_KEYS | KEY_QUERY_VALUE | DELETE);
+  if (result == ERROR_FILE_NOT_FOUND) {  // The key doesn't exist.
+    return ERROR_SUCCESS;
+  }
+  if (result != ERROR_SUCCESS) {
+    return result;
+  }
+
+  // Next, try to delete the key if it is a symbolic link.
+  if (auto deleted_link = target_key.DeleteIfLink(); deleted_link.has_value()) {
+    return deleted_link.value();
+  }
+
+  // It's not a symbolic link, so try to delete it without recursing.
+  result = ::RegDeleteKeyEx(target_key.key_, L"", access, 0);
   if (result == ERROR_SUCCESS) {
     return result;
   }
 
-  HKEY target_key = nullptr;
-  result = RegOpenKeyEx(root_key, name, 0, KEY_ENUMERATE_SUB_KEYS | access,
-                        &target_key);
-
-  if (result == ERROR_FILE_NOT_FOUND) {
-    return ERROR_SUCCESS;
-  }
-  if (result != ERROR_SUCCESS)
-    return result;
-
-  std::wstring subkey_name(name);
-
-  // Check for an ending slash and add one if it is missing.
-  if (!subkey_name.empty() && subkey_name.back() != '\\') {
-    subkey_name.push_back('\\');
-  }
-
-  // Enumerate the keys
-  result = ERROR_SUCCESS;
-  const DWORD kMaxKeyNameLength = MAX_PATH;
-  const size_t base_key_length = subkey_name.length();
-  std::wstring key_name;
-  while (result == ERROR_SUCCESS) {
+  // Enumerate the keys.
+  const DWORD kMaxKeyNameLength = 256;  // Includes string terminator.
+  auto subkey_buffer = std::make_unique<wchar_t[]>(kMaxKeyNameLength);
+  while (true) {
     DWORD key_size = kMaxKeyNameLength;
-    result =
-        RegEnumKeyEx(target_key, 0, WriteInto(&key_name, kMaxKeyNameLength),
-                     &key_size, nullptr, nullptr, nullptr, nullptr);
-
-    if (result != ERROR_SUCCESS) {
+    if (::RegEnumKeyEx(target_key.key_, 0, &subkey_buffer[0], &key_size,
+                       nullptr, nullptr, nullptr, nullptr) != ERROR_SUCCESS) {
       break;
     }
-
-    key_name.resize(key_size);
-    subkey_name.resize(base_key_length);
-    subkey_name += key_name;
-
-    if (RegDelRecurse(root_key, subkey_name.c_str(), access) != ERROR_SUCCESS) {
+    CHECK_LT(key_size, kMaxKeyNameLength);
+    CHECK_EQ(subkey_buffer[key_size], L'\0');
+    if (RegDelRecurse(target_key.key_, &subkey_buffer[0], access) !=
+        ERROR_SUCCESS) {
       break;
     }
   }
 
-  RegCloseKey(target_key);
-
   // Try again to delete the key.
-  result = RegDeleteKeyEx(root_key, name, access, 0);
-
-  return result;
+  return ::RegDeleteKeyEx(target_key.key_, L"", access, 0);
 }
 
 // RegistryValueIterator ------------------------------------------------------
diff --git a/base/win/registry.h b/base/win/registry.h
index 4c7b1ae..ea7be2a 100644
--- a/base/win/registry.h
+++ b/base/win/registry.h
@@ -13,7 +13,10 @@
 
 #include "base/base_export.h"
 #include "base/functional/callback_forward.h"
+#include "base/types/expected.h"
+#include "base/types/strong_alias.h"
 #include "base/win/windows_types.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
 namespace win {
@@ -78,12 +81,9 @@
   // occurs while attempting to access it.
   bool HasValue(const wchar_t* value_name) const;
 
-  // Returns the number of values for this key, or 0 if the number cannot be
-  // determined.
-  DWORD GetValueCount() const;
-
-  // Returns the last write time or 0 on failure.
-  FILETIME GetLastWriteTime() const;
+  // Returns the number of values for this key, or an error code if the number
+  // cannot be determined.
+  base::expected<DWORD, LONG> GetValueCount() const;
 
   // Determines the nth value's name.
   LONG GetValueNameAt(DWORD index, std::wstring* name) const;
@@ -91,13 +91,12 @@
   // True while the key is valid.
   bool Valid() const { return key_ != nullptr; }
 
-  // Kills a key and everything that lives below it; please be careful when
-  // using it.
-  LONG DeleteKey(const wchar_t* name);
-
-  // Deletes an empty subkey.  If the subkey has subkeys or values then this
-  // will fail.
-  LONG DeleteEmptyKey(const wchar_t* name);
+  // Kills a key and, by default, everything that lives below it; please be
+  // careful when using it. `recursive` = false may be used to prevent
+  // recursion, in which case the key is only deleted if it has no subkeys.
+  using RecursiveDelete = base::StrongAlias<class RecursiveDeleteTag, bool>;
+  LONG DeleteKey(const wchar_t* name,
+                 RecursiveDelete recursive = RecursiveDelete(true));
 
   // Deletes a single value within the key.
   LONG DeleteValue(const wchar_t* name);
@@ -154,6 +153,26 @@
  private:
   class Watcher;
 
+  // Opens the key `subkey` under `rootkey` with the given options and
+  // access rights. `options` may be 0 or `REG_OPTION_OPEN_LINK`. Returns
+  // ERROR_SUCCESS or a Windows error code.
+  [[nodiscard]] LONG Open(HKEY rootkey,
+                          const wchar_t* subkey,
+                          DWORD options,
+                          REGSAM access);
+
+  // Returns true if the key is a symbolic link, false if it is not, or a
+  // Windows error code in case of a failure to determine. `this` *MUST* have
+  // been opened via at least `Open(..., REG_OPTION_OPEN_LINK,
+  // KEY_QUERY_VALUE);`.
+  expected<bool, LONG> IsLink() const;
+
+  // Deletes the key if it is a symbolic link. Returns ERROR_SUCCESS if the key
+  // was a link and was deleted, a Windows error code if checking the key or
+  // deleting it failed, or `nullopt` if the key exists and is not a symbolic
+  // link.
+  absl::optional<LONG> DeleteIfLink();
+
   // Recursively deletes a key and all of its subkeys.
   static LONG RegDelRecurse(HKEY root_key, const wchar_t* name, REGSAM access);
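 
With these declarations, GetValueCount() reports failures through base::expected instead of folding them into 0, and DeleteEmptyKey() is replaced by a non-recursive DeleteKey(). A hedged caller-side sketch; the key path is hypothetical:

#include "base/win/registry.h"

void RegistrySketch() {
  base::win::RegKey key(HKEY_CURRENT_USER, L"Software\\Example",
                        KEY_READ | KEY_WRITE);

  // Errors are now visible to the caller rather than reported as zero values.
  if (const auto count = key.GetValueCount(); count.has_value()) {
    // *count values exist under the key.
  } else {
    // count.error() holds the Windows error code (a LONG).
  }

  // Equivalent of the removed DeleteEmptyKey(): only succeeds if the subkey
  // has no children.
  key.DeleteKey(L"Sub", base::win::RegKey::RecursiveDelete(false));
}
 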
 
diff --git a/base/win/registry_unittest.cc b/base/win/registry_unittest.cc
index 2191443..2935845 100644
--- a/base/win/registry_unittest.cc
+++ b/base/win/registry_unittest.cc
@@ -21,11 +21,13 @@
 #include "base/run_loop.h"
 #include "base/strings/strcat.h"
 #include "base/test/bind.h"
+#include "base/test/gmock_expected_support.h"
 #include "base/test/mock_callback.h"
 #include "base/test/task_environment.h"
 #include "base/test/test_mock_time_task_runner.h"
 #include "base/test/test_reg_util_win.h"
 #include "base/threading/simple_thread.h"
+#include "base/win/win_util.h"
 #include "base/win/windows_version.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -43,8 +45,8 @@
   RegistryTest() : root_key_(std::wstring(L"Software\\") + kRootKey) {}
 
   void SetUp() override {
-    ASSERT_NO_FATAL_FAILURE(
-        registry_override_.OverrideRegistry(HKEY_CURRENT_USER));
+    ASSERT_NO_FATAL_FAILURE(registry_override_.OverrideRegistry(
+        HKEY_CURRENT_USER, &override_path_));
 
     // Create the test's root key.
     RegKey key(HKEY_CURRENT_USER, L"", KEY_CREATE_SUB_KEY);
@@ -58,9 +60,12 @@
   // use by a test.
   const std::wstring& root_key() const { return root_key_; }
 
+  const std::wstring& override_path() const { return override_path_; }
+
  private:
   registry_util::RegistryOverrideManager registry_override_;
   const std::wstring root_key_;
+  std::wstring override_path_;
 };
 
 }  // namespace
@@ -84,7 +89,7 @@
   ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(kDWORDValueName, kDWORDData));
   ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(kInt64ValueName, &kInt64Data,
                                           sizeof(kInt64Data), REG_QWORD));
-  EXPECT_EQ(3U, key.GetValueCount());
+  EXPECT_THAT(key.GetValueCount(), base::test::ValueIs(3U));
   EXPECT_TRUE(key.HasValue(kStringValueName));
   EXPECT_TRUE(key.HasValue(kDWORDValueName));
   EXPECT_TRUE(key.HasValue(kInt64ValueName));
@@ -113,7 +118,7 @@
   ASSERT_EQ(ERROR_SUCCESS, key.DeleteValue(kStringValueName));
   ASSERT_EQ(ERROR_SUCCESS, key.DeleteValue(kDWORDValueName));
   ASSERT_EQ(ERROR_SUCCESS, key.DeleteValue(kInt64ValueName));
-  EXPECT_EQ(0U, key.GetValueCount());
+  EXPECT_THAT(key.GetValueCount(), base::test::ValueIs(0U));
   EXPECT_FALSE(key.HasValue(kStringValueName));
   EXPECT_FALSE(key.HasValue(kDWORDValueName));
   EXPECT_FALSE(key.HasValue(kInt64ValueName));
@@ -193,6 +198,48 @@
   EXPECT_FALSE(iterator.Valid());
 }
 
+TEST_F(RegistryTest, NonRecursiveDelete) {
+  RegKey key;
+  // Create root_key()
+  //                  \->Bar (TestValue)
+  //                     \->Foo (TestValue)
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, root_key().c_str(),
+                                    KEY_CREATE_SUB_KEY));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Bar", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(L"TestValue", L"TestData"));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Foo", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(L"TestValue", L"TestData"));
+  key.Close();
+
+  const std::wstring bar_path = root_key() + L"\\Bar";
+  // Non-recursive delete of Bar from root_key() should fail.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, root_key().c_str(), KEY_QUERY_VALUE));
+  ASSERT_NE(ERROR_SUCCESS,
+            key.DeleteKey(L"Bar", RegKey::RecursiveDelete(false)));
+  key.Close();
+  ASSERT_TRUE(
+      RegKey(HKEY_CURRENT_USER, bar_path.c_str(), KEY_QUERY_VALUE).Valid());
+
+  // Non-recursive delete of Bar from itself should fail.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, bar_path.c_str(), KEY_QUERY_VALUE));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteKey(L"", RegKey::RecursiveDelete(false)));
+  key.Close();
+  ASSERT_TRUE(
+      RegKey(HKEY_CURRENT_USER, root_key().c_str(), KEY_QUERY_VALUE).Valid());
+
+  // Non-recursive delete of the subkey and then root_key() should succeed.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, bar_path.c_str(), KEY_QUERY_VALUE));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.DeleteKey(L"Foo", RegKey::RecursiveDelete(false)));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(L"", RegKey::RecursiveDelete(false)));
+  key.Close();
+  ASSERT_FALSE(
+      RegKey(HKEY_CURRENT_USER, bar_path.c_str(), KEY_QUERY_VALUE).Valid());
+}
+
 TEST_F(RegistryTest, RecursiveDelete) {
   RegKey key;
   // Create root_key()
@@ -225,13 +272,6 @@
             key.Open(HKEY_CURRENT_USER, key_path.c_str(), KEY_READ));
 
   ASSERT_EQ(ERROR_SUCCESS,
-            key.Open(HKEY_CURRENT_USER, root_key().c_str(), KEY_WRITE));
-  ASSERT_NE(ERROR_SUCCESS, key.DeleteEmptyKey(L""));
-  ASSERT_NE(ERROR_SUCCESS, key.DeleteEmptyKey(L"Bar\\Foo"));
-  ASSERT_NE(ERROR_SUCCESS, key.DeleteEmptyKey(L"Bar"));
-  ASSERT_EQ(ERROR_SUCCESS, key.DeleteEmptyKey(L"Foo"));
-
-  ASSERT_EQ(ERROR_SUCCESS,
             key.Open(HKEY_CURRENT_USER, key_path.c_str(), KEY_CREATE_SUB_KEY));
   ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Bar", KEY_WRITE));
   ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Foo", KEY_WRITE));
@@ -460,10 +500,86 @@
   static const wchar_t kFooName[] = L"foo";
 
   EXPECT_EQ(key.DeleteKey(kFooName), ERROR_INVALID_HANDLE);
-  EXPECT_EQ(key.DeleteEmptyKey(kFooName), ERROR_INVALID_HANDLE);
   EXPECT_EQ(key.DeleteValue(kFooName), ERROR_INVALID_HANDLE);
 }
 
+// A test harness for tests of RegKey::DeleteKey; parameterized on whether to
+// perform non-recursive or recursive deletes.
+class DeleteKeyRegistryTest
+    : public RegistryTest,
+      public ::testing::WithParamInterface<RegKey::RecursiveDelete> {
+ protected:
+  DeleteKeyRegistryTest() = default;
+
+ private:
+};
+
+// Test that DeleteKey does not follow symbolic links.
+TEST_P(DeleteKeyRegistryTest, DoesNotFollowLinks) {
+  // Create a subkey that should not be deleted.
+  std::wstring target_path = root_key() + L"\\LinkTarget";
+  {
+    RegKey target;
+    ASSERT_EQ(target.Create(HKEY_CURRENT_USER, target_path.c_str(), KEY_WRITE),
+              ERROR_SUCCESS);
+    ASSERT_EQ(target.WriteValue(L"IsTarget", 1U), ERROR_SUCCESS);
+  }
+
+  // Create a link to the above key.
+  std::wstring source_path = root_key() + L"\\LinkSource";
+  {
+    HKEY link_handle = {};
+    ASSERT_EQ(RegCreateKeyEx(HKEY_CURRENT_USER, source_path.c_str(), 0, nullptr,
+                             REG_OPTION_CREATE_LINK | REG_OPTION_NON_VOLATILE,
+                             KEY_WRITE, nullptr, &link_handle, nullptr),
+              ERROR_SUCCESS);
+    RegKey link(std::exchange(link_handle, HKEY{}));
+    ASSERT_TRUE(link.Valid());
+
+    std::wstring user_sid;
+    ASSERT_TRUE(GetUserSidString(&user_sid));
+
+    std::wstring value =
+        base::StrCat({L"\\Registry\\User\\", user_sid, L"\\", override_path(),
+                      L"\\", root_key(), L"\\LinkTarget"});
+    ASSERT_EQ(link.WriteValue(L"SymbolicLinkValue", value.data(),
+                              value.size() * sizeof(wchar_t), REG_LINK),
+              ERROR_SUCCESS);
+  }
+
+  // Verify that the link works.
+  {
+    RegKey link;
+    ASSERT_EQ(link.Open(HKEY_CURRENT_USER, source_path.c_str(), KEY_READ),
+              ERROR_SUCCESS);
+    DWORD value = 0;
+    ASSERT_EQ(link.ReadValueDW(L"IsTarget", &value), ERROR_SUCCESS);
+    ASSERT_EQ(value, 1U);
+  }
+
+  // Now delete the link and ensure that it was deleted, but not the target.
+  ASSERT_EQ(RegKey(HKEY_CURRENT_USER, root_key().c_str(), KEY_READ)
+                .DeleteKey(L"LinkSource", GetParam()),
+            ERROR_SUCCESS);
+  {
+    RegKey source;
+    ASSERT_NE(source.Open(HKEY_CURRENT_USER, source_path.c_str(), KEY_READ),
+              ERROR_SUCCESS);
+  }
+  {
+    RegKey target;
+    ASSERT_EQ(target.Open(HKEY_CURRENT_USER, target_path.c_str(), KEY_READ),
+              ERROR_SUCCESS);
+  }
+}
+
+INSTANTIATE_TEST_SUITE_P(NonRecursive,
+                         DeleteKeyRegistryTest,
+                         ::testing::Values(RegKey::RecursiveDelete(false)));
+INSTANTIATE_TEST_SUITE_P(Recursive,
+                         DeleteKeyRegistryTest,
+                         ::testing::Values(RegKey::RecursiveDelete(true)));
+
 // A test harness for tests that use HKLM to test WoW redirection and such.
 // TODO(https://crbug.com/377917): The tests here that write to the registry are
 // disabled because they need work to handle parallel runs of different tests.
diff --git a/base/win/resource_exhaustion.cc b/base/win/resource_exhaustion.cc
new file mode 100644
index 0000000..20fd08e
--- /dev/null
+++ b/base/win/resource_exhaustion.cc
@@ -0,0 +1,31 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/resource_exhaustion.h"
+
+#include <utility>
+
+#include "base/functional/callback.h"
+#include "base/no_destructor.h"
+
+namespace base::win {
+
+namespace {
+
+OnResourceExhaustedFunction g_resource_exhausted_function = nullptr;
+
+}  // namespace
+
+void SetOnResourceExhaustedFunction(
+    OnResourceExhaustedFunction on_resource_exhausted) {
+  g_resource_exhausted_function = on_resource_exhausted;
+}
+
+void OnResourceExhausted() {
+  // Stop execution here if there is no callback installed.
+  CHECK(g_resource_exhausted_function) << "system resource exhausted.";
+  g_resource_exhausted_function();
+}
+
+}  // namespace base::win
diff --git a/base/win/resource_exhaustion.h b/base/win/resource_exhaustion.h
new file mode 100644
index 0000000..7a7608d
--- /dev/null
+++ b/base/win/resource_exhaustion.h
@@ -0,0 +1,26 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_RESOURCE_EXHAUSTION_H_
+#define BASE_WIN_RESOURCE_EXHAUSTION_H_
+
+#include "base/base_export.h"
+
+namespace base::win {
+
+using OnResourceExhaustedFunction = void (*)();
+
+// Sets a callback to be run in the event that a system resource is exhausted
+// such that a system restart is the only recovery. Typically, there is no
+// point in letting the process continue execution when this happens.
+BASE_EXPORT void SetOnResourceExhaustedFunction(
+    OnResourceExhaustedFunction on_resource_exhausted);
+
+// Reports that some system resource has been exhausted. A callback, if
+// provided, will be run to allow for application-specific handling.
+BASE_EXPORT void OnResourceExhausted();
+
+}  // namespace base::win
+
+#endif  // BASE_WIN_RESOURCE_EXHAUSTION_H_
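 
This new API is what message_window.cc above calls when window-class registration fails, which in practice indicates resource exhaustion that only a restart can fix. A sketch of how an embedder might install a handler at startup; the handler itself is hypothetical:

#include "base/win/resource_exhaustion.h"

namespace {

// Hypothetical handler: a real embedder might surface a "please restart"
// message before exiting.
void HandleResourceExhaustion() {
  // ... report, then terminate ...
}

}  // namespace

void InstallResourceExhaustionHandler() {
  base::win::SetOnResourceExhaustedFunction(&HandleResourceExhaustion);
}

If no handler has been installed, OnResourceExhausted() simply CHECK-fails, per resource_exhaustion.cc above.
 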
diff --git a/base/win/scoped_hglobal.h b/base/win/scoped_hglobal.h
index d7919b9..e61f816 100644
--- a/base/win/scoped_hglobal.h
+++ b/base/win/scoped_hglobal.h
@@ -9,41 +9,37 @@
 
 #include <stddef.h>
 
+#include <utility>
+
 namespace base {
 namespace win {
 
 // Like ScopedHandle except for HGLOBAL.
-template <class T>
+template <class Ptr>
 class ScopedHGlobal {
  public:
-  explicit ScopedHGlobal(HGLOBAL glob) : glob_(glob) {
-    data_ = static_cast<T>(GlobalLock(glob_));
-  }
+  explicit ScopedHGlobal(HGLOBAL glob)
+      : glob_(glob), data_(static_cast<Ptr>(GlobalLock(glob_))) {}
 
   ScopedHGlobal(const ScopedHGlobal&) = delete;
   ScopedHGlobal& operator=(const ScopedHGlobal&) = delete;
 
   ~ScopedHGlobal() { GlobalUnlock(glob_); }
 
-  T get() { return data_; }
+  Ptr data() { return data_; }
+  size_t size() const { return GlobalSize(glob_); }
 
-  size_t Size() const { return GlobalSize(glob_); }
-
-  T operator->() const {
+  Ptr operator->() const {
     assert(data_ != 0);
     return data_;
   }
 
-  T release() {
-    T data = data_;
-    data_ = NULL;
-    return data;
-  }
+  Ptr release() { return std::exchange(data_, nullptr); }
 
  private:
   HGLOBAL glob_;
 
-  T data_;
+  Ptr data_;
 };
 
 }  // namespace win
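 
The refactored ScopedHGlobal exposes data()/size() (size() is in bytes, from GlobalSize()) and a release() based on std::exchange. A hedged usage sketch; the caller and the assumption that the block holds wide text are hypothetical:

#include <string>

#include "base/win/scoped_hglobal.h"

// Locks |glob| for the duration of the scope and copies its contents out as a
// wide string; the destructor calls GlobalUnlock().
std::wstring ReadGlobalText(HGLOBAL glob) {
  base::win::ScopedHGlobal<wchar_t*> locked(glob);
  if (!locked.data()) {
    return std::wstring();
  }
  return std::wstring(locked.data(), locked.size() / sizeof(wchar_t));
}
 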
diff --git a/base/win/scoped_winrt_initializer.cc b/base/win/scoped_winrt_initializer.cc
index 85f83da..4c93dcf 100644
--- a/base/win/scoped_winrt_initializer.cc
+++ b/base/win/scoped_winrt_initializer.cc
@@ -5,61 +5,14 @@
 #include "base/win/scoped_winrt_initializer.h"
 
 #include <roapi.h>
-#include <windows.h>
-
-#include <ostream>
 
 #include "base/check_op.h"
-#include "base/threading/scoped_thread_priority.h"
 #include "base/win/com_init_util.h"
-#include "base/win/core_winrt_util.h"
 
 namespace base::win {
 
-namespace {
-
-FARPROC LoadComBaseFunction(const char* function_name) {
-  static HMODULE const handle = []() {
-    // Mitigate the issues caused by loading DLLs on a background thread
-    // (http://crbug/973868).
-    SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY();
-    return ::LoadLibraryEx(L"combase.dll", nullptr,
-                           LOAD_LIBRARY_SEARCH_SYSTEM32);
-  }();
-  return handle ? ::GetProcAddress(handle, function_name) : nullptr;
-}
-
-decltype(&::RoInitialize) GetRoInitializeFunction() {
-  static decltype(&::RoInitialize) const function =
-      reinterpret_cast<decltype(&::RoInitialize)>(
-          LoadComBaseFunction("RoInitialize"));
-  return function;
-}
-
-decltype(&::RoUninitialize) GetRoUninitializeFunction() {
-  static decltype(&::RoUninitialize) const function =
-      reinterpret_cast<decltype(&::RoUninitialize)>(
-          LoadComBaseFunction("RoUninitialize"));
-  return function;
-}
-
-HRESULT CallRoInitialize(RO_INIT_TYPE init_type) {
-  auto ro_initialize_func = GetRoInitializeFunction();
-  if (!ro_initialize_func)
-    return E_FAIL;
-  return ro_initialize_func(init_type);
-}
-
-void CallRoUninitialize() {
-  auto ro_uninitialize_func = GetRoUninitializeFunction();
-  if (ro_uninitialize_func)
-    ro_uninitialize_func();
-}
-
-}  // namespace
-
 ScopedWinrtInitializer::ScopedWinrtInitializer()
-    : hr_(CallRoInitialize(RO_INIT_MULTITHREADED)) {
+    : hr_(::RoInitialize(RO_INIT_MULTITHREADED)) {
   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
 #if DCHECK_IS_ON()
   if (SUCCEEDED(hr_))
@@ -72,7 +25,7 @@
 ScopedWinrtInitializer::~ScopedWinrtInitializer() {
   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
   if (SUCCEEDED(hr_))
-    CallRoUninitialize();
+    ::RoUninitialize();
 }
 
 bool ScopedWinrtInitializer::Succeeded() const {
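 
The hunk above drops the manual LoadLibrary/GetProcAddress shims and calls RoInitialize()/RoUninitialize() directly, presumably because combase.dll is always available on supported Windows versions. Usage is unchanged; a brief sketch of a typical call site:

#include "base/win/scoped_winrt_initializer.h"

void UseWinRtApis() {
  base::win::ScopedWinrtInitializer winrt_initializer;
  if (!winrt_initializer.Succeeded()) {
    return;  // WinRT is unavailable or initialization failed.
  }
  // ... call WinRT APIs; RoUninitialize() runs when |winrt_initializer| goes
  // out of scope ...
}
 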
diff --git a/base/win/security_util.cc b/base/win/security_util.cc
index 49d4693..a7c162e 100644
--- a/base/win/security_util.cc
+++ b/base/win/security_util.cc
@@ -10,6 +10,7 @@
 #include "base/check.h"
 #include "base/files/file_path.h"
 #include "base/logging.h"
+#include "base/threading/scoped_blocking_call.h"
 #include "base/win/access_control_list.h"
 #include "base/win/scoped_handle.h"
 #include "base/win/security_descriptor.h"
@@ -30,6 +31,8 @@
   if (sids.empty()) {
     return true;
   }
+  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+                                                base::BlockingType::MAY_BLOCK);
 
   absl::optional<SecurityDescriptor> sd =
       SecurityDescriptor::FromFile(path, DACL_SECURITY_INFORMATION);
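 
GrantAccessToPath now annotates itself with base::ScopedBlockingCall so the thread pool knows the call may block on file I/O. The annotation pattern in isolation, sketched for a hypothetical helper:

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/threading/scoped_blocking_call.h"

// Hypothetical helper doing blocking disk I/O: scope a ScopedBlockingCall
// around the blocking region so the scheduler can compensate.
bool TouchFileOnDisk(const base::FilePath& path) {
  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
                                                base::BlockingType::MAY_BLOCK);
  return base::WriteFile(path, "");
}
 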
diff --git a/base/win/security_util_unittest.cc b/base/win/security_util_unittest.cc
index 14d6139..477f743 100644
--- a/base/win/security_util_unittest.cc
+++ b/base/win/security_util_unittest.cc
@@ -12,7 +12,7 @@
 
 #include "base/files/file_util.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/strings/stringprintf.h"
+#include "base/strings/string_number_conversions_win.h"
 #include "base/win/scoped_handle.h"
 #include "base/win/scoped_localalloc.h"
 #include "base/win/sid.h"
@@ -99,8 +99,8 @@
       GrantAccessToPath(path, *sids, FILE_GENERIC_READ, NO_INHERITANCE, false));
   std::vector<Sid> large_sid_list;
   while (large_sid_list.size() < 0x10000) {
-    auto sid = Sid::FromSddlString(
-        base::StringPrintf(L"S-1-5-1234-%zu", large_sid_list.size()).c_str());
+    auto sid = Sid::FromSddlString(L"S-1-5-1234-" +
+                                   NumberToWString(large_sid_list.size()));
     ASSERT_TRUE(sid);
     large_sid_list.emplace_back(std::move(*sid));
   }
diff --git a/base/win/shortcut.cc b/base/win/shortcut.cc
index 38d79a0..5500017 100644
--- a/base/win/shortcut.cc
+++ b/base/win/shortcut.cc
@@ -9,6 +9,7 @@
 #include <shlobj.h>
 #include <wrl/client.h>
 
+#include "base/files/block_tests_writing_to_special_dirs.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/strings/string_util.h"
@@ -72,6 +73,9 @@
                                 ShortcutOperation operation) {
   ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
 
+  if (!BlockTestsWritingToSpecialDirs::CanWriteToPath(shortcut_path)) {
+    return false;
+  }
   // Make sure the parent directories exist when creating the shortcut.
   if (operation == ShortcutOperation::kCreateAlways &&
       !base::CreateDirectory(shortcut_path.DirName())) {
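Note on this change: judging by the header name, BlockTestsWritingToSpecialDirs::CanWriteToPath() is a test-only guard that returns false for paths under protected special directories, so shortcut creation fails early in tests instead of writing into a real user profile; this is an inference from the name, not something the diff states.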
diff --git a/base/win/win_includes_unittest.cc b/base/win/win_includes_unittest.cc
index 830d01c..1b51aee 100644
--- a/base/win/win_includes_unittest.cc
+++ b/base/win/win_includes_unittest.cc
@@ -5,9 +5,9 @@
 // This file ensures that these header files don't include Windows.h and can
 // compile without including Windows.h. This helps to improve compile times.
 
-#include "base/allocator/partition_allocator/partition_alloc-inl.h"
-#include "base/allocator/partition_allocator/partition_tls.h"
-#include "base/allocator/partition_allocator/spinning_mutex.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc-inl.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_tls.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.h"
 #include "base/atomicops.h"
 #include "base/files/file_util.h"
 #include "base/files/platform_file.h"
diff --git a/base/win/window_enumerator.cc b/base/win/window_enumerator.cc
new file mode 100644
index 0000000..def62f4
--- /dev/null
+++ b/base/win/window_enumerator.cc
@@ -0,0 +1,74 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/window_enumerator.h"
+
+#include <windows.h>
+
+#include <string>
+
+#include "base/functional/callback.h"
+
+namespace base::win {
+
+namespace {
+
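+// ::EnumChildWindows keeps calling its callback for as long as the callback
+// returns TRUE, so return the negation of the filter's "stop enumerating"
+// result (the filter returns true to stop).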
+BOOL CALLBACK OnWindowProc(HWND hwnd, LPARAM lparam) {
+  return !reinterpret_cast<WindowEnumeratorCallback*>(lparam)->Run(hwnd);
+}
+
+}  // namespace
+
+void EnumerateChildWindows(HWND parent, WindowEnumeratorCallback filter) {
+  ::EnumChildWindows(parent, &OnWindowProc, reinterpret_cast<LPARAM>(&filter));
+}
+
+bool IsTopmostWindow(HWND hwnd) {
+  return ::GetWindowLong(hwnd, GWL_EXSTYLE) & WS_EX_TOPMOST;
+}
+
+bool IsSystemDialog(HWND hwnd) {
+  static constexpr wchar_t kSystemDialogClass[] = L"#32770";
+  return GetWindowClass(hwnd) == kSystemDialogClass;
+}
+
+bool IsShellWindow(HWND hwnd) {
+  const std::wstring class_name = GetWindowClass(hwnd);
+
+  // 'Button' is the Start button, 'Shell_TrayWnd' is the taskbar, and
+  // 'Shell_SecondaryTrayWnd' is the taskbar on non-primary displays.
+  return class_name == L"Button" || class_name == L"Shell_TrayWnd" ||
+         class_name == L"Shell_SecondaryTrayWnd";
+}
+
+std::wstring GetWindowClass(HWND hwnd) {
+  constexpr int kMaxWindowClassNameLength = 256;
+  std::wstring window_class(kMaxWindowClassNameLength, L'\0');
+  const int name_len =
+      ::GetClassName(hwnd, window_class.data(), kMaxWindowClassNameLength);
+  if (name_len <= 0 || name_len > kMaxWindowClassNameLength) {
+    return {};
+  }
+  window_class.resize(static_cast<size_t>(name_len));
+  return window_class;
+}
+
+std::wstring GetWindowTextString(HWND hwnd) {
+  auto num_chars = ::GetWindowTextLength(hwnd);
+  if (num_chars <= 0) {
+    return {};
+  }
+  std::wstring text(static_cast<size_t>(num_chars), L'\0');
+  // MSDN says that GetWindowText will not write anything other than a string
+  // terminator to the last position in the buffer.
+  auto len = ::GetWindowText(hwnd, text.data(), num_chars + 1);
+  if (len <= 0) {
+    return std::wstring();
+  }
+  // GetWindowText may return a shorter string than reported above.
+  text.resize(static_cast<size_t>(len));
+  return text;
+}
+
+}  // namespace base::win
diff --git a/base/win/window_enumerator.h b/base/win/window_enumerator.h
new file mode 100644
index 0000000..663df52
--- /dev/null
+++ b/base/win/window_enumerator.h
@@ -0,0 +1,39 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WINDOW_ENUMERATOR_H_
+#define BASE_WIN_WINDOW_ENUMERATOR_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/functional/callback.h"
+#include "base/win/windows_types.h"
+
+namespace base::win {
+
+// Enumerates immediate child windows of `parent`, running `filter` for each
+// window until `filter` returns true.
+using WindowEnumeratorCallback = base::RepeatingCallback<bool(HWND hwnd)>;
+BASE_EXPORT void EnumerateChildWindows(HWND parent,
+                                       WindowEnumeratorCallback filter);
+
+// Returns true if `hwnd` is an always-on-top window.
+BASE_EXPORT bool IsTopmostWindow(HWND hwnd);
+
+// Returns true if `hwnd` is a system dialog.
+BASE_EXPORT bool IsSystemDialog(HWND hwnd);
+
+// Returns true if `hwnd` is a window owned by the Windows shell.
+BASE_EXPORT bool IsShellWindow(HWND hwnd);
+
+// Returns the class name of `hwnd` or an empty string on error.
+BASE_EXPORT std::wstring GetWindowClass(HWND hwnd);
+
+// Returns the window text for `hwnd`, or an empty string on error.
+BASE_EXPORT std::wstring GetWindowTextString(HWND hwnd);
+
+}  // namespace base::win
+
+#endif  // BASE_WIN_WINDOW_ENUMERATOR_H_
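The header only declares the API; a minimal caller-side sketch (a hypothetical helper, relying on the synchronous enumeration implemented in window_enumerator.cc above):

  // Returns true if any immediate child of `parent` is a system dialog.
  // Enumeration stops at the first match because the callback returns true.
  bool HasSystemDialogChild(HWND parent) {
    bool found = false;
    base::win::EnumerateChildWindows(
        parent, base::BindRepeating(
                    [](bool* found, HWND hwnd) {
                      *found = base::win::IsSystemDialog(hwnd);
                      return *found;
                    },
                    &found));
    return found;
  }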
diff --git a/base/win/window_enumerator_unittest.cc b/base/win/window_enumerator_unittest.cc
new file mode 100644
index 0000000..692d874
--- /dev/null
+++ b/base/win/window_enumerator_unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/window_enumerator.h"
+
+#include <windows.h>
+
+#include <string>
+#include <vector>
+
+#include "base/test/bind.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base::win {
+
+TEST(WindowEnumeratorTest, EnumerateTopLevelWindows) {
+  EnumerateChildWindows(
+      ::GetDesktopWindow(), base::BindLambdaForTesting([&](HWND hwnd) {
+        const std::wstring window_class = GetWindowClass(hwnd);
+        EXPECT_EQ(window_class, [&]() {
+          constexpr int kMaxWindowClassNameLength = 256;
+          wchar_t buffer[kMaxWindowClassNameLength + 1] = {0};
+          const int name_len = ::GetClassName(hwnd, buffer, std::size(buffer));
+          if (name_len <= 0 || name_len > kMaxWindowClassNameLength) {
+            return std::wstring();
+          }
+          return std::wstring(&buffer[0], static_cast<size_t>(name_len));
+        }());
+
+        EXPECT_EQ(IsTopmostWindow(hwnd),
+                  (::GetWindowLong(hwnd, GWL_EXSTYLE) & WS_EX_TOPMOST) != 0);
+
+        EXPECT_EQ(IsSystemDialog(hwnd), window_class == L"#32770");
+
+        EXPECT_EQ(IsShellWindow(hwnd),
+                  window_class == L"Button" ||
+                      window_class == L"Shell_TrayWnd" ||
+                      window_class == L"Shell_SecondaryTrayWnd");
+        EXPECT_EQ(GetWindowTextString(hwnd), [&]() {
+          const int num_chars = ::GetWindowTextLength(hwnd);
+          if (!num_chars) {
+            return std::wstring();
+          }
+          std::vector<wchar_t> text(static_cast<size_t>(num_chars) + 1);
+          if (!::GetWindowText(hwnd, &text.front(),
+                               static_cast<int>(text.size()))) {
+            return std::wstring();
+          }
+          return std::wstring(text.begin(), --text.end());
+        }());
+        return false;
+      }));
+}
+
+}  // namespace base::win
diff --git a/base/win/windows_types.h b/base/win/windows_types.h
index 41ae16a..d148f9c 100644
--- a/base/win/windows_types.h
+++ b/base/win/windows_types.h
@@ -267,7 +267,9 @@
 
 // The trailing white-spaces after this macro are required, for compatibility
 // with the definition in winnt.h.
+// clang-format off
 #define RTL_SRWLOCK_INIT {0}                            // NOLINT
+// clang-format on
 #define SRWLOCK_INIT RTL_SRWLOCK_INIT
 
 // clang-format on
diff --git a/base/win/winrt_foundation_helpers.h b/base/win/winrt_foundation_helpers.h
index 185ba09..26def75 100644
--- a/base/win/winrt_foundation_helpers.h
+++ b/base/win/winrt_foundation_helpers.h
@@ -59,7 +59,7 @@
 // It queries the internals of Windows::Foundation to obtain this information.
 template <typename TComplex>
 using StorageType = std::conditional_t<
-    std::is_convertible<AbiType<TComplex>, IUnknown*>::value,
+    std::is_convertible_v<AbiType<TComplex>, IUnknown*>,
     Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiType<TComplex>>>,
     AbiType<TComplex>>;
 
@@ -67,7 +67,7 @@
 // type is not a pointer to IUnknown.
 template <typename TComplex>
 using OptionalStorageType = std::conditional_t<
-    std::is_convertible<AbiType<TComplex>, IUnknown*>::value,
+    std::is_convertible_v<AbiType<TComplex>, IUnknown*>,
     Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiType<TComplex>>>,
     absl::optional<AbiType<TComplex>>>;
 
diff --git a/base/write_build_date_header.py b/base/write_build_date_header.py
index 9484bd0..9ce3152 100755
--- a/base/write_build_date_header.py
+++ b/base/write_build_date_header.py
@@ -9,31 +9,29 @@
 import os
 import sys
 
-
 def main():
-  argument_parser = argparse.ArgumentParser()
-  argument_parser.add_argument('output_file', help='The file to write to')
-  argument_parser.add_argument('timestamp')
-  args = argument_parser.parse_args()
+    argument_parser = argparse.ArgumentParser()
+    argument_parser.add_argument('output_file', help='The file to write to')
+    argument_parser.add_argument('timestamp')
+    args = argument_parser.parse_args()
 
-  date_val = int(args.timestamp)
-  date = datetime.datetime.utcfromtimestamp(date_val)
-  output = ('// Generated by //base/write_build_date_header.py\n'
-            '#ifndef BASE_GENERATED_BUILD_DATE_TIMESTAMP \n'
-            f'#define BASE_GENERATED_BUILD_DATE_TIMESTAMP {date_val}'
-            f'  // {date:%b %d %Y %H:%M:%S}\n'
-            '#endif  // BASE_GENERATED_BUILD_DATE_TIMESTAMP \n')
+    date_val = int(args.timestamp)
+    date = datetime.datetime.fromtimestamp(date_val, tz=datetime.timezone.utc)
+    output = ('// Generated by //base/write_build_date_header.py\n'
+              '#ifndef BASE_GENERATED_BUILD_DATE_TIMESTAMP \n'
+              f'#define BASE_GENERATED_BUILD_DATE_TIMESTAMP {date_val}'
+              f'  // {date:%b %d %Y %H:%M:%S}\n'
+              '#endif  // BASE_GENERATED_BUILD_DATE_TIMESTAMP \n')
 
-  current_contents = ''
-  if os.path.isfile(args.output_file):
-    with open(args.output_file, 'r') as current_file:
-      current_contents = current_file.read()
+    current_contents = ''
+    if os.path.isfile(args.output_file):
+        with open(args.output_file, 'r') as current_file:
+            current_contents = current_file.read()
 
-  if current_contents != output:
-    with open(args.output_file, 'w') as output_file:
-      output_file.write(output)
-  return 0
-
+    if current_contents != output:
+        with open(args.output_file, 'w') as output_file:
+            output_file.write(output)
+    return 0
 
 if __name__ == '__main__':
-  sys.exit(main())
+    sys.exit(main())
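Note on this change: the script body is reindented from two to four spaces, and datetime.datetime.utcfromtimestamp(), which is deprecated as of Python 3.12, is replaced with the timezone-aware datetime.datetime.fromtimestamp(..., tz=datetime.timezone.utc); both forms produce the same UTC timestamp string in the generated header.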